Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux...
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 #include <net/bluetooth/amp.h>
42
43 bool disable_ertm;
44
45 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
46 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
47
48 static LIST_HEAD(chan_list);
49 static DEFINE_RWLOCK(chan_list_lock);
50
51 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
52 u8 code, u8 ident, u16 dlen, void *data);
53 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 void *data);
55 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
56 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
57
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
61 /* ---- L2CAP channels ---- */
62
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 u16 cid)
65 {
66 struct l2cap_chan *c;
67
68 list_for_each_entry(c, &conn->chan_l, list) {
69 if (c->dcid == cid)
70 return c;
71 }
72 return NULL;
73 }
74
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 u16 cid)
77 {
78 struct l2cap_chan *c;
79
80 list_for_each_entry(c, &conn->chan_l, list) {
81 if (c->scid == cid)
82 return c;
83 }
84 return NULL;
85 }
86
87 /* Find channel with given SCID.
88 * Returns locked channel. */
89 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
90 u16 cid)
91 {
92 struct l2cap_chan *c;
93
94 mutex_lock(&conn->chan_lock);
95 c = __l2cap_get_chan_by_scid(conn, cid);
96 if (c)
97 l2cap_chan_lock(c);
98 mutex_unlock(&conn->chan_lock);
99
100 return c;
101 }
102
103 /* Find channel with given DCID.
104 * Returns locked channel.
105 */
106 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
107 u16 cid)
108 {
109 struct l2cap_chan *c;
110
111 mutex_lock(&conn->chan_lock);
112 c = __l2cap_get_chan_by_dcid(conn, cid);
113 if (c)
114 l2cap_chan_lock(c);
115 mutex_unlock(&conn->chan_lock);
116
117 return c;
118 }
119
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
121 u8 ident)
122 {
123 struct l2cap_chan *c;
124
125 list_for_each_entry(c, &conn->chan_l, list) {
126 if (c->ident == ident)
127 return c;
128 }
129 return NULL;
130 }
131
132 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
133 u8 ident)
134 {
135 struct l2cap_chan *c;
136
137 mutex_lock(&conn->chan_lock);
138 c = __l2cap_get_chan_by_ident(conn, ident);
139 if (c)
140 l2cap_chan_lock(c);
141 mutex_unlock(&conn->chan_lock);
142
143 return c;
144 }
145
146 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
147 {
148 struct l2cap_chan *c;
149
150 list_for_each_entry(c, &chan_list, global_l) {
151 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
152 return c;
153 }
154 return NULL;
155 }
156
/* Bind @chan to PSM @psm for source address @src.  When @psm is zero,
 * auto-allocate a free dynamic PSM in the 0x1001-0x10ff range (PSM
 * values are odd, hence the += 2 walk).
 *
 * Returns 0 on success, -EADDRINUSE if the requested PSM is already
 * bound for @src, or -EINVAL if no dynamic PSM is available.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
189
190 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
191 {
192 write_lock(&chan_list_lock);
193
194 chan->scid = scid;
195
196 write_unlock(&chan_list_lock);
197
198 return 0;
199 }
200
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
202 {
203 u16 cid = L2CAP_CID_DYN_START;
204
205 for (; cid < L2CAP_CID_DYN_END; cid++) {
206 if (!__l2cap_get_chan_by_scid(conn, cid))
207 return cid;
208 }
209
210 return 0;
211 }
212
/* Set the channel state and notify the channel ops callback.
 * Callers are responsible for any socket locking (see
 * l2cap_state_change for the locked variant).
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
221
/* Locked wrapper around __l2cap_state_change: takes the socket lock
 * for the duration of the state transition.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
230
/* Record @err on the channel's socket.  Callers hold the socket lock
 * (see l2cap_chan_set_err for the locked variant).
 */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
237
/* Locked wrapper around __l2cap_chan_set_err. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
246
/* Arm the ERTM retransmission timer, but only if the monitor timer is
 * not already pending (the two are mutually exclusive) and a timeout
 * has been negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
255
/* Arm the ERTM monitor timer, cancelling any pending retransmission
 * timer first (the two are mutually exclusive).
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
264
/* Find the skb in @head whose ERTM txseq control field equals @seq,
 * or NULL if no queued frame carries that sequence number.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
277
278 /* ---- L2CAP sequence number lists ---- */
279
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
286 * allocs or frees.
287 */
288
/* Initialize a sequence list sized for a transmit window of @size.
 * Returns 0 on success or -ENOMEM if the backing array cannot be
 * allocated.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Empty list: every slot holds the CLEAR sentinel */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
311
/* Release the backing array of a sequence list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
316
/* Return true if @seq is currently a member of @seq_list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
323
/* Remove @seq from the list.  Removing the head is O(1); removing an
 * interior element walks the singly-linked chain to find its
 * predecessor.  Returns @seq on success or L2CAP_SEQ_LIST_CLEAR if the
 * list is empty or @seq is not present.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removing the last element empties the list entirely */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
357
/* Pop and return the head of the list, or L2CAP_SEQ_LIST_CLEAR when
 * the list is empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
363
/* Empty the list, resetting every slot to the CLEAR sentinel.
 * A no-op on an already-empty list.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
377
/* Append @seq to the tail of the list in constant time.  Silently
 * ignores sequence numbers already present.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
395
/* Delayed-work handler for the channel timer: close the channel with an
 * errno chosen from its current state.  Lock order is conn->chan_lock
 * then the channel lock; ops->close runs after the channel lock is
 * dropped.  The final l2cap_chan_put drops the reference held by the
 * timer.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
425
/* Allocate and initialize a new channel: zeroed, refcounted (kref at 1),
 * registered on the global channel list, with its timer initialized and
 * state BT_OPEN.  Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
453
/* kref release callback: unlink the channel from the global list and
 * free it.  Invoked when the last reference is dropped via
 * l2cap_chan_put.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
466
/* Take a reference on the channel. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
473
/* Drop a reference; the last put frees the channel via
 * l2cap_chan_destroy.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
480
/* Reset negotiable channel parameters (FCS, retransmission and window
 * settings, security level) to their protocol defaults and force the
 * link active for transmissions.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
492
/* Attach @chan to @conn: assign source/destination CIDs and MTUs
 * according to the channel type (LE, connection-oriented, connection-
 * less, A2MP fixed, or raw), seed the best-effort flow-spec defaults,
 * take a channel reference, and link it into the connection's channel
 * list.  Callers hold conn->chan_lock (see l2cap_chan_add).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort flow spec defaults for the local endpoint */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
548
/* Locked wrapper around __l2cap_chan_add. */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
555
556 void l2cap_chan_del(struct l2cap_chan *chan, int err)
557 {
558 struct l2cap_conn *conn = chan->conn;
559
560 __clear_chan_timer(chan);
561
562 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
563
564 if (conn) {
565 struct amp_mgr *mgr = conn->hcon->amp_mgr;
566 /* Delete from channel list */
567 list_del(&chan->list);
568
569 l2cap_chan_put(chan);
570
571 chan->conn = NULL;
572
573 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
574 hci_conn_put(conn->hcon);
575
576 if (mgr && mgr->bredr_chan == chan)
577 mgr->bredr_chan = NULL;
578 }
579
580 if (chan->hs_hchan) {
581 struct hci_chan *hs_hchan = chan->hs_hchan;
582
583 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
584 amp_disconnect_logical_link(hs_hchan);
585 }
586
587 chan->ops->teardown(chan, err);
588
589 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
590 return;
591
592 switch(chan->mode) {
593 case L2CAP_MODE_BASIC:
594 break;
595
596 case L2CAP_MODE_ERTM:
597 __clear_retrans_timer(chan);
598 __clear_monitor_timer(chan);
599 __clear_ack_timer(chan);
600
601 skb_queue_purge(&chan->srej_q);
602
603 l2cap_seq_list_free(&chan->srej_list);
604 l2cap_seq_list_free(&chan->retrans_list);
605
606 /* fall through */
607
608 case L2CAP_MODE_STREAMING:
609 skb_queue_purge(&chan->tx_q);
610 break;
611 }
612
613 return;
614 }
615
/* Close @chan according to its current state: listening channels are
 * simply torn down; connected/configuring ACL channels send a
 * Disconnect Request and arm the channel timer; a channel still in
 * CONNECT2 rejects the pending connection (security block if deferred
 * setup, bad PSM otherwise) before deletion; all other states delete
 * the channel directly.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
	       sk);

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
672
/* Map the channel type, PSM and security level to the HCI
 * authentication requirement: raw channels use dedicated bonding, SDP
 * never bonds (and its sec level is bumped from LOW to SDP as a side
 * effect), and everything else uses general bonding, with MITM added
 * for BT_SECURITY_HIGH.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	if (chan->chan_type == L2CAP_CHAN_RAW) {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
		if (chan->sec_level == BT_SECURITY_LOW)
			chan->sec_level = BT_SECURITY_SDP;

		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
	} else {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	}
}
703
704 /* Service level security */
/* Service level security */
/* Request the security level required by @chan on its ACL link.
 * Return value is that of hci_conn_security.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
714
/* Allocate the next signalling command identifier for @conn, wrapping
 * within the kernel-reserved 1-128 range under conn->lock.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
736
/* Build and transmit an L2CAP signalling command on @conn at maximum
 * HCI priority.  Silently drops the command if the skb cannot be
 * built.  Non-flushable ACL framing is used when the controller
 * supports it.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
758
759 static bool __chan_is_moving(struct l2cap_chan *chan)
760 {
761 return chan->move_state != L2CAP_MOVE_STABLE &&
762 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
763 }
764
/* Transmit @skb for @chan.  If the channel has completed a move to a
 * high-speed controller the frame goes out on the HS logical link (or
 * is dropped if that link is gone); otherwise it is sent on the ACL,
 * non-flushable when the channel is not flushable and the controller
 * supports it.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
791
/* Decode a 16-bit enhanced control field into @control.  Fields that
 * do not apply to the decoded frame type (SAR/txseq for S-frames,
 * poll/super for I-frames) are zeroed.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
815
/* Decode a 32-bit extended control field into @control; counterpart of
 * __unpack_enhanced_control for channels using extended control.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
839
/* Decode and strip the control field at the head of @skb, choosing the
 * extended or enhanced layout from the channel's FLAG_EXT_CTRL flag.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
853
/* Encode @control into a 32-bit extended control field; inverse of
 * __unpack_extended_control.
 */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
872
/* Encode @control into a 16-bit enhanced control field; inverse of
 * __unpack_enhanced_control.
 */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
891
/* Write the encoded control field into @skb just past the basic L2CAP
 * header, using the layout selected by the channel's FLAG_EXT_CTRL.
 * The skb must already have room at skb->data + L2CAP_HDR_SIZE.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
904
905 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
906 {
907 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
908 return L2CAP_EXT_HDR_SIZE;
909 else
910 return L2CAP_ENH_HDR_SIZE;
911 }
912
/* Build a supervisory (S-frame) PDU for @chan carrying the packed
 * @control field, appending a CRC16 FCS when the channel uses one.
 * Returns the skb at HCI_PRIO_MAX, or ERR_PTR(-ENOMEM) on allocation
 * failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic header: payload length excludes the header itself */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything written so far */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
945
/* Transmit an S-frame described by @control, updating ERTM bookkeeping:
 * sets the F-bit when one is owed (unless polling), tracks RNR-sent
 * state, and for non-SREJ frames records the acked sequence and cancels
 * the ack timer.  No-op for I-frame controls or while the channel is
 * moving between controllers.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
986
987 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
988 {
989 struct l2cap_ctrl control;
990
991 BT_DBG("chan %p, poll %d", chan, poll);
992
993 memset(&control, 0, sizeof(control));
994 control.sframe = 1;
995 control.poll = poll;
996
997 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
998 control.super = L2CAP_SUPER_RNR;
999 else
1000 control.super = L2CAP_SUPER_RR;
1001
1002 control.reqseq = chan->buffer_seq;
1003 l2cap_send_sframe(chan, &control);
1004 }
1005
/* True when no Connect Request is outstanding for this channel. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
1010
1011 static bool __amp_capable(struct l2cap_chan *chan)
1012 {
1013 struct l2cap_conn *conn = chan->conn;
1014
1015 if (enable_hs &&
1016 hci_amp_capable() &&
1017 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1018 conn->fixed_chan_mask & L2CAP_FC_A2MP)
1019 return true;
1020 else
1021 return false;
1022 }
1023
/* Validate extended flow specification parameters.
 * NOTE(review): currently a stub that accepts everything — the real
 * checks have not been implemented yet.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1029
/* Send an L2CAP Connect Request for @chan, recording the command
 * identifier on the channel and marking the connect as pending.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1044
/* Send an A2MP Create Channel Request for @chan targeting the AMP
 * controller identified by @amp_id, recording the command identifier
 * on the channel.
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1057
/* Prepare an ERTM channel for a move to another controller: stop all
 * ERTM timers, force retransmission of already-sent frames (retries
 * reset to 1), discard SREJ/retransmit state, and park the state
 * machines in XMIT/MOVE with the remote treated as busy until the move
 * completes.  No-op for non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Frames already transmitted (retries != 0) are queued in order
	 * at the front of tx_q; stop at the first unsent frame. */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1092
/* Finish a channel move: return move state/role to STABLE/NONE and,
 * for ERTM channels, resynchronize with the peer — the initiator polls
 * (P=1) and waits for the F-bit, the responder waits for the poll.
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1114
/* Transition @chan to BT_CONNECTED: clear all configuration flags and
 * the channel timer, then notify the owner via ops->ready.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1125
/* Kick off channel establishment: start AMP discovery when the channel
 * can use a high-speed controller, otherwise send a plain Connect
 * Request.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}
1135
/* Drive connection establishment for @chan.  LE links are ready
 * immediately.  On BR/EDR, the remote feature mask must be known
 * first: if the Information Request has not been sent yet, send it and
 * arm the info timer; once features are known, start the connection
 * when security allows and no connect is already pending.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1166
1167 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1168 {
1169 u32 local_feat_mask = l2cap_feat_mask;
1170 if (!disable_ertm)
1171 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1172
1173 switch (mode) {
1174 case L2CAP_MODE_ERTM:
1175 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1176 case L2CAP_MODE_STREAMING:
1177 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1178 default:
1179 return 0x00;
1180 }
1181 }
1182
/* Send a Disconnect Request for @chan and move it to BT_DISCONN with
 * @err recorded on the socket.  ERTM timers are stopped first; the
 * fixed A2MP channel has no signalling exchange and only changes
 * state.  No-op when the channel has no connection.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	/* State change and error are set under one socket lock hold */
	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1213
/* ---- L2CAP connections ---- */

/* (Re)start connection setup for every channel on @conn.
 *
 * Called once the remote feature mask is known (or the info request
 * timed out).  Channels in BT_CONNECT get their connect request sent,
 * or are closed if their mode is unsupported by the remote and
 * renegotiation is not allowed.  Channels in BT_CONNECT2 get their
 * pending connect response sent, followed by the first configuration
 * request on success.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Remote does not support the requested mode
			 * and the channel may not reconfigure to a
			 * supported one: give up on this channel.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must accept first. */
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only send the first config request once, and
			 * only after a successful connect response.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1295
/* Find a channel with the given scid and source/destination bdaddr.
 * An exact address match wins immediately; otherwise the closest
 * wildcard (BDADDR_ANY) match found is returned, or NULL.
 * NOTE(review): contrary to the historical comment, the returned
 * channel is neither locked nor referenced here — the caller must
 * ensure it stays valid.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		/* A state of 0 matches any state. */
		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1338
/* Handle an incoming LE connection.
 *
 * If a channel is listening on the LE data CID, spawn a child channel
 * for the new link, copy the link addresses into it, attach it to the
 * connection and mark it ready immediately (LE has no config stage).
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* Keep the ACL link alive while the channel exists. */
	hci_conn_hold(conn->hcon);
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	l2cap_chan_add(conn, chan);

	l2cap_chan_ready(chan);

clean:
	release_sock(parent);
}
1375
/* The HCI link is up: bring up all channels on the connection.
 *
 * Incoming LE links may spawn a listener child first; LE channels
 * only become ready once SMP security succeeds.  On BR/EDR,
 * connectionless/raw channels go straight to BT_CONNECTED, while
 * connection-oriented channels in BT_CONNECT continue with
 * l2cap_do_start().  A2MP fixed channels are managed elsewhere and
 * are skipped.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (!hcon->out && hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			if (smp_conn_security(hcon, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* No config stage for these channel types. */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1420
1421 /* Notify sockets that we cannot guaranty reliability anymore */
1422 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1423 {
1424 struct l2cap_chan *chan;
1425
1426 BT_DBG("conn %p", conn);
1427
1428 mutex_lock(&conn->chan_lock);
1429
1430 list_for_each_entry(chan, &conn->chan_l, list) {
1431 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1432 l2cap_chan_set_err(chan, err);
1433 }
1434
1435 mutex_unlock(&conn->chan_lock);
1436 }
1437
1438 static void l2cap_info_timeout(struct work_struct *work)
1439 {
1440 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1441 info_timer.work);
1442
1443 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1444 conn->info_ident = 0;
1445
1446 l2cap_conn_start(conn);
1447 }
1448
/* Destroy an L2CAP connection and every channel on it.
 *
 * Each channel is deleted with @err; a temporary reference is held
 * across ops->close() since l2cap_chan_del() drops the list's hold on
 * the channel.  Pending info/SMP work is cancelled before the conn
 * itself is detached from the hcon and freed.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1491
/* SMP security procedure timed out on an LE link.
 *
 * Acts only if the SMP-pending flag is still set (i.e. pairing did
 * not complete in the meantime): destroys the SMP context and tears
 * down the whole connection with ETIMEDOUT.  Clearing the flag here
 * also prevents l2cap_conn_del() from destroying the context twice.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1504
/* Allocate and initialise the L2CAP connection object for @hcon.
 *
 * Returns the existing conn if one is already attached (or when
 * @status is non-zero), NULL on allocation failure, otherwise a fresh
 * conn with its HCI channel, MTU, address pointers, timer and channel
 * list set up.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn || status)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links use the dedicated LE MTU when the controller
	 * reports one; otherwise fall back to the ACL MTU.
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* LE links use the SMP security timer, BR/EDR links the
	 * feature-mask info request timer.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1560
1561 /* ---- Socket interface ---- */
1562
1563 /* Find socket with psm and source / destination bdaddr.
1564 * Returns closest match.
1565 */
1566 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1567 bdaddr_t *src,
1568 bdaddr_t *dst)
1569 {
1570 struct l2cap_chan *c, *c1 = NULL;
1571
1572 read_lock(&chan_list_lock);
1573
1574 list_for_each_entry(c, &chan_list, global_l) {
1575 struct sock *sk = c->sk;
1576
1577 if (state && c->state != state)
1578 continue;
1579
1580 if (c->psm == psm) {
1581 int src_match, dst_match;
1582 int src_any, dst_any;
1583
1584 /* Exact match. */
1585 src_match = !bacmp(&bt_sk(sk)->src, src);
1586 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1587 if (src_match && dst_match) {
1588 read_unlock(&chan_list_lock);
1589 return c;
1590 }
1591
1592 /* Closest match */
1593 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1594 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1595 if ((src_match && dst_any) || (src_any && dst_match) ||
1596 (src_any && dst_any))
1597 c1 = c;
1598 }
1599 }
1600
1601 read_unlock(&chan_list_lock);
1602
1603 return c1;
1604 }
1605
/* Initiate an outgoing L2CAP connection on @chan towards @dst.
 *
 * Validates the psm/cid and channel mode, creates (or reuses) the
 * HCI link — LE when the destination cid is the LE data channel, ACL
 * otherwise — attaches the channel to the connection and, if the link
 * is already up, proceeds with channel setup.
 *
 * Returns 0 on success (including "already connecting") or a negative
 * errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need a psm or a fixed cid. */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Reject if the LE link already carries a channel. */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* l2cap_chan_add() takes conn->chan_lock — presumably dropped
	 * and re-taken around it to respect lock ordering; confirm
	 * against the chan_lock/chan lock hierarchy.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1744
/* Wait (interruptibly) until all outstanding ERTM I-frames have been
 * acked by the remote or the channel loses its connection.
 *
 * Sleeps in HZ/5 slices with the socket lock dropped so acks can be
 * processed.  Returns 0 on success, a signal-derived errno when
 * interrupted, or the pending socket error.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so incoming
		 * acks can actually be handled.
		 */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1776
/* ERTM monitor timer expired: feed the tx state machine an
 * L2CAP_EV_MONITOR_TO event.  Bails out if the channel lost its
 * connection in the meantime.  The l2cap_chan_put() balances a hold
 * presumably taken when the timer was armed — confirm at the
 * __set_monitor_timer() site.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1797
/* ERTM retransmission timer expired: feed the tx state machine an
 * L2CAP_EV_RETRANS_TO event.  Bails out if the channel lost its
 * connection in the meantime.  The l2cap_chan_put() balances a hold
 * presumably taken when the timer was armed — confirm at the
 * __set_retrans_timer() site.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1817
/* Transmit SDU fragments in streaming mode.
 *
 * Appends @skbs to the tx queue and sends everything immediately:
 * streaming mode has no retransmission, so each frame is numbered,
 * optionally FCS-protected, and handed straight to the HCI layer.
 * Skipped entirely while the channel is moving between controllers.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming frames carry no ack information. */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1856
/* Transmit queued I-frames in ERTM mode, as allowed by the remote's
 * tx window.
 *
 * Each frame gets the current reqseq/txseq, an optional FCS, and is
 * sent as a clone so the original stays queued for retransmission.
 * Stops when the window fills, the tx state machine leaves XMIT, or
 * the queue drains.  Returns the number of frames sent, 0 when
 * sending is currently not possible, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* I-frames piggy-back an ack for everything received
		 * so far.
		 */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1926
1927 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1928 {
1929 struct l2cap_ctrl control;
1930 struct sk_buff *skb;
1931 struct sk_buff *tx_skb;
1932 u16 seq;
1933
1934 BT_DBG("chan %p", chan);
1935
1936 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1937 return;
1938
1939 if (__chan_is_moving(chan))
1940 return;
1941
1942 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1943 seq = l2cap_seq_list_pop(&chan->retrans_list);
1944
1945 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1946 if (!skb) {
1947 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1948 seq);
1949 continue;
1950 }
1951
1952 bt_cb(skb)->control.retries++;
1953 control = bt_cb(skb)->control;
1954
1955 if (chan->max_tx != 0 &&
1956 bt_cb(skb)->control.retries > chan->max_tx) {
1957 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1958 l2cap_send_disconn_req(chan, ECONNRESET);
1959 l2cap_seq_list_clear(&chan->retrans_list);
1960 break;
1961 }
1962
1963 control.reqseq = chan->buffer_seq;
1964 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1965 control.final = 1;
1966 else
1967 control.final = 0;
1968
1969 if (skb_cloned(skb)) {
1970 /* Cloned sk_buffs are read-only, so we need a
1971 * writeable copy
1972 */
1973 tx_skb = skb_copy(skb, GFP_KERNEL);
1974 } else {
1975 tx_skb = skb_clone(skb, GFP_KERNEL);
1976 }
1977
1978 if (!tx_skb) {
1979 l2cap_seq_list_clear(&chan->retrans_list);
1980 break;
1981 }
1982
1983 /* Update skb contents */
1984 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1985 put_unaligned_le32(__pack_extended_control(&control),
1986 tx_skb->data + L2CAP_HDR_SIZE);
1987 } else {
1988 put_unaligned_le16(__pack_enhanced_control(&control),
1989 tx_skb->data + L2CAP_HDR_SIZE);
1990 }
1991
1992 if (chan->fcs == L2CAP_FCS_CRC16) {
1993 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1994 put_unaligned_le16(fcs, skb_put(tx_skb,
1995 L2CAP_FCS_SIZE));
1996 }
1997
1998 l2cap_do_send(chan, tx_skb);
1999
2000 BT_DBG("Resent txseq %d", control.txseq);
2001
2002 chan->last_acked_seq = chan->buffer_seq;
2003 }
2004 }
2005
/* Queue a single frame (control->reqseq) for retransmission and
 * flush the retransmit list immediately.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2014
/* Retransmit all unacked frames starting at control->reqseq.
 *
 * The acked prefix of the tx queue is skipped, the remaining txseqs
 * up to (but not including) tx_send_head are queued on the
 * retrans_list, and the list is flushed — unless the remote is busy,
 * in which case retransmission stays deferred.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip the already-acked prefix of the tx queue. */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything up to the next unsent frame. */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2048
/* Decide how to acknowledge received I-frames.
 *
 * When locally busy, sends RNR.  Otherwise tries to piggy-back the
 * ack on pending I-frames; if frames still need acking and the rx
 * window is at least 3/4 consumed an RR is sent immediately,
 * otherwise the ack timer is (re)armed to batch the ack.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2098
/* Copy @len bytes of user data from @msg into @skb, chaining extra
 * HCI-MTU-sized fragments on the frag_list as needed.
 *
 * On failure the partially filled skb is left for the caller to free
 * (fragments already linked go with it).  Returns the number of bytes
 * copied or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Account fragment bytes in the head skb's totals. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2143
/* Build a connectionless-channel PDU: L2CAP header plus the 2-byte
 * PSM, followed by the user data (extra data chained as fragments).
 * Returns the skb or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu priority %u", chan, len, priority);

	/* First fragment: header plus whatever fits in one HCI MTU. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2177
2178 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2179 struct msghdr *msg, size_t len,
2180 u32 priority)
2181 {
2182 struct l2cap_conn *conn = chan->conn;
2183 struct sk_buff *skb;
2184 int err, count;
2185 struct l2cap_hdr *lh;
2186
2187 BT_DBG("chan %p len %zu", chan, len);
2188
2189 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2190
2191 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2192 msg->msg_flags & MSG_DONTWAIT);
2193 if (IS_ERR(skb))
2194 return skb;
2195
2196 skb->priority = priority;
2197
2198 /* Create L2CAP header */
2199 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2200 lh->cid = cpu_to_le16(chan->dcid);
2201 lh->len = cpu_to_le16(len);
2202
2203 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2204 if (unlikely(err < 0)) {
2205 kfree_skb(skb);
2206 return ERR_PTR(err);
2207 }
2208 return skb;
2209 }
2210
/* Build a single ERTM/streaming I-frame PDU.
 *
 * Reserves room for the enhanced or extended control field (filled
 * in at transmit time), an optional SDU length (@sdulen is non-zero
 * for the first fragment of a segmented SDU) and, when CRC16 is in
 * use, the FCS appended at send time.  Returns the skb or an
 * ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2264
/* Segment an SDU from @msg into one or more I-frame PDUs queued on
 * @seg_queue.
 *
 * The PDU payload size is limited by the HCI MTU (ERTM PDUs must fit
 * in one HCI fragment), the BR/EDR cap when no AMP link is in use,
 * L2CAP header/FCS overhead and the remote's MPS.  SAR bits mark the
 * frames as unsegmented or start/continue/end.  Returns 0 or a
 * negative errno; the queue is purged on failure.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		/* The start frame carries the 2-byte SDU length. */
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first frame carries the SDU length;
			 * later frames get the space back.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2334
/* Send user data on a channel according to its mode.
 *
 * Connectionless channels and basic mode build and send a single PDU;
 * ERTM/streaming segment the SDU first and then hand the queue to the
 * tx state machine (ERTM) or transmit directly (streaming).  Returns
 * the number of bytes sent or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2414
/* Send SREJ frames requesting every missing txseq between the
 * expected sequence number and @txseq (skipping ones already sitting
 * in the srej queue), recording each request in srej_list, then
 * advance expected_tx_seq past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2437
2438 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2439 {
2440 struct l2cap_ctrl control;
2441
2442 BT_DBG("chan %p", chan);
2443
2444 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2445 return;
2446
2447 memset(&control, 0, sizeof(control));
2448 control.sframe = 1;
2449 control.super = L2CAP_SUPER_SREJ;
2450 control.reqseq = chan->srej_list.tail;
2451 l2cap_send_sframe(chan, &control);
2452 }
2453
2454 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2455 {
2456 struct l2cap_ctrl control;
2457 u16 initial_head;
2458 u16 seq;
2459
2460 BT_DBG("chan %p, txseq %u", chan, txseq);
2461
2462 memset(&control, 0, sizeof(control));
2463 control.sframe = 1;
2464 control.super = L2CAP_SUPER_SREJ;
2465
2466 /* Capture initial list head to allow only one pass through the list. */
2467 initial_head = chan->srej_list.head;
2468
2469 do {
2470 seq = l2cap_seq_list_pop(&chan->srej_list);
2471 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2472 break;
2473
2474 control.reqseq = seq;
2475 l2cap_send_sframe(chan, &control);
2476 l2cap_seq_list_append(&chan->srej_list, seq);
2477 } while (chan->srej_list.head != initial_head);
2478 }
2479
2480 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2481 {
2482 struct sk_buff *acked_skb;
2483 u16 ackseq;
2484
2485 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2486
2487 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2488 return;
2489
2490 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2491 chan->expected_ack_seq, chan->unacked_frames);
2492
2493 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2494 ackseq = __next_seq(chan, ackseq)) {
2495
2496 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2497 if (acked_skb) {
2498 skb_unlink(acked_skb, &chan->tx_q);
2499 kfree_skb(acked_skb);
2500 chan->unacked_frames--;
2501 }
2502 }
2503
2504 chan->expected_ack_seq = reqseq;
2505
2506 if (chan->unacked_frames == 0)
2507 __clear_retrans_timer(chan);
2508
2509 BT_DBG("unacked_frames %u", chan->unacked_frames);
2510 }
2511
2512 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2513 {
2514 BT_DBG("chan %p", chan);
2515
2516 chan->expected_tx_seq = chan->buffer_seq;
2517 l2cap_seq_list_clear(&chan->srej_list);
2518 skb_queue_purge(&chan->srej_q);
2519 chan->rx_state = L2CAP_RX_STATE_RECV;
2520 }
2521
/* ERTM transmit state machine, XMIT state: the channel is free to
 * transmit new I-frames.  Handles data requests, local-busy entry and
 * exit, received acknowledgments, explicit polls and retransmission
 * timeouts.  control may be NULL for events that carry no frame.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Append the new frames to the tx queue and start sending */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We previously told the peer we were busy; poll
			 * it with RR (P=1) and wait for the final response.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		/* Release frames the peer has acknowledged */
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer expired: poll the peer */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2593
2594 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2595 struct l2cap_ctrl *control,
2596 struct sk_buff_head *skbs, u8 event)
2597 {
2598 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2599 event);
2600
2601 switch (event) {
2602 case L2CAP_EV_DATA_REQUEST:
2603 if (chan->tx_send_head == NULL)
2604 chan->tx_send_head = skb_peek(skbs);
2605 /* Queue data, but don't send. */
2606 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2607 break;
2608 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2609 BT_DBG("Enter LOCAL_BUSY");
2610 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2611
2612 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2613 /* The SREJ_SENT state must be aborted if we are to
2614 * enter the LOCAL_BUSY state.
2615 */
2616 l2cap_abort_rx_srej_sent(chan);
2617 }
2618
2619 l2cap_send_ack(chan);
2620
2621 break;
2622 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2623 BT_DBG("Exit LOCAL_BUSY");
2624 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2625
2626 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2627 struct l2cap_ctrl local_control;
2628 memset(&local_control, 0, sizeof(local_control));
2629 local_control.sframe = 1;
2630 local_control.super = L2CAP_SUPER_RR;
2631 local_control.poll = 1;
2632 local_control.reqseq = chan->buffer_seq;
2633 l2cap_send_sframe(chan, &local_control);
2634
2635 chan->retry_count = 1;
2636 __set_monitor_timer(chan);
2637 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2638 }
2639 break;
2640 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2641 l2cap_process_reqseq(chan, control->reqseq);
2642
2643 /* Fall through */
2644
2645 case L2CAP_EV_RECV_FBIT:
2646 if (control && control->final) {
2647 __clear_monitor_timer(chan);
2648 if (chan->unacked_frames > 0)
2649 __set_retrans_timer(chan);
2650 chan->retry_count = 0;
2651 chan->tx_state = L2CAP_TX_STATE_XMIT;
2652 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2653 }
2654 break;
2655 case L2CAP_EV_EXPLICIT_POLL:
2656 /* Ignore */
2657 break;
2658 case L2CAP_EV_MONITOR_TO:
2659 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2660 l2cap_send_rr_or_rnr(chan, 1);
2661 __set_monitor_timer(chan);
2662 chan->retry_count++;
2663 } else {
2664 l2cap_send_disconn_req(chan, ECONNABORTED);
2665 }
2666 break;
2667 default:
2668 break;
2669 }
2670 }
2671
2672 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2673 struct sk_buff_head *skbs, u8 event)
2674 {
2675 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2676 chan, control, skbs, event, chan->tx_state);
2677
2678 switch (chan->tx_state) {
2679 case L2CAP_TX_STATE_XMIT:
2680 l2cap_tx_state_xmit(chan, control, skbs, event);
2681 break;
2682 case L2CAP_TX_STATE_WAIT_F:
2683 l2cap_tx_state_wait_f(chan, control, skbs, event);
2684 break;
2685 default:
2686 /* Ignore event */
2687 break;
2688 }
2689 }
2690
2691 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2692 struct l2cap_ctrl *control)
2693 {
2694 BT_DBG("chan %p, control %p", chan, control);
2695 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2696 }
2697
2698 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2699 struct l2cap_ctrl *control)
2700 {
2701 BT_DBG("chan %p, control %p", chan, control);
2702 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2703 }
2704
2705 /* Copy frame to all raw sockets on that connection */
2706 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2707 {
2708 struct sk_buff *nskb;
2709 struct l2cap_chan *chan;
2710
2711 BT_DBG("conn %p", conn);
2712
2713 mutex_lock(&conn->chan_lock);
2714
2715 list_for_each_entry(chan, &conn->chan_l, list) {
2716 struct sock *sk = chan->sk;
2717 if (chan->chan_type != L2CAP_CHAN_RAW)
2718 continue;
2719
2720 /* Don't send frame to the socket it came from */
2721 if (skb->sk == sk)
2722 continue;
2723 nskb = skb_clone(skb, GFP_KERNEL);
2724 if (!nskb)
2725 continue;
2726
2727 if (chan->ops->recv(chan, nskb))
2728 kfree_skb(nskb);
2729 }
2730
2731 mutex_unlock(&conn->chan_lock);
2732 }
2733
2734 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb carrying one L2CAP signalling command:
 * l2cap_hdr | l2cap_cmd_hdr | dlen bytes of payload.  The CID is
 * chosen by link type (LE vs BR/EDR signalling channel).  If the
 * command does not fit in one buffer of conn->mtu bytes, the payload
 * continues in frag_list skbs carrying raw data only (no headers).
 *
 * Returns the skb on success, NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* Total wire length; the first fragment is capped at conn->mtu */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Copy as much payload as fits after the two headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain including any fragments already linked */
	kfree_skb(skb);
	return NULL;
}
2797
/* Decode one configuration option at *ptr and advance *ptr past it.
 *
 * On return, *type and *olen hold the option header fields and *val
 * the value: 1/2/4-byte options are returned by value (multi-byte
 * ones read little-endian, possibly unaligned), any other length as a
 * pointer to the raw option data.  Returns the total bytes consumed
 * (header plus payload).
 *
 * NOTE(review): opt->len comes from remote data and is not validated
 * here against the bytes remaining in the buffer; callers bound the
 * walk only with "len >= L2CAP_CONF_OPT_SIZE", so an oversized
 * opt->len can move the cursor past the received options — verify
 * call sites (later kernels added explicit size checks).
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: return a pointer to the data */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2831
/* Append one configuration option (type, len, value) at *ptr and
 * advance *ptr past it.  1/2/4-byte values are written little-endian
 * from val; any other length treats val as a pointer to raw option
 * data of len bytes.
 *
 * NOTE(review): there is no bound on the output buffer — every caller
 * must reserve worst-case space for the options it emits (later
 * kernels added an explicit buffer-size argument here as part of the
 * CVE-2017-1000251 hardening — verify call sites).
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2861
2862 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2863 {
2864 struct l2cap_conf_efs efs;
2865
2866 switch (chan->mode) {
2867 case L2CAP_MODE_ERTM:
2868 efs.id = chan->local_id;
2869 efs.stype = chan->local_stype;
2870 efs.msdu = cpu_to_le16(chan->local_msdu);
2871 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2872 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2873 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2874 break;
2875
2876 case L2CAP_MODE_STREAMING:
2877 efs.id = 1;
2878 efs.stype = L2CAP_SERV_BESTEFFORT;
2879 efs.msdu = cpu_to_le16(chan->local_msdu);
2880 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2881 efs.acc_lat = 0;
2882 efs.flush_to = 0;
2883 break;
2884
2885 default:
2886 return;
2887 }
2888
2889 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2890 (unsigned long) &efs);
2891 }
2892
2893 static void l2cap_ack_timeout(struct work_struct *work)
2894 {
2895 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2896 ack_timer.work);
2897 u16 frames_to_ack;
2898
2899 BT_DBG("chan %p", chan);
2900
2901 l2cap_chan_lock(chan);
2902
2903 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2904 chan->last_acked_seq);
2905
2906 if (frames_to_ack)
2907 l2cap_send_rr_or_rnr(chan, 0);
2908
2909 l2cap_chan_unlock(chan);
2910 l2cap_chan_put(chan);
2911 }
2912
/* Reset a channel's data-transfer state before (re)entering connected
 * mode.  Sequence counters, SDU reassembly state, the tx queue and
 * the AMP channel-move state are cleared for all modes; the extra
 * ERTM machinery (rx/tx state machines, retransmit/monitor/ack delayed
 * work, SREJ queue and the SREJ/retransmit sequence lists) is set up
 * only when the mode is ERTM.
 *
 * Returns 0 on success or a negative errno if a sequence-list
 * allocation fails (in which case nothing is left allocated).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Channel-move bookkeeping starts out stable with no role */
	chan->local_amp_id = 0;
	chan->move_id = 0;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Unwind the first list so nothing leaks on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
2957
2958 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2959 {
2960 switch (mode) {
2961 case L2CAP_MODE_STREAMING:
2962 case L2CAP_MODE_ERTM:
2963 if (l2cap_mode_supported(mode, remote_feat_mask))
2964 return mode;
2965 /* fall through */
2966 default:
2967 return L2CAP_MODE_BASIC;
2968 }
2969 }
2970
2971 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2972 {
2973 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2974 }
2975
2976 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2977 {
2978 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2979 }
2980
2981 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
2982 struct l2cap_conf_rfc *rfc)
2983 {
2984 if (chan->local_amp_id && chan->hs_hcon) {
2985 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
2986
2987 /* Class 1 devices have must have ERTM timeouts
2988 * exceeding the Link Supervision Timeout. The
2989 * default Link Supervision Timeout for AMP
2990 * controllers is 10 seconds.
2991 *
2992 * Class 1 devices use 0xffffffff for their
2993 * best-effort flush timeout, so the clamping logic
2994 * will result in a timeout that meets the above
2995 * requirement. ERTM timeouts are 16-bit values, so
2996 * the maximum timeout is 65.535 seconds.
2997 */
2998
2999 /* Convert timeout to milliseconds and round */
3000 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3001
3002 /* This is the recommended formula for class 2 devices
3003 * that start ERTM timers when packets are sent to the
3004 * controller.
3005 */
3006 ertm_to = 3 * ertm_to + 500;
3007
3008 if (ertm_to > 0xffff)
3009 ertm_to = 0xffff;
3010
3011 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3012 rfc->monitor_timeout = rfc->retrans_timeout;
3013 } else {
3014 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3015 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3016 }
3017 }
3018
3019 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3020 {
3021 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3022 __l2cap_ews_supported(chan)) {
3023 /* use extended control field */
3024 set_bit(FLAG_EXT_CTRL, &chan->flags);
3025 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3026 } else {
3027 chan->tx_win = min_t(u16, chan->tx_win,
3028 L2CAP_DEFAULT_TX_WINDOW);
3029 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3030 }
3031 chan->ack_win = chan->tx_win;
3032 }
3033
/* Build a Configure Request for chan into data, proposing MTU, RFC
 * (mode, window, timeouts, MPS), FCS, EFS and extended-window options
 * as appropriate for the channel mode.  On the very first exchange
 * the mode may be downgraded to one the remote's feature mask
 * supports.  Returns the number of bytes written to data.
 *
 * NOTE(review): options are written through ptr without a bound on
 * the caller's buffer; callers must size data for the worst case.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only before any config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only propose an MTU when it differs from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* An explicit basic-mode RFC is only needed when the
		 * peer advertises ERTM or streaming support.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS must leave room for ERTM headers and FCS */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3151
/* Parse the remote's Configure Request options (previously stashed in
 * chan->conf_req/conf_len) and build our Configure Response into
 * data.  Unknown non-hint options are rejected with
 * L2CAP_CONF_UNKNOWN; MTU, mode, EWS and EFS proposals are validated
 * against local capabilities.  Returns the response length, or
 * -ECONNREFUSED when negotiation cannot proceed.
 *
 * NOTE(review): response options are written through ptr with no
 * bound on the output buffer; callers must reserve worst-case space.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be ignored; mandatory ones may not */
		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended windows require high-speed support */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Echo the unknown option type back in the rsp */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode downgrade is only allowed on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State-2 devices insist on their configured mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* Service types must match unless one side is
			 * no-traffic.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp remote MPS to what fits in our ACL MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3365
/* Parse the options in a Configure Response from the peer and build a
 * follow-up Configure Request into data, adopting acceptable values
 * (MTU, flush timeout, RFC parameters, EWS, EFS) into the channel.
 * Returns the length of the request built in data, or -ECONNREFUSED
 * when the response is incompatible with the channel's required mode
 * or service type.  *result may be downgraded to UNACCEPT for a
 * too-small MTU.
 *
 * NOTE(review): output written through ptr is not bounded against the
 * caller's buffer; callers must size data for the worst-case option
 * set (this area was hardened upstream for CVE-2017-1000251 — verify
 * call sites).
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				/* Counter with the minimum we accept */
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* State-2 devices may not change mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service types must be compatible */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);
			break;

		case L2CAP_CONF_FCS:
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Refuse a mode change forced onto a basic-mode channel */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3474
3475 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3476 u16 result, u16 flags)
3477 {
3478 struct l2cap_conf_rsp *rsp = data;
3479 void *ptr = rsp->data;
3480
3481 BT_DBG("chan %p", chan);
3482
3483 rsp->scid = cpu_to_le16(chan->dcid);
3484 rsp->result = cpu_to_le16(result);
3485 rsp->flags = cpu_to_le16(flags);
3486
3487 return ptr - data;
3488 }
3489
3490 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3491 {
3492 struct l2cap_conn_rsp rsp;
3493 struct l2cap_conn *conn = chan->conn;
3494 u8 buf[128];
3495 u8 rsp_code;
3496
3497 rsp.scid = cpu_to_le16(chan->dcid);
3498 rsp.dcid = cpu_to_le16(chan->scid);
3499 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3500 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3501
3502 if (chan->hs_hcon)
3503 rsp_code = L2CAP_CREATE_CHAN_RSP;
3504 else
3505 rsp_code = L2CAP_CONN_RSP;
3506
3507 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3508
3509 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3510
3511 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3512 return;
3513
3514 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3515 l2cap_build_conf_req(chan, buf), buf);
3516 chan->num_conf_req++;
3517 }
3518
/* Extract the negotiated RFC (and extended window size) parameters
 * from a successful Configure Response and apply them to the channel.
 * Only meaningful for ERTM and streaming channels; other modes return
 * immediately.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* The ack window may only shrink during negotiation */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3569
3570 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3571 struct l2cap_cmd_hdr *cmd, u8 *data)
3572 {
3573 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3574
3575 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3576 return 0;
3577
3578 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3579 cmd->ident == conn->info_ident) {
3580 cancel_delayed_work(&conn->info_timer);
3581
3582 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3583 conn->info_ident = 0;
3584
3585 l2cap_conn_start(conn);
3586 }
3587
3588 return 0;
3589 }
3590
/* Handle an incoming Connection Request (also the BR/EDR leg of a
 * Create Channel Request - then rsp_code/amp_id come from the AMP
 * path).  Allocates a new channel on the listening parent, answers
 * with rsp_code and, when appropriate, starts the information-request
 * and configuration exchanges.
 *
 * Returns the new channel, or NULL when the request was rejected
 * before one could be allocated.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid.
	 * Our channels store the peer's source CID as their dcid (see
	 * the chan->dcid = scid assignment below), so a match means a
	 * duplicate request.
	 */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	/* Our source CID is the peer's destination CID in the response */
	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace wants to authorize first */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id) {
					__l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Remote features not yet known - answer pending */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* First connect on this link: query the remote feature mask
	 * before continuing channel setup.
	 */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* Connection accepted immediately: kick off configuration */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3721
3722 static int l2cap_connect_req(struct l2cap_conn *conn,
3723 struct l2cap_cmd_hdr *cmd, u8 *data)
3724 {
3725 struct hci_dev *hdev = conn->hcon->hdev;
3726 struct hci_conn *hcon = conn->hcon;
3727
3728 hci_dev_lock(hdev);
3729 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3730 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3731 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3732 hcon->dst_type, 0, NULL, 0,
3733 hcon->dev_class);
3734 hci_dev_unlock(hdev);
3735
3736 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3737 return 0;
3738 }
3739
/* Handle an incoming Connection Response or Create Channel Response.
 * Finds the local channel (by our source CID, or by the request ident
 * when no CID was echoed back) and advances it to configuration,
 * marks it pending, or tears it down according to the result code.
 *
 * Returns 0 on success, -EFAULT when no matching channel exists.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		/* No source CID in the response - match on the ident
		 * of the request we sent.
		 */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Send the initial configure request only once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal - drop the channel */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3808
3809 static inline void set_default_fcs(struct l2cap_chan *chan)
3810 {
3811 /* FCS is enabled only in ERTM or streaming mode, if one or both
3812 * sides request it.
3813 */
3814 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3815 chan->fcs = L2CAP_FCS_NONE;
3816 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3817 chan->fcs = L2CAP_FCS_CRC16;
3818 }
3819
3820 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3821 u8 ident, u16 flags)
3822 {
3823 struct l2cap_conn *conn = chan->conn;
3824
3825 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3826 flags);
3827
3828 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3829 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3830
3831 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3832 l2cap_build_conf_rsp(chan, data,
3833 L2CAP_CONF_SUCCESS, flags), data);
3834 }
3835
/* Handle an incoming Configure Request.  Options may be split across
 * several requests (continuation flag); fragments accumulate in
 * chan->conf_req and only the complete set is parsed and answered.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* NOTE(review): assumes l2cap_get_chan_by_scid() returns the
	 * channel locked; it is unlocked at the "unlock" label.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Configuration is only legal while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured - bring the channel up */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own configure request hasn't gone out yet - send it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3945
/* Handle an incoming Configure Response.  Depending on the result the
 * channel either completes its side of configuration, stays pending
 * (EFS), retries with adjusted options, or is disconnected.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* NOTE(review): assumes l2cap_get_chan_by_scid() returns the
	 * channel locked; it is unlocked at the "done" label.
	 */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Pick up ERTM/streaming parameters the peer confirmed */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR answers now; AMP waits for the logical
			 * link and remembers the ident for later.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many unaccepted responses - fall through and
		 * give up on the channel.
		 */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More option fragments to come - wait for them */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured - bring the channel up */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4053
/* Handle an incoming Disconnection Request: acknowledge it, shut the
 * socket down and tear the channel down.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's destination CID is our source CID */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	/* Echo the CIDs back, swapped into the peer's perspective */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Hold a reference so the channel outlives l2cap_chan_del()
	 * until ops->close() has run, then drop it.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4100
/* Handle an incoming Disconnection Response: the peer confirmed our
 * disconnect request, so finish tearing the channel down.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold a reference so the channel outlives l2cap_chan_del()
	 * until ops->close() has run, then drop it.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4135
/* Handle an incoming Information Request: report our feature mask or
 * fixed channel map, or "not supported" for any other type.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise optional features according to module state */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* NOTE(review): this rewrites the file-global
		 * l2cap_fixed_chan[] on every request; it is shared
		 * across all connections.
		 */
		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4186
/* Handle an incoming Information Response.  Records the remote
 * features, optionally follows up with a fixed-channel query, and
 * restarts pending connection setup once the exchange is done.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused the request - treat feature exchange as
		 * finished and continue connection setup anyway.
		 */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Also query the fixed channel map before
			 * declaring the exchange done.
			 */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4245
/* Handle an incoming Create Channel Request (AMP).  Controller id 0
 * falls back to a plain BR/EDR connect; otherwise the AMP controller
 * is validated and the new channel is bound to its logical link.
 *
 * Returns 0 on success, -EPROTO on a malformed request, -EINVAL when
 * high speed support is disabled, -EFAULT on AMP lookup failures.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!enable_hs)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == HCI_BREDR_ID) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			return -EFAULT;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		/* Bind the channel to the high-speed link; FCS is not
		 * used over AMP, and the MTU follows the AMP block size.
		 */
		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return -EFAULT;
}
4319
4320 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4321 {
4322 struct l2cap_move_chan_req req;
4323 u8 ident;
4324
4325 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4326
4327 ident = l2cap_get_ident(chan->conn);
4328 chan->ident = ident;
4329
4330 req.icid = cpu_to_le16(chan->scid);
4331 req.dest_amp_id = dest_amp_id;
4332
4333 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4334 &req);
4335
4336 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4337 }
4338
4339 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4340 {
4341 struct l2cap_move_chan_rsp rsp;
4342
4343 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4344
4345 rsp.icid = cpu_to_le16(chan->dcid);
4346 rsp.result = cpu_to_le16(result);
4347
4348 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4349 sizeof(rsp), &rsp);
4350 }
4351
4352 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4353 {
4354 struct l2cap_move_chan_cfm cfm;
4355
4356 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4357
4358 chan->ident = l2cap_get_ident(chan->conn);
4359
4360 cfm.icid = cpu_to_le16(chan->scid);
4361 cfm.result = cpu_to_le16(result);
4362
4363 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4364 sizeof(cfm), &cfm);
4365
4366 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4367 }
4368
4369 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4370 {
4371 struct l2cap_move_chan_cfm cfm;
4372
4373 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4374
4375 cfm.icid = cpu_to_le16(icid);
4376 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4377
4378 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4379 sizeof(cfm), &cfm);
4380 }
4381
4382 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4383 u16 icid)
4384 {
4385 struct l2cap_move_chan_cfm_rsp rsp;
4386
4387 BT_DBG("icid 0x%4.4x", icid);
4388
4389 rsp.icid = cpu_to_le16(icid);
4390 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4391 }
4392
4393 static void __release_logical_link(struct l2cap_chan *chan)
4394 {
4395 chan->hs_hchan = NULL;
4396 chan->hs_hcon = NULL;
4397
4398 /* Placeholder - release the logical link */
4399 }
4400
/* Clean up after a failed logical link setup.  A channel that never
 * reached BT_CONNECTED is torn down; an established channel aborts
 * the in-progress move according to its role.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		/* Tell the initiator we cannot host the channel */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4431
/* Complete channel creation once the AMP logical link is up: attach
 * the link, send the deferred EFS configure response and, if both
 * configuration directions are already done, bring the channel up.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident was saved when the configure response was deferred */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4454
/* Advance an in-progress channel move once the logical link on the
 * destination controller is ready.  The next step depends on the
 * channel's move state and role.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Wait until local receive is unblocked */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4488
/* Logical link confirmation callback. Call with chan locked.
 * On failure both the create and move paths are unwound; on success
 * the result is routed to the create path (channel not yet connected)
 * or the move path (established channel).
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4509
/* Begin moving a channel toward its preferred controller.  From
 * BR/EDR a physical AMP link must be prepared first (and only if the
 * channel policy prefers AMP); from an AMP controller the move back
 * to BR/EDR (controller id 0) can be requested immediately.
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == HCI_BREDR_ID) {
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		/* Destination id 0 means back to BR/EDR */
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}
4528
/* Continue channel creation over AMP after the physical link attempt
 * finished with the given result.  Outgoing channels either proceed
 * with a Create Channel Request or fall back to BR/EDR; incoming
 * channels get their deferred Create Channel Response sent now.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* No FCS over AMP links */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Accepted - move on to configuration */
			__l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4580
4581 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4582 u8 remote_amp_id)
4583 {
4584 l2cap_move_setup(chan);
4585 chan->move_id = local_amp_id;
4586 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4587
4588 l2cap_send_move_chan_req(chan, remote_amp_id);
4589 }
4590
/* As move responder: answer the Move Channel Request based on the
 * availability and state of the logical link on the new controller.
 * The hci_chan lookup is still a placeholder, so currently the
 * "not allowed" branch is always taken.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4615
4616 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4617 {
4618 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4619 u8 rsp_result;
4620 if (result == -EINVAL)
4621 rsp_result = L2CAP_MR_BAD_ID;
4622 else
4623 rsp_result = L2CAP_MR_NOT_ALLOWED;
4624
4625 l2cap_send_move_chan_rsp(chan, rsp_result);
4626 }
4627
4628 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4629 chan->move_state = L2CAP_MOVE_STABLE;
4630
4631 /* Restart data transmission */
4632 l2cap_ertm_send(chan);
4633 }
4634
/* Physical link confirmation. Invoke with locked chan.
 * Routes the result to the create path (channel not yet connected),
 * move-abort, or the role-specific move continuation.
 *
 * NOTE(review): the BT_DISCONN/BT_CLOSED branch unlocks the channel
 * before returning, while all other paths return with it still
 * locked - verify callers expect this asymmetry.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4668
/* Handle an incoming Move Channel Request.  Validates that the
 * channel and destination controller are eligible, resolves move
 * collisions by bd_addr comparison, and answers with success,
 * pending, or an error result.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!enable_hs)
		return -EINVAL;

	/* NOTE(review): assumes l2cap_get_chan_by_dcid() returns the
	 * channel locked; it is unlocked before returning below.
	 */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels without a BR/EDR-only
	 * policy may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* A non-zero destination must name a usable AMP controller */
	if (req->dest_amp_id) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (!req->dest_amp_id) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4766
/* Continue an in-progress channel move after receiving a Move Channel
 * Response with a success or pending result. Advances chan->move_state
 * and sends a Move Channel Confirm when both the peer response and the
 * logical link status allow it. If no channel matches icid, a confirm
 * is sent anyway (the spec requires a response for unknown ICIDs).
 * The channel returned by l2cap_get_chan_by_scid() is locked; it is
 * unlocked before returning.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* Restart the move timer only while the peer reports "pending" */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link.
		 * NOTE(review): hchan is always NULL until the AMP logical
		 * link lookup is implemented, so this path currently always
		 * sends an unconfirmed response.
		 */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
4856
4857 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
4858 u16 result)
4859 {
4860 struct l2cap_chan *chan;
4861
4862 chan = l2cap_get_chan_by_ident(conn, ident);
4863 if (!chan) {
4864 /* Could not locate channel, icid is best guess */
4865 l2cap_send_move_chan_cfm_icid(conn, icid);
4866 return;
4867 }
4868
4869 __clear_chan_timer(chan);
4870
4871 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4872 if (result == L2CAP_MR_COLLISION) {
4873 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4874 } else {
4875 /* Cleanup - cancel move */
4876 chan->move_id = chan->local_amp_id;
4877 l2cap_move_done(chan);
4878 }
4879 }
4880
4881 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4882
4883 l2cap_chan_unlock(chan);
4884 }
4885
4886 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4887 struct l2cap_cmd_hdr *cmd,
4888 u16 cmd_len, void *data)
4889 {
4890 struct l2cap_move_chan_rsp *rsp = data;
4891 u16 icid, result;
4892
4893 if (cmd_len != sizeof(*rsp))
4894 return -EPROTO;
4895
4896 icid = le16_to_cpu(rsp->icid);
4897 result = le16_to_cpu(rsp->result);
4898
4899 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4900
4901 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
4902 l2cap_move_continue(conn, icid, result);
4903 else
4904 l2cap_move_fail(conn, cmd->ident, icid, result);
4905
4906 return 0;
4907 }
4908
/* Process an incoming Move Channel Confirm signaling command (responder
 * side of a move). On a confirmed result the channel adopts the new
 * controller id; otherwise the move is rolled back. A Move Channel
 * Confirm Response is always sent, even for unknown ICIDs as the spec
 * requires. Returns 0, or -EPROTO on a malformed payload.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Locks the channel on success */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			/* Moved back to BR/EDR: the AMP logical link is
			 * no longer needed.
			 */
			if (!chan->local_amp_id)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed: revert to the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
4950
4951 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4952 struct l2cap_cmd_hdr *cmd,
4953 u16 cmd_len, void *data)
4954 {
4955 struct l2cap_move_chan_cfm_rsp *rsp = data;
4956 struct l2cap_chan *chan;
4957 u16 icid;
4958
4959 if (cmd_len != sizeof(*rsp))
4960 return -EPROTO;
4961
4962 icid = le16_to_cpu(rsp->icid);
4963
4964 BT_DBG("icid 0x%4.4x", icid);
4965
4966 chan = l2cap_get_chan_by_scid(conn, icid);
4967 if (!chan)
4968 return 0;
4969
4970 __clear_chan_timer(chan);
4971
4972 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
4973 chan->local_amp_id = chan->move_id;
4974
4975 if (!chan->local_amp_id && chan->hs_hchan)
4976 __release_logical_link(chan);
4977
4978 l2cap_move_done(chan);
4979 }
4980
4981 l2cap_chan_unlock(chan);
4982
4983 return 0;
4984 }
4985
/* Validate LE connection parameters from a Connection Parameter Update
 * Request against the ranges permitted by the Bluetooth specification:
 * interval 6..3200 (7.5ms..4s) with min <= max, supervision timeout
 * multiplier 10..3200 (100ms..32s), slave latency at most 499 and small
 * enough that the supervision timeout still covers latency+1 intervals.
 * Returns 0 when acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
					 u16 to_multiplier)
{
	u16 max_latency;

	if (min < 6 || max > 3200 || min > max)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* Supervision timeout must exceed the maximum interval
	 * (timeout is in 10ms units, interval in 1.25ms units).
	 */
	if (to_multiplier * 8 <= max)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
5006
/* Handle an LE Connection Parameter Update Request. Only valid when the
 * local device is master of the link. The parameters are validated with
 * l2cap_check_conn_param(); a response is always sent, and on acceptance
 * the controller is asked to update the connection. Returns 0, -EINVAL
 * if not master, or -EPROTO on a malformed payload.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may be asked to change connection parameters */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Apply the new parameters only after the response is queued */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5049
/* Dispatch one BR/EDR signaling command to its handler. Echo requests
 * are answered inline by reflecting the payload. Returns 0 on success
 * or a negative errno (the caller answers errors with a Command Reject).
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		/* Connect and Create Channel responses share a handler */
		err = l2cap_connect_create_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo: send the received payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5129
5130 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5131 struct l2cap_cmd_hdr *cmd, u8 *data)
5132 {
5133 switch (cmd->code) {
5134 case L2CAP_COMMAND_REJ:
5135 return 0;
5136
5137 case L2CAP_CONN_PARAM_UPDATE_REQ:
5138 return l2cap_conn_param_update_req(conn, cmd, data);
5139
5140 case L2CAP_CONN_PARAM_UPDATE_RSP:
5141 return 0;
5142
5143 default:
5144 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5145 return -EINVAL;
5146 }
5147 }
5148
/* Parse a signaling channel PDU and dispatch every command it contains.
 * Multiple commands may be concatenated in one skb; each is handed to
 * the LE or BR/EDR dispatcher depending on the link type, and failures
 * are answered with a Command Reject. Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* A command claiming more payload than remains, or with a
		 * zero ident, is malformed; stop parsing this PDU.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next concatenated command */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
5197
5198 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5199 {
5200 u16 our_fcs, rcv_fcs;
5201 int hdr_size;
5202
5203 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5204 hdr_size = L2CAP_EXT_HDR_SIZE;
5205 else
5206 hdr_size = L2CAP_ENH_HDR_SIZE;
5207
5208 if (chan->fcs == L2CAP_FCS_CRC16) {
5209 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5210 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5211 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5212
5213 if (our_fcs != rcv_fcs)
5214 return -EBADMSG;
5215 }
5216 return 0;
5217 }
5218
/* Answer a poll (P-bit) by sending a frame with the F-bit set: an RNR
 * if locally busy, otherwise pending I-frames (which carry the F-bit
 * via CONN_SEND_FBIT), or a plain RR if nothing else conveyed it.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5252
5253 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5254 struct sk_buff **last_frag)
5255 {
5256 /* skb->len reflects data in skb as well as all fragments
5257 * skb->data_len reflects only data in fragments
5258 */
5259 if (!skb_has_frag_list(skb))
5260 skb_shinfo(skb)->frag_list = new_frag;
5261
5262 new_frag->next = NULL;
5263
5264 (*last_frag)->next = new_frag;
5265 *last_frag = new_frag;
5266
5267 skb->len += new_frag->len;
5268 skb->data_len += new_frag->len;
5269 skb->truesize += new_frag->truesize;
5270 }
5271
/* Reassemble an SDU from I-frames according to the frame's SAR bits.
 * Unsegmented frames go straight to the channel's recv callback; START
 * begins a new partial SDU in chan->sdu, CONTINUE/END append fragments
 * until sdu_len is reached. Takes ownership of skb: it is delivered,
 * stored in the partial SDU, or freed on error. Returns 0 on success
 * or a negative errno (-EINVAL for SAR sequence violations, -EMSGSIZE
 * when the SDU exceeds the channel MTU); any partial SDU is discarded
 * on error.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented frame while reassembling is a protocol
		 * error.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* Total SDU length travels in the first two payload bytes */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must be shorter than the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by the partial SDU */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation may not complete (or overrun) the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end fragment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the offending frame and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5353
/* Re-segment queued PDUs for the new MTU after a channel move.
 * Placeholder: not yet implemented, always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5359
5360 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5361 {
5362 u8 event;
5363
5364 if (chan->mode != L2CAP_MODE_ERTM)
5365 return;
5366
5367 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5368 l2cap_tx(chan, NULL, NULL, event);
5369 }
5370
/* Drain the SREJ queue of frames that are now in sequence, passing each
 * to reassembly until a gap (or local busy) stops progress. When the
 * queue empties, the channel returns to the normal RECV state and the
 * received frames are acknowledged. Returns 0 or a reassembly error.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap: the next in-sequence frame has not arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5404
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame, respecting the poll/final bits and the SREJ_ACT bookkeeping
 * that prevents retransmitting the same frame twice across a poll/final
 * exchange. Disconnects on an invalid reqseq or retry-limit overrun.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq asks for a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if it was already done in
			 * response to the poll for this same reqseq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5462
/* Handle a received REJ S-frame: retransmit all unacknowledged I-frames
 * starting at reqseq, using REJ_ACT to avoid a duplicate retransmit
 * across a poll/final exchange. Disconnects on an invalid reqseq or
 * when the requested frame has hit the retry limit.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq asks for a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if the poll response didn't already */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5499
/* Classify the txseq of a received I-frame relative to the receive
 * window and any outstanding SREJ state. Returns one of the
 * L2CAP_TXSEQ_* classifications that drive the rx state machines:
 * expected, duplicate, unexpected (gap), SREJ-related variants, or
 * invalid (ignorable or disconnect-worthy).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5585
5586 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5587 struct l2cap_ctrl *control,
5588 struct sk_buff *skb, u8 event)
5589 {
5590 int err = 0;
5591 bool skb_in_use = 0;
5592
5593 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5594 event);
5595
5596 switch (event) {
5597 case L2CAP_EV_RECV_IFRAME:
5598 switch (l2cap_classify_txseq(chan, control->txseq)) {
5599 case L2CAP_TXSEQ_EXPECTED:
5600 l2cap_pass_to_tx(chan, control);
5601
5602 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5603 BT_DBG("Busy, discarding expected seq %d",
5604 control->txseq);
5605 break;
5606 }
5607
5608 chan->expected_tx_seq = __next_seq(chan,
5609 control->txseq);
5610
5611 chan->buffer_seq = chan->expected_tx_seq;
5612 skb_in_use = 1;
5613
5614 err = l2cap_reassemble_sdu(chan, skb, control);
5615 if (err)
5616 break;
5617
5618 if (control->final) {
5619 if (!test_and_clear_bit(CONN_REJ_ACT,
5620 &chan->conn_state)) {
5621 control->final = 0;
5622 l2cap_retransmit_all(chan, control);
5623 l2cap_ertm_send(chan);
5624 }
5625 }
5626
5627 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5628 l2cap_send_ack(chan);
5629 break;
5630 case L2CAP_TXSEQ_UNEXPECTED:
5631 l2cap_pass_to_tx(chan, control);
5632
5633 /* Can't issue SREJ frames in the local busy state.
5634 * Drop this frame, it will be seen as missing
5635 * when local busy is exited.
5636 */
5637 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5638 BT_DBG("Busy, discarding unexpected seq %d",
5639 control->txseq);
5640 break;
5641 }
5642
5643 /* There was a gap in the sequence, so an SREJ
5644 * must be sent for each missing frame. The
5645 * current frame is stored for later use.
5646 */
5647 skb_queue_tail(&chan->srej_q, skb);
5648 skb_in_use = 1;
5649 BT_DBG("Queued %p (queue len %d)", skb,
5650 skb_queue_len(&chan->srej_q));
5651
5652 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5653 l2cap_seq_list_clear(&chan->srej_list);
5654 l2cap_send_srej(chan, control->txseq);
5655
5656 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5657 break;
5658 case L2CAP_TXSEQ_DUPLICATE:
5659 l2cap_pass_to_tx(chan, control);
5660 break;
5661 case L2CAP_TXSEQ_INVALID_IGNORE:
5662 break;
5663 case L2CAP_TXSEQ_INVALID:
5664 default:
5665 l2cap_send_disconn_req(chan, ECONNRESET);
5666 break;
5667 }
5668 break;
5669 case L2CAP_EV_RECV_RR:
5670 l2cap_pass_to_tx(chan, control);
5671 if (control->final) {
5672 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5673
5674 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5675 !__chan_is_moving(chan)) {
5676 control->final = 0;
5677 l2cap_retransmit_all(chan, control);
5678 }
5679
5680 l2cap_ertm_send(chan);
5681 } else if (control->poll) {
5682 l2cap_send_i_or_rr_or_rnr(chan);
5683 } else {
5684 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5685 &chan->conn_state) &&
5686 chan->unacked_frames)
5687 __set_retrans_timer(chan);
5688
5689 l2cap_ertm_send(chan);
5690 }
5691 break;
5692 case L2CAP_EV_RECV_RNR:
5693 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5694 l2cap_pass_to_tx(chan, control);
5695 if (control && control->poll) {
5696 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5697 l2cap_send_rr_or_rnr(chan, 0);
5698 }
5699 __clear_retrans_timer(chan);
5700 l2cap_seq_list_clear(&chan->retrans_list);
5701 break;
5702 case L2CAP_EV_RECV_REJ:
5703 l2cap_handle_rej(chan, control);
5704 break;
5705 case L2CAP_EV_RECV_SREJ:
5706 l2cap_handle_srej(chan, control);
5707 break;
5708 default:
5709 break;
5710 }
5711
5712 if (skb && !skb_in_use) {
5713 BT_DBG("Freeing %p", skb);
5714 kfree_skb(skb);
5715 }
5716
5717 return err;
5718 }
5719
5720 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5721 struct l2cap_ctrl *control,
5722 struct sk_buff *skb, u8 event)
5723 {
5724 int err = 0;
5725 u16 txseq = control->txseq;
5726 bool skb_in_use = 0;
5727
5728 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5729 event);
5730
5731 switch (event) {
5732 case L2CAP_EV_RECV_IFRAME:
5733 switch (l2cap_classify_txseq(chan, txseq)) {
5734 case L2CAP_TXSEQ_EXPECTED:
5735 /* Keep frame for reassembly later */
5736 l2cap_pass_to_tx(chan, control);
5737 skb_queue_tail(&chan->srej_q, skb);
5738 skb_in_use = 1;
5739 BT_DBG("Queued %p (queue len %d)", skb,
5740 skb_queue_len(&chan->srej_q));
5741
5742 chan->expected_tx_seq = __next_seq(chan, txseq);
5743 break;
5744 case L2CAP_TXSEQ_EXPECTED_SREJ:
5745 l2cap_seq_list_pop(&chan->srej_list);
5746
5747 l2cap_pass_to_tx(chan, control);
5748 skb_queue_tail(&chan->srej_q, skb);
5749 skb_in_use = 1;
5750 BT_DBG("Queued %p (queue len %d)", skb,
5751 skb_queue_len(&chan->srej_q));
5752
5753 err = l2cap_rx_queued_iframes(chan);
5754 if (err)
5755 break;
5756
5757 break;
5758 case L2CAP_TXSEQ_UNEXPECTED:
5759 /* Got a frame that can't be reassembled yet.
5760 * Save it for later, and send SREJs to cover
5761 * the missing frames.
5762 */
5763 skb_queue_tail(&chan->srej_q, skb);
5764 skb_in_use = 1;
5765 BT_DBG("Queued %p (queue len %d)", skb,
5766 skb_queue_len(&chan->srej_q));
5767
5768 l2cap_pass_to_tx(chan, control);
5769 l2cap_send_srej(chan, control->txseq);
5770 break;
5771 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5772 /* This frame was requested with an SREJ, but
5773 * some expected retransmitted frames are
5774 * missing. Request retransmission of missing
5775 * SREJ'd frames.
5776 */
5777 skb_queue_tail(&chan->srej_q, skb);
5778 skb_in_use = 1;
5779 BT_DBG("Queued %p (queue len %d)", skb,
5780 skb_queue_len(&chan->srej_q));
5781
5782 l2cap_pass_to_tx(chan, control);
5783 l2cap_send_srej_list(chan, control->txseq);
5784 break;
5785 case L2CAP_TXSEQ_DUPLICATE_SREJ:
5786 /* We've already queued this frame. Drop this copy. */
5787 l2cap_pass_to_tx(chan, control);
5788 break;
5789 case L2CAP_TXSEQ_DUPLICATE:
5790 /* Expecting a later sequence number, so this frame
5791 * was already received. Ignore it completely.
5792 */
5793 break;
5794 case L2CAP_TXSEQ_INVALID_IGNORE:
5795 break;
5796 case L2CAP_TXSEQ_INVALID:
5797 default:
5798 l2cap_send_disconn_req(chan, ECONNRESET);
5799 break;
5800 }
5801 break;
5802 case L2CAP_EV_RECV_RR:
5803 l2cap_pass_to_tx(chan, control);
5804 if (control->final) {
5805 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5806
5807 if (!test_and_clear_bit(CONN_REJ_ACT,
5808 &chan->conn_state)) {
5809 control->final = 0;
5810 l2cap_retransmit_all(chan, control);
5811 }
5812
5813 l2cap_ertm_send(chan);
5814 } else if (control->poll) {
5815 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5816 &chan->conn_state) &&
5817 chan->unacked_frames) {
5818 __set_retrans_timer(chan);
5819 }
5820
5821 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5822 l2cap_send_srej_tail(chan);
5823 } else {
5824 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5825 &chan->conn_state) &&
5826 chan->unacked_frames)
5827 __set_retrans_timer(chan);
5828
5829 l2cap_send_ack(chan);
5830 }
5831 break;
5832 case L2CAP_EV_RECV_RNR:
5833 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5834 l2cap_pass_to_tx(chan, control);
5835 if (control->poll) {
5836 l2cap_send_srej_tail(chan);
5837 } else {
5838 struct l2cap_ctrl rr_control;
5839 memset(&rr_control, 0, sizeof(rr_control));
5840 rr_control.sframe = 1;
5841 rr_control.super = L2CAP_SUPER_RR;
5842 rr_control.reqseq = chan->buffer_seq;
5843 l2cap_send_sframe(chan, &rr_control);
5844 }
5845
5846 break;
5847 case L2CAP_EV_RECV_REJ:
5848 l2cap_handle_rej(chan, control);
5849 break;
5850 case L2CAP_EV_RECV_SREJ:
5851 l2cap_handle_srej(chan, control);
5852 break;
5853 }
5854
5855 if (skb && !skb_in_use) {
5856 BT_DBG("Freeing %p", skb);
5857 kfree_skb(skb);
5858 }
5859
5860 return err;
5861 }
5862
5863 static int l2cap_finish_move(struct l2cap_chan *chan)
5864 {
5865 BT_DBG("chan %p", chan);
5866
5867 chan->rx_state = L2CAP_RX_STATE_RECV;
5868
5869 if (chan->hs_hcon)
5870 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5871 else
5872 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5873
5874 return l2cap_resegment(chan);
5875 }
5876
/* Receive handler for the WAIT_P state (responder side of a channel
 * move waiting for a poll). Only frames with the P-bit set are valid;
 * the transmit side is rewound to the peer's reqseq, the move is
 * finished, and the poll is answered with an F-bit frame. Any remaining
 * S-frame event is replayed through the normal RECV handler.
 * Returns 0 or a negative errno.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not expected while waiting for the poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
5914
/* Receive handler for the WAIT_F state (initiator side of a channel
 * move waiting for the peer's F-bit frame). Only final frames are
 * valid; the transmit side is rewound to the peer's reqseq, the MTU is
 * switched to the new controller, data is re-segmented, and the frame
 * is then processed by the normal RECV handler.
 * Returns 0 or a negative errno.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Adopt the MTU of the controller now carrying the channel */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
5952
5953 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5954 {
5955 /* Make sure reqseq is for a packet that has been sent but not acked */
5956 u16 unacked;
5957
5958 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5959 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5960 }
5961
5962 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5963 struct sk_buff *skb, u8 event)
5964 {
5965 int err = 0;
5966
5967 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5968 control, skb, event, chan->rx_state);
5969
5970 if (__valid_reqseq(chan, control->reqseq)) {
5971 switch (chan->rx_state) {
5972 case L2CAP_RX_STATE_RECV:
5973 err = l2cap_rx_state_recv(chan, control, skb, event);
5974 break;
5975 case L2CAP_RX_STATE_SREJ_SENT:
5976 err = l2cap_rx_state_srej_sent(chan, control, skb,
5977 event);
5978 break;
5979 case L2CAP_RX_STATE_WAIT_P:
5980 err = l2cap_rx_state_wait_p(chan, control, skb, event);
5981 break;
5982 case L2CAP_RX_STATE_WAIT_F:
5983 err = l2cap_rx_state_wait_f(chan, control, skb, event);
5984 break;
5985 default:
5986 /* shut it down */
5987 break;
5988 }
5989 } else {
5990 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5991 control->reqseq, chan->next_tx_seq,
5992 chan->expected_ack_seq);
5993 l2cap_send_disconn_req(chan, ECONNRESET);
5994 }
5995
5996 return err;
5997 }
5998
5999 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6000 struct sk_buff *skb)
6001 {
6002 int err = 0;
6003
6004 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6005 chan->rx_state);
6006
6007 if (l2cap_classify_txseq(chan, control->txseq) ==
6008 L2CAP_TXSEQ_EXPECTED) {
6009 l2cap_pass_to_tx(chan, control);
6010
6011 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6012 __next_seq(chan, chan->buffer_seq));
6013
6014 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6015
6016 l2cap_reassemble_sdu(chan, skb, control);
6017 } else {
6018 if (chan->sdu) {
6019 kfree_skb(chan->sdu);
6020 chan->sdu = NULL;
6021 }
6022 chan->sdu_last_frag = NULL;
6023 chan->sdu_len = 0;
6024
6025 if (skb) {
6026 BT_DBG("Freeing %p", skb);
6027 kfree_skb(skb);
6028 }
6029 }
6030
6031 chan->last_acked_seq = control->txseq;
6032 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6033
6034 return err;
6035 }
6036
/* Entry point for a single ERTM/streaming-mode frame on a connected
 * channel.  Unpacks the control field, checks FCS, payload length and
 * F/P-bit validity, then routes I-frames through the rx state machine
 * (or the streaming receiver) and S-frames through the super->event
 * table.  Bad frames are either silently dropped or escalate to a
 * disconnect request depending on severity.  Always returns 0; skb
 * ownership is consumed on every path.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Effective payload excludes the SDU-length prefix and the FCS */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload larger than the negotiated MPS is a hard violation */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps control->super (RR/REJ/RNR/SREJ) to an rx event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames must carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6124
/* Deliver an incoming data frame to the channel identified by cid.
 * NOTE(review): l2cap_get_chan_by_scid appears to return the channel
 * already locked — the A2MP fallback path takes l2cap_chan_lock()
 * explicitly to match the unconditional l2cap_chan_unlock() at 'done';
 * confirm against the helper's definition.  skb ownership is consumed
 * on every path (recv on success, kfree_skb otherwise).
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		/* Unknown scid on the A2MP CID may start a new AMP
		 * manager channel; anything else is dropped.
		 */
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() returning 0 means it took ownership of skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv consumes the skb on all paths */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6183
6184 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6185 struct sk_buff *skb)
6186 {
6187 struct l2cap_chan *chan;
6188
6189 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6190 if (!chan)
6191 goto drop;
6192
6193 BT_DBG("chan %p, len %d", chan, skb->len);
6194
6195 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6196 goto drop;
6197
6198 if (chan->imtu < skb->len)
6199 goto drop;
6200
6201 if (!chan->ops->recv(chan, skb))
6202 return;
6203
6204 drop:
6205 kfree_skb(skb);
6206 }
6207
6208 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
6209 struct sk_buff *skb)
6210 {
6211 struct l2cap_chan *chan;
6212
6213 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
6214 if (!chan)
6215 goto drop;
6216
6217 BT_DBG("chan %p, len %d", chan, skb->len);
6218
6219 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6220 goto drop;
6221
6222 if (chan->imtu < skb->len)
6223 goto drop;
6224
6225 if (!chan->ops->recv(chan, skb))
6226 return;
6227
6228 drop:
6229 kfree_skb(skb);
6230 }
6231
/* Demultiplex a complete L2CAP frame by destination CID.  The basic
 * header pointer is taken before skb_pull(); the pull only advances
 * skb->data past the header, so reading lh afterwards is still valid.
 * skb ownership is consumed on all paths.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the remaining payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry a PSM before the payload */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		/* SMP failure tears down the whole connection */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6275
6276 /* ---- L2CAP interface with lower layer (HCI) ---- */
6277
/* HCI callback for an incoming BR/EDR connection request: scan the
 * global channel list for listeners bound to this adapter's address
 * (exact match) or to BDADDR_ANY (wildcard), and build the link-mode
 * mask (accept / master) the HCI layer should apply.  Exact-address
 * listeners take precedence over wildcard ones.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			/* Listener bound to this specific adapter */
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			/* Wildcard listener, used only if no exact match */
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
6308
6309 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6310 {
6311 struct l2cap_conn *conn;
6312
6313 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6314
6315 if (!status) {
6316 conn = l2cap_conn_add(hcon, status);
6317 if (conn)
6318 l2cap_conn_ready(conn);
6319 } else {
6320 l2cap_conn_del(hcon, bt_to_errno(status));
6321 }
6322 }
6323
6324 int l2cap_disconn_ind(struct hci_conn *hcon)
6325 {
6326 struct l2cap_conn *conn = hcon->l2cap_data;
6327
6328 BT_DBG("hcon %p", hcon);
6329
6330 if (!conn)
6331 return HCI_ERROR_REMOTE_USER_TERM;
6332 return conn->disc_reason;
6333 }
6334
/* HCI callback: the ACL link went down — tear down all L2CAP state
 * for it, translating the HCI reason to an errno.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
6341
6342 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6343 {
6344 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6345 return;
6346
6347 if (encrypt == 0x00) {
6348 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6349 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6350 } else if (chan->sec_level == BT_SECURITY_HIGH)
6351 l2cap_chan_close(chan, ECONNREFUSED);
6352 } else {
6353 if (chan->sec_level == BT_SECURITY_MEDIUM)
6354 __clear_chan_timer(chan);
6355 }
6356 }
6357
/* HCI callback: authentication/encryption state changed on hcon.
 * Walks every channel on the connection (under conn->chan_lock, each
 * channel individually locked) and advances or tears down its setup
 * state machine according to the new security result.  Always
 * returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* On LE, successful encryption triggers SMP key
		 * distribution and ends the security timeout.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* The A2MP fixed channel is not gated by link security */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* LE data channel becomes ready once the link encrypts */
		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Skip channels with a connect request still in flight */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			/* Resume a socket that was suspended pending
			 * the security procedure.
			 */
			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				l2cap_start_connection(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Wait for userspace to authorize */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			/* Answer the pending connect request, then kick
			 * off configuration if the connect succeeded.
			 */
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
6473
/* HCI callback: one ACL fragment arrived for this link.  Reassembles
 * fragments into a complete L2CAP frame in conn->rx_skb, using the
 * basic-header length of the start fragment to size the buffer, then
 * hands the finished frame to l2cap_recv_frame().  Note that the
 * 'break' at the end of both switch arms falls through to the 'drop'
 * label: the source skb is always freed there because its payload has
 * already been copied into rx_skb (or discarded).  Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembly is in progress
		 * means the previous frame was truncated — discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
6574
6575 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6576 {
6577 struct l2cap_chan *c;
6578
6579 read_lock(&chan_list_lock);
6580
6581 list_for_each_entry(c, &chan_list, global_l) {
6582 struct sock *sk = c->sk;
6583
6584 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6585 &bt_sk(sk)->src, &bt_sk(sk)->dst,
6586 c->state, __le16_to_cpu(c->psm),
6587 c->scid, c->dcid, c->imtu, c->omtu,
6588 c->sec_level, c->mode);
6589 }
6590
6591 read_unlock(&chan_list_lock);
6592
6593 return 0;
6594 }
6595
/* debugfs open: bind the seq_file single-record show callback */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
6600
/* File operations for the read-only /sys/kernel/debug/bluetooth/l2cap
 * entry; seq_file helpers provide read/seek/release.
 */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs entry, kept for removal in l2cap_exit() */
static struct dentry *l2cap_debugfs;
6609
6610 int __init l2cap_init(void)
6611 {
6612 int err;
6613
6614 err = l2cap_init_sockets();
6615 if (err < 0)
6616 return err;
6617
6618 if (bt_debugfs) {
6619 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6620 NULL, &l2cap_debugfs_fops);
6621 if (!l2cap_debugfs)
6622 BT_ERR("Failed to create L2CAP debug file");
6623 }
6624
6625 return 0;
6626 }
6627
/* Module teardown: remove the debugfs entry and unregister sockets.
 * debugfs_remove() tolerates a NULL dentry, so no guard is needed.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
6633
/* Expose disable_ertm as a writable module parameter (0644 in sysfs) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");