Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43
44 #define LE_FLOWCTL_MAX_CREDITS 65535
45
46 bool disable_ertm;
47
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52
53 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
54 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
55
56 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
57 u8 code, u8 ident, u16 dlen, void *data);
58 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
59 void *data);
60 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
61 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
62
63 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
64 struct sk_buff_head *skbs, u8 event);
65
66 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
67 {
68 if (link_type == LE_LINK) {
69 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
70 return BDADDR_LE_PUBLIC;
71 else
72 return BDADDR_LE_RANDOM;
73 }
74
75 return BDADDR_BREDR;
76 }
77
/* BDADDR_* address type of the local (source) side of @hcon. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
82
/* BDADDR_* address type of the remote (destination) side of @hcon. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
87
88 /* ---- L2CAP channels ---- */
89
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
91 u16 cid)
92 {
93 struct l2cap_chan *c;
94
95 list_for_each_entry(c, &conn->chan_l, list) {
96 if (c->dcid == cid)
97 return c;
98 }
99 return NULL;
100 }
101
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 u16 cid)
104 {
105 struct l2cap_chan *c;
106
107 list_for_each_entry(c, &conn->chan_l, list) {
108 if (c->scid == cid)
109 return c;
110 }
111 return NULL;
112 }
113
114 /* Find channel with given SCID.
115 * Returns locked channel. */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
117 u16 cid)
118 {
119 struct l2cap_chan *c;
120
121 mutex_lock(&conn->chan_lock);
122 c = __l2cap_get_chan_by_scid(conn, cid);
123 if (c)
124 l2cap_chan_lock(c);
125 mutex_unlock(&conn->chan_lock);
126
127 return c;
128 }
129
130 /* Find channel with given DCID.
131 * Returns locked channel.
132 */
133 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
134 u16 cid)
135 {
136 struct l2cap_chan *c;
137
138 mutex_lock(&conn->chan_lock);
139 c = __l2cap_get_chan_by_dcid(conn, cid);
140 if (c)
141 l2cap_chan_lock(c);
142 mutex_unlock(&conn->chan_lock);
143
144 return c;
145 }
146
147 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 u8 ident)
149 {
150 struct l2cap_chan *c;
151
152 list_for_each_entry(c, &conn->chan_l, list) {
153 if (c->ident == ident)
154 return c;
155 }
156 return NULL;
157 }
158
159 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
160 u8 ident)
161 {
162 struct l2cap_chan *c;
163
164 mutex_lock(&conn->chan_lock);
165 c = __l2cap_get_chan_by_ident(conn, ident);
166 if (c)
167 l2cap_chan_lock(c);
168 mutex_unlock(&conn->chan_lock);
169
170 return c;
171 }
172
173 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
174 {
175 struct l2cap_chan *c;
176
177 list_for_each_entry(c, &chan_list, global_l) {
178 if (c->sport == psm && !bacmp(&c->src, src))
179 return c;
180 }
181 return NULL;
182 }
183
184 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
185 {
186 int err;
187
188 write_lock(&chan_list_lock);
189
190 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
191 err = -EADDRINUSE;
192 goto done;
193 }
194
195 if (psm) {
196 chan->psm = psm;
197 chan->sport = psm;
198 err = 0;
199 } else {
200 u16 p;
201
202 err = -EINVAL;
203 for (p = 0x1001; p < 0x1100; p += 2)
204 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
205 chan->psm = cpu_to_le16(p);
206 chan->sport = cpu_to_le16(p);
207 err = 0;
208 break;
209 }
210 }
211
212 done:
213 write_unlock(&chan_list_lock);
214 return err;
215 }
216 EXPORT_SYMBOL_GPL(l2cap_add_psm);
217
218 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
219 {
220 write_lock(&chan_list_lock);
221
222 /* Override the defaults (which are for conn-oriented) */
223 chan->omtu = L2CAP_DEFAULT_MTU;
224 chan->chan_type = L2CAP_CHAN_FIXED;
225
226 chan->scid = scid;
227
228 write_unlock(&chan_list_lock);
229
230 return 0;
231 }
232
233 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
234 {
235 u16 cid, dyn_end;
236
237 if (conn->hcon->type == LE_LINK)
238 dyn_end = L2CAP_CID_LE_DYN_END;
239 else
240 dyn_end = L2CAP_CID_DYN_END;
241
242 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
243 if (!__l2cap_get_chan_by_scid(conn, cid))
244 return cid;
245 }
246
247 return 0;
248 }
249
/* Move @chan to @state and notify the owner (socket layer) via the
 * state_change callback, with no error.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
258
/* Like l2cap_state_change() but also reports @err to the owner. */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
265
/* Report @err to the channel owner without changing the state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
270
/* (Re)arm the ERTM retransmission timer.
 *
 * Skipped while the monitor timer is pending: once the monitor timer
 * runs the link is already being probed, so the retransmission timer
 * must not be restarted underneath it.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
279
/* Arm the ERTM monitor timer, cancelling the retransmission timer
 * first (the two are mutually exclusive).
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
288
/* Linear search of @head for the skb carrying ERTM tx sequence number
 * @seq; returns NULL if it is not queued.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}
301
302 /* ---- L2CAP sequence number lists ---- */
303
304 /* For ERTM, ordered lists of sequence numbers must be tracked for
305 * SREJ requests that are received and for frames that are to be
306 * retransmitted. These seq_list functions implement a singly-linked
307 * list in an array, where membership in the list can also be checked
308 * in constant time. Items can also be added to the tail of the list
309 * and removed from the head in constant time, without further memory
310 * allocs or frees.
311 */
312
313 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
314 {
315 size_t alloc_size, i;
316
317 /* Allocated size is a power of 2 to map sequence numbers
318 * (which may be up to 14 bits) in to a smaller array that is
319 * sized for the negotiated ERTM transmit windows.
320 */
321 alloc_size = roundup_pow_of_two(size);
322
323 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
324 if (!seq_list->list)
325 return -ENOMEM;
326
327 seq_list->mask = alloc_size - 1;
328 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
329 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
330 for (i = 0; i < alloc_size; i++)
331 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
332
333 return 0;
334 }
335
/* Release the seq_list backing array; kfree(NULL) is a no-op so this
 * is safe on a never-initialized list.
 */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
340
/* Constant-time membership test: a slot holds L2CAP_SEQ_LIST_CLEAR
 * iff the corresponding sequence number is not on the list.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
347
/* Remove and return the sequence number at the head of the list.
 * Callers must ensure the list is non-empty.  When the popped entry
 * was the tail (marked L2CAP_SEQ_LIST_TAIL) the list becomes empty
 * and head/tail are reset to CLEAR.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next link, then clear the popped slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
363
/* Empty the list.  O(capacity) when non-empty; a no-op when head is
 * already CLEAR since every slot is then CLEAR by invariant.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
377
/* Append @seq to the tail of the list in constant time.  Duplicate
 * appends are ignored (the slot is already occupied).  The tail slot
 * always holds the L2CAP_SEQ_LIST_TAIL sentinel.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
395
/* Delayed-work handler for the channel timer (armed by
 * __set_chan_timer()).  Closes the channel with a reason derived from
 * the state it timed out in, then drops the reference the timer held.
 * Lock order: conn->chan_lock before the channel lock.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* close() may sleep / take the socket lock, so it is called
	 * after the channel lock is released.
	 */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference taken when the timer was armed */
	l2cap_chan_put(chan);
}
425
/* Allocate and initialize a new channel in BT_OPEN state and link it
 * on the global channel list.  Returns NULL on allocation failure.
 * The caller owns the initial kref; release with l2cap_chan_put().
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
457
/* kref release callback: unlink the channel from the global list and
 * free it.  Called only via l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
470
/* Take a reference on @c. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
477
/* Drop a reference on @c, freeing it via l2cap_chan_destroy() when
 * the count reaches zero.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
485
/* Reset the negotiable channel parameters (ERTM window/retry limits,
 * security level, flush and ERTM timeouts) to their spec defaults and
 * clear any in-progress configuration state.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
	chan->conf_state = 0;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
504
/* Initialize the LE credit-based flow control state of @chan: no SDU
 * reassembly in progress, zero TX credits (peer grants them), the
 * module-default RX credits, and an MPS capped by the local MTU.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = 0;
	chan->rx_credits = le_max_credits;
	chan->mps = min_t(u16, chan->imtu, le_default_mps);

	skb_queue_head_init(&chan->tx_q);
}
516
/* Attach @chan to @conn: assign CIDs/MTU according to the channel
 * type, set EFS defaults, take the needed references and link the
 * channel on the connection's channel list.
 * Caller must hold conn->chan_lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default extended flow spec (best effort) */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
568
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
575
576 void l2cap_chan_del(struct l2cap_chan *chan, int err)
577 {
578 struct l2cap_conn *conn = chan->conn;
579
580 __clear_chan_timer(chan);
581
582 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
583 state_to_string(chan->state));
584
585 chan->ops->teardown(chan, err);
586
587 if (conn) {
588 struct amp_mgr *mgr = conn->hcon->amp_mgr;
589 /* Delete from channel list */
590 list_del(&chan->list);
591
592 l2cap_chan_put(chan);
593
594 chan->conn = NULL;
595
596 /* Reference was only held for non-fixed channels or
597 * fixed channels that explicitly requested it using the
598 * FLAG_HOLD_HCI_CONN flag.
599 */
600 if (chan->chan_type != L2CAP_CHAN_FIXED ||
601 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
602 hci_conn_drop(conn->hcon);
603
604 if (mgr && mgr->bredr_chan == chan)
605 mgr->bredr_chan = NULL;
606 }
607
608 if (chan->hs_hchan) {
609 struct hci_chan *hs_hchan = chan->hs_hchan;
610
611 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
612 amp_disconnect_logical_link(hs_hchan);
613 }
614
615 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
616 return;
617
618 switch(chan->mode) {
619 case L2CAP_MODE_BASIC:
620 break;
621
622 case L2CAP_MODE_LE_FLOWCTL:
623 skb_queue_purge(&chan->tx_q);
624 break;
625
626 case L2CAP_MODE_ERTM:
627 __clear_retrans_timer(chan);
628 __clear_monitor_timer(chan);
629 __clear_ack_timer(chan);
630
631 skb_queue_purge(&chan->srej_q);
632
633 l2cap_seq_list_free(&chan->srej_list);
634 l2cap_seq_list_free(&chan->retrans_list);
635
636 /* fall through */
637
638 case L2CAP_MODE_STREAMING:
639 skb_queue_purge(&chan->tx_q);
640 break;
641 }
642
643 return;
644 }
645 EXPORT_SYMBOL_GPL(l2cap_chan_del);
646
/* Work item run after an LE identity-address resolution: copy the
 * (possibly updated) destination address and type of the HCI
 * connection into every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
665
666 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
667 {
668 struct l2cap_conn *conn = chan->conn;
669 struct l2cap_le_conn_rsp rsp;
670 u16 result;
671
672 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
673 result = L2CAP_CR_AUTHORIZATION;
674 else
675 result = L2CAP_CR_BAD_PSM;
676
677 l2cap_state_change(chan, BT_DISCONN);
678
679 rsp.dcid = cpu_to_le16(chan->scid);
680 rsp.mtu = cpu_to_le16(chan->imtu);
681 rsp.mps = cpu_to_le16(chan->mps);
682 rsp.credits = cpu_to_le16(chan->rx_credits);
683 rsp.result = cpu_to_le16(result);
684
685 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
686 &rsp);
687 }
688
689 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
690 {
691 struct l2cap_conn *conn = chan->conn;
692 struct l2cap_conn_rsp rsp;
693 u16 result;
694
695 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
696 result = L2CAP_CR_SEC_BLOCK;
697 else
698 result = L2CAP_CR_BAD_PSM;
699
700 l2cap_state_change(chan, BT_DISCONN);
701
702 rsp.scid = cpu_to_le16(chan->dcid);
703 rsp.dcid = cpu_to_le16(chan->scid);
704 rsp.result = cpu_to_le16(result);
705 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
706
707 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
708 }
709
/* Close @chan with error @reason, choosing the shutdown path from the
 * current state: established conn-oriented channels send a disconnect
 * request and wait for the response under the channel timer; a
 * half-open incoming channel (BT_CONNECT2) first rejects the pending
 * connect request; other states tear down immediately.
 * Caller must hold the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
752
/* Translate the channel type, PSM and security level into the HCI
 * authentication requirement to request from the controller.  As a
 * side effect, SDP-class channels at BT_SECURITY_LOW are bumped to
 * BT_SECURITY_SDP.  Note the CONN_ORIENTED case falls through to the
 * default for non-SDP PSMs.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
803
/* Service level security */
/* Request the security level the channel needs: SMP pairing on LE
 * links, HCI authentication/encryption on BR/EDR.  Return value comes
 * from smp_conn_security()/hci_conn_security().
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
818
819 static u8 l2cap_get_ident(struct l2cap_conn *conn)
820 {
821 u8 id;
822
823 /* Get next available identificator.
824 * 1 - 128 are used by kernel.
825 * 129 - 199 are reserved.
826 * 200 - 254 are used by utilities like l2ping, etc.
827 */
828
829 mutex_lock(&conn->ident_lock);
830
831 if (++conn->tx_ident > 128)
832 conn->tx_ident = 1;
833
834 id = conn->tx_ident;
835
836 mutex_unlock(&conn->ident_lock);
837
838 return id;
839 }
840
/* Build an L2CAP signalling command and queue it on the connection's
 * HCI channel at maximum priority.  Allocation failure is silently
 * dropped (signalling has its own retransmission at higher layers).
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
865
866 static bool __chan_is_moving(struct l2cap_chan *chan)
867 {
868 return chan->move_state != L2CAP_MOVE_STABLE &&
869 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
870 }
871
/* Transmit @skb on the channel's data path: the AMP (high-speed)
 * logical link when one is active and no move is in progress,
 * otherwise the BR/EDR or LE ACL link with appropriate flush flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
903
/* Decode a 16-bit enhanced control field into @control.  Fields that
 * do not exist for the decoded frame type are zeroed so stale values
 * never leak through.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
927
/* Decode a 32-bit extended control field into @control.  Fields that
 * do not exist for the decoded frame type are zeroed so stale values
 * never leak through.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
951
/* Decode the control field at the front of @skb into its control
 * block and pull it off the skb, handling both the extended (32-bit)
 * and enhanced (16-bit) field formats.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
965
966 static u32 __pack_extended_control(struct l2cap_ctrl *control)
967 {
968 u32 packed;
969
970 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
971 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
972
973 if (control->sframe) {
974 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
975 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
976 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
977 } else {
978 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
979 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
980 }
981
982 return packed;
983 }
984
985 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
986 {
987 u16 packed;
988
989 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
990 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
991
992 if (control->sframe) {
993 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
994 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
995 packed |= L2CAP_CTRL_FRAME_TYPE;
996 } else {
997 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
998 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
999 }
1000
1001 return packed;
1002 }
1003
/* Write the encoded control field for @control into @skb just after
 * the basic L2CAP header, in the channel's negotiated field format.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
1016
1017 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1018 {
1019 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1020 return L2CAP_EXT_HDR_SIZE;
1021 else
1022 return L2CAP_ENH_HDR_SIZE;
1023 }
1024
/* Build an ERTM S-frame PDU carrying the already-encoded @control
 * field, appending an FCS when CRC16 is negotiated.  Returns the skb
 * or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers the header and control field built so far */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1057
/* Encode and transmit the supervisory frame described by @control,
 * updating the related ERTM state (pending F-bit, RNR-sent flag, last
 * acked sequence / ack timer).  No-op for non-S-frames or while an
 * AMP channel move is in progress.  @control may be modified (final
 * bit set from a pending CONN_SEND_FBIT).
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1098
1099 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1100 {
1101 struct l2cap_ctrl control;
1102
1103 BT_DBG("chan %p, poll %d", chan, poll);
1104
1105 memset(&control, 0, sizeof(control));
1106 control.sframe = 1;
1107 control.poll = poll;
1108
1109 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1110 control.super = L2CAP_SUPER_RNR;
1111 else
1112 control.super = L2CAP_SUPER_RR;
1113
1114 control.reqseq = chan->buffer_seq;
1115 l2cap_send_sframe(chan, &control);
1116 }
1117
1118 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1119 {
1120 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1121 return true;
1122
1123 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1124 }
1125
/* Decide whether the channel may be created on (or moved to) an AMP
 * controller: both ends must advertise A2MP support, at least one
 * non-BR/EDR AMP controller must be up, and the channel policy must
 * prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return false;

	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
		return false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1153
/* Validate extended flow spec parameters.  Currently a stub that
 * accepts everything; kept so callers have a single hook for EFS
 * validation.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1159
/* Send an L2CAP Connection Request for @chan, allocating a fresh
 * signalling identifier and marking the connect as pending so the
 * response can be matched.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1174
1175 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1176 {
1177 struct l2cap_create_chan_req req;
1178 req.scid = cpu_to_le16(chan->scid);
1179 req.psm = chan->psm;
1180 req.amp_id = amp_id;
1181
1182 chan->ident = l2cap_get_ident(chan->conn);
1183
1184 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1185 sizeof(req), &req);
1186 }
1187
/* Prepare an ERTM channel for a move between controllers: stop all
 * ERTM timers, rewind retry accounting on queued frames, clear
 * SREJ/retransmit bookkeeping and park the RX/TX state machines so
 * transmission is quiesced until the move completes.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	/* Only ERTM mode carries state that must be quiesced */
	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;

	/* Frames already sent at least once are reset to a single
	 * attempt; the first never-sent frame (retries == 0) ends
	 * the walk.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Treat the peer as busy so nothing is sent during the move */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1222
/* Finalize a channel move and re-arm the ERTM state machines: the
 * move initiator polls the peer and waits for the final (F=1)
 * response, the responder waits for that poll (P=1).
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;

	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Non-ERTM modes carry no per-move RX/TX state */
	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1244
/* Transition @chan to BT_CONNECTED and notify its owner via ->ready(). */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* No TX credits yet on an LE flow-control channel: start suspended */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1265
/* Send an LE Credit Based Connection Request for @chan.
 * FLAG_LE_CONN_REQ_SENT guards against sending it more than once.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	req.psm = chan->psm;
	req.scid = cpu_to_le16(chan->scid);
	req.mtu = cpu_to_le16(chan->imtu);
	req.mps = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1285
1286 static void l2cap_le_start(struct l2cap_chan *chan)
1287 {
1288 struct l2cap_conn *conn = chan->conn;
1289
1290 if (!smp_conn_security(conn->hcon, chan->sec_level))
1291 return;
1292
1293 if (!chan->psm) {
1294 l2cap_chan_ready(chan);
1295 return;
1296 }
1297
1298 if (chan->state == BT_CONNECT)
1299 l2cap_le_connect(chan);
1300 }
1301
1302 static void l2cap_start_connection(struct l2cap_chan *chan)
1303 {
1304 if (__amp_capable(chan)) {
1305 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1306 a2mp_discover_amp(chan);
1307 } else if (chan->conn->hcon->type == LE_LINK) {
1308 l2cap_le_start(chan);
1309 } else {
1310 l2cap_send_conn_req(chan);
1311 }
1312 }
1313
/* Query the peer's feature mask, at most once per connection.
 * Arms the info timer so setup can proceed even if the peer never
 * answers (see l2cap_info_timeout()).
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1331
/* Drive connection establishment for @chan as far as currently
 * possible. LE links go straight to the LE path. On BR/EDR the
 * peer's feature mask must be known first (requesting it if needed);
 * once known, the connection is started as soon as security allows
 * and no earlier connect request is still pending.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		/* l2cap_conn_start() resumes when the info rsp arrives
		 * or the info timer fires.
		 */
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (l2cap_chan_check_security(chan, true) &&
	    __l2cap_no_conn_pending(chan))
		l2cap_start_connection(chan);
}
1353
1354 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1355 {
1356 u32 local_feat_mask = l2cap_feat_mask;
1357 if (!disable_ertm)
1358 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1359
1360 switch (mode) {
1361 case L2CAP_MODE_ERTM:
1362 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1363 case L2CAP_MODE_STREAMING:
1364 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1365 default:
1366 return 0x00;
1367 }
1368 }
1369
/* Initiate disconnection of @chan, moving it to BT_DISCONN with @err.
 * ERTM timers are stopped first so no retransmission fires during
 * teardown. A2MP channels have no Disconnect Request on the wire, so
 * for them only the state changes.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1396
1397 /* ---- L2CAP connections ---- */
/* Walk all channels on @conn and push each one forward now that the
 * connection is usable (features known / security state changed).
 * Channels in BT_CONNECT get their connect request sent; incoming
 * channels in BT_CONNECT2 are answered with the appropriate Connect
 * Response, followed by our first Configure Request on success.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe variant: l2cap_chan_close() below may unlink the chan */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close instead of falling back to Basic mode
			 * when this device mandates ERTM/streaming but
			 * the peer does not support it.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful, not-yet-configured channel
			 * proceeds to the configuration stage.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1474
/* LE-specific post-connect work: trigger security for outgoing
 * pairing and bring the connection interval into the configured
 * range if needed.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1507
/* Called once the underlying HCI link is fully established: start
 * feature discovery on ACL links, advance every existing channel, run
 * the LE-specific setup and finally release any RX data queued while
 * the connection was still pending.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are handled by the A2MP code itself */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels become ready only once the
			 * peer's features are known.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1548
/* Notify sockets that we can no longer guarantee reliability */
/* Set @err on every channel that demanded a reliable link. */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1565
/* Info request timed out: mark feature discovery as complete anyway
 * so connection setup can continue (see l2cap_request_info()).
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1576
1577 /*
1578 * l2cap_user
1579 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1580 * callback is called during registration. The ->remove callback is called
1581 * during unregistration.
1582 * An l2cap_user object can either be explicitly unregistered or when the
1583 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1584 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1585 * External modules must own a reference to the l2cap_conn object if they intend
1586 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1587 * any time if they don't.
1588 */
1589
/* Register an l2cap_user on @conn (see the l2cap_user comment above).
 *
 * Returns 0 on success, -EINVAL if @user is already registered,
 * -ENODEV when the connection has already been torn down, or any
 * error returned by the user's ->probe() callback.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1627
/* Unregister @user from @conn and invoke its ->remove() callback.
 * No-op when the user is not currently registered.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (list_empty(&user->list))
		goto out_unlock;

	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1644
/* Detach every registered l2cap_user, calling each ->remove().
 * The while/list_first_entry form stays correct even if a ->remove()
 * callback mutates the list.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}
1655
/* Tear down the L2CAP state attached to @hcon: cancel pending work,
 * detach all users, close every channel with @err and drop the
 * reference that kept @conn alive.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Free any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a reference: ->close() runs after l2cap_chan_del()
		 * has already unlinked the channel.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1711
/* kref release callback: drop the hci_conn reference and free @conn. */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1719
/* Take a reference on @conn; paired with l2cap_conn_put(). */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1726
/* Drop a reference on @conn; the last put frees it. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1732
1733 /* ---- Socket interface ---- */
1734
/* Find socket with psm and source / destination bdaddr.
 * Returns closest match: an exact address match wins immediately,
 * otherwise the last wildcard (BDADDR_ANY) match seen is returned.
 * Channels whose source address type does not fit @link_type are
 * skipped. The returned channel carries an extra reference that the
 * caller must drop with l2cap_chan_put().
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				l2cap_chan_hold(c);
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		l2cap_chan_hold(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1786
/* ERTM monitor timer expired: feed the event into the TX state
 * machine. The timer holds a channel reference which is dropped here.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel already detached from its connection: just release */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1807
/* ERTM retransmission timer expired: feed the event into the TX state
 * machine. The timer holds a channel reference which is dropped here.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel already detached from its connection: just release */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1827
/* Transmit PDUs in streaming mode: every queued frame is sent
 * immediately with the next TX sequence number; no acknowledgements
 * or retransmissions are involved.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Hold off while the channel is moving between controllers */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		/* Append the frame check sequence when enabled */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1866
/* Transmit as many queued I-frames as the ERTM TX window allows.
 *
 * Returns the number of frames sent, 0 when sending is currently not
 * possible (remote busy, channel moving, window full), or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggy-back the F-bit if one is owed to the peer */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acknowledges received frames */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1936
/* Retransmit every sequence number currently on chan->retrans_list.
 *
 * A frame that exceeds max_tx retransmissions triggers disconnection.
 * The original skbs stay on tx_q; the controller gets a fresh clone
 * (or a copy when the skb is shared) with updated control and FCS
 * fields.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		/* max_tx == 0 means unlimited retransmissions */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2017
/* Queue a single sequence number (control->reqseq) for
 * retransmission and flush the retransmit list.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2026
/* Rebuild the retransmit list with every frame sent but not yet
 * acknowledged, starting at control->reqseq, then resend them all.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll from the peer obliges us to answer with F=1 */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip frames the peer has already acknowledged */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Collect everything from reqseq up to the unsent head */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2060
/* Acknowledge received I-frames: piggy-back the ack on pending
 * outgoing data where possible, send an explicit RR once 3/4 of the
 * ack window has accumulated, send RNR while locally busy, and
 * otherwise (re)arm the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2110
/* Copy @len bytes of message data from @msg into @skb, spilling the
 * overflow into a chain of continuation fragments of at most
 * conn->mtu bytes each. @count bytes go into @skb itself (already
 * sized by the caller).
 *
 * Returns the number of bytes consumed, -EFAULT when the copy fails,
 * or the skb allocation error. Partially-built fragments remain
 * attached to @skb; callers free the whole skb on error.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (copy_from_iter(skb_put(*frag, count), count,
				   &msg->msg_iter) != count)
			return -EFAULT;

		sent += count;
		len -= count;

		/* Keep parent skb accounting in sync with the chain */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2154
/* Build a connectionless PDU: L2CAP header plus PSM field, followed
 * by the message data. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* First skb takes what fits beside the header; the rest goes
	 * into fragments (see l2cap_skbuff_fromiovec()).
	 */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2186
/* Build a Basic mode PDU: plain L2CAP header followed by the message
 * data. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2216
/* Build a single ERTM/streaming I-frame PDU.
 *
 * Reserves room for the (enhanced or extended) control field, the SDU
 * length field when @sdulen is non-zero (start-of-SDU fragment), and
 * the FCS when enabled. The control field itself is zeroed here and
 * filled in at transmit time.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2270
/* Split the SDU in @msg into I-frame PDUs appended to @seg_queue,
 * tagging each with the proper SAR value (unsegmented, start,
 * continue, end). Returns 0 on success or a negative error, in which
 * case @seg_queue is purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first (START) PDU carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2337
/* Build one LE flow-control (credit based) PDU. The first fragment of
 * an SDU carries the total SDU length when @sdulen is non-zero.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2380
/* Segment an outgoing SDU into LE flow-control PDUs queued on seg_queue.
 *
 * The first PDU carries the 16-bit SDU length field, so its payload
 * budget is remote_mps minus L2CAP_SDULEN_SIZE; subsequent PDUs may
 * carry a full remote_mps of payload.
 *
 * Returns 0 on success; on failure the partially built queue is purged
 * and the error from PDU creation is returned.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* Only the first PDU carries the SDU length field; once it
		 * has been built, reclaim that space for later PDUs.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2416
/* Send an SDU on a channel, dispatching on channel type and mode:
 * connectionless, LE flow control (credit based), basic, or
 * ERTM/streaming (segmented first, then handed to the TX machinery).
 *
 * Returns the number of bytes queued/sent on success, or a negative
 * errno (-ENOTCONN, -EMSGSIZE, -EAGAIN, -EBADFD, or a PDU build error).
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Credit-based flow control: without credits the caller
		 * must retry later.
		 */
		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have disconnected while segmenting (the
		 * channel lock can be dropped during skb allocation).
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Send as many queued PDUs as the available credits allow */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		/* Out of credits: stop accepting data until the peer
		 * returns credits.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2549
/* Send SREJ S-frames for every frame missing between expected_tx_seq
 * and the just-received txseq, skipping sequence numbers already
 * buffered in srej_q.  Each requested seq is recorded on srej_list so
 * the retransmitted I-frame can be matched when it arrives.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Frames already held out-of-order need no SREJ */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2572
2573 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2574 {
2575 struct l2cap_ctrl control;
2576
2577 BT_DBG("chan %p", chan);
2578
2579 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2580 return;
2581
2582 memset(&control, 0, sizeof(control));
2583 control.sframe = 1;
2584 control.super = L2CAP_SUPER_SREJ;
2585 control.reqseq = chan->srej_list.tail;
2586 l2cap_send_sframe(chan, &control);
2587 }
2588
/* Re-request every sequence number still on the SREJ list except
 * txseq, which has just arrived.  Entries are popped and re-appended,
 * so the initial head is captured to guarantee exactly one pass even
 * though the list is rewritten while iterating.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* txseq is satisfied (dropped from the list); CLEAR means
		 * the list is exhausted.
		 */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2614
/* Process a received acknowledgment: reqseq acknowledges every
 * outstanding I-frame with a sequence number before it.  Newly acked
 * frames are unlinked from the TX queue and freed; the retransmission
 * timer is stopped once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack was already processed */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		/* Only frames still present in tx_q are unlinked and
		 * freed; absent sequence numbers are skipped.
		 */
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2646
/* Abandon the SREJ_SENT receive state: drop all out-of-order buffered
 * frames, forget outstanding SREJ requests, and resume normal
 * reception from the last in-order point (buffer_seq).
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2656
/* ERTM TX state machine handler for the XMIT (normal transmit) state.
 *
 * Data requests are queued and sent immediately; local-busy events
 * toggle CONN_LOCAL_BUSY; poll and retransmission-timeout events send
 * a poll and move the machine to WAIT_F to await the Final bit.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR was sent while busy: poll the peer with an
			 * RR and wait for the Final bit in WAIT_F.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2728
2729 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2730 struct l2cap_ctrl *control,
2731 struct sk_buff_head *skbs, u8 event)
2732 {
2733 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2734 event);
2735
2736 switch (event) {
2737 case L2CAP_EV_DATA_REQUEST:
2738 if (chan->tx_send_head == NULL)
2739 chan->tx_send_head = skb_peek(skbs);
2740 /* Queue data, but don't send. */
2741 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2742 break;
2743 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2744 BT_DBG("Enter LOCAL_BUSY");
2745 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2746
2747 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2748 /* The SREJ_SENT state must be aborted if we are to
2749 * enter the LOCAL_BUSY state.
2750 */
2751 l2cap_abort_rx_srej_sent(chan);
2752 }
2753
2754 l2cap_send_ack(chan);
2755
2756 break;
2757 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2758 BT_DBG("Exit LOCAL_BUSY");
2759 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2760
2761 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2762 struct l2cap_ctrl local_control;
2763 memset(&local_control, 0, sizeof(local_control));
2764 local_control.sframe = 1;
2765 local_control.super = L2CAP_SUPER_RR;
2766 local_control.poll = 1;
2767 local_control.reqseq = chan->buffer_seq;
2768 l2cap_send_sframe(chan, &local_control);
2769
2770 chan->retry_count = 1;
2771 __set_monitor_timer(chan);
2772 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2773 }
2774 break;
2775 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2776 l2cap_process_reqseq(chan, control->reqseq);
2777
2778 /* Fall through */
2779
2780 case L2CAP_EV_RECV_FBIT:
2781 if (control && control->final) {
2782 __clear_monitor_timer(chan);
2783 if (chan->unacked_frames > 0)
2784 __set_retrans_timer(chan);
2785 chan->retry_count = 0;
2786 chan->tx_state = L2CAP_TX_STATE_XMIT;
2787 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2788 }
2789 break;
2790 case L2CAP_EV_EXPLICIT_POLL:
2791 /* Ignore */
2792 break;
2793 case L2CAP_EV_MONITOR_TO:
2794 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2795 l2cap_send_rr_or_rnr(chan, 1);
2796 __set_monitor_timer(chan);
2797 chan->retry_count++;
2798 } else {
2799 l2cap_send_disconn_req(chan, ECONNABORTED);
2800 }
2801 break;
2802 default:
2803 break;
2804 }
2805 }
2806
2807 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2808 struct sk_buff_head *skbs, u8 event)
2809 {
2810 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2811 chan, control, skbs, event, chan->tx_state);
2812
2813 switch (chan->tx_state) {
2814 case L2CAP_TX_STATE_XMIT:
2815 l2cap_tx_state_xmit(chan, control, skbs, event);
2816 break;
2817 case L2CAP_TX_STATE_WAIT_F:
2818 l2cap_tx_state_wait_f(chan, control, skbs, event);
2819 break;
2820 default:
2821 /* Ignore event */
2822 break;
2823 }
2824 }
2825
/* Feed a received reqseq (with F bit) into the TX state machine so
 * acknowledged frames can be released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2832
/* Feed only the Final bit of a received frame into the TX state
 * machine (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2839
2840 /* Copy frame to all raw sockets on that connection */
2841 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2842 {
2843 struct sk_buff *nskb;
2844 struct l2cap_chan *chan;
2845
2846 BT_DBG("conn %p", conn);
2847
2848 mutex_lock(&conn->chan_lock);
2849
2850 list_for_each_entry(chan, &conn->chan_l, list) {
2851 if (chan->chan_type != L2CAP_CHAN_RAW)
2852 continue;
2853
2854 /* Don't send frame to the channel it came from */
2855 if (bt_cb(skb)->l2cap.chan == chan)
2856 continue;
2857
2858 nskb = skb_clone(skb, GFP_KERNEL);
2859 if (!nskb)
2860 continue;
2861 if (chan->ops->recv(chan, nskb))
2862 kfree_skb(nskb);
2863 }
2864
2865 mutex_unlock(&conn->chan_lock);
2866 }
2867
2868 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU: L2CAP header + command header +
 * payload.  Payload that does not fit into the first HCI-MTU-sized skb
 * is placed in continuation fragments chained on frag_list, each
 * carrying raw data without an L2CAP header.
 *
 * Returns the skb, or NULL if allocation fails or the MTU cannot even
 * hold the two headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID depends on the transport */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first skb with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain, including fragments built so far */
	kfree_skb(skb);
	return NULL;
}
2934
/* Decode one configuration option at *ptr and advance *ptr past it.
 *
 * 1/2/4-byte option values are returned by value in *val; any other
 * length returns *val as a pointer into the raw option data, which the
 * caller copies out after checking *olen.
 *
 * NOTE(review): opt->len comes straight from the peer and is not
 * validated against the remaining buffer here; the caller's
 * "len >= L2CAP_CONF_OPT_SIZE" loop bound only stops iteration after
 * the fact.  Callers must validate *olen before dereferencing the
 * returned pointer -- confirm all callers do so.
 *
 * Returns the total encoded size of the option (header + value).
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Opaque/variable-length value: hand back a pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2968
/* Append one configuration option at *ptr and advance *ptr past it.
 *
 * 1/2/4-byte values are stored inline little-endian; any other length
 * treats val as a pointer to len bytes which are copied verbatim.
 *
 * NOTE(review): there is no bound on the output buffer here -- every
 * caller must guarantee L2CAP_CONF_OPT_SIZE + len bytes of remaining
 * space.  Response buffers are fixed-size, so an unchecked series of
 * echoed options can overflow them (this is the overflow class fixed
 * upstream by threading an explicit end pointer through this helper,
 * CVE-2017-1000251; that fix changes the signature and is therefore
 * not applied here).
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2998
/* Append an Extended Flow Specification option describing this
 * channel.  ERTM channels advertise their negotiated local parameters;
 * streaming channels advertise a best-effort spec.  Other modes add
 * nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
3029
/* Delayed-work handler for the ERTM acknowledgment timer: if frames
 * have been received but not yet acknowledged, send an RR/RNR now.
 * Drops a channel reference on exit (presumably taken when the timer
 * was armed -- confirm against __set_ack_timer).
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3049
/* Reset all sequence/SDU state for an ERTM or streaming channel and,
 * for ERTM specifically, initialize the timers and the SREJ and
 * retransmission sequence lists.
 *
 * Returns 0 on success, or a negative errno if a sequence list cannot
 * be allocated (any partially allocated list is freed).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP channel-move state starts on BR/EDR and stable */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3094
3095 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3096 {
3097 switch (mode) {
3098 case L2CAP_MODE_STREAMING:
3099 case L2CAP_MODE_ERTM:
3100 if (l2cap_mode_supported(mode, remote_feat_mask))
3101 return mode;
3102 /* fall through */
3103 default:
3104 return L2CAP_MODE_BASIC;
3105 }
3106 }
3107
3108 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3109 {
3110 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3111 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3112 }
3113
3114 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3115 {
3116 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3117 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3118 }
3119
/* Fill in the retransmission and monitor timeouts of an RFC option.
 *
 * On an AMP link the timeouts are derived from the controller's
 * best-effort flush timeout; otherwise the spec defaults are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout. The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement. ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range carried on the wire */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3157
3158 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3159 {
3160 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3161 __l2cap_ews_supported(chan->conn)) {
3162 /* use extended control field */
3163 set_bit(FLAG_EXT_CTRL, &chan->flags);
3164 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3165 } else {
3166 chan->tx_win = min_t(u16, chan->tx_win,
3167 L2CAP_DEFAULT_TX_WINDOW);
3168 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3169 }
3170 chan->ack_win = chan->tx_win;
3171 }
3172
/* Build a Configure Request for the channel in 'data'.
 *
 * On the first request the mode may be downgraded via
 * l2cap_select_mode() unless the device is a state-2 device that must
 * keep its configured mode.  Options emitted depend on the final mode:
 * MTU, RFC, and for ERTM/streaming also EFS, EWS and FCS as
 * applicable.
 *
 * Returns the number of bytes written to 'data'.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode (re)selection only happens on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default MTU */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an explicit basic-mode RFC when the peer
		 * could otherwise assume ERTM/streaming support.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU size bounded by HCI MTU minus worst-case overhead */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Window above the RFC's 6-bit limit goes in an EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3293
/* Parse a stored Configure Request (chan->conf_req) and build the
 * Configure Response in 'data'.
 *
 * Option lengths are now validated before any value is consumed: a
 * wrong-length option is silently skipped.  Previously a malformed EFS
 * option set remote_efs while leaving 'efs' as uninitialized stack
 * data that was then compared, echoed to the peer and stored into
 * chan->remote_* (uninitialized-stack info leak, CVE-2019-3459/3460
 * class).
 *
 * NOTE(review): the response buffer written through 'ptr' is still not
 * bounds-checked here (see l2cap_add_conf_opt); fixing that requires
 * an interface change.
 *
 * Returns the size of the response written to 'data', or -ECONNREFUSED
 * when the configuration cannot be accepted at all.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Ignore malformed (wrong-length) options */
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			/* Only accept a complete EFS; a truncated one must
			 * not set remote_efs, or uninitialized stack data
			 * would be used below.
			 */
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Unknown hints are ignored; unknown non-hints are
			 * reported back by type.
			 */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's PDU size to what fits our MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0);

	return ptr - data;
}
3507
3508 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3509 void *data, u16 *result)
3510 {
3511 struct l2cap_conf_req *req = data;
3512 void *ptr = req->data;
3513 int type, olen;
3514 unsigned long val;
3515 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3516 struct l2cap_conf_efs efs;
3517
3518 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3519
3520 while (len >= L2CAP_CONF_OPT_SIZE) {
3521 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3522
3523 switch (type) {
3524 case L2CAP_CONF_MTU:
3525 if (val < L2CAP_DEFAULT_MIN_MTU) {
3526 *result = L2CAP_CONF_UNACCEPT;
3527 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3528 } else
3529 chan->imtu = val;
3530 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3531 break;
3532
3533 case L2CAP_CONF_FLUSH_TO:
3534 chan->flush_to = val;
3535 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3536 2, chan->flush_to);
3537 break;
3538
3539 case L2CAP_CONF_RFC:
3540 if (olen == sizeof(rfc))
3541 memcpy(&rfc, (void *)val, olen);
3542
3543 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3544 rfc.mode != chan->mode)
3545 return -ECONNREFUSED;
3546
3547 chan->fcs = 0;
3548
3549 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3550 sizeof(rfc), (unsigned long) &rfc);
3551 break;
3552
3553 case L2CAP_CONF_EWS:
3554 chan->ack_win = min_t(u16, val, chan->ack_win);
3555 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3556 chan->tx_win);
3557 break;
3558
3559 case L2CAP_CONF_EFS:
3560 if (olen == sizeof(efs))
3561 memcpy(&efs, (void *)val, olen);
3562
3563 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3564 efs.stype != L2CAP_SERV_NOTRAFIC &&
3565 efs.stype != chan->local_stype)
3566 return -ECONNREFUSED;
3567
3568 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3569 (unsigned long) &efs);
3570 break;
3571
3572 case L2CAP_CONF_FCS:
3573 if (*result == L2CAP_CONF_PENDING)
3574 if (val == L2CAP_FCS_NONE)
3575 set_bit(CONF_RECV_NO_FCS,
3576 &chan->conf_state);
3577 break;
3578 }
3579 }
3580
3581 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3582 return -ECONNREFUSED;
3583
3584 chan->mode = rfc.mode;
3585
3586 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3587 switch (rfc.mode) {
3588 case L2CAP_MODE_ERTM:
3589 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3590 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3591 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3592 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3593 chan->ack_win = min_t(u16, chan->ack_win,
3594 rfc.txwin_size);
3595
3596 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3597 chan->local_msdu = le16_to_cpu(efs.msdu);
3598 chan->local_sdu_itime =
3599 le32_to_cpu(efs.sdu_itime);
3600 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3601 chan->local_flush_to =
3602 le32_to_cpu(efs.flush_to);
3603 }
3604 break;
3605
3606 case L2CAP_MODE_STREAMING:
3607 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3608 }
3609 }
3610
3611 req->dcid = cpu_to_le16(chan->dcid);
3612 req->flags = cpu_to_le16(0);
3613
3614 return ptr - data;
3615 }
3616
3617 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3618 u16 result, u16 flags)
3619 {
3620 struct l2cap_conf_rsp *rsp = data;
3621 void *ptr = rsp->data;
3622
3623 BT_DBG("chan %p", chan);
3624
3625 rsp->scid = cpu_to_le16(chan->dcid);
3626 rsp->result = cpu_to_le16(result);
3627 rsp->flags = cpu_to_le16(flags);
3628
3629 return ptr - data;
3630 }
3631
3632 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3633 {
3634 struct l2cap_le_conn_rsp rsp;
3635 struct l2cap_conn *conn = chan->conn;
3636
3637 BT_DBG("chan %p", chan);
3638
3639 rsp.dcid = cpu_to_le16(chan->scid);
3640 rsp.mtu = cpu_to_le16(chan->imtu);
3641 rsp.mps = cpu_to_le16(chan->mps);
3642 rsp.credits = cpu_to_le16(chan->rx_credits);
3643 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3644
3645 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3646 &rsp);
3647 }
3648
3649 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3650 {
3651 struct l2cap_conn_rsp rsp;
3652 struct l2cap_conn *conn = chan->conn;
3653 u8 buf[128];
3654 u8 rsp_code;
3655
3656 rsp.scid = cpu_to_le16(chan->dcid);
3657 rsp.dcid = cpu_to_le16(chan->scid);
3658 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3659 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3660
3661 if (chan->hs_hcon)
3662 rsp_code = L2CAP_CREATE_CHAN_RSP;
3663 else
3664 rsp_code = L2CAP_CONN_RSP;
3665
3666 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3667
3668 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3669
3670 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3671 return;
3672
3673 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3674 l2cap_build_conf_req(chan, buf), buf);
3675 chan->num_conf_req++;
3676 }
3677
3678 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3679 {
3680 int type, olen;
3681 unsigned long val;
3682 /* Use sane default values in case a misbehaving remote device
3683 * did not send an RFC or extended window size option.
3684 */
3685 u16 txwin_ext = chan->ack_win;
3686 struct l2cap_conf_rfc rfc = {
3687 .mode = chan->mode,
3688 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3689 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3690 .max_pdu_size = cpu_to_le16(chan->imtu),
3691 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3692 };
3693
3694 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3695
3696 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3697 return;
3698
3699 while (len >= L2CAP_CONF_OPT_SIZE) {
3700 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3701
3702 switch (type) {
3703 case L2CAP_CONF_RFC:
3704 if (olen == sizeof(rfc))
3705 memcpy(&rfc, (void *)val, olen);
3706 break;
3707 case L2CAP_CONF_EWS:
3708 txwin_ext = val;
3709 break;
3710 }
3711 }
3712
3713 switch (rfc.mode) {
3714 case L2CAP_MODE_ERTM:
3715 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3716 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3717 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3718 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3719 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3720 else
3721 chan->ack_win = min_t(u16, chan->ack_win,
3722 rfc.txwin_size);
3723 break;
3724 case L2CAP_MODE_STREAMING:
3725 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3726 }
3727 }
3728
3729 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3730 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3731 u8 *data)
3732 {
3733 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3734
3735 if (cmd_len < sizeof(*rej))
3736 return -EPROTO;
3737
3738 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3739 return 0;
3740
3741 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3742 cmd->ident == conn->info_ident) {
3743 cancel_delayed_work(&conn->info_timer);
3744
3745 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3746 conn->info_ident = 0;
3747
3748 l2cap_conn_start(conn);
3749 }
3750
3751 return 0;
3752 }
3753
/* Handle an incoming Connect (or Create Channel) request.
 *
 * Looks up a listening channel for the requested PSM, performs the
 * security check, creates a new channel bound to this connection and
 * replies with @rsp_code.  @amp_id selects the local controller the
 * channel is created on (AMP_ID_BREDR for plain BR/EDR).
 *
 * Returns the new channel, or NULL when the request was rejected.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* conn->chan_lock must be taken before any channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm = psm;
	/* The remote's source CID is our destination CID */
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	/* __l2cap_chan_add() allocated our source CID; report it back */
	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Let the owner decide whether to accept */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security upgrade in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature discovery still running; answer pending for now */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start feature discovery; l2cap_conn_start() will resume
		 * pending channels when the Information Response arrives.
		 */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3888
3889 static int l2cap_connect_req(struct l2cap_conn *conn,
3890 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3891 {
3892 struct hci_dev *hdev = conn->hcon->hdev;
3893 struct hci_conn *hcon = conn->hcon;
3894
3895 if (cmd_len < sizeof(struct l2cap_conn_req))
3896 return -EPROTO;
3897
3898 hci_dev_lock(hdev);
3899 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3900 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3901 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3902 hci_dev_unlock(hdev);
3903
3904 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3905 return 0;
3906 }
3907
/* Handle a Connect Response or Create Channel Response.
 *
 * The channel is found by the source CID when the response carries one,
 * otherwise by the ident of our pending request.  SUCCESS moves the
 * channel to BT_CONFIG and sends the first Configure Request, PEND only
 * marks the channel, and any other result tears it down.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* No source CID echoed back (e.g. pending/rejected):
		 * match on the ident of the request we sent.
		 */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Send the initial Configure Request only once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Remote refused the connection */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3980
3981 static inline void set_default_fcs(struct l2cap_chan *chan)
3982 {
3983 /* FCS is enabled only in ERTM or streaming mode, if one or both
3984 * sides request it.
3985 */
3986 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3987 chan->fcs = L2CAP_FCS_NONE;
3988 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3989 chan->fcs = L2CAP_FCS_CRC16;
3990 }
3991
3992 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3993 u8 ident, u16 flags)
3994 {
3995 struct l2cap_conn *conn = chan->conn;
3996
3997 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3998 flags);
3999
4000 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4001 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4002
4003 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4004 l2cap_build_conf_rsp(chan, data,
4005 L2CAP_CONF_SUCCESS, flags), data);
4006 }
4007
4008 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4009 u16 scid, u16 dcid)
4010 {
4011 struct l2cap_cmd_rej_cid rej;
4012
4013 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4014 rej.scid = __cpu_to_le16(scid);
4015 rej.dcid = __cpu_to_le16(dcid);
4016
4017 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4018 }
4019
/* Handle a Configure Request for one of our channels.
 *
 * Option data may be split across several requests (continuation flag);
 * fragments accumulate in chan->conf_req until the final fragment, then
 * the whole set is parsed and answered.  Once both directions are
 * configured the channel is brought up (after ERTM/streaming init).
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finish channel setup */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request has not been sent yet */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			/* AMP: respond once the logical link is up */
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4128
/* Handle a Configure Response to one of our Configure Requests.
 *
 * SUCCESS applies the agreed options; PENDING may trigger a deferred EFS
 * response or AMP logical link creation; UNACCEPT re-sends an adjusted
 * request a bounded number of times; any other result (or too many
 * rejections) disconnects the channel.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* AMP: defer the response until the logical
				 * link is established.
				 */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Retry budget exhausted - fall through and give up */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finish channel setup */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4240
/* Handle a Disconnect Request: acknowledge it and tear the channel down.
 *
 * An extra reference is held across l2cap_chan_del()/close() so the
 * channel stays valid until after its lock is dropped; the final put
 * happens here.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The request's destination CID is our source CID */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Echo the CID pair back in the response */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Hold the channel so close() can run after the lock is dropped */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4287
/* Handle a Disconnect Response to our own request and finish tearing
 * the channel down.  As in l2cap_disconnect_req(), a reference is held
 * so close() can safely run after the channel lock is released.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold the channel so close() can run after the lock is dropped */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4326
4327 static inline int l2cap_information_req(struct l2cap_conn *conn,
4328 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4329 u8 *data)
4330 {
4331 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4332 u16 type;
4333
4334 if (cmd_len != sizeof(*req))
4335 return -EPROTO;
4336
4337 type = __le16_to_cpu(req->type);
4338
4339 BT_DBG("type 0x%4.4x", type);
4340
4341 if (type == L2CAP_IT_FEAT_MASK) {
4342 u8 buf[8];
4343 u32 feat_mask = l2cap_feat_mask;
4344 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4345 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4346 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4347 if (!disable_ertm)
4348 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4349 | L2CAP_FEAT_FCS;
4350 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4351 feat_mask |= L2CAP_FEAT_EXT_FLOW
4352 | L2CAP_FEAT_EXT_WINDOW;
4353
4354 put_unaligned_le32(feat_mask, rsp->data);
4355 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4356 buf);
4357 } else if (type == L2CAP_IT_FIXED_CHAN) {
4358 u8 buf[12];
4359 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4360
4361 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4362 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4363 rsp->data[0] = conn->local_fixed_chan;
4364 memset(rsp->data + 1, 0, 7);
4365 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4366 buf);
4367 } else {
4368 struct l2cap_info_rsp rsp;
4369 rsp.type = cpu_to_le16(type);
4370 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4371 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4372 &rsp);
4373 }
4374
4375 return 0;
4376 }
4377
/* Handle an Information Response during the connection's feature
 * discovery sequence.  A features-mask reply may trigger a follow-up
 * fixed-channels query; once discovery completes (or fails) pending
 * channels are resumed via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Discovery failed; give up and start pending channels */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up by querying the fixed channel map */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4440
/* Handle a Create Channel Request (AMP extension of Connect Request).
 *
 * amp_id 0 falls back to a plain BR/EDR connect.  For a real AMP
 * controller, the controller id is validated and the new channel is
 * associated with the AMP physical link; a bad controller id yields a
 * Create Channel Response with L2CAP_CR_BAD_AMP.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP physical link must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* AMP links provide their own integrity check; no L2CAP FCS */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4517
4518 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4519 {
4520 struct l2cap_move_chan_req req;
4521 u8 ident;
4522
4523 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4524
4525 ident = l2cap_get_ident(chan->conn);
4526 chan->ident = ident;
4527
4528 req.icid = cpu_to_le16(chan->scid);
4529 req.dest_amp_id = dest_amp_id;
4530
4531 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4532 &req);
4533
4534 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4535 }
4536
4537 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4538 {
4539 struct l2cap_move_chan_rsp rsp;
4540
4541 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4542
4543 rsp.icid = cpu_to_le16(chan->dcid);
4544 rsp.result = cpu_to_le16(result);
4545
4546 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4547 sizeof(rsp), &rsp);
4548 }
4549
4550 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4551 {
4552 struct l2cap_move_chan_cfm cfm;
4553
4554 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4555
4556 chan->ident = l2cap_get_ident(chan->conn);
4557
4558 cfm.icid = cpu_to_le16(chan->scid);
4559 cfm.result = cpu_to_le16(result);
4560
4561 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4562 sizeof(cfm), &cfm);
4563
4564 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4565 }
4566
4567 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4568 {
4569 struct l2cap_move_chan_cfm cfm;
4570
4571 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4572
4573 cfm.icid = cpu_to_le16(icid);
4574 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4575
4576 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4577 sizeof(cfm), &cfm);
4578 }
4579
4580 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4581 u16 icid)
4582 {
4583 struct l2cap_move_chan_cfm_rsp rsp;
4584
4585 BT_DBG("icid 0x%4.4x", icid);
4586
4587 rsp.icid = cpu_to_le16(icid);
4588 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4589 }
4590
/* Detach the AMP logical link state from the channel; actual link
 * teardown is not implemented yet.
 */
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}
4598
/* Handle failure to bring up an AMP logical link.
 *
 * If the channel was still being created, it is disconnected; if it was
 * being moved, the move is aborted according to our role in it.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		/* Tell the initiator we cannot host the channel */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4629
/* An AMP logical link came up for a channel still in configuration:
 * attach it, send the deferred EFS Configure Response, and finish
 * channel setup when the remote's configuration is already complete.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident was saved when the Configure Response was deferred */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4652
/* An AMP logical link came up for a channel move in progress; advance
 * the move state machine according to our role and local busy state.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Wait for local busy to clear before confirming */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4686
4687 /* Call with chan locked */
4688 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4689 u8 status)
4690 {
4691 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4692
4693 if (status) {
4694 l2cap_logical_fail(chan);
4695 __release_logical_link(chan);
4696 return;
4697 }
4698
4699 if (chan->state != BT_CONNECTED) {
4700 /* Ignore logical link if channel is on BR/EDR */
4701 if (chan->local_amp_id != AMP_ID_BREDR)
4702 l2cap_logical_finish_create(chan, hchan);
4703 } else {
4704 l2cap_logical_finish_move(chan, hchan);
4705 }
4706 }
4707
4708 void l2cap_move_start(struct l2cap_chan *chan)
4709 {
4710 BT_DBG("chan %p", chan);
4711
4712 if (chan->local_amp_id == AMP_ID_BREDR) {
4713 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4714 return;
4715 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4716 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4717 /* Placeholder - start physical link setup */
4718 } else {
4719 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4720 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4721 chan->move_id = 0;
4722 l2cap_move_setup(chan);
4723 l2cap_send_move_chan_req(chan, 0);
4724 }
4725 }
4726
4727 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4728 u8 local_amp_id, u8 remote_amp_id)
4729 {
4730 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4731 local_amp_id, remote_amp_id);
4732
4733 chan->fcs = L2CAP_FCS_NONE;
4734
4735 /* Outgoing channel on AMP */
4736 if (chan->state == BT_CONNECT) {
4737 if (result == L2CAP_CR_SUCCESS) {
4738 chan->local_amp_id = local_amp_id;
4739 l2cap_send_create_chan_req(chan, remote_amp_id);
4740 } else {
4741 /* Revert to BR/EDR connect */
4742 l2cap_send_conn_req(chan);
4743 }
4744
4745 return;
4746 }
4747
4748 /* Incoming channel on AMP */
4749 if (__l2cap_no_conn_pending(chan)) {
4750 struct l2cap_conn_rsp rsp;
4751 char buf[128];
4752 rsp.scid = cpu_to_le16(chan->dcid);
4753 rsp.dcid = cpu_to_le16(chan->scid);
4754
4755 if (result == L2CAP_CR_SUCCESS) {
4756 /* Send successful response */
4757 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4758 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4759 } else {
4760 /* Send negative response */
4761 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4762 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4763 }
4764
4765 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4766 sizeof(rsp), &rsp);
4767
4768 if (result == L2CAP_CR_SUCCESS) {
4769 l2cap_state_change(chan, BT_CONFIG);
4770 set_bit(CONF_REQ_SENT, &chan->conf_state);
4771 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4772 L2CAP_CONF_REQ,
4773 l2cap_build_conf_req(chan, buf), buf);
4774 chan->num_conf_req++;
4775 }
4776 }
4777 }
4778
4779 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4780 u8 remote_amp_id)
4781 {
4782 l2cap_move_setup(chan);
4783 chan->move_id = local_amp_id;
4784 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4785
4786 l2cap_send_move_chan_req(chan, remote_amp_id);
4787 }
4788
4789 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4790 {
4791 struct hci_chan *hchan = NULL;
4792
4793 /* Placeholder - get hci_chan for logical link */
4794
4795 if (hchan) {
4796 if (hchan->state == BT_CONNECTED) {
4797 /* Logical link is ready to go */
4798 chan->hs_hcon = hchan->conn;
4799 chan->hs_hcon->l2cap_data = chan->conn;
4800 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4801 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4802
4803 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4804 } else {
4805 /* Wait for logical link to be ready */
4806 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4807 }
4808 } else {
4809 /* Logical link not available */
4810 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4811 }
4812 }
4813
4814 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4815 {
4816 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4817 u8 rsp_result;
4818 if (result == -EINVAL)
4819 rsp_result = L2CAP_MR_BAD_ID;
4820 else
4821 rsp_result = L2CAP_MR_NOT_ALLOWED;
4822
4823 l2cap_send_move_chan_rsp(chan, rsp_result);
4824 }
4825
4826 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4827 chan->move_state = L2CAP_MOVE_STABLE;
4828
4829 /* Restart data transmission */
4830 l2cap_ertm_send(chan);
4831 }
4832
/* Invoke with locked chan */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* NOTE(review): this early return releases the channel lock the
	 * caller took, while all other paths leave it held -- confirm
	 * against the call sites that the caller unlocks only on the
	 * non-disconnected paths.
	 */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Channel still being established: the physical link
		 * result drives channel creation instead of a move.
		 */
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		/* Established channel but the physical link failed:
		 * cancel any move in progress.
		 */
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			/* No move role assigned: clean up and resume */
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4866
/* Handle a Move Channel Request from the peer.  Validates the request,
 * detects move collisions, and replies with a Move Channel Response
 * carrying the computed result.  Returns -EPROTO for a malformed
 * packet, -EINVAL when A2MP is unsupported, 0 otherwise.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	/* Channel moves are only meaningful with A2MP support */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	/* Returns with the channel locked when found (unlocked below) */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Unknown channel: reply directly, nothing to unlock */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	/* Remember the ident for the response/confirm exchange */
	chan->ident = cmd->ident;

	/* Only dynamic channels in ERTM or streaming mode that are not
	 * pinned to BR/EDR by policy may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* A move onto an AMP requires that the controller exists, is an
	 * AMP device, and is powered up.
	 */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	/* Request accepted: become the move responder */
	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4964
/* Continue a channel move after a Move Channel Response reporting
 * success or pending.  Drives the move state machine forward and sends
 * a Move Channel Confirm (or an icid-only confirm when no channel
 * matches) as required.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* Returns with the channel locked when found (unlocked below) */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result extends the wait using the ERTX timeout */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5054
5055 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5056 u16 result)
5057 {
5058 struct l2cap_chan *chan;
5059
5060 chan = l2cap_get_chan_by_ident(conn, ident);
5061 if (!chan) {
5062 /* Could not locate channel, icid is best guess */
5063 l2cap_send_move_chan_cfm_icid(conn, icid);
5064 return;
5065 }
5066
5067 __clear_chan_timer(chan);
5068
5069 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5070 if (result == L2CAP_MR_COLLISION) {
5071 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5072 } else {
5073 /* Cleanup - cancel move */
5074 chan->move_id = chan->local_amp_id;
5075 l2cap_move_done(chan);
5076 }
5077 }
5078
5079 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5080
5081 l2cap_chan_unlock(chan);
5082 }
5083
5084 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5085 struct l2cap_cmd_hdr *cmd,
5086 u16 cmd_len, void *data)
5087 {
5088 struct l2cap_move_chan_rsp *rsp = data;
5089 u16 icid, result;
5090
5091 if (cmd_len != sizeof(*rsp))
5092 return -EPROTO;
5093
5094 icid = le16_to_cpu(rsp->icid);
5095 result = le16_to_cpu(rsp->result);
5096
5097 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5098
5099 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5100 l2cap_move_continue(conn, icid, result);
5101 else
5102 l2cap_move_fail(conn, cmd->ident, icid, result);
5103
5104 return 0;
5105 }
5106
/* Handle a Move Channel Confirm from the peer.  A confirm response is
 * always sent, even when no channel matches the icid.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Returns with the channel locked when found (unlocked below) */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Move confirmed: adopt the new controller id */
			chan->local_amp_id = chan->move_id;
			/* NOTE(review): the confirm-rsp handler also checks
			 * chan->hs_hchan before releasing; confirm that
			 * __release_logical_link() is safe without that
			 * guard here.
			 */
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Move not confirmed: stay where we are */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5148
5149 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5150 struct l2cap_cmd_hdr *cmd,
5151 u16 cmd_len, void *data)
5152 {
5153 struct l2cap_move_chan_cfm_rsp *rsp = data;
5154 struct l2cap_chan *chan;
5155 u16 icid;
5156
5157 if (cmd_len != sizeof(*rsp))
5158 return -EPROTO;
5159
5160 icid = le16_to_cpu(rsp->icid);
5161
5162 BT_DBG("icid 0x%4.4x", icid);
5163
5164 chan = l2cap_get_chan_by_scid(conn, icid);
5165 if (!chan)
5166 return 0;
5167
5168 __clear_chan_timer(chan);
5169
5170 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5171 chan->local_amp_id = chan->move_id;
5172
5173 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5174 __release_logical_link(chan);
5175
5176 l2cap_move_done(chan);
5177 }
5178
5179 l2cap_chan_unlock(chan);
5180
5181 return 0;
5182 }
5183
5184 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5185 struct l2cap_cmd_hdr *cmd,
5186 u16 cmd_len, u8 *data)
5187 {
5188 struct hci_conn *hcon = conn->hcon;
5189 struct l2cap_conn_param_update_req *req;
5190 struct l2cap_conn_param_update_rsp rsp;
5191 u16 min, max, latency, to_multiplier;
5192 int err;
5193
5194 if (hcon->role != HCI_ROLE_MASTER)
5195 return -EINVAL;
5196
5197 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5198 return -EPROTO;
5199
5200 req = (struct l2cap_conn_param_update_req *) data;
5201 min = __le16_to_cpu(req->min);
5202 max = __le16_to_cpu(req->max);
5203 latency = __le16_to_cpu(req->latency);
5204 to_multiplier = __le16_to_cpu(req->to_multiplier);
5205
5206 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5207 min, max, latency, to_multiplier);
5208
5209 memset(&rsp, 0, sizeof(rsp));
5210
5211 err = hci_check_conn_params(min, max, latency, to_multiplier);
5212 if (err)
5213 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5214 else
5215 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5216
5217 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5218 sizeof(rsp), &rsp);
5219
5220 if (!err) {
5221 u8 store_hint;
5222
5223 store_hint = hci_le_conn_update(hcon, min, max, latency,
5224 to_multiplier);
5225 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5226 store_hint, min, max, latency,
5227 to_multiplier);
5228
5229 }
5230
5231 return 0;
5232 }
5233
/* Handle an LE Credit Based Connection Response for a request we sent.
 * Returns 0 on success, -EPROTO on a malformed packet, -EBADSLT when
 * no matching pending channel is found or the dcid is already in use.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* On success the peer must supply MTU/MPS of at least 23 and a
	 * destination CID inside the LE dynamic range.
	 */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	/* The pending channel is keyed by the ident of our request */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Reject a dcid already in use on this connection */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		/* Adopt the peer's parameters and bring the channel up */
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_AUTHENTICATION:
	case L2CAP_CR_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise security one level and retry after the upgrade */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		/* Any other result is a refusal: tear the channel down */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5320
5321 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5322 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5323 u8 *data)
5324 {
5325 int err = 0;
5326
5327 switch (cmd->code) {
5328 case L2CAP_COMMAND_REJ:
5329 l2cap_command_rej(conn, cmd, cmd_len, data);
5330 break;
5331
5332 case L2CAP_CONN_REQ:
5333 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5334 break;
5335
5336 case L2CAP_CONN_RSP:
5337 case L2CAP_CREATE_CHAN_RSP:
5338 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5339 break;
5340
5341 case L2CAP_CONF_REQ:
5342 err = l2cap_config_req(conn, cmd, cmd_len, data);
5343 break;
5344
5345 case L2CAP_CONF_RSP:
5346 l2cap_config_rsp(conn, cmd, cmd_len, data);
5347 break;
5348
5349 case L2CAP_DISCONN_REQ:
5350 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5351 break;
5352
5353 case L2CAP_DISCONN_RSP:
5354 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5355 break;
5356
5357 case L2CAP_ECHO_REQ:
5358 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5359 break;
5360
5361 case L2CAP_ECHO_RSP:
5362 break;
5363
5364 case L2CAP_INFO_REQ:
5365 err = l2cap_information_req(conn, cmd, cmd_len, data);
5366 break;
5367
5368 case L2CAP_INFO_RSP:
5369 l2cap_information_rsp(conn, cmd, cmd_len, data);
5370 break;
5371
5372 case L2CAP_CREATE_CHAN_REQ:
5373 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5374 break;
5375
5376 case L2CAP_MOVE_CHAN_REQ:
5377 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5378 break;
5379
5380 case L2CAP_MOVE_CHAN_RSP:
5381 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5382 break;
5383
5384 case L2CAP_MOVE_CHAN_CFM:
5385 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5386 break;
5387
5388 case L2CAP_MOVE_CHAN_CFM_RSP:
5389 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5390 break;
5391
5392 default:
5393 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5394 err = -EINVAL;
5395 break;
5396 }
5397
5398 return err;
5399 }
5400
/* Handle an incoming LE Credit Based Connection Request.  Looks up a
 * listening channel for the PSM, performs security and CID validation,
 * and either accepts the connection (possibly deferring the decision)
 * or replies with an error result.  Returns -EPROTO for malformed
 * packets, 0 otherwise (errors are reported in the response instead).
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the minimum MTU/MPS this implementation accepts */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* The link must already satisfy the listener's security level */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	/* Populate the new channel from the link and the request */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	/* Deferred setup: userspace will answer later, no response now */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5526
5527 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5528 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5529 u8 *data)
5530 {
5531 struct l2cap_le_credits *pkt;
5532 struct l2cap_chan *chan;
5533 u16 cid, credits, max_credits;
5534
5535 if (cmd_len != sizeof(*pkt))
5536 return -EPROTO;
5537
5538 pkt = (struct l2cap_le_credits *) data;
5539 cid = __le16_to_cpu(pkt->cid);
5540 credits = __le16_to_cpu(pkt->credits);
5541
5542 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5543
5544 chan = l2cap_get_chan_by_dcid(conn, cid);
5545 if (!chan)
5546 return -EBADSLT;
5547
5548 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5549 if (credits > max_credits) {
5550 BT_ERR("LE credits overflow");
5551 l2cap_send_disconn_req(chan, ECONNRESET);
5552 l2cap_chan_unlock(chan);
5553
5554 /* Return 0 so that we don't trigger an unnecessary
5555 * command reject packet.
5556 */
5557 return 0;
5558 }
5559
5560 chan->tx_credits += credits;
5561
5562 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5563 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5564 chan->tx_credits--;
5565 }
5566
5567 if (chan->tx_credits)
5568 chan->ops->resume(chan);
5569
5570 l2cap_chan_unlock(chan);
5571
5572 return 0;
5573 }
5574
5575 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5576 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5577 u8 *data)
5578 {
5579 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5580 struct l2cap_chan *chan;
5581
5582 if (cmd_len < sizeof(*rej))
5583 return -EPROTO;
5584
5585 mutex_lock(&conn->chan_lock);
5586
5587 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5588 if (!chan)
5589 goto done;
5590
5591 l2cap_chan_lock(chan);
5592 l2cap_chan_del(chan, ECONNREFUSED);
5593 l2cap_chan_unlock(chan);
5594
5595 done:
5596 mutex_unlock(&conn->chan_lock);
5597 return 0;
5598 }
5599
5600 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5601 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5602 u8 *data)
5603 {
5604 int err = 0;
5605
5606 switch (cmd->code) {
5607 case L2CAP_COMMAND_REJ:
5608 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5609 break;
5610
5611 case L2CAP_CONN_PARAM_UPDATE_REQ:
5612 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5613 break;
5614
5615 case L2CAP_CONN_PARAM_UPDATE_RSP:
5616 break;
5617
5618 case L2CAP_LE_CONN_RSP:
5619 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5620 break;
5621
5622 case L2CAP_LE_CONN_REQ:
5623 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5624 break;
5625
5626 case L2CAP_LE_CREDITS:
5627 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5628 break;
5629
5630 case L2CAP_DISCONN_REQ:
5631 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5632 break;
5633
5634 case L2CAP_DISCONN_RSP:
5635 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5636 break;
5637
5638 default:
5639 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5640 err = -EINVAL;
5641 break;
5642 }
5643
5644 return err;
5645 }
5646
/* Process a PDU received on the LE signaling channel.  One command per
 * PDU: the advertised length must cover the remaining skb exactly.
 * The skb is always consumed, on success and on every error path.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	/* LE signaling is only valid on an LE link */
	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Length must match the payload exactly; ident 0 is rejected */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): the message text is misleading; err is a
		 * command-processing error, not a link-type mismatch.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5687
/* Process a PDU received on the BR/EDR signaling channel.  Unlike LE,
 * one ACL frame may carry several concatenated commands; iterate until
 * the buffer is exhausted or a corrupted header is found.  The skb is
 * always consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Let raw sockets see the signaling PDU before dispatching */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Truncated payload or ident 0: stop parsing this PDU */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): misleading message; err is a
			 * command-processing error, not a link-type check.
			 */
			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next concatenated command */
		data += cmd_len;
		len -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5736
5737 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5738 {
5739 u16 our_fcs, rcv_fcs;
5740 int hdr_size;
5741
5742 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5743 hdr_size = L2CAP_EXT_HDR_SIZE;
5744 else
5745 hdr_size = L2CAP_ENH_HDR_SIZE;
5746
5747 if (chan->fcs == L2CAP_FCS_CRC16) {
5748 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5749 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5750 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5751
5752 if (our_fcs != rcv_fcs)
5753 return -EBADMSG;
5754 }
5755 return 0;
5756 }
5757
/* Send the current acknowledgment state with the F-bit set.  The F-bit
 * goes out on an RNR if we are locally busy, otherwise on an I-frame
 * sent by l2cap_ertm_send(), otherwise on a final RR.  NOTE(review):
 * presumably invoked when answering a peer's P-bit poll -- confirm
 * against the callers.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Forget any remembered remote-busy condition and re-arm the
	 * retransmission timer while frames are still unacknowledged.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5791
/* Append new_frag to skb's fragment list, keeping *last_frag pointing
 * at the list tail so appends stay O(1), and update the head skb's
 * length accounting.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	/* First fragment starts the frag_list on the head skb */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	/* Link behind the current tail and advance the tail pointer */
	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	/* Keep the head skb's byte accounting consistent */
	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5810
/* Reassemble an SDU from I-frames according to the SAR bits in
 * @control.  Takes ownership of @skb (setting the local pointer to
 * NULL marks the transfer).  Partial SDUs accumulate on chan->sdu; on
 * error both the frame and any partial SDU are freed.  Returns 0 on
 * delivery or when more fragments are expected, negative errno
 * otherwise.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A whole SDU while one is in progress is an error */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The start fragment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already contain the whole
		 * announced SDU.
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by chan->sdu; don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Reaching or exceeding the announced length before the
		 * end fragment is an error.
		 */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The reassembled length must match exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the offending frame and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5892
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder - resegmentation not implemented, report success */
	return 0;
}
5898
5899 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5900 {
5901 u8 event;
5902
5903 if (chan->mode != L2CAP_MODE_ERTM)
5904 return;
5905
5906 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5907 l2cap_tx(chan, NULL, NULL, event);
5908 }
5909
/* Deliver frames buffered on the SREJ queue in sequence order.  Stops
 * at the first gap (missing sequence number), on local busy, or on a
 * reassembly error.  Once the queue drains completely the channel
 * returns to the normal receive state and acknowledges.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		/* The next in-order frame, if buffered */
		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	/* All out-of-order frames recovered: leave recovery and ack */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5943
/* Handle a received SREJ (selective reject) S-frame.
 *
 * The peer is asking for retransmission of the single I-frame with
 * sequence number control->reqseq.  Validates the request against the
 * unacked window and the retry limit, retransmits, and tracks
 * SREJ_ACT so a later F-bit response is not retransmitted twice.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would select a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* P-bit set: our response must carry the F-bit */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this SREJ was already
			 * acted on while waiting for the F-bit.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6001
/* Handle a received REJ (reject) S-frame.
 *
 * The peer requests retransmission of all I-frames starting at
 * control->reqseq.  Validates the request and the retry limit, then
 * retransmits the whole unacked window, using REJ_ACT to avoid a
 * duplicate retransmission when the matching F-bit arrives.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would reject a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this REJ wasn't already acted on */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6038
/* Classify an incoming I-frame's tx sequence number relative to the
 * receive window and (when in SREJ_SENT state) the outstanding SREJ
 * list.  All window math is done as offsets from last_acked_seq using
 * modular sequence arithmetic (__seq_offset).
 *
 * Returns one of the L2CAP_TXSEQ_* classifications consumed by the
 * RX state handlers.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6124
6125 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6126 struct l2cap_ctrl *control,
6127 struct sk_buff *skb, u8 event)
6128 {
6129 int err = 0;
6130 bool skb_in_use = false;
6131
6132 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6133 event);
6134
6135 switch (event) {
6136 case L2CAP_EV_RECV_IFRAME:
6137 switch (l2cap_classify_txseq(chan, control->txseq)) {
6138 case L2CAP_TXSEQ_EXPECTED:
6139 l2cap_pass_to_tx(chan, control);
6140
6141 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6142 BT_DBG("Busy, discarding expected seq %d",
6143 control->txseq);
6144 break;
6145 }
6146
6147 chan->expected_tx_seq = __next_seq(chan,
6148 control->txseq);
6149
6150 chan->buffer_seq = chan->expected_tx_seq;
6151 skb_in_use = true;
6152
6153 err = l2cap_reassemble_sdu(chan, skb, control);
6154 if (err)
6155 break;
6156
6157 if (control->final) {
6158 if (!test_and_clear_bit(CONN_REJ_ACT,
6159 &chan->conn_state)) {
6160 control->final = 0;
6161 l2cap_retransmit_all(chan, control);
6162 l2cap_ertm_send(chan);
6163 }
6164 }
6165
6166 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6167 l2cap_send_ack(chan);
6168 break;
6169 case L2CAP_TXSEQ_UNEXPECTED:
6170 l2cap_pass_to_tx(chan, control);
6171
6172 /* Can't issue SREJ frames in the local busy state.
6173 * Drop this frame, it will be seen as missing
6174 * when local busy is exited.
6175 */
6176 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6177 BT_DBG("Busy, discarding unexpected seq %d",
6178 control->txseq);
6179 break;
6180 }
6181
6182 /* There was a gap in the sequence, so an SREJ
6183 * must be sent for each missing frame. The
6184 * current frame is stored for later use.
6185 */
6186 skb_queue_tail(&chan->srej_q, skb);
6187 skb_in_use = true;
6188 BT_DBG("Queued %p (queue len %d)", skb,
6189 skb_queue_len(&chan->srej_q));
6190
6191 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6192 l2cap_seq_list_clear(&chan->srej_list);
6193 l2cap_send_srej(chan, control->txseq);
6194
6195 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6196 break;
6197 case L2CAP_TXSEQ_DUPLICATE:
6198 l2cap_pass_to_tx(chan, control);
6199 break;
6200 case L2CAP_TXSEQ_INVALID_IGNORE:
6201 break;
6202 case L2CAP_TXSEQ_INVALID:
6203 default:
6204 l2cap_send_disconn_req(chan, ECONNRESET);
6205 break;
6206 }
6207 break;
6208 case L2CAP_EV_RECV_RR:
6209 l2cap_pass_to_tx(chan, control);
6210 if (control->final) {
6211 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6212
6213 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6214 !__chan_is_moving(chan)) {
6215 control->final = 0;
6216 l2cap_retransmit_all(chan, control);
6217 }
6218
6219 l2cap_ertm_send(chan);
6220 } else if (control->poll) {
6221 l2cap_send_i_or_rr_or_rnr(chan);
6222 } else {
6223 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6224 &chan->conn_state) &&
6225 chan->unacked_frames)
6226 __set_retrans_timer(chan);
6227
6228 l2cap_ertm_send(chan);
6229 }
6230 break;
6231 case L2CAP_EV_RECV_RNR:
6232 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6233 l2cap_pass_to_tx(chan, control);
6234 if (control && control->poll) {
6235 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6236 l2cap_send_rr_or_rnr(chan, 0);
6237 }
6238 __clear_retrans_timer(chan);
6239 l2cap_seq_list_clear(&chan->retrans_list);
6240 break;
6241 case L2CAP_EV_RECV_REJ:
6242 l2cap_handle_rej(chan, control);
6243 break;
6244 case L2CAP_EV_RECV_SREJ:
6245 l2cap_handle_srej(chan, control);
6246 break;
6247 default:
6248 break;
6249 }
6250
6251 if (skb && !skb_in_use) {
6252 BT_DBG("Freeing %p", skb);
6253 kfree_skb(skb);
6254 }
6255
6256 return err;
6257 }
6258
/* ERTM receive state machine: SREJ_SENT state.
 *
 * Entered after a sequence gap was detected and SREJ frames were sent.
 * Out-of-order I-frames are parked on srej_q; once the head of the
 * SREJ list arrives, queued frames are drained in order via
 * l2cap_rx_queued_iframes().  The skb is consumed when queued
 * (skb_in_use); otherwise it is freed before returning.
 *
 * Returns 0 on success or a negative error from reassembly.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived; retire it and
			 * try to drain the queued in-order frames.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Only retransmit if this F-bit wasn't already
			 * accounted for by an earlier REJ action.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Respond with a plain RR acking buffer_seq */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6401
6402 static int l2cap_finish_move(struct l2cap_chan *chan)
6403 {
6404 BT_DBG("chan %p", chan);
6405
6406 chan->rx_state = L2CAP_RX_STATE_RECV;
6407
6408 if (chan->hs_hcon)
6409 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6410 else
6411 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6412
6413 return l2cap_resegment(chan);
6414 }
6415
/* ERTM receive state machine: WAIT_P state (channel move, waiting for
 * the peer's P-bit).
 *
 * On receiving the poll, rewinds the transmit side to the peer's
 * reqseq, finishes the move (MTU refresh + resegment), responds with
 * the F-bit, and re-dispatches any non-I-frame event through the
 * normal RECV handler.
 *
 * Returns 0 on success or a negative error (-EPROTO on protocol
 * violation).
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Only a frame with the P-bit set is valid in this state */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames carrying the P-bit are a protocol violation */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6453
/* ERTM receive state machine: WAIT_F state (channel move, waiting for
 * the peer's F-bit).
 *
 * On receiving the final bit, returns to RECV, rewinds the transmit
 * side to the peer's reqseq, refreshes the connection MTU for the new
 * transport, resegments, and then processes the triggering frame via
 * the normal RECV handler.
 *
 * Returns 0 on success or a negative error (-EPROTO on protocol
 * violation).
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	/* Only a frame with the F-bit set is valid in this state */
	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6491
6492 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6493 {
6494 /* Make sure reqseq is for a packet that has been sent but not acked */
6495 u16 unacked;
6496
6497 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6498 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6499 }
6500
6501 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6502 struct sk_buff *skb, u8 event)
6503 {
6504 int err = 0;
6505
6506 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6507 control, skb, event, chan->rx_state);
6508
6509 if (__valid_reqseq(chan, control->reqseq)) {
6510 switch (chan->rx_state) {
6511 case L2CAP_RX_STATE_RECV:
6512 err = l2cap_rx_state_recv(chan, control, skb, event);
6513 break;
6514 case L2CAP_RX_STATE_SREJ_SENT:
6515 err = l2cap_rx_state_srej_sent(chan, control, skb,
6516 event);
6517 break;
6518 case L2CAP_RX_STATE_WAIT_P:
6519 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6520 break;
6521 case L2CAP_RX_STATE_WAIT_F:
6522 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6523 break;
6524 default:
6525 /* shut it down */
6526 break;
6527 }
6528 } else {
6529 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6530 control->reqseq, chan->next_tx_seq,
6531 chan->expected_ack_seq);
6532 l2cap_send_disconn_req(chan, ECONNRESET);
6533 }
6534
6535 return err;
6536 }
6537
6538 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6539 struct sk_buff *skb)
6540 {
6541 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6542 chan->rx_state);
6543
6544 if (l2cap_classify_txseq(chan, control->txseq) ==
6545 L2CAP_TXSEQ_EXPECTED) {
6546 l2cap_pass_to_tx(chan, control);
6547
6548 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6549 __next_seq(chan, chan->buffer_seq));
6550
6551 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6552
6553 l2cap_reassemble_sdu(chan, skb, control);
6554 } else {
6555 if (chan->sdu) {
6556 kfree_skb(chan->sdu);
6557 chan->sdu = NULL;
6558 }
6559 chan->sdu_last_frag = NULL;
6560 chan->sdu_len = 0;
6561
6562 if (skb) {
6563 BT_DBG("Freeing %p", skb);
6564 kfree_skb(skb);
6565 }
6566 }
6567
6568 chan->last_acked_seq = control->txseq;
6569 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6570
6571 return 0;
6572 }
6573
/* Entry point for ERTM/streaming data frames on a channel.
 *
 * Unpacks the extended/enhanced control field, verifies the FCS and
 * the payload length against MPS, validates the F/P bits against the
 * TX state, then routes I-frames to l2cap_rx()/l2cap_stream_rx() and
 * S-frames to l2cap_rx() with the corresponding event.  Always
 * returns 0; the skb is consumed on all paths.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* SDU length and FCS fields don't count against MPS */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps L2CAP_SUPER_* (0..3) to the matching RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6661
6662 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6663 {
6664 struct l2cap_conn *conn = chan->conn;
6665 struct l2cap_le_credits pkt;
6666 u16 return_credits;
6667
6668 /* We return more credits to the sender only after the amount of
6669 * credits falls below half of the initial amount.
6670 */
6671 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6672 return;
6673
6674 return_credits = le_max_credits - chan->rx_credits;
6675
6676 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6677
6678 chan->rx_credits += return_credits;
6679
6680 pkt.cid = cpu_to_le16(chan->scid);
6681 pkt.credits = cpu_to_le16(return_credits);
6682
6683 chan->ident = l2cap_get_ident(conn);
6684
6685 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6686 }
6687
/* Receive a PDU on an LE credit-based flow-control channel.
 *
 * Consumes one credit, replenishes the sender if needed, and either
 * starts a new SDU (first PDU carries a 16-bit SDU length prefix) or
 * appends a fragment to the SDU under reassembly.  Completed SDUs are
 * delivered via chan->ops->recv().
 *
 * Returns a negative error only on the early paths where the skb has
 * NOT been consumed (caller frees it); once reassembly has taken
 * ownership, errors are handled internally and 0 is returned.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* Peer sent data without holding any credits: protocol error */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		/* First PDU of an SDU: starts with the SDU length field */
		u16 sdu_len;

		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unfragmented SDU: deliver directly */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			/* SDU delivered: reset reassembly state */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		/* kfree_skb(NULL) is a no-op, so both the current skb and
		 * any partial SDU can be freed unconditionally here.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6777
/* Route an incoming data frame to the channel identified by cid.
 *
 * Looks up (and locks) the channel, creating an A2MP channel on demand
 * for L2CAP_CID_A2MP, then dispatches the skb by channel mode.  The
 * skb is consumed on all paths; the channel is unlocked on return.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	/* Returns the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Negative return means the skb was NOT consumed */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* Non-zero recv return: skb not consumed, drop it below */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv consumes the skb on all paths */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6851
/* Deliver a connectionless (G-frame) packet to the global channel
 * registered for the given PSM on this BR/EDR link.
 *
 * l2cap_global_chan_by_psm() returns the channel with a reference
 * held, which is dropped on every exit path.  The skb is consumed:
 * either handed to chan->ops->recv() or freed.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* Connectionless traffic is BR/EDR only */
	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv() == 0 means the skb was consumed */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6888
/* Top-level demultiplexer for a complete L2CAP frame received from HCI.
 *
 * Frames arriving before the link is fully connected are parked on
 * conn->pending_rx and replayed by process_pending_rx().  Otherwise
 * the basic header is stripped and the payload is dispatched by CID
 * (signaling, connectionless, LE signaling, or data channel).  The
 * skb is consumed on all paths.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	/* lh still points at the header bytes after the pull; skb_pull
	 * only advances the data pointer, it does not free memory.
	 */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the reassembled payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6943
6944 static void process_pending_rx(struct work_struct *work)
6945 {
6946 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6947 pending_rx_work);
6948 struct sk_buff *skb;
6949
6950 BT_DBG("");
6951
6952 while ((skb = skb_dequeue(&conn->pending_rx)))
6953 l2cap_recv_frame(conn, skb);
6954 }
6955
/* Create (or return the existing) L2CAP connection object for an HCI
 * connection.
 *
 * Allocates the l2cap_conn, takes a reference on the hci_conn, creates
 * the HCI channel used for data transmission, and initializes locks,
 * lists, timers and work items.  The connection MTU is taken from the
 * controller (LE MTU for LE links when available, ACL MTU otherwise).
 *
 * Returns the connection object or NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up for this hcon */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise the A2MP fixed channel only when High Speed is on */
	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	/* SMP over BR/EDR requires LE plus Secure Connections support
	 * (or the debug force flag).
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7022
7023 static bool is_valid_psm(u16 psm, u8 dst_type) {
7024 if (!psm)
7025 return false;
7026
7027 if (bdaddr_type_is_le(dst_type))
7028 return (psm <= 0x00ff);
7029
7030 /* PSM must be odd and lsb of upper byte must be 0 */
7031 return ((psm & 0x0101) == 0x0001);
7032 }
7033
/* Connect an L2CAP channel to a remote device.
 *
 * @chan:     the channel to connect (must be BT_OPEN or BT_BOUND)
 * @psm:      destination PSM for connection-oriented channels
 * @cid:      destination CID for fixed channels
 * @dst:      remote Bluetooth address
 * @dst_type: L2CAP address type of @dst (BDADDR_BREDR / BDADDR_LE_*)
 *
 * Validates the PSM/CID against the channel type and mode, creates or
 * reuses the HCI connection (LE direct, LE scan-then-connect, or ACL),
 * attaches the channel to the l2cap_conn, and kicks off the connect
 * procedure if the link is already up.
 *
 * Returns 0 on success (or if already connecting) and a negative errno
 * otherwise.  Called with the channel unlocked; takes hdev, chan_lock
 * and the channel lock in that order.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	/* hci_get_route returns the hdev with a reference held;
	 * released via hci_dev_put() in the done path.
	 */
	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we can't initiate a direct connection,
		 * so go through the connect-via-scan path instead.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Requested destination CID already in use on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7196
7197 /* ---- L2CAP interface with lower layer (HCI) ---- */
7198
7199 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7200 {
7201 int exact = 0, lm1 = 0, lm2 = 0;
7202 struct l2cap_chan *c;
7203
7204 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7205
7206 /* Find listening sockets and check their link_mode */
7207 read_lock(&chan_list_lock);
7208 list_for_each_entry(c, &chan_list, global_l) {
7209 if (c->state != BT_LISTEN)
7210 continue;
7211
7212 if (!bacmp(&c->src, &hdev->bdaddr)) {
7213 lm1 |= HCI_LM_ACCEPT;
7214 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7215 lm1 |= HCI_LM_MASTER;
7216 exact++;
7217 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7218 lm2 |= HCI_LM_ACCEPT;
7219 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7220 lm2 |= HCI_LM_MASTER;
7221 }
7222 }
7223 read_unlock(&chan_list_lock);
7224
7225 return exact ? lm1 : lm2;
7226 }
7227
7228 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7229 * from an existing channel in the list or from the beginning of the
7230 * global list (by passing NULL as first parameter).
7231 */
7232 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7233 struct hci_conn *hcon)
7234 {
7235 u8 src_type = bdaddr_src_type(hcon);
7236
7237 read_lock(&chan_list_lock);
7238
7239 if (c)
7240 c = list_next_entry(c, global_l);
7241 else
7242 c = list_entry(chan_list.next, typeof(*c), global_l);
7243
7244 list_for_each_entry_from(c, &chan_list, global_l) {
7245 if (c->chan_type != L2CAP_CHAN_FIXED)
7246 continue;
7247 if (c->state != BT_LISTEN)
7248 continue;
7249 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7250 continue;
7251 if (src_type != c->src_type)
7252 continue;
7253
7254 l2cap_chan_hold(c);
7255 read_unlock(&chan_list_lock);
7256 return c;
7257 }
7258
7259 read_unlock(&chan_list_lock);
7260
7261 return NULL;
7262 }
7263
/* HCI callback: a new ACL or LE link to a remote device completed
 * (successfully or not).  On success, create the l2cap_conn for the
 * link and give every listening fixed channel a chance to instantiate
 * a per-link channel, then kick off connection setup.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	/* Only ACL and LE links carry L2CAP */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before releasing the reference held on pchan */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7324
7325 int l2cap_disconn_ind(struct hci_conn *hcon)
7326 {
7327 struct l2cap_conn *conn = hcon->l2cap_data;
7328
7329 BT_DBG("hcon %p", hcon);
7330
7331 if (!conn)
7332 return HCI_ERROR_REMOTE_USER_TERM;
7333 return conn->disc_reason;
7334 }
7335
7336 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7337 {
7338 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7339 return;
7340
7341 BT_DBG("hcon %p reason %d", hcon, reason);
7342
7343 l2cap_conn_del(hcon, bt_to_errno(reason));
7344 }
7345
7346 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7347 {
7348 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7349 return;
7350
7351 if (encrypt == 0x00) {
7352 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7353 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7354 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7355 chan->sec_level == BT_SECURITY_FIPS)
7356 l2cap_chan_close(chan, ECONNREFUSED);
7357 } else {
7358 if (chan->sec_level == BT_SECURITY_MEDIUM)
7359 __clear_chan_timer(chan);
7360 }
7361 }
7362
/* HCI callback: the authentication/encryption state of the link
 * changed.  Walk every channel on the connection and drive its state
 * machine: resume suspended channels, start pending connections, or
 * answer incoming connection requests that were deferred until the
 * security procedure completed.
 *
 * @status:  HCI status of the security procedure (0 = success)
 * @encrypt: new encryption state of the link (0 = off)
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* The A2MP signalling channel is not affected by link
		 * security changes.
		 */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Skip channels whose connect request is still pending */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channels simply resume; losing encryption
		 * is handled inside l2cap_check_encryption().
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
			/* Incoming connect was waiting for security to
			 * finish: send the connect response now.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success, immediately follow up with our
			 * configuration request if not already sent.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
7453
/* HCI callback: an ACL data packet arrived on @hcon.  Reassemble L2CAP
 * PDUs that are fragmented across multiple ACL packets (tracked in
 * conn->rx_skb / conn->rx_len) and hand complete frames to
 * l2cap_recv_frame(), which takes ownership of the skb.
 *
 * @flags: ACL packet boundary flags (start vs. continuation fragment).
 *
 * Consumes @skb in all cases; the drop label doubles as the normal
 * cleanup path once the payload has been copied out.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembly is in progress means we
		 * lost fragments; discard the partial PDU.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received; l2cap_recv_frame owns
			 * the skb from here, so do not fall to drop.
			 */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	/* The incoming fragment's payload (if any) was copied; free it */
	kfree_skb(skb);
}
7557
/* Callbacks registered with the HCI core; invoked on link-level
 * connect/disconnect/security events for every controller.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7564
7565 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7566 {
7567 struct l2cap_chan *c;
7568
7569 read_lock(&chan_list_lock);
7570
7571 list_for_each_entry(c, &chan_list, global_l) {
7572 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7573 &c->src, c->src_type, &c->dst, c->dst_type,
7574 c->state, __le16_to_cpu(c->psm),
7575 c->scid, c->dcid, c->imtu, c->omtu,
7576 c->sec_level, c->mode);
7577 }
7578
7579 read_unlock(&chan_list_lock);
7580
7581 return 0;
7582 }
7583
/* Open hook wiring the debugfs file to the seq_file show handler */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}

static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the "l2cap" debugfs file; removed again in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7597
/* Initialize the L2CAP layer: socket interface, HCI callbacks and
 * debugfs entries.  Registration order matters: sockets must be ready
 * before HCI starts delivering callbacks.
 *
 * Returns 0 on success or a negative errno from socket init.
 */
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	hci_register_cb(&l2cap_cb);

	if (IS_ERR_OR_NULL(bt_debugfs))
		return 0;

	/* debugfs entry creation failures are deliberately ignored */
	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
					    NULL, &l2cap_debugfs_fops);

	debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
			   &le_max_credits);
	debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
			   &le_default_mps);

	return 0;
}
7621
/* Tear the layer down in the reverse order of l2cap_init() */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}

/* Allow ERTM/streaming modes to be disabled at load time or via sysfs */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");