Merge tag 'sound-fix-3.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai...
[deliverable/linux.git] / net / bluetooth / hci_conn.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI connection handling. */
26
27 #include <linux/export.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/l2cap.h>
32
33 #include "smp.h"
34 #include "a2mp.h"
35
/* One (e)SCO parameter set: which packet types to allow and the
 * maximum latency to request in an HCI Setup Synchronous Connection.
 */
struct sco_param {
	u16 pkt_type;    /* (e)SCO packet type bits */
	u16 max_latency; /* maximum latency for the sync connection */
};
40
/* CVSD (narrowband voice) parameter sets, tried in order on each
 * successive connection attempt until one succeeds (see hci_setup_sync).
 * The S3..D0 labels refer to the standard HFP settings tables.
 */
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff }, /* D0 */
};
48
/* Wideband (transparent air mode) parameter sets, tried in order per
 * connection attempt. T2/T1 labels refer to the HFP wideband tables.
 */
static const struct sco_param sco_param_wideband[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008 }, /* T1 */
};
53
/* Ask the controller to abort an in-progress LE connection attempt.
 * The outcome is reported asynchronously via the command/connection
 * complete events.
 */
static void hci_le_create_connection_cancel(struct hci_conn *conn)
{
	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}
58
/* Start an outgoing BR/EDR ACL connection by sending HCI Create
 * Connection. Page-scan parameters are seeded from the inquiry cache
 * when a fresh entry for the peer exists, which speeds up paging.
 */
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	/* Default page scan repetition mode when nothing is cached */
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			/* Top bit marks the cached clock offset as valid */
			cp.clock_offset = ie->data.clock_offset |
					  cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		if (ie->data.ssp_mode > 0)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	/* Allow the peer to request a role switch unless we insist on
	 * staying master.
	 */
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
101
/* Abort an outgoing BR/EDR connection attempt. Create Connection
 * Cancel only exists since Bluetooth 1.2, so do nothing on older
 * controllers.
 */
static void hci_acl_create_connection_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("hcon %p", conn);

	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}
114
/* Reject an incoming (e)SCO connection request from the peer. */
static void hci_reject_sco(struct hci_conn *conn)
{
	struct hci_cp_reject_sync_conn_req cp;

	cp.reason = HCI_ERROR_REMOTE_USER_TERM;
	bacpy(&cp.bdaddr, &conn->dst);

	hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
}
124
/* Initiate disconnection of an established link with the given HCI
 * reason code. Completion is signalled by the Disconnection Complete
 * event; here we only move the connection into BT_DISCONN.
 */
void hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}
137
/* Tear down an AMP physical link. AMP links are disconnected via the
 * physical link handle rather than the regular connection handle.
 */
static void hci_amp_disconn(struct hci_conn *conn)
{
	struct hci_cp_disconn_phy_link cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_DISCONN;

	cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
	/* Let the upper protocol decide on the disconnect reason */
	cp.reason = hci_proto_disconn_ind(conn);
	hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
		     sizeof(cp), &cp);
}
151
/* Set up a legacy SCO link on top of an existing ACL connection,
 * using the old Add SCO Connection command (pre-eSCO controllers).
 *
 * @handle: handle of the underlying ACL connection.
 */
static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
169
/* Set up an eSCO link on top of an existing ACL connection using the
 * Setup Synchronous Connection command. On every retry (conn->attempt)
 * the next, more conservative parameter set from the matching air-mode
 * table is used.
 *
 * @handle: handle of the underlying ACL connection.
 *
 * Returns false when all parameter sets have been exhausted, the air
 * mode is unknown, or the command could not be sent.
 */
bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		/* Wideband speech (transparent air mode) */
		if (conn->attempt > ARRAY_SIZE(sco_param_wideband))
			return false;
		cp.retrans_effort = 0x02;
		param = &sco_param_wideband[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		/* Narrowband speech (CVSD) */
		if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
			return false;
		cp.retrans_effort = 0x01;
		param = &sco_param_cvsd[conn->attempt - 1];
		break;
	default:
		return false;
	}

	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
214
/* Request new LE connection parameters for an established connection
 * and remember them in the stored connection parameters (if any) so
 * they survive reconnects.
 *
 * Returns 0x01 when stored parameters existed and were updated,
 * 0x00 otherwise.
 */
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0000);
	cp.max_ce_len		= cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}
250
/* Start LE link-layer encryption with the given long term key.
 *
 * @ediv: encrypted diversifier (already little endian).
 * @rand: random value associated with the LTK (already little endian).
 * @ltk:  128-bit long term key.
 */
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16])
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.rand = rand;
	cp.ediv = ediv;
	memcpy(cp.ltk, ltk, sizeof(cp.ltk));

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
268
/* Continue setting up a pending SCO link once its parent ACL link is
 * ready (@status == 0), or fail and delete it otherwise.
 *
 * Device _must_ be locked.
 */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	if (!sco)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		/* Prefer eSCO when the controller supports it */
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}
289
/* Deferred disconnect worker: runs when a connection's refcount has
 * stayed at zero for the disconnect timeout. Depending on the current
 * state it cancels an ongoing connection attempt, rejects a pending
 * SCO request, or actively disconnects an established link.
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->out) {
			/* Abort our own connection attempt */
			if (conn->type == ACL_LINK)
				hci_acl_create_connection_cancel(conn);
			else if (conn->type == LE_LINK)
				hci_le_create_connection_cancel(conn);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			hci_reject_sco(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		if (conn->type == AMP_LINK) {
			hci_amp_disconn(conn);
		} else {
			__u8 reason = hci_proto_disconn_ind(conn);

			/* When we are master of an established connection
			 * and it enters the disconnect timeout, then go
			 * ahead and try to read the current clock offset.
			 *
			 * Processing of the result is done within the
			 * event handling and hci_clock_offset_evt function.
			 */
			if (conn->type == ACL_LINK &&
			    conn->role == HCI_ROLE_MASTER) {
				struct hci_dev *hdev = conn->hdev;
				struct hci_cp_read_clock_offset cp;

				cp.handle = cpu_to_le16(conn->handle);

				hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET,
					     sizeof(cp), &cp);
			}

			hci_disconnect(conn, reason);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}
355
/* Idle-timer worker: put an inactive ACL connection into sniff mode
 * (and enable sniff subrating first, when both sides support it) to
 * save power. Bails out when sniff is unsupported, the connection is
 * not active, or the link policy forbids sniff.
 */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
390
/* Delayed-work handler that automatically confirms a pending user
 * confirmation request for this connection's peer address.
 */
static void hci_conn_auto_accept(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     auto_accept_work.work);

	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}
399
/* LE connection-attempt timeout worker: give up on a connection that
 * did not complete in time, either by disabling directed advertising
 * (slave role) or by cancelling the create-connection attempt.
 */
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
		u8 enable = 0x00;
		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			     &enable);
		hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		return;
	}

	hci_le_create_connection_cancel(conn);
}
423
/* Allocate and initialize a new hci_conn object, register it in the
 * connection hash and sysfs, and take a reference on the hci_dev.
 *
 * @type: link type (ACL_LINK, LE_LINK, SCO_LINK, ESCO_LINK, ...).
 * @dst:  peer address.
 * @role: HCI_ROLE_MASTER or HCI_ROLE_SLAVE for the local device.
 *
 * Returns the new connection (refcount 0) or NULL on allocation
 * failure.
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	/* 0xff marks "not yet known" for remote auth and key type */
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	/* Pick the initial packet types supported for this link type */
	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	hci_conn_init_sysfs(conn);

	return conn;
}
496
/* Tear down a hci_conn: cancel its delayed work, return unacked packet
 * credits to the controller accounting, detach any linked SCO/ACL
 * connection, flush channels and queued data, and drop the references
 * taken in hci_conn_add. Always returns 0.
 */
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work_sync(&conn->le_conn_timeout);

		/* LE data may share the ACL buffer pool */
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		/* SCO/eSCO: drop the reference held on the parent ACL */
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_drop(acl);
		}
	}

	hci_chan_list_flush(conn);

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	skb_queue_purge(&conn->data_q);

	hci_conn_del_sysfs(conn);

	hci_dev_put(hdev);

	hci_conn_put(conn);

	return 0;
}
548
/* Pick the local BR/EDR controller to use for reaching @dst.
 *
 * With a specific @src the controller with that address is chosen;
 * with BDADDR_ANY any usable controller whose address differs from
 * @dst is chosen. Controllers that are down, or claimed by a user
 * channel, or not BR/EDR are skipped.
 *
 * Returns a referenced hci_dev (caller must hci_dev_put) or NULL.
 */
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
		    d->dev_type != HCI_BREDR)
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);
587
/* Handle a failed LE connection attempt: notify mgmt and the upper
 * protocol, delete the connection, and restore background scanning
 * and advertising that may have been paused for the attempt.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;

	conn->state = BT_CLOSED;

	mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
			    status);

	hci_proto_connect_cfm(conn, status);

	hci_conn_del(conn);

	/* Since we may have temporarily stopped the background scanning in
	 * favor of connection establishment, we should restart it.
	 */
	hci_update_background_scan(hdev);

	/* Re-enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	mgmt_reenable_advertising(hdev);
}
612
/* Request-completion callback for the LE create-connection request:
 * on failure, find the connection still in BT_CONNECT and run the
 * failure cleanup for it.
 */
static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_conn *conn;

	if (status == 0)
		return;

	BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
	       status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn)
		goto done;

	hci_le_conn_failed(conn, status);

done:
	hci_dev_unlock(hdev);
}
634
/* Append an LE Create Connection command for @conn to @req, after
 * making sure a suitable own (possibly random) address is in place.
 * Moves the connection to BT_CONNECT on success; silently returns if
 * the random address could not be updated.
 */
static void hci_req_add_le_create_conn(struct hci_request *req,
				       struct hci_conn *conn)
{
	struct hci_cp_le_create_conn cp;
	struct hci_dev *hdev = conn->hdev;
	u8 own_addr_type;

	memset(&cp, 0, sizeof(cp));

	/* Update random address, but set require_privacy to false so
	 * that we never connect with an unresolvable address.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
	cp.scan_window = cpu_to_le16(hdev->le_scan_window);
	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.own_address_type = own_addr_type;
	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);

	conn->state = BT_CONNECT;
}
666
/* Append the commands to start directed advertising towards @conn's
 * peer (used when connecting in the slave role): set advertising
 * parameters and enable advertising. Moves the connection into
 * BT_CONNECT.
 */
static void hci_req_directed_advertising(struct hci_request *req,
					 struct hci_conn *conn)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type;
	u8 enable;

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	if (hci_update_random_address(req, false, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.type = LE_ADV_DIRECT_IND;
	cp.own_address_type = own_addr_type;
	cp.direct_addr_type = conn->dst_type;
	bacpy(&cp.direct_addr, &conn->dst);
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	enable = 0x01;
	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);

	conn->state = BT_CONNECT;
}
702
/* Establish (or reuse) an LE connection to @dst.
 *
 * @dst_type:     ADDR_LE_DEV_PUBLIC or ADDR_LE_DEV_RANDOM.
 * @sec_level:    requested security level.
 * @conn_timeout: connection attempt timeout.
 * @role:         HCI_ROLE_MASTER to initiate, HCI_ROLE_SLAVE to use
 *                directed advertising.
 *
 * Returns a held hci_conn on success, or ERR_PTR(-EBUSY/-ENOMEM/...)
 * on failure. Caller is expected to hold hdev->lock.
 */
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, u8 sec_level, u16 conn_timeout,
				u8 role)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	struct hci_request req;
	int err;

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
	if (conn) {
		conn->pending_sec_level = sec_level;
		goto done;
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		return ERR_PTR(-EBUSY);

	/* When given an identity address with existing identity
	 * resolving key, the connection needs to be established
	 * to a resolvable random address.
	 *
	 * This uses the cached random resolvable address from
	 * a previous scan. When no cached address is available,
	 * try connecting to the identity address instead.
	 *
	 * Storing the resolvable random address is required here
	 * to handle connection failures. The address will later
	 * be resolved back into the original identity address
	 * from the connect request.
	 */
	irk = hci_find_irk_by_addr(hdev, dst, dst_type);
	if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
		dst = &irk->rpa;
		dst_type = ADDR_LE_DEV_RANDOM;
	}

	conn = hci_conn_add(hdev, LE_LINK, dst, role);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;

	hci_req_init(&req, hdev);

	/* Disable advertising if we're active. For master role
	 * connections most controllers will refuse to connect if
	 * advertising is enabled, and for slave role connections we
	 * anyway have to disable it in order to start directed
	 * advertising.
	 */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
		u8 enable = 0x00;
		hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			    &enable);
	}

	/* If requested to connect as slave use directed advertising */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* If we're active scanning most controllers are unable
		 * to initiate advertising. Simply reject the attempt.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
		    hdev->le_scan_type == LE_SCAN_ACTIVE) {
			skb_queue_purge(&req.cmd_q);
			hci_conn_del(conn);
			return ERR_PTR(-EBUSY);
		}

		hci_req_directed_advertising(&req, conn);
		goto create_conn;
	}

	/* Prefer per-device stored connection parameters, falling back
	 * to the controller-wide defaults.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		conn->le_conn_min_interval = params->conn_min_interval;
		conn->le_conn_max_interval = params->conn_max_interval;
		conn->le_conn_latency = params->conn_latency;
		conn->le_supv_timeout = params->supervision_timeout;
	} else {
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
		conn->le_conn_latency = hdev->le_conn_latency;
		conn->le_supv_timeout = hdev->le_supv_timeout;
	}

	/* If controller is scanning, we stop it since some controllers are
	 * not able to scan and connect at the same time. Also set the
	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
	 * handler for scan disabling knows to set the correct discovery
	 * state.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
		hci_req_add_le_scan_disable(&req);
		set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
	}

	hci_req_add_le_create_conn(&req, conn);

create_conn:
	err = hci_req_run(&req, create_le_conn_complete);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

done:
	hci_conn_hold(conn);
	return conn;
}
830
/* Establish (or reuse) a BR/EDR ACL connection to @dst with the given
 * security level and authentication requirements.
 *
 * Returns a held hci_conn, or ERR_PTR(-EOPNOTSUPP) when BR/EDR is
 * disabled, or ERR_PTR(-ENOMEM) on allocation failure.
 */
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type)
{
	struct hci_conn *acl;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return ERR_PTR(-EOPNOTSUPP);

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	/* Only kick off a new attempt when no attempt is in flight */
	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}
857
/* Establish a SCO/eSCO audio connection to @dst, first making sure an
 * ACL link exists. If the ACL is still in power-save mode change, the
 * SCO setup is deferred until the mode change completes.
 *
 * @type:    SCO_LINK or ESCO_LINK.
 * @setting: voice setting for the audio link.
 *
 * Returns a held SCO hci_conn or an ERR_PTR.
 */
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
		if (!sco) {
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	/* Cross-link the SCO connection with its parent ACL */
	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	sco->setting = setting;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
900
901 /* Check link security requirement */
902 int hci_conn_check_link_mode(struct hci_conn *conn)
903 {
904 BT_DBG("hcon %p", conn);
905
906 /* In Secure Connections Only mode, it is required that Secure
907 * Connections is used and the link is encrypted with AES-CCM
908 * using a P-256 authenticated combination key.
909 */
910 if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) {
911 if (!hci_conn_sc_enabled(conn) ||
912 !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
913 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
914 return 0;
915 }
916
917 if (hci_conn_ssp_enabled(conn) &&
918 !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
919 return 0;
920
921 return 1;
922 }
923
/* Authenticate remote device.
 *
 * Raises the pending security level if needed and, when no
 * authentication is already pending, sends an Authentication
 * Requested command.
 *
 * Returns 1 when the link is already authenticated at a sufficient
 * level, 0 when authentication has been (or already was) requested.
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
		else
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
960
/* Encrypt the link.
 *
 * Sends Set Connection Encryption unless an encryption change is
 * already pending; completion is signalled via the Encryption Change
 * event.
 */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle  = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			     &cp);
	}
}
974
/* Enable security.
 *
 * Drive the link to the requested security level: decide from the
 * current link key type whether authentication and/or encryption is
 * still needed, then request it.
 *
 * Returns 1 when the link already satisfies @sec_level, 0 when
 * further (asynchronous) authentication/encryption was initiated.
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	/* LE security is handled entirely by SMP */
	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	   security level 3. */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		return 1;

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
1044
1045 /* Check secure link requirement */
1046 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1047 {
1048 BT_DBG("hcon %p", conn);
1049
1050 /* Accept if non-secure or higher security level is required */
1051 if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1052 return 1;
1053
1054 /* Accept if secure or higher security level is already present */
1055 if (conn->sec_level == BT_SECURITY_HIGH ||
1056 conn->sec_level == BT_SECURITY_FIPS)
1057 return 1;
1058
1059 /* Reject not secure link */
1060 return 0;
1061 }
1062 EXPORT_SYMBOL(hci_conn_check_secure);
1063
/* Change link key.
 *
 * Requests a link key change unless an authentication procedure is
 * already pending. Always returns 0; completion is asynchronous.
 */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
			     sizeof(cp), &cp);
	}

	return 0;
}
1078
/* Switch role.
 *
 * Requests a master/slave role switch unless one is already pending.
 *
 * Returns 1 when the connection already has the requested role,
 * 0 when a switch was (or already is being) requested.
 */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("hcon %p", conn);

	if (role == conn->role)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);
1097
/* Enter active mode.
 *
 * Exit sniff mode when data needs to flow (unless power save is off
 * and @force_active is not set), and (re)arm the idle timer that will
 * eventually put the link back into sniff mode.
 */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		queue_delayed_work(hdev->workqueue, &conn->idle_work,
				   msecs_to_jiffies(hdev->idle_timeout));
}
1122
1123 /* Drop all connection on the device */
1124 void hci_conn_hash_flush(struct hci_dev *hdev)
1125 {
1126 struct hci_conn_hash *h = &hdev->conn_hash;
1127 struct hci_conn *c, *n;
1128
1129 BT_DBG("hdev %s", hdev->name);
1130
1131 list_for_each_entry_safe(c, n, &h->list, list) {
1132 c->state = BT_CLOSED;
1133
1134 hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1135 hci_conn_del(c);
1136 }
1137 }
1138
1139 /* Check pending connect attempts */
1140 void hci_conn_check_pending(struct hci_dev *hdev)
1141 {
1142 struct hci_conn *conn;
1143
1144 BT_DBG("hdev %s", hdev->name);
1145
1146 hci_dev_lock(hdev);
1147
1148 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1149 if (conn)
1150 hci_acl_create_connection(conn);
1151
1152 hci_dev_unlock(hdev);
1153 }
1154
1155 static u32 get_link_mode(struct hci_conn *conn)
1156 {
1157 u32 link_mode = 0;
1158
1159 if (conn->role == HCI_ROLE_MASTER)
1160 link_mode |= HCI_LM_MASTER;
1161
1162 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1163 link_mode |= HCI_LM_ENCRYPT;
1164
1165 if (test_bit(HCI_CONN_AUTH, &conn->flags))
1166 link_mode |= HCI_LM_AUTH;
1167
1168 if (test_bit(HCI_CONN_SECURE, &conn->flags))
1169 link_mode |= HCI_LM_SECURE;
1170
1171 if (test_bit(HCI_CONN_FIPS, &conn->flags))
1172 link_mode |= HCI_LM_FIPS;
1173
1174 return link_mode;
1175 }
1176
1177 int hci_get_conn_list(void __user *arg)
1178 {
1179 struct hci_conn *c;
1180 struct hci_conn_list_req req, *cl;
1181 struct hci_conn_info *ci;
1182 struct hci_dev *hdev;
1183 int n = 0, size, err;
1184
1185 if (copy_from_user(&req, arg, sizeof(req)))
1186 return -EFAULT;
1187
1188 if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1189 return -EINVAL;
1190
1191 size = sizeof(req) + req.conn_num * sizeof(*ci);
1192
1193 cl = kmalloc(size, GFP_KERNEL);
1194 if (!cl)
1195 return -ENOMEM;
1196
1197 hdev = hci_dev_get(req.dev_id);
1198 if (!hdev) {
1199 kfree(cl);
1200 return -ENODEV;
1201 }
1202
1203 ci = cl->conn_info;
1204
1205 hci_dev_lock(hdev);
1206 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1207 bacpy(&(ci + n)->bdaddr, &c->dst);
1208 (ci + n)->handle = c->handle;
1209 (ci + n)->type = c->type;
1210 (ci + n)->out = c->out;
1211 (ci + n)->state = c->state;
1212 (ci + n)->link_mode = get_link_mode(c);
1213 if (++n >= req.conn_num)
1214 break;
1215 }
1216 hci_dev_unlock(hdev);
1217
1218 cl->dev_id = hdev->id;
1219 cl->conn_num = n;
1220 size = sizeof(req) + n * sizeof(*ci);
1221
1222 hci_dev_put(hdev);
1223
1224 err = copy_to_user(arg, cl, size);
1225 kfree(cl);
1226
1227 return err ? -EFAULT : 0;
1228 }
1229
1230 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1231 {
1232 struct hci_conn_info_req req;
1233 struct hci_conn_info ci;
1234 struct hci_conn *conn;
1235 char __user *ptr = arg + sizeof(req);
1236
1237 if (copy_from_user(&req, arg, sizeof(req)))
1238 return -EFAULT;
1239
1240 hci_dev_lock(hdev);
1241 conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1242 if (conn) {
1243 bacpy(&ci.bdaddr, &conn->dst);
1244 ci.handle = conn->handle;
1245 ci.type = conn->type;
1246 ci.out = conn->out;
1247 ci.state = conn->state;
1248 ci.link_mode = get_link_mode(conn);
1249 }
1250 hci_dev_unlock(hdev);
1251
1252 if (!conn)
1253 return -ENOENT;
1254
1255 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1256 }
1257
1258 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1259 {
1260 struct hci_auth_info_req req;
1261 struct hci_conn *conn;
1262
1263 if (copy_from_user(&req, arg, sizeof(req)))
1264 return -EFAULT;
1265
1266 hci_dev_lock(hdev);
1267 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1268 if (conn)
1269 req.type = conn->auth_type;
1270 hci_dev_unlock(hdev);
1271
1272 if (!conn)
1273 return -ENOENT;
1274
1275 return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1276 }
1277
1278 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1279 {
1280 struct hci_dev *hdev = conn->hdev;
1281 struct hci_chan *chan;
1282
1283 BT_DBG("%s hcon %p", hdev->name, conn);
1284
1285 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1286 if (!chan)
1287 return NULL;
1288
1289 chan->conn = conn;
1290 skb_queue_head_init(&chan->data_q);
1291 chan->state = BT_CONNECTED;
1292
1293 list_add_rcu(&chan->list, &conn->chan_list);
1294
1295 return chan;
1296 }
1297
/* Unlink @chan from its connection's channel list and free it.
 *
 * Must be called from a context that may block: synchronize_rcu()
 * waits for all in-flight RCU readers of chan_list to finish before
 * the channel memory is torn down.
 */
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	/* Remove from the RCU-protected list first ... */
	list_del_rcu(&chan->list);

	/* ... then wait for concurrent lockless readers to drain so the
	 * frees below cannot race with them.
	 */
	synchronize_rcu();

	/* Drops a connection reference; the matching hold is taken by
	 * the channel's user elsewhere — not visible in this section.
	 */
	hci_conn_drop(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
1314
1315 void hci_chan_list_flush(struct hci_conn *conn)
1316 {
1317 struct hci_chan *chan, *n;
1318
1319 BT_DBG("hcon %p", conn);
1320
1321 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1322 hci_chan_del(chan);
1323 }
1324
1325 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1326 __u16 handle)
1327 {
1328 struct hci_chan *hchan;
1329
1330 list_for_each_entry(hchan, &hcon->chan_list, list) {
1331 if (hchan->handle == handle)
1332 return hchan;
1333 }
1334
1335 return NULL;
1336 }
1337
1338 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1339 {
1340 struct hci_conn_hash *h = &hdev->conn_hash;
1341 struct hci_conn *hcon;
1342 struct hci_chan *hchan = NULL;
1343
1344 rcu_read_lock();
1345
1346 list_for_each_entry_rcu(hcon, &h->list, list) {
1347 hchan = __hci_chan_lookup_handle(hcon, handle);
1348 if (hchan)
1349 break;
1350 }
1351
1352 rcu_read_unlock();
1353
1354 return hchan;
1355 }
/* This page took 0.074974 seconds and 6 git commands to generate. */