Bluetooth: Use explicit role instead of a bool in function parameters
[deliverable/linux.git] / net / bluetooth / hci_conn.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI connection handling. */
26
27 #include <linux/export.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/l2cap.h>
32
33 #include "smp.h"
34 #include "a2mp.h"
35
/* One row of the eSCO negotiation tables below: the packet types the
 * controller may use on this attempt and the associated max latency.
 */
struct sco_param {
	u16 pkt_type;		/* allowed (e)SCO packet type bits */
	u16 max_latency;	/* maximum latency parameter for Setup Sync Conn */
};
40
/* Parameter sets for CVSD air mode, tried in order on successive
 * connection attempts (preferred S3 down to the mandatory D0 fallback).
 */
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff }, /* D0 */
};
48
/* Parameter sets for transparent (wideband speech) air mode, tried in
 * order on successive connection attempts (T2, then T1).
 */
static const struct sco_param sco_param_wideband[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008 }, /* T1 */
};
53
54 static void hci_le_create_connection_cancel(struct hci_conn *conn)
55 {
56 hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
57 }
58
/* Issue an HCI Create Connection for an outgoing BR/EDR ACL link.
 * Marks the connection as an outgoing master-role attempt, seeds the
 * command parameters from the inquiry cache when a fresh entry exists,
 * and requests role switch support when local policy allows it.
 */
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	/* Counts retries; also consulted by the SCO parameter tables */
	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	/* Default page scan repetition mode when no cache data is usable */
	cp.pscan_rep_mode = 0x02;

	/* Prefer page scan info learned during inquiry, but only if the
	 * cache entry is recent enough to still be trustworthy.
	 */
	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			/* 0x8000 marks the clock offset as valid */
			cp.clock_offset = ie->data.clock_offset |
					  cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		if (ie->data.ssp_mode > 0)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	/* Allow the peer to become master only if we can switch roles and
	 * local link mode does not insist on keeping the master role.
	 */
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
101
102 static void hci_acl_create_connection_cancel(struct hci_conn *conn)
103 {
104 struct hci_cp_create_conn_cancel cp;
105
106 BT_DBG("hcon %p", conn);
107
108 if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
109 return;
110
111 bacpy(&cp.bdaddr, &conn->dst);
112 hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
113 }
114
115 static void hci_reject_sco(struct hci_conn *conn)
116 {
117 struct hci_cp_reject_sync_conn_req cp;
118
119 cp.reason = HCI_ERROR_REMOTE_USER_TERM;
120 bacpy(&cp.bdaddr, &conn->dst);
121
122 hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
123 }
124
125 void hci_disconnect(struct hci_conn *conn, __u8 reason)
126 {
127 struct hci_cp_disconnect cp;
128
129 BT_DBG("hcon %p", conn);
130
131 conn->state = BT_DISCONN;
132
133 cp.handle = cpu_to_le16(conn->handle);
134 cp.reason = reason;
135 hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
136 }
137
138 static void hci_amp_disconn(struct hci_conn *conn)
139 {
140 struct hci_cp_disconn_phy_link cp;
141
142 BT_DBG("hcon %p", conn);
143
144 conn->state = BT_DISCONN;
145
146 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
147 cp.reason = hci_proto_disconn_ind(conn);
148 hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
149 sizeof(cp), &cp);
150 }
151
152 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
153 {
154 struct hci_dev *hdev = conn->hdev;
155 struct hci_cp_add_sco cp;
156
157 BT_DBG("hcon %p", conn);
158
159 conn->state = BT_CONNECT;
160 conn->out = true;
161
162 conn->attempt++;
163
164 cp.handle = cpu_to_le16(handle);
165 cp.pkt_type = cpu_to_le16(conn->pkt_type);
166
167 hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
168 }
169
/* Start eSCO setup on top of the ACL identified by @handle.
 *
 * The parameter set is picked from the air-mode specific table indexed
 * by the current attempt number, so each retry falls back to the next
 * (more conservative) entry. Returns false when the attempts for the
 * air mode are exhausted, the air mode is unknown, or sending the
 * command failed; true when the Setup Synchronous Connection command
 * was queued.
 */
bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		/* attempt is 1-based, so it directly bounds the table */
		if (conn->attempt > ARRAY_SIZE(sco_param_wideband))
			return false;
		cp.retrans_effort = 0x02;
		param = &sco_param_wideband[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
			return false;
		cp.retrans_effort = 0x01;
		param = &sco_param_cvsd[conn->attempt - 1];
		break;
	default:
		return false;
	}

	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
214
/* Send an LE Connection Update for @conn with the given interval range,
 * slave latency and supervision timeout. When a stored connection
 * parameter entry exists for the peer it is refreshed with the new
 * values as well.
 *
 * Returns 0x01 if a stored parameter entry was updated, 0x00 otherwise.
 */
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	hci_dev_lock(hdev);

	/* Keep the persisted per-device parameters in sync */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0000);
	cp.max_ce_len		= cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}
250
251 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
252 __u8 ltk[16])
253 {
254 struct hci_dev *hdev = conn->hdev;
255 struct hci_cp_le_start_enc cp;
256
257 BT_DBG("hcon %p", conn);
258
259 memset(&cp, 0, sizeof(cp));
260
261 cp.handle = cpu_to_le16(conn->handle);
262 cp.rand = rand;
263 cp.ediv = ediv;
264 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
265
266 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
267 }
268
269 /* Device _must_ be locked */
270 void hci_sco_setup(struct hci_conn *conn, __u8 status)
271 {
272 struct hci_conn *sco = conn->link;
273
274 if (!sco)
275 return;
276
277 BT_DBG("hcon %p", conn);
278
279 if (!status) {
280 if (lmp_esco_capable(conn->hdev))
281 hci_setup_sync(sco, conn->handle);
282 else
283 hci_add_sco(sco, conn->handle);
284 } else {
285 hci_proto_connect_cfm(sco, status);
286 hci_conn_del(sco);
287 }
288 }
289
/* Delayed-work handler that disconnects or cancels a connection once it
 * has been idle (unreferenced) for its disconnect timeout. The action
 * depends on the connection state: pending attempts are cancelled or
 * rejected, established links are disconnected.
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->out) {
			/* Outgoing attempt: cancel it at the controller */
			if (conn->type == ACL_LINK)
				hci_acl_create_connection_cancel(conn);
			else if (conn->type == LE_LINK)
				hci_le_create_connection_cancel(conn);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			/* Incoming (e)SCO request that was never accepted */
			hci_reject_sco(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		if (conn->type == AMP_LINK) {
			hci_amp_disconn(conn);
		} else {
			__u8 reason = hci_proto_disconn_ind(conn);

			/* When we are master of an established connection
			 * and it enters the disconnect timeout, then go
			 * ahead and try to read the current clock offset.
			 *
			 * Processing of the result is done within the
			 * event handling and hci_clock_offset_evt function.
			 */
			if (conn->type == ACL_LINK &&
			    conn->role == HCI_ROLE_MASTER) {
				struct hci_dev *hdev = conn->hdev;
				struct hci_cp_read_clock_offset cp;

				cp.handle = cpu_to_le16(conn->handle);

				hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET,
					     sizeof(cp), &cp);
			}

			hci_disconnect(conn, reason);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}
355
/* Enter sniff mode.
 *
 * Delayed-work handler fired after the idle timeout: if both sides
 * support sniff mode and the link policy allows it, optionally program
 * sniff subrating and then request sniff mode for the connection.
 */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	/* Nothing to do if already in a low-power mode or policy forbids it */
	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	/* Only one mode change may be pending at a time */
	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
390
391 static void hci_conn_auto_accept(struct work_struct *work)
392 {
393 struct hci_conn *conn = container_of(work, struct hci_conn,
394 auto_accept_work.work);
395
396 hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
397 &conn->dst);
398 }
399
/* Delayed-work handler for an LE connection attempt that never
 * completed: stop directed advertising if that was the mechanism in
 * use, otherwise cancel the pending LE create connection.
 */
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
		u8 enable = 0x00;
		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			     &enable);
		hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		return;
	}

	hci_le_create_connection_cancel(conn);
}
423
/* Allocate and initialise a new hci_conn of the given link @type to
 * destination @dst, register it in the device's connection hash and
 * create its sysfs representation.
 *
 * Returns the new connection (refcnt 0) or NULL on allocation failure.
 * The caller is expected to take a reference via hci_conn_hold().
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	/* 0xff marks remote auth requirement / key type as "unknown" */
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Seed the packet types (and for LE, the source address) from the
	 * controller's capabilities for the given link type.
	 */
	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);

	atomic_set(&conn->refcnt, 0);

	/* The connection pins the device until hci_conn_del() */
	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	hci_conn_init_sysfs(conn);

	return conn;
}
491
/* Tear down a connection: cancel its pending work, return unacked
 * packet credits to the controller accounting, detach any linked
 * SCO/ACL peer, flush channels and remove it from the hash and sysfs.
 * Drops the references taken in hci_conn_add(). Always returns 0.
 */
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work_sync(&conn->le_conn_timeout);

		/* LE may share the ACL buffer pool when the controller has
		 * no dedicated LE buffers.
		 */
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		/* (e)SCO: detach from and release the parent ACL */
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_drop(acl);
		}
	}

	hci_chan_list_flush(conn);

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	skb_queue_purge(&conn->data_q);

	hci_conn_del_sysfs(conn);

	hci_dev_put(hdev);

	hci_conn_put(conn);

	return 0;
}
543
/* Pick the BR/EDR controller to use for a connection from @src to @dst.
 *
 * When @src is a real address, the controller owning that address is
 * chosen; when @src is BDADDR_ANY, the first usable controller whose
 * own address differs from @dst is chosen. Returns a held reference to
 * the device (caller must hci_dev_put()) or NULL if none matches.
 */
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers that are down, claimed by a user channel,
		 * or not BR/EDR capable.
		 */
		if (!test_bit(HCI_UP, &d->flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
		    d->dev_type != HCI_BREDR)
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);
582
/* Handle a failed LE connection attempt: notify management and the
 * protocol layer, delete the connection, and restore background
 * scanning and advertising that may have been paused for the attempt.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;

	conn->state = BT_CLOSED;

	mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
			    status);

	hci_proto_connect_cfm(conn, status);

	hci_conn_del(conn);

	/* Since we may have temporarily stopped the background scanning in
	 * favor of connection establishment, we should restart it.
	 */
	hci_update_background_scan(hdev);

	/* Re-enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	mgmt_reenable_advertising(hdev);
}
607
608 static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
609 {
610 struct hci_conn *conn;
611
612 if (status == 0)
613 return;
614
615 BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
616 status);
617
618 hci_dev_lock(hdev);
619
620 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
621 if (!conn)
622 goto done;
623
624 hci_le_conn_failed(conn, status);
625
626 done:
627 hci_dev_unlock(hdev);
628 }
629
/* Append an LE Create Connection command for @conn to @req, using the
 * device's scan parameters and the connection parameters previously
 * stored on @conn. Moves the connection into BT_CONNECT on success;
 * returns silently if a usable own address cannot be set up.
 */
static void hci_req_add_le_create_conn(struct hci_request *req,
				       struct hci_conn *conn)
{
	struct hci_cp_le_create_conn cp;
	struct hci_dev *hdev = conn->hdev;
	u8 own_addr_type;

	memset(&cp, 0, sizeof(cp));

	/* Update random address, but set require_privacy to false so
	 * that we never connect with an unresolvable address.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
	cp.scan_window = cpu_to_le16(hdev->le_scan_window);
	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.own_address_type = own_addr_type;
	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);

	conn->state = BT_CONNECT;
}
661
/* Append commands to @req that start directed advertising towards the
 * peer of @conn, used when connecting in the slave role. Moves the
 * connection into BT_CONNECT; returns silently if a usable own address
 * cannot be set up.
 */
static void hci_req_directed_advertising(struct hci_request *req,
					 struct hci_conn *conn)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type;
	u8 enable;

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	if (hci_update_random_address(req, false, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.type = LE_ADV_DIRECT_IND;
	cp.own_address_type = own_addr_type;
	cp.direct_addr_type = conn->dst_type;
	bacpy(&cp.direct_addr, &conn->dst);
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	enable = 0x01;
	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);

	conn->state = BT_CONNECT;
}
697
/* Establish an LE connection to @dst/@dst_type with the requested
 * security level, connection timeout and role.
 *
 * Returns a held hci_conn on success (also when an existing connection
 * was reused), ERR_PTR(-EBUSY) when another LE connection attempt is in
 * progress or active scanning prevents the slave role, ERR_PTR(-ENOMEM)
 * on allocation failure, or another ERR_PTR from running the request.
 */
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, u8 sec_level, u16 conn_timeout,
				u8 role)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	struct hci_request req;
	int err;

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
	if (conn) {
		conn->pending_sec_level = sec_level;
		goto done;
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		return ERR_PTR(-EBUSY);

	/* When given an identity address with existing identity
	 * resolving key, the connection needs to be established
	 * to a resolvable random address.
	 *
	 * This uses the cached random resolvable address from
	 * a previous scan. When no cached address is available,
	 * try connecting to the identity address instead.
	 *
	 * Storing the resolvable random address is required here
	 * to handle connection failures. The address will later
	 * be resolved back into the original identity address
	 * from the connect request.
	 */
	irk = hci_find_irk_by_addr(hdev, dst, dst_type);
	if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
		dst = &irk->rpa;
		dst_type = ADDR_LE_DEV_RANDOM;
	}

	conn = hci_conn_add(hdev, LE_LINK, dst);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;

	hci_req_init(&req, hdev);

	/* Disable advertising if we're active. For master role
	 * connections most controllers will refuse to connect if
	 * advertising is enabled, and for slave role connections we
	 * anyway have to disable it in order to start directed
	 * advertising.
	 */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
		u8 enable = 0x00;
		hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			    &enable);
	}

	conn->role = role;

	/* If requested to connect as slave use directed advertising */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* If we're active scanning most controllers are unable
		 * to initiate advertising. Simply reject the attempt.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
		    hdev->le_scan_type == LE_SCAN_ACTIVE) {
			skb_queue_purge(&req.cmd_q);
			hci_conn_del(conn);
			return ERR_PTR(-EBUSY);
		}

		hci_req_directed_advertising(&req, conn);
		goto create_conn;
	}

	conn->out = true;

	/* Prefer stored per-device parameters, fall back to defaults */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		conn->le_conn_min_interval = params->conn_min_interval;
		conn->le_conn_max_interval = params->conn_max_interval;
		conn->le_conn_latency = params->conn_latency;
		conn->le_supv_timeout = params->supervision_timeout;
	} else {
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
		conn->le_conn_latency = hdev->le_conn_latency;
		conn->le_supv_timeout = hdev->le_supv_timeout;
	}

	/* If controller is scanning, we stop it since some controllers are
	 * not able to scan and connect at the same time. Also set the
	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
	 * handler for scan disabling knows to set the correct discovery
	 * state.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
		hci_req_add_le_scan_disable(&req);
		set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
	}

	hci_req_add_le_create_conn(&req, conn);

create_conn:
	err = hci_req_run(&req, create_le_conn_complete);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

done:
	hci_conn_hold(conn);
	return conn;
}
829
/* Look up or create an outgoing BR/EDR ACL connection to @dst and start
 * establishing it if it is not already in progress or connected.
 *
 * Returns a held hci_conn, ERR_PTR(-ENOTSUPP) when BR/EDR is disabled
 * on the controller, or ERR_PTR(-ENOMEM) on allocation failure.
 */
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type)
{
	struct hci_conn *acl;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return ERR_PTR(-ENOTSUPP);

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	/* Only kick off connection establishment for fresh or previously
	 * closed connections; anything else is already in progress.
	 */
	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}
856
/* Establish an (e)SCO connection of the given @type to @dst, creating
 * or reusing the underlying ACL link first. SCO setup is started once
 * the ACL is connected and not in the middle of a mode change; in the
 * latter case it is deferred via HCI_CONN_SCO_SETUP_PEND.
 *
 * Returns a held SCO hci_conn or an ERR_PTR from the ACL setup or
 * allocation failure.
 */
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst);
		if (!sco) {
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	/* Cross-link ACL and SCO so either can find its counterpart */
	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	sco->setting = setting;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
899
900 /* Check link security requirement */
901 int hci_conn_check_link_mode(struct hci_conn *conn)
902 {
903 BT_DBG("hcon %p", conn);
904
905 /* In Secure Connections Only mode, it is required that Secure
906 * Connections is used and the link is encrypted with AES-CCM
907 * using a P-256 authenticated combination key.
908 */
909 if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) {
910 if (!hci_conn_sc_enabled(conn) ||
911 !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
912 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
913 return 0;
914 }
915
916 if (hci_conn_ssp_enabled(conn) &&
917 !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
918 return 0;
919
920 return 1;
921 }
922
/* Authenticate remote device.
 *
 * Raise the pending security level if needed and, unless authentication
 * is already satisfied or in progress, send an Authentication Requested
 * command. Returns 1 when the link is already sufficiently
 * authenticated, 0 when authentication was (or already is) pending.
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
		else
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
959
/* Encrypt the link.
 *
 * Sends Set Connection Encryption unless an encryption change is
 * already pending on this connection.
 */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle  = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			     &cp);
	}
}
973
/* Enable security.
 *
 * Raise the connection's security to @sec_level, authenticating and/or
 * encrypting as required. For LE links this delegates to SMP. For
 * BR/EDR the existing link key type decides whether it already grants
 * the requested level; otherwise authentication and then encryption is
 * requested.
 *
 * Returns 1 when the requirement is already satisfied, 0 when further
 * (asynchronous) authentication/encryption work was triggered.
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	   security level 3. */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		return 1;

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
1039
1040 /* Check secure link requirement */
1041 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1042 {
1043 BT_DBG("hcon %p", conn);
1044
1045 /* Accept if non-secure or higher security level is required */
1046 if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1047 return 1;
1048
1049 /* Accept if secure or higher security level is already present */
1050 if (conn->sec_level == BT_SECURITY_HIGH ||
1051 conn->sec_level == BT_SECURITY_FIPS)
1052 return 1;
1053
1054 /* Reject not secure link */
1055 return 0;
1056 }
1057 EXPORT_SYMBOL(hci_conn_check_secure);
1058
1059 /* Change link key */
1060 int hci_conn_change_link_key(struct hci_conn *conn)
1061 {
1062 BT_DBG("hcon %p", conn);
1063
1064 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1065 struct hci_cp_change_conn_link_key cp;
1066 cp.handle = cpu_to_le16(conn->handle);
1067 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
1068 sizeof(cp), &cp);
1069 }
1070
1071 return 0;
1072 }
1073
1074 /* Switch role */
1075 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1076 {
1077 BT_DBG("hcon %p", conn);
1078
1079 if (role == conn->role)
1080 return 1;
1081
1082 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1083 struct hci_cp_switch_role cp;
1084 bacpy(&cp.bdaddr, &conn->dst);
1085 cp.role = role;
1086 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1087 }
1088
1089 return 0;
1090 }
1091 EXPORT_SYMBOL(hci_conn_switch_role);
1092
/* Enter active mode.
 *
 * Exit sniff mode if the connection is in it (and either power save is
 * enabled or @force_active is set), then (re)arm the idle timer that
 * will put the link back into sniff mode later.
 */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	/* Only one mode change may be pending at a time */
	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		queue_delayed_work(hdev->workqueue, &conn->idle_work,
				   msecs_to_jiffies(hdev->idle_timeout));
}
1117
1118 /* Drop all connection on the device */
1119 void hci_conn_hash_flush(struct hci_dev *hdev)
1120 {
1121 struct hci_conn_hash *h = &hdev->conn_hash;
1122 struct hci_conn *c, *n;
1123
1124 BT_DBG("hdev %s", hdev->name);
1125
1126 list_for_each_entry_safe(c, n, &h->list, list) {
1127 c->state = BT_CLOSED;
1128
1129 hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1130 hci_conn_del(c);
1131 }
1132 }
1133
1134 /* Check pending connect attempts */
1135 void hci_conn_check_pending(struct hci_dev *hdev)
1136 {
1137 struct hci_conn *conn;
1138
1139 BT_DBG("hdev %s", hdev->name);
1140
1141 hci_dev_lock(hdev);
1142
1143 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1144 if (conn)
1145 hci_acl_create_connection(conn);
1146
1147 hci_dev_unlock(hdev);
1148 }
1149
1150 static u32 get_link_mode(struct hci_conn *conn)
1151 {
1152 u32 link_mode = 0;
1153
1154 if (conn->role == HCI_ROLE_MASTER)
1155 link_mode |= HCI_LM_MASTER;
1156
1157 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1158 link_mode |= HCI_LM_ENCRYPT;
1159
1160 if (test_bit(HCI_CONN_AUTH, &conn->flags))
1161 link_mode |= HCI_LM_AUTH;
1162
1163 if (test_bit(HCI_CONN_SECURE, &conn->flags))
1164 link_mode |= HCI_LM_SECURE;
1165
1166 if (test_bit(HCI_CONN_FIPS, &conn->flags))
1167 link_mode |= HCI_LM_FIPS;
1168
1169 return link_mode;
1170 }
1171
1172 int hci_get_conn_list(void __user *arg)
1173 {
1174 struct hci_conn *c;
1175 struct hci_conn_list_req req, *cl;
1176 struct hci_conn_info *ci;
1177 struct hci_dev *hdev;
1178 int n = 0, size, err;
1179
1180 if (copy_from_user(&req, arg, sizeof(req)))
1181 return -EFAULT;
1182
1183 if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1184 return -EINVAL;
1185
1186 size = sizeof(req) + req.conn_num * sizeof(*ci);
1187
1188 cl = kmalloc(size, GFP_KERNEL);
1189 if (!cl)
1190 return -ENOMEM;
1191
1192 hdev = hci_dev_get(req.dev_id);
1193 if (!hdev) {
1194 kfree(cl);
1195 return -ENODEV;
1196 }
1197
1198 ci = cl->conn_info;
1199
1200 hci_dev_lock(hdev);
1201 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1202 bacpy(&(ci + n)->bdaddr, &c->dst);
1203 (ci + n)->handle = c->handle;
1204 (ci + n)->type = c->type;
1205 (ci + n)->out = c->out;
1206 (ci + n)->state = c->state;
1207 (ci + n)->link_mode = get_link_mode(c);
1208 if (++n >= req.conn_num)
1209 break;
1210 }
1211 hci_dev_unlock(hdev);
1212
1213 cl->dev_id = hdev->id;
1214 cl->conn_num = n;
1215 size = sizeof(req) + n * sizeof(*ci);
1216
1217 hci_dev_put(hdev);
1218
1219 err = copy_to_user(arg, cl, size);
1220 kfree(cl);
1221
1222 return err ? -EFAULT : 0;
1223 }
1224
1225 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1226 {
1227 struct hci_conn_info_req req;
1228 struct hci_conn_info ci;
1229 struct hci_conn *conn;
1230 char __user *ptr = arg + sizeof(req);
1231
1232 if (copy_from_user(&req, arg, sizeof(req)))
1233 return -EFAULT;
1234
1235 hci_dev_lock(hdev);
1236 conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1237 if (conn) {
1238 bacpy(&ci.bdaddr, &conn->dst);
1239 ci.handle = conn->handle;
1240 ci.type = conn->type;
1241 ci.out = conn->out;
1242 ci.state = conn->state;
1243 ci.link_mode = get_link_mode(conn);
1244 }
1245 hci_dev_unlock(hdev);
1246
1247 if (!conn)
1248 return -ENOENT;
1249
1250 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1251 }
1252
1253 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1254 {
1255 struct hci_auth_info_req req;
1256 struct hci_conn *conn;
1257
1258 if (copy_from_user(&req, arg, sizeof(req)))
1259 return -EFAULT;
1260
1261 hci_dev_lock(hdev);
1262 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1263 if (conn)
1264 req.type = conn->auth_type;
1265 hci_dev_unlock(hdev);
1266
1267 if (!conn)
1268 return -ENOENT;
1269
1270 return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1271 }
1272
1273 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1274 {
1275 struct hci_dev *hdev = conn->hdev;
1276 struct hci_chan *chan;
1277
1278 BT_DBG("%s hcon %p", hdev->name, conn);
1279
1280 chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
1281 if (!chan)
1282 return NULL;
1283
1284 chan->conn = conn;
1285 skb_queue_head_init(&chan->data_q);
1286 chan->state = BT_CONNECTED;
1287
1288 list_add_rcu(&chan->list, &conn->chan_list);
1289
1290 return chan;
1291 }
1292
1293 void hci_chan_del(struct hci_chan *chan)
1294 {
1295 struct hci_conn *conn = chan->conn;
1296 struct hci_dev *hdev = conn->hdev;
1297
1298 BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1299
1300 list_del_rcu(&chan->list);
1301
1302 synchronize_rcu();
1303
1304 hci_conn_drop(conn);
1305
1306 skb_queue_purge(&chan->data_q);
1307 kfree(chan);
1308 }
1309
1310 void hci_chan_list_flush(struct hci_conn *conn)
1311 {
1312 struct hci_chan *chan, *n;
1313
1314 BT_DBG("hcon %p", conn);
1315
1316 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1317 hci_chan_del(chan);
1318 }
1319
1320 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1321 __u16 handle)
1322 {
1323 struct hci_chan *hchan;
1324
1325 list_for_each_entry(hchan, &hcon->chan_list, list) {
1326 if (hchan->handle == handle)
1327 return hchan;
1328 }
1329
1330 return NULL;
1331 }
1332
1333 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1334 {
1335 struct hci_conn_hash *h = &hdev->conn_hash;
1336 struct hci_conn *hcon;
1337 struct hci_chan *hchan = NULL;
1338
1339 rcu_read_lock();
1340
1341 list_for_each_entry_rcu(hcon, &h->list, list) {
1342 hchan = __hci_chan_lookup_handle(hcon, handle);
1343 if (hchan)
1344 break;
1345 }
1346
1347 rcu_read_unlock();
1348
1349 return hchan;
1350 }
/* This page took 0.059551 seconds and 6 git commands to generate. */