net/bluetooth/hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32
33 /* Handle HCI Event packets */
34
35 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
36 {
37 __u8 status = *((__u8 *) skb->data);
38
39 BT_DBG("%s status 0x%2.2x", hdev->name, status);
40
41 if (status) {
42 hci_dev_lock(hdev);
43 mgmt_stop_discovery_failed(hdev, status);
44 hci_dev_unlock(hdev);
45 return;
46 }
47
48 clear_bit(HCI_INQUIRY, &hdev->flags);
49
50 hci_dev_lock(hdev);
51 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
52 hci_dev_unlock(hdev);
53
54 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
55
56 hci_conn_check_pending(hdev);
57 }
58
59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60 {
61 __u8 status = *((__u8 *) skb->data);
62
63 BT_DBG("%s status 0x%2.2x", hdev->name, status);
64
65 if (status)
66 return;
67
68 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69 }
70
71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 {
73 __u8 status = *((__u8 *) skb->data);
74
75 BT_DBG("%s status 0x%2.2x", hdev->name, status);
76
77 if (status)
78 return;
79
80 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81
82 hci_conn_check_pending(hdev);
83 }
84
85 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
86 struct sk_buff *skb)
87 {
88 BT_DBG("%s", hdev->name);
89 }
90
91 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
92 {
93 struct hci_rp_role_discovery *rp = (void *) skb->data;
94 struct hci_conn *conn;
95
96 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
97
98 if (rp->status)
99 return;
100
101 hci_dev_lock(hdev);
102
103 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
104 if (conn) {
105 if (rp->role)
106 conn->link_mode &= ~HCI_LM_MASTER;
107 else
108 conn->link_mode |= HCI_LM_MASTER;
109 }
110
111 hci_dev_unlock(hdev);
112 }
113
114 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
115 {
116 struct hci_rp_read_link_policy *rp = (void *) skb->data;
117 struct hci_conn *conn;
118
119 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
120
121 if (rp->status)
122 return;
123
124 hci_dev_lock(hdev);
125
126 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
127 if (conn)
128 conn->link_policy = __le16_to_cpu(rp->policy);
129
130 hci_dev_unlock(hdev);
131 }
132
133 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
134 {
135 struct hci_rp_write_link_policy *rp = (void *) skb->data;
136 struct hci_conn *conn;
137 void *sent;
138
139 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
140
141 if (rp->status)
142 return;
143
144 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
145 if (!sent)
146 return;
147
148 hci_dev_lock(hdev);
149
150 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
151 if (conn)
152 conn->link_policy = get_unaligned_le16(sent + 2);
153
154 hci_dev_unlock(hdev);
155 }
156
157 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
158 struct sk_buff *skb)
159 {
160 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
161
162 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
163
164 if (rp->status)
165 return;
166
167 hdev->link_policy = __le16_to_cpu(rp->policy);
168 }
169
170 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
171 struct sk_buff *skb)
172 {
173 __u8 status = *((__u8 *) skb->data);
174 void *sent;
175
176 BT_DBG("%s status 0x%2.2x", hdev->name, status);
177
178 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
179 if (!sent)
180 return;
181
182 if (!status)
183 hdev->link_policy = get_unaligned_le16(sent);
184
185 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
186 }
187
188 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
189 {
190 __u8 status = *((__u8 *) skb->data);
191
192 BT_DBG("%s status 0x%2.2x", hdev->name, status);
193
194 clear_bit(HCI_RESET, &hdev->flags);
195
196 hci_req_complete(hdev, HCI_OP_RESET, status);
197
198 /* Reset all non-persistent flags */
199 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
200 BIT(HCI_PERIODIC_INQ));
201
202 hdev->discovery.state = DISCOVERY_STOPPED;
203 }
204
205 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
206 {
207 __u8 status = *((__u8 *) skb->data);
208 void *sent;
209
210 BT_DBG("%s status 0x%2.2x", hdev->name, status);
211
212 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
213 if (!sent)
214 return;
215
216 hci_dev_lock(hdev);
217
218 if (test_bit(HCI_MGMT, &hdev->dev_flags))
219 mgmt_set_local_name_complete(hdev, sent, status);
220 else if (!status)
221 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
222
223 hci_dev_unlock(hdev);
224
225 hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
226 }
227
228 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
229 {
230 struct hci_rp_read_local_name *rp = (void *) skb->data;
231
232 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
233
234 if (rp->status)
235 return;
236
237 if (test_bit(HCI_SETUP, &hdev->dev_flags))
238 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
239 }
240
241 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
242 {
243 __u8 status = *((__u8 *) skb->data);
244 void *sent;
245
246 BT_DBG("%s status 0x%2.2x", hdev->name, status);
247
248 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
249 if (!sent)
250 return;
251
252 if (!status) {
253 __u8 param = *((__u8 *) sent);
254
255 if (param == AUTH_ENABLED)
256 set_bit(HCI_AUTH, &hdev->flags);
257 else
258 clear_bit(HCI_AUTH, &hdev->flags);
259 }
260
261 if (test_bit(HCI_MGMT, &hdev->dev_flags))
262 mgmt_auth_enable_complete(hdev, status);
263
264 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
265 }
266
267 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
268 {
269 __u8 status = *((__u8 *) skb->data);
270 void *sent;
271
272 BT_DBG("%s status 0x%2.2x", hdev->name, status);
273
274 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
275 if (!sent)
276 return;
277
278 if (!status) {
279 __u8 param = *((__u8 *) sent);
280
281 if (param)
282 set_bit(HCI_ENCRYPT, &hdev->flags);
283 else
284 clear_bit(HCI_ENCRYPT, &hdev->flags);
285 }
286
287 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
288 }
289
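/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE: mirror the written
 * scan parameter into the HCI_ISCAN/HCI_PSCAN flags, notify mgmt about
 * discoverable/connectable changes and (re)arm the discoverable timeout.
 */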
290 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
291 {
292 __u8 param, status = *((__u8 *) skb->data);
293 int old_pscan, old_iscan;
294 void *sent;
295
296 BT_DBG("%s status 0x%2.2x", hdev->name, status);
297
298 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
299 if (!sent)
300 return;
301
302 param = *((__u8 *) sent);
303
304 hci_dev_lock(hdev);
305
306 if (status != 0) {
307 mgmt_write_scan_failed(hdev, param, status);
308 hdev->discov_timeout = 0;
309 goto done;
310 }
311
312 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
313 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
314
315 if (param & SCAN_INQUIRY) {
316 set_bit(HCI_ISCAN, &hdev->flags);
317 if (!old_iscan)
318 mgmt_discoverable(hdev, 1);
319 if (hdev->discov_timeout > 0) {
320 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
321 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
322 to);
323 }
324 } else if (old_iscan)
325 mgmt_discoverable(hdev, 0);
326
327 if (param & SCAN_PAGE) {
328 set_bit(HCI_PSCAN, &hdev->flags);
329 if (!old_pscan)
330 mgmt_connectable(hdev, 1);
331 } else if (old_pscan)
332 mgmt_connectable(hdev, 0);
333
334 done:
335 hci_dev_unlock(hdev);
336 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
337 }
338
339 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
340 {
341 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
342
343 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
344
345 if (rp->status)
346 return;
347
348 memcpy(hdev->dev_class, rp->dev_class, 3);
349
350 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
351 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
352 }
353
354 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
355 {
356 __u8 status = *((__u8 *) skb->data);
357 void *sent;
358
359 BT_DBG("%s status 0x%2.2x", hdev->name, status);
360
361 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
362 if (!sent)
363 return;
364
365 hci_dev_lock(hdev);
366
367 if (status == 0)
368 memcpy(hdev->dev_class, sent, 3);
369
370 if (test_bit(HCI_MGMT, &hdev->dev_flags))
371 mgmt_set_class_of_dev_complete(hdev, sent, status);
372
373 hci_dev_unlock(hdev);
374 }
375
376 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
377 {
378 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
379 __u16 setting;
380
381 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
382
383 if (rp->status)
384 return;
385
386 setting = __le16_to_cpu(rp->voice_setting);
387
388 if (hdev->voice_setting == setting)
389 return;
390
391 hdev->voice_setting = setting;
392
393 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
394
395 if (hdev->notify)
396 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
397 }
398
399 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
400 struct sk_buff *skb)
401 {
402 __u8 status = *((__u8 *) skb->data);
403 __u16 setting;
404 void *sent;
405
406 BT_DBG("%s status 0x%2.2x", hdev->name, status);
407
408 if (status)
409 return;
410
411 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
412 if (!sent)
413 return;
414
415 setting = get_unaligned_le16(sent);
416
417 if (hdev->voice_setting == setting)
418 return;
419
420 hdev->voice_setting = setting;
421
422 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
423
424 if (hdev->notify)
425 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
426 }
427
428 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
429 {
430 __u8 status = *((__u8 *) skb->data);
431
432 BT_DBG("%s status 0x%2.2x", hdev->name, status);
433
434 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
435 }
436
437 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
438 {
439 __u8 status = *((__u8 *) skb->data);
440 void *sent;
441
442 BT_DBG("%s status 0x%2.2x", hdev->name, status);
443
444 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
445 if (!sent)
446 return;
447
448 if (test_bit(HCI_MGMT, &hdev->dev_flags))
449 mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
450 else if (!status) {
451 if (*((u8 *) sent))
452 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
453 else
454 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
455 }
456 }
457
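/* Pick the inquiry mode to configure: 2 = inquiry with extended responses,
 * 1 = inquiry results with RSSI, 0 = standard inquiry.  A few controllers
 * (matched below by manufacturer, HCI revision and LMP subversion) are
 * given mode 1 even though they do not advertise the RSSI feature bit.
 */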
458 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
459 {
460 if (hdev->features[6] & LMP_EXT_INQ)
461 return 2;
462
463 if (hdev->features[3] & LMP_RSSI_INQ)
464 return 1;
465
466 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
467 hdev->lmp_subver == 0x0757)
468 return 1;
469
470 if (hdev->manufacturer == 15) {
471 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
472 return 1;
473 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
474 return 1;
475 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
476 return 1;
477 }
478
479 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
480 hdev->lmp_subver == 0x1805)
481 return 1;
482
483 return 0;
484 }
485
486 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
487 {
488 u8 mode;
489
490 mode = hci_get_inquiry_mode(hdev);
491
492 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
493 }
494
495 static void hci_setup_event_mask(struct hci_dev *hdev)
496 {
497 /* The second byte is 0xff instead of 0x9f (two reserved bits
498 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
499 * command otherwise */
500 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
501
502 /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
503 * any event mask for pre-1.2 devices */
504 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
505 return;
506
507 events[4] |= 0x01; /* Flow Specification Complete */
508 events[4] |= 0x02; /* Inquiry Result with RSSI */
509 events[4] |= 0x04; /* Read Remote Extended Features Complete */
510 events[5] |= 0x08; /* Synchronous Connection Complete */
511 events[5] |= 0x10; /* Synchronous Connection Changed */
512
513 if (hdev->features[3] & LMP_RSSI_INQ)
514 events[4] |= 0x02; /* Inquiry Result with RSSI */
515
516 if (hdev->features[5] & LMP_SNIFF_SUBR)
517 events[5] |= 0x20; /* Sniff Subrating */
518
519 if (hdev->features[5] & LMP_PAUSE_ENC)
520 events[5] |= 0x80; /* Encryption Key Refresh Complete */
521
522 if (hdev->features[6] & LMP_EXT_INQ)
523 events[5] |= 0x40; /* Extended Inquiry Result */
524
525 if (hdev->features[6] & LMP_NO_FLUSH)
526 events[7] |= 0x01; /* Enhanced Flush Complete */
527
528 if (hdev->features[7] & LMP_LSTO)
529 events[6] |= 0x80; /* Link Supervision Timeout Changed */
530
531 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
532 events[6] |= 0x01; /* IO Capability Request */
533 events[6] |= 0x02; /* IO Capability Response */
534 events[6] |= 0x04; /* User Confirmation Request */
535 events[6] |= 0x08; /* User Passkey Request */
536 events[6] |= 0x10; /* Remote OOB Data Request */
537 events[6] |= 0x20; /* Simple Pairing Complete */
538 events[7] |= 0x04; /* User Passkey Notification */
539 events[7] |= 0x08; /* Keypress Notification */
540 events[7] |= 0x10; /* Remote Host Supported
541 * Features Notification */
542 }
543
544 if (hdev->features[4] & LMP_LE)
545 events[7] |= 0x20; /* LE Meta-Event */
546
547 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
548 }
549
550 static void hci_setup(struct hci_dev *hdev)
551 {
552 if (hdev->dev_type != HCI_BREDR)
553 return;
554
555 hci_setup_event_mask(hdev);
556
557 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
558 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
559
560 if (lmp_ssp_capable(hdev)) {
561 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
562 u8 mode = 0x01;
563 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
564 sizeof(mode), &mode);
565 } else {
566 struct hci_cp_write_eir cp;
567
568 memset(hdev->eir, 0, sizeof(hdev->eir));
569 memset(&cp, 0, sizeof(cp));
570
571 hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
572 }
573 }
574
575 if (hdev->features[3] & LMP_RSSI_INQ)
576 hci_setup_inquiry_mode(hdev);
577
578 if (hdev->features[7] & LMP_INQ_TX_PWR)
579 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
580
581 if (hdev->features[7] & LMP_EXTFEATURES) {
582 struct hci_cp_read_local_ext_features cp;
583
584 cp.page = 0x01;
585 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
586 &cp);
587 }
588
589 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
590 u8 enable = 1;
591 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
592 &enable);
593 }
594 }
595
596 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
597 {
598 struct hci_rp_read_local_version *rp = (void *) skb->data;
599
600 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
601
602 if (rp->status)
603 goto done;
604
605 hdev->hci_ver = rp->hci_ver;
606 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
607 hdev->lmp_ver = rp->lmp_ver;
608 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
609 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
610
611 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
612 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
613
614 if (test_bit(HCI_INIT, &hdev->flags))
615 hci_setup(hdev);
616
617 done:
618 hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
619 }
620
621 static void hci_setup_link_policy(struct hci_dev *hdev)
622 {
623 struct hci_cp_write_def_link_policy cp;
624 u16 link_policy = 0;
625
626 if (hdev->features[0] & LMP_RSWITCH)
627 link_policy |= HCI_LP_RSWITCH;
628 if (hdev->features[0] & LMP_HOLD)
629 link_policy |= HCI_LP_HOLD;
630 if (hdev->features[0] & LMP_SNIFF)
631 link_policy |= HCI_LP_SNIFF;
632 if (hdev->features[1] & LMP_PARK)
633 link_policy |= HCI_LP_PARK;
634
635 cp.policy = cpu_to_le16(link_policy);
636 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
637 }
638
639 static void hci_cc_read_local_commands(struct hci_dev *hdev,
640 struct sk_buff *skb)
641 {
642 struct hci_rp_read_local_commands *rp = (void *) skb->data;
643
644 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
645
646 if (rp->status)
647 goto done;
648
649 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
650
651 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
652 hci_setup_link_policy(hdev);
653
654 done:
655 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
656 }
657
658 static void hci_cc_read_local_features(struct hci_dev *hdev,
659 struct sk_buff *skb)
660 {
661 struct hci_rp_read_local_features *rp = (void *) skb->data;
662
663 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
664
665 if (rp->status)
666 return;
667
668 memcpy(hdev->features, rp->features, 8);
669
670 /* Adjust default settings according to features
671 * supported by the device. */
672
673 if (hdev->features[0] & LMP_3SLOT)
674 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
675
676 if (hdev->features[0] & LMP_5SLOT)
677 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
678
679 if (hdev->features[1] & LMP_HV2) {
680 hdev->pkt_type |= (HCI_HV2);
681 hdev->esco_type |= (ESCO_HV2);
682 }
683
684 if (hdev->features[1] & LMP_HV3) {
685 hdev->pkt_type |= (HCI_HV3);
686 hdev->esco_type |= (ESCO_HV3);
687 }
688
689 if (hdev->features[3] & LMP_ESCO)
690 hdev->esco_type |= (ESCO_EV3);
691
692 if (hdev->features[4] & LMP_EV4)
693 hdev->esco_type |= (ESCO_EV4);
694
695 if (hdev->features[4] & LMP_EV5)
696 hdev->esco_type |= (ESCO_EV5);
697
698 if (hdev->features[5] & LMP_EDR_ESCO_2M)
699 hdev->esco_type |= (ESCO_2EV3);
700
701 if (hdev->features[5] & LMP_EDR_ESCO_3M)
702 hdev->esco_type |= (ESCO_3EV3);
703
704 if (hdev->features[5] & LMP_EDR_3S_ESCO)
705 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
706
707 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
708 hdev->features[0], hdev->features[1],
709 hdev->features[2], hdev->features[3],
710 hdev->features[4], hdev->features[5],
711 hdev->features[6], hdev->features[7]);
712 }
713
714 static void hci_set_le_support(struct hci_dev *hdev)
715 {
716 struct hci_cp_write_le_host_supported cp;
717
718 memset(&cp, 0, sizeof(cp));
719
720 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
721 cp.le = 1;
722 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
723 }
724
725 if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
726 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
727 &cp);
728 }
729
730 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
731 struct sk_buff *skb)
732 {
733 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
734
735 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
736
737 if (rp->status)
738 goto done;
739
740 switch (rp->page) {
741 case 0:
742 memcpy(hdev->features, rp->features, 8);
743 break;
744 case 1:
745 memcpy(hdev->host_features, rp->features, 8);
746 break;
747 }
748
749 if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
750 hci_set_le_support(hdev);
751
752 done:
753 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
754 }
755
756 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
757 struct sk_buff *skb)
758 {
759 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
760
761 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
762
763 if (rp->status)
764 return;
765
766 hdev->flow_ctl_mode = rp->mode;
767
768 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
769 }
770
771 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
772 {
773 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
774
775 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
776
777 if (rp->status)
778 return;
779
780 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
781 hdev->sco_mtu = rp->sco_mtu;
782 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
783 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
784
785 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
786 hdev->sco_mtu = 64;
787 hdev->sco_pkts = 8;
788 }
789
790 hdev->acl_cnt = hdev->acl_pkts;
791 hdev->sco_cnt = hdev->sco_pkts;
792
793 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
794 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
795 }
796
797 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
798 {
799 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
800
801 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
802
803 if (!rp->status)
804 bacpy(&hdev->bdaddr, &rp->bdaddr);
805
806 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
807 }
808
809 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
810 struct sk_buff *skb)
811 {
812 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
813
814 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
815
816 if (rp->status)
817 return;
818
819 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
820 hdev->block_len = __le16_to_cpu(rp->block_len);
821 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
822
823 hdev->block_cnt = hdev->num_blocks;
824
825 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
826 hdev->block_cnt, hdev->block_len);
827
828 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
829 }
830
831 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
832 {
833 __u8 status = *((__u8 *) skb->data);
834
835 BT_DBG("%s status 0x%2.2x", hdev->name, status);
836
837 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
838 }
839
840 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
841 struct sk_buff *skb)
842 {
843 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
844
845 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
846
847 if (rp->status)
848 return;
849
850 hdev->amp_status = rp->amp_status;
851 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
852 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
853 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
854 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
855 hdev->amp_type = rp->amp_type;
856 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
857 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
858 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
859 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
860
861 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
862 }
863
864 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
865 struct sk_buff *skb)
866 {
867 __u8 status = *((__u8 *) skb->data);
868
869 BT_DBG("%s status 0x%2.2x", hdev->name, status);
870
871 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
872 }
873
874 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
875 {
876 __u8 status = *((__u8 *) skb->data);
877
878 BT_DBG("%s status 0x%2.2x", hdev->name, status);
879
880 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
881 }
882
883 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
884 struct sk_buff *skb)
885 {
886 __u8 status = *((__u8 *) skb->data);
887
888 BT_DBG("%s status 0x%2.2x", hdev->name, status);
889
890 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
891 }
892
893 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
894 struct sk_buff *skb)
895 {
896 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
897
898 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
899
900 if (!rp->status)
901 hdev->inq_tx_power = rp->tx_power;
902
903 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
904 }
905
906 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
907 {
908 __u8 status = *((__u8 *) skb->data);
909
910 BT_DBG("%s status 0x%2.2x", hdev->name, status);
911
912 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
913 }
914
915 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
916 {
917 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
918 struct hci_cp_pin_code_reply *cp;
919 struct hci_conn *conn;
920
921 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
922
923 hci_dev_lock(hdev);
924
925 if (test_bit(HCI_MGMT, &hdev->dev_flags))
926 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
927
928 if (rp->status != 0)
929 goto unlock;
930
931 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
932 if (!cp)
933 goto unlock;
934
935 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
936 if (conn)
937 conn->pin_length = cp->pin_len;
938
939 unlock:
940 hci_dev_unlock(hdev);
941 }
942
943 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
944 {
945 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
946
947 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
948
949 hci_dev_lock(hdev);
950
951 if (test_bit(HCI_MGMT, &hdev->dev_flags))
952 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
953 rp->status);
954
955 hci_dev_unlock(hdev);
956 }
957
958 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
959 struct sk_buff *skb)
960 {
961 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
962
963 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
964
965 if (rp->status)
966 return;
967
968 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
969 hdev->le_pkts = rp->le_max_pkt;
970
971 hdev->le_cnt = hdev->le_pkts;
972
973 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
974
975 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
976 }
977
978 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
979 {
980 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
981
982 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
983
984 hci_dev_lock(hdev);
985
986 if (test_bit(HCI_MGMT, &hdev->dev_flags))
987 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
988 rp->status);
989
990 hci_dev_unlock(hdev);
991 }
992
993 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
994 struct sk_buff *skb)
995 {
996 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
997
998 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
999
1000 hci_dev_lock(hdev);
1001
1002 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1003 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1004 ACL_LINK, 0, rp->status);
1005
1006 hci_dev_unlock(hdev);
1007 }
1008
1009 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1010 {
1011 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1012
1013 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1014
1015 hci_dev_lock(hdev);
1016
1017 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1018 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1019 0, rp->status);
1020
1021 hci_dev_unlock(hdev);
1022 }
1023
1024 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1025 struct sk_buff *skb)
1026 {
1027 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1028
1029 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1030
1031 hci_dev_lock(hdev);
1032
1033 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1034 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1035 ACL_LINK, 0, rp->status);
1036
1037 hci_dev_unlock(hdev);
1038 }
1039
1040 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1041 struct sk_buff *skb)
1042 {
1043 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1044
1045 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1046
1047 hci_dev_lock(hdev);
1048 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1049 rp->randomizer, rp->status);
1050 hci_dev_unlock(hdev);
1051 }
1052
1053 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1054 {
1055 __u8 status = *((__u8 *) skb->data);
1056
1057 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1058
1059 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1060
1061 if (status) {
1062 hci_dev_lock(hdev);
1063 mgmt_start_discovery_failed(hdev, status);
1064 hci_dev_unlock(hdev);
1065 return;
1066 }
1067 }
1068
1069 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1070 struct sk_buff *skb)
1071 {
1072 struct hci_cp_le_set_scan_enable *cp;
1073 __u8 status = *((__u8 *) skb->data);
1074
1075 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1076
1077 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1078 if (!cp)
1079 return;
1080
1081 switch (cp->enable) {
1082 case LE_SCANNING_ENABLED:
1083 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1084
1085 if (status) {
1086 hci_dev_lock(hdev);
1087 mgmt_start_discovery_failed(hdev, status);
1088 hci_dev_unlock(hdev);
1089 return;
1090 }
1091
1092 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1093
1094 hci_dev_lock(hdev);
1095 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1096 hci_dev_unlock(hdev);
1097 break;
1098
1099 case LE_SCANNING_DISABLED:
1100 if (status) {
1101 hci_dev_lock(hdev);
1102 mgmt_stop_discovery_failed(hdev, status);
1103 hci_dev_unlock(hdev);
1104 return;
1105 }
1106
1107 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1108
1109 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
1110 hdev->discovery.state == DISCOVERY_FINDING) {
1111 mgmt_interleaved_discovery(hdev);
1112 } else {
1113 hci_dev_lock(hdev);
1114 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1115 hci_dev_unlock(hdev);
1116 }
1117
1118 break;
1119
1120 default:
1121 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1122 break;
1123 }
1124 }
1125
1126 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1127 {
1128 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1129
1130 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1131
1132 if (rp->status)
1133 return;
1134
1135 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1136 }
1137
1138 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1139 {
1140 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1141
1142 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1143
1144 if (rp->status)
1145 return;
1146
1147 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1148 }
1149
1150 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1151 struct sk_buff *skb)
1152 {
1153 struct hci_cp_write_le_host_supported *sent;
1154 __u8 status = *((__u8 *) skb->data);
1155
1156 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1157
1158 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1159 if (!sent)
1160 return;
1161
1162 if (!status) {
1163 if (sent->le)
1164 hdev->host_features[0] |= LMP_HOST_LE;
1165 else
1166 hdev->host_features[0] &= ~LMP_HOST_LE;
1167 }
1168
1169 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1170 !test_bit(HCI_INIT, &hdev->flags))
1171 mgmt_le_enable_complete(hdev, sent->le, status);
1172
1173 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1174 }
1175
1176 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1177 {
1178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1179
1180 if (status) {
1181 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1182 hci_conn_check_pending(hdev);
1183 hci_dev_lock(hdev);
1184 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1185 mgmt_start_discovery_failed(hdev, status);
1186 hci_dev_unlock(hdev);
1187 return;
1188 }
1189
1190 set_bit(HCI_INQUIRY, &hdev->flags);
1191
1192 hci_dev_lock(hdev);
1193 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1194 hci_dev_unlock(hdev);
1195 }
1196
1197 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1198 {
1199 struct hci_cp_create_conn *cp;
1200 struct hci_conn *conn;
1201
1202 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1203
1204 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1205 if (!cp)
1206 return;
1207
1208 hci_dev_lock(hdev);
1209
1210 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1211
1212 BT_DBG("%s bdaddr %s hcon %p", hdev->name, batostr(&cp->bdaddr), conn);
1213
1214 if (status) {
1215 if (conn && conn->state == BT_CONNECT) {
1216 if (status != 0x0c || conn->attempt > 2) {
1217 conn->state = BT_CLOSED;
1218 hci_proto_connect_cfm(conn, status);
1219 hci_conn_del(conn);
1220 } else
1221 conn->state = BT_CONNECT2;
1222 }
1223 } else {
1224 if (!conn) {
1225 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1226 if (conn) {
1227 conn->out = true;
1228 conn->link_mode |= HCI_LM_MASTER;
1229 } else
1230 BT_ERR("No memory for new connection");
1231 }
1232 }
1233
1234 hci_dev_unlock(hdev);
1235 }
1236
1237 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1238 {
1239 struct hci_cp_add_sco *cp;
1240 struct hci_conn *acl, *sco;
1241 __u16 handle;
1242
1243 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1244
1245 if (!status)
1246 return;
1247
1248 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1249 if (!cp)
1250 return;
1251
1252 handle = __le16_to_cpu(cp->handle);
1253
1254 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1255
1256 hci_dev_lock(hdev);
1257
1258 acl = hci_conn_hash_lookup_handle(hdev, handle);
1259 if (acl) {
1260 sco = acl->link;
1261 if (sco) {
1262 sco->state = BT_CLOSED;
1263
1264 hci_proto_connect_cfm(sco, status);
1265 hci_conn_del(sco);
1266 }
1267 }
1268
1269 hci_dev_unlock(hdev);
1270 }
1271
1272 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1273 {
1274 struct hci_cp_auth_requested *cp;
1275 struct hci_conn *conn;
1276
1277 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1278
1279 if (!status)
1280 return;
1281
1282 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1283 if (!cp)
1284 return;
1285
1286 hci_dev_lock(hdev);
1287
1288 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1289 if (conn) {
1290 if (conn->state == BT_CONFIG) {
1291 hci_proto_connect_cfm(conn, status);
1292 hci_conn_put(conn);
1293 }
1294 }
1295
1296 hci_dev_unlock(hdev);
1297 }
1298
1299 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1300 {
1301 struct hci_cp_set_conn_encrypt *cp;
1302 struct hci_conn *conn;
1303
1304 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1305
1306 if (!status)
1307 return;
1308
1309 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1310 if (!cp)
1311 return;
1312
1313 hci_dev_lock(hdev);
1314
1315 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1316 if (conn) {
1317 if (conn->state == BT_CONFIG) {
1318 hci_proto_connect_cfm(conn, status);
1319 hci_conn_put(conn);
1320 }
1321 }
1322
1323 hci_dev_unlock(hdev);
1324 }
1325
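/* Decide whether an outgoing connection in BT_CONFIG still needs
 * authentication before it can be reported as connected.
 */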
1326 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1327 struct hci_conn *conn)
1328 {
1329 if (conn->state != BT_CONFIG || !conn->out)
1330 return 0;
1331
1332 if (conn->pending_sec_level == BT_SECURITY_SDP)
1333 return 0;
1334
1335 /* Only request authentication for SSP connections or non-SSP
1336 * devices with sec_level HIGH or if MITM protection is requested */
1337 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1338 conn->pending_sec_level != BT_SECURITY_HIGH)
1339 return 0;
1340
1341 return 1;
1342 }
1343
1344 static int hci_resolve_name(struct hci_dev *hdev,
1345 struct inquiry_entry *e)
1346 {
1347 struct hci_cp_remote_name_req cp;
1348
1349 memset(&cp, 0, sizeof(cp));
1350
1351 bacpy(&cp.bdaddr, &e->data.bdaddr);
1352 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1353 cp.pscan_mode = e->data.pscan_mode;
1354 cp.clock_offset = e->data.clock_offset;
1355
1356 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1357 }
1358
1359 static bool hci_resolve_next_name(struct hci_dev *hdev)
1360 {
1361 struct discovery_state *discov = &hdev->discovery;
1362 struct inquiry_entry *e;
1363
1364 if (list_empty(&discov->resolve))
1365 return false;
1366
1367 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1368 if (hci_resolve_name(hdev, e) == 0) {
1369 e->name_state = NAME_PENDING;
1370 return true;
1371 }
1372
1373 return false;
1374 }
1375
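/* Hand a resolved (or failed) remote name over to mgmt and advance the
 * discovery state machine: resolve the next pending name or, if none is
 * left, mark discovery as stopped.
 */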
1376 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1377 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1378 {
1379 struct discovery_state *discov = &hdev->discovery;
1380 struct inquiry_entry *e;
1381
1382 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1383 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1384 name_len, conn->dev_class);
1385
1386 if (discov->state == DISCOVERY_STOPPED)
1387 return;
1388
1389 if (discov->state == DISCOVERY_STOPPING)
1390 goto discov_complete;
1391
1392 if (discov->state != DISCOVERY_RESOLVING)
1393 return;
1394
1395 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1396 if (e) {
1397 e->name_state = NAME_KNOWN;
1398 list_del(&e->list);
1399 if (name)
1400 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1401 e->data.rssi, name, name_len);
1402 }
1403
1404 if (hci_resolve_next_name(hdev))
1405 return;
1406
1407 discov_complete:
1408 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1409 }
1410
1411 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1412 {
1413 struct hci_cp_remote_name_req *cp;
1414 struct hci_conn *conn;
1415
1416 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1417
1418 /* If successful, wait for the name req complete event before
1419 * checking for the need to do authentication */
1420 if (!status)
1421 return;
1422
1423 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1424 if (!cp)
1425 return;
1426
1427 hci_dev_lock(hdev);
1428
1429 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1430
1431 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1432 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1433
1434 if (!conn)
1435 goto unlock;
1436
1437 if (!hci_outgoing_auth_needed(hdev, conn))
1438 goto unlock;
1439
1440 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1441 struct hci_cp_auth_requested cp;
1442 cp.handle = __cpu_to_le16(conn->handle);
1443 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1444 }
1445
1446 unlock:
1447 hci_dev_unlock(hdev);
1448 }
1449
1450 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1451 {
1452 struct hci_cp_read_remote_features *cp;
1453 struct hci_conn *conn;
1454
1455 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1456
1457 if (!status)
1458 return;
1459
1460 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1461 if (!cp)
1462 return;
1463
1464 hci_dev_lock(hdev);
1465
1466 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1467 if (conn) {
1468 if (conn->state == BT_CONFIG) {
1469 hci_proto_connect_cfm(conn, status);
1470 hci_conn_put(conn);
1471 }
1472 }
1473
1474 hci_dev_unlock(hdev);
1475 }
1476
1477 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1478 {
1479 struct hci_cp_read_remote_ext_features *cp;
1480 struct hci_conn *conn;
1481
1482 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1483
1484 if (!status)
1485 return;
1486
1487 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1488 if (!cp)
1489 return;
1490
1491 hci_dev_lock(hdev);
1492
1493 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1494 if (conn) {
1495 if (conn->state == BT_CONFIG) {
1496 hci_proto_connect_cfm(conn, status);
1497 hci_conn_put(conn);
1498 }
1499 }
1500
1501 hci_dev_unlock(hdev);
1502 }
1503
1504 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1505 {
1506 struct hci_cp_setup_sync_conn *cp;
1507 struct hci_conn *acl, *sco;
1508 __u16 handle;
1509
1510 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1511
1512 if (!status)
1513 return;
1514
1515 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1516 if (!cp)
1517 return;
1518
1519 handle = __le16_to_cpu(cp->handle);
1520
1521 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1522
1523 hci_dev_lock(hdev);
1524
1525 acl = hci_conn_hash_lookup_handle(hdev, handle);
1526 if (acl) {
1527 sco = acl->link;
1528 if (sco) {
1529 sco->state = BT_CLOSED;
1530
1531 hci_proto_connect_cfm(sco, status);
1532 hci_conn_del(sco);
1533 }
1534 }
1535
1536 hci_dev_unlock(hdev);
1537 }
1538
1539 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1540 {
1541 struct hci_cp_sniff_mode *cp;
1542 struct hci_conn *conn;
1543
1544 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1545
1546 if (!status)
1547 return;
1548
1549 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1550 if (!cp)
1551 return;
1552
1553 hci_dev_lock(hdev);
1554
1555 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1556 if (conn) {
1557 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1558
1559 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1560 hci_sco_setup(conn, status);
1561 }
1562
1563 hci_dev_unlock(hdev);
1564 }
1565
1566 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1567 {
1568 struct hci_cp_exit_sniff_mode *cp;
1569 struct hci_conn *conn;
1570
1571 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1572
1573 if (!status)
1574 return;
1575
1576 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1577 if (!cp)
1578 return;
1579
1580 hci_dev_lock(hdev);
1581
1582 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1583 if (conn) {
1584 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1585
1586 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1587 hci_sco_setup(conn, status);
1588 }
1589
1590 hci_dev_unlock(hdev);
1591 }
1592
1593 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1594 {
1595 struct hci_cp_disconnect *cp;
1596 struct hci_conn *conn;
1597
1598 if (!status)
1599 return;
1600
1601 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1602 if (!cp)
1603 return;
1604
1605 hci_dev_lock(hdev);
1606
1607 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1608 if (conn)
1609 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1610 conn->dst_type, status);
1611
1612 hci_dev_unlock(hdev);
1613 }
1614
1615 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1616 {
1617 struct hci_cp_le_create_conn *cp;
1618 struct hci_conn *conn;
1619
1620 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1621
1622 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1623 if (!cp)
1624 return;
1625
1626 hci_dev_lock(hdev);
1627
1628 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1629
1630 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1631 conn);
1632
1633 if (status) {
1634 if (conn && conn->state == BT_CONNECT) {
1635 conn->state = BT_CLOSED;
1636 mgmt_connect_failed(hdev, &cp->peer_addr, conn->type,
1637 conn->dst_type, status);
1638 hci_proto_connect_cfm(conn, status);
1639 hci_conn_del(conn);
1640 }
1641 } else {
1642 if (!conn) {
1643 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1644 if (conn) {
1645 conn->dst_type = cp->peer_addr_type;
1646 conn->out = true;
1647 } else {
1648 BT_ERR("No memory for new connection");
1649 }
1650 }
1651 }
1652
1653 hci_dev_unlock(hdev);
1654 }
1655
1656 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1657 {
1658 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1659 }
1660
1661 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1662 {
1663 __u8 status = *((__u8 *) skb->data);
1664 struct discovery_state *discov = &hdev->discovery;
1665 struct inquiry_entry *e;
1666
1667 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1668
1669 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1670
1671 hci_conn_check_pending(hdev);
1672
1673 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1674 return;
1675
1676 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1677 return;
1678
1679 hci_dev_lock(hdev);
1680
1681 if (discov->state != DISCOVERY_FINDING)
1682 goto unlock;
1683
1684 if (list_empty(&discov->resolve)) {
1685 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1686 goto unlock;
1687 }
1688
1689 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1690 if (e && hci_resolve_name(hdev, e) == 0) {
1691 e->name_state = NAME_PENDING;
1692 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1693 } else {
1694 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1695 }
1696
1697 unlock:
1698 hci_dev_unlock(hdev);
1699 }
1700
1701 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1702 {
1703 struct inquiry_data data;
1704 struct inquiry_info *info = (void *) (skb->data + 1);
1705 int num_rsp = *((__u8 *) skb->data);
1706
1707 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1708
1709 if (!num_rsp)
1710 return;
1711
1712 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1713 return;
1714
1715 hci_dev_lock(hdev);
1716
1717 for (; num_rsp; num_rsp--, info++) {
1718 bool name_known, ssp;
1719
1720 bacpy(&data.bdaddr, &info->bdaddr);
1721 data.pscan_rep_mode = info->pscan_rep_mode;
1722 data.pscan_period_mode = info->pscan_period_mode;
1723 data.pscan_mode = info->pscan_mode;
1724 memcpy(data.dev_class, info->dev_class, 3);
1725 data.clock_offset = info->clock_offset;
1726 data.rssi = 0x00;
1727 data.ssp_mode = 0x00;
1728
1729 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1730 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1731 info->dev_class, 0, !name_known, ssp, NULL,
1732 0);
1733 }
1734
1735 hci_dev_unlock(hdev);
1736 }
1737
1738 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1739 {
1740 struct hci_ev_conn_complete *ev = (void *) skb->data;
1741 struct hci_conn *conn;
1742
1743 BT_DBG("%s", hdev->name);
1744
1745 hci_dev_lock(hdev);
1746
1747 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1748 if (!conn) {
1749 if (ev->link_type != SCO_LINK)
1750 goto unlock;
1751
1752 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1753 if (!conn)
1754 goto unlock;
1755
1756 conn->type = SCO_LINK;
1757 }
1758
1759 if (!ev->status) {
1760 conn->handle = __le16_to_cpu(ev->handle);
1761
1762 if (conn->type == ACL_LINK) {
1763 conn->state = BT_CONFIG;
1764 hci_conn_hold(conn);
1765 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1766 } else
1767 conn->state = BT_CONNECTED;
1768
1769 hci_conn_hold_device(conn);
1770 hci_conn_add_sysfs(conn);
1771
1772 if (test_bit(HCI_AUTH, &hdev->flags))
1773 conn->link_mode |= HCI_LM_AUTH;
1774
1775 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1776 conn->link_mode |= HCI_LM_ENCRYPT;
1777
1778 /* Get remote features */
1779 if (conn->type == ACL_LINK) {
1780 struct hci_cp_read_remote_features cp;
1781 cp.handle = ev->handle;
1782 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1783 sizeof(cp), &cp);
1784 }
1785
1786 /* Set packet type for incoming connection */
1787 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1788 struct hci_cp_change_conn_ptype cp;
1789 cp.handle = ev->handle;
1790 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1791 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1792 &cp);
1793 }
1794 } else {
1795 conn->state = BT_CLOSED;
1796 if (conn->type == ACL_LINK)
1797 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1798 conn->dst_type, ev->status);
1799 }
1800
1801 if (conn->type == ACL_LINK)
1802 hci_sco_setup(conn, ev->status);
1803
1804 if (ev->status) {
1805 hci_proto_connect_cfm(conn, ev->status);
1806 hci_conn_del(conn);
1807 } else if (ev->link_type != ACL_LINK)
1808 hci_proto_connect_cfm(conn, ev->status);
1809
1810 unlock:
1811 hci_dev_unlock(hdev);
1812
1813 hci_conn_check_pending(hdev);
1814 }
1815
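/* Incoming connection request: accept it unless the link mode forbids it
 * or the peer is blacklisted, in which case it is rejected with
 * HCI_ERROR_REJ_BAD_ADDR.
 */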
1816 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1817 {
1818 struct hci_ev_conn_request *ev = (void *) skb->data;
1819 int mask = hdev->link_mode;
1820
1821 BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr),
1822 ev->link_type);
1823
1824 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1825
1826 if ((mask & HCI_LM_ACCEPT) &&
1827 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1828 /* Connection accepted */
1829 struct inquiry_entry *ie;
1830 struct hci_conn *conn;
1831
1832 hci_dev_lock(hdev);
1833
1834 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1835 if (ie)
1836 memcpy(ie->data.dev_class, ev->dev_class, 3);
1837
1838 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1839 &ev->bdaddr);
1840 if (!conn) {
1841 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1842 if (!conn) {
1843 BT_ERR("No memory for new connection");
1844 hci_dev_unlock(hdev);
1845 return;
1846 }
1847 }
1848
1849 memcpy(conn->dev_class, ev->dev_class, 3);
1850 conn->state = BT_CONNECT;
1851
1852 hci_dev_unlock(hdev);
1853
1854 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1855 struct hci_cp_accept_conn_req cp;
1856
1857 bacpy(&cp.bdaddr, &ev->bdaddr);
1858
1859 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1860 cp.role = 0x00; /* Become master */
1861 else
1862 cp.role = 0x01; /* Remain slave */
1863
1864 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1865 &cp);
1866 } else {
1867 struct hci_cp_accept_sync_conn_req cp;
1868
1869 bacpy(&cp.bdaddr, &ev->bdaddr);
1870 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1871
1872 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1873 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1874 cp.max_latency = __constant_cpu_to_le16(0xffff);
1875 cp.content_format = cpu_to_le16(hdev->voice_setting);
1876 cp.retrans_effort = 0xff;
1877
1878 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1879 sizeof(cp), &cp);
1880 }
1881 } else {
1882 /* Connection rejected */
1883 struct hci_cp_reject_conn_req cp;
1884
1885 bacpy(&cp.bdaddr, &ev->bdaddr);
1886 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1887 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1888 }
1889 }
1890
1891 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1892 {
1893 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1894 struct hci_conn *conn;
1895
1896 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1897
1898 hci_dev_lock(hdev);
1899
1900 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1901 if (!conn)
1902 goto unlock;
1903
1904 if (ev->status == 0)
1905 conn->state = BT_CLOSED;
1906
1907 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1908 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1909 if (ev->status != 0)
1910 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1911 conn->dst_type, ev->status);
1912 else
1913 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1914 conn->dst_type);
1915 }
1916
1917 if (ev->status == 0) {
1918 if (conn->type == ACL_LINK && conn->flush_key)
1919 hci_remove_link_key(hdev, &conn->dst);
1920 hci_proto_disconn_cfm(conn, ev->reason);
1921 hci_conn_del(conn);
1922 }
1923
1924 unlock:
1925 hci_dev_unlock(hdev);
1926 }
1927
1928 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1929 {
1930 struct hci_ev_auth_complete *ev = (void *) skb->data;
1931 struct hci_conn *conn;
1932
1933 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1934
1935 hci_dev_lock(hdev);
1936
1937 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1938 if (!conn)
1939 goto unlock;
1940
1941 if (!ev->status) {
1942 if (!hci_conn_ssp_enabled(conn) &&
1943 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1944 BT_INFO("re-auth of legacy device is not possible.");
1945 } else {
1946 conn->link_mode |= HCI_LM_AUTH;
1947 conn->sec_level = conn->pending_sec_level;
1948 }
1949 } else {
1950 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1951 ev->status);
1952 }
1953
1954 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1955 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1956
1957 if (conn->state == BT_CONFIG) {
1958 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1959 struct hci_cp_set_conn_encrypt cp;
1960 cp.handle = ev->handle;
1961 cp.encrypt = 0x01;
1962 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1963 &cp);
1964 } else {
1965 conn->state = BT_CONNECTED;
1966 hci_proto_connect_cfm(conn, ev->status);
1967 hci_conn_put(conn);
1968 }
1969 } else {
1970 hci_auth_cfm(conn, ev->status);
1971
1972 hci_conn_hold(conn);
1973 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1974 hci_conn_put(conn);
1975 }
1976
1977 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1978 if (!ev->status) {
1979 struct hci_cp_set_conn_encrypt cp;
1980 cp.handle = ev->handle;
1981 cp.encrypt = 0x01;
1982 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1983 &cp);
1984 } else {
1985 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1986 hci_encrypt_cfm(conn, ev->status, 0x00);
1987 }
1988 }
1989
1990 unlock:
1991 hci_dev_unlock(hdev);
1992 }
1993
1994 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1995 {
1996 struct hci_ev_remote_name *ev = (void *) skb->data;
1997 struct hci_conn *conn;
1998
1999 BT_DBG("%s", hdev->name);
2000
2001 hci_conn_check_pending(hdev);
2002
2003 hci_dev_lock(hdev);
2004
2005 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2006
2007 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2008 goto check_auth;
2009
2010 if (ev->status == 0)
2011 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2012 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2013 else
2014 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2015
2016 check_auth:
2017 if (!conn)
2018 goto unlock;
2019
2020 if (!hci_outgoing_auth_needed(hdev, conn))
2021 goto unlock;
2022
2023 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2024 struct hci_cp_auth_requested cp;
2025 cp.handle = __cpu_to_le16(conn->handle);
2026 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2027 }
2028
2029 unlock:
2030 hci_dev_unlock(hdev);
2031 }
2032
2033 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2034 {
2035 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2036 struct hci_conn *conn;
2037
2038 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2039
2040 hci_dev_lock(hdev);
2041
2042 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2043 if (conn) {
2044 if (!ev->status) {
2045 if (ev->encrypt) {
2046 /* Encryption implies authentication */
2047 conn->link_mode |= HCI_LM_AUTH;
2048 conn->link_mode |= HCI_LM_ENCRYPT;
2049 conn->sec_level = conn->pending_sec_level;
2050 } else
2051 conn->link_mode &= ~HCI_LM_ENCRYPT;
2052 }
2053
2054 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2055
2056 if (ev->status && conn->state == BT_CONNECTED) {
2057 hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
2058 hci_conn_put(conn);
2059 goto unlock;
2060 }
2061
2062 if (conn->state == BT_CONFIG) {
2063 if (!ev->status)
2064 conn->state = BT_CONNECTED;
2065
2066 hci_proto_connect_cfm(conn, ev->status);
2067 hci_conn_put(conn);
2068 } else
2069 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2070 }
2071
2072 unlock:
2073 hci_dev_unlock(hdev);
2074 }
2075
2076 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2077 struct sk_buff *skb)
2078 {
2079 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2080 struct hci_conn *conn;
2081
2082 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2083
2084 hci_dev_lock(hdev);
2085
2086 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2087 if (conn) {
2088 if (!ev->status)
2089 conn->link_mode |= HCI_LM_SECURE;
2090
2091 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2092
2093 hci_key_change_cfm(conn, ev->status);
2094 }
2095
2096 hci_dev_unlock(hdev);
2097 }
2098
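/* Read Remote Supported Features complete: cache the remote feature
 * mask and, while the connection is still in BT_CONFIG, either fetch
 * the extended features page (if both sides are SSP capable) or go on
 * with the remote name request / mgmt connected notification.
 */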
2099 static void hci_remote_features_evt(struct hci_dev *hdev,
2100 struct sk_buff *skb)
2101 {
2102 struct hci_ev_remote_features *ev = (void *) skb->data;
2103 struct hci_conn *conn;
2104
2105 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2106
2107 hci_dev_lock(hdev);
2108
2109 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2110 if (!conn)
2111 goto unlock;
2112
2113 if (!ev->status)
2114 memcpy(conn->features, ev->features, 8);
2115
2116 if (conn->state != BT_CONFIG)
2117 goto unlock;
2118
2119 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2120 struct hci_cp_read_remote_ext_features cp;
2121 cp.handle = ev->handle;
2122 cp.page = 0x01;
2123 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2124 sizeof(cp), &cp);
2125 goto unlock;
2126 }
2127
2128 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2129 struct hci_cp_remote_name_req cp;
2130 memset(&cp, 0, sizeof(cp));
2131 bacpy(&cp.bdaddr, &conn->dst);
2132 cp.pscan_rep_mode = 0x02;
2133 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2134 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2135 mgmt_device_connected(hdev, &conn->dst, conn->type,
2136 conn->dst_type, 0, NULL, 0,
2137 conn->dev_class);
2138
2139 if (!hci_outgoing_auth_needed(hdev, conn)) {
2140 conn->state = BT_CONNECTED;
2141 hci_proto_connect_cfm(conn, ev->status);
2142 hci_conn_put(conn);
2143 }
2144
2145 unlock:
2146 hci_dev_unlock(hdev);
2147 }
2148
2149 static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2150 {
2151 BT_DBG("%s", hdev->name);
2152 }
2153
2154 static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
2155 struct sk_buff *skb)
2156 {
2157 BT_DBG("%s", hdev->name);
2158 }
2159
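/* Command Complete event: dispatch to the matching hci_cc_* handler,
 * cancel the command timeout and, if the controller signals that it
 * can accept more commands, restart the command queue.
 */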
2160 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2161 {
2162 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2163 __u16 opcode;
2164
2165 skb_pull(skb, sizeof(*ev));
2166
2167 opcode = __le16_to_cpu(ev->opcode);
2168
2169 switch (opcode) {
2170 case HCI_OP_INQUIRY_CANCEL:
2171 hci_cc_inquiry_cancel(hdev, skb);
2172 break;
2173
2174 case HCI_OP_PERIODIC_INQ:
2175 hci_cc_periodic_inq(hdev, skb);
2176 break;
2177
2178 case HCI_OP_EXIT_PERIODIC_INQ:
2179 hci_cc_exit_periodic_inq(hdev, skb);
2180 break;
2181
2182 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2183 hci_cc_remote_name_req_cancel(hdev, skb);
2184 break;
2185
2186 case HCI_OP_ROLE_DISCOVERY:
2187 hci_cc_role_discovery(hdev, skb);
2188 break;
2189
2190 case HCI_OP_READ_LINK_POLICY:
2191 hci_cc_read_link_policy(hdev, skb);
2192 break;
2193
2194 case HCI_OP_WRITE_LINK_POLICY:
2195 hci_cc_write_link_policy(hdev, skb);
2196 break;
2197
2198 case HCI_OP_READ_DEF_LINK_POLICY:
2199 hci_cc_read_def_link_policy(hdev, skb);
2200 break;
2201
2202 case HCI_OP_WRITE_DEF_LINK_POLICY:
2203 hci_cc_write_def_link_policy(hdev, skb);
2204 break;
2205
2206 case HCI_OP_RESET:
2207 hci_cc_reset(hdev, skb);
2208 break;
2209
2210 case HCI_OP_WRITE_LOCAL_NAME:
2211 hci_cc_write_local_name(hdev, skb);
2212 break;
2213
2214 case HCI_OP_READ_LOCAL_NAME:
2215 hci_cc_read_local_name(hdev, skb);
2216 break;
2217
2218 case HCI_OP_WRITE_AUTH_ENABLE:
2219 hci_cc_write_auth_enable(hdev, skb);
2220 break;
2221
2222 case HCI_OP_WRITE_ENCRYPT_MODE:
2223 hci_cc_write_encrypt_mode(hdev, skb);
2224 break;
2225
2226 case HCI_OP_WRITE_SCAN_ENABLE:
2227 hci_cc_write_scan_enable(hdev, skb);
2228 break;
2229
2230 case HCI_OP_READ_CLASS_OF_DEV:
2231 hci_cc_read_class_of_dev(hdev, skb);
2232 break;
2233
2234 case HCI_OP_WRITE_CLASS_OF_DEV:
2235 hci_cc_write_class_of_dev(hdev, skb);
2236 break;
2237
2238 case HCI_OP_READ_VOICE_SETTING:
2239 hci_cc_read_voice_setting(hdev, skb);
2240 break;
2241
2242 case HCI_OP_WRITE_VOICE_SETTING:
2243 hci_cc_write_voice_setting(hdev, skb);
2244 break;
2245
2246 case HCI_OP_HOST_BUFFER_SIZE:
2247 hci_cc_host_buffer_size(hdev, skb);
2248 break;
2249
2250 case HCI_OP_WRITE_SSP_MODE:
2251 hci_cc_write_ssp_mode(hdev, skb);
2252 break;
2253
2254 case HCI_OP_READ_LOCAL_VERSION:
2255 hci_cc_read_local_version(hdev, skb);
2256 break;
2257
2258 case HCI_OP_READ_LOCAL_COMMANDS:
2259 hci_cc_read_local_commands(hdev, skb);
2260 break;
2261
2262 case HCI_OP_READ_LOCAL_FEATURES:
2263 hci_cc_read_local_features(hdev, skb);
2264 break;
2265
2266 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2267 hci_cc_read_local_ext_features(hdev, skb);
2268 break;
2269
2270 case HCI_OP_READ_BUFFER_SIZE:
2271 hci_cc_read_buffer_size(hdev, skb);
2272 break;
2273
2274 case HCI_OP_READ_BD_ADDR:
2275 hci_cc_read_bd_addr(hdev, skb);
2276 break;
2277
2278 case HCI_OP_READ_DATA_BLOCK_SIZE:
2279 hci_cc_read_data_block_size(hdev, skb);
2280 break;
2281
2282 case HCI_OP_WRITE_CA_TIMEOUT:
2283 hci_cc_write_ca_timeout(hdev, skb);
2284 break;
2285
2286 case HCI_OP_READ_FLOW_CONTROL_MODE:
2287 hci_cc_read_flow_control_mode(hdev, skb);
2288 break;
2289
2290 case HCI_OP_READ_LOCAL_AMP_INFO:
2291 hci_cc_read_local_amp_info(hdev, skb);
2292 break;
2293
2294 case HCI_OP_DELETE_STORED_LINK_KEY:
2295 hci_cc_delete_stored_link_key(hdev, skb);
2296 break;
2297
2298 case HCI_OP_SET_EVENT_MASK:
2299 hci_cc_set_event_mask(hdev, skb);
2300 break;
2301
2302 case HCI_OP_WRITE_INQUIRY_MODE:
2303 hci_cc_write_inquiry_mode(hdev, skb);
2304 break;
2305
2306 case HCI_OP_READ_INQ_RSP_TX_POWER:
2307 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2308 break;
2309
2310 case HCI_OP_SET_EVENT_FLT:
2311 hci_cc_set_event_flt(hdev, skb);
2312 break;
2313
2314 case HCI_OP_PIN_CODE_REPLY:
2315 hci_cc_pin_code_reply(hdev, skb);
2316 break;
2317
2318 case HCI_OP_PIN_CODE_NEG_REPLY:
2319 hci_cc_pin_code_neg_reply(hdev, skb);
2320 break;
2321
2322 case HCI_OP_READ_LOCAL_OOB_DATA:
2323 hci_cc_read_local_oob_data_reply(hdev, skb);
2324 break;
2325
2326 case HCI_OP_LE_READ_BUFFER_SIZE:
2327 hci_cc_le_read_buffer_size(hdev, skb);
2328 break;
2329
2330 case HCI_OP_USER_CONFIRM_REPLY:
2331 hci_cc_user_confirm_reply(hdev, skb);
2332 break;
2333
2334 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2335 hci_cc_user_confirm_neg_reply(hdev, skb);
2336 break;
2337
2338 case HCI_OP_USER_PASSKEY_REPLY:
2339 hci_cc_user_passkey_reply(hdev, skb);
2340 break;
2341
2342 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2343 hci_cc_user_passkey_neg_reply(hdev, skb);
2344 break;
2345
2346 case HCI_OP_LE_SET_SCAN_PARAM:
2347 hci_cc_le_set_scan_param(hdev, skb);
2348 break;
2349
2350 case HCI_OP_LE_SET_SCAN_ENABLE:
2351 hci_cc_le_set_scan_enable(hdev, skb);
2352 break;
2353
2354 case HCI_OP_LE_LTK_REPLY:
2355 hci_cc_le_ltk_reply(hdev, skb);
2356 break;
2357
2358 case HCI_OP_LE_LTK_NEG_REPLY:
2359 hci_cc_le_ltk_neg_reply(hdev, skb);
2360 break;
2361
2362 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2363 hci_cc_write_le_host_supported(hdev, skb);
2364 break;
2365
2366 default:
2367 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2368 break;
2369 }
2370
2371 if (ev->opcode != HCI_OP_NOP)
2372 del_timer(&hdev->cmd_timer);
2373
2374 if (ev->ncmd) {
2375 atomic_set(&hdev->cmd_cnt, 1);
2376 if (!skb_queue_empty(&hdev->cmd_q))
2377 queue_work(hdev->workqueue, &hdev->cmd_work);
2378 }
2379 }
2380
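/* Command Status event: dispatch to the matching hci_cs_* handler and
 * resume the command queue, unless an HCI reset is still in progress.
 */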
2381 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2382 {
2383 struct hci_ev_cmd_status *ev = (void *) skb->data;
2384 __u16 opcode;
2385
2386 skb_pull(skb, sizeof(*ev));
2387
2388 opcode = __le16_to_cpu(ev->opcode);
2389
2390 switch (opcode) {
2391 case HCI_OP_INQUIRY:
2392 hci_cs_inquiry(hdev, ev->status);
2393 break;
2394
2395 case HCI_OP_CREATE_CONN:
2396 hci_cs_create_conn(hdev, ev->status);
2397 break;
2398
2399 case HCI_OP_ADD_SCO:
2400 hci_cs_add_sco(hdev, ev->status);
2401 break;
2402
2403 case HCI_OP_AUTH_REQUESTED:
2404 hci_cs_auth_requested(hdev, ev->status);
2405 break;
2406
2407 case HCI_OP_SET_CONN_ENCRYPT:
2408 hci_cs_set_conn_encrypt(hdev, ev->status);
2409 break;
2410
2411 case HCI_OP_REMOTE_NAME_REQ:
2412 hci_cs_remote_name_req(hdev, ev->status);
2413 break;
2414
2415 case HCI_OP_READ_REMOTE_FEATURES:
2416 hci_cs_read_remote_features(hdev, ev->status);
2417 break;
2418
2419 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2420 hci_cs_read_remote_ext_features(hdev, ev->status);
2421 break;
2422
2423 case HCI_OP_SETUP_SYNC_CONN:
2424 hci_cs_setup_sync_conn(hdev, ev->status);
2425 break;
2426
2427 case HCI_OP_SNIFF_MODE:
2428 hci_cs_sniff_mode(hdev, ev->status);
2429 break;
2430
2431 case HCI_OP_EXIT_SNIFF_MODE:
2432 hci_cs_exit_sniff_mode(hdev, ev->status);
2433 break;
2434
2435 case HCI_OP_DISCONNECT:
2436 hci_cs_disconnect(hdev, ev->status);
2437 break;
2438
2439 case HCI_OP_LE_CREATE_CONN:
2440 hci_cs_le_create_conn(hdev, ev->status);
2441 break;
2442
2443 case HCI_OP_LE_START_ENC:
2444 hci_cs_le_start_enc(hdev, ev->status);
2445 break;
2446
2447 default:
2448 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2449 break;
2450 }
2451
2452 if (ev->opcode != HCI_OP_NOP)
2453 del_timer(&hdev->cmd_timer);
2454
2455 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2456 atomic_set(&hdev->cmd_cnt, 1);
2457 if (!skb_queue_empty(&hdev->cmd_q))
2458 queue_work(hdev->workqueue, &hdev->cmd_work);
2459 }
2460 }
2461
2462 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2463 {
2464 struct hci_ev_role_change *ev = (void *) skb->data;
2465 struct hci_conn *conn;
2466
2467 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2468
2469 hci_dev_lock(hdev);
2470
2471 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2472 if (conn) {
2473 if (!ev->status) {
2474 if (ev->role)
2475 conn->link_mode &= ~HCI_LM_MASTER;
2476 else
2477 conn->link_mode |= HCI_LM_MASTER;
2478 }
2479
2480 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2481
2482 hci_role_switch_cfm(conn, ev->status, ev->role);
2483 }
2484
2485 hci_dev_unlock(hdev);
2486 }
2487
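/* Number of Completed Packets event (packet-based flow control): credit
 * the per-link-type packet counters for every reported handle and kick
 * the TX work to push out any queued data.
 */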
2488 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2489 {
2490 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2491 int i;
2492
2493 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2494 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2495 return;
2496 }
2497
2498 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2499 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2500 BT_DBG("%s bad parameters", hdev->name);
2501 return;
2502 }
2503
2504 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2505
2506 for (i = 0; i < ev->num_hndl; i++) {
2507 struct hci_comp_pkts_info *info = &ev->handles[i];
2508 struct hci_conn *conn;
2509 __u16 handle, count;
2510
2511 handle = __le16_to_cpu(info->handle);
2512 count = __le16_to_cpu(info->count);
2513
2514 conn = hci_conn_hash_lookup_handle(hdev, handle);
2515 if (!conn)
2516 continue;
2517
2518 conn->sent -= count;
2519
2520 switch (conn->type) {
2521 case ACL_LINK:
2522 hdev->acl_cnt += count;
2523 if (hdev->acl_cnt > hdev->acl_pkts)
2524 hdev->acl_cnt = hdev->acl_pkts;
2525 break;
2526
2527 case LE_LINK:
2528 if (hdev->le_pkts) {
2529 hdev->le_cnt += count;
2530 if (hdev->le_cnt > hdev->le_pkts)
2531 hdev->le_cnt = hdev->le_pkts;
2532 } else {
2533 hdev->acl_cnt += count;
2534 if (hdev->acl_cnt > hdev->acl_pkts)
2535 hdev->acl_cnt = hdev->acl_pkts;
2536 }
2537 break;
2538
2539 case SCO_LINK:
2540 hdev->sco_cnt += count;
2541 if (hdev->sco_cnt > hdev->sco_pkts)
2542 hdev->sco_cnt = hdev->sco_pkts;
2543 break;
2544
2545 default:
2546 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2547 break;
2548 }
2549 }
2550
2551 queue_work(hdev->workqueue, &hdev->tx_work);
2552 }
2553
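/* Number of Completed Data Blocks event (block-based flow control):
 * return the freed buffer blocks to the controller pool and restart TX.
 */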
2554 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2555 {
2556 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2557 int i;
2558
2559 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2560 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2561 return;
2562 }
2563
2564 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2565 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2566 BT_DBG("%s bad parameters", hdev->name);
2567 return;
2568 }
2569
2570 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2571 ev->num_hndl);
2572
2573 for (i = 0; i < ev->num_hndl; i++) {
2574 struct hci_comp_blocks_info *info = &ev->handles[i];
2575 struct hci_conn *conn;
2576 __u16 handle, block_count;
2577
2578 handle = __le16_to_cpu(info->handle);
2579 block_count = __le16_to_cpu(info->blocks);
2580
2581 conn = hci_conn_hash_lookup_handle(hdev, handle);
2582 if (!conn)
2583 continue;
2584
2585 conn->sent -= block_count;
2586
2587 switch (conn->type) {
2588 case ACL_LINK:
2589 hdev->block_cnt += block_count;
2590 if (hdev->block_cnt > hdev->num_blocks)
2591 hdev->block_cnt = hdev->num_blocks;
2592 break;
2593
2594 default:
2595 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2596 break;
2597 }
2598 }
2599
2600 queue_work(hdev->workqueue, &hdev->tx_work);
2601 }
2602
2603 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2604 {
2605 struct hci_ev_mode_change *ev = (void *) skb->data;
2606 struct hci_conn *conn;
2607
2608 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2609
2610 hci_dev_lock(hdev);
2611
2612 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2613 if (conn) {
2614 conn->mode = ev->mode;
2615 conn->interval = __le16_to_cpu(ev->interval);
2616
2617 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2618 &conn->flags)) {
2619 if (conn->mode == HCI_CM_ACTIVE)
2620 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2621 else
2622 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2623 }
2624
2625 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2626 hci_sco_setup(conn, ev->status);
2627 }
2628
2629 hci_dev_unlock(hdev);
2630 }
2631
2632 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2633 {
2634 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2635 struct hci_conn *conn;
2636
2637 BT_DBG("%s", hdev->name);
2638
2639 hci_dev_lock(hdev);
2640
2641 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2642 if (!conn)
2643 goto unlock;
2644
2645 if (conn->state == BT_CONNECTED) {
2646 hci_conn_hold(conn);
2647 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2648 hci_conn_put(conn);
2649 }
2650
2651 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2652 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2653 sizeof(ev->bdaddr), &ev->bdaddr);
2654 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2655 u8 secure;
2656
2657 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2658 secure = 1;
2659 else
2660 secure = 0;
2661
2662 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2663 }
2664
2665 unlock:
2666 hci_dev_unlock(hdev);
2667 }
2668
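/* Link Key Request event: look up a stored key for the peer and reply
 * with it, unless policy forbids its use (debug keys disabled, an
 * unauthenticated key on a link that needs MITM, or a short-PIN
 * combination key for high security); otherwise send a negative reply.
 */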
2669 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2670 {
2671 struct hci_ev_link_key_req *ev = (void *) skb->data;
2672 struct hci_cp_link_key_reply cp;
2673 struct hci_conn *conn;
2674 struct link_key *key;
2675
2676 BT_DBG("%s", hdev->name);
2677
2678 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2679 return;
2680
2681 hci_dev_lock(hdev);
2682
2683 key = hci_find_link_key(hdev, &ev->bdaddr);
2684 if (!key) {
2685 BT_DBG("%s link key not found for %s", hdev->name,
2686 batostr(&ev->bdaddr));
2687 goto not_found;
2688 }
2689
2690 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2691 batostr(&ev->bdaddr));
2692
2693 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2694 key->type == HCI_LK_DEBUG_COMBINATION) {
2695 BT_DBG("%s ignoring debug key", hdev->name);
2696 goto not_found;
2697 }
2698
2699 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2700 if (conn) {
2701 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2702 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2703 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2704 goto not_found;
2705 }
2706
2707 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2708 conn->pending_sec_level == BT_SECURITY_HIGH) {
2709 BT_DBG("%s ignoring key unauthenticated for high security",
2710 hdev->name);
2711 goto not_found;
2712 }
2713
2714 conn->key_type = key->type;
2715 conn->pin_length = key->pin_len;
2716 }
2717
2718 bacpy(&cp.bdaddr, &ev->bdaddr);
2719 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2720
2721 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2722
2723 hci_dev_unlock(hdev);
2724
2725 return;
2726
2727 not_found:
2728 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2729 hci_dev_unlock(hdev);
2730 }
2731
2732 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2733 {
2734 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2735 struct hci_conn *conn;
2736 u8 pin_len = 0;
2737
2738 BT_DBG("%s", hdev->name);
2739
2740 hci_dev_lock(hdev);
2741
2742 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2743 if (conn) {
2744 hci_conn_hold(conn);
2745 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2746 pin_len = conn->pin_length;
2747
2748 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2749 conn->key_type = ev->key_type;
2750
2751 hci_conn_put(conn);
2752 }
2753
2754 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2755 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2756 ev->key_type, pin_len);
2757
2758 hci_dev_unlock(hdev);
2759 }
2760
2761 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2762 {
2763 struct hci_ev_clock_offset *ev = (void *) skb->data;
2764 struct hci_conn *conn;
2765
2766 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2767
2768 hci_dev_lock(hdev);
2769
2770 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2771 if (conn && !ev->status) {
2772 struct inquiry_entry *ie;
2773
2774 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2775 if (ie) {
2776 ie->data.clock_offset = ev->clock_offset;
2777 ie->timestamp = jiffies;
2778 }
2779 }
2780
2781 hci_dev_unlock(hdev);
2782 }
2783
2784 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2785 {
2786 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2787 struct hci_conn *conn;
2788
2789 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2790
2791 hci_dev_lock(hdev);
2792
2793 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2794 if (conn && !ev->status)
2795 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2796
2797 hci_dev_unlock(hdev);
2798 }
2799
2800 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2801 {
2802 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2803 struct inquiry_entry *ie;
2804
2805 BT_DBG("%s", hdev->name);
2806
2807 hci_dev_lock(hdev);
2808
2809 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2810 if (ie) {
2811 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2812 ie->timestamp = jiffies;
2813 }
2814
2815 hci_dev_unlock(hdev);
2816 }
2817
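/* Inquiry Result with RSSI event: both response formats (with and
 * without the page scan mode field) are parsed into inquiry_data,
 * merged into the inquiry cache and reported to mgmt as found devices.
 */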
2818 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2819 struct sk_buff *skb)
2820 {
2821 struct inquiry_data data;
2822 int num_rsp = *((__u8 *) skb->data);
2823 bool name_known, ssp;
2824
2825 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2826
2827 if (!num_rsp)
2828 return;
2829
2830 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2831 return;
2832
2833 hci_dev_lock(hdev);
2834
2835 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2836 struct inquiry_info_with_rssi_and_pscan_mode *info;
2837 info = (void *) (skb->data + 1);
2838
2839 for (; num_rsp; num_rsp--, info++) {
2840 bacpy(&data.bdaddr, &info->bdaddr);
2841 data.pscan_rep_mode = info->pscan_rep_mode;
2842 data.pscan_period_mode = info->pscan_period_mode;
2843 data.pscan_mode = info->pscan_mode;
2844 memcpy(data.dev_class, info->dev_class, 3);
2845 data.clock_offset = info->clock_offset;
2846 data.rssi = info->rssi;
2847 data.ssp_mode = 0x00;
2848
2849 name_known = hci_inquiry_cache_update(hdev, &data,
2850 false, &ssp);
2851 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2852 info->dev_class, info->rssi,
2853 !name_known, ssp, NULL, 0);
2854 }
2855 } else {
2856 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2857
2858 for (; num_rsp; num_rsp--, info++) {
2859 bacpy(&data.bdaddr, &info->bdaddr);
2860 data.pscan_rep_mode = info->pscan_rep_mode;
2861 data.pscan_period_mode = info->pscan_period_mode;
2862 data.pscan_mode = 0x00;
2863 memcpy(data.dev_class, info->dev_class, 3);
2864 data.clock_offset = info->clock_offset;
2865 data.rssi = info->rssi;
2866 data.ssp_mode = 0x00;
2867 name_known = hci_inquiry_cache_update(hdev, &data,
2868 false, &ssp);
2869 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2870 info->dev_class, info->rssi,
2871 !name_known, ssp, NULL, 0);
2872 }
2873 }
2874
2875 hci_dev_unlock(hdev);
2876 }
2877
2878 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2879 struct sk_buff *skb)
2880 {
2881 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2882 struct hci_conn *conn;
2883
2884 BT_DBG("%s", hdev->name);
2885
2886 hci_dev_lock(hdev);
2887
2888 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2889 if (!conn)
2890 goto unlock;
2891
2892 if (!ev->status && ev->page == 0x01) {
2893 struct inquiry_entry *ie;
2894
2895 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2896 if (ie)
2897 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2898
2899 if (ev->features[0] & LMP_HOST_SSP)
2900 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2901 }
2902
2903 if (conn->state != BT_CONFIG)
2904 goto unlock;
2905
2906 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2907 struct hci_cp_remote_name_req cp;
2908 memset(&cp, 0, sizeof(cp));
2909 bacpy(&cp.bdaddr, &conn->dst);
2910 cp.pscan_rep_mode = 0x02;
2911 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2912 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2913 mgmt_device_connected(hdev, &conn->dst, conn->type,
2914 conn->dst_type, 0, NULL, 0,
2915 conn->dev_class);
2916
2917 if (!hci_outgoing_auth_needed(hdev, conn)) {
2918 conn->state = BT_CONNECTED;
2919 hci_proto_connect_cfm(conn, ev->status);
2920 hci_conn_put(conn);
2921 }
2922
2923 unlock:
2924 hci_dev_unlock(hdev);
2925 }
2926
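/* Synchronous Connection Complete event: finish SCO/eSCO setup. For the
 * status codes that indicate an unsupported or rejected parameter set,
 * retry once with a reduced packet type before closing the connection.
 */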
2927 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2928 struct sk_buff *skb)
2929 {
2930 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2931 struct hci_conn *conn;
2932
2933 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2934
2935 hci_dev_lock(hdev);
2936
2937 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2938 if (!conn) {
2939 if (ev->link_type == ESCO_LINK)
2940 goto unlock;
2941
2942 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2943 if (!conn)
2944 goto unlock;
2945
2946 conn->type = SCO_LINK;
2947 }
2948
2949 switch (ev->status) {
2950 case 0x00:
2951 conn->handle = __le16_to_cpu(ev->handle);
2952 conn->state = BT_CONNECTED;
2953
2954 hci_conn_hold_device(conn);
2955 hci_conn_add_sysfs(conn);
2956 break;
2957
2958 case 0x11: /* Unsupported Feature or Parameter Value */
2959 case 0x1c: /* SCO interval rejected */
2960 case 0x1a: /* Unsupported Remote Feature */
2961 case 0x1f: /* Unspecified error */
2962 if (conn->out && conn->attempt < 2) {
2963 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2964 (hdev->esco_type & EDR_ESCO_MASK);
2965 hci_setup_sync(conn, conn->link->handle);
2966 goto unlock;
2967 }
2968 /* fall through */
2969
2970 default:
2971 conn->state = BT_CLOSED;
2972 break;
2973 }
2974
2975 hci_proto_connect_cfm(conn, ev->status);
2976 if (ev->status)
2977 hci_conn_del(conn);
2978
2979 unlock:
2980 hci_dev_unlock(hdev);
2981 }
2982
2983 static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2984 {
2985 BT_DBG("%s", hdev->name);
2986 }
2987
2988 static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2989 {
2990 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2991
2992 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2993 }
2994
2995 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
2996 struct sk_buff *skb)
2997 {
2998 struct inquiry_data data;
2999 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3000 int num_rsp = *((__u8 *) skb->data);
3001 size_t eir_len;
3002
3003 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3004
3005 if (!num_rsp)
3006 return;
3007
3008 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3009 return;
3010
3011 hci_dev_lock(hdev);
3012
3013 for (; num_rsp; num_rsp--, info++) {
3014 bool name_known, ssp;
3015
3016 bacpy(&data.bdaddr, &info->bdaddr);
3017 data.pscan_rep_mode = info->pscan_rep_mode;
3018 data.pscan_period_mode = info->pscan_period_mode;
3019 data.pscan_mode = 0x00;
3020 memcpy(data.dev_class, info->dev_class, 3);
3021 data.clock_offset = info->clock_offset;
3022 data.rssi = info->rssi;
3023 data.ssp_mode = 0x01;
3024
3025 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3026 name_known = eir_has_data_type(info->data,
3027 sizeof(info->data),
3028 EIR_NAME_COMPLETE);
3029 else
3030 name_known = true;
3031
3032 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3033 &ssp);
3034 eir_len = eir_get_length(info->data, sizeof(info->data));
3035 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3036 info->dev_class, info->rssi, !name_known,
3037 ssp, info->data, eir_len);
3038 }
3039
3040 hci_dev_unlock(hdev);
3041 }
3042
3043 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3044 struct sk_buff *skb)
3045 {
3046 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3047 struct hci_conn *conn;
3048
3049 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3050 __le16_to_cpu(ev->handle));
3051
3052 hci_dev_lock(hdev);
3053
3054 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3055 if (!conn)
3056 goto unlock;
3057
3058 if (!ev->status)
3059 conn->sec_level = conn->pending_sec_level;
3060
3061 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3062
3063 if (ev->status && conn->state == BT_CONNECTED) {
3064 hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
3065 hci_conn_put(conn);
3066 goto unlock;
3067 }
3068
3069 if (conn->state == BT_CONFIG) {
3070 if (!ev->status)
3071 conn->state = BT_CONNECTED;
3072
3073 hci_proto_connect_cfm(conn, ev->status);
3074 hci_conn_put(conn);
3075 } else {
3076 hci_auth_cfm(conn, ev->status);
3077
3078 hci_conn_hold(conn);
3079 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3080 hci_conn_put(conn);
3081 }
3082
3083 unlock:
3084 hci_dev_unlock(hdev);
3085 }
3086
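/* Work out the authentication requirements to send in our IO Capability
 * reply, based on what the remote side requested and on whether MITM
 * protection is actually achievable with the available IO capabilities.
 */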
3087 static u8 hci_get_auth_req(struct hci_conn *conn)
3088 {
3089	/* If the remote requests dedicated bonding, follow that lead */
3090 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3091		/* If both the remote and local IO capabilities allow MITM
3092		 * protection, then require it; otherwise don't. */
3093 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3094 return 0x02;
3095 else
3096 return 0x03;
3097 }
3098
3099	/* If the remote requests no-bonding, follow that lead */
3100 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3101 return conn->remote_auth | (conn->auth_type & 0x01);
3102
3103 return conn->auth_type;
3104 }
3105
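/* IO Capability Request event: if pairing is allowed, reply with our IO
 * capability, OOB data presence and authentication requirements;
 * otherwise send a negative reply (pairing not allowed).
 */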
3106 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3107 {
3108 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3109 struct hci_conn *conn;
3110
3111 BT_DBG("%s", hdev->name);
3112
3113 hci_dev_lock(hdev);
3114
3115 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3116 if (!conn)
3117 goto unlock;
3118
3119 hci_conn_hold(conn);
3120
3121 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3122 goto unlock;
3123
3124 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3125 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3126 struct hci_cp_io_capability_reply cp;
3127
3128 bacpy(&cp.bdaddr, &ev->bdaddr);
3129		/* Change the IO capability from KeyboardDisplay to DisplayYesNo,
3130		 * since the BT spec does not define KeyboardDisplay for this reply. */
3131 cp.capability = (conn->io_capability == 0x04) ?
3132 0x01 : conn->io_capability;
3133 conn->auth_type = hci_get_auth_req(conn);
3134 cp.authentication = conn->auth_type;
3135
3136 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3137 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3138 cp.oob_data = 0x01;
3139 else
3140 cp.oob_data = 0x00;
3141
3142 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3143 sizeof(cp), &cp);
3144 } else {
3145 struct hci_cp_io_capability_neg_reply cp;
3146
3147 bacpy(&cp.bdaddr, &ev->bdaddr);
3148 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3149
3150 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3151 sizeof(cp), &cp);
3152 }
3153
3154 unlock:
3155 hci_dev_unlock(hdev);
3156 }
3157
3158 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3159 {
3160 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3161 struct hci_conn *conn;
3162
3163 BT_DBG("%s", hdev->name);
3164
3165 hci_dev_lock(hdev);
3166
3167 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3168 if (!conn)
3169 goto unlock;
3170
3171 conn->remote_cap = ev->capability;
3172 conn->remote_auth = ev->authentication;
3173 if (ev->oob_data)
3174 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3175
3176 unlock:
3177 hci_dev_unlock(hdev);
3178 }
3179
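/* User Confirmation Request event (SSP numeric comparison): reject when
 * we need MITM protection but the remote side cannot provide it,
 * auto-accept (optionally after a configurable delay) in the just-works
 * case, and otherwise hand the request to user space via mgmt.
 */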
3180 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3181 struct sk_buff *skb)
3182 {
3183 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3184 int loc_mitm, rem_mitm, confirm_hint = 0;
3185 struct hci_conn *conn;
3186
3187 BT_DBG("%s", hdev->name);
3188
3189 hci_dev_lock(hdev);
3190
3191 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3192 goto unlock;
3193
3194 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3195 if (!conn)
3196 goto unlock;
3197
3198 loc_mitm = (conn->auth_type & 0x01);
3199 rem_mitm = (conn->remote_auth & 0x01);
3200
3201 /* If we require MITM but the remote device can't provide that
3202 * (it has NoInputNoOutput) then reject the confirmation
3203 * request. The only exception is when we're dedicated bonding
3204 * initiators (connect_cfm_cb set) since then we always have the MITM
3205 * bit set. */
3206 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3207 BT_DBG("Rejecting request: remote device can't provide MITM");
3208 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3209 sizeof(ev->bdaddr), &ev->bdaddr);
3210 goto unlock;
3211 }
3212
3213	/* If neither side requires MITM protection, auto-accept */
3214 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3215 (!rem_mitm || conn->io_capability == 0x03)) {
3216
3217		/* If we're not the initiator, request authorization to
3218 * proceed from user space (mgmt_user_confirm with
3219 * confirm_hint set to 1). */
3220 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3221 BT_DBG("Confirming auto-accept as acceptor");
3222 confirm_hint = 1;
3223 goto confirm;
3224 }
3225
3226 BT_DBG("Auto-accept of user confirmation with %ums delay",
3227 hdev->auto_accept_delay);
3228
3229 if (hdev->auto_accept_delay > 0) {
3230 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3231 mod_timer(&conn->auto_accept_timer, jiffies + delay);
3232 goto unlock;
3233 }
3234
3235 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3236 sizeof(ev->bdaddr), &ev->bdaddr);
3237 goto unlock;
3238 }
3239
3240 confirm:
3241 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3242 confirm_hint);
3243
3244 unlock:
3245 hci_dev_unlock(hdev);
3246 }
3247
3248 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3249 struct sk_buff *skb)
3250 {
3251 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3252
3253 BT_DBG("%s", hdev->name);
3254
3255 hci_dev_lock(hdev);
3256
3257 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3258 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3259
3260 hci_dev_unlock(hdev);
3261 }
3262
3263 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3264 struct sk_buff *skb)
3265 {
3266 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3267 struct hci_conn *conn;
3268
3269 BT_DBG("%s", hdev->name);
3270
3271 hci_dev_lock(hdev);
3272
3273 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3274 if (!conn)
3275 goto unlock;
3276
3277	/* To avoid duplicate auth_failed events to user space we check
3278	 * the HCI_CONN_AUTH_PEND flag, which will be set if we
3279	 * initiated the authentication. A traditional auth_complete
3280	 * event is always produced as initiator and is also mapped to
3281	 * the mgmt_auth_failed event. */
3282 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
3283 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3284 ev->status);
3285
3286 hci_conn_put(conn);
3287
3288 unlock:
3289 hci_dev_unlock(hdev);
3290 }
3291
3292 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3293 struct sk_buff *skb)
3294 {
3295 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3296 struct inquiry_entry *ie;
3297
3298 BT_DBG("%s", hdev->name);
3299
3300 hci_dev_lock(hdev);
3301
3302 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3303 if (ie)
3304 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3305
3306 hci_dev_unlock(hdev);
3307 }
3308
3309 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3310 struct sk_buff *skb)
3311 {
3312 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3313 struct oob_data *data;
3314
3315 BT_DBG("%s", hdev->name);
3316
3317 hci_dev_lock(hdev);
3318
3319 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3320 goto unlock;
3321
3322 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3323 if (data) {
3324 struct hci_cp_remote_oob_data_reply cp;
3325
3326 bacpy(&cp.bdaddr, &ev->bdaddr);
3327 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3328 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3329
3330 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3331 &cp);
3332 } else {
3333 struct hci_cp_remote_oob_data_neg_reply cp;
3334
3335 bacpy(&cp.bdaddr, &ev->bdaddr);
3336 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3337 &cp);
3338 }
3339
3340 unlock:
3341 hci_dev_unlock(hdev);
3342 }
3343
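/* LE Connection Complete event: on failure report the pending LE
 * connection attempt as failed; on success create or update the
 * hci_conn, mark it connected and notify mgmt and the upper layers.
 */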
3344 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3345 {
3346 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3347 struct hci_conn *conn;
3348
3349 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3350
3351 hci_dev_lock(hdev);
3352
3353 if (ev->status) {
3354 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3355 if (!conn)
3356 goto unlock;
3357
3358 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3359 conn->dst_type, ev->status);
3360 hci_proto_connect_cfm(conn, ev->status);
3361 conn->state = BT_CLOSED;
3362 hci_conn_del(conn);
3363 goto unlock;
3364 }
3365
3366 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3367 if (!conn) {
3368 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3369 if (!conn) {
3370 BT_ERR("No memory for new connection");
3371 hci_dev_unlock(hdev);
3372 return;
3373 }
3374
3375 conn->dst_type = ev->bdaddr_type;
3376 }
3377
3378 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3379 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3380 conn->dst_type, 0, NULL, 0, NULL);
3381
3382 conn->sec_level = BT_SECURITY_LOW;
3383 conn->handle = __le16_to_cpu(ev->handle);
3384 conn->state = BT_CONNECTED;
3385
3386 hci_conn_hold_device(conn);
3387 hci_conn_add_sysfs(conn);
3388
3389 hci_proto_connect_cfm(conn, ev->status);
3390
3391 unlock:
3392 hci_dev_unlock(hdev);
3393 }
3394
3395 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3396 {
3397 u8 num_reports = skb->data[0];
3398 void *ptr = &skb->data[1];
3399 s8 rssi;
3400
3401 hci_dev_lock(hdev);
3402
3403 while (num_reports--) {
3404 struct hci_ev_le_advertising_info *ev = ptr;
3405
3406 rssi = ev->data[ev->length];
3407 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3408 NULL, rssi, 0, 1, ev->data, ev->length);
3409
3410 ptr += sizeof(*ev) + ev->length + 1;
3411 }
3412
3413 hci_dev_unlock(hdev);
3414 }
3415
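/* LE Long Term Key Request event: look up the LTK matching the EDiv and
 * Rand values and hand it to the controller; STKs are single-use and
 * get removed once delivered. Send a negative reply if no key is found.
 */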
3416 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3417 {
3418 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3419 struct hci_cp_le_ltk_reply cp;
3420 struct hci_cp_le_ltk_neg_reply neg;
3421 struct hci_conn *conn;
3422 struct smp_ltk *ltk;
3423
3424 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3425
3426 hci_dev_lock(hdev);
3427
3428 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3429 if (conn == NULL)
3430 goto not_found;
3431
3432 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3433 if (ltk == NULL)
3434 goto not_found;
3435
3436 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3437 cp.handle = cpu_to_le16(conn->handle);
3438
3439 if (ltk->authenticated)
3440 conn->sec_level = BT_SECURITY_HIGH;
3441
3442 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3443
3444 if (ltk->type & HCI_SMP_STK) {
3445 list_del(&ltk->list);
3446 kfree(ltk);
3447 }
3448
3449 hci_dev_unlock(hdev);
3450
3451 return;
3452
3453 not_found:
3454 neg.handle = ev->handle;
3455 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3456 hci_dev_unlock(hdev);
3457 }
3458
3459 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3460 {
3461 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3462
3463 skb_pull(skb, sizeof(*le_ev));
3464
3465 switch (le_ev->subevent) {
3466 case HCI_EV_LE_CONN_COMPLETE:
3467 hci_le_conn_complete_evt(hdev, skb);
3468 break;
3469
3470 case HCI_EV_LE_ADVERTISING_REPORT:
3471 hci_le_adv_report_evt(hdev, skb);
3472 break;
3473
3474 case HCI_EV_LE_LTK_REQ:
3475 hci_le_ltk_request_evt(hdev, skb);
3476 break;
3477
3478 default:
3479 break;
3480 }
3481 }
3482
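/* Main HCI event demultiplexer: strip the event header, dispatch the
 * packet to the matching handler above, then free the skb and update
 * the RX event statistics.
 */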
3483 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3484 {
3485 struct hci_event_hdr *hdr = (void *) skb->data;
3486 __u8 event = hdr->evt;
3487
3488 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3489
3490 switch (event) {
3491 case HCI_EV_INQUIRY_COMPLETE:
3492 hci_inquiry_complete_evt(hdev, skb);
3493 break;
3494
3495 case HCI_EV_INQUIRY_RESULT:
3496 hci_inquiry_result_evt(hdev, skb);
3497 break;
3498
3499 case HCI_EV_CONN_COMPLETE:
3500 hci_conn_complete_evt(hdev, skb);
3501 break;
3502
3503 case HCI_EV_CONN_REQUEST:
3504 hci_conn_request_evt(hdev, skb);
3505 break;
3506
3507 case HCI_EV_DISCONN_COMPLETE:
3508 hci_disconn_complete_evt(hdev, skb);
3509 break;
3510
3511 case HCI_EV_AUTH_COMPLETE:
3512 hci_auth_complete_evt(hdev, skb);
3513 break;
3514
3515 case HCI_EV_REMOTE_NAME:
3516 hci_remote_name_evt(hdev, skb);
3517 break;
3518
3519 case HCI_EV_ENCRYPT_CHANGE:
3520 hci_encrypt_change_evt(hdev, skb);
3521 break;
3522
3523 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3524 hci_change_link_key_complete_evt(hdev, skb);
3525 break;
3526
3527 case HCI_EV_REMOTE_FEATURES:
3528 hci_remote_features_evt(hdev, skb);
3529 break;
3530
3531 case HCI_EV_REMOTE_VERSION:
3532 hci_remote_version_evt(hdev, skb);
3533 break;
3534
3535 case HCI_EV_QOS_SETUP_COMPLETE:
3536 hci_qos_setup_complete_evt(hdev, skb);
3537 break;
3538
3539 case HCI_EV_CMD_COMPLETE:
3540 hci_cmd_complete_evt(hdev, skb);
3541 break;
3542
3543 case HCI_EV_CMD_STATUS:
3544 hci_cmd_status_evt(hdev, skb);
3545 break;
3546
3547 case HCI_EV_ROLE_CHANGE:
3548 hci_role_change_evt(hdev, skb);
3549 break;
3550
3551 case HCI_EV_NUM_COMP_PKTS:
3552 hci_num_comp_pkts_evt(hdev, skb);
3553 break;
3554
3555 case HCI_EV_MODE_CHANGE:
3556 hci_mode_change_evt(hdev, skb);
3557 break;
3558
3559 case HCI_EV_PIN_CODE_REQ:
3560 hci_pin_code_request_evt(hdev, skb);
3561 break;
3562
3563 case HCI_EV_LINK_KEY_REQ:
3564 hci_link_key_request_evt(hdev, skb);
3565 break;
3566
3567 case HCI_EV_LINK_KEY_NOTIFY:
3568 hci_link_key_notify_evt(hdev, skb);
3569 break;
3570
3571 case HCI_EV_CLOCK_OFFSET:
3572 hci_clock_offset_evt(hdev, skb);
3573 break;
3574
3575 case HCI_EV_PKT_TYPE_CHANGE:
3576 hci_pkt_type_change_evt(hdev, skb);
3577 break;
3578
3579 case HCI_EV_PSCAN_REP_MODE:
3580 hci_pscan_rep_mode_evt(hdev, skb);
3581 break;
3582
3583 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3584 hci_inquiry_result_with_rssi_evt(hdev, skb);
3585 break;
3586
3587 case HCI_EV_REMOTE_EXT_FEATURES:
3588 hci_remote_ext_features_evt(hdev, skb);
3589 break;
3590
3591 case HCI_EV_SYNC_CONN_COMPLETE:
3592 hci_sync_conn_complete_evt(hdev, skb);
3593 break;
3594
3595 case HCI_EV_SYNC_CONN_CHANGED:
3596 hci_sync_conn_changed_evt(hdev, skb);
3597 break;
3598
3599 case HCI_EV_SNIFF_SUBRATE:
3600 hci_sniff_subrate_evt(hdev, skb);
3601 break;
3602
3603 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3604 hci_extended_inquiry_result_evt(hdev, skb);
3605 break;
3606
3607 case HCI_EV_KEY_REFRESH_COMPLETE:
3608 hci_key_refresh_complete_evt(hdev, skb);
3609 break;
3610
3611 case HCI_EV_IO_CAPA_REQUEST:
3612 hci_io_capa_request_evt(hdev, skb);
3613 break;
3614
3615 case HCI_EV_IO_CAPA_REPLY:
3616 hci_io_capa_reply_evt(hdev, skb);
3617 break;
3618
3619 case HCI_EV_USER_CONFIRM_REQUEST:
3620 hci_user_confirm_request_evt(hdev, skb);
3621 break;
3622
3623 case HCI_EV_USER_PASSKEY_REQUEST:
3624 hci_user_passkey_request_evt(hdev, skb);
3625 break;
3626
3627 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3628 hci_simple_pair_complete_evt(hdev, skb);
3629 break;
3630
3631 case HCI_EV_REMOTE_HOST_FEATURES:
3632 hci_remote_host_features_evt(hdev, skb);
3633 break;
3634
3635 case HCI_EV_LE_META:
3636 hci_le_meta_evt(hdev, skb);
3637 break;
3638
3639 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3640 hci_remote_oob_data_request_evt(hdev, skb);
3641 break;
3642
3643 case HCI_EV_NUM_COMP_BLOCKS:
3644 hci_num_comp_blocks_evt(hdev, skb);
3645 break;
3646
3647 default:
3648 BT_DBG("%s event 0x%2.2x", hdev->name, event);
3649 break;
3650 }
3651
3652 kfree_skb(skb);
3653 hdev->stat.evt_rx++;
3654 }