Bluetooth: Move SREJ list to struct l2cap_chan
net/bluetooth/hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <linux/notifier.h>
39 #include <net/sock.h>
40
41 #include <asm/system.h>
42 #include <linux/uaccess.h>
43 #include <asm/unaligned.h>
44
45 #include <net/bluetooth/bluetooth.h>
46 #include <net/bluetooth/hci_core.h>
47
48 /* Handle HCI Event packets */
49
50 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
51 {
52 __u8 status = *((__u8 *) skb->data);
53
54 BT_DBG("%s status 0x%x", hdev->name, status);
55
56 if (status)
57 return;
58
59 clear_bit(HCI_INQUIRY, &hdev->flags);
60
61 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
62
63 hci_conn_check_pending(hdev);
64 }
65
66 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
67 {
68 __u8 status = *((__u8 *) skb->data);
69
70 BT_DBG("%s status 0x%x", hdev->name, status);
71
72 if (status)
73 return;
74
75 clear_bit(HCI_INQUIRY, &hdev->flags);
76
77 hci_conn_check_pending(hdev);
78 }
79
80 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
81 {
82 BT_DBG("%s", hdev->name);
83 }
84
85 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
86 {
87 struct hci_rp_role_discovery *rp = (void *) skb->data;
88 struct hci_conn *conn;
89
90 BT_DBG("%s status 0x%x", hdev->name, rp->status);
91
92 if (rp->status)
93 return;
94
95 hci_dev_lock(hdev);
96
97 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
98 if (conn) {
99 if (rp->role)
100 conn->link_mode &= ~HCI_LM_MASTER;
101 else
102 conn->link_mode |= HCI_LM_MASTER;
103 }
104
105 hci_dev_unlock(hdev);
106 }
107
108 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
109 {
110 struct hci_rp_read_link_policy *rp = (void *) skb->data;
111 struct hci_conn *conn;
112
113 BT_DBG("%s status 0x%x", hdev->name, rp->status);
114
115 if (rp->status)
116 return;
117
118 hci_dev_lock(hdev);
119
120 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
121 if (conn)
122 conn->link_policy = __le16_to_cpu(rp->policy);
123
124 hci_dev_unlock(hdev);
125 }
126
127 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
128 {
129 struct hci_rp_write_link_policy *rp = (void *) skb->data;
130 struct hci_conn *conn;
131 void *sent;
132
133 BT_DBG("%s status 0x%x", hdev->name, rp->status);
134
135 if (rp->status)
136 return;
137
138 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
139 if (!sent)
140 return;
141
142 hci_dev_lock(hdev);
143
144 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
145 if (conn)
146 conn->link_policy = get_unaligned_le16(sent + 2);
147
148 hci_dev_unlock(hdev);
149 }
150
151 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
152 {
153 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
154
155 BT_DBG("%s status 0x%x", hdev->name, rp->status);
156
157 if (rp->status)
158 return;
159
160 hdev->link_policy = __le16_to_cpu(rp->policy);
161 }
162
163 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
164 {
165 __u8 status = *((__u8 *) skb->data);
166 void *sent;
167
168 BT_DBG("%s status 0x%x", hdev->name, status);
169
170 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
171 if (!sent)
172 return;
173
174 if (!status)
175 hdev->link_policy = get_unaligned_le16(sent);
176
177 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
178 }
179
180 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
181 {
182 __u8 status = *((__u8 *) skb->data);
183
184 BT_DBG("%s status 0x%x", hdev->name, status);
185
186 clear_bit(HCI_RESET, &hdev->flags);
187
188 hci_req_complete(hdev, HCI_OP_RESET, status);
189 }
190
191 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
192 {
193 __u8 status = *((__u8 *) skb->data);
194 void *sent;
195
196 BT_DBG("%s status 0x%x", hdev->name, status);
197
198 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
199 if (!sent)
200 return;
201
202 if (test_bit(HCI_MGMT, &hdev->flags))
203 mgmt_set_local_name_complete(hdev->id, sent, status);
204
205 if (status)
206 return;
207
208 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
209 }
210
211 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
212 {
213 struct hci_rp_read_local_name *rp = (void *) skb->data;
214
215 BT_DBG("%s status 0x%x", hdev->name, rp->status);
216
217 if (rp->status)
218 return;
219
220 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
221 }
222
223 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
224 {
225 __u8 status = *((__u8 *) skb->data);
226 void *sent;
227
228 BT_DBG("%s status 0x%x", hdev->name, status);
229
230 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
231 if (!sent)
232 return;
233
234 if (!status) {
235 __u8 param = *((__u8 *) sent);
236
237 if (param == AUTH_ENABLED)
238 set_bit(HCI_AUTH, &hdev->flags);
239 else
240 clear_bit(HCI_AUTH, &hdev->flags);
241 }
242
243 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
244 }
245
246 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
247 {
248 __u8 status = *((__u8 *) skb->data);
249 void *sent;
250
251 BT_DBG("%s status 0x%x", hdev->name, status);
252
253 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
254 if (!sent)
255 return;
256
257 if (!status) {
258 __u8 param = *((__u8 *) sent);
259
260 if (param)
261 set_bit(HCI_ENCRYPT, &hdev->flags);
262 else
263 clear_bit(HCI_ENCRYPT, &hdev->flags);
264 }
265
266 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
267 }
268
269 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
270 {
271 __u8 status = *((__u8 *) skb->data);
272 void *sent;
273
274 BT_DBG("%s status 0x%x", hdev->name, status);
275
276 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
277 if (!sent)
278 return;
279
280 if (!status) {
281 __u8 param = *((__u8 *) sent);
282 int old_pscan, old_iscan;
283
284 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
285 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
286
287 if (param & SCAN_INQUIRY) {
288 set_bit(HCI_ISCAN, &hdev->flags);
289 if (!old_iscan)
290 mgmt_discoverable(hdev->id, 1);
291 } else if (old_iscan)
292 mgmt_discoverable(hdev->id, 0);
293
294 if (param & SCAN_PAGE) {
295 set_bit(HCI_PSCAN, &hdev->flags);
296 if (!old_pscan)
297 mgmt_connectable(hdev->id, 1);
298 } else if (old_pscan)
299 mgmt_connectable(hdev->id, 0);
300 }
301
302 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
303 }
304
305 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
306 {
307 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
308
309 BT_DBG("%s status 0x%x", hdev->name, rp->status);
310
311 if (rp->status)
312 return;
313
314 memcpy(hdev->dev_class, rp->dev_class, 3);
315
316 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
317 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
318 }
319
320 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
321 {
322 __u8 status = *((__u8 *) skb->data);
323 void *sent;
324
325 BT_DBG("%s status 0x%x", hdev->name, status);
326
327 if (status)
328 return;
329
330 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
331 if (!sent)
332 return;
333
334 memcpy(hdev->dev_class, sent, 3);
335 }
336
337 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
338 {
339 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
340 __u16 setting;
341
342 BT_DBG("%s status 0x%x", hdev->name, rp->status);
343
344 if (rp->status)
345 return;
346
347 setting = __le16_to_cpu(rp->voice_setting);
348
349 if (hdev->voice_setting == setting)
350 return;
351
352 hdev->voice_setting = setting;
353
354 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
355
356 if (hdev->notify) {
357 tasklet_disable(&hdev->tx_task);
358 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
359 tasklet_enable(&hdev->tx_task);
360 }
361 }
362
363 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
364 {
365 __u8 status = *((__u8 *) skb->data);
366 __u16 setting;
367 void *sent;
368
369 BT_DBG("%s status 0x%x", hdev->name, status);
370
371 if (status)
372 return;
373
374 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
375 if (!sent)
376 return;
377
378 setting = get_unaligned_le16(sent);
379
380 if (hdev->voice_setting == setting)
381 return;
382
383 hdev->voice_setting = setting;
384
385 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
386
387 if (hdev->notify) {
388 tasklet_disable(&hdev->tx_task);
389 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
390 tasklet_enable(&hdev->tx_task);
391 }
392 }
393
394 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
395 {
396 __u8 status = *((__u8 *) skb->data);
397
398 BT_DBG("%s status 0x%x", hdev->name, status);
399
400 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
401 }
402
403 static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
404 {
405 struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
406
407 BT_DBG("%s status 0x%x", hdev->name, rp->status);
408
409 if (rp->status)
410 return;
411
412 hdev->ssp_mode = rp->mode;
413 }
414
415 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
416 {
417 __u8 status = *((__u8 *) skb->data);
418 void *sent;
419
420 BT_DBG("%s status 0x%x", hdev->name, status);
421
422 if (status)
423 return;
424
425 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
426 if (!sent)
427 return;
428
429 hdev->ssp_mode = *((__u8 *) sent);
430 }
431
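/* Select the value for the Write Inquiry Mode command: 2 enables
 * Inquiry Result with RSSI or Extended Inquiry Result, 1 enables
 * Inquiry Result with RSSI, 0 keeps the standard format.  The
 * manufacturer/hci_rev/lmp_subver checks force the RSSI format for
 * specific controller revisions that do not advertise the feature
 * bits tested above.
 */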
432 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
433 {
434 if (hdev->features[6] & LMP_EXT_INQ)
435 return 2;
436
437 if (hdev->features[3] & LMP_RSSI_INQ)
438 return 1;
439
440 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
441 hdev->lmp_subver == 0x0757)
442 return 1;
443
444 if (hdev->manufacturer == 15) {
445 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
446 return 1;
447 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
448 return 1;
449 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
450 return 1;
451 }
452
453 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
454 hdev->lmp_subver == 0x1805)
455 return 1;
456
457 return 0;
458 }
459
460 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
461 {
462 u8 mode;
463
464 mode = hci_get_inquiry_mode(hdev);
465
466 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
467 }
468
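/* Build the Set Event Mask parameter from the controller's LMP
 * features so that only events the controller can actually generate
 * are unmasked.
 */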
469 static void hci_setup_event_mask(struct hci_dev *hdev)
470 {
471 /* The second byte is 0xff instead of 0x9f (two reserved bits
472 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
473 * command otherwise */
474 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
475
476 /* Events for 1.2 and newer controllers */
477 if (hdev->lmp_ver > 1) {
478 events[4] |= 0x01; /* Flow Specification Complete */
479 events[4] |= 0x02; /* Inquiry Result with RSSI */
480 events[4] |= 0x04; /* Read Remote Extended Features Complete */
481 events[5] |= 0x08; /* Synchronous Connection Complete */
482 events[5] |= 0x10; /* Synchronous Connection Changed */
483 }
484
485 if (hdev->features[3] & LMP_RSSI_INQ)
486 events[4] |= 0x04; /* Inquiry Result with RSSI */
487
488 if (hdev->features[5] & LMP_SNIFF_SUBR)
489 events[5] |= 0x20; /* Sniff Subrating */
490
491 if (hdev->features[5] & LMP_PAUSE_ENC)
492 events[5] |= 0x80; /* Encryption Key Refresh Complete */
493
494 if (hdev->features[6] & LMP_EXT_INQ)
495 events[5] |= 0x40; /* Extended Inquiry Result */
496
497 if (hdev->features[6] & LMP_NO_FLUSH)
498 events[7] |= 0x01; /* Enhanced Flush Complete */
499
500 if (hdev->features[7] & LMP_LSTO)
501 events[6] |= 0x80; /* Link Supervision Timeout Changed */
502
503 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
504 events[6] |= 0x01; /* IO Capability Request */
505 events[6] |= 0x02; /* IO Capability Response */
506 events[6] |= 0x04; /* User Confirmation Request */
507 events[6] |= 0x08; /* User Passkey Request */
508 events[6] |= 0x10; /* Remote OOB Data Request */
509 events[6] |= 0x20; /* Simple Pairing Complete */
510 events[7] |= 0x04; /* User Passkey Notification */
511 events[7] |= 0x08; /* Keypress Notification */
512 events[7] |= 0x10; /* Remote Host Supported
513 * Features Notification */
514 }
515
516 if (hdev->features[4] & LMP_LE)
517 events[7] |= 0x20; /* LE Meta-Event */
518
519 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
520 }
521
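/* One-time controller setup, run from the Read Local Version complete
 * handler while HCI_INIT is set: program the event mask and, depending
 * on the supported features, read the supported commands, enable
 * Secure Simple Pairing, set the inquiry mode and read the inquiry
 * response TX power level.
 */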
522 static void hci_setup(struct hci_dev *hdev)
523 {
524 hci_setup_event_mask(hdev);
525
526 if (hdev->lmp_ver > 1)
527 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
528
529 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
530 u8 mode = 0x01;
531 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
532 }
533
534 if (hdev->features[3] & LMP_RSSI_INQ)
535 hci_setup_inquiry_mode(hdev);
536
537 if (hdev->features[7] & LMP_INQ_TX_PWR)
538 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
539 }
540
541 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
542 {
543 struct hci_rp_read_local_version *rp = (void *) skb->data;
544
545 BT_DBG("%s status 0x%x", hdev->name, rp->status);
546
547 if (rp->status)
548 return;
549
550 hdev->hci_ver = rp->hci_ver;
551 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
552 hdev->lmp_ver = rp->lmp_ver;
553 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
554 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
555
556 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
557 hdev->manufacturer,
558 hdev->hci_ver, hdev->hci_rev);
559
560 if (test_bit(HCI_INIT, &hdev->flags))
561 hci_setup(hdev);
562 }
563
564 static void hci_setup_link_policy(struct hci_dev *hdev)
565 {
566 u16 link_policy = 0;
567
568 if (hdev->features[0] & LMP_RSWITCH)
569 link_policy |= HCI_LP_RSWITCH;
570 if (hdev->features[0] & LMP_HOLD)
571 link_policy |= HCI_LP_HOLD;
572 if (hdev->features[0] & LMP_SNIFF)
573 link_policy |= HCI_LP_SNIFF;
574 if (hdev->features[1] & LMP_PARK)
575 link_policy |= HCI_LP_PARK;
576
577 link_policy = cpu_to_le16(link_policy);
578 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
579 sizeof(link_policy), &link_policy);
580 }
581
582 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
583 {
584 struct hci_rp_read_local_commands *rp = (void *) skb->data;
585
586 BT_DBG("%s status 0x%x", hdev->name, rp->status);
587
588 if (rp->status)
589 goto done;
590
591 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
592
593 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
594 hci_setup_link_policy(hdev);
595
596 done:
597 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
598 }
599
600 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
601 {
602 struct hci_rp_read_local_features *rp = (void *) skb->data;
603
604 BT_DBG("%s status 0x%x", hdev->name, rp->status);
605
606 if (rp->status)
607 return;
608
609 memcpy(hdev->features, rp->features, 8);
610
611 /* Adjust default settings according to features
612 * supported by the device. */
613
614 if (hdev->features[0] & LMP_3SLOT)
615 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
616
617 if (hdev->features[0] & LMP_5SLOT)
618 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
619
620 if (hdev->features[1] & LMP_HV2) {
621 hdev->pkt_type |= (HCI_HV2);
622 hdev->esco_type |= (ESCO_HV2);
623 }
624
625 if (hdev->features[1] & LMP_HV3) {
626 hdev->pkt_type |= (HCI_HV3);
627 hdev->esco_type |= (ESCO_HV3);
628 }
629
630 if (hdev->features[3] & LMP_ESCO)
631 hdev->esco_type |= (ESCO_EV3);
632
633 if (hdev->features[4] & LMP_EV4)
634 hdev->esco_type |= (ESCO_EV4);
635
636 if (hdev->features[4] & LMP_EV5)
637 hdev->esco_type |= (ESCO_EV5);
638
639 if (hdev->features[5] & LMP_EDR_ESCO_2M)
640 hdev->esco_type |= (ESCO_2EV3);
641
642 if (hdev->features[5] & LMP_EDR_ESCO_3M)
643 hdev->esco_type |= (ESCO_3EV3);
644
645 if (hdev->features[5] & LMP_EDR_3S_ESCO)
646 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
647
648 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
649 hdev->features[0], hdev->features[1],
650 hdev->features[2], hdev->features[3],
651 hdev->features[4], hdev->features[5],
652 hdev->features[6], hdev->features[7]);
653 }
654
655 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
656 {
657 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
658
659 BT_DBG("%s status 0x%x", hdev->name, rp->status);
660
661 if (rp->status)
662 return;
663
664 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
665 hdev->sco_mtu = rp->sco_mtu;
666 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
667 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
668
669 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
670 hdev->sco_mtu = 64;
671 hdev->sco_pkts = 8;
672 }
673
674 hdev->acl_cnt = hdev->acl_pkts;
675 hdev->sco_cnt = hdev->sco_pkts;
676
677 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
678 hdev->acl_mtu, hdev->acl_pkts,
679 hdev->sco_mtu, hdev->sco_pkts);
680 }
681
682 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
683 {
684 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
685
686 BT_DBG("%s status 0x%x", hdev->name, rp->status);
687
688 if (!rp->status)
689 bacpy(&hdev->bdaddr, &rp->bdaddr);
690
691 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
692 }
693
694 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
695 {
696 __u8 status = *((__u8 *) skb->data);
697
698 BT_DBG("%s status 0x%x", hdev->name, status);
699
700 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
701 }
702
703 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
704 struct sk_buff *skb)
705 {
706 __u8 status = *((__u8 *) skb->data);
707
708 BT_DBG("%s status 0x%x", hdev->name, status);
709
710 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
711 }
712
713 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
714 {
715 __u8 status = *((__u8 *) skb->data);
716
717 BT_DBG("%s status 0x%x", hdev->name, status);
718
719 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
720 }
721
722 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
723 struct sk_buff *skb)
724 {
725 __u8 status = *((__u8 *) skb->data);
726
727 BT_DBG("%s status 0x%x", hdev->name, status);
728
729 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
730 }
731
732 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
733 struct sk_buff *skb)
734 {
735 __u8 status = *((__u8 *) skb->data);
736
737 BT_DBG("%s status 0x%x", hdev->name, status);
738
739 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
740 }
741
742 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
743 {
744 __u8 status = *((__u8 *) skb->data);
745
746 BT_DBG("%s status 0x%x", hdev->name, status);
747
748 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
749 }
750
751 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
752 {
753 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
754 struct hci_cp_pin_code_reply *cp;
755 struct hci_conn *conn;
756
757 BT_DBG("%s status 0x%x", hdev->name, rp->status);
758
759 if (test_bit(HCI_MGMT, &hdev->flags))
760 mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
761
762 if (rp->status != 0)
763 return;
764
765 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
766 if (!cp)
767 return;
768
769 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
770 if (conn)
771 conn->pin_length = cp->pin_len;
772 }
773
774 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
775 {
776 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
777
778 BT_DBG("%s status 0x%x", hdev->name, rp->status);
779
780 if (test_bit(HCI_MGMT, &hdev->flags))
781 mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
782 rp->status);
783 }
784 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
785 struct sk_buff *skb)
786 {
787 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
788
789 BT_DBG("%s status 0x%x", hdev->name, rp->status);
790
791 if (rp->status)
792 return;
793
794 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
795 hdev->le_pkts = rp->le_max_pkt;
796
797 hdev->le_cnt = hdev->le_pkts;
798
799 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
800
801 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
802 }
803
804 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
805 {
806 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
807
808 BT_DBG("%s status 0x%x", hdev->name, rp->status);
809
810 if (test_bit(HCI_MGMT, &hdev->flags))
811 mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
812 rp->status);
813 }
814
815 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
816 struct sk_buff *skb)
817 {
818 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
819
820 BT_DBG("%s status 0x%x", hdev->name, rp->status);
821
822 if (test_bit(HCI_MGMT, &hdev->flags))
823 mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
824 rp->status);
825 }
826
827 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
828 struct sk_buff *skb)
829 {
830 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
831
832 BT_DBG("%s status 0x%x", hdev->name, rp->status);
833
834 mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
835 rp->randomizer, rp->status);
836 }
837
838 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
839 {
840 BT_DBG("%s status 0x%x", hdev->name, status);
841
842 if (status) {
843 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
844
845 hci_conn_check_pending(hdev);
846 } else
847 set_bit(HCI_INQUIRY, &hdev->flags);
848 }
849
850 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
851 {
852 struct hci_cp_create_conn *cp;
853 struct hci_conn *conn;
854
855 BT_DBG("%s status 0x%x", hdev->name, status);
856
857 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
858 if (!cp)
859 return;
860
861 hci_dev_lock(hdev);
862
863 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
864
865 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
866
867 if (status) {
868 if (conn && conn->state == BT_CONNECT) {
869 if (status != 0x0c || conn->attempt > 2) {
870 conn->state = BT_CLOSED;
871 hci_proto_connect_cfm(conn, status);
872 hci_conn_del(conn);
873 } else
874 conn->state = BT_CONNECT2;
875 }
876 } else {
877 if (!conn) {
878 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
879 if (conn) {
880 conn->out = 1;
881 conn->link_mode |= HCI_LM_MASTER;
882 } else
883 BT_ERR("No memory for new connection");
884 }
885 }
886
887 hci_dev_unlock(hdev);
888 }
889
890 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
891 {
892 struct hci_cp_add_sco *cp;
893 struct hci_conn *acl, *sco;
894 __u16 handle;
895
896 BT_DBG("%s status 0x%x", hdev->name, status);
897
898 if (!status)
899 return;
900
901 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
902 if (!cp)
903 return;
904
905 handle = __le16_to_cpu(cp->handle);
906
907 BT_DBG("%s handle %d", hdev->name, handle);
908
909 hci_dev_lock(hdev);
910
911 acl = hci_conn_hash_lookup_handle(hdev, handle);
912 if (acl) {
913 sco = acl->link;
914 if (sco) {
915 sco->state = BT_CLOSED;
916
917 hci_proto_connect_cfm(sco, status);
918 hci_conn_del(sco);
919 }
920 }
921
922 hci_dev_unlock(hdev);
923 }
924
925 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
926 {
927 struct hci_cp_auth_requested *cp;
928 struct hci_conn *conn;
929
930 BT_DBG("%s status 0x%x", hdev->name, status);
931
932 if (!status)
933 return;
934
935 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
936 if (!cp)
937 return;
938
939 hci_dev_lock(hdev);
940
941 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
942 if (conn) {
943 if (conn->state == BT_CONFIG) {
944 hci_proto_connect_cfm(conn, status);
945 hci_conn_put(conn);
946 }
947 }
948
949 hci_dev_unlock(hdev);
950 }
951
952 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
953 {
954 struct hci_cp_set_conn_encrypt *cp;
955 struct hci_conn *conn;
956
957 BT_DBG("%s status 0x%x", hdev->name, status);
958
959 if (!status)
960 return;
961
962 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
963 if (!cp)
964 return;
965
966 hci_dev_lock(hdev);
967
968 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
969 if (conn) {
970 if (conn->state == BT_CONFIG) {
971 hci_proto_connect_cfm(conn, status);
972 hci_conn_put(conn);
973 }
974 }
975
976 hci_dev_unlock(hdev);
977 }
978
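/* Decide whether an outgoing connection that is still in BT_CONFIG
 * needs an Authentication Requested command before it can be reported
 * as connected.
 */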
979 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
980 struct hci_conn *conn)
981 {
982 if (conn->state != BT_CONFIG || !conn->out)
983 return 0;
984
985 if (conn->pending_sec_level == BT_SECURITY_SDP)
986 return 0;
987
988 /* Only request authentication for SSP connections or non-SSP
989 * devices with sec_level HIGH */
990 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
991 conn->pending_sec_level != BT_SECURITY_HIGH)
992 return 0;
993
994 return 1;
995 }
996
997 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
998 {
999 struct hci_cp_remote_name_req *cp;
1000 struct hci_conn *conn;
1001
1002 BT_DBG("%s status 0x%x", hdev->name, status);
1003
1004 /* If successful wait for the name req complete event before
1005 * checking for the need to do authentication */
1006 if (!status)
1007 return;
1008
1009 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1010 if (!cp)
1011 return;
1012
1013 hci_dev_lock(hdev);
1014
1015 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1016 if (conn && hci_outgoing_auth_needed(hdev, conn)) {
1017 struct hci_cp_auth_requested cp;
1018 cp.handle = __cpu_to_le16(conn->handle);
1019 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1020 }
1021
1022 hci_dev_unlock(hdev);
1023 }
1024
1025 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1026 {
1027 struct hci_cp_read_remote_features *cp;
1028 struct hci_conn *conn;
1029
1030 BT_DBG("%s status 0x%x", hdev->name, status);
1031
1032 if (!status)
1033 return;
1034
1035 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1036 if (!cp)
1037 return;
1038
1039 hci_dev_lock(hdev);
1040
1041 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1042 if (conn) {
1043 if (conn->state == BT_CONFIG) {
1044 hci_proto_connect_cfm(conn, status);
1045 hci_conn_put(conn);
1046 }
1047 }
1048
1049 hci_dev_unlock(hdev);
1050 }
1051
1052 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1053 {
1054 struct hci_cp_read_remote_ext_features *cp;
1055 struct hci_conn *conn;
1056
1057 BT_DBG("%s status 0x%x", hdev->name, status);
1058
1059 if (!status)
1060 return;
1061
1062 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1063 if (!cp)
1064 return;
1065
1066 hci_dev_lock(hdev);
1067
1068 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1069 if (conn) {
1070 if (conn->state == BT_CONFIG) {
1071 hci_proto_connect_cfm(conn, status);
1072 hci_conn_put(conn);
1073 }
1074 }
1075
1076 hci_dev_unlock(hdev);
1077 }
1078
1079 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1080 {
1081 struct hci_cp_setup_sync_conn *cp;
1082 struct hci_conn *acl, *sco;
1083 __u16 handle;
1084
1085 BT_DBG("%s status 0x%x", hdev->name, status);
1086
1087 if (!status)
1088 return;
1089
1090 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1091 if (!cp)
1092 return;
1093
1094 handle = __le16_to_cpu(cp->handle);
1095
1096 BT_DBG("%s handle %d", hdev->name, handle);
1097
1098 hci_dev_lock(hdev);
1099
1100 acl = hci_conn_hash_lookup_handle(hdev, handle);
1101 if (acl) {
1102 sco = acl->link;
1103 if (sco) {
1104 sco->state = BT_CLOSED;
1105
1106 hci_proto_connect_cfm(sco, status);
1107 hci_conn_del(sco);
1108 }
1109 }
1110
1111 hci_dev_unlock(hdev);
1112 }
1113
1114 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1115 {
1116 struct hci_cp_sniff_mode *cp;
1117 struct hci_conn *conn;
1118
1119 BT_DBG("%s status 0x%x", hdev->name, status);
1120
1121 if (!status)
1122 return;
1123
1124 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1125 if (!cp)
1126 return;
1127
1128 hci_dev_lock(hdev);
1129
1130 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1131 if (conn) {
1132 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1133
1134 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1135 hci_sco_setup(conn, status);
1136 }
1137
1138 hci_dev_unlock(hdev);
1139 }
1140
1141 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1142 {
1143 struct hci_cp_exit_sniff_mode *cp;
1144 struct hci_conn *conn;
1145
1146 BT_DBG("%s status 0x%x", hdev->name, status);
1147
1148 if (!status)
1149 return;
1150
1151 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1152 if (!cp)
1153 return;
1154
1155 hci_dev_lock(hdev);
1156
1157 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1158 if (conn) {
1159 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1160
1161 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1162 hci_sco_setup(conn, status);
1163 }
1164
1165 hci_dev_unlock(hdev);
1166 }
1167
1168 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1169 {
1170 struct hci_cp_le_create_conn *cp;
1171 struct hci_conn *conn;
1172
1173 BT_DBG("%s status 0x%x", hdev->name, status);
1174
1175 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1176 if (!cp)
1177 return;
1178
1179 hci_dev_lock(hdev);
1180
1181 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1182
1183 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1184 conn);
1185
1186 if (status) {
1187 if (conn && conn->state == BT_CONNECT) {
1188 conn->state = BT_CLOSED;
1189 hci_proto_connect_cfm(conn, status);
1190 hci_conn_del(conn);
1191 }
1192 } else {
1193 if (!conn) {
1194 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1195 if (conn)
1196 conn->out = 1;
1197 else
1198 BT_ERR("No memory for new connection");
1199 }
1200 }
1201
1202 hci_dev_unlock(hdev);
1203 }
1204
1205 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1206 {
1207 __u8 status = *((__u8 *) skb->data);
1208
1209 BT_DBG("%s status %d", hdev->name, status);
1210
1211 clear_bit(HCI_INQUIRY, &hdev->flags);
1212
1213 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1214
1215 hci_conn_check_pending(hdev);
1216 }
1217
1218 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1219 {
1220 struct inquiry_data data;
1221 struct inquiry_info *info = (void *) (skb->data + 1);
1222 int num_rsp = *((__u8 *) skb->data);
1223
1224 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1225
1226 if (!num_rsp)
1227 return;
1228
1229 hci_dev_lock(hdev);
1230
1231 for (; num_rsp; num_rsp--, info++) {
1232 bacpy(&data.bdaddr, &info->bdaddr);
1233 data.pscan_rep_mode = info->pscan_rep_mode;
1234 data.pscan_period_mode = info->pscan_period_mode;
1235 data.pscan_mode = info->pscan_mode;
1236 memcpy(data.dev_class, info->dev_class, 3);
1237 data.clock_offset = info->clock_offset;
1238 data.rssi = 0x00;
1239 data.ssp_mode = 0x00;
1240 hci_inquiry_cache_update(hdev, &data);
1241 mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0,
1242 NULL);
1243 }
1244
1245 hci_dev_unlock(hdev);
1246 }
1247
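/* Connection Complete event: on success store the handle, move ACL
 * links to BT_CONFIG and start reading the remote features, move
 * SCO/eSCO links straight to BT_CONNECTED; on failure report the
 * error and drop the connection object.
 */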
1248 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1249 {
1250 struct hci_ev_conn_complete *ev = (void *) skb->data;
1251 struct hci_conn *conn;
1252
1253 BT_DBG("%s", hdev->name);
1254
1255 hci_dev_lock(hdev);
1256
1257 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1258 if (!conn) {
1259 if (ev->link_type != SCO_LINK)
1260 goto unlock;
1261
1262 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1263 if (!conn)
1264 goto unlock;
1265
1266 conn->type = SCO_LINK;
1267 }
1268
1269 if (!ev->status) {
1270 conn->handle = __le16_to_cpu(ev->handle);
1271
1272 if (conn->type == ACL_LINK) {
1273 conn->state = BT_CONFIG;
1274 hci_conn_hold(conn);
1275 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1276 mgmt_connected(hdev->id, &ev->bdaddr);
1277 } else
1278 conn->state = BT_CONNECTED;
1279
1280 hci_conn_hold_device(conn);
1281 hci_conn_add_sysfs(conn);
1282
1283 if (test_bit(HCI_AUTH, &hdev->flags))
1284 conn->link_mode |= HCI_LM_AUTH;
1285
1286 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1287 conn->link_mode |= HCI_LM_ENCRYPT;
1288
1289 /* Get remote features */
1290 if (conn->type == ACL_LINK) {
1291 struct hci_cp_read_remote_features cp;
1292 cp.handle = ev->handle;
1293 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1294 sizeof(cp), &cp);
1295 }
1296
1297 /* Set packet type for incoming connection */
1298 if (!conn->out && hdev->hci_ver < 3) {
1299 struct hci_cp_change_conn_ptype cp;
1300 cp.handle = ev->handle;
1301 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1302 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
1303 sizeof(cp), &cp);
1304 }
1305 } else {
1306 conn->state = BT_CLOSED;
1307 if (conn->type == ACL_LINK)
1308 mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
1309 }
1310
1311 if (conn->type == ACL_LINK)
1312 hci_sco_setup(conn, ev->status);
1313
1314 if (ev->status) {
1315 hci_proto_connect_cfm(conn, ev->status);
1316 hci_conn_del(conn);
1317 } else if (ev->link_type != ACL_LINK)
1318 hci_proto_connect_cfm(conn, ev->status);
1319
1320 unlock:
1321 hci_dev_unlock(hdev);
1322
1323 hci_conn_check_pending(hdev);
1324 }
1325
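/* Connection Request event: accept the incoming connection when the
 * local link mode (or a registered protocol) allows it and the peer
 * is not blacklisted, using Accept Connection Request for ACL (and
 * for SCO on controllers without eSCO support) or Accept Synchronous
 * Connection Request otherwise; connections that are not allowed are
 * rejected.
 */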
1326 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1327 {
1328 struct hci_ev_conn_request *ev = (void *) skb->data;
1329 int mask = hdev->link_mode;
1330
1331 BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1332 batostr(&ev->bdaddr), ev->link_type);
1333
1334 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1335
1336 if ((mask & HCI_LM_ACCEPT) &&
1337 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1338 /* Connection accepted */
1339 struct inquiry_entry *ie;
1340 struct hci_conn *conn;
1341
1342 hci_dev_lock(hdev);
1343
1344 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1345 if (ie)
1346 memcpy(ie->data.dev_class, ev->dev_class, 3);
1347
1348 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1349 if (!conn) {
1350 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1351 if (!conn) {
1352 BT_ERR("No memory for new connection");
1353 hci_dev_unlock(hdev);
1354 return;
1355 }
1356 }
1357
1358 memcpy(conn->dev_class, ev->dev_class, 3);
1359 conn->state = BT_CONNECT;
1360
1361 hci_dev_unlock(hdev);
1362
1363 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1364 struct hci_cp_accept_conn_req cp;
1365
1366 bacpy(&cp.bdaddr, &ev->bdaddr);
1367
1368 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1369 cp.role = 0x00; /* Become master */
1370 else
1371 cp.role = 0x01; /* Remain slave */
1372
1373 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
1374 sizeof(cp), &cp);
1375 } else {
1376 struct hci_cp_accept_sync_conn_req cp;
1377
1378 bacpy(&cp.bdaddr, &ev->bdaddr);
1379 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1380
1381 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
1382 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
1383 cp.max_latency = cpu_to_le16(0xffff);
1384 cp.content_format = cpu_to_le16(hdev->voice_setting);
1385 cp.retrans_effort = 0xff;
1386
1387 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1388 sizeof(cp), &cp);
1389 }
1390 } else {
1391 /* Connection rejected */
1392 struct hci_cp_reject_conn_req cp;
1393
1394 bacpy(&cp.bdaddr, &ev->bdaddr);
1395 cp.reason = 0x0f;
1396 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1397 }
1398 }
1399
1400 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1401 {
1402 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1403 struct hci_conn *conn;
1404
1405 BT_DBG("%s status %d", hdev->name, ev->status);
1406
1407 if (ev->status) {
1408 mgmt_disconnect_failed(hdev->id);
1409 return;
1410 }
1411
1412 hci_dev_lock(hdev);
1413
1414 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1415 if (!conn)
1416 goto unlock;
1417
1418 conn->state = BT_CLOSED;
1419
1420 if (conn->type == ACL_LINK)
1421 mgmt_disconnected(hdev->id, &conn->dst);
1422
1423 hci_proto_disconn_cfm(conn, ev->reason);
1424 hci_conn_del(conn);
1425
1426 unlock:
1427 hci_dev_unlock(hdev);
1428 }
1429
1430 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
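/* Authentication Complete event: update the link mode and security
 * level, then for connections still in BT_CONFIG either start
 * encryption (when both sides support SSP) or report the connection
 * as established.  A pending encryption request is resumed here as
 * well.
 */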
1431 {
1432 struct hci_ev_auth_complete *ev = (void *) skb->data;
1433 struct hci_conn *conn;
1434
1435 BT_DBG("%s status %d", hdev->name, ev->status);
1436
1437 hci_dev_lock(hdev);
1438
1439 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1440 if (conn) {
1441 if (!ev->status) {
1442 conn->link_mode |= HCI_LM_AUTH;
1443 conn->sec_level = conn->pending_sec_level;
1444 } else {
1445 mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
1446 conn->sec_level = BT_SECURITY_LOW;
1447 }
1448
1449 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1450
1451 if (conn->state == BT_CONFIG) {
1452 if (!ev->status && hdev->ssp_mode > 0 &&
1453 conn->ssp_mode > 0) {
1454 struct hci_cp_set_conn_encrypt cp;
1455 cp.handle = ev->handle;
1456 cp.encrypt = 0x01;
1457 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
1458 sizeof(cp), &cp);
1459 } else {
1460 conn->state = BT_CONNECTED;
1461 hci_proto_connect_cfm(conn, ev->status);
1462 hci_conn_put(conn);
1463 }
1464 } else {
1465 hci_auth_cfm(conn, ev->status);
1466
1467 hci_conn_hold(conn);
1468 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1469 hci_conn_put(conn);
1470 }
1471
1472 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
1473 if (!ev->status) {
1474 struct hci_cp_set_conn_encrypt cp;
1475 cp.handle = ev->handle;
1476 cp.encrypt = 0x01;
1477 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
1478 sizeof(cp), &cp);
1479 } else {
1480 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1481 hci_encrypt_cfm(conn, ev->status, 0x00);
1482 }
1483 }
1484 }
1485
1486 hci_dev_unlock(hdev);
1487 }
1488
1489 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1490 {
1491 struct hci_ev_remote_name *ev = (void *) skb->data;
1492 struct hci_conn *conn;
1493
1494 BT_DBG("%s", hdev->name);
1495
1496 hci_conn_check_pending(hdev);
1497
1498 hci_dev_lock(hdev);
1499
1500 if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
1501 mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name);
1502
1503 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1504 if (conn && hci_outgoing_auth_needed(hdev, conn)) {
1505 struct hci_cp_auth_requested cp;
1506 cp.handle = __cpu_to_le16(conn->handle);
1507 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1508 }
1509
1510 hci_dev_unlock(hdev);
1511 }
1512
1513 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1514 {
1515 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1516 struct hci_conn *conn;
1517
1518 BT_DBG("%s status %d", hdev->name, ev->status);
1519
1520 hci_dev_lock(hdev);
1521
1522 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1523 if (conn) {
1524 if (!ev->status) {
1525 if (ev->encrypt) {
1526 /* Encryption implies authentication */
1527 conn->link_mode |= HCI_LM_AUTH;
1528 conn->link_mode |= HCI_LM_ENCRYPT;
1529 } else
1530 conn->link_mode &= ~HCI_LM_ENCRYPT;
1531 }
1532
1533 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1534
1535 if (conn->state == BT_CONFIG) {
1536 if (!ev->status)
1537 conn->state = BT_CONNECTED;
1538
1539 hci_proto_connect_cfm(conn, ev->status);
1540 hci_conn_put(conn);
1541 } else
1542 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
1543 }
1544
1545 hci_dev_unlock(hdev);
1546 }
1547
1548 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1549 {
1550 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1551 struct hci_conn *conn;
1552
1553 BT_DBG("%s status %d", hdev->name, ev->status);
1554
1555 hci_dev_lock(hdev);
1556
1557 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1558 if (conn) {
1559 if (!ev->status)
1560 conn->link_mode |= HCI_LM_SECURE;
1561
1562 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1563
1564 hci_key_change_cfm(conn, ev->status);
1565 }
1566
1567 hci_dev_unlock(hdev);
1568 }
1569
1570 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
1571 {
1572 struct hci_ev_remote_features *ev = (void *) skb->data;
1573 struct hci_conn *conn;
1574
1575 BT_DBG("%s status %d", hdev->name, ev->status);
1576
1577 hci_dev_lock(hdev);
1578
1579 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1580 if (!conn)
1581 goto unlock;
1582
1583 if (!ev->status)
1584 memcpy(conn->features, ev->features, 8);
1585
1586 if (conn->state != BT_CONFIG)
1587 goto unlock;
1588
1589 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
1590 struct hci_cp_read_remote_ext_features cp;
1591 cp.handle = ev->handle;
1592 cp.page = 0x01;
1593 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
1594 sizeof(cp), &cp);
1595 goto unlock;
1596 }
1597
1598 if (!ev->status) {
1599 struct hci_cp_remote_name_req cp;
1600 memset(&cp, 0, sizeof(cp));
1601 bacpy(&cp.bdaddr, &conn->dst);
1602 cp.pscan_rep_mode = 0x02;
1603 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1604 }
1605
1606 if (!hci_outgoing_auth_needed(hdev, conn)) {
1607 conn->state = BT_CONNECTED;
1608 hci_proto_connect_cfm(conn, ev->status);
1609 hci_conn_put(conn);
1610 }
1611
1612 unlock:
1613 hci_dev_unlock(hdev);
1614 }
1615
1616 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
1617 {
1618 BT_DBG("%s", hdev->name);
1619 }
1620
1621 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1622 {
1623 BT_DBG("%s", hdev->name);
1624 }
1625
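/* Command Complete event: dispatch to the matching hci_cc_* handler
 * based on the opcode, then refill the command credit and kick the
 * command queue if the controller can accept more commands.
 */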
1626 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1627 {
1628 struct hci_ev_cmd_complete *ev = (void *) skb->data;
1629 __u16 opcode;
1630
1631 skb_pull(skb, sizeof(*ev));
1632
1633 opcode = __le16_to_cpu(ev->opcode);
1634
1635 switch (opcode) {
1636 case HCI_OP_INQUIRY_CANCEL:
1637 hci_cc_inquiry_cancel(hdev, skb);
1638 break;
1639
1640 case HCI_OP_EXIT_PERIODIC_INQ:
1641 hci_cc_exit_periodic_inq(hdev, skb);
1642 break;
1643
1644 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
1645 hci_cc_remote_name_req_cancel(hdev, skb);
1646 break;
1647
1648 case HCI_OP_ROLE_DISCOVERY:
1649 hci_cc_role_discovery(hdev, skb);
1650 break;
1651
1652 case HCI_OP_READ_LINK_POLICY:
1653 hci_cc_read_link_policy(hdev, skb);
1654 break;
1655
1656 case HCI_OP_WRITE_LINK_POLICY:
1657 hci_cc_write_link_policy(hdev, skb);
1658 break;
1659
1660 case HCI_OP_READ_DEF_LINK_POLICY:
1661 hci_cc_read_def_link_policy(hdev, skb);
1662 break;
1663
1664 case HCI_OP_WRITE_DEF_LINK_POLICY:
1665 hci_cc_write_def_link_policy(hdev, skb);
1666 break;
1667
1668 case HCI_OP_RESET:
1669 hci_cc_reset(hdev, skb);
1670 break;
1671
1672 case HCI_OP_WRITE_LOCAL_NAME:
1673 hci_cc_write_local_name(hdev, skb);
1674 break;
1675
1676 case HCI_OP_READ_LOCAL_NAME:
1677 hci_cc_read_local_name(hdev, skb);
1678 break;
1679
1680 case HCI_OP_WRITE_AUTH_ENABLE:
1681 hci_cc_write_auth_enable(hdev, skb);
1682 break;
1683
1684 case HCI_OP_WRITE_ENCRYPT_MODE:
1685 hci_cc_write_encrypt_mode(hdev, skb);
1686 break;
1687
1688 case HCI_OP_WRITE_SCAN_ENABLE:
1689 hci_cc_write_scan_enable(hdev, skb);
1690 break;
1691
1692 case HCI_OP_READ_CLASS_OF_DEV:
1693 hci_cc_read_class_of_dev(hdev, skb);
1694 break;
1695
1696 case HCI_OP_WRITE_CLASS_OF_DEV:
1697 hci_cc_write_class_of_dev(hdev, skb);
1698 break;
1699
1700 case HCI_OP_READ_VOICE_SETTING:
1701 hci_cc_read_voice_setting(hdev, skb);
1702 break;
1703
1704 case HCI_OP_WRITE_VOICE_SETTING:
1705 hci_cc_write_voice_setting(hdev, skb);
1706 break;
1707
1708 case HCI_OP_HOST_BUFFER_SIZE:
1709 hci_cc_host_buffer_size(hdev, skb);
1710 break;
1711
1712 case HCI_OP_READ_SSP_MODE:
1713 hci_cc_read_ssp_mode(hdev, skb);
1714 break;
1715
1716 case HCI_OP_WRITE_SSP_MODE:
1717 hci_cc_write_ssp_mode(hdev, skb);
1718 break;
1719
1720 case HCI_OP_READ_LOCAL_VERSION:
1721 hci_cc_read_local_version(hdev, skb);
1722 break;
1723
1724 case HCI_OP_READ_LOCAL_COMMANDS:
1725 hci_cc_read_local_commands(hdev, skb);
1726 break;
1727
1728 case HCI_OP_READ_LOCAL_FEATURES:
1729 hci_cc_read_local_features(hdev, skb);
1730 break;
1731
1732 case HCI_OP_READ_BUFFER_SIZE:
1733 hci_cc_read_buffer_size(hdev, skb);
1734 break;
1735
1736 case HCI_OP_READ_BD_ADDR:
1737 hci_cc_read_bd_addr(hdev, skb);
1738 break;
1739
1740 case HCI_OP_WRITE_CA_TIMEOUT:
1741 hci_cc_write_ca_timeout(hdev, skb);
1742 break;
1743
1744 case HCI_OP_DELETE_STORED_LINK_KEY:
1745 hci_cc_delete_stored_link_key(hdev, skb);
1746 break;
1747
1748 case HCI_OP_SET_EVENT_MASK:
1749 hci_cc_set_event_mask(hdev, skb);
1750 break;
1751
1752 case HCI_OP_WRITE_INQUIRY_MODE:
1753 hci_cc_write_inquiry_mode(hdev, skb);
1754 break;
1755
1756 case HCI_OP_READ_INQ_RSP_TX_POWER:
1757 hci_cc_read_inq_rsp_tx_power(hdev, skb);
1758 break;
1759
1760 case HCI_OP_SET_EVENT_FLT:
1761 hci_cc_set_event_flt(hdev, skb);
1762 break;
1763
1764 case HCI_OP_PIN_CODE_REPLY:
1765 hci_cc_pin_code_reply(hdev, skb);
1766 break;
1767
1768 case HCI_OP_PIN_CODE_NEG_REPLY:
1769 hci_cc_pin_code_neg_reply(hdev, skb);
1770 break;
1771
1772 case HCI_OP_READ_LOCAL_OOB_DATA:
1773 hci_cc_read_local_oob_data_reply(hdev, skb);
1774 break;
1775
1776 case HCI_OP_LE_READ_BUFFER_SIZE:
1777 hci_cc_le_read_buffer_size(hdev, skb);
1778 break;
1779
1780 case HCI_OP_USER_CONFIRM_REPLY:
1781 hci_cc_user_confirm_reply(hdev, skb);
1782 break;
1783
1784 case HCI_OP_USER_CONFIRM_NEG_REPLY:
1785 hci_cc_user_confirm_neg_reply(hdev, skb);
1786 break;
1787
1788 default:
1789 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1790 break;
1791 }
1792
1793 if (ev->opcode != HCI_OP_NOP)
1794 del_timer(&hdev->cmd_timer);
1795
1796 if (ev->ncmd) {
1797 atomic_set(&hdev->cmd_cnt, 1);
1798 if (!skb_queue_empty(&hdev->cmd_q))
1799 tasklet_schedule(&hdev->cmd_task);
1800 }
1801 }
1802
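/* Command Status event: dispatch to the matching hci_cs_* handler so
 * that failed commands can be cleaned up early, then refill the
 * command credit unless a reset is in progress.
 */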
1803 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
1804 {
1805 struct hci_ev_cmd_status *ev = (void *) skb->data;
1806 __u16 opcode;
1807
1808 skb_pull(skb, sizeof(*ev));
1809
1810 opcode = __le16_to_cpu(ev->opcode);
1811
1812 switch (opcode) {
1813 case HCI_OP_INQUIRY:
1814 hci_cs_inquiry(hdev, ev->status);
1815 break;
1816
1817 case HCI_OP_CREATE_CONN:
1818 hci_cs_create_conn(hdev, ev->status);
1819 break;
1820
1821 case HCI_OP_ADD_SCO:
1822 hci_cs_add_sco(hdev, ev->status);
1823 break;
1824
1825 case HCI_OP_AUTH_REQUESTED:
1826 hci_cs_auth_requested(hdev, ev->status);
1827 break;
1828
1829 case HCI_OP_SET_CONN_ENCRYPT:
1830 hci_cs_set_conn_encrypt(hdev, ev->status);
1831 break;
1832
1833 case HCI_OP_REMOTE_NAME_REQ:
1834 hci_cs_remote_name_req(hdev, ev->status);
1835 break;
1836
1837 case HCI_OP_READ_REMOTE_FEATURES:
1838 hci_cs_read_remote_features(hdev, ev->status);
1839 break;
1840
1841 case HCI_OP_READ_REMOTE_EXT_FEATURES:
1842 hci_cs_read_remote_ext_features(hdev, ev->status);
1843 break;
1844
1845 case HCI_OP_SETUP_SYNC_CONN:
1846 hci_cs_setup_sync_conn(hdev, ev->status);
1847 break;
1848
1849 case HCI_OP_SNIFF_MODE:
1850 hci_cs_sniff_mode(hdev, ev->status);
1851 break;
1852
1853 case HCI_OP_EXIT_SNIFF_MODE:
1854 hci_cs_exit_sniff_mode(hdev, ev->status);
1855 break;
1856
1857 case HCI_OP_DISCONNECT:
1858 if (ev->status != 0)
1859 mgmt_disconnect_failed(hdev->id);
1860 break;
1861
1862 case HCI_OP_LE_CREATE_CONN:
1863 hci_cs_le_create_conn(hdev, ev->status);
1864 break;
1865
1866 default:
1867 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1868 break;
1869 }
1870
1871 if (ev->opcode != HCI_OP_NOP)
1872 del_timer(&hdev->cmd_timer);
1873
1874 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
1875 atomic_set(&hdev->cmd_cnt, 1);
1876 if (!skb_queue_empty(&hdev->cmd_q))
1877 tasklet_schedule(&hdev->cmd_task);
1878 }
1879 }
1880
1881 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1882 {
1883 struct hci_ev_role_change *ev = (void *) skb->data;
1884 struct hci_conn *conn;
1885
1886 BT_DBG("%s status %d", hdev->name, ev->status);
1887
1888 hci_dev_lock(hdev);
1889
1890 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1891 if (conn) {
1892 if (!ev->status) {
1893 if (ev->role)
1894 conn->link_mode &= ~HCI_LM_MASTER;
1895 else
1896 conn->link_mode |= HCI_LM_MASTER;
1897 }
1898
1899 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);
1900
1901 hci_role_switch_cfm(conn, ev->status, ev->role);
1902 }
1903
1904 hci_dev_unlock(hdev);
1905 }
1906
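/* Number of Completed Packets event: the controller reports how many
 * packets it finished sending per connection handle.  Credit them back
 * to the ACL, LE or SCO counters (capped at the totals from Read
 * Buffer Size; LE credits fall back to the ACL pool when no dedicated
 * LE buffers were reported) and reschedule the TX tasklet.
 */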
1907 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
1908 {
1909 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
1910 __le16 *ptr;
1911 int i;
1912
1913 skb_pull(skb, sizeof(*ev));
1914
1915 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
1916
1917 if (skb->len < ev->num_hndl * 4) {
1918 BT_DBG("%s bad parameters", hdev->name);
1919 return;
1920 }
1921
1922 tasklet_disable(&hdev->tx_task);
1923
1924 for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
1925 struct hci_conn *conn;
1926 __u16 handle, count;
1927
1928 handle = get_unaligned_le16(ptr++);
1929 count = get_unaligned_le16(ptr++);
1930
1931 conn = hci_conn_hash_lookup_handle(hdev, handle);
1932 if (conn) {
1933 conn->sent -= count;
1934
1935 if (conn->type == ACL_LINK) {
1936 hdev->acl_cnt += count;
1937 if (hdev->acl_cnt > hdev->acl_pkts)
1938 hdev->acl_cnt = hdev->acl_pkts;
1939 } else if (conn->type == LE_LINK) {
1940 if (hdev->le_pkts) {
1941 hdev->le_cnt += count;
1942 if (hdev->le_cnt > hdev->le_pkts)
1943 hdev->le_cnt = hdev->le_pkts;
1944 } else {
1945 hdev->acl_cnt += count;
1946 if (hdev->acl_cnt > hdev->acl_pkts)
1947 hdev->acl_cnt = hdev->acl_pkts;
1948 }
1949 } else {
1950 hdev->sco_cnt += count;
1951 if (hdev->sco_cnt > hdev->sco_pkts)
1952 hdev->sco_cnt = hdev->sco_pkts;
1953 }
1954 }
1955 }
1956
1957 tasklet_schedule(&hdev->tx_task);
1958
1959 tasklet_enable(&hdev->tx_task);
1960 }
1961
1962 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1963 {
1964 struct hci_ev_mode_change *ev = (void *) skb->data;
1965 struct hci_conn *conn;
1966
1967 BT_DBG("%s status %d", hdev->name, ev->status);
1968
1969 hci_dev_lock(hdev);
1970
1971 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1972 if (conn) {
1973 conn->mode = ev->mode;
1974 conn->interval = __le16_to_cpu(ev->interval);
1975
1976 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
1977 if (conn->mode == HCI_CM_ACTIVE)
1978 conn->power_save = 1;
1979 else
1980 conn->power_save = 0;
1981 }
1982
1983 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1984 hci_sco_setup(conn, ev->status);
1985 }
1986
1987 hci_dev_unlock(hdev);
1988 }
1989
1990 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1991 {
1992 struct hci_ev_pin_code_req *ev = (void *) skb->data;
1993 struct hci_conn *conn;
1994
1995 BT_DBG("%s", hdev->name);
1996
1997 hci_dev_lock(hdev);
1998
1999 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2000 if (conn && conn->state == BT_CONNECTED) {
2001 hci_conn_hold(conn);
2002 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2003 hci_conn_put(conn);
2004 }
2005
2006 if (!test_bit(HCI_PAIRABLE, &hdev->flags))
2007 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2008 sizeof(ev->bdaddr), &ev->bdaddr);
2009
2010 if (test_bit(HCI_MGMT, &hdev->flags))
2011 mgmt_pin_code_request(hdev->id, &ev->bdaddr);
2012
2013 hci_dev_unlock(hdev);
2014 }
2015
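/* Link Key Request event: when the kernel stores link keys, reply with
 * the stored key for the peer if one is found and usable; missing
 * keys, debug keys (unless enabled) and unauthenticated keys on
 * connections that requested MITM protection all get a negative reply.
 */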
2016 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2017 {
2018 struct hci_ev_link_key_req *ev = (void *) skb->data;
2019 struct hci_cp_link_key_reply cp;
2020 struct hci_conn *conn;
2021 struct link_key *key;
2022
2023 BT_DBG("%s", hdev->name);
2024
2025 if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
2026 return;
2027
2028 hci_dev_lock(hdev);
2029
2030 key = hci_find_link_key(hdev, &ev->bdaddr);
2031 if (!key) {
2032 BT_DBG("%s link key not found for %s", hdev->name,
2033 batostr(&ev->bdaddr));
2034 goto not_found;
2035 }
2036
2037 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2038 batostr(&ev->bdaddr));
2039
2040 if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) && key->type == 0x03) {
2041 BT_DBG("%s ignoring debug key", hdev->name);
2042 goto not_found;
2043 }
2044
2045 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2046
2047 if (key->type == 0x04 && conn && conn->auth_type != 0xff &&
2048 (conn->auth_type & 0x01)) {
2049 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2050 goto not_found;
2051 }
2052
2053 bacpy(&cp.bdaddr, &ev->bdaddr);
2054 memcpy(cp.link_key, key->val, 16);
2055
2056 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2057
2058 hci_dev_unlock(hdev);
2059
2060 return;
2061
2062 not_found:
2063 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2064 hci_dev_unlock(hdev);
2065 }
2066
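/* Link Key Notification event: restore the default disconnect timeout
 * now that pairing has produced a key and, when the kernel stores link
 * keys, save the new key together with the PIN length that was used.
 */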
2067 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2068 {
2069 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2070 struct hci_conn *conn;
2071 u8 pin_len = 0;
2072
2073 BT_DBG("%s", hdev->name);
2074
2075 hci_dev_lock(hdev);
2076
2077 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2078 if (conn) {
2079 hci_conn_hold(conn);
2080 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2081 pin_len = conn->pin_length;
2082 hci_conn_put(conn);
2083 }
2084
2085 if (test_bit(HCI_LINK_KEYS, &hdev->flags))
2086 hci_add_link_key(hdev, 1, &ev->bdaddr, ev->link_key,
2087 ev->key_type, pin_len);
2088
2089 hci_dev_unlock(hdev);
2090 }
2091
2092 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2093 {
2094 struct hci_ev_clock_offset *ev = (void *) skb->data;
2095 struct hci_conn *conn;
2096
2097 BT_DBG("%s status %d", hdev->name, ev->status);
2098
2099 hci_dev_lock(hdev);
2100
2101 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2102 if (conn && !ev->status) {
2103 struct inquiry_entry *ie;
2104
2105 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2106 if (ie) {
2107 ie->data.clock_offset = ev->clock_offset;
2108 ie->timestamp = jiffies;
2109 }
2110 }
2111
2112 hci_dev_unlock(hdev);
2113 }
2114
2115 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2116 {
2117 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2118 struct hci_conn *conn;
2119
2120 BT_DBG("%s status %d", hdev->name, ev->status);
2121
2122 hci_dev_lock(hdev);
2123
2124 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2125 if (conn && !ev->status)
2126 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2127
2128 hci_dev_unlock(hdev);
2129 }
2130
2131 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2132 {
2133 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2134 struct inquiry_entry *ie;
2135
2136 BT_DBG("%s", hdev->name);
2137
2138 hci_dev_lock(hdev);
2139
2140 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2141 if (ie) {
2142 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2143 ie->timestamp = jiffies;
2144 }
2145
2146 hci_dev_unlock(hdev);
2147 }
2148
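/* Inquiry Result with RSSI: this event exists in two formats, with and
 * without the page scan mode field.  The per-response size is used to
 * tell them apart; either way the inquiry cache is updated and each
 * result is forwarded to the management interface. */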
2149 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2150 {
2151 struct inquiry_data data;
2152 int num_rsp = *((__u8 *) skb->data);
2153
2154 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2155
2156 if (!num_rsp)
2157 return;
2158
2159 hci_dev_lock(hdev);
2160
2161 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2162 struct inquiry_info_with_rssi_and_pscan_mode *info;
2163 info = (void *) (skb->data + 1);
2164
2165 for (; num_rsp; num_rsp--, info++) {
2166 bacpy(&data.bdaddr, &info->bdaddr);
2167 data.pscan_rep_mode = info->pscan_rep_mode;
2168 data.pscan_period_mode = info->pscan_period_mode;
2169 data.pscan_mode = info->pscan_mode;
2170 memcpy(data.dev_class, info->dev_class, 3);
2171 data.clock_offset = info->clock_offset;
2172 data.rssi = info->rssi;
2173 data.ssp_mode = 0x00;
2174 hci_inquiry_cache_update(hdev, &data);
2175 mgmt_device_found(hdev->id, &info->bdaddr,
2176 info->dev_class, info->rssi,
2177 NULL);
2178 }
2179 } else {
2180 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2181
2182 for (; num_rsp; num_rsp--, info++) {
2183 bacpy(&data.bdaddr, &info->bdaddr);
2184 data.pscan_rep_mode = info->pscan_rep_mode;
2185 data.pscan_period_mode = info->pscan_period_mode;
2186 data.pscan_mode = 0x00;
2187 memcpy(data.dev_class, info->dev_class, 3);
2188 data.clock_offset = info->clock_offset;
2189 data.rssi = info->rssi;
2190 data.ssp_mode = 0x00;
2191 hci_inquiry_cache_update(hdev, &data);
2192 mgmt_device_found(hdev->id, &info->bdaddr,
2193 info->dev_class, info->rssi,
2194 NULL);
2195 }
2196 }
2197
2198 hci_dev_unlock(hdev);
2199 }
2200
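/* Remote Extended Features: page 0x01 carries the remote host's Secure
 * Simple Pairing support in bit 0, which is cached both in the inquiry
 * entry and on the connection.  While still in BT_CONFIG the remote
 * name is requested and, if no outgoing authentication is needed, the
 * connection setup is completed. */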
2201 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2202 {
2203 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2204 struct hci_conn *conn;
2205
2206 BT_DBG("%s", hdev->name);
2207
2208 hci_dev_lock(hdev);
2209
2210 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2211 if (!conn)
2212 goto unlock;
2213
2214 if (!ev->status && ev->page == 0x01) {
2215 struct inquiry_entry *ie;
2216
2217 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2218 if (ie)
2219 ie->data.ssp_mode = (ev->features[0] & 0x01);
2220
2221 conn->ssp_mode = (ev->features[0] & 0x01);
2222 }
2223
2224 if (conn->state != BT_CONFIG)
2225 goto unlock;
2226
2227 if (!ev->status) {
2228 struct hci_cp_remote_name_req cp;
2229 memset(&cp, 0, sizeof(cp));
2230 bacpy(&cp.bdaddr, &conn->dst);
2231 cp.pscan_rep_mode = 0x02;
2232 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2233 }
2234
2235 if (!hci_outgoing_auth_needed(hdev, conn)) {
2236 conn->state = BT_CONNECTED;
2237 hci_proto_connect_cfm(conn, ev->status);
2238 hci_conn_put(conn);
2239 }
2240
2241 unlock:
2242 hci_dev_unlock(hdev);
2243 }
2244
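/* Synchronous Connection Complete: if the controller fell back from
 * eSCO to SCO, the existing eSCO connection object is reused.  For the
 * listed rejection codes an outgoing setup is retried once with a
 * different packet type selection before the connection is closed. */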
2245 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2246 {
2247 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2248 struct hci_conn *conn;
2249
2250 BT_DBG("%s status %d", hdev->name, ev->status);
2251
2252 hci_dev_lock(hdev);
2253
2254 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2255 if (!conn) {
2256 if (ev->link_type == ESCO_LINK)
2257 goto unlock;
2258
2259 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2260 if (!conn)
2261 goto unlock;
2262
2263 conn->type = SCO_LINK;
2264 }
2265
2266 switch (ev->status) {
2267 case 0x00:
2268 conn->handle = __le16_to_cpu(ev->handle);
2269 conn->state = BT_CONNECTED;
2270
2271 hci_conn_hold_device(conn);
2272 hci_conn_add_sysfs(conn);
2273 break;
2274
2275 case 0x11: /* Unsupported Feature or Parameter Value */
2276 case 0x1c: /* SCO interval rejected */
2277 case 0x1a: /* Unsupported Remote Feature */
2278 case 0x1f: /* Unspecified error */
2279 if (conn->out && conn->attempt < 2) {
2280 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2281 (hdev->esco_type & EDR_ESCO_MASK);
2282 hci_setup_sync(conn, conn->link->handle);
2283 goto unlock;
2284 }
2285 /* fall through */
2286
2287 default:
2288 conn->state = BT_CLOSED;
2289 break;
2290 }
2291
2292 hci_proto_connect_cfm(conn, ev->status);
2293 if (ev->status)
2294 hci_conn_del(conn);
2295
2296 unlock:
2297 hci_dev_unlock(hdev);
2298 }
2299
2300 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2301 {
2302 BT_DBG("%s", hdev->name);
2303 }
2304
2305 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2306 {
2307 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2308
2309 BT_DBG("%s status %d", hdev->name, ev->status);
2310 }
2311
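/* Extended Inquiry Result: like the RSSI variant, but each response
 * additionally carries EIR data, and responders are marked as SSP
 * capable in the inquiry cache. */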
2312 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2313 {
2314 struct inquiry_data data;
2315 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2316 int num_rsp = *((__u8 *) skb->data);
2317
2318 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2319
2320 if (!num_rsp)
2321 return;
2322
2323 hci_dev_lock(hdev);
2324
2325 for (; num_rsp; num_rsp--, info++) {
2326 bacpy(&data.bdaddr, &info->bdaddr);
2327 data.pscan_rep_mode = info->pscan_rep_mode;
2328 data.pscan_period_mode = info->pscan_period_mode;
2329 data.pscan_mode = 0x00;
2330 memcpy(data.dev_class, info->dev_class, 3);
2331 data.clock_offset = info->clock_offset;
2332 data.rssi = info->rssi;
2333 data.ssp_mode = 0x01;
2334 hci_inquiry_cache_update(hdev, &data);
2335 mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class,
2336 info->rssi, info->data);
2337 }
2338
2339 hci_dev_unlock(hdev);
2340 }
2341
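/* Pick the authentication requirements to use in an IO Capability
 * Reply, taking the remote side's stated requirements into account. */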
2342 static inline u8 hci_get_auth_req(struct hci_conn *conn)
2343 {
2344 	/* If the remote requests dedicated bonding, follow that lead */
2345 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2346 		/* If both the remote and local IO capabilities allow MITM
2347 		 * protection, require it; otherwise don't */
2348 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2349 return 0x02;
2350 else
2351 return 0x03;
2352 }
2353
2354 	/* If the remote requests no-bonding, follow that lead */
2355 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2356 return 0x00;
2357
2358 return conn->auth_type;
2359 }
2360
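/* IO Capability Request: only handled when the management interface is
 * in use.  Reply with our IO capability, authentication requirements
 * and OOB data availability if pairing is acceptable (the adapter is
 * pairable or the remote only asks for no-bonding); otherwise send a
 * negative reply with reason 0x16 (pairing not allowed). */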
2361 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2362 {
2363 struct hci_ev_io_capa_request *ev = (void *) skb->data;
2364 struct hci_conn *conn;
2365
2366 BT_DBG("%s", hdev->name);
2367
2368 hci_dev_lock(hdev);
2369
2370 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2371 if (!conn)
2372 goto unlock;
2373
2374 hci_conn_hold(conn);
2375
2376 if (!test_bit(HCI_MGMT, &hdev->flags))
2377 goto unlock;
2378
2379 if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
2380 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
2381 struct hci_cp_io_capability_reply cp;
2382
2383 bacpy(&cp.bdaddr, &ev->bdaddr);
2384 cp.capability = conn->io_capability;
2385 cp.authentication = hci_get_auth_req(conn);
2386
2387 if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
2388 hci_find_remote_oob_data(hdev, &conn->dst))
2389 cp.oob_data = 0x01;
2390 else
2391 cp.oob_data = 0x00;
2392
2393 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
2394 sizeof(cp), &cp);
2395 } else {
2396 struct hci_cp_io_capability_neg_reply cp;
2397
2398 bacpy(&cp.bdaddr, &ev->bdaddr);
2399 cp.reason = 0x16; /* Pairing not allowed */
2400
2401 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
2402 sizeof(cp), &cp);
2403 }
2404
2405 unlock:
2406 hci_dev_unlock(hdev);
2407 }
2408
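/* IO Capability Response: record the remote side's IO capability, OOB
 * data presence and authentication requirements for use later in the
 * pairing process. */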
2409 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
2410 {
2411 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
2412 struct hci_conn *conn;
2413
2414 BT_DBG("%s", hdev->name);
2415
2416 hci_dev_lock(hdev);
2417
2418 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2419 if (!conn)
2420 goto unlock;
2421
2422 hci_conn_hold(conn);
2423
2424 conn->remote_cap = ev->capability;
2425 conn->remote_oob = ev->oob_data;
2426 conn->remote_auth = ev->authentication;
2427
2428 unlock:
2429 hci_dev_unlock(hdev);
2430 }
2431
2432 static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
2433 struct sk_buff *skb)
2434 {
2435 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
2436
2437 BT_DBG("%s", hdev->name);
2438
2439 hci_dev_lock(hdev);
2440
2441 if (test_bit(HCI_MGMT, &hdev->flags))
2442 mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey);
2443
2444 hci_dev_unlock(hdev);
2445 }
2446
2447 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2448 {
2449 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
2450 struct hci_conn *conn;
2451
2452 BT_DBG("%s", hdev->name);
2453
2454 hci_dev_lock(hdev);
2455
2456 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2457 if (!conn)
2458 goto unlock;
2459
2460 	/* To avoid duplicate auth_failed events to user space we check
2461 	 * the HCI_CONN_AUTH_PEND flag, which is set when we initiated
2462 	 * the authentication. A traditional auth_complete event is
2463 	 * always produced when we are the initiator and is also mapped
2464 	 * to the mgmt_auth_failed event */
2465 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
2466 mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
2467
2468 hci_conn_put(conn);
2469
2470 unlock:
2471 hci_dev_unlock(hdev);
2472 }
2473
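/* Remote Host Supported Features: update the cached SSP support bit
 * for the remote host in the inquiry cache. */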
2474 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2475 {
2476 struct hci_ev_remote_host_features *ev = (void *) skb->data;
2477 struct inquiry_entry *ie;
2478
2479 BT_DBG("%s", hdev->name);
2480
2481 hci_dev_lock(hdev);
2482
2483 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2484 if (ie)
2485 ie->data.ssp_mode = (ev->features[0] & 0x01);
2486
2487 hci_dev_unlock(hdev);
2488 }
2489
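/* Remote OOB Data Request: answer with the stored OOB hash and
 * randomizer for this peer if we have them, otherwise send a negative
 * reply. */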
2490 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
2491 struct sk_buff *skb)
2492 {
2493 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
2494 struct oob_data *data;
2495
2496 BT_DBG("%s", hdev->name);
2497
2498 hci_dev_lock(hdev);
2499
2500 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
2501 if (data) {
2502 struct hci_cp_remote_oob_data_reply cp;
2503
2504 bacpy(&cp.bdaddr, &ev->bdaddr);
2505 memcpy(cp.hash, data->hash, sizeof(cp.hash));
2506 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
2507
2508 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
2509 &cp);
2510 } else {
2511 struct hci_cp_remote_oob_data_neg_reply cp;
2512
2513 bacpy(&cp.bdaddr, &ev->bdaddr);
2514 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
2515 &cp);
2516 }
2517
2518 hci_dev_unlock(hdev);
2519 }
2520
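/* LE Connection Complete: when no hci_conn exists yet (e.g. for an
 * incoming connection) one is created here.  On failure the upper
 * layers are notified and the connection object is deleted again. */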
2521 static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2522 {
2523 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
2524 struct hci_conn *conn;
2525
2526 BT_DBG("%s status %d", hdev->name, ev->status);
2527
2528 hci_dev_lock(hdev);
2529
2530 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
2531 if (!conn) {
2532 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
2533 if (!conn) {
2534 BT_ERR("No memory for new connection");
2535 hci_dev_unlock(hdev);
2536 return;
2537 }
2538 }
2539
2540 if (ev->status) {
2541 hci_proto_connect_cfm(conn, ev->status);
2542 conn->state = BT_CLOSED;
2543 hci_conn_del(conn);
2544 goto unlock;
2545 }
2546
2547 conn->handle = __le16_to_cpu(ev->handle);
2548 conn->state = BT_CONNECTED;
2549
2550 hci_conn_hold_device(conn);
2551 hci_conn_add_sysfs(conn);
2552
2553 hci_proto_connect_cfm(conn, ev->status);
2554
2555 unlock:
2556 hci_dev_unlock(hdev);
2557 }
2558
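/* LE Meta Event: all LE events share one event code and are
 * distinguished by a subevent field.  Strip the meta header and
 * dispatch on the subevent; only LE Connection Complete is handled so
 * far. */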
2559 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
2560 {
2561 struct hci_ev_le_meta *le_ev = (void *) skb->data;
2562
2563 skb_pull(skb, sizeof(*le_ev));
2564
2565 switch (le_ev->subevent) {
2566 case HCI_EV_LE_CONN_COMPLETE:
2567 hci_le_conn_complete_evt(hdev, skb);
2568 break;
2569
2570 default:
2571 break;
2572 }
2573 }
2574
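/* Main dispatcher for incoming HCI event packets: strip the event
 * header, hand the packet to the matching per-event handler and free
 * the skb afterwards. */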
2575 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
2576 {
2577 struct hci_event_hdr *hdr = (void *) skb->data;
2578 __u8 event = hdr->evt;
2579
2580 skb_pull(skb, HCI_EVENT_HDR_SIZE);
2581
2582 switch (event) {
2583 case HCI_EV_INQUIRY_COMPLETE:
2584 hci_inquiry_complete_evt(hdev, skb);
2585 break;
2586
2587 case HCI_EV_INQUIRY_RESULT:
2588 hci_inquiry_result_evt(hdev, skb);
2589 break;
2590
2591 case HCI_EV_CONN_COMPLETE:
2592 hci_conn_complete_evt(hdev, skb);
2593 break;
2594
2595 case HCI_EV_CONN_REQUEST:
2596 hci_conn_request_evt(hdev, skb);
2597 break;
2598
2599 case HCI_EV_DISCONN_COMPLETE:
2600 hci_disconn_complete_evt(hdev, skb);
2601 break;
2602
2603 case HCI_EV_AUTH_COMPLETE:
2604 hci_auth_complete_evt(hdev, skb);
2605 break;
2606
2607 case HCI_EV_REMOTE_NAME:
2608 hci_remote_name_evt(hdev, skb);
2609 break;
2610
2611 case HCI_EV_ENCRYPT_CHANGE:
2612 hci_encrypt_change_evt(hdev, skb);
2613 break;
2614
2615 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
2616 hci_change_link_key_complete_evt(hdev, skb);
2617 break;
2618
2619 case HCI_EV_REMOTE_FEATURES:
2620 hci_remote_features_evt(hdev, skb);
2621 break;
2622
2623 case HCI_EV_REMOTE_VERSION:
2624 hci_remote_version_evt(hdev, skb);
2625 break;
2626
2627 case HCI_EV_QOS_SETUP_COMPLETE:
2628 hci_qos_setup_complete_evt(hdev, skb);
2629 break;
2630
2631 case HCI_EV_CMD_COMPLETE:
2632 hci_cmd_complete_evt(hdev, skb);
2633 break;
2634
2635 case HCI_EV_CMD_STATUS:
2636 hci_cmd_status_evt(hdev, skb);
2637 break;
2638
2639 case HCI_EV_ROLE_CHANGE:
2640 hci_role_change_evt(hdev, skb);
2641 break;
2642
2643 case HCI_EV_NUM_COMP_PKTS:
2644 hci_num_comp_pkts_evt(hdev, skb);
2645 break;
2646
2647 case HCI_EV_MODE_CHANGE:
2648 hci_mode_change_evt(hdev, skb);
2649 break;
2650
2651 case HCI_EV_PIN_CODE_REQ:
2652 hci_pin_code_request_evt(hdev, skb);
2653 break;
2654
2655 case HCI_EV_LINK_KEY_REQ:
2656 hci_link_key_request_evt(hdev, skb);
2657 break;
2658
2659 case HCI_EV_LINK_KEY_NOTIFY:
2660 hci_link_key_notify_evt(hdev, skb);
2661 break;
2662
2663 case HCI_EV_CLOCK_OFFSET:
2664 hci_clock_offset_evt(hdev, skb);
2665 break;
2666
2667 case HCI_EV_PKT_TYPE_CHANGE:
2668 hci_pkt_type_change_evt(hdev, skb);
2669 break;
2670
2671 case HCI_EV_PSCAN_REP_MODE:
2672 hci_pscan_rep_mode_evt(hdev, skb);
2673 break;
2674
2675 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
2676 hci_inquiry_result_with_rssi_evt(hdev, skb);
2677 break;
2678
2679 case HCI_EV_REMOTE_EXT_FEATURES:
2680 hci_remote_ext_features_evt(hdev, skb);
2681 break;
2682
2683 case HCI_EV_SYNC_CONN_COMPLETE:
2684 hci_sync_conn_complete_evt(hdev, skb);
2685 break;
2686
2687 case HCI_EV_SYNC_CONN_CHANGED:
2688 hci_sync_conn_changed_evt(hdev, skb);
2689 break;
2690
2691 case HCI_EV_SNIFF_SUBRATE:
2692 hci_sniff_subrate_evt(hdev, skb);
2693 break;
2694
2695 case HCI_EV_EXTENDED_INQUIRY_RESULT:
2696 hci_extended_inquiry_result_evt(hdev, skb);
2697 break;
2698
2699 case HCI_EV_IO_CAPA_REQUEST:
2700 hci_io_capa_request_evt(hdev, skb);
2701 break;
2702
2703 case HCI_EV_IO_CAPA_REPLY:
2704 hci_io_capa_reply_evt(hdev, skb);
2705 break;
2706
2707 case HCI_EV_USER_CONFIRM_REQUEST:
2708 hci_user_confirm_request_evt(hdev, skb);
2709 break;
2710
2711 case HCI_EV_SIMPLE_PAIR_COMPLETE:
2712 hci_simple_pair_complete_evt(hdev, skb);
2713 break;
2714
2715 case HCI_EV_REMOTE_HOST_FEATURES:
2716 hci_remote_host_features_evt(hdev, skb);
2717 break;
2718
2719 case HCI_EV_LE_META:
2720 hci_le_meta_evt(hdev, skb);
2721 break;
2722
2723 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
2724 hci_remote_oob_data_request_evt(hdev, skb);
2725 break;
2726
2727 default:
2728 BT_DBG("%s event 0x%x", hdev->name, event);
2729 break;
2730 }
2731
2732 kfree_skb(skb);
2733 hdev->stat.evt_rx++;
2734 }
2735
2736 /* Generate internal stack event */
2737 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
2738 {
2739 struct hci_event_hdr *hdr;
2740 struct hci_ev_stack_internal *ev;
2741 struct sk_buff *skb;
2742
2743 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
2744 if (!skb)
2745 return;
2746
2747 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
2748 hdr->evt = HCI_EV_STACK_INTERNAL;
2749 hdr->plen = sizeof(*ev) + dlen;
2750
2751 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
2752 ev->type = type;
2753 memcpy(ev->data, data, dlen);
2754
2755 bt_cb(skb)->incoming = 1;
2756 __net_timestamp(skb);
2757
2758 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
2759 skb->dev = (void *) hdev;
2760 hci_send_to_sock(hdev, skb, NULL);
2761 kfree_skb(skb);
2762 }