2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
/* 128-bit all-zero link key value (used to recognize unset keys). */
41 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
42 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* Convert whole seconds to jiffies via msecs_to_jiffies(). */
44 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
46 /* Handle HCI Event packets */
/* Command Complete for HCI_Inquiry_Cancel: treats status 0x0c with no active
 * inquiry as benign, clears HCI_INQUIRY, wakes waiters on that bit, and stops
 * discovery unless an LE active scan is running. (extraction gappy; code verbatim)
 */
48 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
51 __u8 status = *((__u8 *) skb->data);
53 BT_DBG("%s status 0x%2.2x", hdev->name, status);
55 /* It is possible that we receive Inquiry Complete event right
56 * before we receive Inquiry Cancel Command Complete event, in
57 * which case the latter event should have status of Command
58 * Disallowed (0x0c). This should not be treated as error, since
59 * we actually achieve what Inquiry Cancel wants to achieve,
60 * which is to end the last Inquiry session.
62 if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
63 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
72 clear_bit(HCI_INQUIRY, &hdev->flags);
73 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
74 wake_up_bit(&hdev->flags, HCI_INQUIRY)
77 /* Set discovery state to stopped if we're not doing LE active
80 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
81 hdev->le_scan_type != LE_SCAN_ACTIVE)
82 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
85 hci_conn_check_pending(hdev);
/* Command Complete for Periodic Inquiry Mode: sets HCI_PERIODIC_INQ.
 * (extraction gappy; code verbatim)
 */
88 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
90 __u8 status = *((__u8 *) skb->data);
92 BT_DBG("%s status 0x%2.2x", hdev->name, status);
97 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
/* Command Complete for Exit Periodic Inquiry Mode: clears HCI_PERIODIC_INQ
 * and re-checks pending connections. (extraction gappy; code verbatim)
 */
100 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
102 __u8 status = *((__u8 *) skb->data);
104 BT_DBG("%s status 0x%2.2x", hdev->name, status);
109 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
111 hci_conn_check_pending(hdev);
/* Command Complete for Remote Name Request Cancel: debug trace only here.
 * (extraction gappy; code verbatim)
 */
114 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
117 BT_DBG("%s", hdev->name);
/* Command Complete for Role Discovery: stores the reported role on the
 * connection found by handle. (extraction gappy; code verbatim)
 */
120 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
122 struct hci_rp_role_discovery *rp = (void *) skb->data;
123 struct hci_conn *conn;
125 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
132 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
134 conn->role = rp->role;
136 hci_dev_unlock(hdev);
/* Command Complete for Read Link Policy: caches the reply's policy on the
 * connection found by handle. (extraction gappy; code verbatim)
 */
139 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
141 struct hci_rp_read_link_policy *rp = (void *) skb->data;
142 struct hci_conn *conn;
144 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
151 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 conn->link_policy = __le16_to_cpu(rp->policy);
155 hci_dev_unlock(hdev);
/* Command Complete for Write Link Policy: copies the policy from the sent
 * command parameters (offset 2) onto the connection. (extraction gappy; code verbatim)
 */
158 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
160 struct hci_rp_write_link_policy *rp = (void *) skb->data;
161 struct hci_conn *conn;
164 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
169 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
175 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
177 conn->link_policy = get_unaligned_le16(sent + 2);
179 hci_dev_unlock(hdev);
/* Command Complete for Read Default Link Policy: caches the value in
 * hdev->link_policy. (extraction gappy; code verbatim)
 */
182 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
185 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
187 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
192 hdev->link_policy = __le16_to_cpu(rp->policy);
/* Command Complete for Write Default Link Policy: caches the value we sent.
 * (extraction gappy; code verbatim)
 */
195 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
198 __u8 status = *((__u8 *) skb->data);
201 BT_DBG("%s status 0x%2.2x", hdev->name, status);
206 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
210 hdev->link_policy = get_unaligned_le16(sent);
/* Command Complete for HCI_Reset: clears HCI_RESET and all volatile flags,
 * stops discovery, and resets cached adv/scan-response data, TX power,
 * scan type, SSP debug mode, and the LE accept/resolving lists.
 * (extraction gappy; code verbatim)
 */
213 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
215 __u8 status = *((__u8 *) skb->data);
217 BT_DBG("%s status 0x%2.2x", hdev->name, status);
219 clear_bit(HCI_RESET, &hdev->flags);
224 /* Reset all non-persistent flags */
225 hci_dev_clear_volatile_flags(hdev);
227 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
229 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
230 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
232 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
233 hdev->adv_data_len = 0;
235 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
236 hdev->scan_rsp_data_len = 0;
238 hdev->le_scan_type = LE_SCAN_PASSIVE;
240 hdev->ssp_debug_mode = 0;
242 hci_bdaddr_list_clear(&hdev->le_accept_list);
243 hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Command Complete for Read Stored Link Key: when the sent command asked for
 * all keys (read_all == 0x01) and the reply succeeded, caches max/num keys.
 * (extraction gappy; code verbatim)
 */
246 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
249 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
250 struct hci_cp_read_stored_link_key *sent;
252 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
254 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
258 if (!rp->status && sent->read_all == 0x01) {
259 hdev->stored_max_keys = rp->max_keys;
260 hdev->stored_num_keys = rp->num_keys;
/* Command Complete for Delete Stored Link Key: decrements the cached count of
 * stored keys, clamping at zero. (extraction gappy; code verbatim)
 */
264 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
267 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
269 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
274 if (rp->num_keys <= hdev->stored_num_keys)
275 hdev->stored_num_keys -= rp->num_keys;
277 hdev->stored_num_keys = 0;
/* Command Complete for Write Local Name: notifies mgmt when HCI_MGMT is set
 * and caches the name we sent in hdev->dev_name. (extraction gappy; code verbatim)
 */
280 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
282 __u8 status = *((__u8 *) skb->data);
285 BT_DBG("%s status 0x%2.2x", hdev->name, status);
287 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
293 if (hci_dev_test_flag(hdev, HCI_MGMT))
294 mgmt_set_local_name_complete(hdev, sent, status);
296 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
298 hci_dev_unlock(hdev);
/* Command Complete for Read Local Name: caches the name, but only during
 * controller setup/config. (extraction gappy; code verbatim)
 */
301 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
303 struct hci_rp_read_local_name *rp = (void *) skb->data;
305 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
310 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
311 hci_dev_test_flag(hdev, HCI_CONFIG))
312 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* Command Complete for Write Authentication Enable: mirrors the sent param
 * into the HCI_AUTH flag and notifies mgmt. (extraction gappy; code verbatim)
 */
315 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
317 __u8 status = *((__u8 *) skb->data);
320 BT_DBG("%s status 0x%2.2x", hdev->name, status);
322 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
329 __u8 param = *((__u8 *) sent);
331 if (param == AUTH_ENABLED)
332 set_bit(HCI_AUTH, &hdev->flags);
334 clear_bit(HCI_AUTH, &hdev->flags);
337 if (hci_dev_test_flag(hdev, HCI_MGMT))
338 mgmt_auth_enable_complete(hdev, status);
340 hci_dev_unlock(hdev);
/* Command Complete for Write Encryption Mode: mirrors the sent param into the
 * HCI_ENCRYPT flag. (extraction gappy; code verbatim)
 */
343 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
345 __u8 status = *((__u8 *) skb->data);
349 BT_DBG("%s status 0x%2.2x", hdev->name, status);
354 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
358 param = *((__u8 *) sent);
361 set_bit(HCI_ENCRYPT, &hdev->flags);
363 clear_bit(HCI_ENCRYPT, &hdev->flags);
/* Command Complete for Write Scan Enable: updates HCI_ISCAN/HCI_PSCAN from
 * the sent scan-enable bitmask and resets discov_timeout on one path.
 * (extraction gappy; code verbatim)
 */
366 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
368 __u8 status = *((__u8 *) skb->data);
372 BT_DBG("%s status 0x%2.2x", hdev->name, status);
374 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
378 param = *((__u8 *) sent);
383 hdev->discov_timeout = 0;
387 if (param & SCAN_INQUIRY)
388 set_bit(HCI_ISCAN, &hdev->flags);
390 clear_bit(HCI_ISCAN, &hdev->flags);
392 if (param & SCAN_PAGE)
393 set_bit(HCI_PSCAN, &hdev->flags);
395 clear_bit(HCI_PSCAN, &hdev->flags);
398 hci_dev_unlock(hdev);
/* Command Complete for Set Event Filter: tracks whether a filter is active
 * via HCI_EVENT_FILTER_CONFIGURED (cleared on HCI_FLT_CLEAR_ALL).
 * (extraction gappy; code verbatim)
 */
401 static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
403 __u8 status = *((__u8 *)skb->data);
404 struct hci_cp_set_event_filter *cp;
407 BT_DBG("%s status 0x%2.2x", hdev->name, status);
412 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
416 cp = (struct hci_cp_set_event_filter *)sent;
418 if (cp->flt_type == HCI_FLT_CLEAR_ALL)
419 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
421 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
/* Command Complete for Read Class of Device: caches the 3-byte class.
 * (extraction gappy; code verbatim)
 */
424 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
426 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
428 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
433 memcpy(hdev->dev_class, rp->dev_class, 3);
435 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
436 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
/* Command Complete for Write Class of Device: caches the class we sent and
 * notifies mgmt. (extraction gappy; code verbatim)
 */
439 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
441 __u8 status = *((__u8 *) skb->data);
444 BT_DBG("%s status 0x%2.2x", hdev->name, status);
446 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
453 memcpy(hdev->dev_class, sent, 3);
455 if (hci_dev_test_flag(hdev, HCI_MGMT))
456 mgmt_set_class_of_dev_complete(hdev, sent, status);
458 hci_dev_unlock(hdev);
/* Command Complete for Read Voice Setting: caches the value when it changed
 * and notifies the driver via hdev->notify. (extraction gappy; code verbatim)
 */
461 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
463 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
466 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
471 setting = __le16_to_cpu(rp->voice_setting);
473 if (hdev->voice_setting == setting)
476 hdev->voice_setting = setting;
478 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
481 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for Write Voice Setting: caches the value we sent when it
 * changed and notifies the driver. (extraction gappy; code verbatim)
 */
484 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
487 __u8 status = *((__u8 *) skb->data);
491 BT_DBG("%s status 0x%2.2x", hdev->name, status);
496 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
500 setting = get_unaligned_le16(sent);
502 if (hdev->voice_setting == setting)
505 hdev->voice_setting = setting;
507 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
510 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for Read Number of Supported IAC: caches the count.
 * (extraction gappy; code verbatim)
 */
513 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
516 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
518 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
523 hdev->num_iac = rp->num_iac;
525 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
/* Command Complete for Write Simple Pairing Mode: mirrors the sent mode into
 * the LMP_HOST_SSP feature bit and the HCI_SSP_ENABLED flag, and notifies
 * mgmt. (extraction gappy; code verbatim)
 */
528 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
530 __u8 status = *((__u8 *) skb->data);
531 struct hci_cp_write_ssp_mode *sent;
533 BT_DBG("%s status 0x%2.2x", hdev->name, status);
535 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
543 hdev->features[1][0] |= LMP_HOST_SSP;
545 hdev->features[1][0] &= ~LMP_HOST_SSP;
548 if (hci_dev_test_flag(hdev, HCI_MGMT))
549 mgmt_ssp_enable_complete(hdev, sent->mode, status);
552 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
554 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
557 hci_dev_unlock(hdev);
/* Command Complete for Write Secure Connections Host Support: mirrors the
 * sent support value into the LMP_HOST_SC feature bit; when mgmt is not in
 * control and the command succeeded, also updates HCI_SC_ENABLED.
 * (extraction gappy; code verbatim)
 */
560 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
562 u8 status = *((u8 *) skb->data);
563 struct hci_cp_write_sc_support *sent;
565 BT_DBG("%s status 0x%2.2x", hdev->name, status);
567 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
575 hdev->features[1][0] |= LMP_HOST_SC;
577 hdev->features[1][0] &= ~LMP_HOST_SC;
580 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
582 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
584 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
587 hci_dev_unlock(hdev);
/* Command Complete for Read Local Version Information: caches HCI/LMP
 * versions and manufacturer, but only during setup/config.
 * (extraction gappy; code verbatim)
 */
590 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
592 struct hci_rp_read_local_version *rp = (void *) skb->data;
594 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
599 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
600 hci_dev_test_flag(hdev, HCI_CONFIG)) {
601 hdev->hci_ver = rp->hci_ver;
602 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
603 hdev->lmp_ver = rp->lmp_ver;
604 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
605 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
/* Command Complete for Read Local Supported Commands: caches the command
 * bitmap, but only during setup/config. (extraction gappy; code verbatim)
 */
609 static void hci_cc_read_local_commands(struct hci_dev *hdev,
612 struct hci_rp_read_local_commands *rp = (void *) skb->data;
614 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
619 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
620 hci_dev_test_flag(hdev, HCI_CONFIG))
621 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* Command Complete for Read Authenticated Payload Timeout: stores the timeout
 * on the connection found by handle. (extraction gappy; code verbatim)
 */
624 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
627 struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
628 struct hci_conn *conn;
630 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
637 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
639 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
641 hci_dev_unlock(hdev);
/* Command Complete for Write Authenticated Payload Timeout: stores the
 * timeout from the sent command params (offset 2) on the connection.
 * (extraction gappy; code verbatim)
 */
644 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
647 struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
648 struct hci_conn *conn;
651 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
656 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
662 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
664 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
666 hci_dev_unlock(hdev);
/* Command Complete for Read Local Supported Features: caches the feature
 * page and derives the supported ACL packet types and (e)SCO link types
 * from the individual LMP feature bits. (extraction gappy; code verbatim)
 */
669 static void hci_cc_read_local_features(struct hci_dev *hdev,
672 struct hci_rp_read_local_features *rp = (void *) skb->data;
674 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
679 memcpy(hdev->features, rp->features, 8);
681 /* Adjust default settings according to features
682 * supported by device. */
684 if (hdev->features[0][0] & LMP_3SLOT)
685 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
687 if (hdev->features[0][0] & LMP_5SLOT)
688 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
690 if (hdev->features[0][1] & LMP_HV2) {
691 hdev->pkt_type |= (HCI_HV2);
692 hdev->esco_type |= (ESCO_HV2);
695 if (hdev->features[0][1] & LMP_HV3) {
696 hdev->pkt_type |= (HCI_HV3);
697 hdev->esco_type |= (ESCO_HV3);
700 if (lmp_esco_capable(hdev))
701 hdev->esco_type |= (ESCO_EV3);
703 if (hdev->features[0][4] & LMP_EV4)
704 hdev->esco_type |= (ESCO_EV4);
706 if (hdev->features[0][4] & LMP_EV5)
707 hdev->esco_type |= (ESCO_EV5);
709 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
710 hdev->esco_type |= (ESCO_2EV3);
712 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
713 hdev->esco_type |= (ESCO_3EV3);
715 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
716 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
/* Command Complete for Read Local Extended Features: tracks the highest
 * feature page and caches the returned page when within bounds.
 * (extraction gappy; code verbatim)
 */
719 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
722 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
724 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
729 if (hdev->max_page < rp->max_page)
730 hdev->max_page = rp->max_page;
732 if (rp->page < HCI_MAX_PAGES)
733 memcpy(hdev->features[rp->page], rp->features, 8);
/* Command Complete for Read Flow Control Mode: caches the mode.
 * (extraction gappy; code verbatim)
 */
736 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
739 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
741 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
746 hdev->flow_ctl_mode = rp->mode;
/* Command Complete for Read Buffer Size: caches ACL/SCO MTUs and packet
 * counts and initializes the available-credit counters from them.
 * (extraction gappy; code verbatim)
 */
749 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
751 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
753 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
758 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
759 hdev->sco_mtu = rp->sco_mtu;
760 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
761 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
763 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
768 hdev->acl_cnt = hdev->acl_pkts;
769 hdev->sco_cnt = hdev->sco_pkts;
771 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
772 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
/* Command Complete for Read BD_ADDR: caches the controller address during
 * init and records the original setup address during setup.
 * (extraction gappy; code verbatim)
 */
775 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
777 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
779 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
784 if (test_bit(HCI_INIT, &hdev->flags))
785 bacpy(&hdev->bdaddr, &rp->bdaddr);
787 if (hci_dev_test_flag(hdev, HCI_SETUP))
788 bacpy(&hdev->setup_addr, &rp->bdaddr);
/* Command Complete for Read Local Pairing Options: caches pairing options
 * and max encryption key size during setup/config. (extraction gappy; code verbatim)
 */
791 static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
794 struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
796 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
801 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
802 hci_dev_test_flag(hdev, HCI_CONFIG)) {
803 hdev->pairing_opts = rp->pairing_opts;
804 hdev->max_enc_key_size = rp->max_key_size;
/* Command Complete for Read Page Scan Activity: caches interval/window, but
 * only during init. (extraction gappy; code verbatim)
 */
808 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
811 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
813 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
818 if (test_bit(HCI_INIT, &hdev->flags)) {
819 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
820 hdev->page_scan_window = __le16_to_cpu(rp->window);
/* Command Complete for Write Page Scan Activity: caches the interval/window
 * we sent. (extraction gappy; code verbatim)
 */
824 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
827 u8 status = *((u8 *) skb->data);
828 struct hci_cp_write_page_scan_activity *sent;
830 BT_DBG("%s status 0x%2.2x", hdev->name, status);
835 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
839 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
840 hdev->page_scan_window = __le16_to_cpu(sent->window);
/* Command Complete for Read Page Scan Type: caches the type during init.
 * (extraction gappy; code verbatim)
 */
843 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
846 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
848 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
853 if (test_bit(HCI_INIT, &hdev->flags))
854 hdev->page_scan_type = rp->type;
/* Command Complete for Write Page Scan Type: caches the type we sent.
 * (extraction gappy; code verbatim)
 */
857 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
860 u8 status = *((u8 *) skb->data);
863 BT_DBG("%s status 0x%2.2x", hdev->name, status);
868 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
870 hdev->page_scan_type = *type;
/* Command Complete for Read Data Block Size: caches block MTU/length/count
 * and initializes the available block counter. (extraction gappy; code verbatim)
 */
873 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
876 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
878 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
883 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
884 hdev->block_len = __le16_to_cpu(rp->block_len);
885 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
887 hdev->block_cnt = hdev->num_blocks;
889 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
890 hdev->block_cnt, hdev->block_len);
/* Command Complete for Read Clock: which == 0x00 means the local clock
 * (stored on hdev); otherwise the piconet clock and accuracy are stored on
 * the connection found by handle. Validates skb length against the reply
 * struct first. (extraction gappy; code verbatim)
 */
893 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
895 struct hci_rp_read_clock *rp = (void *) skb->data;
896 struct hci_cp_read_clock *cp;
897 struct hci_conn *conn;
899 BT_DBG("%s", hdev->name);
901 if (skb->len < sizeof(*rp))
909 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
913 if (cp->which == 0x00) {
914 hdev->clock = le32_to_cpu(rp->clock);
918 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
920 conn->clock = le32_to_cpu(rp->clock);
921 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
925 hci_dev_unlock(hdev);
/* Command Complete for Read Local AMP Info: caches all AMP controller
 * parameters (status, bandwidths, latency, PDU size, capabilities, flush
 * timeouts). (extraction gappy; code verbatim)
 */
928 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
931 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
933 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
938 hdev->amp_status = rp->amp_status;
939 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
940 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
941 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
942 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
943 hdev->amp_type = rp->amp_type;
944 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
945 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
946 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
947 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
/* Command Complete for Read Inquiry Response TX Power: caches the value.
 * (extraction gappy; code verbatim)
 */
950 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
953 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
955 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
960 hdev->inq_tx_power = rp->tx_power;
/* Command Complete for Read Default Erroneous Data Reporting: caches the
 * setting. (extraction gappy; code verbatim)
 */
963 static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
966 struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
968 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
973 hdev->err_data_reporting = rp->err_data_reporting;
/* Command Complete for Write Default Erroneous Data Reporting: caches the
 * setting we sent. (extraction gappy; code verbatim)
 */
976 static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
979 __u8 status = *((__u8 *)skb->data);
980 struct hci_cp_write_def_err_data_reporting *cp;
982 BT_DBG("%s status 0x%2.2x", hdev->name, status);
987 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
991 hdev->err_data_reporting = cp->err_data_reporting;
/* Command Complete for PIN Code Request Reply: notifies mgmt, then records
 * the PIN length on the matching ACL connection. (extraction gappy; code verbatim)
 */
994 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
996 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
997 struct hci_cp_pin_code_reply *cp;
998 struct hci_conn *conn;
1000 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1004 if (hci_dev_test_flag(hdev, HCI_MGMT))
1005 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1010 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1014 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1016 conn->pin_length = cp->pin_len;
1019 hci_dev_unlock(hdev);
/* Command Complete for PIN Code Request Negative Reply: notifies mgmt.
 * (extraction gappy; code verbatim)
 */
1022 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1024 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
1026 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1030 if (hci_dev_test_flag(hdev, HCI_MGMT))
1031 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1034 hci_dev_unlock(hdev);
/* Command Complete for LE Read Buffer Size: caches LE MTU and packet count
 * and initializes the LE credit counter. (extraction gappy; code verbatim)
 */
1037 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1038 struct sk_buff *skb)
1040 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1042 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1047 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1048 hdev->le_pkts = rp->le_max_pkt;
1050 hdev->le_cnt = hdev->le_pkts;
1052 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
/* Command Complete for LE Read Local Supported Features: caches the 8-byte
 * LE feature mask. (extraction gappy; code verbatim)
 */
1055 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1056 struct sk_buff *skb)
1058 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1060 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1065 memcpy(hdev->le_features, rp->features, 8);
/* Command Complete for LE Read Advertising Channel TX Power: caches it.
 * (extraction gappy; code verbatim)
 */
1068 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1069 struct sk_buff *skb)
1071 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1073 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1078 hdev->adv_tx_power = rp->tx_power;
/* Command Complete for User Confirmation Request Reply: notifies mgmt.
 * (extraction gappy; code verbatim)
 */
1081 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1083 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1085 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1089 if (hci_dev_test_flag(hdev, HCI_MGMT))
1090 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1093 hci_dev_unlock(hdev);
/* Command Complete for User Confirmation Request Negative Reply: notifies
 * mgmt. (extraction gappy; code verbatim)
 */
1096 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1097 struct sk_buff *skb)
1099 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1101 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1105 if (hci_dev_test_flag(hdev, HCI_MGMT))
1106 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1107 ACL_LINK, 0, rp->status);
1109 hci_dev_unlock(hdev);
/* Command Complete for User Passkey Request Reply: notifies mgmt.
 * (extraction gappy; code verbatim)
 */
1112 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1114 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1116 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1120 if (hci_dev_test_flag(hdev, HCI_MGMT))
1121 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1124 hci_dev_unlock(hdev);
/* Command Complete for User Passkey Request Negative Reply: notifies mgmt.
 * (extraction gappy; code verbatim)
 */
1127 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1128 struct sk_buff *skb)
1130 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1132 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1136 if (hci_dev_test_flag(hdev, HCI_MGMT))
1137 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1138 ACL_LINK, 0, rp->status);
1140 hci_dev_unlock(hdev);
/* Command Complete for Read Local OOB Data: only a debug trace is visible
 * here. (extraction gappy; code verbatim)
 */
1143 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1144 struct sk_buff *skb)
1146 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1148 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete for Read Local OOB Extended Data: only a debug trace is
 * visible here. (extraction gappy; code verbatim)
 */
1151 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1152 struct sk_buff *skb)
1154 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1156 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete for LE Set Random Address: caches the address we sent;
 * if it equals the current RPA, clears HCI_RPA_EXPIRED and re-arms the RPA
 * expiry work with rpa_timeout. (extraction gappy; code verbatim)
 */
1159 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1161 __u8 status = *((__u8 *) skb->data);
1164 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1169 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1175 bacpy(&hdev->random_addr, sent);
1177 if (!bacmp(&hdev->rpa, sent)) {
1178 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1179 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1180 secs_to_jiffies(hdev->rpa_timeout));
1183 hci_dev_unlock(hdev);
/* Command Complete for LE Set Default PHY: caches the default TX/RX PHY
 * preferences we sent. (extraction gappy; code verbatim)
 */
1186 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1188 __u8 status = *((__u8 *) skb->data);
1189 struct hci_cp_le_set_default_phy *cp;
1191 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1196 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1202 hdev->le_tx_def_phys = cp->tx_phys;
1203 hdev->le_rx_def_phys = cp->rx_phys;
1205 hci_dev_unlock(hdev);
/* Command Complete for LE Set Advertising Set Random Address: updates the
 * per-instance random address for adv handles other than 0x00 (handle 0x00
 * uses LE Set Random Address instead); re-arms the instance's RPA expiry
 * work when the address matches the current RPA. (extraction gappy; code verbatim)
 */
1208 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1209 struct sk_buff *skb)
1211 __u8 status = *((__u8 *) skb->data);
1212 struct hci_cp_le_set_adv_set_rand_addr *cp;
1213 struct adv_info *adv;
1218 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1219 /* Update only in case the adv instance since handle 0x00 shall be using
1220 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
1221 * non-extended adverting.
1223 if (!cp || !cp->handle)
1228 adv = hci_find_adv_instance(hdev, cp->handle);
1230 bacpy(&adv->random_addr, &cp->bdaddr);
1231 if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1232 adv->rpa_expired = false;
1233 queue_delayed_work(hdev->workqueue,
1234 &adv->rpa_expired_cb,
1235 secs_to_jiffies(hdev->rpa_timeout));
1239 hci_dev_unlock(hdev);
/* Command Complete for LE Read Transmit Power: caches min/max LE TX power.
 * (extraction gappy; code verbatim)
 */
1242 static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
1243 struct sk_buff *skb)
1245 struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;
1247 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1252 hdev->min_le_tx_power = rp->min_le_tx_power;
1253 hdev->max_le_tx_power = rp->max_le_tx_power;
/* Command Complete for LE Set Advertising Enable: sets/clears HCI_LE_ADV and,
 * when enabling while a peripheral-initiated LE connection is pending, arms
 * that connection's timeout work. (extraction gappy; code verbatim)
 */
1256 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1258 __u8 *sent, status = *((__u8 *) skb->data);
1260 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1265 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1271 /* If we're doing connection initiation as peripheral. Set a
1272 * timeout in case something goes wrong.
1275 struct hci_conn *conn;
1277 hci_dev_set_flag(hdev, HCI_LE_ADV);
1279 conn = hci_lookup_le_connect(hdev);
1281 queue_delayed_work(hdev->workqueue,
1282 &conn->le_conn_timeout,
1283 conn->conn_timeout);
1285 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1288 hci_dev_unlock(hdev);
/* Command Complete for LE Set Extended Advertising Enable: tracks per-set
 * enabled state. On enable, sets HCI_LE_ADV and arms a pending LE connect
 * timeout; on disable, marks instances disabled and clears HCI_LE_ADV once
 * no enabled instance remains. (extraction gappy; code verbatim)
 */
1291 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1292 struct sk_buff *skb)
1294 struct hci_cp_le_set_ext_adv_enable *cp;
1295 struct hci_cp_ext_adv_set *set;
1296 __u8 status = *((__u8 *) skb->data);
1297 struct adv_info *adv = NULL, *n;
1299 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1304 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1308 set = (void *)cp->data;
1312 if (cp->num_of_sets)
1313 adv = hci_find_adv_instance(hdev, set->handle);
1316 struct hci_conn *conn;
1318 hci_dev_set_flag(hdev, HCI_LE_ADV);
1321 adv->enabled = true;
1323 conn = hci_lookup_le_connect(hdev);
1325 queue_delayed_work(hdev->workqueue,
1326 &conn->le_conn_timeout,
1327 conn->conn_timeout);
1330 adv->enabled = false;
1331 /* If just one instance was disabled check if there are
1332 * any other instance enabled before clearing HCI_LE_ADV
1334 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1340 /* All instances shall be considered disabled */
1341 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1343 adv->enabled = false;
1346 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1350 hci_dev_unlock(hdev);
/* Command Complete for LE Set Scan Parameters: caches the scan type we sent.
 * (extraction gappy; code verbatim)
 */
1353 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1355 struct hci_cp_le_set_scan_param *cp;
1356 __u8 status = *((__u8 *) skb->data);
1358 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1363 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1369 hdev->le_scan_type = cp->type;
1371 hci_dev_unlock(hdev);
/* Command Complete for LE Set Extended Scan Parameters: caches the scan type
 * from the first per-PHY parameter block we sent. (extraction gappy; code verbatim)
 */
1374 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1375 struct sk_buff *skb)
1377 struct hci_cp_le_set_ext_scan_params *cp;
1378 __u8 status = *((__u8 *) skb->data);
1379 struct hci_cp_le_scan_phy_params *phy_param;
1381 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1386 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1390 phy_param = (void *)cp->data;
1394 hdev->le_scan_type = phy_param->type;
1396 hci_dev_unlock(hdev);
/* Return true when a deferred advertising report is buffered, i.e. the
 * last-seen advertiser address is not BDADDR_ANY. (extraction gappy; code verbatim)
 */
1399 static bool has_pending_adv_report(struct hci_dev *hdev)
1401 struct discovery_state *d = &hdev->discovery;
1403 return bacmp(&d->last_adv_addr, BDADDR_ANY);
/* Drop the buffered advertising report: reset address to BDADDR_ANY and the
 * stored data length to zero. (extraction gappy; code verbatim)
 */
1406 static void clear_pending_adv_report(struct hci_dev *hdev)
1408 struct discovery_state *d = &hdev->discovery;
1410 bacpy(&d->last_adv_addr, BDADDR_ANY);
1411 d->last_adv_data_len = 0;
/* Buffer an advertising report (address, type, RSSI, flags, AD data) in the
 * discovery state so it can be merged with a following scan response;
 * reports longer than HCI_MAX_AD_LENGTH are not stored. (extraction gappy; code verbatim)
 */
1414 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1415 u8 bdaddr_type, s8 rssi, u32 flags,
1418 struct discovery_state *d = &hdev->discovery;
1420 if (len > HCI_MAX_AD_LENGTH)
1423 bacpy(&d->last_adv_addr, bdaddr);
1424 d->last_adv_addr_type = bdaddr_type;
1425 d->last_adv_rssi = rssi;
1426 d->last_adv_flags = flags;
1427 memcpy(d->last_adv_data, data, len);
1428 d->last_adv_data_len = len;
/* Common completion for LE (ext) scan enable/disable. Enable: sets
 * HCI_LE_SCAN and, for active scans, drops any buffered adv report.
 * Disable: flushes a pending adv report to mgmt, cancels the scan-disable
 * timer, clears HCI_LE_SCAN, and either marks discovery stopped (scan was
 * interrupted by a connect) or re-enables advertising. Reserved parameter
 * values are logged as errors. (extraction gappy; code verbatim)
 */
1431 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1436 case LE_SCAN_ENABLE:
1437 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1438 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1439 clear_pending_adv_report(hdev);
1442 case LE_SCAN_DISABLE:
1443 /* We do this here instead of when setting DISCOVERY_STOPPED
1444 * since the latter would potentially require waiting for
1445 * inquiry to stop too.
1447 if (has_pending_adv_report(hdev)) {
1448 struct discovery_state *d = &hdev->discovery;
1450 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1451 d->last_adv_addr_type, NULL,
1452 d->last_adv_rssi, d->last_adv_flags,
1454 d->last_adv_data_len, NULL, 0);
1457 /* Cancel this timer so that we don't try to disable scanning
1458 * when it's already disabled.
1460 cancel_delayed_work(&hdev->le_scan_disable);
1462 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1464 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1465 * interrupted scanning due to a connect request. Mark
1466 * therefore discovery as stopped. If this was not
1467 * because of a connect request advertising might have
1468 * been disabled because of active scanning, so
1469 * re-enable it again if necessary.
1471 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1472 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1473 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1474 hdev->discovery.state == DISCOVERY_FINDING)
1475 hci_req_reenable_advertising(hdev);
1480 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1485 hci_dev_unlock(hdev);
/* Command Complete for LE Set Scan Enable: delegates to
 * le_set_scan_enable_complete() with the enable value we sent.
 * (extraction gappy; code verbatim)
 */
1488 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1489 struct sk_buff *skb)
1491 struct hci_cp_le_set_scan_enable *cp;
1492 __u8 status = *((__u8 *) skb->data);
1494 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1499 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1503 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete for LE Set Extended Scan Enable: delegates to
 * le_set_scan_enable_complete() with the enable value we sent.
 * (extraction gappy; code verbatim)
 */
1506 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1507 struct sk_buff *skb)
1509 struct hci_cp_le_set_ext_scan_enable *cp;
1510 __u8 status = *((__u8 *) skb->data);
1512 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1517 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1521 le_set_scan_enable_complete(hdev, cp->enable);
/* Cache the controller's number of supported advertising sets
 * (LE Read Number of Supported Advertising Sets Command Complete).
 */
1524 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1525 struct sk_buff *skb)
1527 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1529 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1535 hdev->le_num_of_adv_sets = rp->num_of_sets;
/* Cache the LE accept (filter) list size reported by the controller. */
1538 static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
1539 struct sk_buff *skb)
1541 struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;
1543 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1548 hdev->le_accept_list_size = rp->size;
/* Mirror a successful LE Clear Accept List command by emptying the
 * host-side copy of the list.
 */
1551 static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
1552 struct sk_buff *skb)
1554 __u8 status = *((__u8 *) skb->data);
1556 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1561 hci_bdaddr_list_clear(&hdev->le_accept_list);
/* Mirror a successful LE Add Device To Accept List command into the
 * host-side list, using the address from the command we sent.
 * NOTE(review): status/!sent guards are elided in this extract.
 */
1564 static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
1565 struct sk_buff *skb)
1567 struct hci_cp_le_add_to_accept_list *sent;
1568 __u8 status = *((__u8 *) skb->data);
1570 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1575 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1579 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
/* Mirror a successful LE Remove Device From Accept List command by
 * removing the same address from the host-side list.
 */
1583 static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
1584 struct sk_buff *skb)
1586 struct hci_cp_le_del_from_accept_list *sent;
1587 __u8 status = *((__u8 *) skb->data);
1589 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1594 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1598 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
/* Cache the 8-byte LE supported-states bitmask from the controller. */
1602 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1603 struct sk_buff *skb)
1605 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1607 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1612 memcpy(hdev->le_states, rp->le_states, 8);
/* Cache the suggested default LE data length (tx octets/time),
 * converting from little-endian wire order.
 */
1615 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1616 struct sk_buff *skb)
1618 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1620 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1625 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1626 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
/* On successful LE Write Suggested Default Data Length, update the
 * cached defaults from the parameters we sent (not from the reply).
 */
1629 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1630 struct sk_buff *skb)
1632 struct hci_cp_le_write_def_data_len *sent;
1633 __u8 status = *((__u8 *) skb->data);
1635 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1640 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1644 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1645 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
/* Mirror a successful LE Add Device To Resolving List command into the
 * host-side resolving list, including the peer IRK we sent.
 */
1648 static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
1649 struct sk_buff *skb)
1651 struct hci_cp_le_add_to_resolv_list *sent;
1652 __u8 status = *((__u8 *) skb->data);
1654 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1659 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1663 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1664 sent->bdaddr_type, sent->peer_irk,
/* Mirror a successful LE Remove Device From Resolving List command by
 * removing the same entry from the host-side resolving list.
 */
1668 static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
1669 struct sk_buff *skb)
1671 struct hci_cp_le_del_from_resolv_list *sent;
1672 __u8 status = *((__u8 *) skb->data);
1674 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1679 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1683 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
/* Mirror a successful LE Clear Resolving List command by emptying the
 * host-side copy of the list.
 */
1687 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1688 struct sk_buff *skb)
1690 __u8 status = *((__u8 *) skb->data);
1692 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1697 hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Cache the controller's resolving list capacity. */
1700 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1701 struct sk_buff *skb)
1703 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1705 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1710 hdev->le_resolv_list_size = rp->size;
/* Track whether controller-based (link-layer) RPA resolution is active:
 * set or clear HCI_LL_RPA_RESOLUTION according to the enable value we
 * sent. NOTE(review): the if/else around the two flag calls is elided
 * in this extract — presumably keyed on *sent; confirm in full source.
 */
1713 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1714 struct sk_buff *skb)
1716 __u8 *sent, status = *((__u8 *) skb->data);
1718 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1723 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1730 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1732 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1734 hci_dev_unlock(hdev);
/* Cache the controller's maximum LE data lengths/times (tx and rx),
 * converting each 16-bit field from little-endian wire order.
 */
1737 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1738 struct sk_buff *skb)
1740 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1742 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1747 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1748 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1749 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1750 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* Write LE Host Supported completed: sync the host feature bits
 * (LMP_HOST_LE, LMP_HOST_LE_BREDR) and the HCI_LE_ENABLED flag with
 * the values we just wrote. Disabling LE also clears HCI_ADVERTISING.
 * NOTE(review): the if/else conditions on sent->le and sent->simul are
 * elided in this extract.
 */
1753 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1754 struct sk_buff *skb)
1756 struct hci_cp_write_le_host_supported *sent;
1757 __u8 status = *((__u8 *) skb->data);
1759 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1764 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1771 hdev->features[1][0] |= LMP_HOST_LE;
1772 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1774 hdev->features[1][0] &= ~LMP_HOST_LE;
1775 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1776 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1780 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1782 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1784 hci_dev_unlock(hdev);
/* LE Set Advertising Parameters completed: remember which own-address
 * type advertising was configured with (needed for later RPA handling).
 */
1787 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1789 struct hci_cp_le_set_adv_param *cp;
1790 u8 status = *((u8 *) skb->data);
1792 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1797 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1802 hdev->adv_addr_type = cp->own_address_type;
1803 hci_dev_unlock(hdev);
/* LE Set Extended Advertising Parameters completed: record the own
 * address type and the tx power the controller selected, both on hdev
 * (instance 0) and on the matching adv_info instance, then refresh the
 * advertising data now that tx power is known.
 */
1806 static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1808 struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
1809 struct hci_cp_le_set_ext_adv_params *cp;
1810 struct adv_info *adv_instance;
1812 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1817 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
1822 hdev->adv_addr_type = cp->own_addr_type;
1824 /* Store in hdev for instance 0 */
1825 hdev->adv_tx_power = rp->tx_power;
1827 adv_instance = hci_find_adv_instance(hdev, cp->handle);
1829 adv_instance->tx_power = rp->tx_power;
1831 /* Update adv data as tx power is known now */
1832 hci_req_update_adv_data(hdev, cp->handle);
1834 hci_dev_unlock(hdev);
/* Read RSSI completed: store the reported RSSI on the connection
 * identified by the reply's handle (if it still exists).
 */
1837 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1839 struct hci_rp_read_rssi *rp = (void *) skb->data;
1840 struct hci_conn *conn;
1842 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1849 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1851 conn->rssi = rp->rssi;
1853 hci_dev_unlock(hdev);
/* Read Transmit Power Level completed: route the reported value to
 * conn->tx_power or conn->max_tx_power depending on the type field of
 * the command we sent (current vs. maximum level).
 * NOTE(review): the case labels of the switch are elided in this
 * extract.
 */
1856 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1858 struct hci_cp_read_tx_power *sent;
1859 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1860 struct hci_conn *conn;
1862 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1867 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1873 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1877 switch (sent->type) {
1879 conn->tx_power = rp->tx_power;
1882 conn->max_tx_power = rp->tx_power;
1887 hci_dev_unlock(hdev);
/* Write SSP Debug Mode completed: remember the mode we configured so
 * link-key handling can recognise debug keys.
 */
1890 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1892 u8 status = *((u8 *) skb->data);
1895 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1900 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1902 hdev->ssp_debug_mode = *mode;
/* Command Status for Inquiry: kick any pending connection attempts and,
 * on success (guard elided in this extract), mark inquiry as active.
 */
1905 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1907 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1910 hci_conn_check_pending(hdev);
1914 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status for Create Connection. On failure for a known
 * outgoing conn: status 0x0c (Command Disallowed) with attempts left
 * moves it to BT_CONNECT2 for a retry, anything else closes it and
 * notifies via hci_connect_cfm(). On success with no conn object yet,
 * a new ACL hci_conn is created for the address.
 */
1917 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1919 struct hci_cp_create_conn *cp;
1920 struct hci_conn *conn;
1922 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1924 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1930 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1932 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1935 if (conn && conn->state == BT_CONNECT) {
1936 if (status != 0x0c || conn->attempt > 2) {
1937 conn->state = BT_CLOSED;
1938 hci_connect_cfm(conn, status);
1941 conn->state = BT_CONNECT2;
1945 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1948 bt_dev_err(hdev, "no memory for new connection");
1952 hci_dev_unlock(hdev);
/* Command Status for Add SCO Connection: on error, find the parent ACL
 * by the handle we sent, close its linked SCO conn and notify the
 * failure. NOTE(review): the status guard and the acl->link deref are
 * elided in this extract.
 */
1955 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1957 struct hci_cp_add_sco *cp;
1958 struct hci_conn *acl, *sco;
1961 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1966 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1970 handle = __le16_to_cpu(cp->handle);
1972 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1976 acl = hci_conn_hash_lookup_handle(hdev, handle);
1980 sco->state = BT_CLOSED;
1982 hci_connect_cfm(sco, status);
1987 hci_dev_unlock(hdev);
/* Command Status for Authentication Requested: on error, if the conn
 * is still in BT_CONFIG, report the failure and drop the reference
 * taken for the request.
 */
1990 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1992 struct hci_cp_auth_requested *cp;
1993 struct hci_conn *conn;
1995 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2000 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2006 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2008 if (conn->state == BT_CONFIG) {
2009 hci_connect_cfm(conn, status);
2010 hci_conn_drop(conn);
2014 hci_dev_unlock(hdev);
/* Command Status for Set Connection Encryption: same error path as
 * auth_requested above — notify and drop if still in BT_CONFIG.
 */
2017 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2019 struct hci_cp_set_conn_encrypt *cp;
2020 struct hci_conn *conn;
2022 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2027 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2033 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2035 if (conn->state == BT_CONFIG) {
2036 hci_connect_cfm(conn, status);
2037 hci_conn_drop(conn);
2041 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection still in BT_CONFIG needs an
 * explicit authentication request. Skipped for non-outgoing conns,
 * SDP-only security, and legacy (non-SSP) links below MEDIUM security
 * unless MITM (auth_type bit 0) is required. Return values (0/1 lines)
 * are elided in this extract.
 */
2044 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2045 struct hci_conn *conn)
2047 if (conn->state != BT_CONFIG || !conn->out)
2050 if (conn->pending_sec_level == BT_SECURITY_SDP)
2053 /* Only request authentication for SSP connections or non-SSP
2054 * devices with sec_level MEDIUM or HIGH or if MITM protection
2057 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2058 conn->pending_sec_level != BT_SECURITY_FIPS &&
2059 conn->pending_sec_level != BT_SECURITY_HIGH &&
2060 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue a Remote Name Request for an inquiry cache entry, reusing the
 * page-scan parameters learned during inquiry. Returns the result of
 * hci_send_cmd() (0 on success).
 */
2066 static int hci_resolve_name(struct hci_dev *hdev,
2067 struct inquiry_entry *e)
2069 struct hci_cp_remote_name_req cp;
2071 memset(&cp, 0, sizeof(cp));
2073 bacpy(&cp.bdaddr, &e->data.bdaddr);
2074 cp.pscan_rep_mode = e->data.pscan_rep_mode;
2075 cp.pscan_mode = e->data.pscan_mode;
2076 cp.clock_offset = e->data.clock_offset;
2078 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Start resolving the next name from the discovery resolve list.
 * Returns true when a request was successfully issued (entry moves to
 * NAME_PENDING), false when the list is empty or the request failed.
 * NOTE(review): the false-return lines are elided in this extract.
 */
2081 static bool hci_resolve_next_name(struct hci_dev *hdev)
2083 struct discovery_state *discov = &hdev->discovery;
2084 struct inquiry_entry *e;
2086 if (list_empty(&discov->resolve))
2089 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2093 if (hci_resolve_name(hdev, e) == 0) {
2094 e->name_state = NAME_PENDING;
/* Process a resolved (or failed) remote name during discovery:
 * propagate the mgmt "connected" event where applicable, update the
 * inquiry cache entry's name state, emit mgmt_remote_name() on
 * success, and either continue with the next pending name or mark
 * discovery stopped.
 */
2101 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2102 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2104 struct discovery_state *discov = &hdev->discovery;
2105 struct inquiry_entry *e;
2107 /* Update the mgmt connected state if necessary. Be careful with
2108 * conn objects that exist but are not (yet) connected however.
2109 * Only those in BT_CONFIG or BT_CONNECTED states can be
2110 * considered connected.
2113 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2114 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2115 mgmt_device_connected(hdev, conn, name, name_len)
2117 if (discov->state == DISCOVERY_STOPPED)
2120 if (discov->state == DISCOVERY_STOPPING)
2121 goto discov_complete;
2123 if (discov->state != DISCOVERY_RESOLVING)
2126 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2127 /* If the device was not found in a list of found devices names of which
2128 * are pending. there is no need to continue resolving a next name as it
2129 * will be done upon receiving another Remote Name Request Complete
2136 e->name_state = NAME_KNOWN;
2137 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
2138 e->data.rssi, name, name_len);
2140 e->name_state = NAME_NOT_KNOWN;
2143 if (hci_resolve_next_name(hdev))
2147 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status for Remote Name Request. Success just waits for the
 * later Remote Name Request Complete event; on failure, finish any
 * pending mgmt name resolution and, if outgoing authentication is
 * needed on the conn, send Authentication Requested now.
 */
2150 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2152 struct hci_cp_remote_name_req *cp;
2153 struct hci_conn *conn;
2155 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2157 /* If successful wait for the name req complete event before
2158 * checking for the need to do authentication */
2162 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2168 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2170 if (hci_dev_test_flag(hdev, HCI_MGMT))
2171 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2176 if (!hci_outgoing_auth_needed(hdev, conn))
2179 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2180 struct hci_cp_auth_requested auth_cp;
2182 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2184 auth_cp.handle = __cpu_to_le16(conn->handle);
2185 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2186 sizeof(auth_cp), &auth_cp);
2190 hci_dev_unlock(hdev);
/* Command Status for Read Remote Supported Features: on error, report
 * the connect failure and drop the conn if still in BT_CONFIG.
 */
2193 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2195 struct hci_cp_read_remote_features *cp;
2196 struct hci_conn *conn;
2198 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2203 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2209 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2211 if (conn->state == BT_CONFIG) {
2212 hci_connect_cfm(conn, status);
2213 hci_conn_drop(conn);
2217 hci_dev_unlock(hdev);
/* Command Status for Read Remote Extended Features: same error path as
 * the non-extended variant above.
 */
2220 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2222 struct hci_cp_read_remote_ext_features *cp;
2223 struct hci_conn *conn;
2225 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2230 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2236 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2238 if (conn->state == BT_CONFIG) {
2239 hci_connect_cfm(conn, status);
2240 hci_conn_drop(conn);
2244 hci_dev_unlock(hdev);
/* Command Status for Setup Synchronous Connection: on error, locate
 * the parent ACL via the handle we sent, close the linked SCO conn and
 * notify the failure (guard/deref lines elided in this extract).
 */
2247 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2249 struct hci_cp_setup_sync_conn *cp;
2250 struct hci_conn *acl, *sco;
2253 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2258 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2262 handle = __le16_to_cpu(cp->handle);
2264 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2268 acl = hci_conn_hash_lookup_handle(hdev, handle);
2272 sco->state = BT_CLOSED;
2274 hci_connect_cfm(sco, status);
2279 hci_dev_unlock(hdev);
/* Command Status for Enhanced Setup Synchronous Connection: identical
 * error handling to hci_cs_setup_sync_conn(), keyed on the enhanced
 * opcode, and using bt_dev_dbg() per the newer logging style.
 */
2282 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2284 struct hci_cp_enhanced_setup_sync_conn *cp;
2285 struct hci_conn *acl, *sco;
2288 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2293 cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2297 handle = __le16_to_cpu(cp->handle);
2299 bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2303 acl = hci_conn_hash_lookup_handle(hdev, handle);
2307 sco->state = BT_CLOSED;
2309 hci_connect_cfm(sco, status);
2314 hci_dev_unlock(hdev);
/* Command Status for Sniff Mode: on error, clear the pending mode
 * change flag and finish any SCO setup that was waiting on it.
 */
2317 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2319 struct hci_cp_sniff_mode *cp;
2320 struct hci_conn *conn;
2322 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2327 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2333 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2335 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2337 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2338 hci_sco_setup(conn, status);
2341 hci_dev_unlock(hdev);
/* Command Status for Exit Sniff Mode: mirror image of the sniff-mode
 * handler above — clear the pending flag and complete deferred SCO
 * setup on error.
 */
2344 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2346 struct hci_cp_exit_sniff_mode *cp;
2347 struct hci_conn *conn;
2349 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2354 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2360 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2362 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2364 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2365 hci_sco_setup(conn, status);
2368 hci_dev_unlock(hdev);
/* Command Status for Disconnect: the upper layers do not retry a
 * failed disconnect, so report the failure over mgmt and, for an LE
 * peripheral link, re-enable advertising for the conn's instance.
 * NOTE(review): status guard and additional cleanup lines are elided
 * in this extract.
 */
2371 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2373 struct hci_cp_disconnect *cp;
2374 struct hci_conn *conn;
2379 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2385 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2387 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2388 conn->dst_type, status);
2390 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2391 hdev->cur_adv_instance = conn->adv_instance;
2392 hci_req_reenable_advertising(hdev);
2395 /* If the disconnection failed for any reason, the upper layer
2396 * does not retry to disconnect in current implementation.
2397 * Hence, we need to do some basic cleanup here and re-enable
2398 * advertising if necessary.
2403 hci_dev_unlock(hdev);
/* Normalize controller-resolved LE address types (0x02/0x03) back to
 * plain public/random; *resolved (when non-NULL, per the visible
 * usage) is presumably set to whether resolution occurred — branch
 * bodies are elided in this extract.
 */
2406 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2408 /* When using controller based address resolution, then the new
2409 * address types 0x02 and 0x03 are used. These types need to be
2410 * converted back into either public address or random address type
2413 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2416 return ADDR_LE_DEV_PUBLIC;
2417 case ADDR_LE_DEV_RANDOM_RESOLVED:
2420 return ADDR_LE_DEV_RANDOM;
/* Common Command Status bookkeeping for LE (Ext) Create Connection:
 * record the initiator/responder address info that SMP will need for
 * the lifetime of the connection, and arm a connection timeout for
 * direct (non-accept-list) connection attempts.
 */
2428 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2429 u8 peer_addr_type, u8 own_address_type,
2432 struct hci_conn *conn;
2434 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2439 own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2441 /* Store the initiator and responder address information which
2442 * is needed for SMP. These values will not change during the
2443 * lifetime of the connection.
2445 conn->init_addr_type = own_address_type;
2446 if (own_address_type == ADDR_LE_DEV_RANDOM)
2447 bacpy(&conn->init_addr, &hdev->random_addr);
2449 bacpy(&conn->init_addr, &hdev->bdaddr);
2451 conn->resp_addr_type = peer_addr_type;
2452 bacpy(&conn->resp_addr, peer_addr);
2454 /* We don't want the connection attempt to stick around
2455 * indefinitely since LE doesn't have a page timeout concept
2456 * like BR/EDR. Set a timer for any connection that doesn't use
2457 * the accept list for connecting.
2459 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2460 queue_delayed_work(conn->hdev->workqueue,
2461 &conn->le_conn_timeout,
2462 conn->conn_timeout);
/* Command Status for LE Create Connection: failures are handled by
 * hci_le_conn_failed() via the request callbacks, so this only does
 * the success-path bookkeeping through cs_le_create_conn().
 */
2465 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2467 struct hci_cp_le_create_conn *cp;
2469 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2471 /* All connection failure handling is taken care of by the
2472 * hci_le_conn_failed function which is triggered by the HCI
2473 * request completion callbacks used for connecting.
2478 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2484 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2485 cp->own_address_type, cp->filter_policy);
2487 hci_dev_unlock(hdev);
/* Command Status for LE Extended Create Connection: same flow as the
 * legacy variant, differing only in the command struct field names.
 */
2490 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2492 struct hci_cp_le_ext_create_conn *cp;
2494 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2496 /* All connection failure handling is taken care of by the
2497 * hci_le_conn_failed function which is triggered by the HCI
2498 * request completion callbacks used for connecting.
2503 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2509 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2510 cp->own_addr_type, cp->filter_policy);
2512 hci_dev_unlock(hdev);
/* Command Status for LE Read Remote Features: on error, report the
 * connect failure and drop the conn if still in BT_CONFIG.
 */
2515 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2517 struct hci_cp_le_read_remote_features *cp;
2518 struct hci_conn *conn;
2520 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2525 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2531 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2533 if (conn->state == BT_CONFIG) {
2534 hci_connect_cfm(conn, status);
2535 hci_conn_drop(conn);
2539 hci_dev_unlock(hdev);
/* Command Status for LE Start Encryption: if the command failed while
 * the link is still BT_CONNECTED, tear the link down with an
 * authentication-failure reason — encryption could not be started.
 */
2542 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2544 struct hci_cp_le_start_enc *cp;
2545 struct hci_conn *conn;
2547 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2554 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2558 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2562 if (conn->state != BT_CONNECTED)
2565 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2566 hci_conn_drop(conn);
2569 hci_dev_unlock(hdev);
/* Command Status for Switch Role: on error, clear the pending
 * role-switch flag so the state machine is not left waiting.
 */
2572 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2574 struct hci_cp_switch_role *cp;
2575 struct hci_conn *conn;
2577 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2582 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2588 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2590 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2592 hci_dev_unlock(hdev);
/* Inquiry Complete event: clear HCI_INQUIRY (waking any waiters), then
 * for mgmt-managed discovery either start resolving cached names or
 * mark discovery stopped — taking care not to stop early when a
 * simultaneous LE scan is still running.
 */
2595 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2597 __u8 status = *((__u8 *) skb->data);
2598 struct discovery_state *discov = &hdev->discovery;
2599 struct inquiry_entry *e;
2601 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2603 hci_conn_check_pending(hdev);
2605 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2608 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2609 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2611 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2616 if (discov->state != DISCOVERY_FINDING)
2619 if (list_empty(&discov->resolve)) {
2620 /* When BR/EDR inquiry is active and no LE scanning is in
2621 * progress, then change discovery state to indicate completion.
2623 * When running LE scanning and BR/EDR inquiry simultaneously
2624 * and the LE scan already finished, then change the discovery
2625 * state to indicate completion.
2627 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2628 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2629 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2633 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2634 if (e && hci_resolve_name(hdev, e) == 0) {
2635 e->name_state = NAME_PENDING;
2636 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2638 /* When BR/EDR inquiry is active and no LE scanning is in
2639 * progress, then change discovery state to indicate completion.
2641 * When running LE scanning and BR/EDR inquiry simultaneously
2642 * and the LE scan already finished, then change the discovery
2643 * state to indicate completion.
2645 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2646 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2647 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2651 hci_dev_unlock(hdev);
/* Inquiry Result event: validate the response count against skb->len,
 * skip results during periodic inquiry, then for each entry update the
 * inquiry cache and report the device over mgmt. RSSI is unavailable
 * in this (basic) result format, hence HCI_RSSI_INVALID.
 */
2654 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2656 struct inquiry_data data;
2657 struct inquiry_info *info = (void *) (skb->data + 1);
2658 int num_rsp = *((__u8 *) skb->data);
2660 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2662 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2665 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2670 for (; num_rsp; num_rsp--, info++) {
2673 bacpy(&data.bdaddr, &info->bdaddr);
2674 data.pscan_rep_mode = info->pscan_rep_mode;
2675 data.pscan_period_mode = info->pscan_period_mode;
2676 data.pscan_mode = info->pscan_mode;
2677 memcpy(data.dev_class, info->dev_class, 3);
2678 data.clock_offset = info->clock_offset;
2679 data.rssi = HCI_RSSI_INVALID;
2680 data.ssp_mode = 0x00;
2682 flags = hci_inquiry_cache_update(hdev, &data, false);
2684 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2685 info->dev_class, HCI_RSSI_INVALID,
2686 flags, NULL, 0, NULL, 0);
2689 hci_dev_unlock(hdev);
/* Connection Complete event. Looks up (or, for filter-accepted
 * auto-connections and demoted eSCO requests, creates/adjusts) the
 * hci_conn, then on success finishes setup: handle, state, sysfs and
 * debugfs entries, auth/encrypt flags mirrored from hdev, remote
 * feature read for ACL, and packet-type fixup for pre-2.0 incoming
 * links. On failure the conn is closed and mgmt is told. Several
 * guard/brace lines are elided in this extract.
 */
2692 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2694 struct hci_ev_conn_complete *ev = (void *) skb->data;
2695 struct hci_conn *conn;
2697 BT_DBG("%s", hdev->name);
2701 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2703 /* Connection may not exist if auto-connected. Check the bredr
2704 * allowlist to see if this device is allowed to auto connect.
2705 * If link is an ACL type, create a connection class
2708 * Auto-connect will only occur if the event filter is
2709 * programmed with a given address. Right now, event filter is
2710 * only used during suspend.
2712 if (ev->link_type == ACL_LINK &&
2713 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
2716 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2719 bt_dev_err(hdev, "no memory for new conn");
2723 if (ev->link_type != SCO_LINK)
2726 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
2731 conn->type = SCO_LINK;
2736 conn->handle = __le16_to_cpu(ev->handle);
2738 if (conn->type == ACL_LINK) {
2739 conn->state = BT_CONFIG;
2740 hci_conn_hold(conn);
2742 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2743 !hci_find_link_key(hdev, &ev->bdaddr))
2744 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2746 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2748 conn->state = BT_CONNECTED;
2750 hci_debugfs_create_conn(conn);
2751 hci_conn_add_sysfs(conn);
2753 if (test_bit(HCI_AUTH, &hdev->flags))
2754 set_bit(HCI_CONN_AUTH, &conn->flags);
2756 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2757 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2759 /* Get remote features */
2760 if (conn->type == ACL_LINK) {
2761 struct hci_cp_read_remote_features cp;
2762 cp.handle = ev->handle;
2763 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2766 hci_req_update_scan(hdev);
2769 /* Set packet type for incoming connection */
2770 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2771 struct hci_cp_change_conn_ptype cp;
2772 cp.handle = ev->handle;
2773 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2774 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2778 conn->state = BT_CLOSED;
2779 if (conn->type == ACL_LINK)
2780 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2781 conn->dst_type, ev->status);
2784 if (conn->type == ACL_LINK)
2785 hci_sco_setup(conn, ev->status);
2788 hci_connect_cfm(conn, ev->status);
2790 } else if (ev->link_type == SCO_LINK) {
2791 switch (conn->setting & SCO_AIRMODE_MASK) {
2792 case SCO_AIRMODE_CVSD:
2794 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
2798 hci_connect_cfm(conn, ev->status);
2802 hci_dev_unlock(hdev);
2804 hci_conn_check_pending(hdev);
/* Reject an incoming connection request from the given address with
 * reason "unacceptable BD_ADDR".
 */
2807 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2809 struct hci_cp_reject_conn_req cp;
2811 bacpy(&cp.bdaddr, bdaddr);
2812 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2813 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event. Reject when the protocol layer, the reject
 * list, or mgmt connectable/accept-list policy says no; otherwise
 * record the device class in the inquiry cache, obtain a conn object
 * and either accept immediately (ACL, or SCO on non-eSCO controllers),
 * accept as a sync connection with default CVSD-style parameters, or
 * defer to the protocol layer (BT_CONNECT2 + hci_connect_cfm(.., 0)).
 */
2816 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2818 struct hci_ev_conn_request *ev = (void *) skb->data;
2819 int mask = hdev->link_mode;
2820 struct inquiry_entry *ie;
2821 struct hci_conn *conn;
2824 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2827 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2830 if (!(mask & HCI_LM_ACCEPT)) {
2831 hci_reject_conn(hdev, &ev->bdaddr);
2835 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
2837 hci_reject_conn(hdev, &ev->bdaddr);
2841 /* Require HCI_CONNECTABLE or an accept list entry to accept the
2842 * connection. These features are only touched through mgmt so
2843 * only do the checks if HCI_MGMT is set.
2845 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2846 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2847 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
2849 hci_reject_conn(hdev, &ev->bdaddr);
2853 /* Connection accepted */
2857 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2859 memcpy(ie->data.dev_class, ev->dev_class, 3);
2861 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2864 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2867 bt_dev_err(hdev, "no memory for new connection");
2868 hci_dev_unlock(hdev);
2873 memcpy(conn->dev_class, ev->dev_class, 3);
2875 hci_dev_unlock(hdev);
2877 if (ev->link_type == ACL_LINK ||
2878 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2879 struct hci_cp_accept_conn_req cp;
2880 conn->state = BT_CONNECT;
2882 bacpy(&cp.bdaddr, &ev->bdaddr);
2884 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2885 cp.role = 0x00; /* Become central */
2887 cp.role = 0x01; /* Remain peripheral */
2889 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2890 } else if (!(flags & HCI_PROTO_DEFER)) {
2891 struct hci_cp_accept_sync_conn_req cp;
2892 conn->state = BT_CONNECT;
2894 bacpy(&cp.bdaddr, &ev->bdaddr);
2895 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2897 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2898 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2899 cp.max_latency = cpu_to_le16(0xffff);
2900 cp.content_format = cpu_to_le16(hdev->voice_setting);
2901 cp.retrans_effort = 0xff;
2903 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2906 conn->state = BT_CONNECT2;
2907 hci_connect_cfm(conn, 0);
/* Map an HCI disconnect reason code onto the coarser mgmt disconnect
 * reason enumeration; unrecognized codes become UNKNOWN.
 */
2911 static u8 hci_to_mgmt_reason(u8 err)
2914 case HCI_ERROR_CONNECTION_TIMEOUT:
2915 return MGMT_DEV_DISCONN_TIMEOUT;
2916 case HCI_ERROR_REMOTE_USER_TERM:
2917 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2918 case HCI_ERROR_REMOTE_POWER_OFF:
2919 return MGMT_DEV_DISCONN_REMOTE;
2920 case HCI_ERROR_LOCAL_HOST_TERM:
2921 return MGMT_DEV_DISCONN_LOCAL_HOST;
2923 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event. On failure only mgmt is informed; on
 * success the conn is closed, mgmt gets a disconnect (with an
 * auth-failure override when the flag is set), ACL cleanup runs
 * (flush-key removal, scan update, auto-connect re-queue), protocol
 * layers are notified, suspend waiters are woken when the last conn
 * goes, and LE-peripheral advertising is re-enabled per the quoted
 * core-spec behavior. Several guard/brace lines are elided in this
 * extract.
 */
2927 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2929 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2931 struct hci_conn_params *params;
2932 struct hci_conn *conn;
2933 bool mgmt_connected;
2935 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2939 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2944 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2945 conn->dst_type, ev->status);
2949 conn->state = BT_CLOSED;
2951 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2953 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2954 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2956 reason = hci_to_mgmt_reason(ev->reason);
2958 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2959 reason, mgmt_connected);
2961 if (conn->type == ACL_LINK) {
2962 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2963 hci_remove_link_key(hdev, &conn->dst);
2965 hci_req_update_scan(hdev);
2968 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2970 switch (params->auto_connect) {
2971 case HCI_AUTO_CONN_LINK_LOSS:
2972 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2976 case HCI_AUTO_CONN_DIRECT:
2977 case HCI_AUTO_CONN_ALWAYS:
2978 list_del_init(&params->action);
2979 list_add(&params->action, &hdev->pend_le_conns);
2980 hci_update_background_scan(hdev);
2988 hci_disconn_cfm(conn, ev->reason);
2990 /* The suspend notifier is waiting for all devices to disconnect so
2991 * clear the bit from pending tasks and inform the wait queue.
2993 if (list_empty(&hdev->conn_hash.list) &&
2994 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
2995 wake_up(&hdev->suspend_wait_q);
2998 /* Re-enable advertising if necessary, since it might
2999 * have been disabled by the connection. From the
3000 * HCI_LE_Set_Advertise_Enable command description in
3001 * the core specification (v4.0):
3002 * "The Controller shall continue advertising until the Host
3003 * issues an LE_Set_Advertise_Enable command with
3004 * Advertising_Enable set to 0x00 (Advertising is disabled)
3005 * or until a connection is created or until the Advertising
3006 * is timed out due to Directed Advertising."
3008 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3009 hdev->cur_adv_instance = conn->adv_instance;
3010 hci_req_reenable_advertising(hdev);
3016 hci_dev_unlock(hdev);
3019 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3021 struct hci_ev_auth_complete *ev = (void *) skb->data;
3022 struct hci_conn *conn;
3024 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3028 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3033 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3035 if (!hci_conn_ssp_enabled(conn) &&
3036 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
3037 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
3039 set_bit(HCI_CONN_AUTH, &conn->flags);
3040 conn->sec_level = conn->pending_sec_level;
3043 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3044 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3046 mgmt_auth_failed(conn, ev->status);
3049 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3050 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3052 if (conn->state == BT_CONFIG) {
3053 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3054 struct hci_cp_set_conn_encrypt cp;
3055 cp.handle = ev->handle;
3057 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3060 conn->state = BT_CONNECTED;
3061 hci_connect_cfm(conn, ev->status);
3062 hci_conn_drop(conn);
3065 hci_auth_cfm(conn, ev->status);
3067 hci_conn_hold(conn);
3068 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3069 hci_conn_drop(conn);
3072 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3074 struct hci_cp_set_conn_encrypt cp;
3075 cp.handle = ev->handle;
3077 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3080 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3081 hci_encrypt_cfm(conn, ev->status);
3086 hci_dev_unlock(hdev);
/* Handle HCI Remote Name Request Complete event: resolve any pending name
 * lookup and, for outgoing connections, kick off authentication if needed.
 */
3089 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
3091 struct hci_ev_remote_name *ev = (void *) skb->data;
3092 struct hci_conn *conn;
3094 BT_DBG("%s", hdev->name);
3096 hci_conn_check_pending(hdev);
3100 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Name caching/notification below is only relevant with mgmt enabled. */
3102 if (!hci_dev_test_flag(hdev, HCI_MGMT))
/* On success pass the (bounded) name; on failure pass NULL/0. */
3105 if (ev->status == 0)
3106 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3107 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3109 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3115 if (!hci_outgoing_auth_needed(hdev, conn))
/* Request authentication exactly once; the flag guards against a
 * duplicate HCI_OP_AUTH_REQUESTED being sent. */
3118 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3119 struct hci_cp_auth_requested cp;
3121 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3123 cp.handle = __cpu_to_le16(conn->handle);
3124 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3128 hci_dev_unlock(hdev);
/* Completion callback for HCI_OP_READ_ENC_KEY_SIZE: record the negotiated
 * encryption key size on the connection, or 0 on failure (forcing upper
 * layers to treat the link as unencrypted-grade).
 */
3131 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3132 u16 opcode, struct sk_buff *skb)
3134 const struct hci_rp_read_enc_key_size *rp;
3135 struct hci_conn *conn;
3138 BT_DBG("%s status 0x%02x", hdev->name, status);
/* Validate the response buffer before touching its fields. */
3140 if (!skb || skb->len < sizeof(*rp)) {
3141 bt_dev_err(hdev, "invalid read key size response");
3145 rp = (void *)skb->data;
3146 handle = le16_to_cpu(rp->handle);
3150 conn = hci_conn_hash_lookup_handle(hdev, handle);
3154 /* While unexpected, the read_enc_key_size command may fail. The most
3155 * secure approach is to then assume the key size is 0 to force a
/* (error path) */
3159 bt_dev_err(hdev, "failed to read key size for handle %u",
3161 conn->enc_key_size = 0;
3163 conn->enc_key_size = rp->key_size;
/* Key size is now known; deliver the deferred encrypt confirmation. */
3166 hci_encrypt_cfm(conn, 0);
3169 hci_dev_unlock(hdev);
/* Handle HCI Encryption Change event: update AUTH/ENCRYPT/FIPS/AES-CCM
 * connection flags, enforce link-security policy, read the encryption key
 * size for ACL links, and set the authenticated payload timeout where the
 * controller supports LMP/LE ping.
 */
3172 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3174 struct hci_ev_encrypt_change *ev = (void *) skb->data;
3175 struct hci_conn *conn;
3177 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3181 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3187 /* Encryption implies authentication */
3188 set_bit(HCI_CONN_AUTH, &conn->flags);
3189 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3190 conn->sec_level = conn->pending_sec_level;
3192 /* P-256 authentication key implies FIPS */
3193 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3194 set_bit(HCI_CONN_FIPS, &conn->flags);
/* encrypt == 0x02 on BR/EDR means AES-CCM; LE links are always AES-CCM. */
3196 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3197 conn->type == LE_LINK)
3198 set_bit(HCI_CONN_AES_CCM, &conn->flags);
/* (encryption turned off) */
3200 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3201 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3205 /* We should disregard the current RPA and generate a new one
3206 * whenever the encryption procedure fails.
3208 if (ev->status && conn->type == LE_LINK) {
3209 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3210 hci_adv_instances_set_rpa_expired(hdev, true);
3213 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3215 /* Check link security requirements are met */
3216 if (!hci_conn_check_link_mode(conn))
/* Override the event status so the failure paths below run. */
3217 ev->status = HCI_ERROR_AUTH_FAILURE;
3219 if (ev->status && conn->state == BT_CONNECTED) {
3220 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3221 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3223 /* Notify upper layers so they can cleanup before
/* tearing the link down with an auth-failure reason. */
3226 hci_encrypt_cfm(conn, ev->status);
3227 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3228 hci_conn_drop(conn);
3232 /* Try reading the encryption key size for encrypted ACL links */
3233 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3234 struct hci_cp_read_enc_key_size cp;
3235 struct hci_request req;
3237 /* Only send HCI_Read_Encryption_Key_Size if the
3238 * controller really supports it. If it doesn't, assume
3239 * the default size (16).
/* commands[20] bit 4 advertises Read_Encryption_Key_Size support. */
3241 if (!(hdev->commands[20] & 0x10)) {
3242 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3246 hci_req_init(&req, hdev);
3248 cp.handle = cpu_to_le16(conn->handle);
3249 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
/* On send failure fall back to the default key size. */
3251 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3252 bt_dev_err(hdev, "sending read key size failed");
3253 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3260 /* Set the default Authenticated Payload Timeout after
3261 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3262 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3263 * sent when the link is active and Encryption is enabled, the conn
3264 * type can be either LE or ACL and controller must support LMP Ping.
3265 * Ensure for AES-CCM encryption as well.
3267 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3268 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3269 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3270 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3271 struct hci_cp_write_auth_payload_to cp;
3273 cp.handle = cpu_to_le16(conn->handle);
3274 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3275 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3280 hci_encrypt_cfm(conn, ev->status);
3283 hci_dev_unlock(hdev);
/* Handle HCI Change Connection Link Key Complete event: mark the link as
 * secure on success and notify upper layers via the key-change callback.
 */
3286 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3287 struct sk_buff *skb)
3289 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3290 struct hci_conn *conn;
3292 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3296 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3299 set_bit(HCI_CONN_SECURE, &conn->flags);
3301 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3303 hci_key_change_cfm(conn, ev->status);
3306 hci_dev_unlock(hdev);
/* Handle HCI Read Remote Supported Features Complete event: cache page 0
 * of the remote features, chase extended features when both sides support
 * them, otherwise continue connection setup (name request / connected
 * notification / auth).
 */
3309 static void hci_remote_features_evt(struct hci_dev *hdev,
3310 struct sk_buff *skb)
3312 struct hci_ev_remote_features *ev = (void *) skb->data;
3313 struct hci_conn *conn;
3315 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3319 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Store feature page 0 for this connection. */
3324 memcpy(conn->features[0], ev->features, 8);
3326 if (conn->state != BT_CONFIG)
/* If extended features are supported on both ends, fetch page 1 next. */
3329 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3330 lmp_ext_feat_capable(conn)) {
3331 struct hci_cp_read_remote_ext_features cp;
3332 cp.handle = ev->handle;
3334 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
/* Not yet reported to mgmt: resolve the remote name first. */
3339 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3340 struct hci_cp_remote_name_req cp;
3341 memset(&cp, 0, sizeof(cp));
3342 bacpy(&cp.bdaddr, &conn->dst);
3343 cp.pscan_rep_mode = 0x02;
3344 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3345 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3346 mgmt_device_connected(hdev, conn, NULL, 0);
3348 if (!hci_outgoing_auth_needed(hdev, conn)) {
3349 conn->state = BT_CONNECTED;
3350 hci_connect_cfm(conn, ev->status);
3351 hci_conn_drop(conn);
3355 hci_dev_unlock(hdev);
/* Refresh the command-credit counter and (re)arm the command timers after a
 * Command Complete/Status event carrying the new ncmd value.
 * NOTE(review): the branch structure around ncmd handling is only partially
 * visible in this excerpt.
 */
3358 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3360 cancel_delayed_work(&hdev->cmd_timer);
/* Skip credit bookkeeping while a reset is in flight. */
3362 if (!test_bit(HCI_RESET, &hdev->flags)) {
3364 cancel_delayed_work(&hdev->ncmd_timer);
3365 atomic_set(&hdev->cmd_cnt, 1);
/* ncmd == 0: arm the watchdog that waits for credits to come back. */
3367 schedule_delayed_work(&hdev->ncmd_timer,
/* Handle HCI Command Complete event: strip the event header, dispatch the
 * embedded return parameters to the per-opcode hci_cc_* handler, refresh
 * command credits/timers, and complete any matching pending request.
 */
3373 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3374 u16 *opcode, u8 *status,
3375 hci_req_complete_t *req_complete,
3376 hci_req_complete_skb_t *req_complete_skb)
3378 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3380 *opcode = __le16_to_cpu(ev->opcode);
/* First byte after the event header is the command's status. */
3381 *status = skb->data[sizeof(*ev)];
3383 skb_pull(skb, sizeof(*ev));
/* Per-opcode dispatch: each hci_cc_* parses this command's return params. */
3386 case HCI_OP_INQUIRY_CANCEL:
3387 hci_cc_inquiry_cancel(hdev, skb, status);
3390 case HCI_OP_PERIODIC_INQ:
3391 hci_cc_periodic_inq(hdev, skb);
3394 case HCI_OP_EXIT_PERIODIC_INQ:
3395 hci_cc_exit_periodic_inq(hdev, skb);
3398 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3399 hci_cc_remote_name_req_cancel(hdev, skb);
3402 case HCI_OP_ROLE_DISCOVERY:
3403 hci_cc_role_discovery(hdev, skb);
3406 case HCI_OP_READ_LINK_POLICY:
3407 hci_cc_read_link_policy(hdev, skb);
3410 case HCI_OP_WRITE_LINK_POLICY:
3411 hci_cc_write_link_policy(hdev, skb);
3414 case HCI_OP_READ_DEF_LINK_POLICY:
3415 hci_cc_read_def_link_policy(hdev, skb);
3418 case HCI_OP_WRITE_DEF_LINK_POLICY:
3419 hci_cc_write_def_link_policy(hdev, skb);
3423 hci_cc_reset(hdev, skb);
3426 case HCI_OP_READ_STORED_LINK_KEY:
3427 hci_cc_read_stored_link_key(hdev, skb);
3430 case HCI_OP_DELETE_STORED_LINK_KEY:
3431 hci_cc_delete_stored_link_key(hdev, skb);
3434 case HCI_OP_WRITE_LOCAL_NAME:
3435 hci_cc_write_local_name(hdev, skb);
3438 case HCI_OP_READ_LOCAL_NAME:
3439 hci_cc_read_local_name(hdev, skb);
3442 case HCI_OP_WRITE_AUTH_ENABLE:
3443 hci_cc_write_auth_enable(hdev, skb);
3446 case HCI_OP_WRITE_ENCRYPT_MODE:
3447 hci_cc_write_encrypt_mode(hdev, skb);
3450 case HCI_OP_WRITE_SCAN_ENABLE:
3451 hci_cc_write_scan_enable(hdev, skb);
3454 case HCI_OP_SET_EVENT_FLT:
3455 hci_cc_set_event_filter(hdev, skb);
3458 case HCI_OP_READ_CLASS_OF_DEV:
3459 hci_cc_read_class_of_dev(hdev, skb);
3462 case HCI_OP_WRITE_CLASS_OF_DEV:
3463 hci_cc_write_class_of_dev(hdev, skb);
3466 case HCI_OP_READ_VOICE_SETTING:
3467 hci_cc_read_voice_setting(hdev, skb);
3470 case HCI_OP_WRITE_VOICE_SETTING:
3471 hci_cc_write_voice_setting(hdev, skb);
3474 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3475 hci_cc_read_num_supported_iac(hdev, skb);
3478 case HCI_OP_WRITE_SSP_MODE:
3479 hci_cc_write_ssp_mode(hdev, skb);
3482 case HCI_OP_WRITE_SC_SUPPORT:
3483 hci_cc_write_sc_support(hdev, skb);
3486 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3487 hci_cc_read_auth_payload_timeout(hdev, skb);
3490 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3491 hci_cc_write_auth_payload_timeout(hdev, skb);
3494 case HCI_OP_READ_LOCAL_VERSION:
3495 hci_cc_read_local_version(hdev, skb);
3498 case HCI_OP_READ_LOCAL_COMMANDS:
3499 hci_cc_read_local_commands(hdev, skb);
3502 case HCI_OP_READ_LOCAL_FEATURES:
3503 hci_cc_read_local_features(hdev, skb);
3506 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3507 hci_cc_read_local_ext_features(hdev, skb);
3510 case HCI_OP_READ_BUFFER_SIZE:
3511 hci_cc_read_buffer_size(hdev, skb);
3514 case HCI_OP_READ_BD_ADDR:
3515 hci_cc_read_bd_addr(hdev, skb);
3518 case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3519 hci_cc_read_local_pairing_opts(hdev, skb);
3522 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3523 hci_cc_read_page_scan_activity(hdev, skb);
3526 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3527 hci_cc_write_page_scan_activity(hdev, skb);
3530 case HCI_OP_READ_PAGE_SCAN_TYPE:
3531 hci_cc_read_page_scan_type(hdev, skb);
3534 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3535 hci_cc_write_page_scan_type(hdev, skb);
3538 case HCI_OP_READ_DATA_BLOCK_SIZE:
3539 hci_cc_read_data_block_size(hdev, skb);
3542 case HCI_OP_READ_FLOW_CONTROL_MODE:
3543 hci_cc_read_flow_control_mode(hdev, skb);
3546 case HCI_OP_READ_LOCAL_AMP_INFO:
3547 hci_cc_read_local_amp_info(hdev, skb);
3550 case HCI_OP_READ_CLOCK:
3551 hci_cc_read_clock(hdev, skb);
3554 case HCI_OP_READ_INQ_RSP_TX_POWER:
3555 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3558 case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3559 hci_cc_read_def_err_data_reporting(hdev, skb);
3562 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3563 hci_cc_write_def_err_data_reporting(hdev, skb);
3566 case HCI_OP_PIN_CODE_REPLY:
3567 hci_cc_pin_code_reply(hdev, skb);
3570 case HCI_OP_PIN_CODE_NEG_REPLY:
3571 hci_cc_pin_code_neg_reply(hdev, skb);
3574 case HCI_OP_READ_LOCAL_OOB_DATA:
3575 hci_cc_read_local_oob_data(hdev, skb);
3578 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3579 hci_cc_read_local_oob_ext_data(hdev, skb);
3582 case HCI_OP_LE_READ_BUFFER_SIZE:
3583 hci_cc_le_read_buffer_size(hdev, skb);
3586 case HCI_OP_LE_READ_LOCAL_FEATURES:
3587 hci_cc_le_read_local_features(hdev, skb);
3590 case HCI_OP_LE_READ_ADV_TX_POWER:
3591 hci_cc_le_read_adv_tx_power(hdev, skb);
3594 case HCI_OP_USER_CONFIRM_REPLY:
3595 hci_cc_user_confirm_reply(hdev, skb);
3598 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3599 hci_cc_user_confirm_neg_reply(hdev, skb);
3602 case HCI_OP_USER_PASSKEY_REPLY:
3603 hci_cc_user_passkey_reply(hdev, skb);
3606 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3607 hci_cc_user_passkey_neg_reply(hdev, skb);
3610 case HCI_OP_LE_SET_RANDOM_ADDR:
3611 hci_cc_le_set_random_addr(hdev, skb);
3614 case HCI_OP_LE_SET_ADV_ENABLE:
3615 hci_cc_le_set_adv_enable(hdev, skb);
3618 case HCI_OP_LE_SET_SCAN_PARAM:
3619 hci_cc_le_set_scan_param(hdev, skb);
3622 case HCI_OP_LE_SET_SCAN_ENABLE:
3623 hci_cc_le_set_scan_enable(hdev, skb);
3626 case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
3627 hci_cc_le_read_accept_list_size(hdev, skb);
3630 case HCI_OP_LE_CLEAR_ACCEPT_LIST:
3631 hci_cc_le_clear_accept_list(hdev, skb);
3634 case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
3635 hci_cc_le_add_to_accept_list(hdev, skb);
3638 case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
3639 hci_cc_le_del_from_accept_list(hdev, skb);
3642 case HCI_OP_LE_READ_SUPPORTED_STATES:
3643 hci_cc_le_read_supported_states(hdev, skb);
3646 case HCI_OP_LE_READ_DEF_DATA_LEN:
3647 hci_cc_le_read_def_data_len(hdev, skb);
3650 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3651 hci_cc_le_write_def_data_len(hdev, skb);
3654 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3655 hci_cc_le_add_to_resolv_list(hdev, skb);
3658 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3659 hci_cc_le_del_from_resolv_list(hdev, skb);
3662 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3663 hci_cc_le_clear_resolv_list(hdev, skb);
3666 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3667 hci_cc_le_read_resolv_list_size(hdev, skb);
3670 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3671 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3674 case HCI_OP_LE_READ_MAX_DATA_LEN:
3675 hci_cc_le_read_max_data_len(hdev, skb);
3678 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3679 hci_cc_write_le_host_supported(hdev, skb);
3682 case HCI_OP_LE_SET_ADV_PARAM:
3683 hci_cc_set_adv_param(hdev, skb);
3686 case HCI_OP_READ_RSSI:
3687 hci_cc_read_rssi(hdev, skb);
3690 case HCI_OP_READ_TX_POWER:
3691 hci_cc_read_tx_power(hdev, skb);
3694 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3695 hci_cc_write_ssp_debug_mode(hdev, skb);
3698 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3699 hci_cc_le_set_ext_scan_param(hdev, skb);
3702 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3703 hci_cc_le_set_ext_scan_enable(hdev, skb);
3706 case HCI_OP_LE_SET_DEFAULT_PHY:
3707 hci_cc_le_set_default_phy(hdev, skb);
3710 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3711 hci_cc_le_read_num_adv_sets(hdev, skb);
3714 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3715 hci_cc_set_ext_adv_param(hdev, skb);
3718 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3719 hci_cc_le_set_ext_adv_enable(hdev, skb);
3722 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3723 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3726 case HCI_OP_LE_READ_TRANSMIT_POWER:
3727 hci_cc_le_read_transmit_power(hdev, skb);
/* Unknown opcode: just log it. */
3731 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
/* Refresh command credits and timers from the event's ncmd field. */
3735 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3737 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3740 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3742 "unexpected event for opcode 0x%4.4x", *opcode);
/* If credits are available and commands are queued, resume sending. */
3746 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3747 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle HCI Command Status event: dispatch the status to the per-opcode
 * hci_cs_* handler, refresh command credits/timers, and complete pending
 * requests on failure (or on success for commands with no later event).
 */
3750 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3751 u16 *opcode, u8 *status,
3752 hci_req_complete_t *req_complete,
3753 hci_req_complete_skb_t *req_complete_skb)
3755 struct hci_ev_cmd_status *ev = (void *) skb->data;
3757 skb_pull(skb, sizeof(*ev));
3759 *opcode = __le16_to_cpu(ev->opcode);
3760 *status = ev->status;
/* Per-opcode dispatch: each hci_cs_* reacts to the command's status. */
3763 case HCI_OP_INQUIRY:
3764 hci_cs_inquiry(hdev, ev->status);
3767 case HCI_OP_CREATE_CONN:
3768 hci_cs_create_conn(hdev, ev->status);
3771 case HCI_OP_DISCONNECT:
3772 hci_cs_disconnect(hdev, ev->status);
3775 case HCI_OP_ADD_SCO:
3776 hci_cs_add_sco(hdev, ev->status);
3779 case HCI_OP_AUTH_REQUESTED:
3780 hci_cs_auth_requested(hdev, ev->status);
3783 case HCI_OP_SET_CONN_ENCRYPT:
3784 hci_cs_set_conn_encrypt(hdev, ev->status);
3787 case HCI_OP_REMOTE_NAME_REQ:
3788 hci_cs_remote_name_req(hdev, ev->status);
3791 case HCI_OP_READ_REMOTE_FEATURES:
3792 hci_cs_read_remote_features(hdev, ev->status);
3795 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3796 hci_cs_read_remote_ext_features(hdev, ev->status);
3799 case HCI_OP_SETUP_SYNC_CONN:
3800 hci_cs_setup_sync_conn(hdev, ev->status);
3803 case HCI_OP_ENHANCED_SETUP_SYNC_CONN:
3804 hci_cs_enhanced_setup_sync_conn(hdev, ev->status);
3807 case HCI_OP_SNIFF_MODE:
3808 hci_cs_sniff_mode(hdev, ev->status);
3811 case HCI_OP_EXIT_SNIFF_MODE:
3812 hci_cs_exit_sniff_mode(hdev, ev->status);
3815 case HCI_OP_SWITCH_ROLE:
3816 hci_cs_switch_role(hdev, ev->status);
3819 case HCI_OP_LE_CREATE_CONN:
3820 hci_cs_le_create_conn(hdev, ev->status);
3823 case HCI_OP_LE_READ_REMOTE_FEATURES:
3824 hci_cs_le_read_remote_features(hdev, ev->status);
3827 case HCI_OP_LE_START_ENC:
3828 hci_cs_le_start_enc(hdev, ev->status);
3831 case HCI_OP_LE_EXT_CREATE_CONN:
3832 hci_cs_le_ext_create_conn(hdev, ev->status);
/* Unknown opcode: just log it. */
3836 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3840 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3842 /* Indicate request completion if the command failed. Also, if
3843 * we're not waiting for a special event and we get a success
3844 * command status we should try to flag the request as completed
3845 * (since for this kind of commands there will not be a command
3849 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3850 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3853 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3855 "unexpected event for opcode 0x%4.4x", *opcode);
/* If credits are available and commands are queued, resume sending. */
3859 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3860 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle HCI Hardware Error event: record the controller's error code and
 * schedule the error-reset work to recover the device.
 */
3863 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3865 struct hci_ev_hardware_error *ev = (void *) skb->data;
3867 hdev->hw_error_code = ev->code;
3869 queue_work(hdev->req_workqueue, &hdev->error_reset);
/* Handle HCI Role Change event: update the connection's central/peripheral
 * role on success and confirm the role switch to upper layers.
 */
3872 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3874 struct hci_ev_role_change *ev = (void *) skb->data;
3875 struct hci_conn *conn;
3877 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3881 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3884 conn->role = ev->role;
3886 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3888 hci_role_switch_cfm(conn, ev->status, ev->role);
3891 hci_dev_unlock(hdev);
/* Handle HCI Number of Completed Packets event (packet-based flow control):
 * return transmit credits to the per-type counters (clamped to the
 * controller-reported maximums) and kick the TX worker.
 */
3894 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3896 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
/* This event is only valid in packet-based flow control mode. */
3899 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3900 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
/* Bounds-check the variable-length handle array against skb->len. */
3904 if (skb->len < sizeof(*ev) ||
3905 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3906 BT_DBG("%s bad parameters", hdev->name);
3910 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3912 for (i = 0; i < ev->num_hndl; i++) {
3913 struct hci_comp_pkts_info *info = &ev->handles[i];
3914 struct hci_conn *conn;
3915 __u16 handle, count;
3917 handle = __le16_to_cpu(info->handle);
3918 count = __le16_to_cpu(info->count);
3920 conn = hci_conn_hash_lookup_handle(hdev, handle);
3924 conn->sent -= count;
3926 switch (conn->type) {
/* (ACL_LINK) */
3928 hdev->acl_cnt += count;
3929 if (hdev->acl_cnt > hdev->acl_pkts)
3930 hdev->acl_cnt = hdev->acl_pkts;
/* (LE_LINK) LE credits fall back to the ACL pool when the controller
 * reports no dedicated LE buffers. */
3934 if (hdev->le_pkts) {
3935 hdev->le_cnt += count;
3936 if (hdev->le_cnt > hdev->le_pkts)
3937 hdev->le_cnt = hdev->le_pkts;
3939 hdev->acl_cnt += count;
3940 if (hdev->acl_cnt > hdev->acl_pkts)
3941 hdev->acl_cnt = hdev->acl_pkts;
/* (SCO/eSCO) */
3946 hdev->sco_cnt += count;
3947 if (hdev->sco_cnt > hdev->sco_pkts)
3948 hdev->sco_cnt = hdev->sco_pkts;
3952 bt_dev_err(hdev, "unknown type %d conn %p",
/* Credits returned: let the TX worker send more queued data. */
3958 queue_work(hdev->workqueue, &hdev->tx_work);
/* Look up a connection by handle according to the device type: a plain
 * connection lookup for BR/EDR controllers, or via the channel table for
 * AMP controllers.  NOTE(review): the case labels and the channel->conn
 * return line are not visible in this excerpt.
 */
3961 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3964 struct hci_chan *chan;
3966 switch (hdev->dev_type) {
3968 return hci_conn_hash_lookup_handle(hdev, handle);
3970 chan = hci_chan_lookup_handle(hdev, handle);
3975 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
/* Handle HCI Number of Completed Data Blocks event (block-based flow
 * control): return block credits to the shared pool (clamped to the
 * controller-reported maximum) and kick the TX worker.
 */
3982 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3984 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
/* This event is only valid in block-based flow control mode. */
3987 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3988 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
/* Bounds-check the variable-length handle array against skb->len. */
3992 if (skb->len < sizeof(*ev) ||
3993 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3994 BT_DBG("%s bad parameters", hdev->name);
3998 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
4001 for (i = 0; i < ev->num_hndl; i++) {
4002 struct hci_comp_blocks_info *info = &ev->handles[i];
4003 struct hci_conn *conn = NULL;
4004 __u16 handle, block_count;
4006 handle = __le16_to_cpu(info->handle);
4007 block_count = __le16_to_cpu(info->blocks);
/* Device-type aware lookup (handles AMP channels too). */
4009 conn = __hci_conn_lookup_handle(hdev, handle);
4013 conn->sent -= block_count;
4015 switch (conn->type) {
/* (ACL/AMP links share the block pool) */
4018 hdev->block_cnt += block_count;
4019 if (hdev->block_cnt > hdev->num_blocks)
4020 hdev->block_cnt = hdev->num_blocks;
4024 bt_dev_err(hdev, "unknown type %d conn %p",
/* Credits returned: let the TX worker send more queued data. */
4030 queue_work(hdev->workqueue, &hdev->tx_work);
/* Handle HCI Mode Change event: track active/sniff mode, maintain the
 * power-save flag, and finish any deferred SCO setup.
 */
4033 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4035 struct hci_ev_mode_change *ev = (void *) skb->data;
4036 struct hci_conn *conn;
4038 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4042 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4044 conn->mode = ev->mode;
/* Only a controller-initiated change updates the power-save flag; a
 * host-requested change was already accounted for. */
4046 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4048 if (conn->mode == HCI_CM_ACTIVE)
4049 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4051 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
/* SCO setup that waited for the mode change can proceed now. */
4054 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4055 hci_sco_setup(conn, ev->status);
4058 hci_dev_unlock(hdev);
/* Handle HCI PIN Code Request event: reject the pairing when not bondable
 * (and we did not initiate auth), otherwise forward the request to user
 * space via mgmt.
 */
4061 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4063 struct hci_ev_pin_code_req *ev = (void *) skb->data;
4064 struct hci_conn *conn;
4066 BT_DBG("%s", hdev->name);
4070 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Extend the disconnect timeout while pairing is in progress. */
4074 if (conn->state == BT_CONNECTED) {
4075 hci_conn_hold(conn);
4076 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4077 hci_conn_drop(conn);
/* Non-bondable and we did not start auth: refuse the PIN request. */
4080 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4081 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4082 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4083 sizeof(ev->bdaddr), &ev->bdaddr);
4084 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
/* High pending security requires a 16-digit (secure) PIN. */
4087 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4092 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4096 hci_dev_unlock(hdev);
/* Record a link key's type and PIN length on the connection and derive the
 * pending security level from the key type.  NOTE(review): several lines
 * (early return for changed-combination keys, some assignments and breaks)
 * are not visible in this excerpt.
 */
4099 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
/* Changed-combination keys keep the existing recorded type. */
4101 if (key_type == HCI_LK_CHANGED_COMBINATION)
4104 conn->pin_length = pin_len;
4105 conn->key_type = key_type;
4108 case HCI_LK_LOCAL_UNIT:
4109 case HCI_LK_REMOTE_UNIT:
4110 case HCI_LK_DEBUG_COMBINATION:
/* Legacy combination keys: HIGH only with a full 16-digit PIN. */
4112 case HCI_LK_COMBINATION:
4114 conn->pending_sec_level = BT_SECURITY_HIGH;
4116 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4118 case HCI_LK_UNAUTH_COMBINATION_P192:
4119 case HCI_LK_UNAUTH_COMBINATION_P256:
4120 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4122 case HCI_LK_AUTH_COMBINATION_P192:
4123 conn->pending_sec_level = BT_SECURITY_HIGH;
/* P-256 authenticated keys qualify for FIPS-level security. */
4125 case HCI_LK_AUTH_COMBINATION_P256:
4126 conn->pending_sec_level = BT_SECURITY_FIPS;
/* Handle HCI Link Key Request event: look up a stored key for the peer and
 * reply with it, unless the key's strength is insufficient for the pending
 * security level, in which case send a negative reply.
 */
4131 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4133 struct hci_ev_link_key_req *ev = (void *) skb->data;
4134 struct hci_cp_link_key_reply cp;
4135 struct hci_conn *conn;
4136 struct link_key *key;
4138 BT_DBG("%s", hdev->name);
4140 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4145 key = hci_find_link_key(hdev, &ev->bdaddr);
4147 BT_DBG("%s link key not found for %pMR", hdev->name,
4152 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
4155 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4157 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
/* Refuse to reuse an unauthenticated key when MITM protection was
 * requested (auth_type bit 0). */
4159 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4160 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4161 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4162 BT_DBG("%s ignoring unauthenticated key", hdev->name);
/* A short-PIN combination key is not acceptable for HIGH/FIPS. */
4166 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4167 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4168 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4169 BT_DBG("%s ignoring key unauthenticated for high security",
4174 conn_set_key(conn, key->type, key->pin_len);
/* Reply with the stored key. */
4177 bacpy(&cp.bdaddr, &ev->bdaddr);
4178 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4180 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4182 hci_dev_unlock(hdev);
/* (negative-reply path) */
4187 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4188 hci_dev_unlock(hdev);
/* Handle HCI Link Key Notification event: store the new key, inform user
 * space, and drop non-persistent debug keys unless keeping them is enabled.
 */
4191 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4193 struct hci_ev_link_key_notify *ev = (void *) skb->data;
4194 struct hci_conn *conn;
4195 struct link_key *key;
4199 BT_DBG("%s", hdev->name);
4203 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Reset the disconnect timeout now that pairing produced a key. */
4207 hci_conn_hold(conn);
4208 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4209 hci_conn_drop(conn);
4211 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4212 conn_set_key(conn, ev->key_type, conn->pin_length);
4214 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4217 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4218 ev->key_type, pin_len, &persistent);
4222 /* Update connection information since adding the key will have
4223 * fixed up the type in the case of changed combination keys.
4225 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4226 conn_set_key(conn, key->type, key->pin_len);
4228 mgmt_new_link_key(hdev, key, persistent);
4230 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4231 * is set. If it's not set simply remove the key from the kernel
4232 * list (we've still notified user space about it but with
4233 * store_hint being 0).
4235 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4236 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4237 list_del_rcu(&key->list);
4238 kfree_rcu(key, rcu);
/* Persistent keys survive disconnect; others are flushed with it. */
4243 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4245 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4248 hci_dev_unlock(hdev);
/* Handle HCI Read Clock Offset Complete event: refresh the cached clock
 * offset in the inquiry cache entry for the peer.
 */
4251 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4253 struct hci_ev_clock_offset *ev = (void *) skb->data;
4254 struct hci_conn *conn;
4256 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4260 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4261 if (conn && !ev->status) {
4262 struct inquiry_entry *ie;
4264 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4266 ie->data.clock_offset = ev->clock_offset;
4267 ie->timestamp = jiffies;
4271 hci_dev_unlock(hdev);
/* Handle HCI Connection Packet Type Changed event: record the new packet
 * type on the connection when the change succeeded.
 */
4274 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4276 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4277 struct hci_conn *conn;
4279 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4283 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4284 if (conn && !ev->status)
4285 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4287 hci_dev_unlock(hdev);
/* Handle HCI Page Scan Repetition Mode Change event: refresh the cached
 * page-scan repetition mode in the peer's inquiry cache entry.
 */
4290 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4292 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4293 struct inquiry_entry *ie;
4295 BT_DBG("%s", hdev->name);
4299 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4301 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4302 ie->timestamp = jiffies;
4305 hci_dev_unlock(hdev);
/* Handle HCI Inquiry Result with RSSI event.  Two on-air formats exist
 * (with and without the pscan_mode field); the per-response size is used
 * to tell them apart.  Each response updates the inquiry cache and is
 * reported to mgmt as a found device.
 */
4308 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4309 struct sk_buff *skb)
4311 struct inquiry_data data;
4312 int num_rsp = *((__u8 *) skb->data);
4314 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Results during periodic inquiry are not reported. */
4319 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Variant with the extra pscan_mode field. */
4324 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4325 struct inquiry_info_with_rssi_and_pscan_mode *info;
4326 info = (void *) (skb->data + 1);
/* Guard against a truncated event before walking the array. */
4328 if (skb->len < num_rsp * sizeof(*info) + 1)
4331 for (; num_rsp; num_rsp--, info++) {
4334 bacpy(&data.bdaddr, &info->bdaddr);
4335 data.pscan_rep_mode = info->pscan_rep_mode;
4336 data.pscan_period_mode = info->pscan_period_mode;
4337 data.pscan_mode = info->pscan_mode;
4338 memcpy(data.dev_class, info->dev_class, 3);
4339 data.clock_offset = info->clock_offset;
4340 data.rssi = info->rssi;
4341 data.ssp_mode = 0x00;
4343 flags = hci_inquiry_cache_update(hdev, &data, false);
4345 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4346 info->dev_class, info->rssi,
4347 flags, NULL, 0, NULL, 0);
/* Standard variant without pscan_mode. */
4350 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4352 if (skb->len < num_rsp * sizeof(*info) + 1)
4355 for (; num_rsp; num_rsp--, info++) {
4358 bacpy(&data.bdaddr, &info->bdaddr);
4359 data.pscan_rep_mode = info->pscan_rep_mode;
4360 data.pscan_period_mode = info->pscan_period_mode;
4361 data.pscan_mode = 0x00;
4362 memcpy(data.dev_class, info->dev_class, 3);
4363 data.clock_offset = info->clock_offset;
4364 data.rssi = info->rssi;
4365 data.ssp_mode = 0x00;
4367 flags = hci_inquiry_cache_update(hdev, &data, false);
4369 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4370 info->dev_class, info->rssi,
4371 flags, NULL, 0, NULL, 0);
4376 hci_dev_unlock(hdev);
/* Handle HCI Read Remote Extended Features Complete event: cache the
 * reported feature page, derive SSP/SC flags from host features (page 1),
 * and continue connection setup (name request / connected notification /
 * auth).
 */
4379 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4380 struct sk_buff *skb)
4382 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4383 struct hci_conn *conn;
4385 BT_DBG("%s", hdev->name);
4389 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Store the page only if it fits the per-connection feature table. */
4393 if (ev->page < HCI_MAX_PAGES)
4394 memcpy(conn->features[ev->page], ev->features, 8);
/* Page 1 carries the remote host's SSP/SC support bits. */
4396 if (!ev->status && ev->page == 0x01) {
4397 struct inquiry_entry *ie;
4399 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4401 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4403 if (ev->features[0] & LMP_HOST_SSP) {
4404 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4406 /* It is mandatory by the Bluetooth specification that
4407 * Extended Inquiry Results are only used when Secure
4408 * Simple Pairing is enabled, but some devices violate
4411 * To make these devices work, the internal SSP
4412 * enabled flag needs to be cleared if the remote host
4413 * features do not indicate SSP support */
4414 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4417 if (ev->features[0] & LMP_HOST_SC)
4418 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4421 if (conn->state != BT_CONFIG)
/* Not yet reported to mgmt: resolve the remote name first. */
4424 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4425 struct hci_cp_remote_name_req cp;
4426 memset(&cp, 0, sizeof(cp));
4427 bacpy(&cp.bdaddr, &conn->dst);
4428 cp.pscan_rep_mode = 0x02;
4429 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4430 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4431 mgmt_device_connected(hdev, conn, NULL, 0);
4433 if (!hci_outgoing_auth_needed(hdev, conn)) {
4434 conn->state = BT_CONNECTED;
4435 hci_connect_cfm(conn, ev->status);
4436 hci_conn_drop(conn);
4440 hci_dev_unlock(hdev);
/* HCI Synchronous (SCO/eSCO) Connection Complete event.
 * Looks up the pending synchronous connection, registers it on success,
 * retries with a downgraded packet type on selected failure codes, and
 * otherwise closes it. On success, selects a driver notification based
 * on the negotiated air mode (CVSD vs transparent).
 */
4443 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4444 struct sk_buff *skb)
4446 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4447 struct hci_conn *conn;
4448 unsigned int notify_evt;
4450 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4454 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4456 if (ev->link_type == ESCO_LINK)
4459 /* When the link type in the event indicates SCO connection
4460 * and lookup of the connection object fails, then check
4461 * if an eSCO connection object exists.
4463 * The core limits the synchronous connections to either
4464 * SCO or eSCO. The eSCO connection is preferred and tried
4465 * to be setup first and until successfully established,
4466 * the link type will be hinted as eSCO.
4468 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4473 switch (ev->status) {
4475 /* The synchronous connection complete event should only be
4476 * sent once per new connection. Receiving a successful
4477 * complete event when the connection status is already
4478 * BT_CONNECTED means that the device is misbehaving and sent
4479 * multiple complete event packets for the same new connection.
4481 * Registering the device more than once can corrupt kernel
4482 * memory, hence upon detecting this invalid event, we report
4483 * an error and ignore the packet.
4485 if (conn->state == BT_CONNECTED) {
4486 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
4490 conn->handle = __le16_to_cpu(ev->handle);
4491 conn->state = BT_CONNECTED;
4492 conn->type = ev->link_type;
4494 hci_debugfs_create_conn(conn);
4495 hci_conn_add_sysfs(conn);
/* Failure codes below trigger a retry with a reduced packet type. */
4498 case 0x10: /* Connection Accept Timeout */
4499 case 0x0d: /* Connection Rejected due to Limited Resources */
4500 case 0x11: /* Unsupported Feature or Parameter Value */
4501 case 0x1c: /* SCO interval rejected */
4502 case 0x1a: /* Unsupported Remote Feature */
4503 case 0x1e: /* Invalid LMP Parameters */
4504 case 0x1f: /* Unspecified error */
4505 case 0x20: /* Unsupported LMP Parameter value */
4507 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4508 (hdev->esco_type & EDR_ESCO_MASK);
4509 if (hci_setup_sync(conn, conn->link->handle))
/* Retry not possible (or other failure): tear the connection down. */
4515 conn->state = BT_CLOSED;
4519 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
4521 switch (ev->air_mode) {
4523 notify_evt = HCI_NOTIFY_ENABLE_SCO_CVSD;
4526 notify_evt = HCI_NOTIFY_ENABLE_SCO_TRANSP;
4530 /* Notify only in case of SCO over HCI transport data path which
4531 * is zero and non-zero value shall be non-HCI transport data path
4533 if (conn->codec.data_path == 0) {
4535 hdev->notify(hdev, notify_evt);
4538 hci_connect_cfm(conn, ev->status);
4543 hci_dev_unlock(hdev);
/* Walk an EIR (Extended Inquiry Response) data block, advancing one
 * length-prefixed field at a time, to determine the length of its
 * significant part within eir_len.
 * NOTE(review): the zero-terminator check and the return statement are
 * not visible in this excerpt — presumably iteration stops at the first
 * zero-length field and the parsed length is returned; confirm against
 * the full source.
 */
4546 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4550 while (parsed < eir_len) {
4551 u8 field_len = eir[0];
/* Each field occupies field_len bytes plus its one-byte length prefix. */
4556 parsed += field_len + 1;
4557 eir += field_len + 1;
/* HCI Extended Inquiry Result event.
 * Validates the response count against the skb length, then for each
 * response updates the inquiry cache (ssp_mode forced to 0x01 since EIR
 * implies SSP) and reports the device to mgmt together with its EIR
 * data, trimmed to its significant length.
 */
4563 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4564 struct sk_buff *skb)
4566 struct inquiry_data data;
/* First byte of the event is the response count; entries follow. */
4567 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4568 int num_rsp = *((__u8 *) skb->data);
4571 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Guard against truncated or empty events. */
4573 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4576 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4581 for (; num_rsp; num_rsp--, info++) {
4585 bacpy(&data.bdaddr, &info->bdaddr);
4586 data.pscan_rep_mode = info->pscan_rep_mode;
4587 data.pscan_period_mode = info->pscan_period_mode;
4588 data.pscan_mode = 0x00;
4589 memcpy(data.dev_class, info->dev_class, 3);
4590 data.clock_offset = info->clock_offset;
4591 data.rssi = info->rssi;
/* Extended inquiry results are only sent by SSP-capable remotes. */
4592 data.ssp_mode = 0x01;
4594 if (hci_dev_test_flag(hdev, HCI_MGMT))
4595 name_known = eir_get_data(info->data,
4597 EIR_NAME_COMPLETE, NULL);
4601 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4603 eir_len = eir_get_length(info->data, sizeof(info->data));
4605 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4606 info->dev_class, info->rssi,
4607 flags, info->data, eir_len, NULL, 0);
4610 hci_dev_unlock(hdev);
/* HCI Encryption Key Refresh Complete event.
 * Only acted upon for LE links (BR/EDR handles this via the
 * auth_complete event): promotes the pending security level, clears the
 * encryption-pending flag, disconnects on failure, and completes the
 * connect/auth confirmation depending on connection state.
 */
4613 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4614 struct sk_buff *skb)
4616 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4617 struct hci_conn *conn;
4619 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4620 __le16_to_cpu(ev->handle));
4624 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4628 /* For BR/EDR the necessary steps are taken through the
4629 * auth_complete event.
4631 if (conn->type != LE_LINK)
/* Key refresh succeeded or finished: the requested level is now active. */
4635 conn->sec_level = conn->pending_sec_level;
4637 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* A failed refresh on an established link warrants disconnection. */
4639 if (ev->status && conn->state == BT_CONNECTED) {
4640 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4641 hci_conn_drop(conn);
4645 if (conn->state == BT_CONFIG) {
4647 conn->state = BT_CONNECTED;
4649 hci_connect_cfm(conn, ev->status);
4650 hci_conn_drop(conn);
4652 hci_auth_cfm(conn, ev->status);
4654 hci_conn_hold(conn);
4655 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4656 hci_conn_drop(conn);
4660 hci_dev_unlock(hdev);
/* Compute the authentication requirement to send in an IO Capability
 * Reply, combining the remote's requested requirement with our own.
 * Bit 0 of the auth requirement is the MITM-protection flag.
 */
4663 static u8 hci_get_auth_req(struct hci_conn *conn)
4665 /* If remote requests no-bonding follow that lead */
4666 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4667 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4668 return conn->remote_auth | (conn->auth_type & 0x01);
4670 /* If both remote and local have enough IO capabilities, require
4673 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4674 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4675 return conn->remote_auth | 0x01;
4677 /* No MITM protection possible so ignore remote requirement */
4678 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Determine the OOB-data-present value for an IO Capability Reply on a
 * BR/EDR connection, based on stored remote OOB data and whether Secure
 * Connections (and SC-Only mode) is in effect.
 * NOTE(review): the early-return for missing data and the final return
 * values are not visible in this excerpt.
 */
4681 static u8 bredr_oob_data_present(struct hci_conn *conn)
4683 struct hci_dev *hdev = conn->hdev;
4684 struct oob_data *data;
4686 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4690 if (bredr_sc_enabled(hdev)) {
4691 /* When Secure Connections is enabled, then just
4692 * return the present value stored with the OOB
4693 * data. The stored value contains the right present
4694 * information. However it can only be trusted when
4695 * not in Secure Connection Only mode.
4697 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4698 return data->present;
4700 /* When Secure Connections Only mode is enabled, then
4701 * the P-256 values are required. If they are not
4702 * available, then do not declare that OOB data is
4705 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4706 !memcmp(data->hash256, ZERO_KEY, 16))
4712 /* When Secure Connections is not enabled or actually
4713 * not supported by the hardware, then check that if
4714 * P-192 data values are present.
4716 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4717 !memcmp(data->hash192, ZERO_KEY, 16))
/* HCI IO Capability Request event.
 * If pairing is permitted (bondable, we initiated, or remote requests
 * no-bonding) reply with our IO capability, the computed authentication
 * requirement and OOB-data presence; otherwise send a negative reply
 * with "pairing not allowed".
 */
4723 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4725 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4726 struct hci_conn *conn;
4728 BT_DBG("%s", hdev->name);
4732 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4736 hci_conn_hold(conn);
4738 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4741 /* Allow pairing if we're pairable, the initiators of the
4742 * pairing or if the remote is not requesting bonding.
4744 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4745 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4746 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4747 struct hci_cp_io_capability_reply cp;
4749 bacpy(&cp.bdaddr, &ev->bdaddr);
4750 /* Change the IO capability from KeyboardDisplay
4751 * to DisplayYesNo as it is not supported by BT spec. */
4752 cp.capability = (conn->io_capability == 0x04) ?
4753 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4755 /* If we are initiators, there is no remote information yet */
4756 if (conn->remote_auth == 0xff) {
4757 /* Request MITM protection if our IO caps allow it
4758 * except for the no-bonding case.
4760 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4761 conn->auth_type != HCI_AT_NO_BONDING)
4762 conn->auth_type |= 0x01;
4764 conn->auth_type = hci_get_auth_req(conn);
4767 /* If we're not bondable, force one of the non-bondable
4768 * authentication requirement values.
4770 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4771 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4773 cp.authentication = conn->auth_type;
4774 cp.oob_data = bredr_oob_data_present(conn);
4776 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4779 struct hci_cp_io_capability_neg_reply cp;
4781 bacpy(&cp.bdaddr, &ev->bdaddr);
4782 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4784 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4789 hci_dev_unlock(hdev);
/* HCI IO Capability Response event.
 * Records the remote device's IO capability and authentication
 * requirement on the connection for use by the later pairing steps.
 */
4792 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4794 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4795 struct hci_conn *conn;
4797 BT_DBG("%s", hdev->name);
4801 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4805 conn->remote_cap = ev->capability;
4806 conn->remote_auth = ev->authentication;
4809 hci_dev_unlock(hdev);
/* HCI User Confirmation Request event (SSP numeric comparison).
 * Rejects when we require MITM but the remote cannot provide it;
 * auto-accepts (possibly after a configurable delay) when neither side
 * requires MITM and no link key already exists; otherwise forwards the
 * request to user space via mgmt with an appropriate confirm_hint.
 */
4812 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4813 struct sk_buff *skb)
4815 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4816 int loc_mitm, rem_mitm, confirm_hint = 0;
4817 struct hci_conn *conn;
4819 BT_DBG("%s", hdev->name);
4823 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4826 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement is the MITM flag on each side. */
4830 loc_mitm = (conn->auth_type & 0x01);
4831 rem_mitm = (conn->remote_auth & 0x01);
4833 /* If we require MITM but the remote device can't provide that
4834 * (it has NoInputNoOutput) then reject the confirmation
4835 * request. We check the security level here since it doesn't
4836 * necessarily match conn->auth_type.
4838 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4839 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4840 BT_DBG("Rejecting request: remote device can't provide MITM");
4841 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4842 sizeof(ev->bdaddr), &ev->bdaddr);
4846 /* If no side requires MITM protection; auto-accept */
4847 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4848 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4850 /* If we're not the initiators request authorization to
4851 * proceed from user space (mgmt_user_confirm with
4852 * confirm_hint set to 1). The exception is if neither
4853 * side had MITM or if the local IO capability is
4854 * NoInputNoOutput, in which case we do auto-accept
4856 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4857 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4858 (loc_mitm || rem_mitm)) {
4859 BT_DBG("Confirming auto-accept as acceptor");
4864 /* If there already exists link key in local host, leave the
4865 * decision to user space since the remote device could be
4866 * legitimate or malicious.
4868 if (hci_find_link_key(hdev, &ev->bdaddr)) {
4869 bt_dev_dbg(hdev, "Local host already has link key");
4874 BT_DBG("Auto-accept of user confirmation with %ums delay",
4875 hdev->auto_accept_delay);
4877 if (hdev->auto_accept_delay > 0) {
4878 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
/* Deferred accept: auto_accept_work sends the reply after the delay. */
4879 queue_delayed_work(conn->hdev->workqueue,
4880 &conn->auto_accept_work, delay);
4884 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4885 sizeof(ev->bdaddr), &ev->bdaddr);
4890 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4891 le32_to_cpu(ev->passkey), confirm_hint);
4894 hci_dev_unlock(hdev);
/* HCI User Passkey Request event.
 * Simply forwards the request to user space via mgmt when the mgmt
 * interface is active; no local decision is taken here.
 */
4897 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4898 struct sk_buff *skb)
4900 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4902 BT_DBG("%s", hdev->name);
4904 if (hci_dev_test_flag(hdev, HCI_MGMT))
4905 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* HCI User Passkey Notification event.
 * Stores the passkey to display on the connection, resets the entered
 * digit counter, and notifies user space via mgmt.
 */
4908 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4909 struct sk_buff *skb)
4911 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4912 struct hci_conn *conn;
4914 BT_DBG("%s", hdev->name);
4916 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4920 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4921 conn->passkey_entered = 0;
4923 if (hci_dev_test_flag(hdev, HCI_MGMT))
4924 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4925 conn->dst_type, conn->passkey_notify,
4926 conn->passkey_entered);
/* HCI Keypress Notification event.
 * Tracks how many passkey digits the remote device has entered so far
 * (started/entered/erased/cleared/completed) and relays the updated
 * count to user space via mgmt.
 */
4929 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4931 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4932 struct hci_conn *conn;
4934 BT_DBG("%s", hdev->name);
4936 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4941 case HCI_KEYPRESS_STARTED:
4942 conn->passkey_entered = 0;
4945 case HCI_KEYPRESS_ENTERED:
4946 conn->passkey_entered++;
4949 case HCI_KEYPRESS_ERASED:
4950 conn->passkey_entered--;
4953 case HCI_KEYPRESS_CLEARED:
4954 conn->passkey_entered = 0;
4957 case HCI_KEYPRESS_COMPLETED:
4961 if (hci_dev_test_flag(hdev, HCI_MGMT))
4962 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4963 conn->dst_type, conn->passkey_notify,
4964 conn->passkey_entered);
/* HCI Simple Pairing Complete event.
 * Resets the cached remote authentication requirement, reports pairing
 * failure to mgmt when we were not the auth initiator (to avoid a
 * duplicate auth_failed from the auth_complete path), and drops the
 * reference taken when pairing started.
 */
4967 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4968 struct sk_buff *skb)
4970 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4971 struct hci_conn *conn;
4973 BT_DBG("%s", hdev->name);
4977 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4981 /* Reset the authentication requirement to unknown */
4982 conn->remote_auth = 0xff;
4984 /* To avoid duplicate auth_failed events to user space we check
4985 * the HCI_CONN_AUTH_PEND flag which will be set if we
4986 * initiated the authentication. A traditional auth_complete
4987 * event gets always produced as initiator and is also mapped to
4988 * the mgmt_auth_failed event */
4989 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4990 mgmt_auth_failed(conn, ev->status)
4992 hci_conn_drop(conn);
4995 hci_dev_unlock(hdev);
/* HCI Remote Host Supported Features Notification event.
 * Caches the remote host features as feature page 1 on the connection
 * and mirrors the SSP host-support bit into the inquiry cache entry.
 */
4998 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4999 struct sk_buff *skb)
5001 struct hci_ev_remote_host_features *ev = (void *) skb->data;
5002 struct inquiry_entry *ie;
5003 struct hci_conn *conn;
5005 BT_DBG("%s", hdev->name);
5009 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5011 memcpy(conn->features[1], ev->features, 8);
5013 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5015 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5017 hci_dev_unlock(hdev);
/* HCI Remote OOB Data Request event.
 * Answers with stored out-of-band pairing data for the peer: a negative
 * reply when none is stored; the extended (P-192 + P-256) reply when
 * Secure Connections is enabled, zeroing the P-192 values in SC-Only
 * mode; otherwise the legacy P-192-only reply.
 */
5020 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
5021 struct sk_buff *skb)
5023 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
5024 struct oob_data *data;
5026 BT_DBG("%s", hdev->name);
5030 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5033 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5035 struct hci_cp_remote_oob_data_neg_reply cp;
5037 bacpy(&cp.bdaddr, &ev->bdaddr);
5038 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5043 if (bredr_sc_enabled(hdev)) {
5044 struct hci_cp_remote_oob_ext_data_reply cp;
5046 bacpy(&cp.bdaddr, &ev->bdaddr);
/* SC-Only mode: never expose P-192 values, send zeros instead. */
5047 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5048 memset(cp.hash192, 0, sizeof(cp.hash192));
5049 memset(cp.rand192, 0, sizeof(cp.rand192));
5051 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5052 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5054 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5055 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5057 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5060 struct hci_cp_remote_oob_data_reply cp;
5062 bacpy(&cp.bdaddr, &ev->bdaddr);
5063 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5064 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5066 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5071 hci_dev_unlock(hdev);
5074 #if IS_ENABLED(CONFIG_BT_HS)
/* AMP Channel Selected event (CONFIG_BT_HS only).
 * Looks up the physical link by handle and triggers reading of the
 * final local AMP association data.
 */
5075 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5077 struct hci_ev_channel_selected *ev = (void *)skb->data;
5078 struct hci_conn *hcon;
5080 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5082 skb_pull(skb, sizeof(*ev));
5084 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5088 amp_read_loc_assoc_final_data(hdev, hcon);
/* AMP Physical Link Complete event (CONFIG_BT_HS only).
 * On success, marks the AMP physical link connected, copies the peer
 * address from the underlying BR/EDR connection, registers debugfs and
 * sysfs entries, and confirms the physical link to the AMP manager.
 */
5091 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
5092 struct sk_buff *skb)
5094 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
5095 struct hci_conn *hcon, *bredr_hcon;
5097 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
5102 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
/* The AMP link piggybacks on the BR/EDR connection of its manager. */
5114 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5116 hcon->state = BT_CONNECTED;
5117 bacpy(&hcon->dst, &bredr_hcon->dst);
5119 hci_conn_hold(hcon);
5120 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5121 hci_conn_drop(hcon);
5123 hci_debugfs_create_conn(hcon);
5124 hci_conn_add_sysfs(hcon);
5126 amp_physical_cfm(bredr_hcon, hcon);
5129 hci_dev_unlock(hdev);
/* AMP Logical Link Complete event (CONFIG_BT_HS only).
 * Creates an hci_chan for the logical link on the physical connection
 * and, when a BR/EDR L2CAP channel is waiting on the AMP manager,
 * confirms the logical link to L2CAP with the block MTU.
 */
5132 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5134 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
5135 struct hci_conn *hcon;
5136 struct hci_chan *hchan;
5137 struct amp_mgr *mgr;
5139 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5140 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
5143 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5147 /* Create AMP hchan */
5148 hchan = hci_chan_create(hcon);
5152 hchan->handle = le16_to_cpu(ev->handle);
5155 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5157 mgr = hcon->amp_mgr;
5158 if (mgr && mgr->bredr_chan) {
5159 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5161 l2cap_chan_lock(bredr_chan);
/* AMP data path uses the controller's block MTU for L2CAP. */
5163 bredr_chan->conn->mtu = hdev->block_mtu;
5164 l2cap_logical_cfm(bredr_chan, hchan, 0);
5165 hci_conn_hold(hcon);
5167 l2cap_chan_unlock(bredr_chan);
/* AMP Disconnection Logical Link Complete event (CONFIG_BT_HS only).
 * Looks up the AMP hci_chan by logical-link handle and destroys it,
 * passing the disconnect reason on to the AMP layer.
 */
5171 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5172 struct sk_buff *skb)
5174 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5175 struct hci_chan *hchan;
5177 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5178 le16_to_cpu(ev->handle), ev->status);
5185 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
/* Only AMP channels are handled here. */
5186 if (!hchan || !hchan->amp)
5189 amp_destroy_logical_link(hchan, ev->reason);
5192 hci_dev_unlock(hdev);
/* AMP Disconnection Physical Link Complete event (CONFIG_BT_HS only).
 * Marks the physical link's connection object closed.
 */
5195 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5196 struct sk_buff *skb)
5198 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5199 struct hci_conn *hcon;
5201 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5208 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5210 hcon->state = BT_CLOSED;
5214 hci_dev_unlock(hdev);
/* Fill in the initiator/responder address fields of an LE connection
 * from the connection-complete information. The first branch set (dst/
 * resp from the event, init from local RPA/privacy/identity) applies in
 * one role and the second (resp from local advertising address, init
 * from the event, plus default conn-interval bounds) in the other;
 * the role test itself is elided in this excerpt — confirm against the
 * full source.
 */
5218 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5219 u8 bdaddr_type, bdaddr_t *local_rpa)
5222 conn->dst_type = bdaddr_type;
5223 conn->resp_addr_type = bdaddr_type;
5224 bacpy(&conn->resp_addr, bdaddr);
5226 /* Check if the controller has set a Local RPA then it must be
5227 * used instead or hdev->rpa.
5229 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5230 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5231 bacpy(&conn->init_addr, local_rpa);
5232 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5233 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5234 bacpy(&conn->init_addr, &conn->hdev->rpa);
5236 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5237 &conn->init_addr_type);
5240 conn->resp_addr_type = conn->hdev->adv_addr_type;
5241 /* Check if the controller has set a Local RPA then it must be
5242 * used instead or hdev->rpa.
5244 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5245 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5246 bacpy(&conn->resp_addr, local_rpa);
5247 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5248 /* In case of ext adv, resp_addr will be updated in
5249 * Adv Terminated event.
5251 if (!ext_adv_capable(conn->hdev))
5252 bacpy(&conn->resp_addr,
5253 &conn->hdev->random_addr);
5255 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5258 conn->init_addr_type = bdaddr_type;
5259 bacpy(&conn->init_addr, bdaddr);
5261 /* For incoming connections, set the default minimum
5262 * and maximum connection interval. They will be used
5263 * to check if the parameters are in range and if not
5264 * trigger the connection update procedure.
5266 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5267 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
/* Common handler for LE Connection Complete (legacy and enhanced).
 * Locates or creates the hci_conn, resolves addresses (local RPA,
 * identity via IRK), drops blocked peers, reports the connection to
 * mgmt, stores the negotiated parameters, optionally requests remote
 * features (central role, or peripheral when the controller supports
 * peripheral-initiated exchange), and finally clears any matching
 * pending-connect params and refreshes background scanning.
 * NOTE(review): locking, role test and failure-status branch lines are
 * partially elided in this excerpt.
 */
5271 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5272 bdaddr_t *bdaddr, u8 bdaddr_type,
5273 bdaddr_t *local_rpa, u8 role, u16 handle,
5274 u16 interval, u16 latency,
5275 u16 supervision_timeout)
5277 struct hci_conn_params *params;
5278 struct hci_conn *conn;
5279 struct smp_irk *irk;
5284 /* All controllers implicitly stop advertising in the event of a
5285 * connection, so ensure that the state bit is cleared.
5287 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5289 conn = hci_lookup_le_connect(hdev);
5291 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5293 bt_dev_err(hdev, "no memory for new connection");
5297 conn->dst_type = bdaddr_type;
5299 /* If we didn't have a hci_conn object previously
5300 * but we're in central role this must be something
5301 * initiated using an accept list. Since accept list based
5302 * connections are not "first class citizens" we don't
5303 * have full tracking of them. Therefore, we go ahead
5304 * with a "best effort" approach of determining the
5305 * initiator address based on the HCI_PRIVACY flag.
5308 conn->resp_addr_type = bdaddr_type;
5309 bacpy(&conn->resp_addr, bdaddr);
5310 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5311 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5312 bacpy(&conn->init_addr, &hdev->rpa);
5314 hci_copy_identity_address(hdev,
5316 &conn->init_addr_type);
/* A completed connection cancels the pending connect timeout. */
5320 cancel_delayed_work(&conn->le_conn_timeout);
5323 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5325 /* Lookup the identity address from the stored connection
5326 * address and address type.
5328 * When establishing connections to an identity address, the
5329 * connection procedure will store the resolvable random
5330 * address first. Now if it can be converted back into the
5331 * identity address, start using the identity address from
5334 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5336 bacpy(&conn->dst, &irk->bdaddr);
5337 conn->dst_type = irk->addr_type;
5340 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5343 hci_le_conn_failed(conn, status);
5347 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5348 addr_type = BDADDR_LE_PUBLIC;
5350 addr_type = BDADDR_LE_RANDOM;
5352 /* Drop the connection if the device is blocked */
5353 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5354 hci_conn_drop(conn);
5358 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5359 mgmt_device_connected(hdev, conn, NULL, 0);
5361 conn->sec_level = BT_SECURITY_LOW;
5362 conn->handle = handle;
5363 conn->state = BT_CONFIG;
5365 /* Store current advertising instance as connection advertising instance
5366 * when sotfware rotation is in use so it can be re-enabled when
5369 if (!ext_adv_capable(hdev))
5370 conn->adv_instance = hdev->cur_adv_instance;
5372 conn->le_conn_interval = interval;
5373 conn->le_conn_latency = latency;
5374 conn->le_supv_timeout = supervision_timeout;
5376 hci_debugfs_create_conn(conn);
5377 hci_conn_add_sysfs(conn);
5379 /* The remote features procedure is defined for central
5380 * role only. So only in case of an initiated connection
5381 * request the remote features.
5383 * If the local controller supports peripheral-initiated features
5384 * exchange, then requesting the remote features in peripheral
5385 * role is possible. Otherwise just transition into the
5386 * connected state without requesting the remote features.
5389 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5390 struct hci_cp_le_read_remote_features cp;
5392 cp.handle = __cpu_to_le16(conn->handle);
5394 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5397 hci_conn_hold(conn);
5399 conn->state = BT_CONNECTED;
5400 hci_connect_cfm(conn, status);
/* The pending auto-connect entry has served its purpose: detach it. */
5403 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5406 list_del_init(&params->action);
5408 hci_conn_drop(params->conn);
5409 hci_conn_put(params->conn);
5410 params->conn = NULL;
5415 hci_update_background_scan(hdev);
5416 hci_dev_unlock(hdev);
/* HCI LE Connection Complete event (legacy).
 * Thin wrapper: unpacks the little-endian event fields and delegates to
 * le_conn_complete_evt with no local RPA (legacy event carries none).
 */
5419 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5421 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5423 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5425 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5426 NULL, ev->role, le16_to_cpu(ev->handle),
5427 le16_to_cpu(ev->interval),
5428 le16_to_cpu(ev->latency),
5429 le16_to_cpu(ev->supervision_timeout));
/* HCI LE Enhanced Connection Complete event.
 * Like the legacy variant but passes the controller-provided local RPA;
 * afterwards, with LL privacy active, disables controller address
 * resolution (it cannot stay enabled while connected).
 */
5432 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5433 struct sk_buff *skb)
5435 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5437 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5439 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5440 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5441 le16_to_cpu(ev->interval),
5442 le16_to_cpu(ev->latency),
5443 le16_to_cpu(ev->supervision_timeout));
5445 if (use_ll_privacy(hdev) &&
5446 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5447 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5448 hci_req_disable_address_resolution(hdev);
/* HCI LE Advertising Set Terminated event.
 * On error removes the advertising instance entirely; otherwise marks
 * it disabled and, when termination was caused by a connection, records
 * the instance on the connection (for re-enable after disconnect) and
 * fixes up the responder address for random-address advertising.
 */
5451 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5453 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5454 struct hci_conn *conn;
5455 struct adv_info *adv;
5457 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5459 adv = hci_find_adv_instance(hdev, ev->handle);
5465 /* Remove advertising as it has been terminated */
5466 hci_remove_adv_instance(hdev, ev->handle);
5467 mgmt_advertising_removed(NULL, hdev, ev->handle);
5473 adv->enabled = false;
5475 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5477 /* Store handle in the connection so the correct advertising
5478 * instance can be re-enabled when disconnected.
5480 conn->adv_instance = ev->handle;
/* resp_addr only needs fixing for random addressing when still unset. */
5482 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5483 bacmp(&conn->resp_addr, BDADDR_ANY))
5487 bacpy(&conn->resp_addr, &hdev->random_addr);
5492 bacpy(&conn->resp_addr, &adv->random_addr);
/* HCI LE Connection Update Complete event.
 * Stores the newly negotiated connection interval, latency and
 * supervision timeout on the connection object.
 */
5496 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5497 struct sk_buff *skb)
5499 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5500 struct hci_conn *conn;
5502 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5509 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5511 conn->le_conn_interval = le16_to_cpu(ev->interval);
5512 conn->le_conn_latency = le16_to_cpu(ev->latency);
5513 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5516 hci_dev_unlock(hdev);
5519 /* This function requires the caller holds hdev->lock */
/* Decide whether an incoming advertising report should trigger an LE
 * connection attempt. Filters out non-connectable advertising, blocked
 * peers, and (on most controllers) situations where we already act as
 * peripheral; then consults the pending-connect parameter list and, if
 * an entry matches the auto-connect policy, initiates the connection
 * and records conn on the params entry for lifetime tracking.
 * Returns the hci_conn on success; error/skip return paths are partly
 * elided in this excerpt.
 */
5520 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5522 u8 addr_type, bool addr_resolved,
5523 u8 adv_type, bdaddr_t *direct_rpa)
5525 struct hci_conn *conn;
5526 struct hci_conn_params *params;
5528 /* If the event is not connectable don't proceed further */
5529 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5532 /* Ignore if the device is blocked */
5533 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
5536 /* Most controller will fail if we try to create new connections
5537 * while we have an existing one in peripheral role.
5539 if (hdev->conn_hash.le_num_peripheral > 0 &&
5540 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5541 !(hdev->le_states[3] & 0x10)))
5544 /* If we're not connectable only connect devices that we have in
5545 * our pend_le_conns list.
5547 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5552 if (!params->explicit_connect) {
5553 switch (params->auto_connect) {
5554 case HCI_AUTO_CONN_DIRECT:
5555 /* Only devices advertising with ADV_DIRECT_IND are
5556 * triggering a connection attempt. This is allowing
5557 * incoming connections from peripheral devices.
5559 if (adv_type != LE_ADV_DIRECT_IND)
5562 case HCI_AUTO_CONN_ALWAYS:
5563 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5564 * are triggering a connection attempt. This means
5565 * that incoming connections from peripheral device are
5566 * accepted and also outgoing connections to peripheral
5567 * devices are established when found.
5575 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
5576 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
5577 HCI_ROLE_MASTER, direct_rpa);
5578 if (!IS_ERR(conn)) {
5579 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5580 * by higher layer that tried to connect, if no then
5581 * store the pointer since we don't really have any
5582 * other owner of the object besides the params that
5583 * triggered it. This way we can abort the connection if
5584 * the parameters get removed and keep the reference
5585 * count consistent once the connection is established.
5588 if (!params->explicit_connect)
5589 params->conn = hci_conn_get(conn);
5594 switch (PTR_ERR(conn)) {
5596 /* If hci_connect() returns -EBUSY it means there is already
5597 * an LE connection attempt going on. Since controllers don't
5598 * support more than one connection attempt at the time, we
5599 * don't consider this an error case.
5603 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
5610 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5611 u8 bdaddr_type, bdaddr_t *direct_addr,
5612 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5615 struct discovery_state *d = &hdev->discovery;
5616 struct smp_irk *irk;
5617 struct hci_conn *conn;
5618 bool match, bdaddr_resolved;
5624 case LE_ADV_DIRECT_IND:
5625 case LE_ADV_SCAN_IND:
5626 case LE_ADV_NONCONN_IND:
5627 case LE_ADV_SCAN_RSP:
5630 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5631 "type: 0x%02x", type);
5635 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5636 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
5640 /* Find the end of the data in case the report contains padded zero
5641 * bytes at the end causing an invalid length value.
5643 * When data is NULL, len is 0 so there is no need for extra ptr
5644 * check as 'ptr < data + 0' is already false in such case.
5646 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5647 if (ptr + 1 + *ptr > data + len)
5651 /* Adjust for actual length. This handles the case when remote
5652 * device is advertising with incorrect data length.
5656 /* If the direct address is present, then this report is from
5657 * a LE Direct Advertising Report event. In that case it is
5658 * important to see if the address is matching the local
5659 * controller address.
5662 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
5665 /* Only resolvable random addresses are valid for these
5666 * kind of reports and others can be ignored.
5668 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5671 /* If the controller is not using resolvable random
5672 * addresses, then this report can be ignored.
5674 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5677 /* If the local IRK of the controller does not match
5678 * with the resolvable random address provided, then
5679 * this report can be ignored.
5681 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5685 /* Check if we need to convert to identity address */
5686 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5688 bdaddr = &irk->bdaddr;
5689 bdaddr_type = irk->addr_type;
5692 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
5694 /* Check if we have been requested to connect to this device.
5696 * direct_addr is set only for directed advertising reports (it is NULL
5697 * for advertising reports) and is already verified to be RPA above.
5699 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
5701 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
5702 /* Store report for later inclusion by
5703 * mgmt_device_connected
5705 memcpy(conn->le_adv_data, data, len);
5706 conn->le_adv_data_len = len;
5709 /* Passive scanning shouldn't trigger any device found events,
5710 * except for devices marked as CONN_REPORT for which we do send
5711 * device found events, or advertisement monitoring requested.
5713 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5714 if (type == LE_ADV_DIRECT_IND)
5717 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5718 bdaddr, bdaddr_type) &&
5719 idr_is_empty(&hdev->adv_monitors_idr))
5722 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5723 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5726 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5727 rssi, flags, data, len, NULL, 0);
5731 /* When receiving non-connectable or scannable undirected
5732 * advertising reports, this means that the remote device is
5733 * not connectable and then clearly indicate this in the
5734 * device found event.
5736 * When receiving a scan response, then there is no way to
5737 * know if the remote device is connectable or not. However
5738 * since scan responses are merged with a previously seen
5739 * advertising report, the flags field from that report
5742 * In the really unlikely case that a controller get confused
5743 * and just sends a scan response event, then it is marked as
5744 * not connectable as well.
5746 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5747 type == LE_ADV_SCAN_RSP)
5748 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5752 /* If there's nothing pending either store the data from this
5753 * event or send an immediate device found event if the data
5754 * should not be stored for later.
5756 if (!ext_adv && !has_pending_adv_report(hdev)) {
5757 /* If the report will trigger a SCAN_REQ store it for
5760 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5761 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5762 rssi, flags, data, len);
5766 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5767 rssi, flags, data, len, NULL, 0);
5771 /* Check if the pending report is for the same device as the new one */
5772 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5773 bdaddr_type == d->last_adv_addr_type);
5775 /* If the pending data doesn't match this report or this isn't a
5776 * scan response (e.g. we got a duplicate ADV_IND) then force
5777 * sending of the pending data.
5779 if (type != LE_ADV_SCAN_RSP || !match) {
5780 /* Send out whatever is in the cache, but skip duplicates */
5782 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5783 d->last_adv_addr_type, NULL,
5784 d->last_adv_rssi, d->last_adv_flags,
5786 d->last_adv_data_len, NULL, 0);
5788 /* If the new report will trigger a SCAN_REQ store it for
5791 if (!ext_adv && (type == LE_ADV_IND ||
5792 type == LE_ADV_SCAN_IND)) {
5793 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5794 rssi, flags, data, len);
5798 /* The advertising reports cannot be merged, so clear
5799 * the pending report and send out a device found event.
5801 clear_pending_adv_report(hdev);
5802 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5803 rssi, flags, data, len, NULL, 0);
5807 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5808 * the new event is a SCAN_RSP. We can therefore proceed with
5809 * sending a merged device found event.
5811 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5812 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5813 d->last_adv_data, d->last_adv_data_len, data, len);
5814 clear_pending_adv_report(hdev);
5817 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5819 u8 num_reports = skb->data[0];
5820 void *ptr = &skb->data[1];
5824 while (num_reports--) {
5825 struct hci_ev_le_advertising_info *ev = ptr;
5828 if (ev->length <= HCI_MAX_AD_LENGTH) {
5829 rssi = ev->data[ev->length];
5830 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5831 ev->bdaddr_type, NULL, 0, rssi,
5832 ev->data, ev->length, false);
5834 bt_dev_err(hdev, "Dropping invalid advertising data");
5837 ptr += sizeof(*ev) + ev->length + 1;
5840 hci_dev_unlock(hdev);
/* Translate the 16-bit event type of an LE Extended Advertising Report
 * into the matching legacy advertising PDU type, or LE_ADV_INVALID when
 * no sensible mapping exists.
 */
static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
	/* Legacy PDUs carried inside an extended report map directly onto
	 * the corresponding legacy advertising types.
	 */
	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
	case LE_LEGACY_ADV_IND:
	case LE_LEGACY_ADV_DIRECT_IND:
		return LE_ADV_DIRECT_IND;
	case LE_LEGACY_ADV_SCAN_IND:
		return LE_ADV_SCAN_IND;
	case LE_LEGACY_NONCONN_IND:
		return LE_ADV_NONCONN_IND;
	case LE_LEGACY_SCAN_RSP_ADV:
	case LE_LEGACY_SCAN_RSP_ADV_SCAN:
		return LE_ADV_SCAN_RSP;

	/* True extended PDUs: derive the closest legacy type from the
	 * individual property bits, most specific first.
	 */
	if (evt_type & LE_EXT_ADV_CONN_IND) {
		/* Connectable and directed maps to directed advertising */
		if (evt_type & LE_EXT_ADV_DIRECT_IND)
			return LE_ADV_DIRECT_IND;

	if (evt_type & LE_EXT_ADV_SCAN_RSP)
		return LE_ADV_SCAN_RSP;

	if (evt_type & LE_EXT_ADV_SCAN_IND)
		return LE_ADV_SCAN_IND;

	/* Non-connectable (possibly directed) advertising */
	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
	    evt_type & LE_EXT_ADV_DIRECT_IND)
		return LE_ADV_NONCONN_IND;

	/* Nothing matched: log (rate limited) and flag as invalid */
	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",

	return LE_ADV_INVALID;
5887 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5889 u8 num_reports = skb->data[0];
5890 void *ptr = &skb->data[1];
5894 while (num_reports--) {
5895 struct hci_ev_le_ext_adv_report *ev = ptr;
5899 evt_type = __le16_to_cpu(ev->evt_type);
5900 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
5901 if (legacy_evt_type != LE_ADV_INVALID) {
5902 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5903 ev->bdaddr_type, NULL, 0, ev->rssi,
5904 ev->data, ev->length,
5905 !(evt_type & LE_EXT_ADV_LEGACY_PDU));
5908 ptr += sizeof(*ev) + ev->length;
5911 hci_dev_unlock(hdev);
/* LE Read Remote Features Complete event: cache the remote LE feature
 * mask on the connection and, for connections still in BT_CONFIG, move
 * them to BT_CONNECTED and notify the upper layers via hci_connect_cfm().
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	/* Cache remote feature page 0 for later queries */
	memcpy(conn->features[0], ev->features, 8);

	if (conn->state == BT_CONFIG) {

		/* If the local controller supports peripheral-initiated
		 * features exchange, but the remote controller does
		 * not, then it is possible that the error code 0x1a
		 * for unsupported remote feature gets returned.
		 *
		 * In this specific case, allow the connection to
		 * transition into connected state and mark it as
		 * successful.
		 */
		if (!conn->out && ev->status == 0x1a &&
		    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
		/* Otherwise propagate the event status as-is */
		status = ev->status;

		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);

	hci_dev_unlock(hdev);
5956 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5958 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5959 struct hci_cp_le_ltk_reply cp;
5960 struct hci_cp_le_ltk_neg_reply neg;
5961 struct hci_conn *conn;
5962 struct smp_ltk *ltk;
5964 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5968 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5972 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5976 if (smp_ltk_is_sc(ltk)) {
5977 /* With SC both EDiv and Rand are set to zero */
5978 if (ev->ediv || ev->rand)
5981 /* For non-SC keys check that EDiv and Rand match */
5982 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
5986 memcpy(cp.ltk, ltk->val, ltk->enc_size);
5987 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5988 cp.handle = cpu_to_le16(conn->handle);
5990 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5992 conn->enc_key_size = ltk->enc_size;
5994 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5996 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5997 * temporary key used to encrypt a connection following
5998 * pairing. It is used during the Encrypted Session Setup to
5999 * distribute the keys. Later, security can be re-established
6000 * using a distributed LTK.
6002 if (ltk->type == SMP_STK) {
6003 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6004 list_del_rcu(<k->list);
6005 kfree_rcu(ltk, rcu);
6007 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6010 hci_dev_unlock(hdev);
6015 neg.handle = ev->handle;
6016 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6017 hci_dev_unlock(hdev);
/* Reject a remote LE connection parameter request on @handle by sending
 * an LE Connection Parameter Request Negative Reply to the controller.
 */
static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);
	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* LE Remote Connection Parameter Request event: validate the proposed
 * connection parameters and either accept them (reply command echoing
 * the values) or reject them with a negative reply.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	/* Reject requests for unknown or not fully established connections */
	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Reject parameters outside the ranges allowed by the spec */
	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;

		params = hci_conn_params_lookup(hdev, &hcon->dst,
		/* Remember the requested values for future connections */
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = timeout;

		hci_dev_unlock(hdev);

		/* Let userspace know about the new parameters */
		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);

	/* Accept the request by echoing the proposed parameters back */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* LE Direct Advertising Report event: each report carries the address
 * the advertiser directed at us, so forward it (with no AD data) to
 * process_adv_report() where the direct address is matched against the
 * local RPA.
 */
static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
	u8 num_reports = skb->data[0];
	struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];

	/* Drop events whose claimed report count exceeds the frame length */
	if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)

	for (; num_reports; num_reports--, ev++)
		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
				   ev->bdaddr_type, &ev->direct_addr,
				   ev->direct_addr_type, ev->rssi, NULL, 0,

	hci_dev_unlock(hdev);
/* LE PHY Update Complete event: record the TX/RX PHYs now in use on the
 * affected connection.
 */
static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));

	/* Cache the negotiated PHYs on the connection */
	conn->le_tx_phy = ev->tx_phy;
	conn->le_rx_phy = ev->rx_phy;

	hci_dev_unlock(hdev);
/* LE Meta event demultiplexer: strip the meta header and dispatch to the
 * handler matching the subevent code.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	/* Advance skb->data past the meta header so every subevent handler
	 * sees its own parameters at skb->data.
	 */
	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);

	case HCI_EV_LE_PHY_UPDATE_COMPLETE:
		hci_le_phy_update_evt(hdev, skb);

	case HCI_EV_LE_EXT_ADV_REPORT:
		hci_le_ext_adv_report_evt(hdev, skb);

	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		hci_le_enh_conn_complete_evt(hdev, skb);

	case HCI_EV_LE_EXT_ADV_SET_TERM:
		hci_le_ext_adv_term_evt(hdev, skb);
/* Check whether @skb carries the Command Complete event for @opcode (or
 * for the explicitly requested @event) and, if so, advance skb->data
 * past the event headers so the caller can read the return parameters.
 * The return value tells the caller whether the skb should be handed to
 * the request's complete_skb callback.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event and this is not it */
	if (hdr->evt != event)

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	/* Completion for a different opcode than the pending request */
	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
6241 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
6242 struct sk_buff *skb)
6244 struct hci_ev_le_advertising_info *adv;
6245 struct hci_ev_le_direct_adv_info *direct_adv;
6246 struct hci_ev_le_ext_adv_report *ext_adv;
6247 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
6248 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
6252 /* If we are currently suspended and this is the first BT event seen,
6253 * save the wake reason associated with the event.
6255 if (!hdev->suspended || hdev->wake_reason)
6258 /* Default to remote wake. Values for wake_reason are documented in the
6259 * Bluez mgmt api docs.
6261 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
6263 /* Once configured for remote wakeup, we should only wake up for
6264 * reconnections. It's useful to see which device is waking us up so
6265 * keep track of the bdaddr of the connection event that woke us up.
6267 if (event == HCI_EV_CONN_REQUEST) {
6268 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
6269 hdev->wake_addr_type = BDADDR_BREDR;
6270 } else if (event == HCI_EV_CONN_COMPLETE) {
6271 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
6272 hdev->wake_addr_type = BDADDR_BREDR;
6273 } else if (event == HCI_EV_LE_META) {
6274 struct hci_ev_le_meta *le_ev = (void *)skb->data;
6275 u8 subevent = le_ev->subevent;
6276 u8 *ptr = &skb->data[sizeof(*le_ev)];
6277 u8 num_reports = *ptr;
6279 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
6280 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
6281 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
6283 adv = (void *)(ptr + 1);
6284 direct_adv = (void *)(ptr + 1);
6285 ext_adv = (void *)(ptr + 1);
6288 case HCI_EV_LE_ADVERTISING_REPORT:
6289 bacpy(&hdev->wake_addr, &adv->bdaddr);
6290 hdev->wake_addr_type = adv->bdaddr_type;
6292 case HCI_EV_LE_DIRECT_ADV_REPORT:
6293 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
6294 hdev->wake_addr_type = direct_adv->bdaddr_type;
6296 case HCI_EV_LE_EXT_ADV_REPORT:
6297 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
6298 hdev->wake_addr_type = ext_adv->bdaddr_type;
6303 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
6307 hci_dev_unlock(hdev);
6310 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
6312 struct hci_event_hdr *hdr = (void *) skb->data;
6313 hci_req_complete_t req_complete = NULL;
6314 hci_req_complete_skb_t req_complete_skb = NULL;
6315 struct sk_buff *orig_skb = NULL;
6316 u8 status = 0, event = hdr->evt, req_evt = 0;
6317 u16 opcode = HCI_OP_NOP;
6320 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
6324 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
6325 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
6326 opcode = __le16_to_cpu(cmd_hdr->opcode);
6327 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
6332 /* If it looks like we might end up having to call
6333 * req_complete_skb, store a pristine copy of the skb since the
6334 * various handlers may modify the original one through
6335 * skb_pull() calls, etc.
6337 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
6338 event == HCI_EV_CMD_COMPLETE)
6339 orig_skb = skb_clone(skb, GFP_KERNEL);
6341 skb_pull(skb, HCI_EVENT_HDR_SIZE);
6343 /* Store wake reason if we're suspended */
6344 hci_store_wake_reason(hdev, event, skb);
6347 case HCI_EV_INQUIRY_COMPLETE:
6348 hci_inquiry_complete_evt(hdev, skb);
6351 case HCI_EV_INQUIRY_RESULT:
6352 hci_inquiry_result_evt(hdev, skb);
6355 case HCI_EV_CONN_COMPLETE:
6356 hci_conn_complete_evt(hdev, skb);
6359 case HCI_EV_CONN_REQUEST:
6360 hci_conn_request_evt(hdev, skb);
6363 case HCI_EV_DISCONN_COMPLETE:
6364 hci_disconn_complete_evt(hdev, skb);
6367 case HCI_EV_AUTH_COMPLETE:
6368 hci_auth_complete_evt(hdev, skb);
6371 case HCI_EV_REMOTE_NAME:
6372 hci_remote_name_evt(hdev, skb);
6375 case HCI_EV_ENCRYPT_CHANGE:
6376 hci_encrypt_change_evt(hdev, skb);
6379 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
6380 hci_change_link_key_complete_evt(hdev, skb);
6383 case HCI_EV_REMOTE_FEATURES:
6384 hci_remote_features_evt(hdev, skb);
6387 case HCI_EV_CMD_COMPLETE:
6388 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
6389 &req_complete, &req_complete_skb);
6392 case HCI_EV_CMD_STATUS:
6393 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
6397 case HCI_EV_HARDWARE_ERROR:
6398 hci_hardware_error_evt(hdev, skb);
6401 case HCI_EV_ROLE_CHANGE:
6402 hci_role_change_evt(hdev, skb);
6405 case HCI_EV_NUM_COMP_PKTS:
6406 hci_num_comp_pkts_evt(hdev, skb);
6409 case HCI_EV_MODE_CHANGE:
6410 hci_mode_change_evt(hdev, skb);
6413 case HCI_EV_PIN_CODE_REQ:
6414 hci_pin_code_request_evt(hdev, skb);
6417 case HCI_EV_LINK_KEY_REQ:
6418 hci_link_key_request_evt(hdev, skb);
6421 case HCI_EV_LINK_KEY_NOTIFY:
6422 hci_link_key_notify_evt(hdev, skb);
6425 case HCI_EV_CLOCK_OFFSET:
6426 hci_clock_offset_evt(hdev, skb);
6429 case HCI_EV_PKT_TYPE_CHANGE:
6430 hci_pkt_type_change_evt(hdev, skb);
6433 case HCI_EV_PSCAN_REP_MODE:
6434 hci_pscan_rep_mode_evt(hdev, skb);
6437 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
6438 hci_inquiry_result_with_rssi_evt(hdev, skb);
6441 case HCI_EV_REMOTE_EXT_FEATURES:
6442 hci_remote_ext_features_evt(hdev, skb);
6445 case HCI_EV_SYNC_CONN_COMPLETE:
6446 hci_sync_conn_complete_evt(hdev, skb);
6449 case HCI_EV_EXTENDED_INQUIRY_RESULT:
6450 hci_extended_inquiry_result_evt(hdev, skb);
6453 case HCI_EV_KEY_REFRESH_COMPLETE:
6454 hci_key_refresh_complete_evt(hdev, skb);
6457 case HCI_EV_IO_CAPA_REQUEST:
6458 hci_io_capa_request_evt(hdev, skb);
6461 case HCI_EV_IO_CAPA_REPLY:
6462 hci_io_capa_reply_evt(hdev, skb);
6465 case HCI_EV_USER_CONFIRM_REQUEST:
6466 hci_user_confirm_request_evt(hdev, skb);
6469 case HCI_EV_USER_PASSKEY_REQUEST:
6470 hci_user_passkey_request_evt(hdev, skb);
6473 case HCI_EV_USER_PASSKEY_NOTIFY:
6474 hci_user_passkey_notify_evt(hdev, skb);
6477 case HCI_EV_KEYPRESS_NOTIFY:
6478 hci_keypress_notify_evt(hdev, skb);
6481 case HCI_EV_SIMPLE_PAIR_COMPLETE:
6482 hci_simple_pair_complete_evt(hdev, skb);
6485 case HCI_EV_REMOTE_HOST_FEATURES:
6486 hci_remote_host_features_evt(hdev, skb);
6489 case HCI_EV_LE_META:
6490 hci_le_meta_evt(hdev, skb);
6493 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
6494 hci_remote_oob_data_request_evt(hdev, skb);
6497 #if IS_ENABLED(CONFIG_BT_HS)
6498 case HCI_EV_CHANNEL_SELECTED:
6499 hci_chan_selected_evt(hdev, skb);
6502 case HCI_EV_PHY_LINK_COMPLETE:
6503 hci_phy_link_complete_evt(hdev, skb);
6506 case HCI_EV_LOGICAL_LINK_COMPLETE:
6507 hci_loglink_complete_evt(hdev, skb);
6510 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
6511 hci_disconn_loglink_complete_evt(hdev, skb);
6514 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
6515 hci_disconn_phylink_complete_evt(hdev, skb);
6519 case HCI_EV_NUM_COMP_BLOCKS:
6520 hci_num_comp_blocks_evt(hdev, skb);
6524 msft_vendor_evt(hdev, skb);
6528 BT_DBG("%s event 0x%2.2x", hdev->name, event);
6533 req_complete(hdev, status, opcode);
6534 } else if (req_complete_skb) {
6535 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6536 kfree_skb(orig_skb);
6539 req_complete_skb(hdev, status, opcode, orig_skb);
6543 kfree_skb(orig_skb);
6545 hdev->stat.evt_rx++;