/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
42 /* Handle HCI Event packets */
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
46 __u8 status = *((__u8 *) skb->data);
48 BT_DBG("%s status 0x%2.2x", hdev->name, status);
53 clear_bit(HCI_INQUIRY, &hdev->flags);
54 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
55 wake_up_bit(&hdev->flags, HCI_INQUIRY);
58 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
61 hci_conn_check_pending(hdev);
64 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
66 __u8 status = *((__u8 *) skb->data);
68 BT_DBG("%s status 0x%2.2x", hdev->name, status);
73 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
76 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
78 __u8 status = *((__u8 *) skb->data);
80 BT_DBG("%s status 0x%2.2x", hdev->name, status);
85 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
87 hci_conn_check_pending(hdev);
90 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
93 BT_DBG("%s", hdev->name);
96 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
98 struct hci_rp_role_discovery *rp = (void *) skb->data;
99 struct hci_conn *conn;
101 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
108 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
110 conn->role = rp->role;
112 hci_dev_unlock(hdev);
115 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 struct hci_rp_read_link_policy *rp = (void *) skb->data;
118 struct hci_conn *conn;
120 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
127 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 conn->link_policy = __le16_to_cpu(rp->policy);
131 hci_dev_unlock(hdev);
134 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136 struct hci_rp_write_link_policy *rp = (void *) skb->data;
137 struct hci_conn *conn;
140 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
145 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
151 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 conn->link_policy = get_unaligned_le16(sent + 2);
155 hci_dev_unlock(hdev);
158 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
161 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
163 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
168 hdev->link_policy = __le16_to_cpu(rp->policy);
171 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
174 __u8 status = *((__u8 *) skb->data);
177 BT_DBG("%s status 0x%2.2x", hdev->name, status);
182 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
186 hdev->link_policy = get_unaligned_le16(sent);
189 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
191 __u8 status = *((__u8 *) skb->data);
193 BT_DBG("%s status 0x%2.2x", hdev->name, status);
195 clear_bit(HCI_RESET, &hdev->flags);
200 /* Reset all non-persistent flags */
201 hci_dev_clear_volatile_flags(hdev);
203 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
205 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
206 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
208 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
209 hdev->adv_data_len = 0;
211 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
212 hdev->scan_rsp_data_len = 0;
214 hdev->le_scan_type = LE_SCAN_PASSIVE;
216 hdev->ssp_debug_mode = 0;
218 hci_bdaddr_list_clear(&hdev->le_white_list);
221 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
224 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
225 struct hci_cp_read_stored_link_key *sent;
227 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
229 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
233 if (!rp->status && sent->read_all == 0x01) {
234 hdev->stored_max_keys = rp->max_keys;
235 hdev->stored_num_keys = rp->num_keys;
239 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
242 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
244 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
249 if (rp->num_keys <= hdev->stored_num_keys)
250 hdev->stored_num_keys -= rp->num_keys;
252 hdev->stored_num_keys = 0;
255 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
257 __u8 status = *((__u8 *) skb->data);
260 BT_DBG("%s status 0x%2.2x", hdev->name, status);
262 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
268 if (hci_dev_test_flag(hdev, HCI_MGMT))
269 mgmt_set_local_name_complete(hdev, sent, status);
271 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
273 hci_dev_unlock(hdev);
276 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
278 struct hci_rp_read_local_name *rp = (void *) skb->data;
280 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
285 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
286 hci_dev_test_flag(hdev, HCI_CONFIG))
287 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
290 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
292 __u8 status = *((__u8 *) skb->data);
295 BT_DBG("%s status 0x%2.2x", hdev->name, status);
297 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
304 __u8 param = *((__u8 *) sent);
306 if (param == AUTH_ENABLED)
307 set_bit(HCI_AUTH, &hdev->flags);
309 clear_bit(HCI_AUTH, &hdev->flags);
312 if (hci_dev_test_flag(hdev, HCI_MGMT))
313 mgmt_auth_enable_complete(hdev, status);
315 hci_dev_unlock(hdev);
318 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
320 __u8 status = *((__u8 *) skb->data);
324 BT_DBG("%s status 0x%2.2x", hdev->name, status);
329 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
333 param = *((__u8 *) sent);
336 set_bit(HCI_ENCRYPT, &hdev->flags);
338 clear_bit(HCI_ENCRYPT, &hdev->flags);
341 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
343 __u8 status = *((__u8 *) skb->data);
347 BT_DBG("%s status 0x%2.2x", hdev->name, status);
349 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
353 param = *((__u8 *) sent);
358 hdev->discov_timeout = 0;
362 if (param & SCAN_INQUIRY)
363 set_bit(HCI_ISCAN, &hdev->flags);
365 clear_bit(HCI_ISCAN, &hdev->flags);
367 if (param & SCAN_PAGE)
368 set_bit(HCI_PSCAN, &hdev->flags);
370 clear_bit(HCI_PSCAN, &hdev->flags);
373 hci_dev_unlock(hdev);
376 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
378 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
380 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
385 memcpy(hdev->dev_class, rp->dev_class, 3);
387 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
388 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
391 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
393 __u8 status = *((__u8 *) skb->data);
396 BT_DBG("%s status 0x%2.2x", hdev->name, status);
398 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
405 memcpy(hdev->dev_class, sent, 3);
407 if (hci_dev_test_flag(hdev, HCI_MGMT))
408 mgmt_set_class_of_dev_complete(hdev, sent, status);
410 hci_dev_unlock(hdev);
413 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
415 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
418 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
423 setting = __le16_to_cpu(rp->voice_setting);
425 if (hdev->voice_setting == setting)
428 hdev->voice_setting = setting;
430 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
433 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
436 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
439 __u8 status = *((__u8 *) skb->data);
443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
448 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
452 setting = get_unaligned_le16(sent);
454 if (hdev->voice_setting == setting)
457 hdev->voice_setting = setting;
459 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
462 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
465 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
468 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
470 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
475 hdev->num_iac = rp->num_iac;
477 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
480 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
482 __u8 status = *((__u8 *) skb->data);
483 struct hci_cp_write_ssp_mode *sent;
485 BT_DBG("%s status 0x%2.2x", hdev->name, status);
487 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
495 hdev->features[1][0] |= LMP_HOST_SSP;
497 hdev->features[1][0] &= ~LMP_HOST_SSP;
500 if (hci_dev_test_flag(hdev, HCI_MGMT))
501 mgmt_ssp_enable_complete(hdev, sent->mode, status);
504 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
506 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
509 hci_dev_unlock(hdev);
512 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
514 u8 status = *((u8 *) skb->data);
515 struct hci_cp_write_sc_support *sent;
517 BT_DBG("%s status 0x%2.2x", hdev->name, status);
519 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
527 hdev->features[1][0] |= LMP_HOST_SC;
529 hdev->features[1][0] &= ~LMP_HOST_SC;
532 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
534 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
536 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
539 hci_dev_unlock(hdev);
542 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
544 struct hci_rp_read_local_version *rp = (void *) skb->data;
546 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
551 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
552 hci_dev_test_flag(hdev, HCI_CONFIG)) {
553 hdev->hci_ver = rp->hci_ver;
554 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
555 hdev->lmp_ver = rp->lmp_ver;
556 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
557 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
561 static void hci_cc_read_local_commands(struct hci_dev *hdev,
564 struct hci_rp_read_local_commands *rp = (void *) skb->data;
566 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
571 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
572 hci_dev_test_flag(hdev, HCI_CONFIG))
573 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
576 static void hci_cc_read_local_features(struct hci_dev *hdev,
579 struct hci_rp_read_local_features *rp = (void *) skb->data;
581 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
586 memcpy(hdev->features, rp->features, 8);
/* Adjust the default settings according to the features
 * supported by the device. */
591 if (hdev->features[0][0] & LMP_3SLOT)
592 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
594 if (hdev->features[0][0] & LMP_5SLOT)
595 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
597 if (hdev->features[0][1] & LMP_HV2) {
598 hdev->pkt_type |= (HCI_HV2);
599 hdev->esco_type |= (ESCO_HV2);
602 if (hdev->features[0][1] & LMP_HV3) {
603 hdev->pkt_type |= (HCI_HV3);
604 hdev->esco_type |= (ESCO_HV3);
607 if (lmp_esco_capable(hdev))
608 hdev->esco_type |= (ESCO_EV3);
610 if (hdev->features[0][4] & LMP_EV4)
611 hdev->esco_type |= (ESCO_EV4);
613 if (hdev->features[0][4] & LMP_EV5)
614 hdev->esco_type |= (ESCO_EV5);
616 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
617 hdev->esco_type |= (ESCO_2EV3);
619 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
620 hdev->esco_type |= (ESCO_3EV3);
622 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
623 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
626 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
629 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
631 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
636 if (hdev->max_page < rp->max_page)
637 hdev->max_page = rp->max_page;
639 if (rp->page < HCI_MAX_PAGES)
640 memcpy(hdev->features[rp->page], rp->features, 8);
643 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
646 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
648 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
653 hdev->flow_ctl_mode = rp->mode;
656 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
658 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
660 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
665 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
666 hdev->sco_mtu = rp->sco_mtu;
667 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
668 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
670 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
675 hdev->acl_cnt = hdev->acl_pkts;
676 hdev->sco_cnt = hdev->sco_pkts;
678 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
679 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
682 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
684 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
686 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
691 if (test_bit(HCI_INIT, &hdev->flags))
692 bacpy(&hdev->bdaddr, &rp->bdaddr);
694 if (hci_dev_test_flag(hdev, HCI_SETUP))
695 bacpy(&hdev->setup_addr, &rp->bdaddr);
698 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
701 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
703 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
708 if (test_bit(HCI_INIT, &hdev->flags)) {
709 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
710 hdev->page_scan_window = __le16_to_cpu(rp->window);
714 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
717 u8 status = *((u8 *) skb->data);
718 struct hci_cp_write_page_scan_activity *sent;
720 BT_DBG("%s status 0x%2.2x", hdev->name, status);
725 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
729 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
730 hdev->page_scan_window = __le16_to_cpu(sent->window);
733 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
736 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
738 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
743 if (test_bit(HCI_INIT, &hdev->flags))
744 hdev->page_scan_type = rp->type;
747 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
750 u8 status = *((u8 *) skb->data);
753 BT_DBG("%s status 0x%2.2x", hdev->name, status);
758 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
760 hdev->page_scan_type = *type;
763 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
766 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
768 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
773 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
774 hdev->block_len = __le16_to_cpu(rp->block_len);
775 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
777 hdev->block_cnt = hdev->num_blocks;
779 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
780 hdev->block_cnt, hdev->block_len);
783 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
785 struct hci_rp_read_clock *rp = (void *) skb->data;
786 struct hci_cp_read_clock *cp;
787 struct hci_conn *conn;
789 BT_DBG("%s", hdev->name);
791 if (skb->len < sizeof(*rp))
799 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
803 if (cp->which == 0x00) {
804 hdev->clock = le32_to_cpu(rp->clock);
808 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
810 conn->clock = le32_to_cpu(rp->clock);
811 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
815 hci_dev_unlock(hdev);
818 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
821 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
823 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
828 hdev->amp_status = rp->amp_status;
829 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
830 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
831 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
832 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
833 hdev->amp_type = rp->amp_type;
834 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
835 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
836 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
837 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
840 a2mp_send_getinfo_rsp(hdev);
843 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
846 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
847 struct amp_assoc *assoc = &hdev->loc_assoc;
848 size_t rem_len, frag_len;
850 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
855 frag_len = skb->len - sizeof(*rp);
856 rem_len = __le16_to_cpu(rp->rem_len);
858 if (rem_len > frag_len) {
859 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
861 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
862 assoc->offset += frag_len;
864 /* Read other fragments */
865 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
870 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
871 assoc->len = assoc->offset + rem_len;
875 /* Send A2MP Rsp when all fragments are received */
876 a2mp_send_getampassoc_rsp(hdev, rp->status);
877 a2mp_send_create_phy_link_req(hdev, rp->status);
880 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
883 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
885 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
890 hdev->inq_tx_power = rp->tx_power;
893 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
895 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
896 struct hci_cp_pin_code_reply *cp;
897 struct hci_conn *conn;
899 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
903 if (hci_dev_test_flag(hdev, HCI_MGMT))
904 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
909 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
913 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
915 conn->pin_length = cp->pin_len;
918 hci_dev_unlock(hdev);
921 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
923 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
925 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
929 if (hci_dev_test_flag(hdev, HCI_MGMT))
930 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
933 hci_dev_unlock(hdev);
936 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
939 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
941 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
946 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
947 hdev->le_pkts = rp->le_max_pkt;
949 hdev->le_cnt = hdev->le_pkts;
951 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
954 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
957 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
959 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
964 memcpy(hdev->le_features, rp->features, 8);
967 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
970 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
972 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
977 hdev->adv_tx_power = rp->tx_power;
980 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
982 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
984 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
988 if (hci_dev_test_flag(hdev, HCI_MGMT))
989 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
992 hci_dev_unlock(hdev);
995 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
998 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1000 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1004 if (hci_dev_test_flag(hdev, HCI_MGMT))
1005 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1006 ACL_LINK, 0, rp->status);
1008 hci_dev_unlock(hdev);
1011 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1013 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1015 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1019 if (hci_dev_test_flag(hdev, HCI_MGMT))
1020 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1023 hci_dev_unlock(hdev);
1026 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1027 struct sk_buff *skb)
1029 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1031 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1035 if (hci_dev_test_flag(hdev, HCI_MGMT))
1036 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1037 ACL_LINK, 0, rp->status);
1039 hci_dev_unlock(hdev);
1042 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1043 struct sk_buff *skb)
1045 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1047 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1050 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1051 struct sk_buff *skb)
1053 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1055 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1058 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1060 __u8 status = *((__u8 *) skb->data);
1063 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1068 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1074 bacpy(&hdev->random_addr, sent);
1076 hci_dev_unlock(hdev);
1079 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1081 __u8 *sent, status = *((__u8 *) skb->data);
1083 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1088 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
/* If we're doing connection initiation as peripheral, set a
 * timeout in case something goes wrong.
1098 struct hci_conn *conn;
1100 hci_dev_set_flag(hdev, HCI_LE_ADV);
1102 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1104 queue_delayed_work(hdev->workqueue,
1105 &conn->le_conn_timeout,
1106 conn->conn_timeout);
1108 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1111 hci_dev_unlock(hdev);
1114 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1116 struct hci_cp_le_set_scan_param *cp;
1117 __u8 status = *((__u8 *) skb->data);
1119 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1124 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1130 hdev->le_scan_type = cp->type;
1132 hci_dev_unlock(hdev);
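/* Helpers for the pending advertising report buffered in the discovery
 * state (last_adv_*): it is stored while an active LE scan is running and
 * flushed to mgmt_device_found() once scanning is disabled.
 */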
1135 static bool has_pending_adv_report(struct hci_dev *hdev)
1137 struct discovery_state *d = &hdev->discovery;
1139 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1142 static void clear_pending_adv_report(struct hci_dev *hdev)
1144 struct discovery_state *d = &hdev->discovery;
1146 bacpy(&d->last_adv_addr, BDADDR_ANY);
1147 d->last_adv_data_len = 0;
1150 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1151 u8 bdaddr_type, s8 rssi, u32 flags,
1154 struct discovery_state *d = &hdev->discovery;
1156 bacpy(&d->last_adv_addr, bdaddr);
1157 d->last_adv_addr_type = bdaddr_type;
1158 d->last_adv_rssi = rssi;
1159 d->last_adv_flags = flags;
1160 memcpy(d->last_adv_data, data, len);
1161 d->last_adv_data_len = len;
1164 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1165 struct sk_buff *skb)
1167 struct hci_cp_le_set_scan_enable *cp;
1168 __u8 status = *((__u8 *) skb->data);
1170 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1175 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1181 switch (cp->enable) {
1182 case LE_SCAN_ENABLE:
1183 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1184 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1185 clear_pending_adv_report(hdev);
1188 case LE_SCAN_DISABLE:
1189 /* We do this here instead of when setting DISCOVERY_STOPPED
1190 * since the latter would potentially require waiting for
1191 * inquiry to stop too.
1193 if (has_pending_adv_report(hdev)) {
1194 struct discovery_state *d = &hdev->discovery;
1196 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1197 d->last_adv_addr_type, NULL,
1198 d->last_adv_rssi, d->last_adv_flags,
1200 d->last_adv_data_len, NULL, 0);
1203 /* Cancel this timer so that we don't try to disable scanning
1204 * when it's already disabled.
1206 cancel_delayed_work(&hdev->le_scan_disable);
1208 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
 * interrupted scanning due to a connect request. In that
 * case, mark discovery as stopped. If this was not because
 * of a connect request, advertising might have been
 * disabled because of active scanning, so re-enable it
 * again if necessary.
1217 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1218 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1219 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1220 hdev->discovery.state == DISCOVERY_FINDING)
1221 mgmt_reenable_advertising(hdev);
1226 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1230 hci_dev_unlock(hdev);
1233 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1234 struct sk_buff *skb)
1236 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1238 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1243 hdev->le_white_list_size = rp->size;
1246 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1247 struct sk_buff *skb)
1249 __u8 status = *((__u8 *) skb->data);
1251 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1256 hci_bdaddr_list_clear(&hdev->le_white_list);
1259 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1260 struct sk_buff *skb)
1262 struct hci_cp_le_add_to_white_list *sent;
1263 __u8 status = *((__u8 *) skb->data);
1265 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1270 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1274 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1278 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1279 struct sk_buff *skb)
1281 struct hci_cp_le_del_from_white_list *sent;
1282 __u8 status = *((__u8 *) skb->data);
1284 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1289 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1293 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1297 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1298 struct sk_buff *skb)
1300 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1302 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1307 memcpy(hdev->le_states, rp->le_states, 8);
1310 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1311 struct sk_buff *skb)
1313 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1315 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1320 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1321 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1324 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1325 struct sk_buff *skb)
1327 struct hci_cp_le_write_def_data_len *sent;
1328 __u8 status = *((__u8 *) skb->data);
1330 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1335 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1339 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1340 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1343 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1344 struct sk_buff *skb)
1346 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1348 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1353 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1354 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1355 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1356 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1359 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1360 struct sk_buff *skb)
1362 struct hci_cp_write_le_host_supported *sent;
1363 __u8 status = *((__u8 *) skb->data);
1365 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1370 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1377 hdev->features[1][0] |= LMP_HOST_LE;
1378 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1380 hdev->features[1][0] &= ~LMP_HOST_LE;
1381 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1382 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1386 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1388 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1390 hci_dev_unlock(hdev);
1393 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1395 struct hci_cp_le_set_adv_param *cp;
1396 u8 status = *((u8 *) skb->data);
1398 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1403 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1408 hdev->adv_addr_type = cp->own_address_type;
1409 hci_dev_unlock(hdev);
1412 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1413 struct sk_buff *skb)
1415 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1417 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1418 hdev->name, rp->status, rp->phy_handle);
1423 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1426 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1428 struct hci_rp_read_rssi *rp = (void *) skb->data;
1429 struct hci_conn *conn;
1431 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1438 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1440 conn->rssi = rp->rssi;
1442 hci_dev_unlock(hdev);
1445 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1447 struct hci_cp_read_tx_power *sent;
1448 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1449 struct hci_conn *conn;
1451 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1456 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1462 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1466 switch (sent->type) {
1468 conn->tx_power = rp->tx_power;
1471 conn->max_tx_power = rp->tx_power;
1476 hci_dev_unlock(hdev);
1479 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1481 u8 status = *((u8 *) skb->data);
1484 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1489 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1491 hdev->ssp_debug_mode = *mode;
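/* Command Status handlers: invoked when the controller acknowledges a
 * command that completes asynchronously, before the corresponding event
 * arrives.
 */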
1494 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1496 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1499 hci_conn_check_pending(hdev);
1503 set_bit(HCI_INQUIRY, &hdev->flags);
1506 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1508 struct hci_cp_create_conn *cp;
1509 struct hci_conn *conn;
1511 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1513 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1519 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1521 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1524 if (conn && conn->state == BT_CONNECT) {
1525 if (status != 0x0c || conn->attempt > 2) {
1526 conn->state = BT_CLOSED;
1527 hci_connect_cfm(conn, status);
1530 conn->state = BT_CONNECT2;
1534 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1537 BT_ERR("No memory for new connection");
1541 hci_dev_unlock(hdev);
1544 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1546 struct hci_cp_add_sco *cp;
1547 struct hci_conn *acl, *sco;
1550 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1555 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1559 handle = __le16_to_cpu(cp->handle);
1561 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1565 acl = hci_conn_hash_lookup_handle(hdev, handle);
1569 sco->state = BT_CLOSED;
1571 hci_connect_cfm(sco, status);
1576 hci_dev_unlock(hdev);
1579 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1581 struct hci_cp_auth_requested *cp;
1582 struct hci_conn *conn;
1584 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1589 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1595 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1597 if (conn->state == BT_CONFIG) {
1598 hci_connect_cfm(conn, status);
1599 hci_conn_drop(conn);
1603 hci_dev_unlock(hdev);
1606 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1608 struct hci_cp_set_conn_encrypt *cp;
1609 struct hci_conn *conn;
1611 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1616 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1622 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1624 if (conn->state == BT_CONFIG) {
1625 hci_connect_cfm(conn, status);
1626 hci_conn_drop(conn);
1630 hci_dev_unlock(hdev);
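/* Decide whether an outgoing connection still in BT_CONFIG needs to be
 * authenticated before it can be reported as fully established.
 */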
1633 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1634 struct hci_conn *conn)
1636 if (conn->state != BT_CONFIG || !conn->out)
1639 if (conn->pending_sec_level == BT_SECURITY_SDP)
1642 /* Only request authentication for SSP connections or non-SSP
1643 * devices with sec_level MEDIUM or HIGH or if MITM protection
1646 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1647 conn->pending_sec_level != BT_SECURITY_FIPS &&
1648 conn->pending_sec_level != BT_SECURITY_HIGH &&
1649 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1655 static int hci_resolve_name(struct hci_dev *hdev,
1656 struct inquiry_entry *e)
1658 struct hci_cp_remote_name_req cp;
1660 memset(&cp, 0, sizeof(cp));
1662 bacpy(&cp.bdaddr, &e->data.bdaddr);
1663 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1664 cp.pscan_mode = e->data.pscan_mode;
1665 cp.clock_offset = e->data.clock_offset;
1667 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
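/* Issue a Remote Name Request for the next inquiry cache entry that
 * still needs name resolution; returns true if a request was sent.
 */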
1670 static bool hci_resolve_next_name(struct hci_dev *hdev)
1672 struct discovery_state *discov = &hdev->discovery;
1673 struct inquiry_entry *e;
1675 if (list_empty(&discov->resolve))
1678 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1682 if (hci_resolve_name(hdev, e) == 0) {
1683 e->name_state = NAME_PENDING;
1690 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1691 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1693 struct discovery_state *discov = &hdev->discovery;
1694 struct inquiry_entry *e;
/* Update the mgmt connected state if necessary. Be careful,
 * however, with conn objects that exist but are not (yet)
 * connected: only those in BT_CONFIG or BT_CONNECTED states
 * can be considered connected.
1702 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1703 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1704 mgmt_device_connected(hdev, conn, 0, name, name_len);
1706 if (discov->state == DISCOVERY_STOPPED)
1709 if (discov->state == DISCOVERY_STOPPING)
1710 goto discov_complete;
1712 if (discov->state != DISCOVERY_RESOLVING)
1715 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
/* If the device was not found in the list of found devices whose names
 * are pending, there is no need to continue resolving the next name, as
 * that will be done upon receiving another Remote Name Request Complete
1725 e->name_state = NAME_KNOWN;
1726 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1727 e->data.rssi, name, name_len);
1729 e->name_state = NAME_NOT_KNOWN;
1732 if (hci_resolve_next_name(hdev))
1736 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1739 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1741 struct hci_cp_remote_name_req *cp;
1742 struct hci_conn *conn;
1744 BT_DBG("%s status 0x%2.2x", hdev->name, status);
/* If successful, wait for the name req complete event before
 * checking for the need to do authentication */
1751 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1757 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1759 if (hci_dev_test_flag(hdev, HCI_MGMT))
1760 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1765 if (!hci_outgoing_auth_needed(hdev, conn))
1768 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1769 struct hci_cp_auth_requested auth_cp;
1771 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1773 auth_cp.handle = __cpu_to_le16(conn->handle);
1774 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1775 sizeof(auth_cp), &auth_cp);
1779 hci_dev_unlock(hdev);
1782 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1784 struct hci_cp_read_remote_features *cp;
1785 struct hci_conn *conn;
1787 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1792 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1798 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1800 if (conn->state == BT_CONFIG) {
1801 hci_connect_cfm(conn, status);
1802 hci_conn_drop(conn);
1806 hci_dev_unlock(hdev);
1809 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1811 struct hci_cp_read_remote_ext_features *cp;
1812 struct hci_conn *conn;
1814 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1819 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1825 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1827 if (conn->state == BT_CONFIG) {
1828 hci_connect_cfm(conn, status);
1829 hci_conn_drop(conn);
1833 hci_dev_unlock(hdev);
1836 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1838 struct hci_cp_setup_sync_conn *cp;
1839 struct hci_conn *acl, *sco;
1842 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1847 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1851 handle = __le16_to_cpu(cp->handle);
1853 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1857 acl = hci_conn_hash_lookup_handle(hdev, handle);
1861 sco->state = BT_CLOSED;
1863 hci_connect_cfm(sco, status);
1868 hci_dev_unlock(hdev);
1871 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1873 struct hci_cp_sniff_mode *cp;
1874 struct hci_conn *conn;
1876 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1881 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1887 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1889 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1891 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1892 hci_sco_setup(conn, status);
1895 hci_dev_unlock(hdev);
1898 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1900 struct hci_cp_exit_sniff_mode *cp;
1901 struct hci_conn *conn;
1903 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1908 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1914 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1916 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1918 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1919 hci_sco_setup(conn, status);
1922 hci_dev_unlock(hdev);
1925 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1927 struct hci_cp_disconnect *cp;
1928 struct hci_conn *conn;
1933 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1939 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1941 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1942 conn->dst_type, status);
1944 hci_dev_unlock(hdev);
1947 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1949 struct hci_cp_create_phy_link *cp;
1951 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1953 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1960 struct hci_conn *hcon;
1962 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1966 amp_write_remote_assoc(hdev, cp->phy_handle);
1969 hci_dev_unlock(hdev);
1972 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1974 struct hci_cp_accept_phy_link *cp;
1976 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1981 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1985 amp_write_remote_assoc(hdev, cp->phy_handle);
1988 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
1990 struct hci_cp_le_create_conn *cp;
1991 struct hci_conn *conn;
1993 BT_DBG("%s status 0x%2.2x", hdev->name, status);
/* All connection failure handling is taken care of by the
 * hci_le_conn_failed function, which is triggered by the HCI
 * request completion callbacks used for connecting.
2002 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2008 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
2012 /* Store the initiator and responder address information which
2013 * is needed for SMP. These values will not change during the
2014 * lifetime of the connection.
2016 conn->init_addr_type = cp->own_address_type;
2017 if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
2018 bacpy(&conn->init_addr, &hdev->random_addr);
2020 bacpy(&conn->init_addr, &hdev->bdaddr);
2022 conn->resp_addr_type = cp->peer_addr_type;
2023 bacpy(&conn->resp_addr, &cp->peer_addr);
2025 /* We don't want the connection attempt to stick around
2026 * indefinitely since LE doesn't have a page timeout concept
2027 * like BR/EDR. Set a timer for any connection that doesn't use
2028 * the white list for connecting.
2030 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
2031 queue_delayed_work(conn->hdev->workqueue,
2032 &conn->le_conn_timeout,
2033 conn->conn_timeout);
2036 hci_dev_unlock(hdev);
2039 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2041 struct hci_cp_le_read_remote_features *cp;
2042 struct hci_conn *conn;
2044 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2049 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2055 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2057 if (conn->state == BT_CONFIG) {
2058 hci_connect_cfm(conn, status);
2059 hci_conn_drop(conn);
2063 hci_dev_unlock(hdev);
2066 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2068 struct hci_cp_le_start_enc *cp;
2069 struct hci_conn *conn;
2071 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2078 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2082 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2086 if (conn->state != BT_CONNECTED)
2089 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2090 hci_conn_drop(conn);
2093 hci_dev_unlock(hdev);
2096 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2098 struct hci_cp_switch_role *cp;
2099 struct hci_conn *conn;
2101 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2106 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2112 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2114 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2116 hci_dev_unlock(hdev);
2119 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2121 __u8 status = *((__u8 *) skb->data);
2122 struct discovery_state *discov = &hdev->discovery;
2123 struct inquiry_entry *e;
2125 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2127 hci_conn_check_pending(hdev);
2129 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2132 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2133 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2135 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2140 if (discov->state != DISCOVERY_FINDING)
2143 if (list_empty(&discov->resolve)) {
2144 /* When BR/EDR inquiry is active and no LE scanning is in
2145 * progress, then change discovery state to indicate completion.
2147 * When running LE scanning and BR/EDR inquiry simultaneously
2148 * and the LE scan already finished, then change the discovery
2149 * state to indicate completion.
2151 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2152 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2153 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2157 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2158 if (e && hci_resolve_name(hdev, e) == 0) {
2159 e->name_state = NAME_PENDING;
2160 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2162 /* When BR/EDR inquiry is active and no LE scanning is in
2163 * progress, then change discovery state to indicate completion.
2165 * When running LE scanning and BR/EDR inquiry simultaneously
2166 * and the LE scan already finished, then change the discovery
2167 * state to indicate completion.
2169 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2170 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2171 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2175 hci_dev_unlock(hdev);
2178 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2180 struct inquiry_data data;
2181 struct inquiry_info *info = (void *) (skb->data + 1);
2182 int num_rsp = *((__u8 *) skb->data);
2184 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2189 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2194 for (; num_rsp; num_rsp--, info++) {
2197 bacpy(&data.bdaddr, &info->bdaddr);
2198 data.pscan_rep_mode = info->pscan_rep_mode;
2199 data.pscan_period_mode = info->pscan_period_mode;
2200 data.pscan_mode = info->pscan_mode;
2201 memcpy(data.dev_class, info->dev_class, 3);
2202 data.clock_offset = info->clock_offset;
2203 data.rssi = HCI_RSSI_INVALID;
2204 data.ssp_mode = 0x00;
2206 flags = hci_inquiry_cache_update(hdev, &data, false);
2208 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2209 info->dev_class, HCI_RSSI_INVALID,
2210 flags, NULL, 0, NULL, 0);
2213 hci_dev_unlock(hdev);
2216 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2218 struct hci_ev_conn_complete *ev = (void *) skb->data;
2219 struct hci_conn *conn;
2221 BT_DBG("%s", hdev->name);
2225 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2227 if (ev->link_type != SCO_LINK)
2230 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2234 conn->type = SCO_LINK;
2238 conn->handle = __le16_to_cpu(ev->handle);
2240 if (conn->type == ACL_LINK) {
2241 conn->state = BT_CONFIG;
2242 hci_conn_hold(conn);
2244 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2245 !hci_find_link_key(hdev, &ev->bdaddr))
2246 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2248 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2250 conn->state = BT_CONNECTED;
2252 hci_debugfs_create_conn(conn);
2253 hci_conn_add_sysfs(conn);
2255 if (test_bit(HCI_AUTH, &hdev->flags))
2256 set_bit(HCI_CONN_AUTH, &conn->flags);
2258 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2259 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2261 /* Get remote features */
2262 if (conn->type == ACL_LINK) {
2263 struct hci_cp_read_remote_features cp;
2264 cp.handle = ev->handle;
2265 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2268 hci_update_page_scan(hdev);
2271 /* Set packet type for incoming connection */
2272 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2273 struct hci_cp_change_conn_ptype cp;
2274 cp.handle = ev->handle;
2275 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2276 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2280 conn->state = BT_CLOSED;
2281 if (conn->type == ACL_LINK)
2282 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2283 conn->dst_type, ev->status);
2286 if (conn->type == ACL_LINK)
2287 hci_sco_setup(conn, ev->status);
2290 hci_connect_cfm(conn, ev->status);
2292 } else if (ev->link_type != ACL_LINK)
2293 hci_connect_cfm(conn, ev->status);
2296 hci_dev_unlock(hdev);
2298 hci_conn_check_pending(hdev);
2301 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2303 struct hci_cp_reject_conn_req cp;
2305 bacpy(&cp.bdaddr, bdaddr);
2306 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2307 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2310 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2312 struct hci_ev_conn_request *ev = (void *) skb->data;
2313 int mask = hdev->link_mode;
2314 struct inquiry_entry *ie;
2315 struct hci_conn *conn;
2318 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2321 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2324 if (!(mask & HCI_LM_ACCEPT)) {
2325 hci_reject_conn(hdev, &ev->bdaddr);
2329 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2331 hci_reject_conn(hdev, &ev->bdaddr);
2335 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2336 * connection. These features are only touched through mgmt so
2337 * only do the checks if HCI_MGMT is set.
2339 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2340 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2341 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2343 hci_reject_conn(hdev, &ev->bdaddr);
2347 /* Connection accepted */
2351 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2353 memcpy(ie->data.dev_class, ev->dev_class, 3);
2355 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2358 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2361 BT_ERR("No memory for new connection");
2362 hci_dev_unlock(hdev);
2367 memcpy(conn->dev_class, ev->dev_class, 3);
2369 hci_dev_unlock(hdev);
2371 if (ev->link_type == ACL_LINK ||
2372 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2373 struct hci_cp_accept_conn_req cp;
2374 conn->state = BT_CONNECT;
2376 bacpy(&cp.bdaddr, &ev->bdaddr);
2378 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2379 cp.role = 0x00; /* Become master */
2381 cp.role = 0x01; /* Remain slave */
2383 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2384 } else if (!(flags & HCI_PROTO_DEFER)) {
2385 struct hci_cp_accept_sync_conn_req cp;
2386 conn->state = BT_CONNECT;
2388 bacpy(&cp.bdaddr, &ev->bdaddr);
2389 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2391 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2392 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2393 cp.max_latency = cpu_to_le16(0xffff);
2394 cp.content_format = cpu_to_le16(hdev->voice_setting);
2395 cp.retrans_effort = 0xff;
2397 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2400 conn->state = BT_CONNECT2;
2401 hci_connect_cfm(conn, 0);
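/* Map an HCI disconnect reason to the mgmt disconnection reason that is
 * reported to userspace.
 */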
2405 static u8 hci_to_mgmt_reason(u8 err)
2408 case HCI_ERROR_CONNECTION_TIMEOUT:
2409 return MGMT_DEV_DISCONN_TIMEOUT;
2410 case HCI_ERROR_REMOTE_USER_TERM:
2411 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2412 case HCI_ERROR_REMOTE_POWER_OFF:
2413 return MGMT_DEV_DISCONN_REMOTE;
2414 case HCI_ERROR_LOCAL_HOST_TERM:
2415 return MGMT_DEV_DISCONN_LOCAL_HOST;
2417 return MGMT_DEV_DISCONN_UNKNOWN;
2421 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2423 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2424 u8 reason = hci_to_mgmt_reason(ev->reason);
2425 struct hci_conn_params *params;
2426 struct hci_conn *conn;
2427 bool mgmt_connected;
2430 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2434 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2439 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2440 conn->dst_type, ev->status);
2444 conn->state = BT_CLOSED;
2446 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2447 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2448 reason, mgmt_connected);
2450 if (conn->type == ACL_LINK) {
2451 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2452 hci_remove_link_key(hdev, &conn->dst);
2454 hci_update_page_scan(hdev);
2457 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2459 switch (params->auto_connect) {
2460 case HCI_AUTO_CONN_LINK_LOSS:
2461 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2465 case HCI_AUTO_CONN_DIRECT:
2466 case HCI_AUTO_CONN_ALWAYS:
2467 list_del_init(¶ms->action);
2468 list_add(¶ms->action, &hdev->pend_le_conns);
2469 hci_update_background_scan(hdev);
2479 hci_disconn_cfm(conn, ev->reason);
2482 /* Re-enable advertising if necessary, since it might
2483 * have been disabled by the connection. From the
2484 * HCI_LE_Set_Advertise_Enable command description in
2485 * the core specification (v4.0):
2486 * "The Controller shall continue advertising until the Host
2487 * issues an LE_Set_Advertise_Enable command with
2488 * Advertising_Enable set to 0x00 (Advertising is disabled)
2489 * or until a connection is created or until the Advertising
2490 * is timed out due to Directed Advertising."
2492 if (type == LE_LINK)
2493 mgmt_reenable_advertising(hdev);
2496 hci_dev_unlock(hdev);
2499 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2501 struct hci_ev_auth_complete *ev = (void *) skb->data;
2502 struct hci_conn *conn;
2504 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2508 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2513 if (!hci_conn_ssp_enabled(conn) &&
2514 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2515 BT_INFO("re-auth of legacy device is not possible.");
2517 set_bit(HCI_CONN_AUTH, &conn->flags);
2518 conn->sec_level = conn->pending_sec_level;
2521 mgmt_auth_failed(conn, ev->status);
2524 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2525 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2527 if (conn->state == BT_CONFIG) {
2528 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2529 struct hci_cp_set_conn_encrypt cp;
2530 cp.handle = ev->handle;
2532 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2535 conn->state = BT_CONNECTED;
2536 hci_connect_cfm(conn, ev->status);
2537 hci_conn_drop(conn);
2540 hci_auth_cfm(conn, ev->status);
2542 hci_conn_hold(conn);
2543 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2544 hci_conn_drop(conn);
2547 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2549 struct hci_cp_set_conn_encrypt cp;
2550 cp.handle = ev->handle;
2552 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2555 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2556 hci_encrypt_cfm(conn, ev->status, 0x00);
2561 hci_dev_unlock(hdev);
2564 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2566 struct hci_ev_remote_name *ev = (void *) skb->data;
2567 struct hci_conn *conn;
2569 BT_DBG("%s", hdev->name);
2571 hci_conn_check_pending(hdev);
2575 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2577 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2580 if (ev->status == 0)
2581 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2582 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2584 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2590 if (!hci_outgoing_auth_needed(hdev, conn))
2593 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2594 struct hci_cp_auth_requested cp;
2596 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2598 cp.handle = __cpu_to_le16(conn->handle);
2599 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2603 hci_dev_unlock(hdev);
2606 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2608 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2609 struct hci_conn *conn;
2611 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2615 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2621 /* Encryption implies authentication */
2622 set_bit(HCI_CONN_AUTH, &conn->flags);
2623 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2624 conn->sec_level = conn->pending_sec_level;
2626 /* P-256 authentication key implies FIPS */
2627 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2628 set_bit(HCI_CONN_FIPS, &conn->flags);
2630 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2631 conn->type == LE_LINK)
2632 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2634 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2635 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2639 /* We should disregard the current RPA and generate a new one
2640 * whenever the encryption procedure fails.
2642 if (ev->status && conn->type == LE_LINK)
2643 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2645 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2647 if (ev->status && conn->state == BT_CONNECTED) {
2648 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2649 hci_conn_drop(conn);
2653 /* In Secure Connections Only mode, do not allow any connections
2654 * that are not encrypted with AES-CCM using a P-256 authenticated
2657 if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
2658 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2659 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2660 hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2661 hci_conn_drop(conn);
2665 if (conn->state == BT_CONFIG) {
2667 conn->state = BT_CONNECTED;
2669 hci_connect_cfm(conn, ev->status);
2670 hci_conn_drop(conn);
2672 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2675 hci_dev_unlock(hdev);
2678 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2679 struct sk_buff *skb)
2681 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2682 struct hci_conn *conn;
2684 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2688 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2691 set_bit(HCI_CONN_SECURE, &conn->flags);
2693 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2695 hci_key_change_cfm(conn, ev->status);
2698 hci_dev_unlock(hdev);
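/* Remote Features event: stores page 0 of the remote LMP features.
 * While the connection is still in BT_CONFIG, the extended features
 * (page 1) are requested if both sides support them; otherwise the
 * remote name is resolved or the connection setup is completed.
 */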
2701 static void hci_remote_features_evt(struct hci_dev *hdev,
2702 struct sk_buff *skb)
2704 struct hci_ev_remote_features *ev = (void *) skb->data;
2705 struct hci_conn *conn;
2707 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2711 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2716 memcpy(conn->features[0], ev->features, 8);
2718 if (conn->state != BT_CONFIG)
2721 if (!ev->status && lmp_ext_feat_capable(hdev) &&
2722 lmp_ext_feat_capable(conn)) {
2723 struct hci_cp_read_remote_ext_features cp;
2724 cp.handle = ev->handle;
2726 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2731 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2732 struct hci_cp_remote_name_req cp;
2733 memset(&cp, 0, sizeof(cp));
2734 bacpy(&cp.bdaddr, &conn->dst);
2735 cp.pscan_rep_mode = 0x02;
2736 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2737 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2738 mgmt_device_connected(hdev, conn, 0, NULL, 0);
2740 if (!hci_outgoing_auth_needed(hdev, conn)) {
2741 conn->state = BT_CONNECTED;
2742 hci_connect_cfm(conn, ev->status);
2743 hci_conn_drop(conn);
2747 hci_dev_unlock(hdev);
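/* Command Complete event: dispatch the returned parameters to the
 * matching hci_cc_* handler based on the opcode, cancel the command
 * timeout timer, refresh the outstanding command credit from the
 * event's ncmd field and kick the command queue again.
 */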
2750 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
2751 u16 *opcode, u8 *status,
2752 hci_req_complete_t *req_complete,
2753 hci_req_complete_skb_t *req_complete_skb)
2755 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2757 *opcode = __le16_to_cpu(ev->opcode);
2758 *status = skb->data[sizeof(*ev)];
2760 skb_pull(skb, sizeof(*ev));
2763 case HCI_OP_INQUIRY_CANCEL:
2764 hci_cc_inquiry_cancel(hdev, skb);
2767 case HCI_OP_PERIODIC_INQ:
2768 hci_cc_periodic_inq(hdev, skb);
2771 case HCI_OP_EXIT_PERIODIC_INQ:
2772 hci_cc_exit_periodic_inq(hdev, skb);
2775 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2776 hci_cc_remote_name_req_cancel(hdev, skb);
2779 case HCI_OP_ROLE_DISCOVERY:
2780 hci_cc_role_discovery(hdev, skb);
2783 case HCI_OP_READ_LINK_POLICY:
2784 hci_cc_read_link_policy(hdev, skb);
2787 case HCI_OP_WRITE_LINK_POLICY:
2788 hci_cc_write_link_policy(hdev, skb);
2791 case HCI_OP_READ_DEF_LINK_POLICY:
2792 hci_cc_read_def_link_policy(hdev, skb);
2795 case HCI_OP_WRITE_DEF_LINK_POLICY:
2796 hci_cc_write_def_link_policy(hdev, skb);
2800 hci_cc_reset(hdev, skb);
2803 case HCI_OP_READ_STORED_LINK_KEY:
2804 hci_cc_read_stored_link_key(hdev, skb);
2807 case HCI_OP_DELETE_STORED_LINK_KEY:
2808 hci_cc_delete_stored_link_key(hdev, skb);
2811 case HCI_OP_WRITE_LOCAL_NAME:
2812 hci_cc_write_local_name(hdev, skb);
2815 case HCI_OP_READ_LOCAL_NAME:
2816 hci_cc_read_local_name(hdev, skb);
2819 case HCI_OP_WRITE_AUTH_ENABLE:
2820 hci_cc_write_auth_enable(hdev, skb);
2823 case HCI_OP_WRITE_ENCRYPT_MODE:
2824 hci_cc_write_encrypt_mode(hdev, skb);
2827 case HCI_OP_WRITE_SCAN_ENABLE:
2828 hci_cc_write_scan_enable(hdev, skb);
2831 case HCI_OP_READ_CLASS_OF_DEV:
2832 hci_cc_read_class_of_dev(hdev, skb);
2835 case HCI_OP_WRITE_CLASS_OF_DEV:
2836 hci_cc_write_class_of_dev(hdev, skb);
2839 case HCI_OP_READ_VOICE_SETTING:
2840 hci_cc_read_voice_setting(hdev, skb);
2843 case HCI_OP_WRITE_VOICE_SETTING:
2844 hci_cc_write_voice_setting(hdev, skb);
2847 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2848 hci_cc_read_num_supported_iac(hdev, skb);
2851 case HCI_OP_WRITE_SSP_MODE:
2852 hci_cc_write_ssp_mode(hdev, skb);
2855 case HCI_OP_WRITE_SC_SUPPORT:
2856 hci_cc_write_sc_support(hdev, skb);
2859 case HCI_OP_READ_LOCAL_VERSION:
2860 hci_cc_read_local_version(hdev, skb);
2863 case HCI_OP_READ_LOCAL_COMMANDS:
2864 hci_cc_read_local_commands(hdev, skb);
2867 case HCI_OP_READ_LOCAL_FEATURES:
2868 hci_cc_read_local_features(hdev, skb);
2871 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2872 hci_cc_read_local_ext_features(hdev, skb);
2875 case HCI_OP_READ_BUFFER_SIZE:
2876 hci_cc_read_buffer_size(hdev, skb);
2879 case HCI_OP_READ_BD_ADDR:
2880 hci_cc_read_bd_addr(hdev, skb);
2883 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2884 hci_cc_read_page_scan_activity(hdev, skb);
2887 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2888 hci_cc_write_page_scan_activity(hdev, skb);
2891 case HCI_OP_READ_PAGE_SCAN_TYPE:
2892 hci_cc_read_page_scan_type(hdev, skb);
2895 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2896 hci_cc_write_page_scan_type(hdev, skb);
2899 case HCI_OP_READ_DATA_BLOCK_SIZE:
2900 hci_cc_read_data_block_size(hdev, skb);
2903 case HCI_OP_READ_FLOW_CONTROL_MODE:
2904 hci_cc_read_flow_control_mode(hdev, skb);
2907 case HCI_OP_READ_LOCAL_AMP_INFO:
2908 hci_cc_read_local_amp_info(hdev, skb);
2911 case HCI_OP_READ_CLOCK:
2912 hci_cc_read_clock(hdev, skb);
2915 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2916 hci_cc_read_local_amp_assoc(hdev, skb);
2919 case HCI_OP_READ_INQ_RSP_TX_POWER:
2920 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2923 case HCI_OP_PIN_CODE_REPLY:
2924 hci_cc_pin_code_reply(hdev, skb);
2927 case HCI_OP_PIN_CODE_NEG_REPLY:
2928 hci_cc_pin_code_neg_reply(hdev, skb);
2931 case HCI_OP_READ_LOCAL_OOB_DATA:
2932 hci_cc_read_local_oob_data(hdev, skb);
2935 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2936 hci_cc_read_local_oob_ext_data(hdev, skb);
2939 case HCI_OP_LE_READ_BUFFER_SIZE:
2940 hci_cc_le_read_buffer_size(hdev, skb);
2943 case HCI_OP_LE_READ_LOCAL_FEATURES:
2944 hci_cc_le_read_local_features(hdev, skb);
2947 case HCI_OP_LE_READ_ADV_TX_POWER:
2948 hci_cc_le_read_adv_tx_power(hdev, skb);
2951 case HCI_OP_USER_CONFIRM_REPLY:
2952 hci_cc_user_confirm_reply(hdev, skb);
2955 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2956 hci_cc_user_confirm_neg_reply(hdev, skb);
2959 case HCI_OP_USER_PASSKEY_REPLY:
2960 hci_cc_user_passkey_reply(hdev, skb);
2963 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2964 hci_cc_user_passkey_neg_reply(hdev, skb);
2967 case HCI_OP_LE_SET_RANDOM_ADDR:
2968 hci_cc_le_set_random_addr(hdev, skb);
2971 case HCI_OP_LE_SET_ADV_ENABLE:
2972 hci_cc_le_set_adv_enable(hdev, skb);
2975 case HCI_OP_LE_SET_SCAN_PARAM:
2976 hci_cc_le_set_scan_param(hdev, skb);
2979 case HCI_OP_LE_SET_SCAN_ENABLE:
2980 hci_cc_le_set_scan_enable(hdev, skb);
2983 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2984 hci_cc_le_read_white_list_size(hdev, skb);
2987 case HCI_OP_LE_CLEAR_WHITE_LIST:
2988 hci_cc_le_clear_white_list(hdev, skb);
2991 case HCI_OP_LE_ADD_TO_WHITE_LIST:
2992 hci_cc_le_add_to_white_list(hdev, skb);
2995 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
2996 hci_cc_le_del_from_white_list(hdev, skb);
2999 case HCI_OP_LE_READ_SUPPORTED_STATES:
3000 hci_cc_le_read_supported_states(hdev, skb);
3003 case HCI_OP_LE_READ_DEF_DATA_LEN:
3004 hci_cc_le_read_def_data_len(hdev, skb);
3007 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3008 hci_cc_le_write_def_data_len(hdev, skb);
3011 case HCI_OP_LE_READ_MAX_DATA_LEN:
3012 hci_cc_le_read_max_data_len(hdev, skb);
3015 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3016 hci_cc_write_le_host_supported(hdev, skb);
3019 case HCI_OP_LE_SET_ADV_PARAM:
3020 hci_cc_set_adv_param(hdev, skb);
3023 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
3024 hci_cc_write_remote_amp_assoc(hdev, skb);
3027 case HCI_OP_READ_RSSI:
3028 hci_cc_read_rssi(hdev, skb);
3031 case HCI_OP_READ_TX_POWER:
3032 hci_cc_read_tx_power(hdev, skb);
3035 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3036 hci_cc_write_ssp_debug_mode(hdev, skb);
3040 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3044 if (*opcode != HCI_OP_NOP)
3045 cancel_delayed_work(&hdev->cmd_timer);
3047 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3048 atomic_set(&hdev->cmd_cnt, 1);
3050 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3053 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3054 queue_work(hdev->workqueue, &hdev->cmd_work);
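/* Command Status event: reports early success or failure for commands
 * that complete asynchronously. The status is passed to the matching
 * hci_cs_* handler, the command timeout timer is cancelled, the
 * command credit is refreshed and, if the command failed or no
 * follow-up event is expected, the pending request is completed.
 */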
3057 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3058 u16 *opcode, u8 *status,
3059 hci_req_complete_t *req_complete,
3060 hci_req_complete_skb_t *req_complete_skb)
3062 struct hci_ev_cmd_status *ev = (void *) skb->data;
3064 skb_pull(skb, sizeof(*ev));
3066 *opcode = __le16_to_cpu(ev->opcode);
3067 *status = ev->status;
3070 case HCI_OP_INQUIRY:
3071 hci_cs_inquiry(hdev, ev->status);
3074 case HCI_OP_CREATE_CONN:
3075 hci_cs_create_conn(hdev, ev->status);
3078 case HCI_OP_DISCONNECT:
3079 hci_cs_disconnect(hdev, ev->status);
3082 case HCI_OP_ADD_SCO:
3083 hci_cs_add_sco(hdev, ev->status);
3086 case HCI_OP_AUTH_REQUESTED:
3087 hci_cs_auth_requested(hdev, ev->status);
3090 case HCI_OP_SET_CONN_ENCRYPT:
3091 hci_cs_set_conn_encrypt(hdev, ev->status);
3094 case HCI_OP_REMOTE_NAME_REQ:
3095 hci_cs_remote_name_req(hdev, ev->status);
3098 case HCI_OP_READ_REMOTE_FEATURES:
3099 hci_cs_read_remote_features(hdev, ev->status);
3102 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3103 hci_cs_read_remote_ext_features(hdev, ev->status);
3106 case HCI_OP_SETUP_SYNC_CONN:
3107 hci_cs_setup_sync_conn(hdev, ev->status);
3110 case HCI_OP_CREATE_PHY_LINK:
3111 hci_cs_create_phylink(hdev, ev->status);
3114 case HCI_OP_ACCEPT_PHY_LINK:
3115 hci_cs_accept_phylink(hdev, ev->status);
3118 case HCI_OP_SNIFF_MODE:
3119 hci_cs_sniff_mode(hdev, ev->status);
3122 case HCI_OP_EXIT_SNIFF_MODE:
3123 hci_cs_exit_sniff_mode(hdev, ev->status);
3126 case HCI_OP_SWITCH_ROLE:
3127 hci_cs_switch_role(hdev, ev->status);
3130 case HCI_OP_LE_CREATE_CONN:
3131 hci_cs_le_create_conn(hdev, ev->status);
3134 case HCI_OP_LE_READ_REMOTE_FEATURES:
3135 hci_cs_le_read_remote_features(hdev, ev->status);
3138 case HCI_OP_LE_START_ENC:
3139 hci_cs_le_start_enc(hdev, ev->status);
3143 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3147 if (*opcode != HCI_OP_NOP)
3148 cancel_delayed_work(&hdev->cmd_timer);
3150 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3151 atomic_set(&hdev->cmd_cnt, 1);
3153 /* Indicate request completion if the command failed. Also, if
3154 * we're not waiting for a special event and we get a success
3155 * command status we should try to flag the request as completed
3156 * (since for this kind of commands there will not be a command
3157 * complete event).
3158 */
3159 if (ev->status ||
3160 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
3161 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3164 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3165 queue_work(hdev->workqueue, &hdev->cmd_work);
3168 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3170 struct hci_ev_hardware_error *ev = (void *) skb->data;
3172 hdev->hw_error_code = ev->code;
3174 queue_work(hdev->req_workqueue, &hdev->error_reset);
3177 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3179 struct hci_ev_role_change *ev = (void *) skb->data;
3180 struct hci_conn *conn;
3182 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3186 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3189 conn->role = ev->role;
3191 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3193 hci_role_switch_cfm(conn, ev->status, ev->role);
3196 hci_dev_unlock(hdev);
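/* Number of Completed Packets event (packet-based flow control):
 * for every reported handle the per-connection 'sent' counter is
 * decreased and the corresponding ACL, LE or SCO credit pool is
 * replenished (capped at the controller's advertised limits) before
 * the TX work is scheduled again.
 */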
3199 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3201 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3204 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3205 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
3209 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3210 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3211 BT_DBG("%s bad parameters", hdev->name);
3215 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3217 for (i = 0; i < ev->num_hndl; i++) {
3218 struct hci_comp_pkts_info *info = &ev->handles[i];
3219 struct hci_conn *conn;
3220 __u16 handle, count;
3222 handle = __le16_to_cpu(info->handle);
3223 count = __le16_to_cpu(info->count);
3225 conn = hci_conn_hash_lookup_handle(hdev, handle);
3229 conn->sent -= count;
3231 switch (conn->type) {
3233 hdev->acl_cnt += count;
3234 if (hdev->acl_cnt > hdev->acl_pkts)
3235 hdev->acl_cnt = hdev->acl_pkts;
3239 if (hdev->le_pkts) {
3240 hdev->le_cnt += count;
3241 if (hdev->le_cnt > hdev->le_pkts)
3242 hdev->le_cnt = hdev->le_pkts;
3244 hdev->acl_cnt += count;
3245 if (hdev->acl_cnt > hdev->acl_pkts)
3246 hdev->acl_cnt = hdev->acl_pkts;
3251 hdev->sco_cnt += count;
3252 if (hdev->sco_cnt > hdev->sco_pkts)
3253 hdev->sco_cnt = hdev->sco_pkts;
3257 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3262 queue_work(hdev->workqueue, &hdev->tx_work);
3265 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3268 struct hci_chan *chan;
3270 switch (hdev->dev_type) {
3272 return hci_conn_hash_lookup_handle(hdev, handle);
3274 chan = hci_chan_lookup_handle(hdev, handle);
3279 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3286 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3288 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3291 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3292 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
3296 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3297 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3298 BT_DBG("%s bad parameters", hdev->name);
3302 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3305 for (i = 0; i < ev->num_hndl; i++) {
3306 struct hci_comp_blocks_info *info = &ev->handles[i];
3307 struct hci_conn *conn = NULL;
3308 __u16 handle, block_count;
3310 handle = __le16_to_cpu(info->handle);
3311 block_count = __le16_to_cpu(info->blocks);
3313 conn = __hci_conn_lookup_handle(hdev, handle);
3317 conn->sent -= block_count;
3319 switch (conn->type) {
3322 hdev->block_cnt += block_count;
3323 if (hdev->block_cnt > hdev->num_blocks)
3324 hdev->block_cnt = hdev->num_blocks;
3328 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3333 queue_work(hdev->workqueue, &hdev->tx_work);
3336 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3338 struct hci_ev_mode_change *ev = (void *) skb->data;
3339 struct hci_conn *conn;
3341 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3345 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3347 conn->mode = ev->mode;
3349 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3351 if (conn->mode == HCI_CM_ACTIVE)
3352 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3354 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3357 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3358 hci_sco_setup(conn, ev->status);
3361 hci_dev_unlock(hdev);
3364 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3366 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3367 struct hci_conn *conn;
3369 BT_DBG("%s", hdev->name);
3373 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3377 if (conn->state == BT_CONNECTED) {
3378 hci_conn_hold(conn);
3379 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3380 hci_conn_drop(conn);
3383 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3384 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3385 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3386 sizeof(ev->bdaddr), &ev->bdaddr);
3387 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3390 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3395 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3399 hci_dev_unlock(hdev);
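/* Record the link key type and PIN length on the connection and derive
 * the pending security level from it: plain combination keys map to
 * high or medium security depending on the PIN length, unauthenticated
 * P-192/P-256 keys to medium, authenticated P-192 to high and
 * authenticated P-256 to FIPS. Unit and debug keys leave the level
 * untouched, and changed combination keys keep the stored type.
 */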
3402 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3404 if (key_type == HCI_LK_CHANGED_COMBINATION)
3407 conn->pin_length = pin_len;
3408 conn->key_type = key_type;
3411 case HCI_LK_LOCAL_UNIT:
3412 case HCI_LK_REMOTE_UNIT:
3413 case HCI_LK_DEBUG_COMBINATION:
3415 case HCI_LK_COMBINATION:
3417 conn->pending_sec_level = BT_SECURITY_HIGH;
3419 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3421 case HCI_LK_UNAUTH_COMBINATION_P192:
3422 case HCI_LK_UNAUTH_COMBINATION_P256:
3423 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3425 case HCI_LK_AUTH_COMBINATION_P192:
3426 conn->pending_sec_level = BT_SECURITY_HIGH;
3428 case HCI_LK_AUTH_COMBINATION_P256:
3429 conn->pending_sec_level = BT_SECURITY_FIPS;
3434 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3436 struct hci_ev_link_key_req *ev = (void *) skb->data;
3437 struct hci_cp_link_key_reply cp;
3438 struct hci_conn *conn;
3439 struct link_key *key;
3441 BT_DBG("%s", hdev->name);
3443 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3448 key = hci_find_link_key(hdev, &ev->bdaddr);
3450 BT_DBG("%s link key not found for %pMR", hdev->name,
3455 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3458 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3460 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3462 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3463 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3464 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3465 BT_DBG("%s ignoring unauthenticated key", hdev->name);
3469 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3470 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3471 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3472 BT_DBG("%s ignoring key unauthenticated for high security",
3477 conn_set_key(conn, key->type, key->pin_len);
3480 bacpy(&cp.bdaddr, &ev->bdaddr);
3481 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3483 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3485 hci_dev_unlock(hdev);
3490 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3491 hci_dev_unlock(hdev);
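/* Link Key Notification event: the controller reports a new or changed
 * link key for a connection. The key is stored through the management
 * layer, debug keys are discarded again unless HCI_KEEP_DEBUG_KEYS is
 * set, and the flush behaviour of the key is noted on the connection.
 */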
3494 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3496 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3497 struct hci_conn *conn;
3498 struct link_key *key;
3502 BT_DBG("%s", hdev->name);
3506 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3510 hci_conn_hold(conn);
3511 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3512 hci_conn_drop(conn);
3514 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3515 conn_set_key(conn, ev->key_type, conn->pin_length);
3517 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3520 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3521 ev->key_type, pin_len, &persistent);
3525 /* Update connection information since adding the key will have
3526 * fixed up the type in the case of changed combination keys.
3528 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3529 conn_set_key(conn, key->type, key->pin_len);
3531 mgmt_new_link_key(hdev, key, persistent);
3533 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3534 * is set. If it's not set simply remove the key from the kernel
3535 * list (we've still notified user space about it but with
3536 * store_hint being 0).
3538 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3539 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
3540 list_del_rcu(&key->list);
3541 kfree_rcu(key, rcu);
3546 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3548 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3551 hci_dev_unlock(hdev);
3554 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3556 struct hci_ev_clock_offset *ev = (void *) skb->data;
3557 struct hci_conn *conn;
3559 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3563 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3564 if (conn && !ev->status) {
3565 struct inquiry_entry *ie;
3567 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3569 ie->data.clock_offset = ev->clock_offset;
3570 ie->timestamp = jiffies;
3574 hci_dev_unlock(hdev);
3577 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3579 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3580 struct hci_conn *conn;
3582 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3586 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3587 if (conn && !ev->status)
3588 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3590 hci_dev_unlock(hdev);
3593 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3595 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3596 struct inquiry_entry *ie;
3598 BT_DBG("%s", hdev->name);
3602 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3604 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3605 ie->timestamp = jiffies;
3608 hci_dev_unlock(hdev);
3611 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3612 struct sk_buff *skb)
3614 struct inquiry_data data;
3615 int num_rsp = *((__u8 *) skb->data);
3617 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3622 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3627 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3628 struct inquiry_info_with_rssi_and_pscan_mode *info;
3629 info = (void *) (skb->data + 1);
3631 for (; num_rsp; num_rsp--, info++) {
3634 bacpy(&data.bdaddr, &info->bdaddr);
3635 data.pscan_rep_mode = info->pscan_rep_mode;
3636 data.pscan_period_mode = info->pscan_period_mode;
3637 data.pscan_mode = info->pscan_mode;
3638 memcpy(data.dev_class, info->dev_class, 3);
3639 data.clock_offset = info->clock_offset;
3640 data.rssi = info->rssi;
3641 data.ssp_mode = 0x00;
3643 flags = hci_inquiry_cache_update(hdev, &data, false);
3645 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3646 info->dev_class, info->rssi,
3647 flags, NULL, 0, NULL, 0);
3650 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3652 for (; num_rsp; num_rsp--, info++) {
3655 bacpy(&data.bdaddr, &info->bdaddr);
3656 data.pscan_rep_mode = info->pscan_rep_mode;
3657 data.pscan_period_mode = info->pscan_period_mode;
3658 data.pscan_mode = 0x00;
3659 memcpy(data.dev_class, info->dev_class, 3);
3660 data.clock_offset = info->clock_offset;
3661 data.rssi = info->rssi;
3662 data.ssp_mode = 0x00;
3664 flags = hci_inquiry_cache_update(hdev, &data, false);
3666 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3667 info->dev_class, info->rssi,
3668 flags, NULL, 0, NULL, 0);
3672 hci_dev_unlock(hdev);
3675 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3676 struct sk_buff *skb)
3678 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3679 struct hci_conn *conn;
3681 BT_DBG("%s", hdev->name);
3685 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3689 if (ev->page < HCI_MAX_PAGES)
3690 memcpy(conn->features[ev->page], ev->features, 8);
3692 if (!ev->status && ev->page == 0x01) {
3693 struct inquiry_entry *ie;
3695 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3697 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3699 if (ev->features[0] & LMP_HOST_SSP) {
3700 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3702 /* It is mandatory by the Bluetooth specification that
3703 * Extended Inquiry Results are only used when Secure
3704 * Simple Pairing is enabled, but some devices violate
3705 * this.
3706 *
3707 * To make these devices work, the internal SSP
3708 * enabled flag needs to be cleared if the remote host
3709 * features do not indicate SSP support */
3710 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3713 if (ev->features[0] & LMP_HOST_SC)
3714 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
3717 if (conn->state != BT_CONFIG)
3720 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3721 struct hci_cp_remote_name_req cp;
3722 memset(&cp, 0, sizeof(cp));
3723 bacpy(&cp.bdaddr, &conn->dst);
3724 cp.pscan_rep_mode = 0x02;
3725 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3726 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3727 mgmt_device_connected(hdev, conn, 0, NULL, 0);
3729 if (!hci_outgoing_auth_needed(hdev, conn)) {
3730 conn->state = BT_CONNECTED;
3731 hci_connect_cfm(conn, ev->status);
3732 hci_conn_drop(conn);
3736 hci_dev_unlock(hdev);
3739 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3740 struct sk_buff *skb)
3742 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3743 struct hci_conn *conn;
3745 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3749 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3751 if (ev->link_type == ESCO_LINK)
3754 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3758 conn->type = SCO_LINK;
3761 switch (ev->status) {
3763 conn->handle = __le16_to_cpu(ev->handle);
3764 conn->state = BT_CONNECTED;
3766 hci_debugfs_create_conn(conn);
3767 hci_conn_add_sysfs(conn);
3770 case 0x10: /* Connection Accept Timeout */
3771 case 0x0d: /* Connection Rejected due to Limited Resources */
3772 case 0x11: /* Unsupported Feature or Parameter Value */
3773 case 0x1c: /* SCO interval rejected */
3774 case 0x1a: /* Unsupported Remote Feature */
3775 case 0x1f: /* Unspecified error */
3776 case 0x20: /* Unsupported LMP Parameter value */
3778 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3779 (hdev->esco_type & EDR_ESCO_MASK);
3780 if (hci_setup_sync(conn, conn->link->handle))
3786 conn->state = BT_CLOSED;
3790 hci_connect_cfm(conn, ev->status);
3795 hci_dev_unlock(hdev);
3798 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3802 while (parsed < eir_len) {
3803 u8 field_len = eir[0];
3808 parsed += field_len + 1;
3809 eir += field_len + 1;
3815 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3816 struct sk_buff *skb)
3818 struct inquiry_data data;
3819 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3820 int num_rsp = *((__u8 *) skb->data);
3823 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3828 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3833 for (; num_rsp; num_rsp--, info++) {
3837 bacpy(&data.bdaddr, &info->bdaddr);
3838 data.pscan_rep_mode = info->pscan_rep_mode;
3839 data.pscan_period_mode = info->pscan_period_mode;
3840 data.pscan_mode = 0x00;
3841 memcpy(data.dev_class, info->dev_class, 3);
3842 data.clock_offset = info->clock_offset;
3843 data.rssi = info->rssi;
3844 data.ssp_mode = 0x01;
3846 if (hci_dev_test_flag(hdev, HCI_MGMT))
3847 name_known = eir_has_data_type(info->data,
3853 flags = hci_inquiry_cache_update(hdev, &data, name_known);
3855 eir_len = eir_get_length(info->data, sizeof(info->data));
3857 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3858 info->dev_class, info->rssi,
3859 flags, info->data, eir_len, NULL, 0);
3862 hci_dev_unlock(hdev);
3865 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3866 struct sk_buff *skb)
3868 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3869 struct hci_conn *conn;
3871 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3872 __le16_to_cpu(ev->handle));
3876 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3880 /* For BR/EDR the necessary steps are taken through the
3881 * auth_complete event.
3883 if (conn->type != LE_LINK)
3887 conn->sec_level = conn->pending_sec_level;
3889 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3891 if (ev->status && conn->state == BT_CONNECTED) {
3892 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3893 hci_conn_drop(conn);
3897 if (conn->state == BT_CONFIG) {
3899 conn->state = BT_CONNECTED;
3901 hci_connect_cfm(conn, ev->status);
3902 hci_conn_drop(conn);
3904 hci_auth_cfm(conn, ev->status);
3906 hci_conn_hold(conn);
3907 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3908 hci_conn_drop(conn);
3912 hci_dev_unlock(hdev);
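/* Derive the authentication requirements to use in the IO Capability
 * Request Reply: follow the remote side when it asks for no bonding,
 * require MITM protection when both sides have the IO capabilities
 * for it, and otherwise drop the MITM bit from the remote requirement.
 */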
3915 static u8 hci_get_auth_req(struct hci_conn *conn)
3917 /* If remote requests no-bonding follow that lead */
3918 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3919 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3920 return conn->remote_auth | (conn->auth_type & 0x01);
3922 /* If both remote and local have enough IO capabilities, require
3923 * MITM protection
3924 */
3925 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3926 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3927 return conn->remote_auth | 0x01;
3929 /* No MITM protection possible so ignore remote requirement */
3930 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
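/* Report whether usable out-of-band pairing data is stored for the
 * remote device. With Secure Connections the stored 'present' value is
 * trusted unless Secure Connections Only mode demands valid P-256
 * values; without Secure Connections only the P-192 values matter.
 */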
3933 static u8 bredr_oob_data_present(struct hci_conn *conn)
3935 struct hci_dev *hdev = conn->hdev;
3936 struct oob_data *data;
3938 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
3942 if (bredr_sc_enabled(hdev)) {
3943 /* When Secure Connections is enabled, then just
3944 * return the present value stored with the OOB
3945 * data. The stored value contains the right present
3946 * information. However it can only be trusted when
3947 * not in Secure Connection Only mode.
3949 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
3950 return data->present;
3952 /* When Secure Connections Only mode is enabled, then
3953 * the P-256 values are required. If they are not
3954 * available, then do not declare that OOB data is
3955 * present.
3956 */
3957 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
3958 !memcmp(data->hash256, ZERO_KEY, 16))
3964 /* When Secure Connections is not enabled or actually
3965 * not supported by the hardware, then check whether
3966 * P-192 data values are present.
3967 */
3968 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
3969 !memcmp(data->hash192, ZERO_KEY, 16))
3975 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3977 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3978 struct hci_conn *conn;
3980 BT_DBG("%s", hdev->name);
3984 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3988 hci_conn_hold(conn);
3990 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3993 /* Allow pairing if we're pairable, if we are the initiators
3994 * of the pairing, or if the remote is not requesting bonding.
3995 */
3996 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
3997 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
3998 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3999 struct hci_cp_io_capability_reply cp;
4001 bacpy(&cp.bdaddr, &ev->bdaddr);
4002 /* Change the IO capability from KeyboardDisplay to DisplayYesNo,
4003 * as KeyboardDisplay is not defined for BR/EDR by the BT spec. */
4004 cp.capability = (conn->io_capability == 0x04) ?
4005 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4007 /* If we are initiators, there is no remote information yet */
4008 if (conn->remote_auth == 0xff) {
4009 /* Request MITM protection if our IO caps allow it
4010 * except for the no-bonding case.
4012 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4013 conn->auth_type != HCI_AT_NO_BONDING)
4014 conn->auth_type |= 0x01;
4016 conn->auth_type = hci_get_auth_req(conn);
4019 /* If we're not bondable, force one of the non-bondable
4020 * authentication requirement values.
4022 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4023 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4025 cp.authentication = conn->auth_type;
4026 cp.oob_data = bredr_oob_data_present(conn);
4028 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4031 struct hci_cp_io_capability_neg_reply cp;
4033 bacpy(&cp.bdaddr, &ev->bdaddr);
4034 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4036 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4041 hci_dev_unlock(hdev);
4044 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4046 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4047 struct hci_conn *conn;
4049 BT_DBG("%s", hdev->name);
4053 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4057 conn->remote_cap = ev->capability;
4058 conn->remote_auth = ev->authentication;
4061 hci_dev_unlock(hdev);
4064 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4065 struct sk_buff *skb)
4067 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4068 int loc_mitm, rem_mitm, confirm_hint = 0;
4069 struct hci_conn *conn;
4071 BT_DBG("%s", hdev->name);
4075 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4078 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4082 loc_mitm = (conn->auth_type & 0x01);
4083 rem_mitm = (conn->remote_auth & 0x01);
4085 /* If we require MITM but the remote device can't provide that
4086 * (it has NoInputNoOutput) then reject the confirmation
4087 * request. We check the security level here since it doesn't
4088 * necessarily match conn->auth_type.
4090 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4091 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4092 BT_DBG("Rejecting request: remote device can't provide MITM");
4093 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4094 sizeof(ev->bdaddr), &ev->bdaddr);
4098 /* If no side requires MITM protection, auto-accept */
4099 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4100 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4102 /* If we're not the initiators, request authorization to
4103 * proceed from user space (mgmt_user_confirm with
4104 * confirm_hint set to 1). The exception is if neither
4105 * side had MITM or if the local IO capability is
4106 * NoInputNoOutput, in which case we do auto-accept
4108 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4109 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4110 (loc_mitm || rem_mitm)) {
4111 BT_DBG("Confirming auto-accept as acceptor");
4116 BT_DBG("Auto-accept of user confirmation with %ums delay",
4117 hdev->auto_accept_delay);
4119 if (hdev->auto_accept_delay > 0) {
4120 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4121 queue_delayed_work(conn->hdev->workqueue,
4122 &conn->auto_accept_work, delay);
4126 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4127 sizeof(ev->bdaddr), &ev->bdaddr);
4132 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4133 le32_to_cpu(ev->passkey), confirm_hint);
4136 hci_dev_unlock(hdev);
4139 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4140 struct sk_buff *skb)
4142 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4144 BT_DBG("%s", hdev->name);
4146 if (hci_dev_test_flag(hdev, HCI_MGMT))
4147 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4150 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4151 struct sk_buff *skb)
4153 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4154 struct hci_conn *conn;
4156 BT_DBG("%s", hdev->name);
4158 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4162 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4163 conn->passkey_entered = 0;
4165 if (hci_dev_test_flag(hdev, HCI_MGMT))
4166 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4167 conn->dst_type, conn->passkey_notify,
4168 conn->passkey_entered);
4171 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4173 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4174 struct hci_conn *conn;
4176 BT_DBG("%s", hdev->name);
4178 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4183 case HCI_KEYPRESS_STARTED:
4184 conn->passkey_entered = 0;
4187 case HCI_KEYPRESS_ENTERED:
4188 conn->passkey_entered++;
4191 case HCI_KEYPRESS_ERASED:
4192 conn->passkey_entered--;
4195 case HCI_KEYPRESS_CLEARED:
4196 conn->passkey_entered = 0;
4199 case HCI_KEYPRESS_COMPLETED:
4203 if (hci_dev_test_flag(hdev, HCI_MGMT))
4204 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4205 conn->dst_type, conn->passkey_notify,
4206 conn->passkey_entered);
4209 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4210 struct sk_buff *skb)
4212 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4213 struct hci_conn *conn;
4215 BT_DBG("%s", hdev->name);
4219 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4223 /* Reset the authentication requirement to unknown */
4224 conn->remote_auth = 0xff;
4226 /* To avoid duplicate auth_failed events to user space we check
4227 * the HCI_CONN_AUTH_PEND flag which will be set if we
4228 * initiated the authentication. A traditional auth_complete
4229 * event is always produced as initiator and is also mapped to
4230 * the mgmt_auth_failed event */
4231 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4232 mgmt_auth_failed(conn, ev->status);
4234 hci_conn_drop(conn);
4237 hci_dev_unlock(hdev);
4240 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4241 struct sk_buff *skb)
4243 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4244 struct inquiry_entry *ie;
4245 struct hci_conn *conn;
4247 BT_DBG("%s", hdev->name);
4251 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4253 memcpy(conn->features[1], ev->features, 8);
4255 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4257 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4259 hci_dev_unlock(hdev);
4262 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4263 struct sk_buff *skb)
4265 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4266 struct oob_data *data;
4268 BT_DBG("%s", hdev->name);
4272 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4275 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4277 struct hci_cp_remote_oob_data_neg_reply cp;
4279 bacpy(&cp.bdaddr, &ev->bdaddr);
4280 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4285 if (bredr_sc_enabled(hdev)) {
4286 struct hci_cp_remote_oob_ext_data_reply cp;
4288 bacpy(&cp.bdaddr, &ev->bdaddr);
4289 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4290 memset(cp.hash192, 0, sizeof(cp.hash192));
4291 memset(cp.rand192, 0, sizeof(cp.rand192));
4293 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4294 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4296 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4297 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4299 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4302 struct hci_cp_remote_oob_data_reply cp;
4304 bacpy(&cp.bdaddr, &ev->bdaddr);
4305 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4306 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4308 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4313 hci_dev_unlock(hdev);
4316 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4317 struct sk_buff *skb)
4319 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4320 struct hci_conn *hcon, *bredr_hcon;
4322 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4327 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4329 hci_dev_unlock(hdev);
4335 hci_dev_unlock(hdev);
4339 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4341 hcon->state = BT_CONNECTED;
4342 bacpy(&hcon->dst, &bredr_hcon->dst);
4344 hci_conn_hold(hcon);
4345 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4346 hci_conn_drop(hcon);
4348 hci_debugfs_create_conn(hcon);
4349 hci_conn_add_sysfs(hcon);
4351 amp_physical_cfm(bredr_hcon, hcon);
4353 hci_dev_unlock(hdev);
4356 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4358 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4359 struct hci_conn *hcon;
4360 struct hci_chan *hchan;
4361 struct amp_mgr *mgr;
4363 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4364 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4367 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4371 /* Create AMP hchan */
4372 hchan = hci_chan_create(hcon);
4376 hchan->handle = le16_to_cpu(ev->handle);
4378 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4380 mgr = hcon->amp_mgr;
4381 if (mgr && mgr->bredr_chan) {
4382 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4384 l2cap_chan_lock(bredr_chan);
4386 bredr_chan->conn->mtu = hdev->block_mtu;
4387 l2cap_logical_cfm(bredr_chan, hchan, 0);
4388 hci_conn_hold(hcon);
4390 l2cap_chan_unlock(bredr_chan);
4394 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4395 struct sk_buff *skb)
4397 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4398 struct hci_chan *hchan;
4400 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4401 le16_to_cpu(ev->handle), ev->status);
4408 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4412 amp_destroy_logical_link(hchan, ev->reason);
4415 hci_dev_unlock(hdev);
4418 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4419 struct sk_buff *skb)
4421 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4422 struct hci_conn *hcon;
4424 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4431 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4433 hcon->state = BT_CLOSED;
4437 hci_dev_unlock(hdev);
4440 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4442 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4443 struct hci_conn_params *params;
4444 struct hci_conn *conn;
4445 struct smp_irk *irk;
4448 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4452 /* All controllers implicitly stop advertising in the event of a
4453 * connection, so ensure that the state bit is cleared.
4455 hci_dev_clear_flag(hdev, HCI_LE_ADV);
4457 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
4459 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
4461 BT_ERR("No memory for new connection");
4465 conn->dst_type = ev->bdaddr_type;
4467 /* If we didn't have a hci_conn object previously
4468 * but we're in master role this must be something
4469 * initiated using a white list. Since white list based
4470 * connections are not "first class citizens" we don't
4471 * have full tracking of them. Therefore, we go ahead
4472 * with a "best effort" approach of determining the
4473 * initiator address based on the HCI_PRIVACY flag.
4476 conn->resp_addr_type = ev->bdaddr_type;
4477 bacpy(&conn->resp_addr, &ev->bdaddr);
4478 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
4479 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4480 bacpy(&conn->init_addr, &hdev->rpa);
4482 hci_copy_identity_address(hdev,
4484 &conn->init_addr_type);
4488 cancel_delayed_work(&conn->le_conn_timeout);
4492 /* Set the responder (our side) address type based on
4493 * the advertising address type.
4495 conn->resp_addr_type = hdev->adv_addr_type;
4496 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4497 bacpy(&conn->resp_addr, &hdev->random_addr);
4499 bacpy(&conn->resp_addr, &hdev->bdaddr);
4501 conn->init_addr_type = ev->bdaddr_type;
4502 bacpy(&conn->init_addr, &ev->bdaddr);
4504 /* For incoming connections, set the default minimum
4505 * and maximum connection interval. They will be used
4506 * to check if the parameters are in range and if not
4507 * trigger the connection update procedure.
4509 conn->le_conn_min_interval = hdev->le_conn_min_interval;
4510 conn->le_conn_max_interval = hdev->le_conn_max_interval;
4513 /* Lookup the identity address from the stored connection
4514 * address and address type.
4516 * When establishing connections to an identity address, the
4517 * connection procedure will store the resolvable random
4518 * address first. Now if it can be converted back into the
4519 * identity address, start using the identity address from
4520 * now on.
4521 */
4522 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4524 bacpy(&conn->dst, &irk->bdaddr);
4525 conn->dst_type = irk->addr_type;
4529 hci_le_conn_failed(conn, ev->status);
4533 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4534 addr_type = BDADDR_LE_PUBLIC;
4536 addr_type = BDADDR_LE_RANDOM;
4538 /* Drop the connection if the device is blocked */
4539 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4540 hci_conn_drop(conn);
4544 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4545 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4547 conn->sec_level = BT_SECURITY_LOW;
4548 conn->handle = __le16_to_cpu(ev->handle);
4549 conn->state = BT_CONFIG;
4551 conn->le_conn_interval = le16_to_cpu(ev->interval);
4552 conn->le_conn_latency = le16_to_cpu(ev->latency);
4553 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4555 hci_debugfs_create_conn(conn);
4556 hci_conn_add_sysfs(conn);
4559 /* The remote features procedure is defined for master
4560 * role only. So only in case of an initiated connection
4561 * request the remote features.
4563 * If the local controller supports slave-initiated features
4564 * exchange, then requesting the remote features in slave
4565 * role is possible. Otherwise just transition into the
4566 * connected state without requesting the remote features.
4567 */
4568 if (conn->out ||
4569 (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
4570 struct hci_cp_le_read_remote_features cp;
4572 cp.handle = __cpu_to_le16(conn->handle);
4574 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
4577 hci_conn_hold(conn);
4579 conn->state = BT_CONNECTED;
4580 hci_connect_cfm(conn, ev->status);
4583 hci_connect_cfm(conn, ev->status);
4586 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
4589 list_del_init(¶ms->action);
4591 hci_conn_drop(params->conn);
4592 hci_conn_put(params->conn);
4593 params->conn = NULL;
4598 hci_update_background_scan(hdev);
4599 hci_dev_unlock(hdev);
4602 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4603 struct sk_buff *skb)
4605 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4606 struct hci_conn *conn;
4608 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4615 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4617 conn->le_conn_interval = le16_to_cpu(ev->interval);
4618 conn->le_conn_latency = le16_to_cpu(ev->latency);
4619 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4622 hci_dev_unlock(hdev);
4625 /* This function requires the caller holds hdev->lock */
4626 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
4628 u8 addr_type, u8 adv_type)
4630 struct hci_conn *conn;
4631 struct hci_conn_params *params;
4633 /* If the event is not connectable don't proceed further */
4634 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4637 /* Ignore if the device is blocked */
4638 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4641 /* Most controllers will fail if we try to create new connections
4642 * while we have an existing one in slave role.
4643 */
4644 if (hdev->conn_hash.le_num_slave > 0)
4647 /* If we're not connectable only connect devices that we have in
4648 * our pend_le_conns list.
4650 params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
4655 switch (params->auto_connect) {
4656 case HCI_AUTO_CONN_DIRECT:
4657 /* Only devices advertising with ADV_DIRECT_IND are
4658 * triggering a connection attempt. This allows
4659 * incoming connections from slave devices.
4660 */
4661 if (adv_type != LE_ADV_DIRECT_IND)
4664 case HCI_AUTO_CONN_ALWAYS:
4665 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
4666 * are triggering a connection attempt. This means
4667 * that incoming connections from slave devices are
4668 * accepted and also outgoing connections to slave
4669 * devices are established when found.
4676 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4677 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
4678 if (!IS_ERR(conn)) {
4679 /* Store the pointer since we don't really have any
4680 * other owner of the object besides the params that
4681 * triggered it. This way we can abort the connection if
4682 * the parameters get removed and keep the reference
4683 * count consistent once the connection is established.
4685 params->conn = hci_conn_get(conn);
4689 switch (PTR_ERR(conn)) {
4691 /* If hci_connect() returns -EBUSY it means there is already
4692 * an LE connection attempt going on. Since controllers don't
4693 * support more than one connection attempt at a time, we
4694 * don't consider this an error case.
4698 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
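/* Common handling for LE advertising and direct advertising reports:
 * filter direct reports by our own RPA, resolve the advertiser's RPA
 * to an identity address, trigger a pending auto-connection if one is
 * requested for the device and finally emit (or merge with a cached
 * report and then emit) a mgmt Device Found event.
 */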
4705 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4706 u8 bdaddr_type, bdaddr_t *direct_addr,
4707 u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4709 struct discovery_state *d = &hdev->discovery;
4710 struct smp_irk *irk;
4711 struct hci_conn *conn;
4715 /* If the direct address is present, then this report is from
4716 * an LE Direct Advertising Report event. In that case it is
4717 * important to see if the address is matching the local
4718 * controller address.
4721 /* Only resolvable random addresses are valid for these
4722 * kind of reports and others can be ignored.
4724 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4727 /* If the controller is not using resolvable random
4728 * addresses, then this report can be ignored.
4730 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
4733 /* If the local IRK of the controller does not match
4734 * with the resolvable random address provided, then
4735 * this report can be ignored.
4737 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4741 /* Check if we need to convert to identity address */
4742 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4744 bdaddr = &irk->bdaddr;
4745 bdaddr_type = irk->addr_type;
4748 /* Check if we have been requested to connect to this device */
4749 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4750 if (conn && type == LE_ADV_IND) {
4751 /* Store report for later inclusion by
4752 * mgmt_device_connected
4754 memcpy(conn->le_adv_data, data, len);
4755 conn->le_adv_data_len = len;
4758 /* Passive scanning shouldn't trigger any device found events,
4759 * except for devices marked as CONN_REPORT for which we do send
4760 * device found events.
4762 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4763 if (type == LE_ADV_DIRECT_IND)
4766 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4767 bdaddr, bdaddr_type))
4770 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4771 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4774 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4775 rssi, flags, data, len, NULL, 0);
4779 /* When receiving non-connectable or scannable undirected
4780 * advertising reports, this means that the remote device is
4781 * not connectable, and this is clearly indicated in the
4782 * device found event.
4784 * When receiving a scan response, then there is no way to
4785 * know if the remote device is connectable or not. However
4786 * since scan responses are merged with a previously seen
4787 * advertising report, the flags field from that report
4788 * can be used.
4789 *
4790 * In the really unlikely case that a controller gets confused
4791 * and just sends a scan response event, then it is marked as
4792 * not connectable as well.
4794 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4795 type == LE_ADV_SCAN_RSP)
4796 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4800 /* If there's nothing pending either store the data from this
4801 * event or send an immediate device found event if the data
4802 * should not be stored for later.
4804 if (!has_pending_adv_report(hdev)) {
4805 /* If the report will trigger a SCAN_REQ, store it for
4806 * later merging.
4807 */
4808 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4809 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4810 rssi, flags, data, len);
4814 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4815 rssi, flags, data, len, NULL, 0);
4819 /* Check if the pending report is for the same device as the new one */
4820 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4821 bdaddr_type == d->last_adv_addr_type);
4823 /* If the pending data doesn't match this report or this isn't a
4824 * scan response (e.g. we got a duplicate ADV_IND) then force
4825 * sending of the pending data.
4827 if (type != LE_ADV_SCAN_RSP || !match) {
4828 /* Send out whatever is in the cache, but skip duplicates */
4830 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4831 d->last_adv_addr_type, NULL,
4832 d->last_adv_rssi, d->last_adv_flags,
4834 d->last_adv_data_len, NULL, 0);
4836 /* If the new report will trigger a SCAN_REQ, store it for
4837 * later merging.
4838 */
4839 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4840 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4841 rssi, flags, data, len);
4845 /* The advertising reports cannot be merged, so clear
4846 * the pending report and send out a device found event.
4848 clear_pending_adv_report(hdev);
4849 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4850 rssi, flags, data, len, NULL, 0);
4854 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4855 * the new event is a SCAN_RSP. We can therefore proceed with
4856 * sending a merged device found event.
4858 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4859 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4860 d->last_adv_data, d->last_adv_data_len, data, len);
4861 clear_pending_adv_report(hdev);
4864 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4866 u8 num_reports = skb->data[0];
4867 void *ptr = &skb->data[1];
4871 while (num_reports--) {
4872 struct hci_ev_le_advertising_info *ev = ptr;
4875 rssi = ev->data[ev->length];
4876 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4877 ev->bdaddr_type, NULL, 0, rssi,
4878 ev->data, ev->length);
4880 ptr += sizeof(*ev) + ev->length + 1;
4883 hci_dev_unlock(hdev);
4886 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
4887 struct sk_buff *skb)
4889 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
4890 struct hci_conn *conn;
4892 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4896 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4899 memcpy(conn->features[0], ev->features, 8);
4901 if (conn->state == BT_CONFIG) {
4904 /* If the local controller supports slave-initiated
4905 * features exchange, but the remote controller does
4906 * not, then it is possible that the error code 0x1a
4907 * for unsupported remote feature gets returned.
4909 * In this specific case, allow the connection to
4910 * transition into connected state and mark it as
4911 * successful.
4912 */
4913 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
4914 !conn->out && ev->status == 0x1a)
4917 status = ev->status;
4919 conn->state = BT_CONNECTED;
4920 hci_connect_cfm(conn, status);
4921 hci_conn_drop(conn);
4925 hci_dev_unlock(hdev);
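/* LE Long Term Key Request event: look up the LTK for the connection
 * and reply with it if EDiv/Rand match (both must be zero for Secure
 * Connections keys). STKs are single-use and removed after the reply;
 * without a matching key a negative reply is sent.
 */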
4928 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4930 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
4931 struct hci_cp_le_ltk_reply cp;
4932 struct hci_cp_le_ltk_neg_reply neg;
4933 struct hci_conn *conn;
4934 struct smp_ltk *ltk;
4936 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
4940 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4944 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
4948 if (smp_ltk_is_sc(ltk)) {
4949 /* With SC both EDiv and Rand are set to zero */
4950 if (ev->ediv || ev->rand)
4953 /* For non-SC keys check that EDiv and Rand match */
4954 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
4958 memcpy(cp.ltk, ltk->val, ltk->enc_size);
4959 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
4960 cp.handle = cpu_to_le16(conn->handle);
4962 conn->pending_sec_level = smp_ltk_sec_level(ltk);
4964 conn->enc_key_size = ltk->enc_size;
4966 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
4968 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
4969 * temporary key used to encrypt a connection following
4970 * pairing. It is used during the Encrypted Session Setup to
4971 * distribute the keys. Later, security can be re-established
4972 * using a distributed LTK.
4974 if (ltk->type == SMP_STK) {
4975 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
4976 list_del_rcu(<k->list);
4977 kfree_rcu(ltk, rcu);
4979 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
4982 hci_dev_unlock(hdev);
4987 neg.handle = ev->handle;
4988 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
4989 hci_dev_unlock(hdev);
4992 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4995 struct hci_cp_le_conn_param_req_neg_reply cp;
4997 cp.handle = cpu_to_le16(handle);
5000 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5004 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5005 struct sk_buff *skb)
5007 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5008 struct hci_cp_le_conn_param_req_reply cp;
5009 struct hci_conn *hcon;
5010 u16 handle, min, max, latency, timeout;
5012 handle = le16_to_cpu(ev->handle);
5013 min = le16_to_cpu(ev->interval_min);
5014 max = le16_to_cpu(ev->interval_max);
5015 latency = le16_to_cpu(ev->latency);
5016 timeout = le16_to_cpu(ev->timeout);
5018 hcon = hci_conn_hash_lookup_handle(hdev, handle);
5019 if (!hcon || hcon->state != BT_CONNECTED)
5020 return send_conn_param_neg_reply(hdev, handle,
5021 HCI_ERROR_UNKNOWN_CONN_ID);
5023 if (hci_check_conn_params(min, max, latency, timeout))
5024 return send_conn_param_neg_reply(hdev, handle,
5025 HCI_ERROR_INVALID_LL_PARAMS);
5027 if (hcon->role == HCI_ROLE_MASTER) {
5028 struct hci_conn_params *params;
5033 params = hci_conn_params_lookup(hdev, &hcon->dst,
5036 params->conn_min_interval = min;
5037 params->conn_max_interval = max;
5038 params->conn_latency = latency;
5039 params->supervision_timeout = timeout;
5045 hci_dev_unlock(hdev);
5047 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5048 store_hint, min, max, latency, timeout);
5051 cp.handle = ev->handle;
5052 cp.interval_min = ev->interval_min;
5053 cp.interval_max = ev->interval_max;
5054 cp.latency = ev->latency;
5055 cp.timeout = ev->timeout;
5059 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
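/* HCI_EV_LE_DIRECT_ADV_REPORT: directed advertising packets addressed to
 * us. Each entry in the report is passed to the common advertising report
 * processing.
 */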
5062 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5063 struct sk_buff *skb)
5065 u8 num_reports = skb->data[0];
5066 void *ptr = &skb->data[1];
	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_direct_adv_info *ev = ptr;

		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
				   ev->bdaddr_type, &ev->direct_addr,
				   ev->direct_addr_type, ev->rssi, NULL, 0);

		ptr += sizeof(*ev);
	}

	hci_dev_unlock(hdev);
}
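/* All LE specific events arrive wrapped in HCI_EV_LE_META; dispatch on the
 * subevent code after stripping the meta event header.
 */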
5083 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5085 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5087 skb_pull(skb, sizeof(*le_ev));
5089 switch (le_ev->subevent) {
5090 case HCI_EV_LE_CONN_COMPLETE:
5091 hci_le_conn_complete_evt(hdev, skb);
5094 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5095 hci_le_conn_update_complete_evt(hdev, skb);
5098 case HCI_EV_LE_ADVERTISING_REPORT:
5099 hci_le_adv_report_evt(hdev, skb);
5102 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5103 hci_le_remote_feat_complete_evt(hdev, skb);
5106 case HCI_EV_LE_LTK_REQ:
5107 hci_le_ltk_request_evt(hdev, skb);
5110 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5111 hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	default:
		break;
	}
}
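/* HCI_EV_CHANNEL_SELECTED (AMP): a channel has been selected for the given
 * physical link handle; continue by reading the local AMP assoc data for
 * the connection.
 */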
5123 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5125 struct hci_ev_channel_selected *ev = (void *) skb->data;
5126 struct hci_conn *hcon;
5128 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5130 skb_pull(skb, sizeof(*ev));
	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	amp_read_loc_assoc_final_data(hdev, hcon);
}
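/* Check whether the last received event completes the currently pending
 * command: either it matches the specific event the command was waiting
 * for, or it is a Command Complete event carrying the expected opcode.
 * Returns true if the skb can be handed to the request completion
 * callback.
 */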
5139 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5140 u8 event, struct sk_buff *skb)
5142 struct hci_ev_cmd_complete *ev;
5143 struct hci_event_hdr *hdr;
	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}
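/* Main HCI event dispatcher: called for every event packet received from
 * the controller. Completes any pending request tied to this event, routes
 * the event to its specific handler and finally updates the event
 * statistics.
 */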
5184 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5186 struct hci_event_hdr *hdr = (void *) skb->data;
5187 hci_req_complete_t req_complete = NULL;
5188 hci_req_complete_skb_t req_complete_skb = NULL;
5189 struct sk_buff *orig_skb = NULL;
5190 u8 status = 0, event = hdr->evt, req_evt = 0;
5191 u16 opcode = HCI_OP_NOP;
5193 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
5194 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5195 opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}
5201 /* If it looks like we might end up having to call
5202 * req_complete_skb, store a pristine copy of the skb since the
5203 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
5206 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
5207 event == HCI_EV_CMD_COMPLETE)
5208 orig_skb = skb_clone(skb, GFP_KERNEL);
5210 skb_pull(skb, HCI_EVENT_HDR_SIZE);
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
5214 hci_inquiry_complete_evt(hdev, skb);
5217 case HCI_EV_INQUIRY_RESULT:
5218 hci_inquiry_result_evt(hdev, skb);
5221 case HCI_EV_CONN_COMPLETE:
5222 hci_conn_complete_evt(hdev, skb);
5225 case HCI_EV_CONN_REQUEST:
5226 hci_conn_request_evt(hdev, skb);
5229 case HCI_EV_DISCONN_COMPLETE:
5230 hci_disconn_complete_evt(hdev, skb);
5233 case HCI_EV_AUTH_COMPLETE:
5234 hci_auth_complete_evt(hdev, skb);
5237 case HCI_EV_REMOTE_NAME:
5238 hci_remote_name_evt(hdev, skb);
5241 case HCI_EV_ENCRYPT_CHANGE:
5242 hci_encrypt_change_evt(hdev, skb);
5245 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
5246 hci_change_link_key_complete_evt(hdev, skb);
5249 case HCI_EV_REMOTE_FEATURES:
5250 hci_remote_features_evt(hdev, skb);
5253 case HCI_EV_CMD_COMPLETE:
5254 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
5255 &req_complete, &req_complete_skb);
5258 case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
5263 case HCI_EV_HARDWARE_ERROR:
5264 hci_hardware_error_evt(hdev, skb);
5267 case HCI_EV_ROLE_CHANGE:
5268 hci_role_change_evt(hdev, skb);
5271 case HCI_EV_NUM_COMP_PKTS:
5272 hci_num_comp_pkts_evt(hdev, skb);
5275 case HCI_EV_MODE_CHANGE:
5276 hci_mode_change_evt(hdev, skb);
5279 case HCI_EV_PIN_CODE_REQ:
5280 hci_pin_code_request_evt(hdev, skb);
5283 case HCI_EV_LINK_KEY_REQ:
5284 hci_link_key_request_evt(hdev, skb);
5287 case HCI_EV_LINK_KEY_NOTIFY:
5288 hci_link_key_notify_evt(hdev, skb);
5291 case HCI_EV_CLOCK_OFFSET:
5292 hci_clock_offset_evt(hdev, skb);
5295 case HCI_EV_PKT_TYPE_CHANGE:
5296 hci_pkt_type_change_evt(hdev, skb);
5299 case HCI_EV_PSCAN_REP_MODE:
5300 hci_pscan_rep_mode_evt(hdev, skb);
5303 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
5304 hci_inquiry_result_with_rssi_evt(hdev, skb);
5307 case HCI_EV_REMOTE_EXT_FEATURES:
5308 hci_remote_ext_features_evt(hdev, skb);
5311 case HCI_EV_SYNC_CONN_COMPLETE:
5312 hci_sync_conn_complete_evt(hdev, skb);
5315 case HCI_EV_EXTENDED_INQUIRY_RESULT:
5316 hci_extended_inquiry_result_evt(hdev, skb);
5319 case HCI_EV_KEY_REFRESH_COMPLETE:
5320 hci_key_refresh_complete_evt(hdev, skb);
5323 case HCI_EV_IO_CAPA_REQUEST:
5324 hci_io_capa_request_evt(hdev, skb);
5327 case HCI_EV_IO_CAPA_REPLY:
5328 hci_io_capa_reply_evt(hdev, skb);
5331 case HCI_EV_USER_CONFIRM_REQUEST:
5332 hci_user_confirm_request_evt(hdev, skb);
5335 case HCI_EV_USER_PASSKEY_REQUEST:
5336 hci_user_passkey_request_evt(hdev, skb);
5339 case HCI_EV_USER_PASSKEY_NOTIFY:
5340 hci_user_passkey_notify_evt(hdev, skb);
5343 case HCI_EV_KEYPRESS_NOTIFY:
5344 hci_keypress_notify_evt(hdev, skb);
5347 case HCI_EV_SIMPLE_PAIR_COMPLETE:
5348 hci_simple_pair_complete_evt(hdev, skb);
5351 case HCI_EV_REMOTE_HOST_FEATURES:
5352 hci_remote_host_features_evt(hdev, skb);
5355 case HCI_EV_LE_META:
5356 hci_le_meta_evt(hdev, skb);
5359 case HCI_EV_CHANNEL_SELECTED:
5360 hci_chan_selected_evt(hdev, skb);
5363 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
5364 hci_remote_oob_data_request_evt(hdev, skb);
5367 case HCI_EV_PHY_LINK_COMPLETE:
5368 hci_phy_link_complete_evt(hdev, skb);
5371 case HCI_EV_LOGICAL_LINK_COMPLETE:
5372 hci_loglink_complete_evt(hdev, skb);
5375 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
5376 hci_disconn_loglink_complete_evt(hdev, skb);
5379 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
5380 hci_disconn_phylink_complete_evt(hdev, skb);
5383 case HCI_EV_NUM_COMP_BLOCKS:
5384 hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}

		req_complete_skb(hdev, status, opcode, orig_skb);
	}

	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}