2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
43 #include "hci_request.h"
44 #include "hci_debugfs.h"
49 #include "hci_codec.h"
51 static void hci_rx_work(struct work_struct *work);
52 static void hci_cmd_work(struct work_struct *work);
53 static void hci_tx_work(struct work_struct *work);
56 LIST_HEAD(hci_dev_list);
57 DEFINE_RWLOCK(hci_dev_list_lock);
59 /* HCI callback list */
60 LIST_HEAD(hci_cb_list);
61 DEFINE_MUTEX(hci_cb_list_lock);
63 /* HCI ID Numbering */
64 static DEFINE_IDA(hci_index_ida);
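/* The small *_req() helpers below each queue a single HCI command on the
 * passed struct hci_request; they are run synchronously via hci_req_sync()
 * from the legacy ioctl handlers further down (see hci_dev_cmd()).
 */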
66 static int hci_scan_req(struct hci_request *req, unsigned long opt)
70 BT_DBG("%s %x", req->hdev->name, scan);
72 /* Inquiry and Page scans */
73 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
77 static int hci_auth_req(struct hci_request *req, unsigned long opt)
81 BT_DBG("%s %x", req->hdev->name, auth);
84 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
88 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
92 BT_DBG("%s %x", req->hdev->name, encrypt);
95 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
99 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
101 __le16 policy = cpu_to_le16(opt);
103 BT_DBG("%s %x", req->hdev->name, policy);
105 /* Default link policy */
106 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
110 /* Get HCI device by index.
111 * Device is held on return. */
112 struct hci_dev *hci_dev_get(int index)
114 struct hci_dev *hdev = NULL, *d;
121 read_lock(&hci_dev_list_lock);
122 list_for_each_entry(d, &hci_dev_list, list) {
123 if (d->id == index) {
124 hdev = hci_dev_hold(d);
128 read_unlock(&hci_dev_list_lock);
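/* A minimal usage sketch: the reference taken by hci_dev_get() must be
 * balanced with hci_dev_put() once the caller is done with the device, e.g.
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */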
132 /* ---- Inquiry support ---- */
134 bool hci_discovery_active(struct hci_dev *hdev)
136 struct discovery_state *discov = &hdev->discovery;
138 switch (discov->state) {
139 case DISCOVERY_FINDING:
140 case DISCOVERY_RESOLVING:
148 void hci_discovery_set_state(struct hci_dev *hdev, int state)
150 int old_state = hdev->discovery.state;
152 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
154 if (old_state == state)
157 hdev->discovery.state = state;
160 case DISCOVERY_STOPPED:
161 hci_update_passive_scan(hdev);
163 if (old_state != DISCOVERY_STARTING)
164 mgmt_discovering(hdev, 0);
166 case DISCOVERY_STARTING:
168 case DISCOVERY_FINDING:
169 mgmt_discovering(hdev, 1);
171 case DISCOVERY_RESOLVING:
173 case DISCOVERY_STOPPING:
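/* Discovery state machine note: the usual progression is STOPPED ->
 * STARTING -> FINDING (optionally -> RESOLVING) -> STOPPING -> STOPPED.
 * mgmt_discovering() notifies userspace when FINDING is entered and when
 * STOPPED is reached again, unless the transition came directly from
 * STARTING (i.e. discovery never actually started).
 */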
178 void hci_inquiry_cache_flush(struct hci_dev *hdev)
180 struct discovery_state *cache = &hdev->discovery;
181 struct inquiry_entry *p, *n;
183 list_for_each_entry_safe(p, n, &cache->all, all) {
188 INIT_LIST_HEAD(&cache->unknown);
189 INIT_LIST_HEAD(&cache->resolve);
192 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
195 struct discovery_state *cache = &hdev->discovery;
196 struct inquiry_entry *e;
198 BT_DBG("cache %p, %pMR", cache, bdaddr);
200 list_for_each_entry(e, &cache->all, all) {
201 if (!bacmp(&e->data.bdaddr, bdaddr))
208 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
211 struct discovery_state *cache = &hdev->discovery;
212 struct inquiry_entry *e;
214 BT_DBG("cache %p, %pMR", cache, bdaddr);
216 list_for_each_entry(e, &cache->unknown, list) {
217 if (!bacmp(&e->data.bdaddr, bdaddr))
224 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
228 struct discovery_state *cache = &hdev->discovery;
229 struct inquiry_entry *e;
231 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
233 list_for_each_entry(e, &cache->resolve, list) {
234 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
236 if (!bacmp(&e->data.bdaddr, bdaddr))
243 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
244 struct inquiry_entry *ie)
246 struct discovery_state *cache = &hdev->discovery;
247 struct list_head *pos = &cache->resolve;
248 struct inquiry_entry *p;
252 list_for_each_entry(p, &cache->resolve, list) {
253 if (p->name_state != NAME_PENDING &&
254 abs(p->data.rssi) >= abs(ie->data.rssi))
259 list_add(&ie->list, pos);
262 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
265 struct discovery_state *cache = &hdev->discovery;
266 struct inquiry_entry *ie;
269 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
271 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
274 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
276 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
278 if (!ie->data.ssp_mode)
279 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
281 if (ie->name_state == NAME_NEEDED &&
282 data->rssi != ie->data.rssi) {
283 ie->data.rssi = data->rssi;
284 hci_inquiry_cache_update_resolve(hdev, ie);
290 /* Entry not in the cache. Add new one. */
291 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
293 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
297 list_add(&ie->all, &cache->all);
300 ie->name_state = NAME_KNOWN;
302 ie->name_state = NAME_NOT_KNOWN;
303 list_add(&ie->list, &cache->unknown);
307 if (name_known && ie->name_state != NAME_KNOWN &&
308 ie->name_state != NAME_PENDING) {
309 ie->name_state = NAME_KNOWN;
313 memcpy(&ie->data, data, sizeof(*data));
314 ie->timestamp = jiffies;
315 cache->timestamp = jiffies;
317 if (ie->name_state == NAME_NOT_KNOWN)
318 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
324 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
326 struct discovery_state *cache = &hdev->discovery;
327 struct inquiry_info *info = (struct inquiry_info *) buf;
328 struct inquiry_entry *e;
331 list_for_each_entry(e, &cache->all, all) {
332 struct inquiry_data *data = &e->data;
337 bacpy(&info->bdaddr, &data->bdaddr);
338 info->pscan_rep_mode = data->pscan_rep_mode;
339 info->pscan_period_mode = data->pscan_period_mode;
340 info->pscan_mode = data->pscan_mode;
341 memcpy(info->dev_class, data->dev_class, 3);
342 info->clock_offset = data->clock_offset;
348 BT_DBG("cache %p, copied %d", cache, copied);
352 static int hci_inq_req(struct hci_request *req, unsigned long opt)
354 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
355 struct hci_dev *hdev = req->hdev;
356 struct hci_cp_inquiry cp;
358 BT_DBG("%s", hdev->name);
360 if (test_bit(HCI_INQUIRY, &hdev->flags))
364 memcpy(&cp.lap, &ir->lap, 3);
365 cp.length = ir->length;
366 cp.num_rsp = ir->num_rsp;
367 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
372 int hci_inquiry(void __user *arg)
374 __u8 __user *ptr = arg;
375 struct hci_inquiry_req ir;
376 struct hci_dev *hdev;
377 int err = 0, do_inquiry = 0, max_rsp;
381 if (copy_from_user(&ir, ptr, sizeof(ir)))
384 hdev = hci_dev_get(ir.dev_id);
388 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
393 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
398 if (hdev->dev_type != HCI_PRIMARY) {
403 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
408 /* Restrict maximum inquiry length to 60 seconds */
409 if (ir.length > 60) {
415 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
416 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
417 hci_inquiry_cache_flush(hdev);
420 hci_dev_unlock(hdev);
422 timeo = ir.length * msecs_to_jiffies(2000);
425 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
430 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
431 * cleared). If it is interrupted by a signal, return -EINTR.
433 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
434 TASK_INTERRUPTIBLE)) {
440 /* for unlimited number of responses we will use buffer with
443 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
445 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
446 * copy it to the user space.
448 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
455 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
456 hci_dev_unlock(hdev);
458 BT_DBG("num_rsp %d", ir.num_rsp);
460 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
462 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
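/* A userspace-side sketch of this ioctl (assumes an open raw HCI socket fd;
 * the LAP below is the General Inquiry Access Code 0x9e8b33):
 *
 *	struct hci_inquiry_req *ir;
 *	void *buf = calloc(1, sizeof(*ir) + 255 * sizeof(struct inquiry_info));
 *
 *	ir = buf;
 *	ir->dev_id  = 0;
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33; ir->lap[1] = 0x8b; ir->lap[2] = 0x9e;
 *	ir->length  = 8;
 *	ir->num_rsp = 255;
 *	ioctl(fd, HCIINQUIRY, buf);
 */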
475 static int hci_dev_do_open(struct hci_dev *hdev)
479 BT_DBG("%s %p", hdev->name, hdev);
481 hci_req_sync_lock(hdev);
483 ret = hci_dev_open_sync(hdev);
485 hci_req_sync_unlock(hdev);
489 /* ---- HCI ioctl helpers ---- */
491 int hci_dev_open(__u16 dev)
493 struct hci_dev *hdev;
496 hdev = hci_dev_get(dev);
500 /* Devices that are marked as unconfigured can only be powered
501 * up as user channel. Trying to bring them up as normal devices
502 will result in a failure. Only user channel operation is
505 * When this function is called for a user channel, the flag
506 * HCI_USER_CHANNEL will be set first before attempting to
509 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
510 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
515 /* We need to ensure that no other power on/off work is pending
516 * before proceeding to call hci_dev_do_open. This is
517 * particularly important if the setup procedure has not yet
520 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
521 cancel_delayed_work(&hdev->power_off);
523 /* After this call it is guaranteed that the setup procedure
524 * has finished. This means that error conditions like RFKILL
525 * or no valid public or static random address apply.
527 flush_workqueue(hdev->req_workqueue);
529 /* For controllers not using the management interface and that
530 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
531 * so that pairing works for them. Once the management interface
532 * is in use this bit will be cleared again and userspace has
533 * to explicitly enable it.
535 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
536 !hci_dev_test_flag(hdev, HCI_MGMT))
537 hci_dev_set_flag(hdev, HCI_BONDABLE);
539 err = hci_dev_do_open(hdev);
546 int hci_dev_do_close(struct hci_dev *hdev)
550 BT_DBG("%s %p", hdev->name, hdev);
552 hci_req_sync_lock(hdev);
554 err = hci_dev_close_sync(hdev);
556 hci_req_sync_unlock(hdev);
561 int hci_dev_close(__u16 dev)
563 struct hci_dev *hdev;
566 hdev = hci_dev_get(dev);
570 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
575 cancel_work_sync(&hdev->power_on);
576 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
577 cancel_delayed_work(&hdev->power_off);
579 err = hci_dev_do_close(hdev);
586 static int hci_dev_do_reset(struct hci_dev *hdev)
590 BT_DBG("%s %p", hdev->name, hdev);
592 hci_req_sync_lock(hdev);
595 skb_queue_purge(&hdev->rx_q);
596 skb_queue_purge(&hdev->cmd_q);
598 /* Cancel these to avoid queueing non-chained pending work */
599 hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
602 * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
603 * queue_delayed_work(&hdev->{cmd,ncmd}_timer)
605 * inside RCU section to see the flag or complete scheduling.
608 /* Explicitly cancel works in case scheduled after setting the flag. */
609 cancel_delayed_work(&hdev->cmd_timer);
610 cancel_delayed_work(&hdev->ncmd_timer);
612 /* Avoid potential lockdep warnings from the *_flush() calls by
613 * ensuring the workqueue is empty up front.
615 drain_workqueue(hdev->workqueue);
618 hci_inquiry_cache_flush(hdev);
619 hci_conn_hash_flush(hdev);
620 hci_dev_unlock(hdev);
625 hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
627 atomic_set(&hdev->cmd_cnt, 1);
633 ret = hci_reset_sync(hdev);
635 hci_req_sync_unlock(hdev);
639 int hci_dev_reset(__u16 dev)
641 struct hci_dev *hdev;
644 hdev = hci_dev_get(dev);
648 if (!test_bit(HCI_UP, &hdev->flags)) {
653 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
658 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
663 err = hci_dev_do_reset(hdev);
670 int hci_dev_reset_stat(__u16 dev)
672 struct hci_dev *hdev;
675 hdev = hci_dev_get(dev);
679 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
684 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
689 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
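/* hci_update_passive_scan_state() keeps the mgmt HCI_CONNECTABLE and
 * HCI_DISCOVERABLE flags in sync when the scan enable setting was changed
 * through the legacy HCISETSCAN ioctl rather than through mgmt:
 * SCAN_PAGE maps to HCI_CONNECTABLE, SCAN_INQUIRY to HCI_DISCOVERABLE.
 */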
696 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
698 bool conn_changed, discov_changed;
700 BT_DBG("%s scan 0x%02x", hdev->name, scan);
702 if ((scan & SCAN_PAGE))
703 conn_changed = !hci_dev_test_and_set_flag(hdev,
706 conn_changed = hci_dev_test_and_clear_flag(hdev,
709 if ((scan & SCAN_INQUIRY)) {
710 discov_changed = !hci_dev_test_and_set_flag(hdev,
713 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
714 discov_changed = hci_dev_test_and_clear_flag(hdev,
718 if (!hci_dev_test_flag(hdev, HCI_MGMT))
721 if (conn_changed || discov_changed) {
722 /* In case this was disabled through mgmt */
723 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
725 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
726 hci_update_adv_data(hdev, hdev->cur_adv_instance);
728 mgmt_new_settings(hdev);
732 int hci_dev_cmd(unsigned int cmd, void __user *arg)
734 struct hci_dev *hdev;
735 struct hci_dev_req dr;
738 if (copy_from_user(&dr, arg, sizeof(dr)))
741 hdev = hci_dev_get(dr.dev_id);
745 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
750 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
755 if (hdev->dev_type != HCI_PRIMARY) {
760 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
767 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
768 HCI_INIT_TIMEOUT, NULL);
772 if (!lmp_encrypt_capable(hdev)) {
777 if (!test_bit(HCI_AUTH, &hdev->flags)) {
778 /* Auth must be enabled first */
779 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
780 HCI_INIT_TIMEOUT, NULL);
785 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
786 HCI_INIT_TIMEOUT, NULL);
790 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
791 HCI_INIT_TIMEOUT, NULL);
793 /* Ensure that the connectable and discoverable states
794 * get correctly modified as this was a non-mgmt change.
797 hci_update_passive_scan_state(hdev, dr.dev_opt);
801 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
802 HCI_INIT_TIMEOUT, NULL);
806 hdev->link_mode = ((__u16) dr.dev_opt) &
807 (HCI_LM_MASTER | HCI_LM_ACCEPT);
811 if (hdev->pkt_type == (__u16) dr.dev_opt)
814 hdev->pkt_type = (__u16) dr.dev_opt;
815 mgmt_phy_configuration_changed(hdev, NULL);
819 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
820 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
824 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
825 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
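/* For HCISETACL/HCISETSCO the 32-bit dev_opt carries two packed 16-bit
 * values which are unpacked above as consecutive __u16 words: the second
 * word is taken as the MTU and the first as the packet count.
 */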
838 int hci_get_dev_list(void __user *arg)
840 struct hci_dev *hdev;
841 struct hci_dev_list_req *dl;
842 struct hci_dev_req *dr;
843 int n = 0, size, err;
846 if (get_user(dev_num, (__u16 __user *) arg))
849 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
852 size = sizeof(*dl) + dev_num * sizeof(*dr);
854 dl = kzalloc(size, GFP_KERNEL);
860 read_lock(&hci_dev_list_lock);
861 list_for_each_entry(hdev, &hci_dev_list, list) {
862 unsigned long flags = hdev->flags;
864 /* When the auto-off is configured it means the transport
865 * is running, but in that case still indicate that the
866 * device is actually down.
868 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
869 flags &= ~BIT(HCI_UP);
871 (dr + n)->dev_id = hdev->id;
872 (dr + n)->dev_opt = flags;
877 read_unlock(&hci_dev_list_lock);
880 size = sizeof(*dl) + n * sizeof(*dr);
882 err = copy_to_user(arg, dl, size);
885 return err ? -EFAULT : 0;
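/* A userspace-side sketch of HCIGETDEVLIST (hypothetical caller; dev_num is
 * updated by the kernel to the number of entries actually filled in):
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = calloc(1, sizeof(*dl) + 16 * sizeof(struct hci_dev_req));
 *	dl->dev_num = 16;
 *	if (!ioctl(sock, HCIGETDEVLIST, dl))
 *		... iterate dl->dev_req[0 .. dl->dev_num - 1] ...
 */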
888 int hci_get_dev_info(void __user *arg)
890 struct hci_dev *hdev;
891 struct hci_dev_info di;
895 if (copy_from_user(&di, arg, sizeof(di)))
898 hdev = hci_dev_get(di.dev_id);
902 /* When the auto-off is configured it means the transport
903 * is running, but in that case still indicate that the
904 * device is actually down.
906 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
907 flags = hdev->flags & ~BIT(HCI_UP);
911 strcpy(di.name, hdev->name);
912 di.bdaddr = hdev->bdaddr;
913 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
915 di.pkt_type = hdev->pkt_type;
916 if (lmp_bredr_capable(hdev)) {
917 di.acl_mtu = hdev->acl_mtu;
918 di.acl_pkts = hdev->acl_pkts;
919 di.sco_mtu = hdev->sco_mtu;
920 di.sco_pkts = hdev->sco_pkts;
922 di.acl_mtu = hdev->le_mtu;
923 di.acl_pkts = hdev->le_pkts;
927 di.link_policy = hdev->link_policy;
928 di.link_mode = hdev->link_mode;
930 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
931 memcpy(&di.features, &hdev->features, sizeof(di.features));
933 if (copy_to_user(arg, &di, sizeof(di)))
941 /* ---- Interface to HCI drivers ---- */
943 static int hci_rfkill_set_block(void *data, bool blocked)
945 struct hci_dev *hdev = data;
947 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
949 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
953 hci_dev_set_flag(hdev, HCI_RFKILLED);
954 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
955 !hci_dev_test_flag(hdev, HCI_CONFIG))
956 hci_dev_do_close(hdev);
958 hci_dev_clear_flag(hdev, HCI_RFKILLED);
964 static const struct rfkill_ops hci_rfkill_ops = {
965 .set_block = hci_rfkill_set_block,
968 static void hci_power_on(struct work_struct *work)
970 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
973 BT_DBG("%s", hdev->name);
975 if (test_bit(HCI_UP, &hdev->flags) &&
976 hci_dev_test_flag(hdev, HCI_MGMT) &&
977 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
978 cancel_delayed_work(&hdev->power_off);
979 err = hci_powered_update_sync(hdev);
980 mgmt_power_on(hdev, err);
984 err = hci_dev_do_open(hdev);
987 mgmt_set_powered_failed(hdev, err);
988 hci_dev_unlock(hdev);
992 /* During the HCI setup phase, a few error conditions are
993 * ignored and they need to be checked now. If they are still
994 * valid, it is important to turn the device back off.
996 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
997 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
998 (hdev->dev_type == HCI_PRIMARY &&
999 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1000 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
1001 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
1002 hci_dev_do_close(hdev);
1003 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
1004 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1005 HCI_AUTO_OFF_TIMEOUT);
1008 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
1009 /* For unconfigured devices, set the HCI_RAW flag
1010 * so that userspace can easily identify them.
1012 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1013 set_bit(HCI_RAW, &hdev->flags);
1015 /* For fully configured devices, this will send
1016 * the Index Added event. For unconfigured devices,
1017 * it will send an Unconfigured Index Added event.
1019 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
1020 * and no event will be sent.
1022 mgmt_index_added(hdev);
1023 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
1024 /* Once the controller is configured, it
1025 * is important to clear the HCI_RAW flag.
1027 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1028 clear_bit(HCI_RAW, &hdev->flags);
1030 /* Powering on the controller with HCI_CONFIG set only
1031 * happens with the transition from unconfigured to
1032 * configured. This will send the Index Added event.
1034 mgmt_index_added(hdev);
1038 static void hci_power_off(struct work_struct *work)
1040 struct hci_dev *hdev = container_of(work, struct hci_dev,
1043 BT_DBG("%s", hdev->name);
1045 hci_dev_do_close(hdev);
1048 static void hci_error_reset(struct work_struct *work)
1050 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1052 BT_DBG("%s", hdev->name);
1055 hdev->hw_error(hdev, hdev->hw_error_code);
1057 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1059 if (hci_dev_do_close(hdev))
1062 hci_dev_do_open(hdev);
1065 void hci_uuids_clear(struct hci_dev *hdev)
1067 struct bt_uuid *uuid, *tmp;
1069 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1070 list_del(&uuid->list);
1075 void hci_link_keys_clear(struct hci_dev *hdev)
1077 struct link_key *key;
1079 list_for_each_entry(key, &hdev->link_keys, list) {
1080 list_del_rcu(&key->list);
1081 kfree_rcu(key, rcu);
1085 void hci_smp_ltks_clear(struct hci_dev *hdev)
1089 list_for_each_entry(k, &hdev->long_term_keys, list) {
1090 list_del_rcu(&k->list);
1095 void hci_smp_irks_clear(struct hci_dev *hdev)
1099 list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
1100 list_del_rcu(&k->list);
1105 void hci_blocked_keys_clear(struct hci_dev *hdev)
1107 struct blocked_key *b;
1109 list_for_each_entry(b, &hdev->blocked_keys, list) {
1110 list_del_rcu(&b->list);
1115 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1117 bool blocked = false;
1118 struct blocked_key *b;
1121 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1122 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1132 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1137 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1138 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1141 if (hci_is_blocked_key(hdev,
1142 HCI_BLOCKED_KEY_TYPE_LINKKEY,
1144 bt_dev_warn_ratelimited(hdev,
1145 "Link key blocked for %pMR",
1158 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1159 u8 key_type, u8 old_key_type)
1162 if (key_type < 0x03)
1165 /* Debug keys are insecure so don't store them persistently */
1166 if (key_type == HCI_LK_DEBUG_COMBINATION)
1169 /* Changed combination key and there's no previous one */
1170 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1173 /* Security mode 3 case */
1177 /* BR/EDR key derived using SC from an LE link */
1178 if (conn->type == LE_LINK)
1181 /* Neither local nor remote side had no-bonding as requirement */
1182 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1185 /* Local side had dedicated bonding as requirement */
1186 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1189 /* Remote side had dedicated bonding as requirement */
1190 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1193 /* If none of the above criteria match, then don't store the key
1198 static u8 ltk_role(u8 type)
1200 if (type == SMP_LTK)
1201 return HCI_ROLE_MASTER;
1203 return HCI_ROLE_SLAVE;
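/* An LTK matches in hci_find_ltk() below if the address and address type
 * match and either it is a Secure Connections key (valid for both roles) or
 * ltk_role() of its type equals the requested role: SMP_LTK keys are used in
 * the central (master) role, all other types in the peripheral (slave) role.
 */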
1206 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1207 u8 addr_type, u8 role)
1212 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1213 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1216 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1219 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1221 bt_dev_warn_ratelimited(hdev,
1222 "LTK blocked for %pMR",
1235 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1237 struct smp_irk *irk_to_return = NULL;
1238 struct smp_irk *irk;
1241 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1242 if (!bacmp(&irk->rpa, rpa)) {
1243 irk_to_return = irk;
1248 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1249 if (smp_irk_matches(hdev, irk->val, rpa)) {
1250 bacpy(&irk->rpa, rpa);
1251 irk_to_return = irk;
1257 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1258 irk_to_return->val)) {
1259 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1260 &irk_to_return->bdaddr);
1261 irk_to_return = NULL;
1266 return irk_to_return;
1269 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1272 struct smp_irk *irk_to_return = NULL;
1273 struct smp_irk *irk;
1275 /* Identity Address must be public or static random */
1276 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1280 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1281 if (addr_type == irk->addr_type &&
1282 bacmp(bdaddr, &irk->bdaddr) == 0) {
1283 irk_to_return = irk;
1290 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1291 irk_to_return->val)) {
1292 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1293 &irk_to_return->bdaddr);
1294 irk_to_return = NULL;
1299 return irk_to_return;
1302 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1303 bdaddr_t *bdaddr, u8 *val, u8 type,
1304 u8 pin_len, bool *persistent)
1306 struct link_key *key, *old_key;
1309 old_key = hci_find_link_key(hdev, bdaddr);
1311 old_key_type = old_key->type;
1314 old_key_type = conn ? conn->key_type : 0xff;
1315 key = kzalloc(sizeof(*key), GFP_KERNEL);
1318 list_add_rcu(&key->list, &hdev->link_keys);
1321 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1323 /* Some buggy controller combinations generate a changed
1324 * combination key for legacy pairing even when there's no
1326 if (type == HCI_LK_CHANGED_COMBINATION &&
1327 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1328 type = HCI_LK_COMBINATION;
1330 conn->key_type = type;
1333 bacpy(&key->bdaddr, bdaddr);
1334 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1335 key->pin_len = pin_len;
1337 if (type == HCI_LK_CHANGED_COMBINATION)
1338 key->type = old_key_type;
1343 *persistent = hci_persistent_key(hdev, conn, type,
1349 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1350 u8 addr_type, u8 type, u8 authenticated,
1351 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1353 struct smp_ltk *key, *old_key;
1354 u8 role = ltk_role(type);
1356 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1360 key = kzalloc(sizeof(*key), GFP_KERNEL);
1363 list_add_rcu(&key->list, &hdev->long_term_keys);
1366 bacpy(&key->bdaddr, bdaddr);
1367 key->bdaddr_type = addr_type;
1368 memcpy(key->val, tk, sizeof(key->val));
1369 key->authenticated = authenticated;
1372 key->enc_size = enc_size;
1378 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1379 u8 addr_type, u8 val[16], bdaddr_t *rpa)
1381 struct smp_irk *irk;
1383 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1385 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1389 bacpy(&irk->bdaddr, bdaddr);
1390 irk->addr_type = addr_type;
1392 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1395 memcpy(irk->val, val, 16);
1396 bacpy(&irk->rpa, rpa);
1401 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1403 struct link_key *key;
1405 key = hci_find_link_key(hdev, bdaddr);
1409 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1411 list_del_rcu(&key->list);
1412 kfree_rcu(key, rcu);
1417 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1422 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1423 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1426 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1428 list_del_rcu(&k->list);
1433 return removed ? 0 : -ENOENT;
1436 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1440 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
1441 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1444 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1446 list_del_rcu(&k->list);
1451 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1454 struct smp_irk *irk;
1457 if (type == BDADDR_BREDR) {
1458 if (hci_find_link_key(hdev, bdaddr))
1463 /* Convert to HCI addr type which struct smp_ltk uses */
1464 if (type == BDADDR_LE_PUBLIC)
1465 addr_type = ADDR_LE_DEV_PUBLIC;
1467 addr_type = ADDR_LE_DEV_RANDOM;
1469 irk = hci_get_irk(hdev, bdaddr, addr_type);
1471 bdaddr = &irk->bdaddr;
1472 addr_type = irk->addr_type;
1476 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1477 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1487 /* HCI command timer function */
1488 static void hci_cmd_timeout(struct work_struct *work)
1490 struct hci_dev *hdev = container_of(work, struct hci_dev,
1493 if (hdev->sent_cmd) {
1494 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1495 u16 opcode = __le16_to_cpu(sent->opcode);
1497 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1499 bt_dev_err(hdev, "command tx timeout");
1502 if (hdev->cmd_timeout)
1503 hdev->cmd_timeout(hdev);
1505 atomic_set(&hdev->cmd_cnt, 1);
1506 queue_work(hdev->workqueue, &hdev->cmd_work);
1509 /* HCI ncmd timer function */
1510 static void hci_ncmd_timeout(struct work_struct *work)
1512 struct hci_dev *hdev = container_of(work, struct hci_dev,
1515 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1517 /* During HCI_INIT phase no events can be injected if the ncmd timer
1518 * triggers since the procedure has its own timeout handling.
1520 if (test_bit(HCI_INIT, &hdev->flags))
1523 /* This is an irrecoverable state; inject a hardware error event */
1524 hci_reset_dev(hdev);
1527 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1528 bdaddr_t *bdaddr, u8 bdaddr_type)
1530 struct oob_data *data;
1532 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1533 if (bacmp(bdaddr, &data->bdaddr) != 0)
1535 if (data->bdaddr_type != bdaddr_type)
1543 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1546 struct oob_data *data;
1548 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1552 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1554 list_del(&data->list);
1560 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1562 struct oob_data *data, *n;
1564 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1565 list_del(&data->list);
1570 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1571 u8 bdaddr_type, u8 *hash192, u8 *rand192,
1572 u8 *hash256, u8 *rand256)
1574 struct oob_data *data;
1576 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1578 data = kmalloc(sizeof(*data), GFP_KERNEL);
1582 bacpy(&data->bdaddr, bdaddr);
1583 data->bdaddr_type = bdaddr_type;
1584 list_add(&data->list, &hdev->remote_oob_data);
1587 if (hash192 && rand192) {
1588 memcpy(data->hash192, hash192, sizeof(data->hash192));
1589 memcpy(data->rand192, rand192, sizeof(data->rand192));
1590 if (hash256 && rand256)
1591 data->present = 0x03;
1593 memset(data->hash192, 0, sizeof(data->hash192));
1594 memset(data->rand192, 0, sizeof(data->rand192));
1595 if (hash256 && rand256)
1596 data->present = 0x02;
1598 data->present = 0x00;
1601 if (hash256 && rand256) {
1602 memcpy(data->hash256, hash256, sizeof(data->hash256));
1603 memcpy(data->rand256, rand256, sizeof(data->rand256));
1605 memset(data->hash256, 0, sizeof(data->hash256));
1606 memset(data->rand256, 0, sizeof(data->rand256));
1607 if (hash192 && rand192)
1608 data->present = 0x01;
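/* data->present is a bitmask of which OOB values are valid: 0x01 means only
 * the P-192 hash/rand pair, 0x02 only the P-256 pair, 0x03 both and 0x00
 * neither.
 */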
1611 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1616 /* This function requires the caller holds hdev->lock */
1617 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1619 struct adv_info *adv_instance;
1621 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1622 if (adv_instance->instance == instance)
1623 return adv_instance;
1629 /* This function requires the caller holds hdev->lock */
1630 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1632 struct adv_info *cur_instance;
1634 cur_instance = hci_find_adv_instance(hdev, instance);
1638 if (cur_instance == list_last_entry(&hdev->adv_instances,
1639 struct adv_info, list))
1640 return list_first_entry(&hdev->adv_instances,
1641 struct adv_info, list);
1643 return list_next_entry(cur_instance, list);
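/* Wrapping from the last entry back to the first lets callers rotate through
 * the registered advertising instances in a round-robin fashion.
 */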
1646 /* This function requires the caller holds hdev->lock */
1647 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1649 struct adv_info *adv_instance;
1651 adv_instance = hci_find_adv_instance(hdev, instance);
1655 BT_DBG("%s removing %dMR", hdev->name, instance);
1657 if (hdev->cur_adv_instance == instance) {
1658 if (hdev->adv_instance_timeout) {
1659 cancel_delayed_work(&hdev->adv_instance_expire);
1660 hdev->adv_instance_timeout = 0;
1662 hdev->cur_adv_instance = 0x00;
1665 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1667 list_del(&adv_instance->list);
1668 kfree(adv_instance);
1670 hdev->adv_instance_cnt--;
1675 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1677 struct adv_info *adv_instance, *n;
1679 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1680 adv_instance->rpa_expired = rpa_expired;
1683 /* This function requires the caller holds hdev->lock */
1684 void hci_adv_instances_clear(struct hci_dev *hdev)
1686 struct adv_info *adv_instance, *n;
1688 if (hdev->adv_instance_timeout) {
1689 cancel_delayed_work(&hdev->adv_instance_expire);
1690 hdev->adv_instance_timeout = 0;
1693 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1694 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1695 list_del(&adv_instance->list);
1696 kfree(adv_instance);
1699 hdev->adv_instance_cnt = 0;
1700 hdev->cur_adv_instance = 0x00;
1703 static void adv_instance_rpa_expired(struct work_struct *work)
1705 struct adv_info *adv_instance = container_of(work, struct adv_info,
1706 rpa_expired_cb.work);
1710 adv_instance->rpa_expired = true;
1713 /* This function requires the caller holds hdev->lock */
1714 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1715 u32 flags, u16 adv_data_len, u8 *adv_data,
1716 u16 scan_rsp_len, u8 *scan_rsp_data,
1717 u16 timeout, u16 duration, s8 tx_power,
1718 u32 min_interval, u32 max_interval,
1721 struct adv_info *adv;
1723 adv = hci_find_adv_instance(hdev, instance);
1725 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1726 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1727 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1729 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1730 instance < 1 || instance > hdev->le_num_of_adv_sets)
1731 return ERR_PTR(-EOVERFLOW);
1733 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1735 return ERR_PTR(-ENOMEM);
1737 adv->pending = true;
1738 adv->instance = instance;
1739 list_add(&adv->list, &hdev->adv_instances);
1740 hdev->adv_instance_cnt++;
1744 adv->min_interval = min_interval;
1745 adv->max_interval = max_interval;
1746 adv->tx_power = tx_power;
1747 /* Defining a mesh_handle changes the timing units to ms,
1748 * rather than seconds, and ties the instance to the requested
1751 adv->mesh = mesh_handle;
1753 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1754 scan_rsp_len, scan_rsp_data);
1756 adv->timeout = timeout;
1757 adv->remaining_time = timeout;
1760 adv->duration = hdev->def_multi_adv_rotation_duration;
1762 adv->duration = duration;
1764 INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1766 BT_DBG("%s for %dMR", hdev->name, instance);
1771 /* This function requires the caller holds hdev->lock */
1772 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1773 u32 flags, u8 data_len, u8 *data,
1774 u32 min_interval, u32 max_interval)
1776 struct adv_info *adv;
1778 adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1779 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1780 min_interval, max_interval, 0);
1784 adv->periodic = true;
1785 adv->per_adv_data_len = data_len;
1788 memcpy(adv->per_adv_data, data, data_len);
1793 /* This function requires the caller holds hdev->lock */
1794 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1795 u16 adv_data_len, u8 *adv_data,
1796 u16 scan_rsp_len, u8 *scan_rsp_data)
1798 struct adv_info *adv;
1800 adv = hci_find_adv_instance(hdev, instance);
1802 /* If advertisement doesn't exist, we can't modify its data */
1806 if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1807 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1808 memcpy(adv->adv_data, adv_data, adv_data_len);
1809 adv->adv_data_len = adv_data_len;
1810 adv->adv_data_changed = true;
1813 if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1814 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1815 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1816 adv->scan_rsp_len = scan_rsp_len;
1817 adv->scan_rsp_changed = true;
1820 /* Mark as changed if there are flags which would affect it */
1821 if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1822 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1823 adv->scan_rsp_changed = true;
1828 /* This function requires the caller holds hdev->lock */
1829 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1832 struct adv_info *adv;
1834 if (instance == 0x00) {
1835 /* Instance 0 always manages the "Tx Power" and "Flags"
1838 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1840 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1841 * corresponds to the "connectable" instance flag.
1843 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1844 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1846 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1847 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1848 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1849 flags |= MGMT_ADV_FLAG_DISCOV;
1854 adv = hci_find_adv_instance(hdev, instance);
1856 /* Return 0 when we got an invalid instance identifier. */
1863 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1865 struct adv_info *adv;
1867 /* Instance 0x00 always sets the local name */
1868 if (instance == 0x00)
1871 adv = hci_find_adv_instance(hdev, instance);
1875 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1876 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1879 return adv->scan_rsp_len ? true : false;
1882 /* This function requires the caller holds hdev->lock */
1883 void hci_adv_monitors_clear(struct hci_dev *hdev)
1885 struct adv_monitor *monitor;
1888 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1889 hci_free_adv_monitor(hdev, monitor);
1891 idr_destroy(&hdev->adv_monitors_idr);
1894 /* Frees the monitor structure and does some bookkeeping.
1895 * This function requires the caller holds hdev->lock.
1897 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1899 struct adv_pattern *pattern;
1900 struct adv_pattern *tmp;
1905 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1906 list_del(&pattern->list);
1910 if (monitor->handle)
1911 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1913 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1914 hdev->adv_monitors_cnt--;
1915 mgmt_adv_monitor_removed(hdev, monitor->handle);
1921 /* Assigns a handle to a monitor and, if offloading is supported and power is on,
1922 * also attempts to forward the request to the controller.
1923 * This function requires the caller holds hci_req_sync_lock.
1925 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1927 int min, max, handle;
1935 min = HCI_MIN_ADV_MONITOR_HANDLE;
1936 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1937 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1940 hci_dev_unlock(hdev);
1945 monitor->handle = handle;
1947 if (!hdev_is_powered(hdev))
1950 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1951 case HCI_ADV_MONITOR_EXT_NONE:
1952 bt_dev_dbg(hdev, "%s add monitor %d status %d", hdev->name,
1953 monitor->handle, status);
1954 /* Message was not forwarded to controller - not an error */
1957 case HCI_ADV_MONITOR_EXT_MSFT:
1958 status = msft_add_monitor_pattern(hdev, monitor);
1959 bt_dev_dbg(hdev, "%s add monitor %d msft status %d", hdev->name,
1960 monitor->handle, status);
1967 /* Attempts to tell the controller and free the monitor. If somehow the
1968 * controller doesn't have a corresponding handle, remove anyway.
1969 * This function requires the caller holds hci_req_sync_lock.
1971 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1972 struct adv_monitor *monitor)
1976 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1977 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1978 bt_dev_dbg(hdev, "%s remove monitor %d status %d", hdev->name,
1979 monitor->handle, status);
1982 case HCI_ADV_MONITOR_EXT_MSFT:
1983 status = msft_remove_monitor(hdev, monitor);
1984 bt_dev_dbg(hdev, "%s remove monitor %d msft status %d",
1985 hdev->name, monitor->handle, status);
1989 /* In case no matching handle registered, just free the monitor */
1990 if (status == -ENOENT)
1996 if (status == -ENOENT)
1997 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1999 hci_free_adv_monitor(hdev, monitor);
2004 /* This function requires the caller holds hci_req_sync_lock */
2005 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
2007 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2012 return hci_remove_adv_monitor(hdev, monitor);
2015 /* This function requires the caller holds hci_req_sync_lock */
2016 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2018 struct adv_monitor *monitor;
2019 int idr_next_id = 0;
2023 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2027 status = hci_remove_adv_monitor(hdev, monitor);
2037 /* This function requires the caller holds hdev->lock */
2038 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2040 return !idr_is_empty(&hdev->adv_monitors_idr);
2043 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2045 if (msft_monitor_supported(hdev))
2046 return HCI_ADV_MONITOR_EXT_MSFT;
2048 return HCI_ADV_MONITOR_EXT_NONE;
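/* A minimal usage sketch for the bdaddr list helpers (hypothetical caller):
 *
 *	if (hci_bdaddr_list_lookup(&hdev->reject_list, bdaddr, BDADDR_BREDR))
 *		reject the incoming connection;
 *
 * The *_with_irk and *_with_flags variants below keep extra per-entry data
 * (IRKs and connection flags respectively).
 */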
2051 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2052 bdaddr_t *bdaddr, u8 type)
2054 struct bdaddr_list *b;
2056 list_for_each_entry(b, bdaddr_list, list) {
2057 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2064 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2065 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2068 struct bdaddr_list_with_irk *b;
2070 list_for_each_entry(b, bdaddr_list, list) {
2071 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2078 struct bdaddr_list_with_flags *
2079 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2080 bdaddr_t *bdaddr, u8 type)
2082 struct bdaddr_list_with_flags *b;
2084 list_for_each_entry(b, bdaddr_list, list) {
2085 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2092 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2094 struct bdaddr_list *b, *n;
2096 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2102 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2104 struct bdaddr_list *entry;
2106 if (!bacmp(bdaddr, BDADDR_ANY))
2109 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2112 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2116 bacpy(&entry->bdaddr, bdaddr);
2117 entry->bdaddr_type = type;
2119 list_add(&entry->list, list);
2124 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2125 u8 type, u8 *peer_irk, u8 *local_irk)
2127 struct bdaddr_list_with_irk *entry;
2129 if (!bacmp(bdaddr, BDADDR_ANY))
2132 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2135 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2139 bacpy(&entry->bdaddr, bdaddr);
2140 entry->bdaddr_type = type;
2143 memcpy(entry->peer_irk, peer_irk, 16);
2146 memcpy(entry->local_irk, local_irk, 16);
2148 list_add(&entry->list, list);
2153 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2156 struct bdaddr_list_with_flags *entry;
2158 if (!bacmp(bdaddr, BDADDR_ANY))
2161 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2164 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2168 bacpy(&entry->bdaddr, bdaddr);
2169 entry->bdaddr_type = type;
2170 entry->flags = flags;
2172 list_add(&entry->list, list);
2177 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2179 struct bdaddr_list *entry;
2181 if (!bacmp(bdaddr, BDADDR_ANY)) {
2182 hci_bdaddr_list_clear(list);
2186 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2190 list_del(&entry->list);
2196 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2199 struct bdaddr_list_with_irk *entry;
2201 if (!bacmp(bdaddr, BDADDR_ANY)) {
2202 hci_bdaddr_list_clear(list);
2206 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2210 list_del(&entry->list);
2216 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2219 struct bdaddr_list_with_flags *entry;
2221 if (!bacmp(bdaddr, BDADDR_ANY)) {
2222 hci_bdaddr_list_clear(list);
2226 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2230 list_del(&entry->list);
2236 /* This function requires the caller holds hdev->lock */
2237 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2238 bdaddr_t *addr, u8 addr_type)
2240 struct hci_conn_params *params;
2242 list_for_each_entry(params, &hdev->le_conn_params, list) {
2243 if (bacmp(¶ms->addr, addr) == 0 &&
2244 params->addr_type == addr_type) {
2252 /* This function requires the caller holds hdev->lock */
2253 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2254 bdaddr_t *addr, u8 addr_type)
2256 struct hci_conn_params *param;
2258 list_for_each_entry(param, list, action) {
2259 if (bacmp(¶m->addr, addr) == 0 &&
2260 param->addr_type == addr_type)
2267 /* This function requires the caller holds hdev->lock */
2268 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2269 bdaddr_t *addr, u8 addr_type)
2271 struct hci_conn_params *params;
2273 params = hci_conn_params_lookup(hdev, addr, addr_type);
2277 params = kzalloc(sizeof(*params), GFP_KERNEL);
2279 bt_dev_err(hdev, "out of memory");
2283 bacpy(¶ms->addr, addr);
2284 params->addr_type = addr_type;
2286 list_add(¶ms->list, &hdev->le_conn_params);
2287 INIT_LIST_HEAD(¶ms->action);
2289 params->conn_min_interval = hdev->le_conn_min_interval;
2290 params->conn_max_interval = hdev->le_conn_max_interval;
2291 params->conn_latency = hdev->le_conn_latency;
2292 params->supervision_timeout = hdev->le_supv_timeout;
2293 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2295 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2300 static void hci_conn_params_free(struct hci_conn_params *params)
2303 hci_conn_drop(params->conn);
2304 hci_conn_put(params->conn);
2307 list_del(¶ms->action);
2308 list_del(¶ms->list);
2312 /* This function requires the caller holds hdev->lock */
2313 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2315 struct hci_conn_params *params;
2317 params = hci_conn_params_lookup(hdev, addr, addr_type);
2321 hci_conn_params_free(params);
2323 hci_update_passive_scan(hdev);
2325 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2328 /* This function requires the caller holds hdev->lock */
2329 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2331 struct hci_conn_params *params, *tmp;
2333 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2334 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2337 /* If trying to establish a one-time connection to a disabled
2338 * device, leave the params, but mark them as just once.
2340 if (params->explicit_connect) {
2341 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2345 list_del(¶ms->list);
2349 BT_DBG("All LE disabled connection parameters were removed");
2352 /* This function requires the caller holds hdev->lock */
2353 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2355 struct hci_conn_params *params, *tmp;
2357 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2358 hci_conn_params_free(params);
2360 BT_DBG("All LE connection parameters were removed");
2363 /* Copy the Identity Address of the controller.
2365 * If the controller has a public BD_ADDR, then by default use that one.
2366 * If this is a LE only controller without a public address, default to
2367 * the static random address.
2369 * For debugging purposes it is possible to force controllers with a
2370 * public address to use the static random address instead.
2372 * In case BR/EDR has been disabled on a dual-mode controller and
2373 * userspace has configured a static address, then that address
2374 * becomes the identity address instead of the public BR/EDR address.
2376 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2379 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2380 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2381 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2382 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2383 bacpy(bdaddr, &hdev->static_addr);
2384 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2386 bacpy(bdaddr, &hdev->bdaddr);
2387 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2391 static void hci_clear_wake_reason(struct hci_dev *hdev)
2395 hdev->wake_reason = 0;
2396 bacpy(&hdev->wake_addr, BDADDR_ANY);
2397 hdev->wake_addr_type = 0;
2399 hci_dev_unlock(hdev);
2402 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2405 struct hci_dev *hdev =
2406 container_of(nb, struct hci_dev, suspend_notifier);
2409 /* Userspace has full control of this device. Do nothing. */
2410 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2413 if (action == PM_SUSPEND_PREPARE)
2414 ret = hci_suspend_dev(hdev);
2415 else if (action == PM_POST_SUSPEND)
2416 ret = hci_resume_dev(hdev);
2419 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2425 /* Alloc HCI device */
2426 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2428 struct hci_dev *hdev;
2429 unsigned int alloc_size;
2431 alloc_size = sizeof(*hdev);
2433 /* Fixme: May need ALIGN-ment? */
2434 alloc_size += sizeof_priv;
2437 hdev = kzalloc(alloc_size, GFP_KERNEL);
2441 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2442 hdev->esco_type = (ESCO_HV1);
2443 hdev->link_mode = (HCI_LM_ACCEPT);
2444 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2445 hdev->io_capability = 0x03; /* No Input No Output */
2446 hdev->manufacturer = 0xffff; /* Default to internal use */
2447 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2448 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2449 hdev->adv_instance_cnt = 0;
2450 hdev->cur_adv_instance = 0x00;
2451 hdev->adv_instance_timeout = 0;
2453 hdev->advmon_allowlist_duration = 300;
2454 hdev->advmon_no_filter_duration = 500;
2455 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
2457 hdev->sniff_max_interval = 800;
2458 hdev->sniff_min_interval = 80;
2460 hdev->le_adv_channel_map = 0x07;
2461 hdev->le_adv_min_interval = 0x0800;
2462 hdev->le_adv_max_interval = 0x0800;
2463 hdev->le_scan_interval = 0x0060;
2464 hdev->le_scan_window = 0x0030;
2465 hdev->le_scan_int_suspend = 0x0400;
2466 hdev->le_scan_window_suspend = 0x0012;
2467 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2468 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2469 hdev->le_scan_int_adv_monitor = 0x0060;
2470 hdev->le_scan_window_adv_monitor = 0x0030;
2471 hdev->le_scan_int_connect = 0x0060;
2472 hdev->le_scan_window_connect = 0x0060;
2473 hdev->le_conn_min_interval = 0x0018;
2474 hdev->le_conn_max_interval = 0x0028;
2475 hdev->le_conn_latency = 0x0000;
2476 hdev->le_supv_timeout = 0x002a;
2477 hdev->le_def_tx_len = 0x001b;
2478 hdev->le_def_tx_time = 0x0148;
2479 hdev->le_max_tx_len = 0x001b;
2480 hdev->le_max_tx_time = 0x0148;
2481 hdev->le_max_rx_len = 0x001b;
2482 hdev->le_max_rx_time = 0x0148;
2483 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2484 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2485 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2486 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2487 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2488 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2489 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2490 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2491 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2493 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2494 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2495 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2496 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2497 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2498 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2500 /* default 1.28 sec page scan */
2501 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2502 hdev->def_page_scan_int = 0x0800;
2503 hdev->def_page_scan_window = 0x0012;
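/* The timing defaults above use the standard controller units: advertising,
 * scan and page scan intervals/windows are in 0.625 ms slots (0x0800 = 1.28 s,
 * 0x0060 = 60 ms, 0x0030 = 30 ms), connection intervals in 1.25 ms units
 * (0x0018 = 30 ms, 0x0028 = 50 ms) and the supervision timeout in 10 ms units
 * (0x002a = 420 ms).
 */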
2505 mutex_init(&hdev->lock);
2506 mutex_init(&hdev->req_lock);
2508 INIT_LIST_HEAD(&hdev->mesh_pending);
2509 INIT_LIST_HEAD(&hdev->mgmt_pending);
2510 INIT_LIST_HEAD(&hdev->reject_list);
2511 INIT_LIST_HEAD(&hdev->accept_list);
2512 INIT_LIST_HEAD(&hdev->uuids);
2513 INIT_LIST_HEAD(&hdev->link_keys);
2514 INIT_LIST_HEAD(&hdev->long_term_keys);
2515 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2516 INIT_LIST_HEAD(&hdev->remote_oob_data);
2517 INIT_LIST_HEAD(&hdev->le_accept_list);
2518 INIT_LIST_HEAD(&hdev->le_resolv_list);
2519 INIT_LIST_HEAD(&hdev->le_conn_params);
2520 INIT_LIST_HEAD(&hdev->pend_le_conns);
2521 INIT_LIST_HEAD(&hdev->pend_le_reports);
2522 INIT_LIST_HEAD(&hdev->conn_hash.list);
2523 INIT_LIST_HEAD(&hdev->adv_instances);
2524 INIT_LIST_HEAD(&hdev->blocked_keys);
2525 INIT_LIST_HEAD(&hdev->monitored_devices);
2527 INIT_LIST_HEAD(&hdev->local_codecs);
2528 INIT_WORK(&hdev->rx_work, hci_rx_work);
2529 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2530 INIT_WORK(&hdev->tx_work, hci_tx_work);
2531 INIT_WORK(&hdev->power_on, hci_power_on);
2532 INIT_WORK(&hdev->error_reset, hci_error_reset);
2534 hci_cmd_sync_init(hdev);
2536 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2538 skb_queue_head_init(&hdev->rx_q);
2539 skb_queue_head_init(&hdev->cmd_q);
2540 skb_queue_head_init(&hdev->raw_q);
2542 init_waitqueue_head(&hdev->req_wait_q);
2544 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2545 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2547 hci_devcd_setup(hdev);
2548 hci_request_setup(hdev);
2550 hci_init_sysfs(hdev);
2551 discovery_init(hdev);
2555 EXPORT_SYMBOL(hci_alloc_dev_priv);
2557 /* Free HCI device */
2558 void hci_free_dev(struct hci_dev *hdev)
2560 /* will free via device release */
2561 put_device(&hdev->dev);
2563 EXPORT_SYMBOL(hci_free_dev);
2565 /* Register HCI device */
2566 int hci_register_dev(struct hci_dev *hdev)
2570 if (!hdev->open || !hdev->close || !hdev->send)
2573 /* Do not allow HCI_AMP devices to register at index 0,
2574 * so the index can be used as the AMP controller ID.
2576 switch (hdev->dev_type) {
2578 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
2581 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
2590 snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
2593 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2595 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2596 if (!hdev->workqueue) {
2601 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2603 if (!hdev->req_workqueue) {
2604 destroy_workqueue(hdev->workqueue);
2609 if (!IS_ERR_OR_NULL(bt_debugfs))
2610 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2612 dev_set_name(&hdev->dev, "%s", hdev->name);
2614 error = device_add(&hdev->dev);
2618 hci_leds_init(hdev);
2620 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2621 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2624 if (rfkill_register(hdev->rfkill) < 0) {
2625 rfkill_destroy(hdev->rfkill);
2626 hdev->rfkill = NULL;
2630 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2631 hci_dev_set_flag(hdev, HCI_RFKILLED);
2633 hci_dev_set_flag(hdev, HCI_SETUP);
2634 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2636 if (hdev->dev_type == HCI_PRIMARY) {
2637 /* Assume BR/EDR support until proven otherwise (such as
2638 * through reading supported features during init).
2640 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2643 write_lock(&hci_dev_list_lock);
2644 list_add(&hdev->list, &hci_dev_list);
2645 write_unlock(&hci_dev_list_lock);
2647 /* Devices that are marked for raw-only usage are unconfigured
2648 * and should not be included in normal operation.
2650 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2651 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2653 /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2657 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2659 hci_sock_dev_event(hdev, HCI_DEV_REG);
2662 error = hci_register_suspend_notifier(hdev);
2664 BT_WARN("register suspend notifier failed error:%d\n", error);
2666 queue_work(hdev->req_workqueue, &hdev->power_on);
2668 idr_init(&hdev->adv_monitors_idr);
2669 msft_register(hdev);
2674 debugfs_remove_recursive(hdev->debugfs);
2675 destroy_workqueue(hdev->workqueue);
2676 destroy_workqueue(hdev->req_workqueue);
2678 ida_simple_remove(&hci_index_ida, hdev->id);
2682 EXPORT_SYMBOL(hci_register_dev);
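/* Illustrative sketch of how a minimal transport driver registers itself
 * with the core; my_drv_* and priv are placeholder driver symbols, not part
 * of this file:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_VIRTUAL;
 *	hdev->open  = my_drv_open;
 *	hdev->close = my_drv_close;
 *	hdev->send  = my_drv_send;
 *	hci_set_drvdata(hdev, priv);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * On removal the driver calls hci_unregister_dev(hdev) followed by
 * hci_free_dev(hdev).
 */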
2684 /* Unregister HCI device */
2685 void hci_unregister_dev(struct hci_dev *hdev)
2687 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2689 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2691 write_lock(&hci_dev_list_lock);
2692 list_del(&hdev->list);
2693 write_unlock(&hci_dev_list_lock);
2695 cancel_work_sync(&hdev->power_on);
2697 hci_cmd_sync_clear(hdev);
2699 hci_unregister_suspend_notifier(hdev);
2701 msft_unregister(hdev);
2703 hci_dev_do_close(hdev);
2705 if (!test_bit(HCI_INIT, &hdev->flags) &&
2706 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2707 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2709 mgmt_index_removed(hdev);
2710 hci_dev_unlock(hdev);
2713 /* mgmt_index_removed should take care of emptying the mgmt_pending list */
2715 BUG_ON(!list_empty(&hdev->mgmt_pending));
2717 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2720 rfkill_unregister(hdev->rfkill);
2721 rfkill_destroy(hdev->rfkill);
2724 device_del(&hdev->dev);
2725 /* Actual cleanup is deferred until hci_release_dev(). */
2728 EXPORT_SYMBOL(hci_unregister_dev);
2730 /* Release HCI device */
2731 void hci_release_dev(struct hci_dev *hdev)
2733 debugfs_remove_recursive(hdev->debugfs);
2734 kfree_const(hdev->hw_info);
2735 kfree_const(hdev->fw_info);
2737 destroy_workqueue(hdev->workqueue);
2738 destroy_workqueue(hdev->req_workqueue);
2741 hci_bdaddr_list_clear(&hdev->reject_list);
2742 hci_bdaddr_list_clear(&hdev->accept_list);
2743 hci_uuids_clear(hdev);
2744 hci_link_keys_clear(hdev);
2745 hci_smp_ltks_clear(hdev);
2746 hci_smp_irks_clear(hdev);
2747 hci_remote_oob_data_clear(hdev);
2748 hci_adv_instances_clear(hdev);
2749 hci_adv_monitors_clear(hdev);
2750 hci_bdaddr_list_clear(&hdev->le_accept_list);
2751 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2752 hci_conn_params_clear_all(hdev);
2753 hci_discovery_filter_clear(hdev);
2754 hci_blocked_keys_clear(hdev);
2755 hci_dev_unlock(hdev);
2757 ida_simple_remove(&hci_index_ida, hdev->id);
2758 kfree_skb(hdev->sent_cmd);
2759 kfree_skb(hdev->recv_event);
2762 EXPORT_SYMBOL(hci_release_dev);
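/* Note: drivers are not expected to call hci_release_dev() directly; it is
 * meant to run from the struct device release path once the final reference
 * taken in hci_alloc_dev_priv()/hci_free_dev() is dropped, which is why
 * hci_unregister_dev() above only performs the non-deferrable teardown.
 */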
2764 int hci_register_suspend_notifier(struct hci_dev *hdev)
2768 if (!hdev->suspend_notifier.notifier_call &&
2769 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2770 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2771 ret = register_pm_notifier(&hdev->suspend_notifier);
2777 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2781 if (hdev->suspend_notifier.notifier_call) {
2782 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2784 hdev->suspend_notifier.notifier_call = NULL;
2790 /* Suspend HCI device */
2791 int hci_suspend_dev(struct hci_dev *hdev)
2795 bt_dev_dbg(hdev, "");
2797 /* Suspend should only act when the device is powered. */
2798 if (!hdev_is_powered(hdev) ||
2799 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2802 /* If powering down don't attempt to suspend */
2803 if (mgmt_powering_down(hdev))
2806 /* Cancel potentially blocking sync operation before suspend */
2807 __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
2809 hci_req_sync_lock(hdev);
2810 ret = hci_suspend_sync(hdev);
2811 hci_req_sync_unlock(hdev);
2813 hci_clear_wake_reason(hdev);
2814 mgmt_suspending(hdev, hdev->suspend_state);
2816 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2819 EXPORT_SYMBOL(hci_suspend_dev);
2821 /* Resume HCI device */
2822 int hci_resume_dev(struct hci_dev *hdev)
2826 bt_dev_dbg(hdev, "");
2828 /* Resume should only act when the device is powered. */
2829 if (!hdev_is_powered(hdev) ||
2830 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2833 /* If powering down don't attempt to resume */
2834 if (mgmt_powering_down(hdev))
2837 hci_req_sync_lock(hdev);
2838 ret = hci_resume_sync(hdev);
2839 hci_req_sync_unlock(hdev);
2841 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2842 hdev->wake_addr_type);
2844 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2847 EXPORT_SYMBOL(hci_resume_dev);
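/* Illustrative note: with HCI_QUIRK_NO_SUSPEND_NOTIFIER unset, these two
 * helpers are normally driven by the PM notifier registered in
 * hci_register_suspend_notifier(). A transport that manages its own power
 * transitions (and sets the quirk) would instead call them directly, e.g.
 *
 *	ret = hci_suspend_dev(hdev);
 *	...
 *	ret = hci_resume_dev(hdev);
 */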
2849 /* Reset HCI device */
2850 int hci_reset_dev(struct hci_dev *hdev)
2852 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2853 struct sk_buff *skb;
2855 skb = bt_skb_alloc(3, GFP_ATOMIC);
2859 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2860 skb_put_data(skb, hw_err, 3);
2862 bt_dev_err(hdev, "Injecting HCI hardware error event");
2864 /* Send Hardware Error to upper stack */
2865 return hci_recv_frame(hdev, skb);
2867 EXPORT_SYMBOL(hci_reset_dev);
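/* Illustrative sketch: a driver that detects a fatal firmware or transport
 * error can ask the core to restart the controller by injecting a synthetic
 * Hardware Error event; my_drv_firmware_dead() is a placeholder check:
 *
 *	if (my_drv_firmware_dead(priv))
 *		hci_reset_dev(hdev);
 *
 * The injected HCI_EV_HARDWARE_ERROR then flows through the normal event
 * path, which is expected to schedule the error_reset work set up in
 * hci_alloc_dev_priv().
 */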
2869 /* Receive frame from HCI drivers */
2870 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2872 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2873 && !test_bit(HCI_INIT, &hdev->flags))) {
2878 switch (hci_skb_pkt_type(skb)) {
2881 case HCI_ACLDATA_PKT:
2882 /* Detect if ISO packet has been sent as ACL */
2883 if (hci_conn_num(hdev, ISO_LINK)) {
2884 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2887 type = hci_conn_lookup_type(hdev, hci_handle(handle));
2888 if (type == ISO_LINK)
2889 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2892 case HCI_SCODATA_PKT:
2894 case HCI_ISODATA_PKT:
2902 bt_cb(skb)->incoming = 1;
2905 __net_timestamp(skb);
2907 skb_queue_tail(&hdev->rx_q, skb);
2908 queue_work(hdev->workqueue, &hdev->rx_work);
2912 EXPORT_SYMBOL(hci_recv_frame);
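/* Illustrative sketch: a transport driver hands each reassembled packet to
 * the core roughly like this, shown for an event packet (buf/len are
 * placeholders; ACL/SCO/ISO data use the corresponding packet type):
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, buf, len);
 *	err = hci_recv_frame(hdev, skb);
 */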
2914 /* Receive diagnostic message from HCI drivers */
2915 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2917 /* Mark as diagnostic packet */
2918 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2921 __net_timestamp(skb);
2923 skb_queue_tail(&hdev->rx_q, skb);
2924 queue_work(hdev->workqueue, &hdev->rx_work);
2928 EXPORT_SYMBOL(hci_recv_diag);
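/* Note: hci_recv_diag() is intended for transports that expose a separate
 * vendor diagnostic channel; packets fed here take the same rx_q path as
 * regular frames and therefore also reach the monitor socket.
 */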
2930 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2934 va_start(vargs, fmt);
2935 kfree_const(hdev->hw_info);
2936 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2939 EXPORT_SYMBOL(hci_set_hw_info);
2941 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2945 va_start(vargs, fmt);
2946 kfree_const(hdev->fw_info);
2947 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2950 EXPORT_SYMBOL(hci_set_fw_info);
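/* Illustrative usage: drivers typically record the hardware and firmware
 * revision strings once known (format strings and variables below are
 * placeholders):
 *
 *	hci_set_hw_info(hdev, "rev %u", hw_rev);
 *	hci_set_fw_info(hdev, "build %u", fw_build);
 */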
2952 /* ---- Interface to upper protocols ---- */
2954 int hci_register_cb(struct hci_cb *cb)
2956 BT_DBG("%p name %s", cb, cb->name);
2958 mutex_lock(&hci_cb_list_lock);
2959 list_add_tail(&cb->list, &hci_cb_list);
2960 mutex_unlock(&hci_cb_list_lock);
2964 EXPORT_SYMBOL(hci_register_cb);
2966 int hci_unregister_cb(struct hci_cb *cb)
2968 BT_DBG("%p name %s", cb, cb->name);
2970 mutex_lock(&hci_cb_list_lock);
2971 list_del(&cb->list);
2972 mutex_unlock(&hci_cb_list_lock);
2976 EXPORT_SYMBOL(hci_unregister_cb);
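/* Illustrative sketch: an upper protocol hooks into connection state changes
 * by registering a struct hci_cb; only the callbacks it needs are filled in
 * (my_proto_* names are placeholders):
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name		= "my_proto",
 *		.connect_cfm	= my_proto_connect_cfm,
 *		.disconnect_cfm	= my_proto_disconnect_cfm,
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *	...
 *	hci_unregister_cb(&my_proto_cb);
 */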
2978 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2982 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
2986 __net_timestamp(skb);
2988 /* Send copy to monitor */
2989 hci_send_to_monitor(hdev, skb);
2991 if (atomic_read(&hdev->promisc)) {
2992 /* Send copy to the sockets */
2993 hci_send_to_sock(hdev, skb);
2996 /* Get rid of skb owner, prior to sending to the driver. */
2999 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3004 err = hdev->send(hdev, skb);
3006 bt_dev_err(hdev, "sending frame failed (%d)", err);
3014 /* Send HCI command */
3015 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3018 struct sk_buff *skb;
3020 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3022 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3024 bt_dev_err(hdev, "no memory for command");
3028 /* Stand-alone HCI commands must be flagged as
3029 * single-command requests.
3031 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3033 skb_queue_tail(&hdev->cmd_q, skb);
3034 queue_work(hdev->workqueue, &hdev->cmd_work);
3039 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3042 struct sk_buff *skb;
3044 if (hci_opcode_ogf(opcode) != 0x3f) {
3045 /* A controller receiving a command shall respond with either
3046 * a Command Status Event or a Command Complete Event.
3047 * Therefore, all standard HCI commands must be sent via the
3048 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3049 * Some vendors do not comply with this rule for vendor-specific
3050 * commands and do not return any event. We want to support
3051 * unresponded commands for such cases only.
3053 bt_dev_err(hdev, "unresponded command not supported");
3057 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3059 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3064 hci_send_frame(hdev, skb);
3068 EXPORT_SYMBOL(__hci_cmd_send);
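/* Illustrative sketch: sending a fire-and-forget vendor command (OGF 0x3f)
 * for which the controller returns no event; the OCF 0x0001 and parameter
 * struct are hypothetical:
 *
 *	struct my_vnd_param param = { .mode = 1 };
 *
 *	err = __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
 *			     sizeof(param), &param);
 */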
3070 /* Get data from the previously sent command */
3071 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3073 struct hci_command_hdr *hdr;
3075 if (!hdev->sent_cmd)
3078 hdr = (void *) hdev->sent_cmd->data;
3080 if (hdr->opcode != cpu_to_le16(opcode))
3083 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3085 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3088 /* Get data from last received event */
3089 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3091 struct hci_event_hdr *hdr;
3094 if (!hdev->recv_event)
3097 hdr = (void *)hdev->recv_event->data;
3098 offset = sizeof(*hdr);
3100 if (hdr->evt != event) {
3101 /* For an LE meta event, check whether the subevent matches */
3102 if (hdr->evt == HCI_EV_LE_META) {
3103 struct hci_ev_le_meta *ev;
3105 ev = (void *)hdev->recv_event->data + offset;
3106 offset += sizeof(*ev);
3107 if (ev->subevent == event)
3114 bt_dev_dbg(hdev, "event 0x%2.2x", event);
3116 return hdev->recv_event->data + offset;
3120 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3122 struct hci_acl_hdr *hdr;
3125 skb_push(skb, HCI_ACL_HDR_SIZE);
3126 skb_reset_transport_header(skb);
3127 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3128 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3129 hdr->dlen = cpu_to_le16(len);
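/* For reference: hci_handle_pack() above stores the 12-bit connection handle
 * in the low bits and the 4-bit flags field (packet boundary plus broadcast
 * flags) in the top bits of the 16-bit value, e.g. handle 0x002a sent with
 * ACL_START becomes 0x202a; hci_handle()/hci_flags() undo this packing on
 * the receive side.
 */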
3132 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3133 struct sk_buff *skb, __u16 flags)
3135 struct hci_conn *conn = chan->conn;
3136 struct hci_dev *hdev = conn->hdev;
3137 struct sk_buff *list;
3139 skb->len = skb_headlen(skb);
3142 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3144 switch (hdev->dev_type) {
3146 hci_add_acl_hdr(skb, conn->handle, flags);
3149 hci_add_acl_hdr(skb, chan->handle, flags);
3152 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3156 list = skb_shinfo(skb)->frag_list;
3158 /* Non fragmented */
3159 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3161 skb_queue_tail(queue, skb);
3164 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3166 skb_shinfo(skb)->frag_list = NULL;
3168 /* Queue all fragments atomically. We need to use spin_lock_bh
3169 * here because of 6LoWPAN links: this function can be called from
3170 * softirq context there, and using a normal spin lock could cause deadlocks. */
3173 spin_lock_bh(&queue->lock);
3175 __skb_queue_tail(queue, skb);
3177 flags &= ~ACL_START;
3180 skb = list; list = list->next;
3182 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3183 hci_add_acl_hdr(skb, conn->handle, flags);
3185 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3187 __skb_queue_tail(queue, skb);
3190 spin_unlock_bh(&queue->lock);
3194 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3196 struct hci_dev *hdev = chan->conn->hdev;
3198 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3200 hci_queue_acl(chan, &chan->data_q, skb, flags);
3202 queue_work(hdev->workqueue, &hdev->tx_work);
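/* Note: L2CAP is the primary caller of hci_send_acl(); the flags passed in
 * apply to the first fragment (typically ACL_START or ACL_START_NO_FLUSH),
 * while any continuation fragments split out in hci_queue_acl() are tagged
 * ACL_CONT before being queued.
 */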
3206 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3208 struct hci_dev *hdev = conn->hdev;
3209 struct hci_sco_hdr hdr;
3211 BT_DBG("%s len %d", hdev->name, skb->len);
3213 hdr.handle = cpu_to_le16(conn->handle);
3214 hdr.dlen = skb->len;
3216 skb_push(skb, HCI_SCO_HDR_SIZE);
3217 skb_reset_transport_header(skb);
3218 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3220 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3222 skb_queue_tail(&conn->data_q, skb);
3223 queue_work(hdev->workqueue, &hdev->tx_work);
3227 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3229 struct hci_iso_hdr *hdr;
3232 skb_push(skb, HCI_ISO_HDR_SIZE);
3233 skb_reset_transport_header(skb);
3234 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3235 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3236 hdr->dlen = cpu_to_le16(len);
3239 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3240 struct sk_buff *skb)
3242 struct hci_dev *hdev = conn->hdev;
3243 struct sk_buff *list;
3246 skb->len = skb_headlen(skb);
3249 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3251 list = skb_shinfo(skb)->frag_list;
3253 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3254 hci_add_iso_hdr(skb, conn->handle, flags);
3257 /* Non fragmented */
3258 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3260 skb_queue_tail(queue, skb);
3263 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3265 skb_shinfo(skb)->frag_list = NULL;
3267 __skb_queue_tail(queue, skb);
3270 skb = list; list = list->next;
3272 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3273 flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3275 hci_add_iso_hdr(skb, conn->handle, flags);
3277 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3279 __skb_queue_tail(queue, skb);
3284 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3286 struct hci_dev *hdev = conn->hdev;
3288 BT_DBG("%s len %d", hdev->name, skb->len);
3290 hci_queue_iso(conn, &conn->data_q, skb);
3292 queue_work(hdev->workqueue, &hdev->tx_work);
3295 /* ---- HCI TX task (outgoing data) ---- */
3297 /* HCI Connection scheduler */
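/* Rough shape of the scheduler below: hci_low_sent()/hci_chan_sent() pick,
 * per link type, the connection or channel with queued data and the fewest
 * packets outstanding; hci_quote_sent() turns the controller's free buffer
 * count into a per-round quota; hci_prio_recalculate() promotes channels that
 * keep losing out on priority so they are not starved; and __check_timeout()
 * uses hci_link_tx_to() to kill connections whose packets are never
 * acknowledged.
 */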
3298 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3300 struct hci_dev *hdev;
3310 switch (conn->type) {
3312 cnt = hdev->acl_cnt;
3315 cnt = hdev->block_cnt;
3319 cnt = hdev->sco_cnt;
3322 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3325 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3326 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3330 bt_dev_err(hdev, "unknown link type %d", conn->type);
3337 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3340 struct hci_conn_hash *h = &hdev->conn_hash;
3341 struct hci_conn *conn = NULL, *c;
3342 unsigned int num = 0, min = ~0;
3344 /* We don't have to lock device here. Connections are always
3345 * added and removed with TX task disabled. */
3349 list_for_each_entry_rcu(c, &h->list, list) {
3350 if (c->type != type || skb_queue_empty(&c->data_q))
3353 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3358 if (c->sent < min) {
3363 if (hci_conn_num(hdev, type) == num)
3369 hci_quote_sent(conn, num, quote);
3371 BT_DBG("conn %p quote %d", conn, *quote);
3375 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3377 struct hci_conn_hash *h = &hdev->conn_hash;
3380 bt_dev_err(hdev, "link tx timeout");
3384 /* Kill stalled connections */
3385 list_for_each_entry_rcu(c, &h->list, list) {
3386 if (c->type == type && c->sent) {
3387 bt_dev_err(hdev, "killing stalled connection %pMR",
3389 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3396 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3399 struct hci_conn_hash *h = &hdev->conn_hash;
3400 struct hci_chan *chan = NULL;
3401 unsigned int num = 0, min = ~0, cur_prio = 0;
3402 struct hci_conn *conn;
3405 BT_DBG("%s", hdev->name);
3409 list_for_each_entry_rcu(conn, &h->list, list) {
3410 struct hci_chan *tmp;
3412 if (conn->type != type)
3415 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3420 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3421 struct sk_buff *skb;
3423 if (skb_queue_empty(&tmp->data_q))
3426 skb = skb_peek(&tmp->data_q);
3427 if (skb->priority < cur_prio)
3430 if (skb->priority > cur_prio) {
3433 cur_prio = skb->priority;
3438 if (conn->sent < min) {
3444 if (hci_conn_num(hdev, type) == conn_num)
3453 hci_quote_sent(chan->conn, num, quote);
3455 BT_DBG("chan %p quote %d", chan, *quote);
3459 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3461 struct hci_conn_hash *h = &hdev->conn_hash;
3462 struct hci_conn *conn;
3465 BT_DBG("%s", hdev->name);
3469 list_for_each_entry_rcu(conn, &h->list, list) {
3470 struct hci_chan *chan;
3472 if (conn->type != type)
3475 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3480 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3481 struct sk_buff *skb;
3488 if (skb_queue_empty(&chan->data_q))
3491 skb = skb_peek(&chan->data_q);
3492 if (skb->priority >= HCI_PRIO_MAX - 1)
3495 skb->priority = HCI_PRIO_MAX - 1;
3497 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3501 if (hci_conn_num(hdev, type) == num)
3509 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3511 /* Calculate count of blocks used by this packet */
3512 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3515 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3517 unsigned long last_tx;
3519 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3524 last_tx = hdev->le_last_tx;
3527 last_tx = hdev->acl_last_tx;
3531 /* tx timeout must be longer than maximum link supervision timeout
3534 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3535 hci_link_tx_to(hdev, type);
3539 static void hci_sched_sco(struct hci_dev *hdev)
3541 struct hci_conn *conn;
3542 struct sk_buff *skb;
3545 BT_DBG("%s", hdev->name);
3547 if (!hci_conn_num(hdev, SCO_LINK))
3550 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
3551 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3552 BT_DBG("skb %p len %d", skb, skb->len);
3553 hci_send_frame(hdev, skb);
3556 if (conn->sent == ~0)
3562 static void hci_sched_esco(struct hci_dev *hdev)
3564 struct hci_conn *conn;
3565 struct sk_buff *skb;
3568 BT_DBG("%s", hdev->name);
3570 if (!hci_conn_num(hdev, ESCO_LINK))
3573 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3575 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3576 BT_DBG("skb %p len %d", skb, skb->len);
3577 hci_send_frame(hdev, skb);
3580 if (conn->sent == ~0)
3586 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3588 unsigned int cnt = hdev->acl_cnt;
3589 struct hci_chan *chan;
3590 struct sk_buff *skb;
3593 __check_timeout(hdev, cnt, ACL_LINK);
3595 while (hdev->acl_cnt &&
3596 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
3597 u32 priority = (skb_peek(&chan->data_q))->priority;
3598 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3599 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3600 skb->len, skb->priority);
3602 /* Stop if priority has changed */
3603 if (skb->priority < priority)
3606 skb = skb_dequeue(&chan->data_q);
3608 hci_conn_enter_active_mode(chan->conn,
3609 bt_cb(skb)->force_active);
3611 hci_send_frame(hdev, skb);
3612 hdev->acl_last_tx = jiffies;
3618 /* Send pending SCO packets right away */
3619 hci_sched_sco(hdev);
3620 hci_sched_esco(hdev);
3624 if (cnt != hdev->acl_cnt)
3625 hci_prio_recalculate(hdev, ACL_LINK);
3628 static void hci_sched_acl_blk(struct hci_dev *hdev)
3630 unsigned int cnt = hdev->block_cnt;
3631 struct hci_chan *chan;
3632 struct sk_buff *skb;
3636 BT_DBG("%s", hdev->name);
3638 if (hdev->dev_type == HCI_AMP)
3643 __check_timeout(hdev, cnt, type);
3645 while (hdev->block_cnt > 0 &&
3646 (chan = hci_chan_sent(hdev, type, "e))) {
3647 u32 priority = (skb_peek(&chan->data_q))->priority;
3648 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3651 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3652 skb->len, skb->priority);
3654 /* Stop if priority has changed */
3655 if (skb->priority < priority)
3658 skb = skb_dequeue(&chan->data_q);
3660 blocks = __get_blocks(hdev, skb);
3661 if (blocks > hdev->block_cnt)
3664 hci_conn_enter_active_mode(chan->conn,
3665 bt_cb(skb)->force_active);
3667 hci_send_frame(hdev, skb);
3668 hdev->acl_last_tx = jiffies;
3670 hdev->block_cnt -= blocks;
3673 chan->sent += blocks;
3674 chan->conn->sent += blocks;
3678 if (cnt != hdev->block_cnt)
3679 hci_prio_recalculate(hdev, type);
3682 static void hci_sched_acl(struct hci_dev *hdev)
3684 BT_DBG("%s", hdev->name);
3686 /* Nothing to schedule if a primary (BR/EDR) controller has no ACL links */
3687 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3690 /* Nothing to schedule if an AMP controller has no AMP links */
3691 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3694 switch (hdev->flow_ctl_mode) {
3695 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3696 hci_sched_acl_pkt(hdev);
3699 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3700 hci_sched_acl_blk(hdev);
3705 static void hci_sched_le(struct hci_dev *hdev)
3707 struct hci_chan *chan;
3708 struct sk_buff *skb;
3709 int quote, cnt, tmp;
3711 BT_DBG("%s", hdev->name);
3713 if (!hci_conn_num(hdev, LE_LINK))
3716 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3718 __check_timeout(hdev, cnt, LE_LINK);
3721 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
3722 u32 priority = (skb_peek(&chan->data_q))->priority;
3723 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3724 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3725 skb->len, skb->priority);
3727 /* Stop if priority has changed */
3728 if (skb->priority < priority)
3731 skb = skb_dequeue(&chan->data_q);
3733 hci_send_frame(hdev, skb);
3734 hdev->le_last_tx = jiffies;
3740 /* Send pending SCO packets right away */
3741 hci_sched_sco(hdev);
3742 hci_sched_esco(hdev);
3749 hdev->acl_cnt = cnt;
3752 hci_prio_recalculate(hdev, LE_LINK);
3756 static void hci_sched_iso(struct hci_dev *hdev)
3758 struct hci_conn *conn;
3759 struct sk_buff *skb;
3762 BT_DBG("%s", hdev->name);
3764 if (!hci_conn_num(hdev, ISO_LINK))
3767 cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3768 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3769 while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, "e))) {
3770 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3771 BT_DBG("skb %p len %d", skb, skb->len);
3772 hci_send_frame(hdev, skb);
3775 if (conn->sent == ~0)
3782 static void hci_tx_work(struct work_struct *work)
3784 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3785 struct sk_buff *skb;
3787 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3788 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3790 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3791 /* Schedule queues and send stuff to HCI driver */
3792 hci_sched_sco(hdev);
3793 hci_sched_esco(hdev);
3794 hci_sched_iso(hdev);
3795 hci_sched_acl(hdev);
3799 /* Send next queued raw (unknown type) packet */
3800 while ((skb = skb_dequeue(&hdev->raw_q)))
3801 hci_send_frame(hdev, skb);
3804 /* ----- HCI RX task (incoming data processing) ----- */
3806 /* ACL data packet */
3807 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3809 struct hci_acl_hdr *hdr = (void *) skb->data;
3810 struct hci_conn *conn;
3811 __u16 handle, flags;
3813 skb_pull(skb, HCI_ACL_HDR_SIZE);
3815 handle = __le16_to_cpu(hdr->handle);
3816 flags = hci_flags(handle);
3817 handle = hci_handle(handle);
3819 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3822 hdev->stat.acl_rx++;
3825 conn = hci_conn_hash_lookup_handle(hdev, handle);
3826 hci_dev_unlock(hdev);
3829 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3831 /* Send to upper protocol */
3832 l2cap_recv_acldata(conn, skb, flags);
3835 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3842 /* SCO data packet */
3843 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3845 struct hci_sco_hdr *hdr = (void *) skb->data;
3846 struct hci_conn *conn;
3847 __u16 handle, flags;
3849 skb_pull(skb, HCI_SCO_HDR_SIZE);
3851 handle = __le16_to_cpu(hdr->handle);
3852 flags = hci_flags(handle);
3853 handle = hci_handle(handle);
3855 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3858 hdev->stat.sco_rx++;
3861 conn = hci_conn_hash_lookup_handle(hdev, handle);
3862 hci_dev_unlock(hdev);
3865 /* Send to upper protocol */
3866 bt_cb(skb)->sco.pkt_status = flags & 0x03;
3867 sco_recv_scodata(conn, skb);
3870 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3877 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3879 struct hci_iso_hdr *hdr;
3880 struct hci_conn *conn;
3881 __u16 handle, flags;
3883 hdr = skb_pull_data(skb, sizeof(*hdr));
3885 bt_dev_err(hdev, "ISO packet too small");
3889 handle = __le16_to_cpu(hdr->handle);
3890 flags = hci_flags(handle);
3891 handle = hci_handle(handle);
3893 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3897 conn = hci_conn_hash_lookup_handle(hdev, handle);
3898 hci_dev_unlock(hdev);
3901 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3906 /* Send to upper protocol */
3907 iso_recv(conn, skb, flags);
3914 static bool hci_req_is_complete(struct hci_dev *hdev)
3916 struct sk_buff *skb;
3918 skb = skb_peek(&hdev->cmd_q);
3922 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3925 static void hci_resend_last(struct hci_dev *hdev)
3927 struct hci_command_hdr *sent;
3928 struct sk_buff *skb;
3931 if (!hdev->sent_cmd)
3934 sent = (void *) hdev->sent_cmd->data;
3935 opcode = __le16_to_cpu(sent->opcode);
3936 if (opcode == HCI_OP_RESET)
3939 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3943 skb_queue_head(&hdev->cmd_q, skb);
3944 queue_work(hdev->workqueue, &hdev->cmd_work);
3947 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3948 hci_req_complete_t *req_complete,
3949 hci_req_complete_skb_t *req_complete_skb)
3951 struct sk_buff *skb;
3952 unsigned long flags;
3954 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3956 /* If the completed command doesn't match the last one that was
3957 * sent we need to do special handling of it.
3959 if (!hci_sent_cmd_data(hdev, opcode)) {
3960 /* Some CSR based controllers generate a spontaneous
3961 * reset complete event during init and any pending
3962 * command will never be completed. In such a case we
3963 * need to resend whatever was the last sent command. */
3966 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3967 hci_resend_last(hdev);
3972 /* If we reach this point this event matches the last command sent */
3973 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3975 /* If the command succeeded and there's still more commands in
3976 * this request the request is not yet complete.
3978 if (!status && !hci_req_is_complete(hdev))
3981 /* If this was the last command in a request the complete
3982 * callback would be found in hdev->sent_cmd instead of the
3983 * command queue (hdev->cmd_q).
3985 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
3986 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
3990 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
3991 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
3995 /* Remove all pending commands belonging to this request */
3996 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3997 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3998 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3999 __skb_queue_head(&hdev->cmd_q, skb);
4003 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4004 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4006 *req_complete = bt_cb(skb)->hci.req_complete;
4007 dev_kfree_skb_irq(skb);
4009 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4012 static void hci_rx_work(struct work_struct *work)
4014 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4015 struct sk_buff *skb;
4017 BT_DBG("%s", hdev->name);
4019 /* The kcov_remote functions are used to collect packet-parsing
4020 * coverage information from this background thread and to associate
4021 * that coverage with the syscall thread which originally injected
4022 * the packet. This helps with fuzzing the kernel.
4024 for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
4025 kcov_remote_start_common(skb_get_kcov_handle(skb));
4027 /* Send copy to monitor */
4028 hci_send_to_monitor(hdev, skb);
4030 if (atomic_read(&hdev->promisc)) {
4031 /* Send copy to the sockets */
4032 hci_send_to_sock(hdev, skb);
4035 /* If the device has been opened in HCI_USER_CHANNEL,
4036 * userspace has exclusive access to the device.
4037 * While the device is in HCI_INIT, we still need to pass
4038 * data packets on to the driver in order
4039 * to complete its setup().
4041 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4042 !test_bit(HCI_INIT, &hdev->flags)) {
4047 if (test_bit(HCI_INIT, &hdev->flags)) {
4048 /* Don't process data packets in this state. */
4049 switch (hci_skb_pkt_type(skb)) {
4050 case HCI_ACLDATA_PKT:
4051 case HCI_SCODATA_PKT:
4052 case HCI_ISODATA_PKT:
4059 switch (hci_skb_pkt_type(skb)) {
4061 BT_DBG("%s Event packet", hdev->name);
4062 hci_event_packet(hdev, skb);
4065 case HCI_ACLDATA_PKT:
4066 BT_DBG("%s ACL data packet", hdev->name);
4067 hci_acldata_packet(hdev, skb);
4070 case HCI_SCODATA_PKT:
4071 BT_DBG("%s SCO data packet", hdev->name);
4072 hci_scodata_packet(hdev, skb);
4075 case HCI_ISODATA_PKT:
4076 BT_DBG("%s ISO data packet", hdev->name);
4077 hci_isodata_packet(hdev, skb);
4087 static void hci_cmd_work(struct work_struct *work)
4089 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4090 struct sk_buff *skb;
4092 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4093 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4095 /* Send queued commands */
4096 if (atomic_read(&hdev->cmd_cnt)) {
4097 skb = skb_dequeue(&hdev->cmd_q);
4101 kfree_skb(hdev->sent_cmd);
4103 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4104 if (hdev->sent_cmd) {
4106 if (hci_req_status_pend(hdev))
4107 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
4108 atomic_dec(&hdev->cmd_cnt);
4110 res = hci_send_frame(hdev, skb);
4112 __hci_cmd_sync_cancel(hdev, -res);
4115 if (test_bit(HCI_RESET, &hdev->flags) ||
4116 hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4117 cancel_delayed_work(&hdev->cmd_timer);
4119 queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4123 skb_queue_head(&hdev->cmd_q, skb);
4124 queue_work(hdev->workqueue, &hdev->cmd_work);