2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/property.h>
33 #include <linux/suspend.h>
34 #include <linux/wait.h>
35 #include <asm/unaligned.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 #include <net/bluetooth/mgmt.h>
42 #include "hci_request.h"
43 #include "hci_debugfs.h"
48 #include "hci_codec.h"
50 static void hci_rx_work(struct work_struct *work);
51 static void hci_cmd_work(struct work_struct *work);
52 static void hci_tx_work(struct work_struct *work);
55 LIST_HEAD(hci_dev_list);
56 DEFINE_RWLOCK(hci_dev_list_lock);
58 /* HCI callback list */
59 LIST_HEAD(hci_cb_list);
60 DEFINE_MUTEX(hci_cb_list_lock);
62 /* HCI ID Numbering */
63 static DEFINE_IDA(hci_index_ida);
65 static int hci_scan_req(struct hci_request *req, unsigned long opt)
69 BT_DBG("%s %x", req->hdev->name, scan);
71 /* Inquiry and Page scans */
72 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
76 static int hci_auth_req(struct hci_request *req, unsigned long opt)
80 BT_DBG("%s %x", req->hdev->name, auth);
83 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
87 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
91 BT_DBG("%s %x", req->hdev->name, encrypt);
94 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
98 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
100 __le16 policy = cpu_to_le16(opt);
102 BT_DBG("%s %x", req->hdev->name, policy);
104 /* Default link policy */
105 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
109 /* Get HCI device by index.
110 * Device is held on return. */
111 struct hci_dev *hci_dev_get(int index)
113 struct hci_dev *hdev = NULL, *d;
120 read_lock(&hci_dev_list_lock);
121 list_for_each_entry(d, &hci_dev_list, list) {
122 if (d->id == index) {
123 hdev = hci_dev_hold(d);
127 read_unlock(&hci_dev_list_lock);
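/* Editorial sketch (not part of the upstream file): hci_dev_get() takes a
 * reference via hci_dev_hold(), so a caller is expected to balance it with
 * hci_dev_put() once it is done with the device, roughly:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		... use hdev under the held reference ...
 *		hci_dev_put(hdev);
 *	}
 */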
131 /* ---- Inquiry support ---- */
133 bool hci_discovery_active(struct hci_dev *hdev)
135 struct discovery_state *discov = &hdev->discovery;
137 switch (discov->state) {
138 case DISCOVERY_FINDING:
139 case DISCOVERY_RESOLVING:
147 void hci_discovery_set_state(struct hci_dev *hdev, int state)
149 int old_state = hdev->discovery.state;
151 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
153 if (old_state == state)
156 hdev->discovery.state = state;
159 case DISCOVERY_STOPPED:
160 hci_update_passive_scan(hdev);
162 if (old_state != DISCOVERY_STARTING)
163 mgmt_discovering(hdev, 0);
165 case DISCOVERY_STARTING:
167 case DISCOVERY_FINDING:
168 mgmt_discovering(hdev, 1);
170 case DISCOVERY_RESOLVING:
172 case DISCOVERY_STOPPING:
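/* Editorial sketch (not part of the upstream file): the discovery state
 * machine above is normally driven with hdev->lock held, e.g. when a
 * discovery procedure starts and later completes:
 *
 *	hci_dev_lock(hdev);
 *	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
 *	hci_dev_unlock(hdev);
 *
 *	... inquiry / LE scan runs ...
 *
 *	hci_dev_lock(hdev);
 *	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
 *	hci_dev_unlock(hdev);
 */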
177 void hci_inquiry_cache_flush(struct hci_dev *hdev)
179 struct discovery_state *cache = &hdev->discovery;
180 struct inquiry_entry *p, *n;
182 list_for_each_entry_safe(p, n, &cache->all, all) {
187 INIT_LIST_HEAD(&cache->unknown);
188 INIT_LIST_HEAD(&cache->resolve);
191 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
194 struct discovery_state *cache = &hdev->discovery;
195 struct inquiry_entry *e;
197 BT_DBG("cache %p, %pMR", cache, bdaddr);
199 list_for_each_entry(e, &cache->all, all) {
200 if (!bacmp(&e->data.bdaddr, bdaddr))
207 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
210 struct discovery_state *cache = &hdev->discovery;
211 struct inquiry_entry *e;
213 BT_DBG("cache %p, %pMR", cache, bdaddr);
215 list_for_each_entry(e, &cache->unknown, list) {
216 if (!bacmp(&e->data.bdaddr, bdaddr))
223 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
227 struct discovery_state *cache = &hdev->discovery;
228 struct inquiry_entry *e;
230 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
232 list_for_each_entry(e, &cache->resolve, list) {
233 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
235 if (!bacmp(&e->data.bdaddr, bdaddr))
242 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
243 struct inquiry_entry *ie)
245 struct discovery_state *cache = &hdev->discovery;
246 struct list_head *pos = &cache->resolve;
247 struct inquiry_entry *p;
251 list_for_each_entry(p, &cache->resolve, list) {
252 if (p->name_state != NAME_PENDING &&
253 abs(p->data.rssi) >= abs(ie->data.rssi))
258 list_add(&ie->list, pos);
261 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
264 struct discovery_state *cache = &hdev->discovery;
265 struct inquiry_entry *ie;
268 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
270 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
273 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
275 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
277 if (!ie->data.ssp_mode)
278 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
280 if (ie->name_state == NAME_NEEDED &&
281 data->rssi != ie->data.rssi) {
282 ie->data.rssi = data->rssi;
283 hci_inquiry_cache_update_resolve(hdev, ie);
289 /* Entry not in the cache. Add new one. */
290 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
292 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
296 list_add(&ie->all, &cache->all);
299 ie->name_state = NAME_KNOWN;
301 ie->name_state = NAME_NOT_KNOWN;
302 list_add(&ie->list, &cache->unknown);
306 if (name_known && ie->name_state != NAME_KNOWN &&
307 ie->name_state != NAME_PENDING) {
308 ie->name_state = NAME_KNOWN;
312 memcpy(&ie->data, data, sizeof(*data));
313 ie->timestamp = jiffies;
314 cache->timestamp = jiffies;
316 if (ie->name_state == NAME_NOT_KNOWN)
317 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
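/* Editorial sketch (not part of the upstream file): an inquiry result
 * handler typically fills a struct inquiry_data from the event payload
 * (called "info" here purely for illustration) and feeds it to the cache;
 * the returned flags, e.g. MGMT_DEV_FOUND_CONFIRM_NAME, are then forwarded
 * with the mgmt Device Found event.  The final argument indicates whether
 * the remote name is already known:
 *
 *	struct inquiry_data data;
 *	u32 flags;
 *
 *	bacpy(&data.bdaddr, &info->bdaddr);
 *	data.rssi     = info->rssi;
 *	data.ssp_mode = 0x00;
 *	... remaining fields copied from the event ...
 *
 *	flags = hci_inquiry_cache_update(hdev, &data, false);
 */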
323 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
325 struct discovery_state *cache = &hdev->discovery;
326 struct inquiry_info *info = (struct inquiry_info *) buf;
327 struct inquiry_entry *e;
330 list_for_each_entry(e, &cache->all, all) {
331 struct inquiry_data *data = &e->data;
336 bacpy(&info->bdaddr, &data->bdaddr);
337 info->pscan_rep_mode = data->pscan_rep_mode;
338 info->pscan_period_mode = data->pscan_period_mode;
339 info->pscan_mode = data->pscan_mode;
340 memcpy(info->dev_class, data->dev_class, 3);
341 info->clock_offset = data->clock_offset;
347 BT_DBG("cache %p, copied %d", cache, copied);
351 static int hci_inq_req(struct hci_request *req, unsigned long opt)
353 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
354 struct hci_dev *hdev = req->hdev;
355 struct hci_cp_inquiry cp;
357 BT_DBG("%s", hdev->name);
359 if (test_bit(HCI_INQUIRY, &hdev->flags))
363 memcpy(&cp.lap, &ir->lap, 3);
364 cp.length = ir->length;
365 cp.num_rsp = ir->num_rsp;
366 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
371 int hci_inquiry(void __user *arg)
373 __u8 __user *ptr = arg;
374 struct hci_inquiry_req ir;
375 struct hci_dev *hdev;
376 int err = 0, do_inquiry = 0, max_rsp;
380 if (copy_from_user(&ir, ptr, sizeof(ir)))
383 hdev = hci_dev_get(ir.dev_id);
387 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
392 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
397 if (hdev->dev_type != HCI_PRIMARY) {
402 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
407 /* Restrict maximum inquiry length to 60 seconds */
408 if (ir.length > 60) {
414 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
415 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
416 hci_inquiry_cache_flush(hdev);
419 hci_dev_unlock(hdev);
421 timeo = ir.length * msecs_to_jiffies(2000);
424 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
429 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
430 * cleared). If it is interrupted by a signal, return -EINTR.
432 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
433 TASK_INTERRUPTIBLE)) {
439 /* for unlimited number of responses we will use buffer with
442 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
444 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
445 * copy it to the user space.
447 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
454 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
455 hci_dev_unlock(hdev);
457 BT_DBG("num_rsp %d", ir.num_rsp);
459 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
461 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
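/* Editorial sketch (not part of the upstream file): from userspace this
 * ioctl is issued on a raw HCI socket with a buffer that starts with a
 * struct hci_inquiry_req and leaves room behind it for num_rsp
 * struct inquiry_info entries (this mirrors what libbluetooth's
 * hci_inquiry() helper does); the LAP below is the General Inquiry
 * Access Code:
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info    info[8];
 *	} buf = {
 *		.ir = {
 *			.dev_id  = 0,
 *			.flags   = IREQ_CACHE_FLUSH,
 *			.lap     = { 0x33, 0x8b, 0x9e },
 *			.length  = 8,
 *			.num_rsp = 8,
 *		},
 *	};
 *
 *	ioctl(dd, HCIINQUIRY, &buf);
 */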
474 static int hci_dev_do_open(struct hci_dev *hdev)
478 BT_DBG("%s %p", hdev->name, hdev);
480 hci_req_sync_lock(hdev);
482 ret = hci_dev_open_sync(hdev);
484 hci_req_sync_unlock(hdev);
488 /* ---- HCI ioctl helpers ---- */
490 int hci_dev_open(__u16 dev)
492 struct hci_dev *hdev;
495 hdev = hci_dev_get(dev);
499 /* Devices that are marked as unconfigured can only be powered
500 * up as user channel. Trying to bring them up as normal devices
501 will result in a failure. Only user channel operation is
504 * When this function is called for a user channel, the flag
505 * HCI_USER_CHANNEL will be set first before attempting to
508 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
509 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
514 /* We need to ensure that no other power on/off work is pending
515 * before proceeding to call hci_dev_do_open. This is
516 * particularly important if the setup procedure has not yet
519 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
520 cancel_delayed_work(&hdev->power_off);
522 /* After this call it is guaranteed that the setup procedure
523 * has finished. This means that error conditions like RFKILL
524 * or no valid public or static random address apply.
526 flush_workqueue(hdev->req_workqueue);
528 /* For controllers not using the management interface and that
529 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
530 * so that pairing works for them. Once the management interface
531 * is in use this bit will be cleared again and userspace has
532 * to explicitly enable it.
534 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
535 !hci_dev_test_flag(hdev, HCI_MGMT))
536 hci_dev_set_flag(hdev, HCI_BONDABLE);
538 err = hci_dev_do_open(hdev);
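/* Editorial sketch (not part of the upstream file): the legacy way to reach
 * hci_dev_open() from userspace is the HCIDEVUP ioctl on an HCI control
 * socket (modern userspace powers devices through the mgmt interface
 * instead):
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("hci0 up");
 */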
545 int hci_dev_do_close(struct hci_dev *hdev)
549 BT_DBG("%s %p", hdev->name, hdev);
551 hci_req_sync_lock(hdev);
553 err = hci_dev_close_sync(hdev);
555 hci_req_sync_unlock(hdev);
560 int hci_dev_close(__u16 dev)
562 struct hci_dev *hdev;
565 hdev = hci_dev_get(dev);
569 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
574 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
575 cancel_delayed_work(&hdev->power_off);
577 err = hci_dev_do_close(hdev);
584 static int hci_dev_do_reset(struct hci_dev *hdev)
588 BT_DBG("%s %p", hdev->name, hdev);
590 hci_req_sync_lock(hdev);
593 skb_queue_purge(&hdev->rx_q);
594 skb_queue_purge(&hdev->cmd_q);
596 /* Avoid potential lockdep warnings from the *_flush() calls by
597 * ensuring the workqueue is empty up front.
599 drain_workqueue(hdev->workqueue);
602 hci_inquiry_cache_flush(hdev);
603 hci_conn_hash_flush(hdev);
604 hci_dev_unlock(hdev);
609 atomic_set(&hdev->cmd_cnt, 1);
610 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
612 ret = hci_reset_sync(hdev);
614 hci_req_sync_unlock(hdev);
618 int hci_dev_reset(__u16 dev)
620 struct hci_dev *hdev;
623 hdev = hci_dev_get(dev);
627 if (!test_bit(HCI_UP, &hdev->flags)) {
632 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
637 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
642 err = hci_dev_do_reset(hdev);
649 int hci_dev_reset_stat(__u16 dev)
651 struct hci_dev *hdev;
654 hdev = hci_dev_get(dev);
658 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
663 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
668 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
675 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
677 bool conn_changed, discov_changed;
679 BT_DBG("%s scan 0x%02x", hdev->name, scan);
681 if ((scan & SCAN_PAGE))
682 conn_changed = !hci_dev_test_and_set_flag(hdev,
685 conn_changed = hci_dev_test_and_clear_flag(hdev,
688 if ((scan & SCAN_INQUIRY)) {
689 discov_changed = !hci_dev_test_and_set_flag(hdev,
692 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
693 discov_changed = hci_dev_test_and_clear_flag(hdev,
697 if (!hci_dev_test_flag(hdev, HCI_MGMT))
700 if (conn_changed || discov_changed) {
701 /* In case this was disabled through mgmt */
702 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
704 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
705 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
707 mgmt_new_settings(hdev);
711 int hci_dev_cmd(unsigned int cmd, void __user *arg)
713 struct hci_dev *hdev;
714 struct hci_dev_req dr;
717 if (copy_from_user(&dr, arg, sizeof(dr)))
720 hdev = hci_dev_get(dr.dev_id);
724 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
729 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
734 if (hdev->dev_type != HCI_PRIMARY) {
739 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
746 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
747 HCI_INIT_TIMEOUT, NULL);
751 if (!lmp_encrypt_capable(hdev)) {
756 if (!test_bit(HCI_AUTH, &hdev->flags)) {
757 /* Auth must be enabled first */
758 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
759 HCI_INIT_TIMEOUT, NULL);
764 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
765 HCI_INIT_TIMEOUT, NULL);
769 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
770 HCI_INIT_TIMEOUT, NULL);
772 /* Ensure that the connectable and discoverable states
773 * get correctly modified as this was a non-mgmt change.
776 hci_update_passive_scan_state(hdev, dr.dev_opt);
780 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
781 HCI_INIT_TIMEOUT, NULL);
785 hdev->link_mode = ((__u16) dr.dev_opt) &
786 (HCI_LM_MASTER | HCI_LM_ACCEPT);
790 if (hdev->pkt_type == (__u16) dr.dev_opt)
793 hdev->pkt_type = (__u16) dr.dev_opt;
794 mgmt_phy_configuration_changed(hdev, NULL);
798 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
799 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
803 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
804 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
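/* Editorial sketch (not part of the upstream file): each of the HCISET*
 * cases above takes a struct hci_dev_req, e.g. making a device connectable
 * and discoverable the legacy way by enabling page and inquiry scan:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	ioctl(ctl, HCISETSCAN, &dr);
 */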
817 int hci_get_dev_list(void __user *arg)
819 struct hci_dev *hdev;
820 struct hci_dev_list_req *dl;
821 struct hci_dev_req *dr;
822 int n = 0, size, err;
825 if (get_user(dev_num, (__u16 __user *) arg))
828 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
831 size = sizeof(*dl) + dev_num * sizeof(*dr);
833 dl = kzalloc(size, GFP_KERNEL);
839 read_lock(&hci_dev_list_lock);
840 list_for_each_entry(hdev, &hci_dev_list, list) {
841 unsigned long flags = hdev->flags;
843 /* When the auto-off is configured it means the transport
844 * is running, but in that case still indicate that the
845 * device is actually down.
847 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
848 flags &= ~BIT(HCI_UP);
850 (dr + n)->dev_id = hdev->id;
851 (dr + n)->dev_opt = flags;
856 read_unlock(&hci_dev_list_lock);
859 size = sizeof(*dl) + n * sizeof(*dr);
861 err = copy_to_user(arg, dl, size);
864 return err ? -EFAULT : 0;
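/* Editorial sketch (not part of the upstream file): HCIGETDEVLIST expects a
 * buffer whose leading dev_num holds the capacity on input and the number
 * of returned entries on output, roughly:
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = calloc(1, sizeof(*dl) + 16 * sizeof(struct hci_dev_req));
 *	dl->dev_num = 16;
 *
 *	if (!ioctl(ctl, HCIGETDEVLIST, dl))
 *		printf("%u devices\n", dl->dev_num);
 */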
867 int hci_get_dev_info(void __user *arg)
869 struct hci_dev *hdev;
870 struct hci_dev_info di;
874 if (copy_from_user(&di, arg, sizeof(di)))
877 hdev = hci_dev_get(di.dev_id);
881 /* When the auto-off is configured it means the transport
882 * is running, but in that case still indicate that the
883 * device is actually down.
885 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
886 flags = hdev->flags & ~BIT(HCI_UP);
890 strcpy(di.name, hdev->name);
891 di.bdaddr = hdev->bdaddr;
892 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
894 di.pkt_type = hdev->pkt_type;
895 if (lmp_bredr_capable(hdev)) {
896 di.acl_mtu = hdev->acl_mtu;
897 di.acl_pkts = hdev->acl_pkts;
898 di.sco_mtu = hdev->sco_mtu;
899 di.sco_pkts = hdev->sco_pkts;
901 di.acl_mtu = hdev->le_mtu;
902 di.acl_pkts = hdev->le_pkts;
906 di.link_policy = hdev->link_policy;
907 di.link_mode = hdev->link_mode;
909 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
910 memcpy(&di.features, &hdev->features, sizeof(di.features));
912 if (copy_to_user(arg, &di, sizeof(di)))
920 /* ---- Interface to HCI drivers ---- */
922 static int hci_rfkill_set_block(void *data, bool blocked)
924 struct hci_dev *hdev = data;
926 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
928 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
932 hci_dev_set_flag(hdev, HCI_RFKILLED);
933 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
934 !hci_dev_test_flag(hdev, HCI_CONFIG))
935 hci_dev_do_close(hdev);
937 hci_dev_clear_flag(hdev, HCI_RFKILLED);
943 static const struct rfkill_ops hci_rfkill_ops = {
944 .set_block = hci_rfkill_set_block,
947 static void hci_power_on(struct work_struct *work)
949 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
952 BT_DBG("%s", hdev->name);
954 if (test_bit(HCI_UP, &hdev->flags) &&
955 hci_dev_test_flag(hdev, HCI_MGMT) &&
956 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
957 cancel_delayed_work(&hdev->power_off);
958 err = hci_powered_update_sync(hdev);
959 mgmt_power_on(hdev, err);
963 err = hci_dev_do_open(hdev);
966 mgmt_set_powered_failed(hdev, err);
967 hci_dev_unlock(hdev);
971 /* During the HCI setup phase, a few error conditions are
972 * ignored and they need to be checked now. If they are still
973 * valid, it is important to turn the device back off.
975 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
976 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
977 (hdev->dev_type == HCI_PRIMARY &&
978 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
979 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
980 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
981 hci_dev_do_close(hdev);
982 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
983 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
984 HCI_AUTO_OFF_TIMEOUT);
987 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
988 /* For unconfigured devices, set the HCI_RAW flag
989 * so that userspace can easily identify them.
991 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
992 set_bit(HCI_RAW, &hdev->flags);
994 /* For fully configured devices, this will send
995 * the Index Added event. For unconfigured devices,
996 * it will send Unconfigured Index Added event.
998 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
999 * and no event will be sent.
1001 mgmt_index_added(hdev);
1002 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
1003 /* Now that the controller is configured, it
1004 * is important to clear the HCI_RAW flag.
1006 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1007 clear_bit(HCI_RAW, &hdev->flags);
1009 /* Powering on the controller with HCI_CONFIG set only
1010 * happens with the transition from unconfigured to
1011 * configured. This will send the Index Added event.
1013 mgmt_index_added(hdev);
1017 static void hci_power_off(struct work_struct *work)
1019 struct hci_dev *hdev = container_of(work, struct hci_dev,
1022 BT_DBG("%s", hdev->name);
1024 hci_dev_do_close(hdev);
1027 static void hci_error_reset(struct work_struct *work)
1029 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1031 BT_DBG("%s", hdev->name);
1034 hdev->hw_error(hdev, hdev->hw_error_code);
1036 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1038 if (hci_dev_do_close(hdev))
1041 hci_dev_do_open(hdev);
1044 void hci_uuids_clear(struct hci_dev *hdev)
1046 struct bt_uuid *uuid, *tmp;
1048 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1049 list_del(&uuid->list);
1054 void hci_link_keys_clear(struct hci_dev *hdev)
1056 struct link_key *key;
1058 list_for_each_entry(key, &hdev->link_keys, list) {
1059 list_del_rcu(&key->list);
1060 kfree_rcu(key, rcu);
1064 void hci_smp_ltks_clear(struct hci_dev *hdev)
1068 list_for_each_entry(k, &hdev->long_term_keys, list) {
1069 list_del_rcu(&k->list);
1074 void hci_smp_irks_clear(struct hci_dev *hdev)
1078 list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
1079 list_del_rcu(&k->list);
1084 void hci_blocked_keys_clear(struct hci_dev *hdev)
1086 struct blocked_key *b;
1088 list_for_each_entry(b, &hdev->blocked_keys, list) {
1089 list_del_rcu(&b->list);
1094 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1096 bool blocked = false;
1097 struct blocked_key *b;
1100 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1101 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1111 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1116 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1117 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1120 if (hci_is_blocked_key(hdev,
1121 HCI_BLOCKED_KEY_TYPE_LINKKEY,
1123 bt_dev_warn_ratelimited(hdev,
1124 "Link key blocked for %pMR",
1137 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1138 u8 key_type, u8 old_key_type)
1141 if (key_type < 0x03)
1144 /* Debug keys are insecure so don't store them persistently */
1145 if (key_type == HCI_LK_DEBUG_COMBINATION)
1148 /* Changed combination key and there's no previous one */
1149 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1152 /* Security mode 3 case */
1156 /* BR/EDR key derived using SC from an LE link */
1157 if (conn->type == LE_LINK)
1160 /* Neither local nor remote side had no-bonding as requirement */
1161 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1164 /* Local side had dedicated bonding as requirement */
1165 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1168 /* Remote side had dedicated bonding as requirement */
1169 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1172 /* If none of the above criteria match, then don't store the key
1177 static u8 ltk_role(u8 type)
1179 if (type == SMP_LTK)
1180 return HCI_ROLE_MASTER;
1182 return HCI_ROLE_SLAVE;
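/* Editorial sketch (not part of the upstream file): the role derived above
 * is what hci_find_ltk() matches against for legacy (non-SC) keys; a lookup
 * for an existing connection typically passes that connection's own role:
 *
 *	key = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
 */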
1185 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1186 u8 addr_type, u8 role)
1191 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1192 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1195 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1198 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1200 bt_dev_warn_ratelimited(hdev,
1201 "LTK blocked for %pMR",
1214 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1216 struct smp_irk *irk_to_return = NULL;
1217 struct smp_irk *irk;
1220 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1221 if (!bacmp(&irk->rpa, rpa)) {
1222 irk_to_return = irk;
1227 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1228 if (smp_irk_matches(hdev, irk->val, rpa)) {
1229 bacpy(&irk->rpa, rpa);
1230 irk_to_return = irk;
1236 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1237 irk_to_return->val)) {
1238 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1239 &irk_to_return->bdaddr);
1240 irk_to_return = NULL;
1245 return irk_to_return;
1248 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1251 struct smp_irk *irk_to_return = NULL;
1252 struct smp_irk *irk;
1254 /* Identity Address must be public or static random */
1255 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1259 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1260 if (addr_type == irk->addr_type &&
1261 bacmp(bdaddr, &irk->bdaddr) == 0) {
1262 irk_to_return = irk;
1269 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1270 irk_to_return->val)) {
1271 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1272 &irk_to_return->bdaddr);
1273 irk_to_return = NULL;
1278 return irk_to_return;
1281 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1282 bdaddr_t *bdaddr, u8 *val, u8 type,
1283 u8 pin_len, bool *persistent)
1285 struct link_key *key, *old_key;
1288 old_key = hci_find_link_key(hdev, bdaddr);
1290 old_key_type = old_key->type;
1293 old_key_type = conn ? conn->key_type : 0xff;
1294 key = kzalloc(sizeof(*key), GFP_KERNEL);
1297 list_add_rcu(&key->list, &hdev->link_keys);
1300 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1302 /* Some buggy controller combinations generate a changed
1303 * combination key for legacy pairing even when there's no
1305 if (type == HCI_LK_CHANGED_COMBINATION &&
1306 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1307 type = HCI_LK_COMBINATION;
1309 conn->key_type = type;
1312 bacpy(&key->bdaddr, bdaddr);
1313 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1314 key->pin_len = pin_len;
1316 if (type == HCI_LK_CHANGED_COMBINATION)
1317 key->type = old_key_type;
1322 *persistent = hci_persistent_key(hdev, conn, type,
1328 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1329 u8 addr_type, u8 type, u8 authenticated,
1330 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1332 struct smp_ltk *key, *old_key;
1333 u8 role = ltk_role(type);
1335 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1339 key = kzalloc(sizeof(*key), GFP_KERNEL);
1342 list_add_rcu(&key->list, &hdev->long_term_keys);
1345 bacpy(&key->bdaddr, bdaddr);
1346 key->bdaddr_type = addr_type;
1347 memcpy(key->val, tk, sizeof(key->val));
1348 key->authenticated = authenticated;
1351 key->enc_size = enc_size;
1357 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1358 u8 addr_type, u8 val[16], bdaddr_t *rpa)
1360 struct smp_irk *irk;
1362 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1364 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1368 bacpy(&irk->bdaddr, bdaddr);
1369 irk->addr_type = addr_type;
1371 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1374 memcpy(irk->val, val, 16);
1375 bacpy(&irk->rpa, rpa);
1380 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1382 struct link_key *key;
1384 key = hci_find_link_key(hdev, bdaddr);
1388 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1390 list_del_rcu(&key->list);
1391 kfree_rcu(key, rcu);
1396 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1401 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1402 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1405 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1407 list_del_rcu(&k->list);
1412 return removed ? 0 : -ENOENT;
1415 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1419 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
1420 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1423 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1425 list_del_rcu(&k->list);
1430 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1433 struct smp_irk *irk;
1436 if (type == BDADDR_BREDR) {
1437 if (hci_find_link_key(hdev, bdaddr))
1442 /* Convert to HCI addr type which struct smp_ltk uses */
1443 if (type == BDADDR_LE_PUBLIC)
1444 addr_type = ADDR_LE_DEV_PUBLIC;
1446 addr_type = ADDR_LE_DEV_RANDOM;
1448 irk = hci_get_irk(hdev, bdaddr, addr_type);
1450 bdaddr = &irk->bdaddr;
1451 addr_type = irk->addr_type;
1455 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1456 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1466 /* HCI command timer function */
1467 static void hci_cmd_timeout(struct work_struct *work)
1469 struct hci_dev *hdev = container_of(work, struct hci_dev,
1472 if (hdev->sent_cmd) {
1473 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1474 u16 opcode = __le16_to_cpu(sent->opcode);
1476 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1478 bt_dev_err(hdev, "command tx timeout");
1481 if (hdev->cmd_timeout)
1482 hdev->cmd_timeout(hdev);
1484 atomic_set(&hdev->cmd_cnt, 1);
1485 queue_work(hdev->workqueue, &hdev->cmd_work);
1488 /* HCI ncmd timer function */
1489 static void hci_ncmd_timeout(struct work_struct *work)
1491 struct hci_dev *hdev = container_of(work, struct hci_dev,
1494 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1496 /* During HCI_INIT phase no events can be injected if the ncmd timer
1497 * triggers since the procedure has its own timeout handling.
1499 if (test_bit(HCI_INIT, &hdev->flags))
1502 /* This is an irrecoverable state, so inject a hardware error event */
1503 hci_reset_dev(hdev);
1506 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1507 bdaddr_t *bdaddr, u8 bdaddr_type)
1509 struct oob_data *data;
1511 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1512 if (bacmp(bdaddr, &data->bdaddr) != 0)
1514 if (data->bdaddr_type != bdaddr_type)
1522 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1525 struct oob_data *data;
1527 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1531 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1533 list_del(&data->list);
1539 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1541 struct oob_data *data, *n;
1543 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1544 list_del(&data->list);
1549 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1550 u8 bdaddr_type, u8 *hash192, u8 *rand192,
1551 u8 *hash256, u8 *rand256)
1553 struct oob_data *data;
1555 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1557 data = kmalloc(sizeof(*data), GFP_KERNEL);
1561 bacpy(&data->bdaddr, bdaddr);
1562 data->bdaddr_type = bdaddr_type;
1563 list_add(&data->list, &hdev->remote_oob_data);
1566 if (hash192 && rand192) {
1567 memcpy(data->hash192, hash192, sizeof(data->hash192));
1568 memcpy(data->rand192, rand192, sizeof(data->rand192));
1569 if (hash256 && rand256)
1570 data->present = 0x03;
1572 memset(data->hash192, 0, sizeof(data->hash192));
1573 memset(data->rand192, 0, sizeof(data->rand192));
1574 if (hash256 && rand256)
1575 data->present = 0x02;
1577 data->present = 0x00;
1580 if (hash256 && rand256) {
1581 memcpy(data->hash256, hash256, sizeof(data->hash256));
1582 memcpy(data->rand256, rand256, sizeof(data->rand256));
1584 memset(data->hash256, 0, sizeof(data->hash256));
1585 memset(data->rand256, 0, sizeof(data->rand256));
1586 if (hash192 && rand192)
1587 data->present = 0x01;
1590 BT_DBG("%s for %pMR", hdev->name, bdaddr);
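/* Editorial note (not part of the upstream file): data->present ends up as
 * a small bitmask -- 0x01 means only the P-192 hash/rand are valid, 0x02
 * only the P-256 values, 0x03 both and 0x00 neither.
 */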
1595 /* This function requires the caller holds hdev->lock */
1596 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1598 struct adv_info *adv_instance;
1600 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1601 if (adv_instance->instance == instance)
1602 return adv_instance;
1608 /* This function requires the caller holds hdev->lock */
1609 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1611 struct adv_info *cur_instance;
1613 cur_instance = hci_find_adv_instance(hdev, instance);
1617 if (cur_instance == list_last_entry(&hdev->adv_instances,
1618 struct adv_info, list))
1619 return list_first_entry(&hdev->adv_instances,
1620 struct adv_info, list);
1622 return list_next_entry(cur_instance, list);
1625 /* This function requires the caller holds hdev->lock */
1626 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1628 struct adv_info *adv_instance;
1630 adv_instance = hci_find_adv_instance(hdev, instance);
1634 BT_DBG("%s removing %dMR", hdev->name, instance);
1636 if (hdev->cur_adv_instance == instance) {
1637 if (hdev->adv_instance_timeout) {
1638 cancel_delayed_work(&hdev->adv_instance_expire);
1639 hdev->adv_instance_timeout = 0;
1641 hdev->cur_adv_instance = 0x00;
1644 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1646 list_del(&adv_instance->list);
1647 kfree(adv_instance);
1649 hdev->adv_instance_cnt--;
1654 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1656 struct adv_info *adv_instance, *n;
1658 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1659 adv_instance->rpa_expired = rpa_expired;
1662 /* This function requires the caller holds hdev->lock */
1663 void hci_adv_instances_clear(struct hci_dev *hdev)
1665 struct adv_info *adv_instance, *n;
1667 if (hdev->adv_instance_timeout) {
1668 cancel_delayed_work(&hdev->adv_instance_expire);
1669 hdev->adv_instance_timeout = 0;
1672 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1673 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1674 list_del(&adv_instance->list);
1675 kfree(adv_instance);
1678 hdev->adv_instance_cnt = 0;
1679 hdev->cur_adv_instance = 0x00;
1682 static void adv_instance_rpa_expired(struct work_struct *work)
1684 struct adv_info *adv_instance = container_of(work, struct adv_info,
1685 rpa_expired_cb.work);
1689 adv_instance->rpa_expired = true;
1692 /* This function requires the caller holds hdev->lock */
1693 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
1694 u16 adv_data_len, u8 *adv_data,
1695 u16 scan_rsp_len, u8 *scan_rsp_data,
1696 u16 timeout, u16 duration, s8 tx_power,
1697 u32 min_interval, u32 max_interval)
1699 struct adv_info *adv_instance;
1701 adv_instance = hci_find_adv_instance(hdev, instance);
1703 memset(adv_instance->adv_data, 0,
1704 sizeof(adv_instance->adv_data));
1705 memset(adv_instance->scan_rsp_data, 0,
1706 sizeof(adv_instance->scan_rsp_data));
1708 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1709 instance < 1 || instance > hdev->le_num_of_adv_sets)
1712 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
1716 adv_instance->pending = true;
1717 adv_instance->instance = instance;
1718 list_add(&adv_instance->list, &hdev->adv_instances);
1719 hdev->adv_instance_cnt++;
1722 adv_instance->flags = flags;
1723 adv_instance->adv_data_len = adv_data_len;
1724 adv_instance->scan_rsp_len = scan_rsp_len;
1725 adv_instance->min_interval = min_interval;
1726 adv_instance->max_interval = max_interval;
1727 adv_instance->tx_power = tx_power;
1730 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
1733 memcpy(adv_instance->scan_rsp_data,
1734 scan_rsp_data, scan_rsp_len);
1736 adv_instance->timeout = timeout;
1737 adv_instance->remaining_time = timeout;
1740 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
1742 adv_instance->duration = duration;
1744 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
1745 adv_instance_rpa_expired);
1747 BT_DBG("%s for %dMR", hdev->name, instance);
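/* Editorial sketch (not part of the upstream file): with hdev->lock held, a
 * caller registering a simple instance that only carries an AD Flags field
 * (LE General Discoverable) could do roughly:
 *
 *	u8 ad[] = { 0x02, 0x01, 0x06 };
 *
 *	hci_add_adv_instance(hdev, 0x01, 0, sizeof(ad), ad, 0, NULL,
 *			     0, 0, HCI_TX_POWER_INVALID,
 *			     hdev->le_adv_min_interval,
 *			     hdev->le_adv_max_interval);
 */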
1752 /* This function requires the caller holds hdev->lock */
1753 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1754 u16 adv_data_len, u8 *adv_data,
1755 u16 scan_rsp_len, u8 *scan_rsp_data)
1757 struct adv_info *adv_instance;
1759 adv_instance = hci_find_adv_instance(hdev, instance);
1761 /* If advertisement doesn't exist, we can't modify its data */
1766 memset(adv_instance->adv_data, 0,
1767 sizeof(adv_instance->adv_data));
1768 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
1769 adv_instance->adv_data_len = adv_data_len;
1773 memset(adv_instance->scan_rsp_data, 0,
1774 sizeof(adv_instance->scan_rsp_data));
1775 memcpy(adv_instance->scan_rsp_data,
1776 scan_rsp_data, scan_rsp_len);
1777 adv_instance->scan_rsp_len = scan_rsp_len;
1783 /* This function requires the caller holds hdev->lock */
1784 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1787 struct adv_info *adv;
1789 if (instance == 0x00) {
1790 /* Instance 0 always manages the "Tx Power" and "Flags"
1793 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1795 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1796 * corresponds to the "connectable" instance flag.
1798 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1799 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1801 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1802 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1803 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1804 flags |= MGMT_ADV_FLAG_DISCOV;
1809 adv = hci_find_adv_instance(hdev, instance);
1811 /* Return 0 when we got an invalid instance identifier. */
1818 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1820 struct adv_info *adv;
1822 /* Instance 0x00 always sets the local name */
1823 if (instance == 0x00)
1826 adv = hci_find_adv_instance(hdev, instance);
1830 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1831 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1834 return adv->scan_rsp_len ? true : false;
1837 /* This function requires the caller holds hdev->lock */
1838 void hci_adv_monitors_clear(struct hci_dev *hdev)
1840 struct adv_monitor *monitor;
1843 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1844 hci_free_adv_monitor(hdev, monitor);
1846 idr_destroy(&hdev->adv_monitors_idr);
1849 /* Frees the monitor structure and does some bookkeeping.
1850 * This function requires the caller holds hdev->lock.
1852 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1854 struct adv_pattern *pattern;
1855 struct adv_pattern *tmp;
1860 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1861 list_del(&pattern->list);
1865 if (monitor->handle)
1866 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1868 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1869 hdev->adv_monitors_cnt--;
1870 mgmt_adv_monitor_removed(hdev, monitor->handle);
1876 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
1878 return mgmt_add_adv_patterns_monitor_complete(hdev, status);
1881 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
1883 return mgmt_remove_adv_monitor_complete(hdev, status);
1886 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1887 * also attempts to forward the request to the controller.
1888 * Returns true if request is forwarded (result is pending), false otherwise.
1889 * This function requires the caller holds hdev->lock.
1891 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
1894 int min, max, handle;
1903 min = HCI_MIN_ADV_MONITOR_HANDLE;
1904 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1905 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1912 monitor->handle = handle;
1914 if (!hdev_is_powered(hdev))
1917 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1918 case HCI_ADV_MONITOR_EXT_NONE:
1919 hci_update_passive_scan(hdev);
1920 bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
1921 /* Message was not forwarded to controller - not an error */
1923 case HCI_ADV_MONITOR_EXT_MSFT:
1924 *err = msft_add_monitor_pattern(hdev, monitor);
1925 bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
1933 /* Attempts to tell the controller and free the monitor. If somehow the
1934 * controller doesn't have a corresponding handle, remove anyway.
1935 * Returns true if request is forwarded (result is pending), false otherwise.
1936 * This function requires the caller holds hdev->lock.
1938 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
1939 struct adv_monitor *monitor,
1940 u16 handle, int *err)
1944 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1945 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1947 case HCI_ADV_MONITOR_EXT_MSFT:
1948 *err = msft_remove_monitor(hdev, monitor, handle);
1952 /* In case no matching handle registered, just free the monitor */
1953 if (*err == -ENOENT)
1959 if (*err == -ENOENT)
1960 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1962 hci_free_adv_monitor(hdev, monitor);
1968 /* Returns true if request is forwarded (result is pending), false otherwise.
1969 * This function requires the caller holds hdev->lock.
1971 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
1973 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1981 pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
1982 if (!*err && !pending)
1983 hci_update_passive_scan(hdev);
1985 bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
1986 hdev->name, handle, *err, pending ? "" : "not ");
1991 /* Returns true if request is forwarded (result is pending), false otherwise.
1992 * This function requires the caller holds hdev->lock.
1994 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
1996 struct adv_monitor *monitor;
1997 int idr_next_id = 0;
1998 bool pending = false;
1999 bool update = false;
2003 while (!*err && !pending) {
2004 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2008 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
2010 if (!*err && !pending)
2015 hci_update_passive_scan(hdev);
2017 bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
2018 hdev->name, *err, pending ? "" : "not ");
2023 /* This function requires the caller holds hdev->lock */
2024 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2026 return !idr_is_empty(&hdev->adv_monitors_idr);
2029 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2031 if (msft_monitor_supported(hdev))
2032 return HCI_ADV_MONITOR_EXT_MSFT;
2034 return HCI_ADV_MONITOR_EXT_NONE;
2037 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2038 bdaddr_t *bdaddr, u8 type)
2040 struct bdaddr_list *b;
2042 list_for_each_entry(b, bdaddr_list, list) {
2043 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2050 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2051 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2054 struct bdaddr_list_with_irk *b;
2056 list_for_each_entry(b, bdaddr_list, list) {
2057 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2064 struct bdaddr_list_with_flags *
2065 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2066 bdaddr_t *bdaddr, u8 type)
2068 struct bdaddr_list_with_flags *b;
2070 list_for_each_entry(b, bdaddr_list, list) {
2071 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2078 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2080 struct bdaddr_list *b, *n;
2082 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2088 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2090 struct bdaddr_list *entry;
2092 if (!bacmp(bdaddr, BDADDR_ANY))
2095 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2098 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2102 bacpy(&entry->bdaddr, bdaddr);
2103 entry->bdaddr_type = type;
2105 list_add(&entry->list, list);
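/* Editorial sketch (not part of the upstream file): the bdaddr list helpers
 * above back the various accept/reject lists hanging off hdev, e.g.:
 *
 *	hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
 *
 *	if (hci_bdaddr_list_lookup(&hdev->reject_list, &bdaddr, BDADDR_BREDR))
 *		hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
 */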
2110 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2111 u8 type, u8 *peer_irk, u8 *local_irk)
2113 struct bdaddr_list_with_irk *entry;
2115 if (!bacmp(bdaddr, BDADDR_ANY))
2118 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2121 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2125 bacpy(&entry->bdaddr, bdaddr);
2126 entry->bdaddr_type = type;
2129 memcpy(entry->peer_irk, peer_irk, 16);
2132 memcpy(entry->local_irk, local_irk, 16);
2134 list_add(&entry->list, list);
2139 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2142 struct bdaddr_list_with_flags *entry;
2144 if (!bacmp(bdaddr, BDADDR_ANY))
2147 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2150 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2154 bacpy(&entry->bdaddr, bdaddr);
2155 entry->bdaddr_type = type;
2156 bitmap_from_u64(entry->flags, flags);
2158 list_add(&entry->list, list);
2163 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2165 struct bdaddr_list *entry;
2167 if (!bacmp(bdaddr, BDADDR_ANY)) {
2168 hci_bdaddr_list_clear(list);
2172 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2176 list_del(&entry->list);
2182 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2185 struct bdaddr_list_with_irk *entry;
2187 if (!bacmp(bdaddr, BDADDR_ANY)) {
2188 hci_bdaddr_list_clear(list);
2192 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2196 list_del(&entry->list);
2202 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2205 struct bdaddr_list_with_flags *entry;
2207 if (!bacmp(bdaddr, BDADDR_ANY)) {
2208 hci_bdaddr_list_clear(list);
2212 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2216 list_del(&entry->list);
2222 /* This function requires the caller holds hdev->lock */
2223 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2224 bdaddr_t *addr, u8 addr_type)
2226 struct hci_conn_params *params;
2228 list_for_each_entry(params, &hdev->le_conn_params, list) {
2229 if (bacmp(¶ms->addr, addr) == 0 &&
2230 params->addr_type == addr_type) {
2238 /* This function requires the caller holds hdev->lock */
2239 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2240 bdaddr_t *addr, u8 addr_type)
2242 struct hci_conn_params *param;
2244 list_for_each_entry(param, list, action) {
2245 if (bacmp(¶m->addr, addr) == 0 &&
2246 param->addr_type == addr_type)
2253 /* This function requires the caller holds hdev->lock */
2254 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2255 bdaddr_t *addr, u8 addr_type)
2257 struct hci_conn_params *params;
2259 params = hci_conn_params_lookup(hdev, addr, addr_type);
2263 params = kzalloc(sizeof(*params), GFP_KERNEL);
2265 bt_dev_err(hdev, "out of memory");
2269 bacpy(¶ms->addr, addr);
2270 params->addr_type = addr_type;
2272 list_add(¶ms->list, &hdev->le_conn_params);
2273 INIT_LIST_HEAD(¶ms->action);
2275 params->conn_min_interval = hdev->le_conn_min_interval;
2276 params->conn_max_interval = hdev->le_conn_max_interval;
2277 params->conn_latency = hdev->le_conn_latency;
2278 params->supervision_timeout = hdev->le_supv_timeout;
2279 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2281 BT_DBG("addr %pMR (type %u)", addr, addr_type);
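/* Editorial sketch (not part of the upstream file): with hdev->lock held,
 * marking a peer for background auto-connection looks roughly like the
 * following (the mgmt code additionally links params->action into
 * hdev->pend_le_conns):
 *
 *	struct hci_conn_params *params;
 *
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (params) {
 *		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *		hci_update_passive_scan(hdev);
 *	}
 */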
2286 static void hci_conn_params_free(struct hci_conn_params *params)
2289 hci_conn_drop(params->conn);
2290 hci_conn_put(params->conn);
2293 list_del(¶ms->action);
2294 list_del(¶ms->list);
2298 /* This function requires the caller holds hdev->lock */
2299 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2301 struct hci_conn_params *params;
2303 params = hci_conn_params_lookup(hdev, addr, addr_type);
2307 hci_conn_params_free(params);
2309 hci_update_passive_scan(hdev);
2311 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2314 /* This function requires the caller holds hdev->lock */
2315 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2317 struct hci_conn_params *params, *tmp;
2319 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2320 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2323 /* If we are trying to establish a one-time connection to a
2324 * disabled device, leave the params but mark them as explicit (one-shot).
2326 if (params->explicit_connect) {
2327 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2331 list_del(¶ms->list);
2335 BT_DBG("All LE disabled connection parameters were removed");
2338 /* This function requires the caller holds hdev->lock */
2339 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2341 struct hci_conn_params *params, *tmp;
2343 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2344 hci_conn_params_free(params);
2346 BT_DBG("All LE connection parameters were removed");
2349 /* Copy the Identity Address of the controller.
2351 * If the controller has a public BD_ADDR, then by default use that one.
2352 * If this is an LE-only controller without a public address, default to
2353 * the static random address.
2355 * For debugging purposes it is possible to force controllers with a
2356 * public address to use the static random address instead.
2358 * In case BR/EDR has been disabled on a dual-mode controller and
2359 * userspace has configured a static address, then that address
2360 * becomes the identity address instead of the public BR/EDR address.
2362 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2365 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2366 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2367 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2368 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2369 bacpy(bdaddr, &hdev->static_addr);
2370 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2372 bacpy(bdaddr, &hdev->bdaddr);
2373 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
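/* Editorial sketch (not part of the upstream file): a typical caller fills
 * the own-address fields of an advertising or connect request with it:
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 */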
2377 static void hci_clear_wake_reason(struct hci_dev *hdev)
2381 hdev->wake_reason = 0;
2382 bacpy(&hdev->wake_addr, BDADDR_ANY);
2383 hdev->wake_addr_type = 0;
2385 hci_dev_unlock(hdev);
2388 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2391 struct hci_dev *hdev =
2392 container_of(nb, struct hci_dev, suspend_notifier);
2395 if (action == PM_SUSPEND_PREPARE)
2396 ret = hci_suspend_dev(hdev);
2397 else if (action == PM_POST_SUSPEND)
2398 ret = hci_resume_dev(hdev);
2401 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2407 /* Alloc HCI device */
2408 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2410 struct hci_dev *hdev;
2411 unsigned int alloc_size;
2413 alloc_size = sizeof(*hdev);
2415 /* Fixme: May need ALIGN-ment? */
2416 alloc_size += sizeof_priv;
2419 hdev = kzalloc(alloc_size, GFP_KERNEL);
2423 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2424 hdev->esco_type = (ESCO_HV1);
2425 hdev->link_mode = (HCI_LM_ACCEPT);
2426 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2427 hdev->io_capability = 0x03; /* No Input No Output */
2428 hdev->manufacturer = 0xffff; /* Default to internal use */
2429 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2430 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2431 hdev->adv_instance_cnt = 0;
2432 hdev->cur_adv_instance = 0x00;
2433 hdev->adv_instance_timeout = 0;
2435 hdev->advmon_allowlist_duration = 300;
2436 hdev->advmon_no_filter_duration = 500;
2437 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
2439 hdev->sniff_max_interval = 800;
2440 hdev->sniff_min_interval = 80;
2442 hdev->le_adv_channel_map = 0x07;
2443 hdev->le_adv_min_interval = 0x0800;
2444 hdev->le_adv_max_interval = 0x0800;
2445 hdev->le_scan_interval = 0x0060;
2446 hdev->le_scan_window = 0x0030;
2447 hdev->le_scan_int_suspend = 0x0400;
2448 hdev->le_scan_window_suspend = 0x0012;
2449 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2450 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2451 hdev->le_scan_int_adv_monitor = 0x0060;
2452 hdev->le_scan_window_adv_monitor = 0x0030;
2453 hdev->le_scan_int_connect = 0x0060;
2454 hdev->le_scan_window_connect = 0x0060;
2455 hdev->le_conn_min_interval = 0x0018;
2456 hdev->le_conn_max_interval = 0x0028;
2457 hdev->le_conn_latency = 0x0000;
2458 hdev->le_supv_timeout = 0x002a;
2459 hdev->le_def_tx_len = 0x001b;
2460 hdev->le_def_tx_time = 0x0148;
2461 hdev->le_max_tx_len = 0x001b;
2462 hdev->le_max_tx_time = 0x0148;
2463 hdev->le_max_rx_len = 0x001b;
2464 hdev->le_max_rx_time = 0x0148;
2465 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2466 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2467 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2468 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2469 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2470 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2471 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2472 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2473 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2475 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2476 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2477 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2478 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2479 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2480 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2482 /* default 1.28 sec page scan */
2483 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2484 hdev->def_page_scan_int = 0x0800;
2485 hdev->def_page_scan_window = 0x0012;
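/* Editorial note (not part of the upstream file): the page scan values above
 * are in 0.625 ms baseband slots, so 0x0800 * 0.625 ms = 1.28 s interval and
 * 0x0012 * 0.625 ms = 11.25 ms window, matching the "1.28 sec page scan"
 * comment.
 */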
2487 mutex_init(&hdev->lock);
2488 mutex_init(&hdev->req_lock);
2490 INIT_LIST_HEAD(&hdev->mgmt_pending);
2491 INIT_LIST_HEAD(&hdev->reject_list);
2492 INIT_LIST_HEAD(&hdev->accept_list);
2493 INIT_LIST_HEAD(&hdev->uuids);
2494 INIT_LIST_HEAD(&hdev->link_keys);
2495 INIT_LIST_HEAD(&hdev->long_term_keys);
2496 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2497 INIT_LIST_HEAD(&hdev->remote_oob_data);
2498 INIT_LIST_HEAD(&hdev->le_accept_list);
2499 INIT_LIST_HEAD(&hdev->le_resolv_list);
2500 INIT_LIST_HEAD(&hdev->le_conn_params);
2501 INIT_LIST_HEAD(&hdev->pend_le_conns);
2502 INIT_LIST_HEAD(&hdev->pend_le_reports);
2503 INIT_LIST_HEAD(&hdev->conn_hash.list);
2504 INIT_LIST_HEAD(&hdev->adv_instances);
2505 INIT_LIST_HEAD(&hdev->blocked_keys);
2506 INIT_LIST_HEAD(&hdev->monitored_devices);
2508 INIT_LIST_HEAD(&hdev->local_codecs);
2509 INIT_WORK(&hdev->rx_work, hci_rx_work);
2510 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2511 INIT_WORK(&hdev->tx_work, hci_tx_work);
2512 INIT_WORK(&hdev->power_on, hci_power_on);
2513 INIT_WORK(&hdev->error_reset, hci_error_reset);
2515 hci_cmd_sync_init(hdev);
2517 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2519 skb_queue_head_init(&hdev->rx_q);
2520 skb_queue_head_init(&hdev->cmd_q);
2521 skb_queue_head_init(&hdev->raw_q);
2523 init_waitqueue_head(&hdev->req_wait_q);
2525 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2526 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2528 hci_request_setup(hdev);
2530 hci_init_sysfs(hdev);
2531 discovery_init(hdev);
2535 EXPORT_SYMBOL(hci_alloc_dev_priv);
2537 /* Free HCI device */
2538 void hci_free_dev(struct hci_dev *hdev)
2540 /* will free via device release */
2541 put_device(&hdev->dev);
2543 EXPORT_SYMBOL(hci_free_dev);
2545 /* Register HCI device */
2546 int hci_register_dev(struct hci_dev *hdev)
2550 if (!hdev->open || !hdev->close || !hdev->send)
2553 /* Do not allow HCI_AMP devices to register at index 0,
2554 * so the index can be used as the AMP controller ID.
2556 switch (hdev->dev_type) {
2558 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
2561 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
2570 snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
2573 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2575 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2576 if (!hdev->workqueue) {
2581 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2583 if (!hdev->req_workqueue) {
2584 destroy_workqueue(hdev->workqueue);
2589 if (!IS_ERR_OR_NULL(bt_debugfs))
2590 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2592 dev_set_name(&hdev->dev, "%s", hdev->name);
2594 error = device_add(&hdev->dev);
2598 hci_leds_init(hdev);
2600 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2601 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2604 if (rfkill_register(hdev->rfkill) < 0) {
2605 rfkill_destroy(hdev->rfkill);
2606 hdev->rfkill = NULL;
2610 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2611 hci_dev_set_flag(hdev, HCI_RFKILLED);
2613 hci_dev_set_flag(hdev, HCI_SETUP);
2614 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2616 if (hdev->dev_type == HCI_PRIMARY) {
2617 /* Assume BR/EDR support until proven otherwise (such as
2618 * through reading supported features during init).
2620 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2623 write_lock(&hci_dev_list_lock);
2624 list_add(&hdev->list, &hci_dev_list);
2625 write_unlock(&hci_dev_list_lock);
2627 /* Devices that are marked for raw-only usage are unconfigured
2628 * and should not be included in normal operation.
2630 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2631 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2633 /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2637 set_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, hdev->conn_flags);
2639 hci_sock_dev_event(hdev, HCI_DEV_REG);
2642 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2643 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2644 error = register_pm_notifier(&hdev->suspend_notifier);
2649 queue_work(hdev->req_workqueue, &hdev->power_on);
2651 idr_init(&hdev->adv_monitors_idr);
2652 msft_register(hdev);
2657 debugfs_remove_recursive(hdev->debugfs);
2658 destroy_workqueue(hdev->workqueue);
2659 destroy_workqueue(hdev->req_workqueue);
2661 ida_simple_remove(&hci_index_ida, hdev->id);
2665 EXPORT_SYMBOL(hci_register_dev);
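/* Editorial sketch (not part of the upstream file): a transport driver
 * typically allocates a device, fills in its callbacks and registers it as
 * below (the my_* helpers are hypothetical driver functions), undoing it
 * later with hci_unregister_dev() followed by hci_free_dev():
 *
 *	struct hci_dev *hdev = hci_alloc_dev_priv(0);
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */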
2667 /* Unregister HCI device */
2668 void hci_unregister_dev(struct hci_dev *hdev)
2670 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2672 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2674 write_lock(&hci_dev_list_lock);
2675 list_del(&hdev->list);
2676 write_unlock(&hci_dev_list_lock);
2678 cancel_work_sync(&hdev->power_on);
2680 hci_cmd_sync_clear(hdev);
2682 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks))
2683 unregister_pm_notifier(&hdev->suspend_notifier);
2685 msft_unregister(hdev);
2687 hci_dev_do_close(hdev);
2689 if (!test_bit(HCI_INIT, &hdev->flags) &&
2690 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2691 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2693 mgmt_index_removed(hdev);
2694 hci_dev_unlock(hdev);
2697	/* mgmt_index_removed should take care of emptying the
2698	 * pending mgmt command list. */
2699 BUG_ON(!list_empty(&hdev->mgmt_pending));
2701 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2704 rfkill_unregister(hdev->rfkill);
2705 rfkill_destroy(hdev->rfkill);
2708 device_del(&hdev->dev);
2709 /* Actual cleanup is deferred until hci_release_dev(). */
2712 EXPORT_SYMBOL(hci_unregister_dev);
2714 /* Release HCI device */
2715 void hci_release_dev(struct hci_dev *hdev)
2717 debugfs_remove_recursive(hdev->debugfs);
2718 kfree_const(hdev->hw_info);
2719 kfree_const(hdev->fw_info);
2721 destroy_workqueue(hdev->workqueue);
2722 destroy_workqueue(hdev->req_workqueue);
2725 hci_bdaddr_list_clear(&hdev->reject_list);
2726 hci_bdaddr_list_clear(&hdev->accept_list);
2727 hci_uuids_clear(hdev);
2728 hci_link_keys_clear(hdev);
2729 hci_smp_ltks_clear(hdev);
2730 hci_smp_irks_clear(hdev);
2731 hci_remote_oob_data_clear(hdev);
2732 hci_adv_instances_clear(hdev);
2733 hci_adv_monitors_clear(hdev);
2734 hci_bdaddr_list_clear(&hdev->le_accept_list);
2735 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2736 hci_conn_params_clear_all(hdev);
2737 hci_discovery_filter_clear(hdev);
2738 hci_blocked_keys_clear(hdev);
2739 hci_dev_unlock(hdev);
2741 ida_simple_remove(&hci_index_ida, hdev->id);
2742 kfree_skb(hdev->sent_cmd);
2745 EXPORT_SYMBOL(hci_release_dev);
2747 /* Suspend HCI device */
2748 int hci_suspend_dev(struct hci_dev *hdev)
2752 bt_dev_dbg(hdev, "");
2754	/* Suspend should only act on a powered device. */
2755 if (!hdev_is_powered(hdev) ||
2756 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2759	/* If powering down, don't attempt to suspend */
2760 if (mgmt_powering_down(hdev))
2763 hci_req_sync_lock(hdev);
2764 ret = hci_suspend_sync(hdev);
2765 hci_req_sync_unlock(hdev);
2767 hci_clear_wake_reason(hdev);
2768 mgmt_suspending(hdev, hdev->suspend_state);
2770 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2773 EXPORT_SYMBOL(hci_suspend_dev);
2775 /* Resume HCI device */
2776 int hci_resume_dev(struct hci_dev *hdev)
2780 bt_dev_dbg(hdev, "");
2782	/* Resume should only act on a powered device. */
2783 if (!hdev_is_powered(hdev) ||
2784 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2787	/* If powering down, don't attempt to resume */
2788 if (mgmt_powering_down(hdev))
2791 hci_req_sync_lock(hdev);
2792 ret = hci_resume_sync(hdev);
2793 hci_req_sync_unlock(hdev);
2795 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2796 hdev->wake_addr_type);
2798 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2801 EXPORT_SYMBOL(hci_resume_dev);
2803 /* Reset HCI device */
2804 int hci_reset_dev(struct hci_dev *hdev)
2806 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2807 struct sk_buff *skb;
2809 skb = bt_skb_alloc(3, GFP_ATOMIC);
2813 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2814 skb_put_data(skb, hw_err, 3);
2816 bt_dev_err(hdev, "Injecting HCI hardware error event");
2818 /* Send Hardware Error to upper stack */
2819 return hci_recv_frame(hdev, skb);
2821 EXPORT_SYMBOL(hci_reset_dev);
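/* Illustrative sketch (editor's addition): a transport driver that detects a
 * hung controller can ask the core to recover by injecting the hardware error
 * event shown above, e.g. from its own timeout handler (my_drv_* and 'data'
 * are hypothetical driver names):
 *
 *	if (my_drv_controller_hung(data))
 *		hci_reset_dev(data->hdev);
 */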
2823 /* Receive frame from HCI drivers */
2824 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2826 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2827 && !test_bit(HCI_INIT, &hdev->flags))) {
2832 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
2833 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
2834 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
2835 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
2841 bt_cb(skb)->incoming = 1;
2844 __net_timestamp(skb);
2846 skb_queue_tail(&hdev->rx_q, skb);
2847 queue_work(hdev->workqueue, &hdev->rx_work);
2851 EXPORT_SYMBOL(hci_recv_frame);
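/* Illustrative sketch (editor's addition): how a transport driver would
 * typically hand a received HCI event to hci_recv_frame(). The function name,
 * buffer and length are hypothetical.
 *
 *	static int my_drv_recv_event(struct hci_dev *hdev, const void *buf,
 *				     size_t len)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = bt_skb_alloc(len, GFP_ATOMIC);
 *		if (!skb)
 *			return -ENOMEM;
 *
 *		hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *		skb_put_data(skb, buf, len);
 *
 *		return hci_recv_frame(hdev, skb);
 *	}
 */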
2853 /* Receive diagnostic message from HCI drivers */
2854 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2856 /* Mark as diagnostic packet */
2857 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2860 __net_timestamp(skb);
2862 skb_queue_tail(&hdev->rx_q, skb);
2863 queue_work(hdev->workqueue, &hdev->rx_work);
2867 EXPORT_SYMBOL(hci_recv_diag);
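/* Illustrative sketch (editor's addition): vendor diagnostic traffic is fed
 * in the same way, but via hci_recv_diag() so the core tags it HCI_DIAG_PKT
 * itself ('buf' and 'len' are hypothetical driver data):
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (skb) {
 *		skb_put_data(skb, buf, len);
 *		hci_recv_diag(hdev, skb);
 *	}
 */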
2869 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2873 va_start(vargs, fmt);
2874 kfree_const(hdev->hw_info);
2875 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2878 EXPORT_SYMBOL(hci_set_hw_info);
2880 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2884 va_start(vargs, fmt);
2885 kfree_const(hdev->fw_info);
2886 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2889 EXPORT_SYMBOL(hci_set_fw_info);
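/* Illustrative sketch (editor's addition): drivers typically record hardware
 * and firmware details once setup has read them from the controller, e.g.
 * (the hw_rev/fw_build values are hypothetical):
 *
 *	hci_set_hw_info(hdev, "rev %u", hw_rev);
 *	hci_set_fw_info(hdev, "build %u", fw_build);
 */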
2891 /* ---- Interface to upper protocols ---- */
2893 int hci_register_cb(struct hci_cb *cb)
2895 BT_DBG("%p name %s", cb, cb->name);
2897 mutex_lock(&hci_cb_list_lock);
2898 list_add_tail(&cb->list, &hci_cb_list);
2899 mutex_unlock(&hci_cb_list_lock);
2903 EXPORT_SYMBOL(hci_register_cb);
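/* Illustrative sketch (editor's addition): an upper protocol hooks into
 * connection events by registering a struct hci_cb, as L2CAP and SCO do.
 * The my_proto_* names are hypothetical.
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name		= "my_proto",
 *		.connect_cfm	= my_proto_connect_cfm,
 *		.disconn_cfm	= my_proto_disconn_cfm,
 *	};
 *
 *	hci_register_cb(&my_proto_cb);		// on module init
 *	hci_unregister_cb(&my_proto_cb);	// on module exit
 */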
2905 int hci_unregister_cb(struct hci_cb *cb)
2907 BT_DBG("%p name %s", cb, cb->name);
2909 mutex_lock(&hci_cb_list_lock);
2910 list_del(&cb->list);
2911 mutex_unlock(&hci_cb_list_lock);
2915 EXPORT_SYMBOL(hci_unregister_cb);
2917 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2921 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
2925 __net_timestamp(skb);
2927 /* Send copy to monitor */
2928 hci_send_to_monitor(hdev, skb);
2930 if (atomic_read(&hdev->promisc)) {
2931 /* Send copy to the sockets */
2932 hci_send_to_sock(hdev, skb);
2935 /* Get rid of skb owner, prior to sending to the driver. */
2938 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
2943 err = hdev->send(hdev, skb);
2945 bt_dev_err(hdev, "sending frame failed (%d)", err);
2953 /* Send HCI command */
2954 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2957 struct sk_buff *skb;
2959 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2961 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2963 bt_dev_err(hdev, "no memory for command");
2967 /* Stand-alone HCI commands must be flagged as
2968 * single-command requests.
2970 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
2972 skb_queue_tail(&hdev->cmd_q, skb);
2973 queue_work(hdev->workqueue, &hdev->cmd_work);
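/* Illustrative sketch (editor's addition): queueing a stand-alone command,
 * here enabling both inquiry and page scan (parameter values per hci.h):
 *
 *	u8 scan = SCAN_INQUIRY | SCAN_PAGE;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 */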
2978 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
2981 struct sk_buff *skb;
2983 if (hci_opcode_ogf(opcode) != 0x3f) {
2984 /* A controller receiving a command shall respond with either
2985 * a Command Status Event or a Command Complete Event.
2986 * Therefore, all standard HCI commands must be sent via the
2987 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
2988 * Some vendors do not comply with this rule for vendor-specific
2989 * commands and do not return any event. We want to support
2990 * unresponded commands for such cases only.
2992 bt_dev_err(hdev, "unresponded command not supported");
2996 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2998 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3003 hci_send_frame(hdev, skb);
3007 EXPORT_SYMBOL(__hci_cmd_send);
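/* Illustrative sketch (editor's addition): __hci_cmd_send() is reserved for
 * vendor commands (OGF 0x3f) that never generate a completion event. The
 * OCF and payload below are hypothetical.
 *
 *	static const u8 param[] = { 0x01 };
 *
 *	__hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0023),
 *		       sizeof(param), param);
 */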
3009 /* Get data from the previously sent command */
3010 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3012 struct hci_command_hdr *hdr;
3014 if (!hdev->sent_cmd)
3017 hdr = (void *) hdev->sent_cmd->data;
3019 if (hdr->opcode != cpu_to_le16(opcode))
3022 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3024 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
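/* Illustrative sketch (editor's addition): command complete handlers use
 * hci_sent_cmd_data() to recover the parameters of the command being
 * acknowledged, e.g. for Write Scan Enable (a single parameter byte;
 * 'param' is a hypothetical local):
 *
 *	u8 *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 *
 *	if (sent)
 *		param = *sent;
 */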
3028 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3030 struct hci_acl_hdr *hdr;
3033 skb_push(skb, HCI_ACL_HDR_SIZE);
3034 skb_reset_transport_header(skb);
3035 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3036 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3037 hdr->dlen = cpu_to_le16(len);
3040 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3041 struct sk_buff *skb, __u16 flags)
3043 struct hci_conn *conn = chan->conn;
3044 struct hci_dev *hdev = conn->hdev;
3045 struct sk_buff *list;
3047 skb->len = skb_headlen(skb);
3050 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3052 switch (hdev->dev_type) {
3054 hci_add_acl_hdr(skb, conn->handle, flags);
3057 hci_add_acl_hdr(skb, chan->handle, flags);
3060 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3064 list = skb_shinfo(skb)->frag_list;
3066	/* Non-fragmented */
3067 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3069 skb_queue_tail(queue, skb);
3072 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3074 skb_shinfo(skb)->frag_list = NULL;
3076	/* Queue all fragments atomically. We need to use spin_lock_bh
3077	 * here because with 6LoWPAN links this function is called from
3078	 * softirq context, and using a normal spin lock could cause deadlocks. */
3081 spin_lock_bh(&queue->lock);
3083 __skb_queue_tail(queue, skb);
3085 flags &= ~ACL_START;
3088 skb = list; list = list->next;
3090 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3091 hci_add_acl_hdr(skb, conn->handle, flags);
3093 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3095 __skb_queue_tail(queue, skb);
3098 spin_unlock_bh(&queue->lock);
3102 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3104 struct hci_dev *hdev = chan->conn->hdev;
3106 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3108 hci_queue_acl(chan, &chan->data_q, skb, flags);
3110 queue_work(hdev->workqueue, &hdev->tx_work);
3114 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3116 struct hci_dev *hdev = conn->hdev;
3117 struct hci_sco_hdr hdr;
3119 BT_DBG("%s len %d", hdev->name, skb->len);
3121 hdr.handle = cpu_to_le16(conn->handle);
3122 hdr.dlen = skb->len;
3124 skb_push(skb, HCI_SCO_HDR_SIZE);
3125 skb_reset_transport_header(skb);
3126 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3128 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3130 skb_queue_tail(&conn->data_q, skb);
3131 queue_work(hdev->workqueue, &hdev->tx_work);
3134 /* ---- HCI TX task (outgoing data) ---- */
3136 /* HCI Connection scheduler */
3137 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3140 struct hci_conn_hash *h = &hdev->conn_hash;
3141 struct hci_conn *conn = NULL, *c;
3142 unsigned int num = 0, min = ~0;
3144 /* We don't have to lock device here. Connections are always
3145 * added and removed with TX task disabled. */
3149 list_for_each_entry_rcu(c, &h->list, list) {
3150 if (c->type != type || skb_queue_empty(&c->data_q))
3153 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3158 if (c->sent < min) {
3163 if (hci_conn_num(hdev, type) == num)
3172 switch (conn->type) {
3174 cnt = hdev->acl_cnt;
3178 cnt = hdev->sco_cnt;
3181 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3185 bt_dev_err(hdev, "unknown link type %d", conn->type);
3193 BT_DBG("conn %p quote %d", conn, *quote);
3197 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3199 struct hci_conn_hash *h = &hdev->conn_hash;
3202 bt_dev_err(hdev, "link tx timeout");
3206 /* Kill stalled connections */
3207 list_for_each_entry_rcu(c, &h->list, list) {
3208 if (c->type == type && c->sent) {
3209 bt_dev_err(hdev, "killing stalled connection %pMR",
3211 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3218 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3221 struct hci_conn_hash *h = &hdev->conn_hash;
3222 struct hci_chan *chan = NULL;
3223 unsigned int num = 0, min = ~0, cur_prio = 0;
3224 struct hci_conn *conn;
3225 int cnt, q, conn_num = 0;
3227 BT_DBG("%s", hdev->name);
3231 list_for_each_entry_rcu(conn, &h->list, list) {
3232 struct hci_chan *tmp;
3234 if (conn->type != type)
3237 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3242 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3243 struct sk_buff *skb;
3245 if (skb_queue_empty(&tmp->data_q))
3248 skb = skb_peek(&tmp->data_q);
3249 if (skb->priority < cur_prio)
3252 if (skb->priority > cur_prio) {
3255 cur_prio = skb->priority;
3260 if (conn->sent < min) {
3266 if (hci_conn_num(hdev, type) == conn_num)
3275 switch (chan->conn->type) {
3277 cnt = hdev->acl_cnt;
3280 cnt = hdev->block_cnt;
3284 cnt = hdev->sco_cnt;
3287 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3291 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3296 BT_DBG("chan %p quote %d", chan, *quote);
3300 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3302 struct hci_conn_hash *h = &hdev->conn_hash;
3303 struct hci_conn *conn;
3306 BT_DBG("%s", hdev->name);
3310 list_for_each_entry_rcu(conn, &h->list, list) {
3311 struct hci_chan *chan;
3313 if (conn->type != type)
3316 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3321 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3322 struct sk_buff *skb;
3329 if (skb_queue_empty(&chan->data_q))
3332 skb = skb_peek(&chan->data_q);
3333 if (skb->priority >= HCI_PRIO_MAX - 1)
3336 skb->priority = HCI_PRIO_MAX - 1;
3338 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3342 if (hci_conn_num(hdev, type) == num)
3350 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3352 /* Calculate count of blocks used by this packet */
3353 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
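/* Editor's note (illustrative numbers): with hdev->block_len = 341 and a
 * 1024-byte ACL packet (4-byte header + 1020 bytes of data), the packet
 * consumes DIV_ROUND_UP(1020, 341) = 3 controller buffer blocks.
 */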
3356 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3358 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3359 /* ACL tx timeout must be longer than maximum
3360 * link supervision timeout (40.9 seconds) */
3361 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3362 HCI_ACL_TX_TIMEOUT))
3363 hci_link_tx_to(hdev, ACL_LINK);
3368 static void hci_sched_sco(struct hci_dev *hdev)
3370 struct hci_conn *conn;
3371 struct sk_buff *skb;
3374 BT_DBG("%s", hdev->name);
3376 if (!hci_conn_num(hdev, SCO_LINK))
3379 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
3380 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3381 BT_DBG("skb %p len %d", skb, skb->len);
3382 hci_send_frame(hdev, skb);
3385 if (conn->sent == ~0)
3391 static void hci_sched_esco(struct hci_dev *hdev)
3393 struct hci_conn *conn;
3394 struct sk_buff *skb;
3397 BT_DBG("%s", hdev->name);
3399 if (!hci_conn_num(hdev, ESCO_LINK))
3402 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3404 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3405 BT_DBG("skb %p len %d", skb, skb->len);
3406 hci_send_frame(hdev, skb);
3409 if (conn->sent == ~0)
3415 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3417 unsigned int cnt = hdev->acl_cnt;
3418 struct hci_chan *chan;
3419 struct sk_buff *skb;
3422 __check_timeout(hdev, cnt);
3424 while (hdev->acl_cnt &&
3425 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
3426 u32 priority = (skb_peek(&chan->data_q))->priority;
3427 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3428 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3429 skb->len, skb->priority);
3431 /* Stop if priority has changed */
3432 if (skb->priority < priority)
3435 skb = skb_dequeue(&chan->data_q);
3437 hci_conn_enter_active_mode(chan->conn,
3438 bt_cb(skb)->force_active);
3440 hci_send_frame(hdev, skb);
3441 hdev->acl_last_tx = jiffies;
3447 /* Send pending SCO packets right away */
3448 hci_sched_sco(hdev);
3449 hci_sched_esco(hdev);
3453 if (cnt != hdev->acl_cnt)
3454 hci_prio_recalculate(hdev, ACL_LINK);
3457 static void hci_sched_acl_blk(struct hci_dev *hdev)
3459 unsigned int cnt = hdev->block_cnt;
3460 struct hci_chan *chan;
3461 struct sk_buff *skb;
3465 __check_timeout(hdev, cnt);
3467 BT_DBG("%s", hdev->name);
3469 if (hdev->dev_type == HCI_AMP)
3474 while (hdev->block_cnt > 0 &&
3475 (chan = hci_chan_sent(hdev, type, "e))) {
3476 u32 priority = (skb_peek(&chan->data_q))->priority;
3477 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3480 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3481 skb->len, skb->priority);
3483 /* Stop if priority has changed */
3484 if (skb->priority < priority)
3487 skb = skb_dequeue(&chan->data_q);
3489 blocks = __get_blocks(hdev, skb);
3490 if (blocks > hdev->block_cnt)
3493 hci_conn_enter_active_mode(chan->conn,
3494 bt_cb(skb)->force_active);
3496 hci_send_frame(hdev, skb);
3497 hdev->acl_last_tx = jiffies;
3499 hdev->block_cnt -= blocks;
3502 chan->sent += blocks;
3503 chan->conn->sent += blocks;
3507 if (cnt != hdev->block_cnt)
3508 hci_prio_recalculate(hdev, type);
3511 static void hci_sched_acl(struct hci_dev *hdev)
3513 BT_DBG("%s", hdev->name);
3515 /* No ACL link over BR/EDR controller */
3516 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3519 /* No AMP link over AMP controller */
3520 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3523 switch (hdev->flow_ctl_mode) {
3524 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3525 hci_sched_acl_pkt(hdev);
3528 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3529 hci_sched_acl_blk(hdev);
3534 static void hci_sched_le(struct hci_dev *hdev)
3536 struct hci_chan *chan;
3537 struct sk_buff *skb;
3538 int quote, cnt, tmp;
3540 BT_DBG("%s", hdev->name);
3542 if (!hci_conn_num(hdev, LE_LINK))
3545 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3547 __check_timeout(hdev, cnt);
3550 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
3551 u32 priority = (skb_peek(&chan->data_q))->priority;
3552 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3553 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3554 skb->len, skb->priority);
3556 /* Stop if priority has changed */
3557 if (skb->priority < priority)
3560 skb = skb_dequeue(&chan->data_q);
3562 hci_send_frame(hdev, skb);
3563 hdev->le_last_tx = jiffies;
3569 /* Send pending SCO packets right away */
3570 hci_sched_sco(hdev);
3571 hci_sched_esco(hdev);
3578 hdev->acl_cnt = cnt;
3581 hci_prio_recalculate(hdev, LE_LINK);
3584 static void hci_tx_work(struct work_struct *work)
3586 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3587 struct sk_buff *skb;
3589 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3590 hdev->sco_cnt, hdev->le_cnt);
3592 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3593 /* Schedule queues and send stuff to HCI driver */
3594 hci_sched_sco(hdev);
3595 hci_sched_esco(hdev);
3596 hci_sched_acl(hdev);
3600 /* Send next queued raw (unknown type) packet */
3601 while ((skb = skb_dequeue(&hdev->raw_q)))
3602 hci_send_frame(hdev, skb);
3605 /* ----- HCI RX task (incoming data processing) ----- */
3607 /* ACL data packet */
3608 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3610 struct hci_acl_hdr *hdr = (void *) skb->data;
3611 struct hci_conn *conn;
3612 __u16 handle, flags;
3614 skb_pull(skb, HCI_ACL_HDR_SIZE);
3616 handle = __le16_to_cpu(hdr->handle);
3617 flags = hci_flags(handle);
3618 handle = hci_handle(handle);
3620 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3623 hdev->stat.acl_rx++;
3626 conn = hci_conn_hash_lookup_handle(hdev, handle);
3627 hci_dev_unlock(hdev);
3630 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3632 /* Send to upper protocol */
3633 l2cap_recv_acldata(conn, skb, flags);
3636 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3643 /* SCO data packet */
3644 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3646 struct hci_sco_hdr *hdr = (void *) skb->data;
3647 struct hci_conn *conn;
3648 __u16 handle, flags;
3650 skb_pull(skb, HCI_SCO_HDR_SIZE);
3652 handle = __le16_to_cpu(hdr->handle);
3653 flags = hci_flags(handle);
3654 handle = hci_handle(handle);
3656 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3659 hdev->stat.sco_rx++;
3662 conn = hci_conn_hash_lookup_handle(hdev, handle);
3663 hci_dev_unlock(hdev);
3666 /* Send to upper protocol */
3667 bt_cb(skb)->sco.pkt_status = flags & 0x03;
3668 sco_recv_scodata(conn, skb);
3671 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3678 static bool hci_req_is_complete(struct hci_dev *hdev)
3680 struct sk_buff *skb;
3682 skb = skb_peek(&hdev->cmd_q);
3686 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3689 static void hci_resend_last(struct hci_dev *hdev)
3691 struct hci_command_hdr *sent;
3692 struct sk_buff *skb;
3695 if (!hdev->sent_cmd)
3698 sent = (void *) hdev->sent_cmd->data;
3699 opcode = __le16_to_cpu(sent->opcode);
3700 if (opcode == HCI_OP_RESET)
3703 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3707 skb_queue_head(&hdev->cmd_q, skb);
3708 queue_work(hdev->workqueue, &hdev->cmd_work);
3711 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3712 hci_req_complete_t *req_complete,
3713 hci_req_complete_skb_t *req_complete_skb)
3715 struct sk_buff *skb;
3716 unsigned long flags;
3718 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3720 /* If the completed command doesn't match the last one that was
3721	 * sent, we need to do special handling of it.
3723 if (!hci_sent_cmd_data(hdev, opcode)) {
3724 /* Some CSR based controllers generate a spontaneous
3725 * reset complete event during init and any pending
3726 * command will never be completed. In such a case we
3727	 * need to resend whatever was the last sent command.
3730 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3731 hci_resend_last(hdev);
3736 /* If we reach this point this event matches the last command sent */
3737 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3739 /* If the command succeeded and there's still more commands in
3740 * this request the request is not yet complete.
3742 if (!status && !hci_req_is_complete(hdev))
3745 /* If this was the last command in a request the complete
3746 * callback would be found in hdev->sent_cmd instead of the
3747 * command queue (hdev->cmd_q).
3749 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
3750 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
3754 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
3755 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
3759 /* Remove all pending commands belonging to this request */
3760 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3761 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3762 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3763 __skb_queue_head(&hdev->cmd_q, skb);
3767 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3768 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3770 *req_complete = bt_cb(skb)->hci.req_complete;
3773 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3776 static void hci_rx_work(struct work_struct *work)
3778 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3779 struct sk_buff *skb;
3781 BT_DBG("%s", hdev->name);
3783 while ((skb = skb_dequeue(&hdev->rx_q))) {
3784 /* Send copy to monitor */
3785 hci_send_to_monitor(hdev, skb);
3787 if (atomic_read(&hdev->promisc)) {
3788 /* Send copy to the sockets */
3789 hci_send_to_sock(hdev, skb);
3792		/* If the device has been opened in HCI_USER_CHANNEL,
3793		 * userspace has exclusive access to the device.
3794		 * When the device is in HCI_INIT state, we still need to
3795		 * process the data packets coming from the driver in
3796		 * order to complete its setup().
3798 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
3799 !test_bit(HCI_INIT, &hdev->flags)) {
3804 if (test_bit(HCI_INIT, &hdev->flags)) {
3805			/* Don't process data packets in this state. */
3806 switch (hci_skb_pkt_type(skb)) {
3807 case HCI_ACLDATA_PKT:
3808 case HCI_SCODATA_PKT:
3809 case HCI_ISODATA_PKT:
3816 switch (hci_skb_pkt_type(skb)) {
3818 BT_DBG("%s Event packet", hdev->name);
3819 hci_event_packet(hdev, skb);
3822 case HCI_ACLDATA_PKT:
3823 BT_DBG("%s ACL data packet", hdev->name);
3824 hci_acldata_packet(hdev, skb);
3827 case HCI_SCODATA_PKT:
3828 BT_DBG("%s SCO data packet", hdev->name);
3829 hci_scodata_packet(hdev, skb);
3839 static void hci_cmd_work(struct work_struct *work)
3841 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3842 struct sk_buff *skb;
3844 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3845 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3847 /* Send queued commands */
3848 if (atomic_read(&hdev->cmd_cnt)) {
3849 skb = skb_dequeue(&hdev->cmd_q);
3853 kfree_skb(hdev->sent_cmd);
3855 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
3856 if (hdev->sent_cmd) {
3858 if (hci_req_status_pend(hdev))
3859 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
3860 atomic_dec(&hdev->cmd_cnt);
3862 res = hci_send_frame(hdev, skb);
3864 __hci_cmd_sync_cancel(hdev, -res);
3866 if (test_bit(HCI_RESET, &hdev->flags))
3867 cancel_delayed_work(&hdev->cmd_timer);
3869 schedule_delayed_work(&hdev->cmd_timer,
3872 skb_queue_head(&hdev->cmd_q, skb);
3873 queue_work(hdev->workqueue, &hdev->cmd_work);