2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/property.h>
33 #include <linux/suspend.h>
34 #include <linux/wait.h>
35 #include <asm/unaligned.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 #include <net/bluetooth/mgmt.h>
42 #include "hci_request.h"
43 #include "hci_debugfs.h"
49 static void hci_rx_work(struct work_struct *work);
50 static void hci_cmd_work(struct work_struct *work);
51 static void hci_tx_work(struct work_struct *work);
54 LIST_HEAD(hci_dev_list);
55 DEFINE_RWLOCK(hci_dev_list_lock);
57 /* HCI callback list */
58 LIST_HEAD(hci_cb_list);
59 DEFINE_MUTEX(hci_cb_list_lock);
61 /* HCI ID Numbering */
62 static DEFINE_IDA(hci_index_ida);
64 /* ---- HCI debugfs entries ---- */
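/* Example usage (a sketch, assuming debugfs is mounted at /sys/kernel/debug
 * and the controller is hci0):
 *
 *	# cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	N
 *	# echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing toggles Device Under Test mode by sending HCI_OP_ENABLE_DUT_MODE
 * or HCI_OP_RESET while the device is up.
 */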
66 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67 size_t count, loff_t *ppos)
69 struct hci_dev *hdev = file->private_data;
72 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
75 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
78 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79 size_t count, loff_t *ppos)
81 struct hci_dev *hdev = file->private_data;
86 if (!test_bit(HCI_UP, &hdev->flags))
89 err = kstrtobool_from_user(user_buf, count, &enable);
93 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
96 hci_req_sync_lock(hdev);
98 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
101 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
103 hci_req_sync_unlock(hdev);
110 hci_dev_change_flag(hdev, HCI_DUT_MODE);
115 static const struct file_operations dut_mode_fops = {
117 .read = dut_mode_read,
118 .write = dut_mode_write,
119 .llseek = default_llseek,
122 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123 size_t count, loff_t *ppos)
125 struct hci_dev *hdev = file->private_data;
128 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
131 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
134 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
135 size_t count, loff_t *ppos)
137 struct hci_dev *hdev = file->private_data;
141 err = kstrtobool_from_user(user_buf, count, &enable);
145 /* When the diagnostic flags are not persistent and the transport
146 * is not active or in user channel operation, then there is no need
147 * for the vendor callback. Instead just store the desired value and
148 * the setting will be programmed when the controller gets powered on.
150 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
151 (!test_bit(HCI_RUNNING, &hdev->flags) ||
152 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
155 hci_req_sync_lock(hdev);
156 err = hdev->set_diag(hdev, enable);
157 hci_req_sync_unlock(hdev);
164 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
166 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
171 static const struct file_operations vendor_diag_fops = {
173 .read = vendor_diag_read,
174 .write = vendor_diag_write,
175 .llseek = default_llseek,
178 static void hci_debugfs_create_basic(struct hci_dev *hdev)
180 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
184 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
188 static int hci_reset_req(struct hci_request *req, unsigned long opt)
190 BT_DBG("%s %ld", req->hdev->name, opt);
193 set_bit(HCI_RESET, &req->hdev->flags);
194 hci_req_add(req, HCI_OP_RESET, 0, NULL);
198 static void bredr_init(struct hci_request *req)
200 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
202 /* Read Local Supported Features */
203 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
205 /* Read Local Version */
206 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
208 /* Read BD Address */
209 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
212 static void amp_init1(struct hci_request *req)
214 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
216 /* Read Local Version */
217 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
219 /* Read Local Supported Commands */
220 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
222 /* Read Local AMP Info */
223 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
225 /* Read Data Blk size */
226 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
228 /* Read Flow Control Mode */
229 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
231 /* Read Location Data */
232 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
235 static int amp_init2(struct hci_request *req)
237 /* Read Local Supported Features. Not all AMP controllers
238 * support this, so it's placed conditionally in the second init stage.
241 if (req->hdev->commands[14] & 0x20)
242 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
247 static int hci_init1_req(struct hci_request *req, unsigned long opt)
249 struct hci_dev *hdev = req->hdev;
251 BT_DBG("%s %ld", hdev->name, opt);
254 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
255 hci_reset_req(req, 0);
257 switch (hdev->dev_type) {
265 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
272 static void bredr_setup(struct hci_request *req)
277 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
278 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
280 /* Read Class of Device */
281 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
283 /* Read Local Name */
284 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
286 /* Read Voice Setting */
287 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
289 /* Read Number of Supported IAC */
290 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
292 /* Read Current IAC LAP */
293 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
295 /* Clear Event Filters */
296 flt_type = HCI_FLT_CLEAR_ALL;
297 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
299 /* Connection accept timeout ~20 secs */
300 param = cpu_to_le16(0x7d00);
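/* 0x7d00 = 32000 baseband slots * 0.625 ms per slot = 20000 ms */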
301 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
304 static void le_setup(struct hci_request *req)
306 struct hci_dev *hdev = req->hdev;
308 /* Read LE Buffer Size */
309 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
311 /* Read LE Local Supported Features */
312 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
314 /* Read LE Supported States */
315 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
317 /* LE-only controllers have LE implicitly enabled */
318 if (!lmp_bredr_capable(hdev))
319 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
322 static void hci_setup_event_mask(struct hci_request *req)
324 struct hci_dev *hdev = req->hdev;
326 /* The second byte is 0xff instead of 0x9f (two reserved bits
327 * disabled) since a Broadcom 1.2 dongle doesn't respond to the command otherwise.
330 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
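/* Each byte covers eight consecutive bits of the event mask (LSB first):
 * events[0] bit 0 is mask bit 0 (Inquiry Complete) and events[4] bit 0 is
 * mask bit 32 (Flow Specification Complete).
 */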
332 /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
333 * any event mask for pre 1.2 devices.
335 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
338 if (lmp_bredr_capable(hdev)) {
339 events[4] |= 0x01; /* Flow Specification Complete */
341 /* Use a different default for LE-only devices */
342 memset(events, 0, sizeof(events));
343 events[1] |= 0x20; /* Command Complete */
344 events[1] |= 0x40; /* Command Status */
345 events[1] |= 0x80; /* Hardware Error */
347 /* If the controller supports the Disconnect command, enable
348 * the corresponding event. In addition enable packet flow
349 * control related events.
351 if (hdev->commands[0] & 0x20) {
352 events[0] |= 0x10; /* Disconnection Complete */
353 events[2] |= 0x04; /* Number of Completed Packets */
354 events[3] |= 0x02; /* Data Buffer Overflow */
357 /* If the controller supports the Read Remote Version
358 * Information command, enable the corresponding event.
360 if (hdev->commands[2] & 0x80)
361 events[1] |= 0x08; /* Read Remote Version Information Complete */
365 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
366 events[0] |= 0x80; /* Encryption Change */
367 events[5] |= 0x80; /* Encryption Key Refresh Complete */
371 if (lmp_inq_rssi_capable(hdev) ||
372 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
373 events[4] |= 0x02; /* Inquiry Result with RSSI */
375 if (lmp_ext_feat_capable(hdev))
376 events[4] |= 0x04; /* Read Remote Extended Features Complete */
378 if (lmp_esco_capable(hdev)) {
379 events[5] |= 0x08; /* Synchronous Connection Complete */
380 events[5] |= 0x10; /* Synchronous Connection Changed */
383 if (lmp_sniffsubr_capable(hdev))
384 events[5] |= 0x20; /* Sniff Subrating */
386 if (lmp_pause_enc_capable(hdev))
387 events[5] |= 0x80; /* Encryption Key Refresh Complete */
389 if (lmp_ext_inq_capable(hdev))
390 events[5] |= 0x40; /* Extended Inquiry Result */
392 if (lmp_no_flush_capable(hdev))
393 events[7] |= 0x01; /* Enhanced Flush Complete */
395 if (lmp_lsto_capable(hdev))
396 events[6] |= 0x80; /* Link Supervision Timeout Changed */
398 if (lmp_ssp_capable(hdev)) {
399 events[6] |= 0x01; /* IO Capability Request */
400 events[6] |= 0x02; /* IO Capability Response */
401 events[6] |= 0x04; /* User Confirmation Request */
402 events[6] |= 0x08; /* User Passkey Request */
403 events[6] |= 0x10; /* Remote OOB Data Request */
404 events[6] |= 0x20; /* Simple Pairing Complete */
405 events[7] |= 0x04; /* User Passkey Notification */
406 events[7] |= 0x08; /* Keypress Notification */
407 events[7] |= 0x10; /* Remote Host Supported
408 * Features Notification
412 if (lmp_le_capable(hdev))
413 events[7] |= 0x20; /* LE Meta-Event */
415 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
418 static int hci_init2_req(struct hci_request *req, unsigned long opt)
420 struct hci_dev *hdev = req->hdev;
422 if (hdev->dev_type == HCI_AMP)
423 return amp_init2(req);
425 if (lmp_bredr_capable(hdev))
428 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
430 if (lmp_le_capable(hdev))
433 /* All Bluetooth 1.2 and later controllers should support the
434 * HCI command for reading the local supported commands.
436 * Unfortunately some controllers indicate Bluetooth 1.2 support,
437 * but do not have support for this command. If that is the case,
438 * the driver can quirk the behavior and skip reading the local
439 * supported commands.
441 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
442 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
443 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
445 if (lmp_ssp_capable(hdev)) {
446 /* When SSP is available, then the host features page
447 * should also be available. However some
448 * controllers list the max_page as 0 as long as SSP
449 * has not been enabled. To achieve proper debugging
450 * output, force the minimum max_page to 1 at least.
452 hdev->max_page = 0x01;
454 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
457 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
458 sizeof(mode), &mode);
460 struct hci_cp_write_eir cp;
462 memset(hdev->eir, 0, sizeof(hdev->eir));
463 memset(&cp, 0, sizeof(cp));
465 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
469 if (lmp_inq_rssi_capable(hdev) ||
470 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
473 /* If Extended Inquiry Result events are supported, then
474 * they are clearly preferred over Inquiry Result with RSSI
477 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
479 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
482 if (lmp_inq_tx_pwr_capable(hdev))
483 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
485 if (lmp_ext_feat_capable(hdev)) {
486 struct hci_cp_read_local_ext_features cp;
489 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
493 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
495 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
502 static void hci_setup_link_policy(struct hci_request *req)
504 struct hci_dev *hdev = req->hdev;
505 struct hci_cp_write_def_link_policy cp;
508 if (lmp_rswitch_capable(hdev))
509 link_policy |= HCI_LP_RSWITCH;
510 if (lmp_hold_capable(hdev))
511 link_policy |= HCI_LP_HOLD;
512 if (lmp_sniff_capable(hdev))
513 link_policy |= HCI_LP_SNIFF;
514 if (lmp_park_capable(hdev))
515 link_policy |= HCI_LP_PARK;
517 cp.policy = cpu_to_le16(link_policy);
518 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
521 static void hci_set_le_support(struct hci_request *req)
523 struct hci_dev *hdev = req->hdev;
524 struct hci_cp_write_le_host_supported cp;
526 /* LE-only devices do not support explicit enablement */
527 if (!lmp_bredr_capable(hdev))
530 memset(&cp, 0, sizeof(cp));
532 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
537 if (cp.le != lmp_host_le_capable(hdev))
538 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
542 static void hci_set_event_mask_page_2(struct hci_request *req)
544 struct hci_dev *hdev = req->hdev;
545 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
546 bool changed = false;
548 /* If Connectionless Peripheral Broadcast central role is supported
549 * enable all necessary events for it.
551 if (lmp_cpb_central_capable(hdev)) {
552 events[1] |= 0x40; /* Triggered Clock Capture */
553 events[1] |= 0x80; /* Synchronization Train Complete */
554 events[2] |= 0x10; /* Peripheral Page Response Timeout */
555 events[2] |= 0x20; /* CPB Channel Map Change */
559 /* If Connectionless Peripheral Broadcast peripheral role is supported
560 * enable all necessary events for it.
562 if (lmp_cpb_peripheral_capable(hdev)) {
563 events[2] |= 0x01; /* Synchronization Train Received */
564 events[2] |= 0x02; /* CPB Receive */
565 events[2] |= 0x04; /* CPB Timeout */
566 events[2] |= 0x08; /* Truncated Page Complete */
570 /* Enable Authenticated Payload Timeout Expired event if supported */
571 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
576 /* Some Broadcom based controllers indicate support for Set Event
577 * Mask Page 2 command, but then actually do not support it. Since
578 * the default value is all bits set to zero, the command is only
579 * required if the event mask has to be changed. In case no change
580 * to the event mask is needed, skip this command.
583 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
584 sizeof(events), events);
587 static int hci_init3_req(struct hci_request *req, unsigned long opt)
589 struct hci_dev *hdev = req->hdev;
592 hci_setup_event_mask(req);
594 if (hdev->commands[6] & 0x20 &&
595 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
596 struct hci_cp_read_stored_link_key cp;
598 bacpy(&cp.bdaddr, BDADDR_ANY);
600 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
603 if (hdev->commands[5] & 0x10)
604 hci_setup_link_policy(req);
606 if (hdev->commands[8] & 0x01)
607 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
609 if (hdev->commands[18] & 0x04 &&
610 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
611 hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
613 /* Some older Broadcom based Bluetooth 1.2 controllers do not
614 * support the Read Page Scan Type command. Check support for
615 * this command in the bit mask of supported commands.
617 if (hdev->commands[13] & 0x01)
618 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
620 if (lmp_le_capable(hdev)) {
623 memset(events, 0, sizeof(events));
625 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
626 events[0] |= 0x10; /* LE Long Term Key Request */
628 /* If controller supports the Connection Parameters Request
629 * Link Layer Procedure, enable the corresponding event.
631 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
632 events[0] |= 0x20; /* LE Remote Connection Parameter Request */
636 /* If the controller supports the Data Length Extension
637 * feature, enable the corresponding event.
639 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
640 events[0] |= 0x40; /* LE Data Length Change */
642 /* If the controller supports LL Privacy feature, enable
643 * the corresponding event.
645 if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
646 events[1] |= 0x02; /* LE Enhanced Connection Complete */
650 /* If the controller supports Extended Scanner Filter
651 * Policies, enable the corresponding event.
653 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
654 events[1] |= 0x04; /* LE Direct Advertising Report */
658 /* If the controller supports Channel Selection Algorithm #2
659 * feature, enable the corresponding event.
661 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
662 events[2] |= 0x08; /* LE Channel Selection Algorithm */
666 /* If the controller supports the LE Set Scan Enable command,
667 * enable the corresponding advertising report event.
669 if (hdev->commands[26] & 0x08)
670 events[0] |= 0x02; /* LE Advertising Report */
672 /* If the controller supports the LE Create Connection
673 * command, enable the corresponding event.
675 if (hdev->commands[26] & 0x10)
676 events[0] |= 0x01; /* LE Connection Complete */
678 /* If the controller supports the LE Connection Update
679 * command, enable the corresponding event.
681 if (hdev->commands[27] & 0x04)
682 events[0] |= 0x04; /* LE Connection Update Complete */
686 /* If the controller supports the LE Read Remote Used Features
687 * command, enable the corresponding event.
689 if (hdev->commands[27] & 0x20)
690 events[0] |= 0x08; /* LE Read Remote Used Features Complete */
694 /* If the controller supports the LE Read Local P-256
695 * Public Key command, enable the corresponding event.
697 if (hdev->commands[34] & 0x02)
698 events[0] |= 0x80; /* LE Read Local P-256
699 * Public Key Complete
702 /* If the controller supports the LE Generate DHKey
703 * command, enable the corresponding event.
705 if (hdev->commands[34] & 0x04)
706 events[1] |= 0x01; /* LE Generate DHKey Complete */
708 /* If the controller supports the LE Set Default PHY or
709 * LE Set PHY commands, enable the corresponding event.
711 if (hdev->commands[35] & (0x20 | 0x40))
712 events[1] |= 0x08; /* LE PHY Update Complete */
714 /* If the controller supports LE Set Extended Scan Parameters
715 * and LE Set Extended Scan Enable commands, enable the
716 * corresponding event.
718 if (use_ext_scan(hdev))
719 events[1] |= 0x10; /* LE Extended Advertising Report */
723 /* If the controller supports the LE Extended Advertising
724 * command, enable the corresponding event.
726 if (ext_adv_capable(hdev))
727 events[2] |= 0x02; /* LE Advertising Set Terminated */
731 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
734 /* Read LE Advertising Channel TX Power */
735 if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
736 /* The HCI specification forbids mixing legacy and extended
737 * advertising commands, and READ_ADV_TX_POWER is one of the
738 * legacy commands. Do not issue it when extended advertising
739 * is supported, otherwise the controller will return
740 * COMMAND_DISALLOWED for the extended commands.
742 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
745 if (hdev->commands[38] & 0x80) {
746 /* Read LE Min/Max Tx Power*/
747 hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
751 if (hdev->commands[26] & 0x40) {
752 /* Read LE Accept List Size */
753 hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
757 if (hdev->commands[26] & 0x80) {
758 /* Clear LE Accept List */
759 hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
762 if (hdev->commands[34] & 0x40) {
763 /* Read LE Resolving List Size */
764 hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
768 if (hdev->commands[34] & 0x20) {
769 /* Clear LE Resolving List */
770 hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
773 if (hdev->commands[35] & 0x04) {
774 __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
776 /* Set RPA timeout */
777 hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
781 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
782 /* Read LE Maximum Data Length */
783 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
785 /* Read LE Suggested Default Data Length */
786 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
789 if (ext_adv_capable(hdev)) {
790 /* Read LE Number of Supported Advertising Sets */
791 hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
795 hci_set_le_support(req);
798 /* Read features beyond page 1 if available */
799 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
800 struct hci_cp_read_local_ext_features cp;
803 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
810 static int hci_init4_req(struct hci_request *req, unsigned long opt)
812 struct hci_dev *hdev = req->hdev;
814 /* Some Broadcom based Bluetooth controllers do not support the
815 * Delete Stored Link Key command. They are clearly indicating its
816 * absence in the bit mask of supported commands.
818 * Check the supported commands and only if the command is marked
819 * as supported send it. If not supported assume that the controller
820 * does not have actual support for stored link keys which makes this
821 * command redundant anyway.
823 * Some controllers indicate that they support deleting
824 * stored link keys, but they don't. The quirk lets a driver
825 * just disable this command.
827 if (hdev->commands[6] & 0x80 &&
828 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
829 struct hci_cp_delete_stored_link_key cp;
831 bacpy(&cp.bdaddr, BDADDR_ANY);
832 cp.delete_all = 0x01;
833 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
837 /* Set event mask page 2 if the HCI command for it is supported */
838 if (hdev->commands[22] & 0x04)
839 hci_set_event_mask_page_2(req);
841 /* Read local codec list if the HCI command is supported */
842 if (hdev->commands[29] & 0x20)
843 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
845 /* Read local pairing options if the HCI command is supported */
846 if (hdev->commands[41] & 0x08)
847 hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);
849 /* Get MWS transport configuration if the HCI command is supported */
850 if (hdev->commands[30] & 0x08)
851 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
853 /* Check for Synchronization Train support */
854 if (lmp_sync_train_capable(hdev))
855 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
857 /* Enable Secure Connections if supported and configured */
858 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
859 bredr_sc_enabled(hdev)) {
862 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
863 sizeof(support), &support);
866 /* If supported, set Erroneous Data Reporting to match the wideband speech setting. */
869 if (hdev->commands[18] & 0x08 &&
870 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
871 bool enabled = hci_dev_test_flag(hdev,
872 HCI_WIDEBAND_SPEECH_ENABLED);
875 (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
876 struct hci_cp_write_def_err_data_reporting cp;
878 cp.err_data_reporting = enabled ?
879 ERR_DATA_REPORTING_ENABLED :
880 ERR_DATA_REPORTING_DISABLED;
882 hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
887 /* Set Suggested Default Data Length to maximum if supported */
888 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
889 struct hci_cp_le_write_def_data_len cp;
891 cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
892 cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
893 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
896 /* Set Default PHY parameters if command is supported */
897 if (hdev->commands[35] & 0x20) {
898 struct hci_cp_le_set_default_phy cp;
901 cp.tx_phys = hdev->le_tx_def_phys;
902 cp.rx_phys = hdev->le_rx_def_phys;
904 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
910 static int __hci_init(struct hci_dev *hdev)
914 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
918 if (hci_dev_test_flag(hdev, HCI_SETUP))
919 hci_debugfs_create_basic(hdev);
921 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
925 /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
926 * dual-mode BR/EDR/LE controllers. AMP controllers only need the
927 * first two stages of init.
929 if (hdev->dev_type != HCI_PRIMARY)
932 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
936 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
940 /* This function is only called when the controller is actually in
941 * configured state. When the controller is marked as unconfigured,
942 * this initialization procedure is not run.
944 * It means that it is possible that a controller runs through its
945 * setup phase and then discovers missing settings. If that is the
946 * case, then this function will not be called. It then will only
947 * be called during the config phase.
949 * So only when in setup phase or config phase, create the debugfs
950 * entries and register the SMP channels.
952 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
953 !hci_dev_test_flag(hdev, HCI_CONFIG))
956 hci_debugfs_create_common(hdev);
958 if (lmp_bredr_capable(hdev))
959 hci_debugfs_create_bredr(hdev);
961 if (lmp_le_capable(hdev))
962 hci_debugfs_create_le(hdev);
967 static int hci_init0_req(struct hci_request *req, unsigned long opt)
969 struct hci_dev *hdev = req->hdev;
971 BT_DBG("%s %ld", hdev->name, opt);
974 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
975 hci_reset_req(req, 0);
977 /* Read Local Version */
978 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
980 /* Read BD Address */
981 if (hdev->set_bdaddr)
982 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
987 static int __hci_unconf_init(struct hci_dev *hdev)
991 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
994 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
998 if (hci_dev_test_flag(hdev, HCI_SETUP))
999 hci_debugfs_create_basic(hdev);
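/* Request builders for the legacy HCISET* ioctls handled in hci_dev_cmd()
 * below; each one queues a single HCI command carrying the ioctl's dev_opt
 * value.
 */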
1004 static int hci_scan_req(struct hci_request *req, unsigned long opt)
1008 BT_DBG("%s %x", req->hdev->name, scan);
1010 /* Inquiry and Page scans */
1011 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1015 static int hci_auth_req(struct hci_request *req, unsigned long opt)
1019 BT_DBG("%s %x", req->hdev->name, auth);
1021 /* Authentication */
1022 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1026 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1030 BT_DBG("%s %x", req->hdev->name, encrypt);
1033 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1037 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
1039 __le16 policy = cpu_to_le16(opt);
1041 BT_DBG("%s %x", req->hdev->name, policy);
1043 /* Default link policy */
1044 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1048 /* Get HCI device by index.
1049 * Device is held on return. */
1050 struct hci_dev *hci_dev_get(int index)
1052 struct hci_dev *hdev = NULL, *d;
1054 BT_DBG("%d", index);
1059 read_lock(&hci_dev_list_lock);
1060 list_for_each_entry(d, &hci_dev_list, list) {
1061 if (d->id == index) {
1062 hdev = hci_dev_hold(d);
1066 read_unlock(&hci_dev_list_lock);
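/* Usage sketch (illustrative, not taken from a specific caller): every
 * reference obtained with hci_dev_get() must be released again with
 * hci_dev_put() once the caller is done with the device:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */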
1070 /* ---- Inquiry support ---- */
1072 bool hci_discovery_active(struct hci_dev *hdev)
1074 struct discovery_state *discov = &hdev->discovery;
1076 switch (discov->state) {
1077 case DISCOVERY_FINDING:
1078 case DISCOVERY_RESOLVING:
1086 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1088 int old_state = hdev->discovery.state;
1090 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1092 if (old_state == state)
1095 hdev->discovery.state = state;
1098 case DISCOVERY_STOPPED:
1099 hci_update_background_scan(hdev);
1101 if (old_state != DISCOVERY_STARTING)
1102 mgmt_discovering(hdev, 0);
1104 case DISCOVERY_STARTING:
1106 case DISCOVERY_FINDING:
1107 mgmt_discovering(hdev, 1);
1109 case DISCOVERY_RESOLVING:
1111 case DISCOVERY_STOPPING:
1116 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1118 struct discovery_state *cache = &hdev->discovery;
1119 struct inquiry_entry *p, *n;
1121 list_for_each_entry_safe(p, n, &cache->all, all) {
1126 INIT_LIST_HEAD(&cache->unknown);
1127 INIT_LIST_HEAD(&cache->resolve);
1130 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1133 struct discovery_state *cache = &hdev->discovery;
1134 struct inquiry_entry *e;
1136 BT_DBG("cache %p, %pMR", cache, bdaddr);
1138 list_for_each_entry(e, &cache->all, all) {
1139 if (!bacmp(&e->data.bdaddr, bdaddr))
1146 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1149 struct discovery_state *cache = &hdev->discovery;
1150 struct inquiry_entry *e;
1152 BT_DBG("cache %p, %pMR", cache, bdaddr);
1154 list_for_each_entry(e, &cache->unknown, list) {
1155 if (!bacmp(&e->data.bdaddr, bdaddr))
1162 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1166 struct discovery_state *cache = &hdev->discovery;
1167 struct inquiry_entry *e;
1169 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1171 list_for_each_entry(e, &cache->resolve, list) {
1172 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1174 if (!bacmp(&e->data.bdaddr, bdaddr))
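/* Re-insert the entry so the resolve list stays ordered by signal strength
 * (entries with a pending name request are skipped); remote name resolution
 * is then attempted for the strongest devices first.
 */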
1181 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1182 struct inquiry_entry *ie)
1184 struct discovery_state *cache = &hdev->discovery;
1185 struct list_head *pos = &cache->resolve;
1186 struct inquiry_entry *p;
1188 list_del(&ie->list);
1190 list_for_each_entry(p, &cache->resolve, list) {
1191 if (p->name_state != NAME_PENDING &&
1192 abs(p->data.rssi) >= abs(ie->data.rssi))
1197 list_add(&ie->list, pos);
1200 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1203 struct discovery_state *cache = &hdev->discovery;
1204 struct inquiry_entry *ie;
1207 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1209 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1211 if (!data->ssp_mode)
1212 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1214 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1216 if (!ie->data.ssp_mode)
1217 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1219 if (ie->name_state == NAME_NEEDED &&
1220 data->rssi != ie->data.rssi) {
1221 ie->data.rssi = data->rssi;
1222 hci_inquiry_cache_update_resolve(hdev, ie);
1228 /* Entry not in the cache. Add new one. */
1229 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1231 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1235 list_add(&ie->all, &cache->all);
1238 ie->name_state = NAME_KNOWN;
1240 ie->name_state = NAME_NOT_KNOWN;
1241 list_add(&ie->list, &cache->unknown);
1245 if (name_known && ie->name_state != NAME_KNOWN &&
1246 ie->name_state != NAME_PENDING) {
1247 ie->name_state = NAME_KNOWN;
1248 list_del(&ie->list);
1251 memcpy(&ie->data, data, sizeof(*data));
1252 ie->timestamp = jiffies;
1253 cache->timestamp = jiffies;
1255 if (ie->name_state == NAME_NOT_KNOWN)
1256 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1262 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1264 struct discovery_state *cache = &hdev->discovery;
1265 struct inquiry_info *info = (struct inquiry_info *) buf;
1266 struct inquiry_entry *e;
1269 list_for_each_entry(e, &cache->all, all) {
1270 struct inquiry_data *data = &e->data;
1275 bacpy(&info->bdaddr, &data->bdaddr);
1276 info->pscan_rep_mode = data->pscan_rep_mode;
1277 info->pscan_period_mode = data->pscan_period_mode;
1278 info->pscan_mode = data->pscan_mode;
1279 memcpy(info->dev_class, data->dev_class, 3);
1280 info->clock_offset = data->clock_offset;
1286 BT_DBG("cache %p, copied %d", cache, copied);
1290 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1292 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1293 struct hci_dev *hdev = req->hdev;
1294 struct hci_cp_inquiry cp;
1296 BT_DBG("%s", hdev->name);
1298 if (test_bit(HCI_INQUIRY, &hdev->flags))
1302 memcpy(&cp.lap, &ir->lap, 3);
1303 cp.length = ir->length;
1304 cp.num_rsp = ir->num_rsp;
1305 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1310 int hci_inquiry(void __user *arg)
1312 __u8 __user *ptr = arg;
1313 struct hci_inquiry_req ir;
1314 struct hci_dev *hdev;
1315 int err = 0, do_inquiry = 0, max_rsp;
1319 if (copy_from_user(&ir, ptr, sizeof(ir)))
1322 hdev = hci_dev_get(ir.dev_id);
1326 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1331 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1336 if (hdev->dev_type != HCI_PRIMARY) {
1341 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1347 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1348 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1349 hci_inquiry_cache_flush(hdev);
1352 hci_dev_unlock(hdev);
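/* ir.length is the Inquiry_Length in units of 1.28 seconds; waiting
 * 2 seconds per unit leaves some headroom for the controller to finish
 * and clear HCI_INQUIRY.
 */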
1354 timeo = ir.length * msecs_to_jiffies(2000);
1357 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1362 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1363 * cleared). If it is interrupted by a signal, return -EINTR.
1365 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1366 TASK_INTERRUPTIBLE)) {
1372 /* For an unlimited number of responses, use a buffer with 255 entries. */
1375 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1377 /* cache_dump can't sleep. Therefore we allocate a temporary buffer and
1378 * then copy it to user space.
1380 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1387 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1388 hci_dev_unlock(hdev);
1390 BT_DBG("num_rsp %d", ir.num_rsp);
1392 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1394 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1408 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1409 * (BD_ADDR) for a HCI device from
1410 * a firmware node property.
1411 * @hdev: The HCI device
1413 * Search the firmware node for 'local-bd-address'.
1415 * All-zero BD addresses are rejected, because those could be properties
1416 * that exist in the firmware tables, but were not updated by the firmware. For
1417 * example, the DTS could define 'local-bd-address' with an all-zero value.
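 *
 * An example of such a rejected value (hypothetical DTS fragment; a
 * bootloader would normally patch in the real address):
 *
 *	bluetooth {
 *		local-bd-address = [00 00 00 00 00 00];
 *	};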
1419 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1421 struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1425 ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1426 (u8 *)&ba, sizeof(ba));
1427 if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1430 bacpy(&hdev->public_addr, &ba);
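/* Power on the transport and run the HCI init sequence for the controller.
 * Serialised against other request processing via the req_sync lock; on
 * failure the transport is shut down again and the error is returned.
 */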
1433 static int hci_dev_do_open(struct hci_dev *hdev)
1437 BT_DBG("%s %p", hdev->name, hdev);
1439 hci_req_sync_lock(hdev);
1441 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1446 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1447 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1448 /* Check for rfkill but allow the HCI setup stage to
1449 * proceed (which in itself doesn't cause any RF activity).
1451 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1456 /* Check for valid public address or a configured static
1457 * random address, but let the HCI setup proceed to
1458 * be able to determine if there is a public address
1461 * In case of user channel usage, it is not important
1462 * if a public address or static random address is
1465 * This check is only valid for BR/EDR controllers
1466 * since AMP controllers do not have an address.
1468 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1469 hdev->dev_type == HCI_PRIMARY &&
1470 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1471 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1472 ret = -EADDRNOTAVAIL;
1477 if (test_bit(HCI_UP, &hdev->flags)) {
1482 if (hdev->open(hdev)) {
1487 set_bit(HCI_RUNNING, &hdev->flags);
1488 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1490 atomic_set(&hdev->cmd_cnt, 1);
1491 set_bit(HCI_INIT, &hdev->flags);
1493 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1494 test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1495 bool invalid_bdaddr;
1497 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1500 ret = hdev->setup(hdev);
1502 /* The transport driver can set the quirk to mark the
1503 * BD_ADDR invalid before creating the HCI device or in
1504 * its setup callback.
1506 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1512 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1513 if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1514 hci_dev_get_bd_addr_from_property(hdev);
1516 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1518 ret = hdev->set_bdaddr(hdev,
1519 &hdev->public_addr);
1521 /* If setting of the BD_ADDR from the device
1522 * property succeeds, then treat the address
1523 * as valid even if the invalid BD_ADDR
1524 * quirk indicates otherwise.
1527 invalid_bdaddr = false;
1532 /* The transport driver can set these quirks before
1533 * creating the HCI device or in its setup callback.
1535 * For the invalid BD_ADDR quirk it is possible that
1536 * it becomes a valid address if the bootloader does
1537 * provide it (see above).
1539 * In case any of them is set, the controller has to
1540 * start up as unconfigured.
1542 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1544 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1546 /* For an unconfigured controller it is required to
1547 * read at least the version information provided by
1548 * the Read Local Version Information command.
1550 * If the set_bdaddr driver callback is provided, then
1551 * also the original Bluetooth public device address
1552 * will be read using the Read BD Address command.
1554 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1555 ret = __hci_unconf_init(hdev);
1558 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1559 /* If public address change is configured, ensure that
1560 * the address gets programmed. If the driver does not
1561 * support changing the public address, fail the power-on procedure.
1564 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1566 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1568 ret = -EADDRNOTAVAIL;
1572 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1573 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1574 ret = __hci_init(hdev);
1575 if (!ret && hdev->post_init)
1576 ret = hdev->post_init(hdev);
1580 /* If the HCI Reset command is clearing all diagnostic settings,
1581 * then they need to be reprogrammed after the init procedure has completed.
1584 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1585 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1586 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1587 ret = hdev->set_diag(hdev, true);
1592 clear_bit(HCI_INIT, &hdev->flags);
1596 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1597 hci_adv_instances_set_rpa_expired(hdev, true);
1598 set_bit(HCI_UP, &hdev->flags);
1599 hci_sock_dev_event(hdev, HCI_DEV_UP);
1600 hci_leds_update_powered(hdev, true);
1601 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1602 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1603 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1604 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1605 hci_dev_test_flag(hdev, HCI_MGMT) &&
1606 hdev->dev_type == HCI_PRIMARY) {
1607 ret = __hci_req_hci_power_on(hdev);
1608 mgmt_power_on(hdev, ret);
1611 /* Init failed, cleanup */
1612 flush_work(&hdev->tx_work);
1614 /* Since hci_rx_work() can queue new cmd_work, it should be flushed
1615 * first to avoid an unexpected call of hci_cmd_work(). */
1618 flush_work(&hdev->rx_work);
1619 flush_work(&hdev->cmd_work);
1621 skb_queue_purge(&hdev->cmd_q);
1622 skb_queue_purge(&hdev->rx_q);
1627 if (hdev->sent_cmd) {
1628 kfree_skb(hdev->sent_cmd);
1629 hdev->sent_cmd = NULL;
1632 clear_bit(HCI_RUNNING, &hdev->flags);
1633 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1636 hdev->flags &= BIT(HCI_RAW);
1640 hci_req_sync_unlock(hdev);
1644 /* ---- HCI ioctl helpers ---- */
1646 int hci_dev_open(__u16 dev)
1648 struct hci_dev *hdev;
1651 hdev = hci_dev_get(dev);
1655 /* Devices that are marked as unconfigured can only be powered
1656 * up as user channel. Trying to bring them up as normal devices
1657 will result in a failure. Only user channel operation is possible.
1660 * When this function is called for a user channel, the flag
1661 * HCI_USER_CHANNEL will be set first before attempting to bring up the device.
1664 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1665 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1670 /* We need to ensure that no other power on/off work is pending
1671 * before proceeding to call hci_dev_do_open. This is
1672 * particularly important if the setup procedure has not yet completed.
1675 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1676 cancel_delayed_work(&hdev->power_off);
1678 /* After this call it is guaranteed that the setup procedure
1679 * has finished. This means that error conditions like RFKILL
1680 * or no valid public or static random address apply.
1682 flush_workqueue(hdev->req_workqueue);
1684 /* For controllers not using the management interface and that
1685 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1686 * so that pairing works for them. Once the management interface
1687 * is in use this bit will be cleared again and userspace has
1688 * to explicitly enable it.
1690 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1691 !hci_dev_test_flag(hdev, HCI_MGMT))
1692 hci_dev_set_flag(hdev, HCI_BONDABLE);
1694 err = hci_dev_do_open(hdev);
1701 /* This function requires the caller holds hdev->lock */
1702 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1704 struct hci_conn_params *p;
1706 list_for_each_entry(p, &hdev->le_conn_params, list) {
1708 hci_conn_drop(p->conn);
1709 hci_conn_put(p->conn);
1712 list_del_init(&p->action);
1715 BT_DBG("All LE pending actions cleared");
1718 int hci_dev_do_close(struct hci_dev *hdev)
1722 BT_DBG("%s %p", hdev->name, hdev);
1724 cancel_delayed_work(&hdev->power_off);
1725 cancel_delayed_work(&hdev->ncmd_timer);
1727 hci_request_cancel_all(hdev);
1728 hci_req_sync_lock(hdev);
1730 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1731 cancel_delayed_work_sync(&hdev->cmd_timer);
1732 hci_req_sync_unlock(hdev);
1736 hci_leds_update_powered(hdev, false);
1738 /* Flush RX and TX works */
1739 flush_work(&hdev->tx_work);
1740 flush_work(&hdev->rx_work);
1742 if (hdev->discov_timeout > 0) {
1743 hdev->discov_timeout = 0;
1744 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1745 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1748 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1749 cancel_delayed_work(&hdev->service_cache);
1751 if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1752 struct adv_info *adv_instance;
1754 cancel_delayed_work_sync(&hdev->rpa_expired);
1756 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1757 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1760 /* Avoid potential lockdep warnings from the *_flush() calls by
1761 * ensuring the workqueue is empty up front.
1763 drain_workqueue(hdev->workqueue);
1767 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1769 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1771 if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1772 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1773 hci_dev_test_flag(hdev, HCI_MGMT))
1774 __mgmt_power_off(hdev);
1776 hci_inquiry_cache_flush(hdev);
1777 hci_pend_le_actions_clear(hdev);
1778 hci_conn_hash_flush(hdev);
1779 hci_dev_unlock(hdev);
1781 smp_unregister(hdev);
1783 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1785 aosp_do_close(hdev);
1786 msft_do_close(hdev);
1792 skb_queue_purge(&hdev->cmd_q);
1793 atomic_set(&hdev->cmd_cnt, 1);
1794 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1795 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1796 set_bit(HCI_INIT, &hdev->flags);
1797 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1798 clear_bit(HCI_INIT, &hdev->flags);
1801 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1802 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1803 test_bit(HCI_UP, &hdev->flags)) {
1804 /* Execute vendor specific shutdown routine */
1806 hdev->shutdown(hdev);
1809 /* flush cmd work */
1810 flush_work(&hdev->cmd_work);
1813 skb_queue_purge(&hdev->rx_q);
1814 skb_queue_purge(&hdev->cmd_q);
1815 skb_queue_purge(&hdev->raw_q);
1817 /* Drop last sent command */
1818 if (hdev->sent_cmd) {
1819 cancel_delayed_work_sync(&hdev->cmd_timer);
1820 kfree_skb(hdev->sent_cmd);
1821 hdev->sent_cmd = NULL;
1824 clear_bit(HCI_RUNNING, &hdev->flags);
1825 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1827 if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1828 wake_up(&hdev->suspend_wait_q);
1830 /* After this point our queues are empty
1831 * and no tasks are scheduled. */
1835 hdev->flags &= BIT(HCI_RAW);
1836 hci_dev_clear_volatile_flags(hdev);
1838 /* Controller radio is available but is currently powered down */
1839 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1841 memset(hdev->eir, 0, sizeof(hdev->eir));
1842 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1843 bacpy(&hdev->random_addr, BDADDR_ANY);
1845 hci_req_sync_unlock(hdev);
1851 int hci_dev_close(__u16 dev)
1853 struct hci_dev *hdev;
1856 hdev = hci_dev_get(dev);
1860 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1865 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1866 cancel_delayed_work(&hdev->power_off);
1868 err = hci_dev_do_close(hdev);
1875 static int hci_dev_do_reset(struct hci_dev *hdev)
1879 BT_DBG("%s %p", hdev->name, hdev);
1881 hci_req_sync_lock(hdev);
1884 skb_queue_purge(&hdev->rx_q);
1885 skb_queue_purge(&hdev->cmd_q);
1887 /* Avoid potential lockdep warnings from the *_flush() calls by
1888 * ensuring the workqueue is empty up front.
1890 drain_workqueue(hdev->workqueue);
1893 hci_inquiry_cache_flush(hdev);
1894 hci_conn_hash_flush(hdev);
1895 hci_dev_unlock(hdev);
1900 atomic_set(&hdev->cmd_cnt, 1);
1901 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1903 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1905 hci_req_sync_unlock(hdev);
1909 int hci_dev_reset(__u16 dev)
1911 struct hci_dev *hdev;
1914 hdev = hci_dev_get(dev);
1918 if (!test_bit(HCI_UP, &hdev->flags)) {
1923 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1928 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1933 err = hci_dev_do_reset(hdev);
1940 int hci_dev_reset_stat(__u16 dev)
1942 struct hci_dev *hdev;
1945 hdev = hci_dev_get(dev);
1949 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1954 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1959 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
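/* Keep the mgmt HCI_CONNECTABLE/HCI_DISCOVERABLE flags in sync when the
 * scan mode is changed through the legacy HCISETSCAN ioctl rather than
 * through the management interface.
 */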
1966 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1968 bool conn_changed, discov_changed;
1970 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1972 if ((scan & SCAN_PAGE))
1973 conn_changed = !hci_dev_test_and_set_flag(hdev,
1976 conn_changed = hci_dev_test_and_clear_flag(hdev,
1979 if ((scan & SCAN_INQUIRY)) {
1980 discov_changed = !hci_dev_test_and_set_flag(hdev,
1983 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1984 discov_changed = hci_dev_test_and_clear_flag(hdev,
1988 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1991 if (conn_changed || discov_changed) {
1992 /* In case this was disabled through mgmt */
1993 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1995 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1996 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1998 mgmt_new_settings(hdev);
2002 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2004 struct hci_dev *hdev;
2005 struct hci_dev_req dr;
2008 if (copy_from_user(&dr, arg, sizeof(dr)))
2011 hdev = hci_dev_get(dr.dev_id);
2015 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2020 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2025 if (hdev->dev_type != HCI_PRIMARY) {
2030 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2037 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2038 HCI_INIT_TIMEOUT, NULL);
2042 if (!lmp_encrypt_capable(hdev)) {
2047 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2048 /* Auth must be enabled first */
2049 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2050 HCI_INIT_TIMEOUT, NULL);
2055 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2056 HCI_INIT_TIMEOUT, NULL);
2060 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2061 HCI_INIT_TIMEOUT, NULL);
2063 /* Ensure that the connectable and discoverable states
2064 * get correctly modified as this was a non-mgmt change.
2067 hci_update_scan_state(hdev, dr.dev_opt);
2071 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2072 HCI_INIT_TIMEOUT, NULL);
2075 case HCISETLINKMODE:
2076 hdev->link_mode = ((__u16) dr.dev_opt) &
2077 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2081 if (hdev->pkt_type == (__u16) dr.dev_opt)
2084 hdev->pkt_type = (__u16) dr.dev_opt;
2085 mgmt_phy_configuration_changed(hdev, NULL);
2089 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2090 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2094 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2095 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2108 int hci_get_dev_list(void __user *arg)
2110 struct hci_dev *hdev;
2111 struct hci_dev_list_req *dl;
2112 struct hci_dev_req *dr;
2113 int n = 0, size, err;
2116 if (get_user(dev_num, (__u16 __user *) arg))
2119 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2122 size = sizeof(*dl) + dev_num * sizeof(*dr);
2124 dl = kzalloc(size, GFP_KERNEL);
2130 read_lock(&hci_dev_list_lock);
2131 list_for_each_entry(hdev, &hci_dev_list, list) {
2132 unsigned long flags = hdev->flags;
2134 /* When the auto-off is configured it means the transport
2135 * is running, but in that case still indicate that the
2136 * device is actually down.
2138 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2139 flags &= ~BIT(HCI_UP);
2141 (dr + n)->dev_id = hdev->id;
2142 (dr + n)->dev_opt = flags;
2147 read_unlock(&hci_dev_list_lock);
2150 size = sizeof(*dl) + n * sizeof(*dr);
2152 err = copy_to_user(arg, dl, size);
2155 return err ? -EFAULT : 0;
2158 int hci_get_dev_info(void __user *arg)
2160 struct hci_dev *hdev;
2161 struct hci_dev_info di;
2162 unsigned long flags;
2165 if (copy_from_user(&di, arg, sizeof(di)))
2168 hdev = hci_dev_get(di.dev_id);
2172 /* When the auto-off is configured it means the transport
2173 * is running, but in that case still indicate that the
2174 * device is actually down.
2176 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2177 flags = hdev->flags & ~BIT(HCI_UP);
2179 flags = hdev->flags;
2181 strcpy(di.name, hdev->name);
2182 di.bdaddr = hdev->bdaddr;
2183 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2185 di.pkt_type = hdev->pkt_type;
2186 if (lmp_bredr_capable(hdev)) {
2187 di.acl_mtu = hdev->acl_mtu;
2188 di.acl_pkts = hdev->acl_pkts;
2189 di.sco_mtu = hdev->sco_mtu;
2190 di.sco_pkts = hdev->sco_pkts;
2192 di.acl_mtu = hdev->le_mtu;
2193 di.acl_pkts = hdev->le_pkts;
2197 di.link_policy = hdev->link_policy;
2198 di.link_mode = hdev->link_mode;
2200 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2201 memcpy(&di.features, &hdev->features, sizeof(di.features));
2203 if (copy_to_user(arg, &di, sizeof(di)))
2211 /* ---- Interface to HCI drivers ---- */
2213 static int hci_rfkill_set_block(void *data, bool blocked)
2215 struct hci_dev *hdev = data;
2217 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2219 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2223 hci_dev_set_flag(hdev, HCI_RFKILLED);
2224 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2225 !hci_dev_test_flag(hdev, HCI_CONFIG))
2226 hci_dev_do_close(hdev);
2228 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2234 static const struct rfkill_ops hci_rfkill_ops = {
2235 .set_block = hci_rfkill_set_block,
2238 static void hci_power_on(struct work_struct *work)
2240 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2243 BT_DBG("%s", hdev->name);
2245 if (test_bit(HCI_UP, &hdev->flags) &&
2246 hci_dev_test_flag(hdev, HCI_MGMT) &&
2247 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2248 cancel_delayed_work(&hdev->power_off);
2249 hci_req_sync_lock(hdev);
2250 err = __hci_req_hci_power_on(hdev);
2251 hci_req_sync_unlock(hdev);
2252 mgmt_power_on(hdev, err);
2256 err = hci_dev_do_open(hdev);
2259 mgmt_set_powered_failed(hdev, err);
2260 hci_dev_unlock(hdev);
2264 /* During the HCI setup phase, a few error conditions are
2265 * ignored and they need to be checked now. If they are still
2266 * valid, it is important to turn the device back off.
2268 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2269 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2270 (hdev->dev_type == HCI_PRIMARY &&
2271 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2272 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2273 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2274 hci_dev_do_close(hdev);
2275 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2276 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2277 HCI_AUTO_OFF_TIMEOUT);
2280 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2281 /* For unconfigured devices, set the HCI_RAW flag
2282 * so that userspace can easily identify them.
2284 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2285 set_bit(HCI_RAW, &hdev->flags);
2287 /* For fully configured devices, this will send
2288 * the Index Added event. For unconfigured devices,
2289 * it will send an Unconfigured Index Added event.
2291 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2292 * and no event will be sent.
2294 mgmt_index_added(hdev);
2295 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2296 /* When the controller is now configured, then it
2297 * is important to clear the HCI_RAW flag.
2299 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2300 clear_bit(HCI_RAW, &hdev->flags);
2302 /* Powering on the controller with HCI_CONFIG set only
2303 * happens with the transition from unconfigured to
2304 * configured. This will send the Index Added event.
2306 mgmt_index_added(hdev);
2310 static void hci_power_off(struct work_struct *work)
2312 struct hci_dev *hdev = container_of(work, struct hci_dev,
2315 BT_DBG("%s", hdev->name);
2317 hci_dev_do_close(hdev);
2320 static void hci_error_reset(struct work_struct *work)
2322 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2324 BT_DBG("%s", hdev->name);
2327 hdev->hw_error(hdev, hdev->hw_error_code);
2329 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2331 if (hci_dev_do_close(hdev))
2334 hci_dev_do_open(hdev);
2337 void hci_uuids_clear(struct hci_dev *hdev)
2339 struct bt_uuid *uuid, *tmp;
2341 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2342 list_del(&uuid->list);
2347 void hci_link_keys_clear(struct hci_dev *hdev)
2349 struct link_key *key;
2351 list_for_each_entry(key, &hdev->link_keys, list) {
2352 list_del_rcu(&key->list);
2353 kfree_rcu(key, rcu);
2357 void hci_smp_ltks_clear(struct hci_dev *hdev)
2361 list_for_each_entry(k, &hdev->long_term_keys, list) {
2362 list_del_rcu(&k->list);
2367 void hci_smp_irks_clear(struct hci_dev *hdev)
2371 list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
2372 list_del_rcu(&k->list);
2377 void hci_blocked_keys_clear(struct hci_dev *hdev)
2379 struct blocked_key *b;
2381 list_for_each_entry(b, &hdev->blocked_keys, list) {
2382 list_del_rcu(&b->list);
2387 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2389 bool blocked = false;
2390 struct blocked_key *b;
2393 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2394 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2404 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2409 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2410 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2413 if (hci_is_blocked_key(hdev,
2414 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2416 bt_dev_warn_ratelimited(hdev,
2417 "Link key blocked for %pMR",
2430 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2431 u8 key_type, u8 old_key_type)
2434 if (key_type < 0x03)
2437 /* Debug keys are insecure so don't store them persistently */
2438 if (key_type == HCI_LK_DEBUG_COMBINATION)
2441 /* Changed combination key and there's no previous one */
2442 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2445 /* Security mode 3 case */
2449 /* BR/EDR key derived using SC from an LE link */
2450 if (conn->type == LE_LINK)
2453 /* Neither local nor remote side had no-bonding as requirement */
2454 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2457 /* Local side had dedicated bonding as requirement */
2458 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2461 /* Remote side had dedicated bonding as requirement */
2462 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2465 /* If none of the above criteria match, then don't store the key persistently. */
2470 static u8 ltk_role(u8 type)
2472 if (type == SMP_LTK)
2473 return HCI_ROLE_MASTER;
2475 return HCI_ROLE_SLAVE;
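/* Illustrative sketch (not a call site in this file): looking up the LTK for
 * an LE connection where the local device acts as peripheral. The role
 * argument selects between keys stored for the central and peripheral roles;
 * keys generated with Secure Connections (smp_ltk_is_sc()) match either role.
 *
 *	struct smp_ltk *ltk;
 *
 *	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, HCI_ROLE_SLAVE);
 *	// NULL means no matching (or only a blocked) key is stored
 */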
2478 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2479 u8 addr_type, u8 role)
2484 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2485 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2488 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2491 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2493 bt_dev_warn_ratelimited(hdev,
2494 "LTK blocked for %pMR",
2507 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2509 struct smp_irk *irk_to_return = NULL;
2510 struct smp_irk *irk;
2513 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2514 if (!bacmp(&irk->rpa, rpa)) {
2515 irk_to_return = irk;
2520 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2521 if (smp_irk_matches(hdev, irk->val, rpa)) {
2522 bacpy(&irk->rpa, rpa);
2523 irk_to_return = irk;
2529 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2530 irk_to_return->val)) {
2531 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2532 &irk_to_return->bdaddr);
2533 irk_to_return = NULL;
2538 return irk_to_return;
2541 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2544 struct smp_irk *irk_to_return = NULL;
2545 struct smp_irk *irk;
2547 /* Identity Address must be public or static random */
2548 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2552 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2553 if (addr_type == irk->addr_type &&
2554 bacmp(bdaddr, &irk->bdaddr) == 0) {
2555 irk_to_return = irk;
2562 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2563 irk_to_return->val)) {
2564 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2565 &irk_to_return->bdaddr);
2566 irk_to_return = NULL;
2571 return irk_to_return;
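/* Worked example for the Identity Address check above (illustrative): a
 * static random address must have its two most significant bits set, so
 * C0:11:22:33:44:55 (bdaddr->b[5] = 0xC0) is accepted, while
 * 40:11:22:33:44:55 (top bits 01, i.e. a resolvable private address) is
 * rejected and never treated as an identity.
 */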
2574 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2575 bdaddr_t *bdaddr, u8 *val, u8 type,
2576 u8 pin_len, bool *persistent)
2578 struct link_key *key, *old_key;
2581 old_key = hci_find_link_key(hdev, bdaddr);
2583 old_key_type = old_key->type;
2586 old_key_type = conn ? conn->key_type : 0xff;
2587 key = kzalloc(sizeof(*key), GFP_KERNEL);
2590 list_add_rcu(&key->list, &hdev->link_keys);
2593 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2595 /* Some buggy controller combinations generate a changed
2596 * combination key for legacy pairing even when there's no
2598 if (type == HCI_LK_CHANGED_COMBINATION &&
2599 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2600 type = HCI_LK_COMBINATION;
2602 conn->key_type = type;
2605 bacpy(&key->bdaddr, bdaddr);
2606 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2607 key->pin_len = pin_len;
2609 if (type == HCI_LK_CHANGED_COMBINATION)
2610 key->type = old_key_type;
2615 *persistent = hci_persistent_key(hdev, conn, type,
2621 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2622 u8 addr_type, u8 type, u8 authenticated,
2623 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2625 struct smp_ltk *key, *old_key;
2626 u8 role = ltk_role(type);
2628 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2632 key = kzalloc(sizeof(*key), GFP_KERNEL);
2635 list_add_rcu(&key->list, &hdev->long_term_keys);
2638 bacpy(&key->bdaddr, bdaddr);
2639 key->bdaddr_type = addr_type;
2640 memcpy(key->val, tk, sizeof(key->val));
2641 key->authenticated = authenticated;
2644 key->enc_size = enc_size;
2650 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2651 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2653 struct smp_irk *irk;
2655 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2657 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2661 bacpy(&irk->bdaddr, bdaddr);
2662 irk->addr_type = addr_type;
2664 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2667 memcpy(irk->val, val, 16);
2668 bacpy(&irk->rpa, rpa);
2673 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2675 struct link_key *key;
2677 key = hci_find_link_key(hdev, bdaddr);
2681 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2683 list_del_rcu(&key->list);
2684 kfree_rcu(key, rcu);
2689 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2694 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2695 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2698 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2700 list_del_rcu(&k->list);
2705 return removed ? 0 : -ENOENT;
2708 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2712 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2713 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2716 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2718 list_del_rcu(&k->list);
2723 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2726 struct smp_irk *irk;
2729 if (type == BDADDR_BREDR) {
2730 if (hci_find_link_key(hdev, bdaddr))
2735 /* Convert to HCI addr type which struct smp_ltk uses */
2736 if (type == BDADDR_LE_PUBLIC)
2737 addr_type = ADDR_LE_DEV_PUBLIC;
2739 addr_type = ADDR_LE_DEV_RANDOM;
2741 irk = hci_get_irk(hdev, bdaddr, addr_type);
2743 bdaddr = &irk->bdaddr;
2744 addr_type = irk->addr_type;
2748 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2749 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2759 /* HCI command timer function */
2760 static void hci_cmd_timeout(struct work_struct *work)
2762 struct hci_dev *hdev = container_of(work, struct hci_dev,
2765 if (hdev->sent_cmd) {
2766 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2767 u16 opcode = __le16_to_cpu(sent->opcode);
2769 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2771 bt_dev_err(hdev, "command tx timeout");
2774 if (hdev->cmd_timeout)
2775 hdev->cmd_timeout(hdev);
2777 atomic_set(&hdev->cmd_cnt, 1);
2778 queue_work(hdev->workqueue, &hdev->cmd_work);
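/* Illustrative sketch (hypothetical driver code): a transport driver may hook
 * hdev->cmd_timeout to perform vendor-specific recovery when a command times
 * out; the core still resets cmd_cnt above so the command queue can make
 * progress afterwards.
 *
 *	static void my_drv_cmd_timeout(struct hci_dev *hdev)
 *	{
 *		struct my_drv_data *priv = hci_get_drvdata(hdev);
 *
 *		my_drv_reset_chip(priv);	// hypothetical helper
 *	}
 *
 *	hdev->cmd_timeout = my_drv_cmd_timeout;
 */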
2781 /* HCI ncmd timer function */
2782 static void hci_ncmd_timeout(struct work_struct *work)
2784 struct hci_dev *hdev = container_of(work, struct hci_dev,
2787 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
2789 /* During HCI_INIT phase no events can be injected if the ncmd timer
2790 * triggers since the procedure has its own timeout handling.
2792 if (test_bit(HCI_INIT, &hdev->flags))
2795 /* This is an irrecoverable state, inject hardware error event */
2796 hci_reset_dev(hdev);
2799 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2800 bdaddr_t *bdaddr, u8 bdaddr_type)
2802 struct oob_data *data;
2804 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2805 if (bacmp(bdaddr, &data->bdaddr) != 0)
2807 if (data->bdaddr_type != bdaddr_type)
2815 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2818 struct oob_data *data;
2820 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2824 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2826 list_del(&data->list);
2832 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2834 struct oob_data *data, *n;
2836 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2837 list_del(&data->list);
2842 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2843 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2844 u8 *hash256, u8 *rand256)
2846 struct oob_data *data;
2848 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2850 data = kmalloc(sizeof(*data), GFP_KERNEL);
2854 bacpy(&data->bdaddr, bdaddr);
2855 data->bdaddr_type = bdaddr_type;
2856 list_add(&data->list, &hdev->remote_oob_data);
2859 if (hash192 && rand192) {
2860 memcpy(data->hash192, hash192, sizeof(data->hash192));
2861 memcpy(data->rand192, rand192, sizeof(data->rand192));
2862 if (hash256 && rand256)
2863 data->present = 0x03;
2865 memset(data->hash192, 0, sizeof(data->hash192));
2866 memset(data->rand192, 0, sizeof(data->rand192));
2867 if (hash256 && rand256)
2868 data->present = 0x02;
2870 data->present = 0x00;
2873 if (hash256 && rand256) {
2874 memcpy(data->hash256, hash256, sizeof(data->hash256));
2875 memcpy(data->rand256, rand256, sizeof(data->rand256));
2877 memset(data->hash256, 0, sizeof(data->hash256));
2878 memset(data->rand256, 0, sizeof(data->rand256));
2879 if (hash192 && rand192)
2880 data->present = 0x01;
2883 BT_DBG("%s for %pMR", hdev->name, bdaddr);
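/* Note on the encoding above: data->present is a bitmask of the provided OOB
 * data sets - 0x01 for P-192 only (hash192/rand192), 0x02 for P-256 only
 * (hash256/rand256) and 0x03 when both are valid; the unused set is zeroed so
 * stale values are never reported.
 */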
2888 /* This function requires the caller holds hdev->lock */
2889 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2891 struct adv_info *adv_instance;
2893 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2894 if (adv_instance->instance == instance)
2895 return adv_instance;
2901 /* This function requires the caller holds hdev->lock */
2902 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2904 struct adv_info *cur_instance;
2906 cur_instance = hci_find_adv_instance(hdev, instance);
2910 if (cur_instance == list_last_entry(&hdev->adv_instances,
2911 struct adv_info, list))
2912 return list_first_entry(&hdev->adv_instances,
2913 struct adv_info, list);
2915 return list_next_entry(cur_instance, list);
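/* Illustrative sketch (caller must hold hdev->lock): advancing the
 * multi-advertising rotation. hci_get_next_instance() wraps back to the head
 * of hdev->adv_instances once the current instance is the last entry.
 *
 *	struct adv_info *next;
 *
 *	next = hci_get_next_instance(hdev, hdev->cur_adv_instance);
 *	// NULL means the current instance no longer exists; otherwise
 *	// next->instance is the candidate for the next rotation slot
 */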
2918 /* This function requires the caller holds hdev->lock */
2919 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2921 struct adv_info *adv_instance;
2923 adv_instance = hci_find_adv_instance(hdev, instance);
2927 BT_DBG("%s removing %d", hdev->name, instance);
2929 if (hdev->cur_adv_instance == instance) {
2930 if (hdev->adv_instance_timeout) {
2931 cancel_delayed_work(&hdev->adv_instance_expire);
2932 hdev->adv_instance_timeout = 0;
2934 hdev->cur_adv_instance = 0x00;
2937 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2939 list_del(&adv_instance->list);
2940 kfree(adv_instance);
2942 hdev->adv_instance_cnt--;
2947 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2949 struct adv_info *adv_instance, *n;
2951 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2952 adv_instance->rpa_expired = rpa_expired;
2955 /* This function requires the caller holds hdev->lock */
2956 void hci_adv_instances_clear(struct hci_dev *hdev)
2958 struct adv_info *adv_instance, *n;
2960 if (hdev->adv_instance_timeout) {
2961 cancel_delayed_work(&hdev->adv_instance_expire);
2962 hdev->adv_instance_timeout = 0;
2965 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2966 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2967 list_del(&adv_instance->list);
2968 kfree(adv_instance);
2971 hdev->adv_instance_cnt = 0;
2972 hdev->cur_adv_instance = 0x00;
2975 static void adv_instance_rpa_expired(struct work_struct *work)
2977 struct adv_info *adv_instance = container_of(work, struct adv_info,
2978 rpa_expired_cb.work);
2982 adv_instance->rpa_expired = true;
2985 /* This function requires the caller holds hdev->lock */
2986 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2987 u16 adv_data_len, u8 *adv_data,
2988 u16 scan_rsp_len, u8 *scan_rsp_data,
2989 u16 timeout, u16 duration, s8 tx_power,
2990 u32 min_interval, u32 max_interval)
2992 struct adv_info *adv_instance;
2994 adv_instance = hci_find_adv_instance(hdev, instance);
2996 memset(adv_instance->adv_data, 0,
2997 sizeof(adv_instance->adv_data));
2998 memset(adv_instance->scan_rsp_data, 0,
2999 sizeof(adv_instance->scan_rsp_data));
3001 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
3002 instance < 1 || instance > hdev->le_num_of_adv_sets)
3005 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
3009 adv_instance->pending = true;
3010 adv_instance->instance = instance;
3011 list_add(&adv_instance->list, &hdev->adv_instances);
3012 hdev->adv_instance_cnt++;
3015 adv_instance->flags = flags;
3016 adv_instance->adv_data_len = adv_data_len;
3017 adv_instance->scan_rsp_len = scan_rsp_len;
3018 adv_instance->min_interval = min_interval;
3019 adv_instance->max_interval = max_interval;
3020 adv_instance->tx_power = tx_power;
3023 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3026 memcpy(adv_instance->scan_rsp_data,
3027 scan_rsp_data, scan_rsp_len);
3029 adv_instance->timeout = timeout;
3030 adv_instance->remaining_time = timeout;
3033 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3035 adv_instance->duration = duration;
3037 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3038 adv_instance_rpa_expired);
3040 BT_DBG("%s for %d", hdev->name, instance);
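/* Illustrative sketch (caller must hold hdev->lock): registering a single
 * advertising instance with hypothetical data. A timeout of 0 means no
 * expiry, a duration of 0 selects the default rotation duration, and the
 * last two arguments are the per-instance advertising interval bounds.
 *
 *	static const u8 ad[] = { 0x02, 0x01, 0x06 };	// Flags AD structure
 *
 *	hci_dev_lock(hdev);
 *	hci_add_adv_instance(hdev, 0x01, 0, sizeof(ad), (u8 *)ad, 0, NULL,
 *			     0, 0, HCI_TX_POWER_INVALID,
 *			     hdev->le_adv_min_interval,
 *			     hdev->le_adv_max_interval);
 *	hci_dev_unlock(hdev);
 */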
3045 /* This function requires the caller holds hdev->lock */
3046 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3047 u16 adv_data_len, u8 *adv_data,
3048 u16 scan_rsp_len, u8 *scan_rsp_data)
3050 struct adv_info *adv_instance;
3052 adv_instance = hci_find_adv_instance(hdev, instance);
3054 /* If advertisement doesn't exist, we can't modify its data */
3059 memset(adv_instance->adv_data, 0,
3060 sizeof(adv_instance->adv_data));
3061 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3062 adv_instance->adv_data_len = adv_data_len;
3066 memset(adv_instance->scan_rsp_data, 0,
3067 sizeof(adv_instance->scan_rsp_data));
3068 memcpy(adv_instance->scan_rsp_data,
3069 scan_rsp_data, scan_rsp_len);
3070 adv_instance->scan_rsp_len = scan_rsp_len;
3076 /* This function requires the caller holds hdev->lock */
3077 void hci_adv_monitors_clear(struct hci_dev *hdev)
3079 struct adv_monitor *monitor;
3082 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3083 hci_free_adv_monitor(hdev, monitor);
3085 idr_destroy(&hdev->adv_monitors_idr);
3088 /* Frees the monitor structure and does some bookkeeping.
3089 * This function requires the caller holds hdev->lock.
3091 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3093 struct adv_pattern *pattern;
3094 struct adv_pattern *tmp;
3099 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
3100 list_del(&pattern->list);
3104 if (monitor->handle)
3105 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3107 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
3108 hdev->adv_monitors_cnt--;
3109 mgmt_adv_monitor_removed(hdev, monitor->handle);
3115 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
3117 return mgmt_add_adv_patterns_monitor_complete(hdev, status);
3120 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
3122 return mgmt_remove_adv_monitor_complete(hdev, status);
3125 /* Assigns handle to a monitor, and if offloading is supported and power is on,
3126 * also attempts to forward the request to the controller.
3127 * Returns true if request is forwarded (result is pending), false otherwise.
3128 * This function requires the caller holds hdev->lock.
3130 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
3133 int min, max, handle;
3142 min = HCI_MIN_ADV_MONITOR_HANDLE;
3143 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3144 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3151 monitor->handle = handle;
3153 if (!hdev_is_powered(hdev))
3156 switch (hci_get_adv_monitor_offload_ext(hdev)) {
3157 case HCI_ADV_MONITOR_EXT_NONE:
3158 hci_update_background_scan(hdev);
3159 bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
3160 /* Message was not forwarded to controller - not an error */
3162 case HCI_ADV_MONITOR_EXT_MSFT:
3163 *err = msft_add_monitor_pattern(hdev, monitor);
3164 bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
3172 /* Attempts to tell the controller and free the monitor. If somehow the
3173 * controller doesn't have a corresponding handle, remove anyway.
3174 * Returns true if request is forwarded (result is pending), false otherwise.
3175 * This function requires the caller holds hdev->lock.
3177 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
3178 struct adv_monitor *monitor,
3179 u16 handle, int *err)
3183 switch (hci_get_adv_monitor_offload_ext(hdev)) {
3184 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
3186 case HCI_ADV_MONITOR_EXT_MSFT:
3187 *err = msft_remove_monitor(hdev, monitor, handle);
3192 /* In case no matching handle is registered, just free the monitor */
3192 if (*err == -ENOENT)
3198 if (*err == -ENOENT)
3199 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
3201 hci_free_adv_monitor(hdev, monitor);
3207 /* Returns true if request is forwarded (result is pending), false otherwise.
3208 * This function requires the caller holds hdev->lock.
3210 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
3212 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
3220 pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
3221 if (!*err && !pending)
3222 hci_update_background_scan(hdev);
3224 bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
3225 hdev->name, handle, *err, pending ? "" : "not ");
3230 /* Returns true if request is forwarded (result is pending), false otherwise.
3231 * This function requires the caller holds hdev->lock.
3233 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
3235 struct adv_monitor *monitor;
3236 int idr_next_id = 0;
3237 bool pending = false;
3238 bool update = false;
3242 while (!*err && !pending) {
3243 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
3247 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
3249 if (!*err && !pending)
3254 hci_update_background_scan(hdev);
3256 bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
3257 hdev->name, *err, pending ? "" : "not ");
3262 /* This function requires the caller holds hdev->lock */
3263 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3265 return !idr_is_empty(&hdev->adv_monitors_idr);
3268 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3270 if (msft_monitor_supported(hdev))
3271 return HCI_ADV_MONITOR_EXT_MSFT;
3273 return HCI_ADV_MONITOR_EXT_NONE;
3276 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3277 bdaddr_t *bdaddr, u8 type)
3279 struct bdaddr_list *b;
3281 list_for_each_entry(b, bdaddr_list, list) {
3282 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3289 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3290 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3293 struct bdaddr_list_with_irk *b;
3295 list_for_each_entry(b, bdaddr_list, list) {
3296 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3303 struct bdaddr_list_with_flags *
3304 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3305 bdaddr_t *bdaddr, u8 type)
3307 struct bdaddr_list_with_flags *b;
3309 list_for_each_entry(b, bdaddr_list, list) {
3310 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3317 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3319 struct bdaddr_list *b, *n;
3321 list_for_each_entry_safe(b, n, bdaddr_list, list) {
3327 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3329 struct bdaddr_list *entry;
3331 if (!bacmp(bdaddr, BDADDR_ANY))
3334 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3337 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3341 bacpy(&entry->bdaddr, bdaddr);
3342 entry->bdaddr_type = type;
3344 list_add(&entry->list, list);
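/* Illustrative sketch: typical use of the plain bdaddr_list helpers, for
 * example against the LE accept list (not a specific call site in this file).
 *
 *	bdaddr_t peer = { .b = { 0x55, 0x44, 0x33, 0x22, 0x11, 0xc0 } };
 *
 *	if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, &peer,
 *				    ADDR_LE_DEV_RANDOM))
 *		hci_bdaddr_list_add(&hdev->le_accept_list, &peer,
 *				    ADDR_LE_DEV_RANDOM);
 *
 *	hci_bdaddr_list_del(&hdev->le_accept_list, &peer, ADDR_LE_DEV_RANDOM);
 */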
3349 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3350 u8 type, u8 *peer_irk, u8 *local_irk)
3352 struct bdaddr_list_with_irk *entry;
3354 if (!bacmp(bdaddr, BDADDR_ANY))
3357 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3360 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3364 bacpy(&entry->bdaddr, bdaddr);
3365 entry->bdaddr_type = type;
3368 memcpy(entry->peer_irk, peer_irk, 16);
3371 memcpy(entry->local_irk, local_irk, 16);
3373 list_add(&entry->list, list);
3378 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3381 struct bdaddr_list_with_flags *entry;
3383 if (!bacmp(bdaddr, BDADDR_ANY))
3386 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3389 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3393 bacpy(&entry->bdaddr, bdaddr);
3394 entry->bdaddr_type = type;
3395 entry->current_flags = flags;
3397 list_add(&entry->list, list);
3402 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3404 struct bdaddr_list *entry;
3406 if (!bacmp(bdaddr, BDADDR_ANY)) {
3407 hci_bdaddr_list_clear(list);
3411 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3415 list_del(&entry->list);
3421 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3424 struct bdaddr_list_with_irk *entry;
3426 if (!bacmp(bdaddr, BDADDR_ANY)) {
3427 hci_bdaddr_list_clear(list);
3431 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3435 list_del(&entry->list);
3441 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3444 struct bdaddr_list_with_flags *entry;
3446 if (!bacmp(bdaddr, BDADDR_ANY)) {
3447 hci_bdaddr_list_clear(list);
3451 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3455 list_del(&entry->list);
3461 /* This function requires the caller holds hdev->lock */
3462 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3463 bdaddr_t *addr, u8 addr_type)
3465 struct hci_conn_params *params;
3467 list_for_each_entry(params, &hdev->le_conn_params, list) {
3468 if (bacmp(&params->addr, addr) == 0 &&
3469 params->addr_type == addr_type) {
3477 /* This function requires the caller holds hdev->lock */
3478 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3479 bdaddr_t *addr, u8 addr_type)
3481 struct hci_conn_params *param;
3483 switch (addr_type) {
3484 case ADDR_LE_DEV_PUBLIC_RESOLVED:
3485 addr_type = ADDR_LE_DEV_PUBLIC;
3487 case ADDR_LE_DEV_RANDOM_RESOLVED:
3488 addr_type = ADDR_LE_DEV_RANDOM;
3492 list_for_each_entry(param, list, action) {
3493 if (bacmp(&param->addr, addr) == 0 &&
3494 param->addr_type == addr_type)
3501 /* This function requires the caller holds hdev->lock */
3502 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3503 bdaddr_t *addr, u8 addr_type)
3505 struct hci_conn_params *params;
3507 params = hci_conn_params_lookup(hdev, addr, addr_type);
3511 params = kzalloc(sizeof(*params), GFP_KERNEL);
3513 bt_dev_err(hdev, "out of memory");
3517 bacpy(&params->addr, addr);
3518 params->addr_type = addr_type;
3520 list_add(&params->list, &hdev->le_conn_params);
3521 INIT_LIST_HEAD(&params->action);
3523 params->conn_min_interval = hdev->le_conn_min_interval;
3524 params->conn_max_interval = hdev->le_conn_max_interval;
3525 params->conn_latency = hdev->le_conn_latency;
3526 params->supervision_timeout = hdev->le_supv_timeout;
3527 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3529 BT_DBG("addr %pMR (type %u)", addr, addr_type);
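/* Illustrative sketch (caller must hold hdev->lock, peer_addr is a
 * hypothetical bdaddr_t): creating parameters for a peer and marking it for
 * automatic reconnection. The defaults assigned above come from the
 * controller-wide le_conn_* values; management code additionally links
 * params->action into the pending connection or report lists.
 *
 *	struct hci_conn_params *params;
 *
 *	params = hci_conn_params_add(hdev, &peer_addr, ADDR_LE_DEV_PUBLIC);
 *	if (params)
 *		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 */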
3534 static void hci_conn_params_free(struct hci_conn_params *params)
3537 hci_conn_drop(params->conn);
3538 hci_conn_put(params->conn);
3541 list_del(&params->action);
3542 list_del(&params->list);
3546 /* This function requires the caller holds hdev->lock */
3547 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3549 struct hci_conn_params *params;
3551 params = hci_conn_params_lookup(hdev, addr, addr_type);
3555 hci_conn_params_free(params);
3557 hci_update_background_scan(hdev);
3559 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3562 /* This function requires the caller holds hdev->lock */
3563 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3565 struct hci_conn_params *params, *tmp;
3567 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3568 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3571 /* If trying to establish a one-time connection to a disabled
3572 * device, leave the params, but mark them as just once.
3574 if (params->explicit_connect) {
3575 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3579 list_del(&params->list);
3583 BT_DBG("All LE disabled connection parameters were removed");
3586 /* This function requires the caller holds hdev->lock */
3587 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3589 struct hci_conn_params *params, *tmp;
3591 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3592 hci_conn_params_free(params);
3594 BT_DBG("All LE connection parameters were removed");
3597 /* Copy the Identity Address of the controller.
3599 * If the controller has a public BD_ADDR, then by default use that one.
3600 * If this is an LE-only controller without a public address, default to
3601 * the static random address.
3603 * For debugging purposes it is possible to force controllers with a
3604 * public address to use the static random address instead.
3606 * In case BR/EDR has been disabled on a dual-mode controller and
3607 * userspace has configured a static address, then that address
3608 * becomes the identity address instead of the public BR/EDR address.
3610 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3613 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3614 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3615 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3616 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3617 bacpy(bdaddr, &hdev->static_addr);
3618 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3620 bacpy(bdaddr, &hdev->bdaddr);
3621 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
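/* Illustrative usage of the helper above: callers pass storage for both the
 * address and its type and act on the returned combination.
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 *	// id_addr_type is ADDR_LE_DEV_PUBLIC or ADDR_LE_DEV_RANDOM
 */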
3625 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3629 for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3630 clear_bit(i, hdev->suspend_tasks);
3632 wake_up(&hdev->suspend_wait_q);
3635 static int hci_suspend_wait_event(struct hci_dev *hdev)
3638 (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \
3639 __SUSPEND_NUM_TASKS)
3642 int ret = wait_event_timeout(hdev->suspend_wait_q,
3643 WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3646 bt_dev_err(hdev, "Timed out waiting for suspend events");
3647 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3648 if (test_bit(i, hdev->suspend_tasks))
3649 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3650 clear_bit(i, hdev->suspend_tasks);
3661 static void hci_prepare_suspend(struct work_struct *work)
3663 struct hci_dev *hdev =
3664 container_of(work, struct hci_dev, suspend_prepare);
3667 hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3668 hci_dev_unlock(hdev);
3671 static int hci_change_suspend_state(struct hci_dev *hdev,
3672 enum suspended_state next)
3674 hdev->suspend_state_next = next;
3675 set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3676 queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3677 return hci_suspend_wait_event(hdev);
3680 static void hci_clear_wake_reason(struct hci_dev *hdev)
3684 hdev->wake_reason = 0;
3685 bacpy(&hdev->wake_addr, BDADDR_ANY);
3686 hdev->wake_addr_type = 0;
3688 hci_dev_unlock(hdev);
3691 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3694 struct hci_dev *hdev =
3695 container_of(nb, struct hci_dev, suspend_notifier);
3697 u8 state = BT_RUNNING;
3699 /* If powering down, wait for completion. */
3700 if (mgmt_powering_down(hdev)) {
3701 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3702 ret = hci_suspend_wait_event(hdev);
3707 /* Suspend notifier should only act on events when powered. */
3708 if (!hdev_is_powered(hdev) ||
3709 hci_dev_test_flag(hdev, HCI_UNREGISTER))
3712 if (action == PM_SUSPEND_PREPARE) {
3713 /* Suspend consists of two actions:
3714 * - First, disconnect everything and make the controller not
3715 * connectable (disabling scanning)
3716 * - Second, program event filter/accept list and enable scan
3718 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3720 state = BT_SUSPEND_DISCONNECT;
3722 /* Only configure accept list if disconnect succeeded and wake
3723 * isn't being prevented.
3725 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3726 ret = hci_change_suspend_state(hdev,
3727 BT_SUSPEND_CONFIGURE_WAKE);
3729 state = BT_SUSPEND_CONFIGURE_WAKE;
3732 hci_clear_wake_reason(hdev);
3733 mgmt_suspending(hdev, state);
3735 } else if (action == PM_POST_SUSPEND) {
3736 ret = hci_change_suspend_state(hdev, BT_RUNNING);
3738 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3739 hdev->wake_addr_type);
3743 /* We always allow suspend even if suspend preparation failed and
3744 * attempt to recover in resume.
3747 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3753 /* Alloc HCI device */
3754 struct hci_dev *hci_alloc_dev(void)
3756 struct hci_dev *hdev;
3758 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3762 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3763 hdev->esco_type = (ESCO_HV1);
3764 hdev->link_mode = (HCI_LM_ACCEPT);
3765 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3766 hdev->io_capability = 0x03; /* No Input No Output */
3767 hdev->manufacturer = 0xffff; /* Default to internal use */
3768 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3769 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3770 hdev->adv_instance_cnt = 0;
3771 hdev->cur_adv_instance = 0x00;
3772 hdev->adv_instance_timeout = 0;
3774 hdev->advmon_allowlist_duration = 300;
3775 hdev->advmon_no_filter_duration = 500;
3776 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
3778 hdev->sniff_max_interval = 800;
3779 hdev->sniff_min_interval = 80;
3781 hdev->le_adv_channel_map = 0x07;
3782 hdev->le_adv_min_interval = 0x0800;
3783 hdev->le_adv_max_interval = 0x0800;
3784 hdev->le_scan_interval = 0x0060;
3785 hdev->le_scan_window = 0x0030;
3786 hdev->le_scan_int_suspend = 0x0400;
3787 hdev->le_scan_window_suspend = 0x0012;
3788 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3789 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3790 hdev->le_scan_int_adv_monitor = 0x0060;
3791 hdev->le_scan_window_adv_monitor = 0x0030;
3792 hdev->le_scan_int_connect = 0x0060;
3793 hdev->le_scan_window_connect = 0x0060;
3794 hdev->le_conn_min_interval = 0x0018;
3795 hdev->le_conn_max_interval = 0x0028;
3796 hdev->le_conn_latency = 0x0000;
3797 hdev->le_supv_timeout = 0x002a;
3798 hdev->le_def_tx_len = 0x001b;
3799 hdev->le_def_tx_time = 0x0148;
3800 hdev->le_max_tx_len = 0x001b;
3801 hdev->le_max_tx_time = 0x0148;
3802 hdev->le_max_rx_len = 0x001b;
3803 hdev->le_max_rx_time = 0x0148;
3804 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3805 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3806 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3807 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3808 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3809 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3810 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3811 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3812 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
3814 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3815 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3816 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3817 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3818 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3819 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3821 /* default 1.28 sec page scan */
3822 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3823 hdev->def_page_scan_int = 0x0800;
3824 hdev->def_page_scan_window = 0x0012;
3826 mutex_init(&hdev->lock);
3827 mutex_init(&hdev->req_lock);
3829 INIT_LIST_HEAD(&hdev->mgmt_pending);
3830 INIT_LIST_HEAD(&hdev->reject_list);
3831 INIT_LIST_HEAD(&hdev->accept_list);
3832 INIT_LIST_HEAD(&hdev->uuids);
3833 INIT_LIST_HEAD(&hdev->link_keys);
3834 INIT_LIST_HEAD(&hdev->long_term_keys);
3835 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3836 INIT_LIST_HEAD(&hdev->remote_oob_data);
3837 INIT_LIST_HEAD(&hdev->le_accept_list);
3838 INIT_LIST_HEAD(&hdev->le_resolv_list);
3839 INIT_LIST_HEAD(&hdev->le_conn_params);
3840 INIT_LIST_HEAD(&hdev->pend_le_conns);
3841 INIT_LIST_HEAD(&hdev->pend_le_reports);
3842 INIT_LIST_HEAD(&hdev->conn_hash.list);
3843 INIT_LIST_HEAD(&hdev->adv_instances);
3844 INIT_LIST_HEAD(&hdev->blocked_keys);
3846 INIT_WORK(&hdev->rx_work, hci_rx_work);
3847 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3848 INIT_WORK(&hdev->tx_work, hci_tx_work);
3849 INIT_WORK(&hdev->power_on, hci_power_on);
3850 INIT_WORK(&hdev->error_reset, hci_error_reset);
3851 INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3853 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3855 skb_queue_head_init(&hdev->rx_q);
3856 skb_queue_head_init(&hdev->cmd_q);
3857 skb_queue_head_init(&hdev->raw_q);
3859 init_waitqueue_head(&hdev->req_wait_q);
3860 init_waitqueue_head(&hdev->suspend_wait_q);
3862 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3863 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
3865 hci_request_setup(hdev);
3867 hci_init_sysfs(hdev);
3868 discovery_init(hdev);
3872 EXPORT_SYMBOL(hci_alloc_dev);
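/* Illustrative sketch (hypothetical transport driver): the minimal sequence a
 * driver follows with the functions in this file. hci_register_dev() below
 * rejects a device whose open, close or send callback is missing.
 *
 *	struct hci_dev *hdev;
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_VIRTUAL;	// or HCI_USB, HCI_UART, ...
 *	hdev->open  = my_drv_open;	// hypothetical driver callbacks
 *	hdev->close = my_drv_close;
 *	hdev->send  = my_drv_send;
 *	hci_set_drvdata(hdev, my_drv_priv);
 *
 *	if (hci_register_dev(hdev) < 0) {
 *		hci_free_dev(hdev);
 *		return -ENODEV;
 *	}
 */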
3874 /* Free HCI device */
3875 void hci_free_dev(struct hci_dev *hdev)
3877 /* will free via device release */
3878 put_device(&hdev->dev);
3880 EXPORT_SYMBOL(hci_free_dev);
3882 /* Register HCI device */
3883 int hci_register_dev(struct hci_dev *hdev)
3887 if (!hdev->open || !hdev->close || !hdev->send)
3890 /* Do not allow HCI_AMP devices to register at index 0,
3891 * so the index can be used as the AMP controller ID.
3893 switch (hdev->dev_type) {
3895 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3898 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3907 sprintf(hdev->name, "hci%d", id);
3910 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3912 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3913 if (!hdev->workqueue) {
3918 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3920 if (!hdev->req_workqueue) {
3921 destroy_workqueue(hdev->workqueue);
3926 if (!IS_ERR_OR_NULL(bt_debugfs))
3927 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3929 dev_set_name(&hdev->dev, "%s", hdev->name);
3931 error = device_add(&hdev->dev);
3935 hci_leds_init(hdev);
3937 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3938 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3941 if (rfkill_register(hdev->rfkill) < 0) {
3942 rfkill_destroy(hdev->rfkill);
3943 hdev->rfkill = NULL;
3947 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3948 hci_dev_set_flag(hdev, HCI_RFKILLED);
3950 hci_dev_set_flag(hdev, HCI_SETUP);
3951 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3953 if (hdev->dev_type == HCI_PRIMARY) {
3954 /* Assume BR/EDR support until proven otherwise (such as
3955 * through reading supported features during init).
3957 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3960 write_lock(&hci_dev_list_lock);
3961 list_add(&hdev->list, &hci_dev_list);
3962 write_unlock(&hci_dev_list_lock);
3964 /* Devices that are marked for raw-only usage are unconfigured
3965 * and should not be included in normal operation.
3967 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3968 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3970 hci_sock_dev_event(hdev, HCI_DEV_REG);
3973 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3974 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3975 error = register_pm_notifier(&hdev->suspend_notifier);
3980 queue_work(hdev->req_workqueue, &hdev->power_on);
3982 idr_init(&hdev->adv_monitors_idr);
3987 destroy_workqueue(hdev->workqueue);
3988 destroy_workqueue(hdev->req_workqueue);
3990 ida_simple_remove(&hci_index_ida, hdev->id);
3994 EXPORT_SYMBOL(hci_register_dev);
3996 /* Unregister HCI device */
3997 void hci_unregister_dev(struct hci_dev *hdev)
3999 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4001 hci_dev_set_flag(hdev, HCI_UNREGISTER);
4003 write_lock(&hci_dev_list_lock);
4004 list_del(&hdev->list);
4005 write_unlock(&hci_dev_list_lock);
4007 cancel_work_sync(&hdev->power_on);
4009 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
4010 hci_suspend_clear_tasks(hdev);
4011 unregister_pm_notifier(&hdev->suspend_notifier);
4012 cancel_work_sync(&hdev->suspend_prepare);
4015 hci_dev_do_close(hdev);
4017 if (!test_bit(HCI_INIT, &hdev->flags) &&
4018 !hci_dev_test_flag(hdev, HCI_SETUP) &&
4019 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
4021 mgmt_index_removed(hdev);
4022 hci_dev_unlock(hdev);
4025 /* mgmt_index_removed should take care of emptying the
4027 BUG_ON(!list_empty(&hdev->mgmt_pending));
4029 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
4032 rfkill_unregister(hdev->rfkill);
4033 rfkill_destroy(hdev->rfkill);
4036 device_del(&hdev->dev);
4037 /* Actual cleanup is deferred until hci_cleanup_dev(). */
4040 EXPORT_SYMBOL(hci_unregister_dev);
4042 /* Cleanup HCI device */
4043 void hci_cleanup_dev(struct hci_dev *hdev)
4045 debugfs_remove_recursive(hdev->debugfs);
4046 kfree_const(hdev->hw_info);
4047 kfree_const(hdev->fw_info);
4049 destroy_workqueue(hdev->workqueue);
4050 destroy_workqueue(hdev->req_workqueue);
4053 hci_bdaddr_list_clear(&hdev->reject_list);
4054 hci_bdaddr_list_clear(&hdev->accept_list);
4055 hci_uuids_clear(hdev);
4056 hci_link_keys_clear(hdev);
4057 hci_smp_ltks_clear(hdev);
4058 hci_smp_irks_clear(hdev);
4059 hci_remote_oob_data_clear(hdev);
4060 hci_adv_instances_clear(hdev);
4061 hci_adv_monitors_clear(hdev);
4062 hci_bdaddr_list_clear(&hdev->le_accept_list);
4063 hci_bdaddr_list_clear(&hdev->le_resolv_list);
4064 hci_conn_params_clear_all(hdev);
4065 hci_discovery_filter_clear(hdev);
4066 hci_blocked_keys_clear(hdev);
4067 hci_dev_unlock(hdev);
4069 ida_simple_remove(&hci_index_ida, hdev->id);
4072 /* Suspend HCI device */
4073 int hci_suspend_dev(struct hci_dev *hdev)
4075 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
4078 EXPORT_SYMBOL(hci_suspend_dev);
4080 /* Resume HCI device */
4081 int hci_resume_dev(struct hci_dev *hdev)
4083 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
4086 EXPORT_SYMBOL(hci_resume_dev);
4088 /* Reset HCI device */
4089 int hci_reset_dev(struct hci_dev *hdev)
4091 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4092 struct sk_buff *skb;
4094 skb = bt_skb_alloc(3, GFP_ATOMIC);
4098 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
4099 skb_put_data(skb, hw_err, 3);
4101 bt_dev_err(hdev, "Injecting HCI hardware error event");
4103 /* Send Hardware Error to upper stack */
4104 return hci_recv_frame(hdev, skb);
4106 EXPORT_SYMBOL(hci_reset_dev);
4108 /* Receive frame from HCI drivers */
4109 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4111 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4112 && !test_bit(HCI_INIT, &hdev->flags))) {
4117 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4118 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
4119 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4120 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
4126 bt_cb(skb)->incoming = 1;
4129 __net_timestamp(skb);
4131 skb_queue_tail(&hdev->rx_q, skb);
4132 queue_work(hdev->workqueue, &hdev->rx_work);
4136 EXPORT_SYMBOL(hci_recv_frame);
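/* Illustrative sketch (hypothetical driver RX path): the transport allocates
 * an skb, tags its packet type and hands it to the core; buf and len are
 * assumed to come from the underlying transport.
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;	// or ACL/SCO/ISO data
 *	skb_put_data(skb, buf, len);
 *	return hci_recv_frame(hdev, skb);
 */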
4138 /* Receive diagnostic message from HCI drivers */
4139 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
4141 /* Mark as diagnostic packet */
4142 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
4145 __net_timestamp(skb);
4147 skb_queue_tail(&hdev->rx_q, skb);
4148 queue_work(hdev->workqueue, &hdev->rx_work);
4152 EXPORT_SYMBOL(hci_recv_diag);
4154 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
4158 va_start(vargs, fmt);
4159 kfree_const(hdev->hw_info);
4160 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4163 EXPORT_SYMBOL(hci_set_hw_info);
4165 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
4169 va_start(vargs, fmt);
4170 kfree_const(hdev->fw_info);
4171 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4174 EXPORT_SYMBOL(hci_set_fw_info);
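/* Illustrative sketch (hypothetical values): drivers publish hardware and
 * firmware details with these printf-style helpers; the strings are kept in
 * hdev until hci_cleanup_dev() frees them.
 *
 *	hci_set_hw_info(hdev, "rev %u", hw_rev);
 *	hci_set_fw_info(hdev, "build %u", fw_build);
 */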
4176 /* ---- Interface to upper protocols ---- */
4178 int hci_register_cb(struct hci_cb *cb)
4180 BT_DBG("%p name %s", cb, cb->name);
4182 mutex_lock(&hci_cb_list_lock);
4183 list_add_tail(&cb->list, &hci_cb_list);
4184 mutex_unlock(&hci_cb_list_lock);
4188 EXPORT_SYMBOL(hci_register_cb);
4190 int hci_unregister_cb(struct hci_cb *cb)
4192 BT_DBG("%p name %s", cb, cb->name);
4194 mutex_lock(&hci_cb_list_lock);
4195 list_del(&cb->list);
4196 mutex_unlock(&hci_cb_list_lock);
4200 EXPORT_SYMBOL(hci_unregister_cb);
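/* Illustrative sketch: an upper protocol registers a set of callbacks that
 * the core invokes on connection events. The fields shown are a subset of
 * struct hci_cb; "my_proto" and its handlers are hypothetical.
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name		= "my_proto",
 *		.connect_cfm	= my_proto_connect_cfm,
 *		.disconn_cfm	= my_proto_disconn_cfm,
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *	...
 *	hci_unregister_cb(&my_proto_cb);
 */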
4202 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4206 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4210 __net_timestamp(skb);
4212 /* Send copy to monitor */
4213 hci_send_to_monitor(hdev, skb);
4215 if (atomic_read(&hdev->promisc)) {
4216 /* Send copy to the sockets */
4217 hci_send_to_sock(hdev, skb);
4220 /* Get rid of skb owner, prior to sending to the driver. */
4223 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4228 err = hdev->send(hdev, skb);
4230 bt_dev_err(hdev, "sending frame failed (%d)", err);
4235 /* Send HCI command */
4236 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4239 struct sk_buff *skb;
4241 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4243 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4245 bt_dev_err(hdev, "no memory for command");
4249 /* Stand-alone HCI commands must be flagged as
4250 * single-command requests.
4252 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4254 skb_queue_tail(&hdev->cmd_q, skb);
4255 queue_work(hdev->workqueue, &hdev->cmd_work);
4260 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4263 struct sk_buff *skb;
4265 if (hci_opcode_ogf(opcode) != 0x3f) {
4266 /* A controller receiving a command shall respond with either
4267 * a Command Status Event or a Command Complete Event.
4268 * Therefore, all standard HCI commands must be sent via the
4269 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4270 * Some vendors do not comply with this rule for vendor-specific
4271 * commands and do not return any event. We want to support
4272 * unresponded commands for such cases only.
4274 bt_dev_err(hdev, "unresponded command not supported");
4278 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4280 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4285 hci_send_frame(hdev, skb);
4289 EXPORT_SYMBOL(__hci_cmd_send);
4291 /* Get data from the previously sent command */
4292 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4294 struct hci_command_hdr *hdr;
4296 if (!hdev->sent_cmd)
4299 hdr = (void *) hdev->sent_cmd->data;
4301 if (hdr->opcode != cpu_to_le16(opcode))
4304 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4306 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4309 /* Send HCI command and wait for command complete event */
4310 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4311 const void *param, u32 timeout)
4313 struct sk_buff *skb;
4315 if (!test_bit(HCI_UP, &hdev->flags))
4316 return ERR_PTR(-ENETDOWN);
4318 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4320 hci_req_sync_lock(hdev);
4321 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4322 hci_req_sync_unlock(hdev);
4326 EXPORT_SYMBOL(hci_cmd_sync);
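/* Illustrative sketch: issuing a command and waiting synchronously for its
 * completion. HCI_OP_READ_LOCAL_VERSION and HCI_CMD_TIMEOUT are existing
 * definitions; error handling is abbreviated.
 *
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	// skb->data holds the command complete parameters (status first)
 *	kfree_skb(skb);
 */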
4329 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4331 struct hci_acl_hdr *hdr;
4334 skb_push(skb, HCI_ACL_HDR_SIZE);
4335 skb_reset_transport_header(skb);
4336 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4337 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4338 hdr->dlen = cpu_to_le16(len);
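/* Worked example for the header above: hci_handle_pack() places the 12-bit
 * connection handle in bits 0-11 and the packet boundary/broadcast flags in
 * bits 12-15. For handle 0x002A with ACL_START (0x02):
 *
 *	handle_with_flags = 0x002A | (0x02 << 12) = 0x202A
 *
 * which is then stored little-endian in hdr->handle.
 */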
4341 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4342 struct sk_buff *skb, __u16 flags)
4344 struct hci_conn *conn = chan->conn;
4345 struct hci_dev *hdev = conn->hdev;
4346 struct sk_buff *list;
4348 skb->len = skb_headlen(skb);
4351 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4353 switch (hdev->dev_type) {
4355 hci_add_acl_hdr(skb, conn->handle, flags);
4358 hci_add_acl_hdr(skb, chan->handle, flags);
4361 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4365 list = skb_shinfo(skb)->frag_list;
4367 /* Non fragmented */
4368 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4370 skb_queue_tail(queue, skb);
4373 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4375 skb_shinfo(skb)->frag_list = NULL;
4377 /* Queue all fragments atomically. We need to use spin_lock_bh
4378 * here because of 6LoWPAN links; there this function is called
4379 * from softirq context, and using a normal spin lock could cause deadlocks.
4382 spin_lock_bh(&queue->lock);
4384 __skb_queue_tail(queue, skb);
4386 flags &= ~ACL_START;
4389 skb = list; list = list->next;
4391 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4392 hci_add_acl_hdr(skb, conn->handle, flags);
4394 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4396 __skb_queue_tail(queue, skb);
4399 spin_unlock_bh(&queue->lock);
4403 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4405 struct hci_dev *hdev = chan->conn->hdev;
4407 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4409 hci_queue_acl(chan, &chan->data_q, skb, flags);
4411 queue_work(hdev->workqueue, &hdev->tx_work);
4415 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4417 struct hci_dev *hdev = conn->hdev;
4418 struct hci_sco_hdr hdr;
4420 BT_DBG("%s len %d", hdev->name, skb->len);
4422 hdr.handle = cpu_to_le16(conn->handle);
4423 hdr.dlen = skb->len;
4425 skb_push(skb, HCI_SCO_HDR_SIZE);
4426 skb_reset_transport_header(skb);
4427 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4429 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4431 skb_queue_tail(&conn->data_q, skb);
4432 queue_work(hdev->workqueue, &hdev->tx_work);
4435 /* ---- HCI TX task (outgoing data) ---- */
4437 /* HCI Connection scheduler */
4438 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4441 struct hci_conn_hash *h = &hdev->conn_hash;
4442 struct hci_conn *conn = NULL, *c;
4443 unsigned int num = 0, min = ~0;
4445 /* We don't have to lock device here. Connections are always
4446 * added and removed with TX task disabled. */
4450 list_for_each_entry_rcu(c, &h->list, list) {
4451 if (c->type != type || skb_queue_empty(&c->data_q))
4454 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4459 if (c->sent < min) {
4464 if (hci_conn_num(hdev, type) == num)
4473 switch (conn->type) {
4475 cnt = hdev->acl_cnt;
4479 cnt = hdev->sco_cnt;
4482 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4486 bt_dev_err(hdev, "unknown link type %d", conn->type);
4494 BT_DBG("conn %p quote %d", conn, *quote);
4498 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4500 struct hci_conn_hash *h = &hdev->conn_hash;
4503 bt_dev_err(hdev, "link tx timeout");
4507 /* Kill stalled connections */
4508 list_for_each_entry_rcu(c, &h->list, list) {
4509 if (c->type == type && c->sent) {
4510 bt_dev_err(hdev, "killing stalled connection %pMR",
4512 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4519 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4522 struct hci_conn_hash *h = &hdev->conn_hash;
4523 struct hci_chan *chan = NULL;
4524 unsigned int num = 0, min = ~0, cur_prio = 0;
4525 struct hci_conn *conn;
4526 int cnt, q, conn_num = 0;
4528 BT_DBG("%s", hdev->name);
4532 list_for_each_entry_rcu(conn, &h->list, list) {
4533 struct hci_chan *tmp;
4535 if (conn->type != type)
4538 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4543 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4544 struct sk_buff *skb;
4546 if (skb_queue_empty(&tmp->data_q))
4549 skb = skb_peek(&tmp->data_q);
4550 if (skb->priority < cur_prio)
4553 if (skb->priority > cur_prio) {
4556 cur_prio = skb->priority;
4561 if (conn->sent < min) {
4567 if (hci_conn_num(hdev, type) == conn_num)
4576 switch (chan->conn->type) {
4578 cnt = hdev->acl_cnt;
4581 cnt = hdev->block_cnt;
4585 cnt = hdev->sco_cnt;
4588 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4592 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4597 BT_DBG("chan %p quote %d", chan, *quote);
4601 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4603 struct hci_conn_hash *h = &hdev->conn_hash;
4604 struct hci_conn *conn;
4607 BT_DBG("%s", hdev->name);
4611 list_for_each_entry_rcu(conn, &h->list, list) {
4612 struct hci_chan *chan;
4614 if (conn->type != type)
4617 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4622 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4623 struct sk_buff *skb;
4630 if (skb_queue_empty(&chan->data_q))
4633 skb = skb_peek(&chan->data_q);
4634 if (skb->priority >= HCI_PRIO_MAX - 1)
4637 skb->priority = HCI_PRIO_MAX - 1;
4639 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4643 if (hci_conn_num(hdev, type) == num)
4651 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4653 /* Calculate count of blocks used by this packet */
4654 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
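/* Worked example (block-based flow control only): with hdev->block_len = 64
 * and an ACL frame of 4 + 300 bytes (header plus payload), the packet
 * consumes DIV_ROUND_UP(300, 64) = 5 controller buffer blocks.
 */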
4657 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4659 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4660 /* ACL tx timeout must be longer than maximum
4661 * link supervision timeout (40.9 seconds) */
4662 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4663 HCI_ACL_TX_TIMEOUT))
4664 hci_link_tx_to(hdev, ACL_LINK);
4669 static void hci_sched_sco(struct hci_dev *hdev)
4671 struct hci_conn *conn;
4672 struct sk_buff *skb;
4675 BT_DBG("%s", hdev->name);
4677 if (!hci_conn_num(hdev, SCO_LINK))
4680 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4681 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4682 BT_DBG("skb %p len %d", skb, skb->len);
4683 hci_send_frame(hdev, skb);
4686 if (conn->sent == ~0)
4692 static void hci_sched_esco(struct hci_dev *hdev)
4694 struct hci_conn *conn;
4695 struct sk_buff *skb;
4698 BT_DBG("%s", hdev->name);
4700 if (!hci_conn_num(hdev, ESCO_LINK))
4703 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4705 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4706 BT_DBG("skb %p len %d", skb, skb->len);
4707 hci_send_frame(hdev, skb);
4710 if (conn->sent == ~0)
4716 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4718 unsigned int cnt = hdev->acl_cnt;
4719 struct hci_chan *chan;
4720 struct sk_buff *skb;
4723 __check_timeout(hdev, cnt);
4725 while (hdev->acl_cnt &&
4726 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4727 u32 priority = (skb_peek(&chan->data_q))->priority;
4728 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4729 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4730 skb->len, skb->priority);
4732 /* Stop if priority has changed */
4733 if (skb->priority < priority)
4736 skb = skb_dequeue(&chan->data_q);
4738 hci_conn_enter_active_mode(chan->conn,
4739 bt_cb(skb)->force_active);
4741 hci_send_frame(hdev, skb);
4742 hdev->acl_last_tx = jiffies;
4748 /* Send pending SCO packets right away */
4749 hci_sched_sco(hdev);
4750 hci_sched_esco(hdev);
4754 if (cnt != hdev->acl_cnt)
4755 hci_prio_recalculate(hdev, ACL_LINK);
4758 static void hci_sched_acl_blk(struct hci_dev *hdev)
4760 unsigned int cnt = hdev->block_cnt;
4761 struct hci_chan *chan;
4762 struct sk_buff *skb;
4766 __check_timeout(hdev, cnt);
4768 BT_DBG("%s", hdev->name);
4770 if (hdev->dev_type == HCI_AMP)
4775 while (hdev->block_cnt > 0 &&
4776 (chan = hci_chan_sent(hdev, type, &quote))) {
4777 u32 priority = (skb_peek(&chan->data_q))->priority;
4778 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4781 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4782 skb->len, skb->priority);
4784 /* Stop if priority has changed */
4785 if (skb->priority < priority)
4788 skb = skb_dequeue(&chan->data_q);
4790 blocks = __get_blocks(hdev, skb);
4791 if (blocks > hdev->block_cnt)
4794 hci_conn_enter_active_mode(chan->conn,
4795 bt_cb(skb)->force_active);
4797 hci_send_frame(hdev, skb);
4798 hdev->acl_last_tx = jiffies;
4800 hdev->block_cnt -= blocks;
4803 chan->sent += blocks;
4804 chan->conn->sent += blocks;
4808 if (cnt != hdev->block_cnt)
4809 hci_prio_recalculate(hdev, type);
4812 static void hci_sched_acl(struct hci_dev *hdev)
4814 BT_DBG("%s", hdev->name);
4816 /* No ACL link over BR/EDR controller */
4817 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4820 /* No AMP link over AMP controller */
4821 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4824 switch (hdev->flow_ctl_mode) {
4825 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4826 hci_sched_acl_pkt(hdev);
4829 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4830 hci_sched_acl_blk(hdev);
4835 static void hci_sched_le(struct hci_dev *hdev)
4837 struct hci_chan *chan;
4838 struct sk_buff *skb;
4839 int quote, cnt, tmp;
4841 BT_DBG("%s", hdev->name);
4843 if (!hci_conn_num(hdev, LE_LINK))
4846 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4848 __check_timeout(hdev, cnt);
4851 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4852 u32 priority = (skb_peek(&chan->data_q))->priority;
4853 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4854 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4855 skb->len, skb->priority);
4857 /* Stop if priority has changed */
4858 if (skb->priority < priority)
4861 skb = skb_dequeue(&chan->data_q);
4863 hci_send_frame(hdev, skb);
4864 hdev->le_last_tx = jiffies;
4870 /* Send pending SCO packets right away */
4871 hci_sched_sco(hdev);
4872 hci_sched_esco(hdev);
4879 hdev->acl_cnt = cnt;
4882 hci_prio_recalculate(hdev, LE_LINK);
4885 static void hci_tx_work(struct work_struct *work)
4887 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4888 struct sk_buff *skb;
4890 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4891 hdev->sco_cnt, hdev->le_cnt);
4893 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4894 /* Schedule queues and send stuff to HCI driver */
4895 hci_sched_sco(hdev);
4896 hci_sched_esco(hdev);
4897 hci_sched_acl(hdev);
4901 /* Send next queued raw (unknown type) packet */
4902 while ((skb = skb_dequeue(&hdev->raw_q)))
4903 hci_send_frame(hdev, skb);
4906 /* ----- HCI RX task (incoming data processing) ----- */
4908 /* ACL data packet */
4909 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4911 struct hci_acl_hdr *hdr = (void *) skb->data;
4912 struct hci_conn *conn;
4913 __u16 handle, flags;
4915 skb_pull(skb, HCI_ACL_HDR_SIZE);
4917 handle = __le16_to_cpu(hdr->handle);
4918 flags = hci_flags(handle);
4919 handle = hci_handle(handle);
4921 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4924 hdev->stat.acl_rx++;
4927 conn = hci_conn_hash_lookup_handle(hdev, handle);
4928 hci_dev_unlock(hdev);
4931 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4933 /* Send to upper protocol */
4934 l2cap_recv_acldata(conn, skb, flags);
4937 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4944 /* SCO data packet */
4945 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4947 struct hci_sco_hdr *hdr = (void *) skb->data;
4948 struct hci_conn *conn;
4949 __u16 handle, flags;
4951 skb_pull(skb, HCI_SCO_HDR_SIZE);
4953 handle = __le16_to_cpu(hdr->handle);
4954 flags = hci_flags(handle);
4955 handle = hci_handle(handle);
4957 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4960 hdev->stat.sco_rx++;
4963 conn = hci_conn_hash_lookup_handle(hdev, handle);
4964 hci_dev_unlock(hdev);
4967 /* Send to upper protocol */
4968 bt_cb(skb)->sco.pkt_status = flags & 0x03;
4969 sco_recv_scodata(conn, skb);
4972 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4979 static bool hci_req_is_complete(struct hci_dev *hdev)
4981 struct sk_buff *skb;
4983 skb = skb_peek(&hdev->cmd_q);
4987 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4990 static void hci_resend_last(struct hci_dev *hdev)
4992 struct hci_command_hdr *sent;
4993 struct sk_buff *skb;
4996 if (!hdev->sent_cmd)
4999 sent = (void *) hdev->sent_cmd->data;
5000 opcode = __le16_to_cpu(sent->opcode);
5001 if (opcode == HCI_OP_RESET)
5004 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5008 skb_queue_head(&hdev->cmd_q, skb);
5009 queue_work(hdev->workqueue, &hdev->cmd_work);
5012 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
5013 hci_req_complete_t *req_complete,
5014 hci_req_complete_skb_t *req_complete_skb)
5016 struct sk_buff *skb;
5017 unsigned long flags;
5019 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5021 /* If the completed command doesn't match the last one that was
5022 * sent we need to do special handling of it.
5024 if (!hci_sent_cmd_data(hdev, opcode)) {
5025 /* Some CSR based controllers generate a spontaneous
5026 * reset complete event during init and any pending
5027 * command will never be completed. In such a case we
5028 * need to resend whatever was the last sent
5031 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5032 hci_resend_last(hdev);
5037 /* If we reach this point this event matches the last command sent */
5038 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
5040 /* If the command succeeded and there's still more commands in
5041 * this request the request is not yet complete.
5043 if (!status && !hci_req_is_complete(hdev))
5046 /* If this was the last command in a request the complete
5047 * callback would be found in hdev->sent_cmd instead of the
5048 * command queue (hdev->cmd_q).
5050 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
5051 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
5055 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
5056 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
5060 /* Remove all pending commands belonging to this request */
5061 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5062 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5063 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
5064 __skb_queue_head(&hdev->cmd_q, skb);
5068 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
5069 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
5071 *req_complete = bt_cb(skb)->hci.req_complete;
5074 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5077 static void hci_rx_work(struct work_struct *work)
5079 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5080 struct sk_buff *skb;
5082 BT_DBG("%s", hdev->name);
5084 while ((skb = skb_dequeue(&hdev->rx_q))) {
5085 /* Send copy to monitor */
5086 hci_send_to_monitor(hdev, skb);
5088 if (atomic_read(&hdev->promisc)) {
5089 /* Send copy to the sockets */
5090 hci_send_to_sock(hdev, skb);
5093 /* If the device has been opened in HCI_USER_CHANNEL,
5094 * userspace has exclusive access to the device.
5095 * When the device is in HCI_INIT, we still need to
5096 * process the data packets so that the driver
5097 * can complete its setup().
5099 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5100 !test_bit(HCI_INIT, &hdev->flags)) {
5105 if (test_bit(HCI_INIT, &hdev->flags)) {
5106 /* Don't process data packets in this state. */
5107 switch (hci_skb_pkt_type(skb)) {
5108 case HCI_ACLDATA_PKT:
5109 case HCI_SCODATA_PKT:
5110 case HCI_ISODATA_PKT:
5117 switch (hci_skb_pkt_type(skb)) {
5119 BT_DBG("%s Event packet", hdev->name);
5120 hci_event_packet(hdev, skb);
5123 case HCI_ACLDATA_PKT:
5124 BT_DBG("%s ACL data packet", hdev->name);
5125 hci_acldata_packet(hdev, skb);
5128 case HCI_SCODATA_PKT:
5129 BT_DBG("%s SCO data packet", hdev->name);
5130 hci_scodata_packet(hdev, skb);
5140 static void hci_cmd_work(struct work_struct *work)
5142 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5143 struct sk_buff *skb;
5145 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5146 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5148 /* Send queued commands */
5149 if (atomic_read(&hdev->cmd_cnt)) {
5150 skb = skb_dequeue(&hdev->cmd_q);
5154 kfree_skb(hdev->sent_cmd);
5156 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5157 if (hdev->sent_cmd) {
5158 if (hci_req_status_pend(hdev))
5159 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
5160 atomic_dec(&hdev->cmd_cnt);
5161 hci_send_frame(hdev, skb);
5162 if (test_bit(HCI_RESET, &hdev->flags))
5163 cancel_delayed_work(&hdev->cmd_timer);
5165 schedule_delayed_work(&hdev->cmd_timer,
5168 skb_queue_head(&hdev->cmd_q, skb);
5169 queue_work(hdev->workqueue, &hdev->cmd_work);