/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

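/* Note on the init code below: controller initialization is expressed
 * as "requests". Each hci_*_req() handler queues one or more HCI
 * commands on a struct hci_request via hci_req_add(), and
 * __hci_req_sync() later sends the batch and waits for the command
 * completions.
 */
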
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);

	return 0;
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	return 0;
}

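/* Stage one of the controller init sequence: reset the controller
 * (unless HCI_QUIRK_RESET_ON_CLOSE already covers this) and run the
 * transport specific first-stage setup, bredr_init() for primary
 * controllers and amp_init1() for AMP controllers.
 */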
static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		bredr_init(req);
		break;
	case HCI_AMP:
		amp_init1(req);
		break;
	default:
		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
		break;
	}

	return 0;
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

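/* The events[] array below is the 8 octet bitfield of the HCI Set
 * Event Mask command: events[n] |= bit sets bits in octet n, and each
 * bit enables one HCI event as defined in the Bluetooth Core
 * Specification (the per-line comments name the event).
 */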
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version
					    * Information Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * be available as well. However some controllers list
		 * max_page as 0 as long as SSP has not been enabled.
		 * To achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;

		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	bool changed = false;

	/* If Connectionless Peripheral Broadcast central role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_cpb_central_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Peripheral Page Response Timeout */
		events[2] |= 0x20;	/* CPB Channel Map Change */
		changed = true;
	}

	/* If Connectionless Peripheral Broadcast peripheral role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_cpb_peripheral_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CPB Receive */
		events[2] |= 0x04;	/* CPB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
		changed = true;
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
		events[2] |= 0x80;
		changed = true;
	}

	/* Some Broadcom based controllers indicate support for Set Event
	 * Mask Page 2 command, but then actually do not support it. Since
	 * the default value is all bits set to zero, the command is only
	 * required if the event mask has to be changed. In case no change
	 * to the event mask is needed, skip this command.
	 */
	if (changed)
		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
			    sizeof(events), events);
}

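/* hdev->commands[] holds the Read Local Supported Commands bitmask:
 * commands[n] & bit tests one bit of octet n, which maps to a single
 * HCI command as laid out in the Bluetooth Core Specification. The
 * checks below only issue commands the controller claims to support.
 */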
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	if (hdev->commands[18] & 0x04 &&
	    !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports LL Privacy feature, enable
		 * the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
			events[1] |= 0x02;	/* LE Enhanced Connection
						 * Complete
						 */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports Channel Selection Algorithm #2
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
			events[2] |= 0x08;	/* LE Channel Selection
						 * Algorithm
						 */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02;	/* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01;	/* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04;	/* LE Connection Update
						 * Complete
						 */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08;	/* LE Read Remote Used
						 * Features Complete
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		/* If the controller supports the LE Set Default PHY or
		 * LE Set PHY commands, enable the corresponding event.
		 */
		if (hdev->commands[35] & (0x20 | 0x40))
			events[1] |= 0x08;	/* LE PHY Update Complete */

		/* If the controller supports LE Set Extended Scan Parameters
		 * and LE Set Extended Scan Enable commands, enable the
		 * corresponding event.
		 */
		if (use_ext_scan(hdev))
			events[1] |= 0x10;	/* LE Extended Advertising
						 * Report
						 */

		/* If the controller supports the LE Extended Advertising
		 * command, enable the corresponding event.
		 */
		if (ext_adv_capable(hdev))
			events[2] |= 0x02;	/* LE Advertising Set
						 * Terminated
						 */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		/* Read LE Advertising Channel TX Power */
		if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
			/* The HCI spec forbids mixing legacy and extended
			 * advertising commands, and READ_ADV_TX_POWER is a
			 * legacy command. So do not call it if extended
			 * advertising is supported; otherwise the controller
			 * will return COMMAND_DISALLOWED for extended
			 * commands.
			 */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[38] & 0x80) {
			/* Read LE Min/Max Tx Power */
			hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE Accept List Size */
			hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE Accept List */
			hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
		}

		if (hdev->commands[34] & 0x40) {
			/* Read LE Resolving List Size */
			hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[34] & 0x20) {
			/* Clear LE Resolving List */
			hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
		}

		if (hdev->commands[35] & 0x04) {
			__le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);

			/* Set RPA timeout */
			hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
				    &rpa_timeout);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		if (ext_adv_capable(hdev)) {
			/* Read LE Number of Supported Advertising Sets */
			hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
				    0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local pairing options if the HCI command is supported */
	if (hdev->commands[41] & 0x08)
		hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	/* Set erroneous data reporting if supported to the wideband speech
	 * setting value.
	 */
	if (hdev->commands[18] & 0x08 &&
	    !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
		bool enabled = hci_dev_test_flag(hdev,
						 HCI_WIDEBAND_SPEECH_ENABLED);

		if (enabled !=
		    (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
			struct hci_cp_write_def_err_data_reporting cp;

			cp.err_data_reporting = enabled ?
						ERR_DATA_REPORTING_ENABLED :
						ERR_DATA_REPORTING_DISABLED;

			hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
				    sizeof(cp), &cp);
		}
	}

	/* Set Suggested Default Data Length to maximum if supported */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
		struct hci_cp_le_write_def_data_len cp;

		cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
		cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
	}

	/* Set Default PHY parameters if command is supported */
	if (hdev->commands[35] & 0x20) {
		struct hci_cp_le_set_default_phy cp;

		cp.all_phys = 0x00;
		cp.tx_phys = hdev->le_tx_def_phys;
		cp.rx_phys = hdev->le_rx_def_phys;

		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
	}

	return 0;
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[45] & 0x04)
		hci_read_supported_codecs_v2(hdev);
	else if (hdev->commands[29] & 0x20)
		hci_read_supported_codecs(hdev);

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}

/* Get HCI device by index.
 * Device is held on return; callers must balance with hci_dev_put(). */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

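/* Discovery state machine: STOPPED -> STARTING -> FINDING ->
 * (RESOLVING ->) STOPPING -> STOPPED. mgmt is only notified on the
 * transitions that are visible to user space: when discovery is
 * effectively running (FINDING) and when it has fully stopped.
 */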
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Keep the resolve list ordered by descending RSSI so the
	 * strongest devices get their names resolved first.
	 */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

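/* Returns MGMT_DEV_FOUND_* flags for the mgmt Device Found event,
 * e.g. MGMT_DEV_FOUND_CONFIRM_NAME when the remote name still needs
 * to be resolved before the result can be confirmed to user space.
 */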
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

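/* Handler for the HCIINQUIRY ioctl: copy the request from user space,
 * run an inquiry if the cache is stale (or a flush was requested), and
 * then copy the cached inquiry results back to user space.
 */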
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {
		err = -EINVAL;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/**
 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
 *				       (BD_ADDR) for a HCI device from
 *				       a firmware node property.
 * @hdev:	The HCI device
 *
 * Search the firmware node for 'local-bd-address'.
 *
 * All-zero BD addresses are rejected, because those could be properties
 * that exist in the firmware tables, but were not updated by the firmware.
 * For example, the DTS could define 'local-bd-address' with zero BD
 * addresses.
 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
	bdaddr_t ba;
	int ret;

	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
					    (u8 *)&ba, sizeof(ba));
	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
		return;

	bacpy(&hdev->public_addr, &ba);
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
		bool invalid_bdaddr;

		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set the quirk to mark the
		 * BD_ADDR invalid before creating the HCI device or in
		 * its setup callback.
		 */
		invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
					  &hdev->quirks);

		if (ret)
			goto setup_failed;

		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
			if (!bacmp(&hdev->public_addr, BDADDR_ANY))
				hci_dev_get_bd_addr_from_property(hdev);

			if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
			    hdev->set_bdaddr) {
				ret = hdev->set_bdaddr(hdev,
						       &hdev->public_addr);

				/* If setting of the BD_ADDR from the device
				 * property succeeds, then treat the address
				 * as valid even if the invalid BD_ADDR
				 * quirk indicates otherwise.
				 */
				if (!ret)
					invalid_bdaddr = false;
			}
		}

setup_failed:
		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * For the invalid BD_ADDR quirk it is possible that
		 * it becomes a valid address if the bootloader does
		 * provide it (see above).
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    invalid_bdaddr)
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	msft_do_open(hdev);
	aosp_do_open(hdev);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);

		/* Since hci_rx_work() is possible to awake new cmd_work
		 * it should be flushed first to avoid unexpected call of
		 * hci_cmd_work()
		 */
		flush_work(&hdev->rx_work);
		flush_work(&hdev->cmd_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

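/* Power down sequence: cancel pending work, flush the RX/TX work,
 * notify mgmt and the monitor socket, reset the controller if the
 * quirk asks for it, purge all queues and finally call the driver's
 * close() callback.
 */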
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;
	int err = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);
	cancel_delayed_work(&hdev->ncmd_timer);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			err = hdev->shutdown(hdev);
	}

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return err;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		struct adv_info *adv_instance;

		cancel_delayed_work_sync(&hdev->rpa_expired);

		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	aosp_do_close(hdev);
	msft_do_close(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
		wake_up(&hdev->suspend_wait_q);

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);
	hci_codec_list_clear(&hdev->local_codecs);

	hci_req_sync_unlock(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

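/* Scan mode bits from the Write Scan Enable command: SCAN_INQUIRY
 * (0x01) enables inquiry scan, SCAN_PAGE (0x02) enables page scan.
 * The mgmt CONNECTABLE/DISCOVERABLE flags are kept in sync with them
 * below, since this path is taken for non-mgmt (legacy ioctl) changes.
 */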
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

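/* hci_power_on() runs from hdev->req_workqueue (the hdev->power_on
 * work item) whenever the device is powered on via mgmt or when the
 * controller is first registered.
 */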
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		hci_req_sync_lock(hdev);
		err = __hci_req_hci_power_on(hdev);
		hci_req_sync_unlock(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b;

	list_for_each_entry(b, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
			blocked = true;
			break;
		}
	}

	rcu_read_unlock();
	return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

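/* Decide whether a BR/EDR link key should be stored persistently.
 * Key types 0x00-0x02 are legacy (pre Secure Simple Pairing) keys,
 * HCI_LK_DEBUG_COMBINATION is the well-known debug key, and the
 * remaining checks implement the bonding requirements from the
 * Bluetooth Core Specification.
 */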
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently.
	 */
	return false;
}

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

2461 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2462 bdaddr_t *bdaddr, u8 *val, u8 type,
2463 u8 pin_len, bool *persistent)
2465 struct link_key *key, *old_key;
2468 old_key = hci_find_link_key(hdev, bdaddr);
2470 old_key_type = old_key->type;
2473 old_key_type = conn ? conn->key_type : 0xff;
2474 key = kzalloc(sizeof(*key), GFP_KERNEL);
2477 list_add_rcu(&key->list, &hdev->link_keys);
2480 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2482 /* Some buggy controller combinations generate a changed
2483 * combination key for legacy pairing even when there's no
2485 if (type == HCI_LK_CHANGED_COMBINATION &&
2486 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2487 type = HCI_LK_COMBINATION;
2489 conn->key_type = type;
2492 bacpy(&key->bdaddr, bdaddr);
2493 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2494 key->pin_len = pin_len;
2496 if (type == HCI_LK_CHANGED_COMBINATION)
2497 key->type = old_key_type;
2502 *persistent = hci_persistent_key(hdev, conn, type,
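/* Illustrative sketch of storing a key from a Link Key Notification
 * event (the "ev" fields and pin_len are hypothetical stand-ins for
 * what the event handler would pass in):
 *
 *	bool persistent;
 *	struct link_key *key;
 *
 *	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *			       ev->key_type, pin_len, &persistent);
 *	if (key && persistent)
 *		mgmt_new_link_key(hdev, key, persistent);
 *
 * Whether "persistent" comes back true is decided by
 * hci_persistent_key() above, from the key type and both sides'
 * authentication requirements.
 */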
2508 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2509 u8 addr_type, u8 type, u8 authenticated,
2510 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2512 struct smp_ltk *key, *old_key;
2513 u8 role = ltk_role(type);
2515 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2519 key = kzalloc(sizeof(*key), GFP_KERNEL);
2522 list_add_rcu(&key->list, &hdev->long_term_keys);
2525 bacpy(&key->bdaddr, bdaddr);
2526 key->bdaddr_type = addr_type;
2527 memcpy(key->val, tk, sizeof(key->val));
2528 key->authenticated = authenticated;
2531 key->enc_size = enc_size;
2537 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2538 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2540 struct smp_irk *irk;
2542 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2544 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2548 bacpy(&irk->bdaddr, bdaddr);
2549 irk->addr_type = addr_type;
2551 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2554 memcpy(irk->val, val, 16);
2555 bacpy(&irk->rpa, rpa);
2560 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2562 struct link_key *key;
2564 key = hci_find_link_key(hdev, bdaddr);
2568 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2570 list_del_rcu(&key->list);
2571 kfree_rcu(key, rcu);
2576 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2581 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2582 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2585 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2587 list_del_rcu(&k->list);
2592 return removed ? 0 : -ENOENT;
2595 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2599 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2600 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2603 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2605 list_del_rcu(&k->list);
2610 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2613 struct smp_irk *irk;
2616 if (type == BDADDR_BREDR) {
2617 if (hci_find_link_key(hdev, bdaddr))
2622 /* Convert to HCI addr type which struct smp_ltk uses */
2623 if (type == BDADDR_LE_PUBLIC)
2624 addr_type = ADDR_LE_DEV_PUBLIC;
2626 addr_type = ADDR_LE_DEV_RANDOM;
2628 irk = hci_get_irk(hdev, bdaddr, addr_type);
2630 bdaddr = &irk->bdaddr;
2631 addr_type = irk->addr_type;
2635 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2636 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2646 /* HCI command timer function */
2647 static void hci_cmd_timeout(struct work_struct *work)
2649 struct hci_dev *hdev = container_of(work, struct hci_dev,
2652 if (hdev->sent_cmd) {
2653 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2654 u16 opcode = __le16_to_cpu(sent->opcode);
2656 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2658 bt_dev_err(hdev, "command tx timeout");
2661 if (hdev->cmd_timeout)
2662 hdev->cmd_timeout(hdev);
2664 atomic_set(&hdev->cmd_cnt, 1);
2665 queue_work(hdev->workqueue, &hdev->cmd_work);
2668 /* HCI ncmd timer function */
2669 static void hci_ncmd_timeout(struct work_struct *work)
2671 struct hci_dev *hdev = container_of(work, struct hci_dev,
2674 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
2676 /* During the HCI_INIT phase no events can be injected if the ncmd timer
2677 * triggers, since the init procedure has its own timeout handling.
2679 if (test_bit(HCI_INIT, &hdev->flags))
2682 /* This is an irrecoverable state, inject hardware error event */
2683 hci_reset_dev(hdev);
2686 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2687 bdaddr_t *bdaddr, u8 bdaddr_type)
2689 struct oob_data *data;
2691 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2692 if (bacmp(bdaddr, &data->bdaddr) != 0)
2694 if (data->bdaddr_type != bdaddr_type)
2702 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2705 struct oob_data *data;
2707 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2711 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2713 list_del(&data->list);
2719 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2721 struct oob_data *data, *n;
2723 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2724 list_del(&data->list);
2729 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2730 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2731 u8 *hash256, u8 *rand256)
2733 struct oob_data *data;
2735 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2737 data = kmalloc(sizeof(*data), GFP_KERNEL);
2741 bacpy(&data->bdaddr, bdaddr);
2742 data->bdaddr_type = bdaddr_type;
2743 list_add(&data->list, &hdev->remote_oob_data);
2746 if (hash192 && rand192) {
2747 memcpy(data->hash192, hash192, sizeof(data->hash192));
2748 memcpy(data->rand192, rand192, sizeof(data->rand192));
2749 if (hash256 && rand256)
2750 data->present = 0x03;
2752 memset(data->hash192, 0, sizeof(data->hash192));
2753 memset(data->rand192, 0, sizeof(data->rand192));
2754 if (hash256 && rand256)
2755 data->present = 0x02;
2757 data->present = 0x00;
2760 if (hash256 && rand256) {
2761 memcpy(data->hash256, hash256, sizeof(data->hash256));
2762 memcpy(data->rand256, rand256, sizeof(data->rand256));
2764 memset(data->hash256, 0, sizeof(data->hash256));
2765 memset(data->rand256, 0, sizeof(data->rand256));
2766 if (hash192 && rand192)
2767 data->present = 0x01;
2770 BT_DBG("%s for %pMR", hdev->name, bdaddr);
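/* The "present" field set above is a bitmask describing which OOB
 * value pairs were supplied:
 *
 *	0x00 - neither P-192 nor P-256 data present
 *	0x01 - only P-192 data (hash192/rand192) present
 *	0x02 - only P-256 data (hash256/rand256) present
 *	0x03 - both P-192 and P-256 data present
 *
 * Absent pairs are zeroed so that stale values cannot leak into a
 * later pairing attempt.
 */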
2775 /* This function requires the caller holds hdev->lock */
2776 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2778 struct adv_info *adv_instance;
2780 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2781 if (adv_instance->instance == instance)
2782 return adv_instance;
2788 /* This function requires the caller holds hdev->lock */
2789 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2791 struct adv_info *cur_instance;
2793 cur_instance = hci_find_adv_instance(hdev, instance);
2797 if (cur_instance == list_last_entry(&hdev->adv_instances,
2798 struct adv_info, list))
2799 return list_first_entry(&hdev->adv_instances,
2800 struct adv_info, list);
2802 return list_next_entry(cur_instance, list);
2805 /* This function requires the caller holds hdev->lock */
2806 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2808 struct adv_info *adv_instance;
2810 adv_instance = hci_find_adv_instance(hdev, instance);
2814 BT_DBG("%s removing instance %d", hdev->name, instance);
2816 if (hdev->cur_adv_instance == instance) {
2817 if (hdev->adv_instance_timeout) {
2818 cancel_delayed_work(&hdev->adv_instance_expire);
2819 hdev->adv_instance_timeout = 0;
2821 hdev->cur_adv_instance = 0x00;
2824 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2826 list_del(&adv_instance->list);
2827 kfree(adv_instance);
2829 hdev->adv_instance_cnt--;
2834 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2836 struct adv_info *adv_instance, *n;
2838 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2839 adv_instance->rpa_expired = rpa_expired;
2842 /* This function requires the caller holds hdev->lock */
2843 void hci_adv_instances_clear(struct hci_dev *hdev)
2845 struct adv_info *adv_instance, *n;
2847 if (hdev->adv_instance_timeout) {
2848 cancel_delayed_work(&hdev->adv_instance_expire);
2849 hdev->adv_instance_timeout = 0;
2852 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2853 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2854 list_del(&adv_instance->list);
2855 kfree(adv_instance);
2858 hdev->adv_instance_cnt = 0;
2859 hdev->cur_adv_instance = 0x00;
2862 static void adv_instance_rpa_expired(struct work_struct *work)
2864 struct adv_info *adv_instance = container_of(work, struct adv_info,
2865 rpa_expired_cb.work);
2869 adv_instance->rpa_expired = true;
2872 /* This function requires the caller holds hdev->lock */
2873 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2874 u16 adv_data_len, u8 *adv_data,
2875 u16 scan_rsp_len, u8 *scan_rsp_data,
2876 u16 timeout, u16 duration, s8 tx_power,
2877 u32 min_interval, u32 max_interval)
2879 struct adv_info *adv_instance;
2881 adv_instance = hci_find_adv_instance(hdev, instance);
2883 memset(adv_instance->adv_data, 0,
2884 sizeof(adv_instance->adv_data));
2885 memset(adv_instance->scan_rsp_data, 0,
2886 sizeof(adv_instance->scan_rsp_data));
2888 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2889 instance < 1 || instance > hdev->le_num_of_adv_sets)
2892 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2896 adv_instance->pending = true;
2897 adv_instance->instance = instance;
2898 list_add(&adv_instance->list, &hdev->adv_instances);
2899 hdev->adv_instance_cnt++;
2902 adv_instance->flags = flags;
2903 adv_instance->adv_data_len = adv_data_len;
2904 adv_instance->scan_rsp_len = scan_rsp_len;
2905 adv_instance->min_interval = min_interval;
2906 adv_instance->max_interval = max_interval;
2907 adv_instance->tx_power = tx_power;
2910 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2913 memcpy(adv_instance->scan_rsp_data,
2914 scan_rsp_data, scan_rsp_len);
2916 adv_instance->timeout = timeout;
2917 adv_instance->remaining_time = timeout;
2920 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
2922 adv_instance->duration = duration;
2924 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
2925 adv_instance_rpa_expired);
2927 BT_DBG("%s for instance %d", hdev->name, instance);
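/* Illustrative sketch of registering advertising instance 1 with no
 * expiry timeout and an explicit rotation duration of 60 (the data
 * buffers and flags are hypothetical):
 *
 *	err = hci_add_adv_instance(hdev, 1, flags,
 *				   adv_len, adv_data,
 *				   scan_rsp_len, scan_rsp_data,
 *				   0, 60, HCI_TX_POWER_INVALID,
 *				   hdev->le_adv_min_interval,
 *				   hdev->le_adv_max_interval);
 *
 * Passing 0 as duration instead would fall back to
 * hdev->def_multi_adv_rotation_duration, as shown above.
 */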
2932 /* This function requires the caller holds hdev->lock */
2933 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
2934 u16 adv_data_len, u8 *adv_data,
2935 u16 scan_rsp_len, u8 *scan_rsp_data)
2937 struct adv_info *adv_instance;
2939 adv_instance = hci_find_adv_instance(hdev, instance);
2941 /* If advertisement doesn't exist, we can't modify its data */
2946 memset(adv_instance->adv_data, 0,
2947 sizeof(adv_instance->adv_data));
2948 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2949 adv_instance->adv_data_len = adv_data_len;
2953 memset(adv_instance->scan_rsp_data, 0,
2954 sizeof(adv_instance->scan_rsp_data));
2955 memcpy(adv_instance->scan_rsp_data,
2956 scan_rsp_data, scan_rsp_len);
2957 adv_instance->scan_rsp_len = scan_rsp_len;
2963 /* This function requires the caller holds hdev->lock */
2964 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
2967 struct adv_info *adv;
2969 if (instance == 0x00) {
2970 /* Instance 0 always manages the "Tx Power" and "Flags"
2973 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
2975 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
2976 * corresponds to the "connectable" instance flag.
2978 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
2979 flags |= MGMT_ADV_FLAG_CONNECTABLE;
2981 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2982 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
2983 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2984 flags |= MGMT_ADV_FLAG_DISCOV;
2989 adv = hci_find_adv_instance(hdev, instance);
2991 /* Return 0 when we got an invalid instance identifier. */
2998 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
3000 struct adv_info *adv;
3002 /* Instance 0x00 always sets the local name */
3003 if (instance == 0x00)
3006 adv = hci_find_adv_instance(hdev, instance);
3010 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
3011 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
3014 return adv->scan_rsp_len ? true : false;
3017 /* This function requires the caller holds hdev->lock */
3018 void hci_adv_monitors_clear(struct hci_dev *hdev)
3020 struct adv_monitor *monitor;
3023 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3024 hci_free_adv_monitor(hdev, monitor);
3026 idr_destroy(&hdev->adv_monitors_idr);
3029 /* Frees the monitor structure and does some bookkeeping.
3030 * This function requires the caller holds hdev->lock.
3032 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3034 struct adv_pattern *pattern;
3035 struct adv_pattern *tmp;
3040 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
3041 list_del(&pattern->list);
3045 if (monitor->handle)
3046 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3048 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
3049 hdev->adv_monitors_cnt--;
3050 mgmt_adv_monitor_removed(hdev, monitor->handle);
3056 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
3058 return mgmt_add_adv_patterns_monitor_complete(hdev, status);
3061 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
3063 return mgmt_remove_adv_monitor_complete(hdev, status);
3066 /* Assigns handle to a monitor, and if offloading is supported and power is on,
3067 * also attempts to forward the request to the controller.
3068 * Returns true if request is forwarded (result is pending), false otherwise.
3069 * This function requires the caller holds hdev->lock.
3071 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
3074 int min, max, handle;
3083 min = HCI_MIN_ADV_MONITOR_HANDLE;
3084 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3085 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3092 monitor->handle = handle;
3094 if (!hdev_is_powered(hdev))
3097 switch (hci_get_adv_monitor_offload_ext(hdev)) {
3098 case HCI_ADV_MONITOR_EXT_NONE:
3099 hci_update_background_scan(hdev);
3100 bt_dev_dbg(hdev, "add monitor status %d", *err);
3101 /* Message was not forwarded to controller - not an error */
3103 case HCI_ADV_MONITOR_EXT_MSFT:
3104 *err = msft_add_monitor_pattern(hdev, monitor);
3105 bt_dev_dbg(hdev, "add monitor msft status %d", *err);
3113 /* Attempts to tell the controller and free the monitor. If somehow the
3114 * controller doesn't have a corresponding handle, remove anyway.
3115 * Returns true if request is forwarded (result is pending), false otherwise.
3116 * This function requires the caller holds hdev->lock.
3118 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
3119 struct adv_monitor *monitor,
3120 u16 handle, int *err)
3124 switch (hci_get_adv_monitor_offload_ext(hdev)) {
3125 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
3127 case HCI_ADV_MONITOR_EXT_MSFT:
3128 *err = msft_remove_monitor(hdev, monitor, handle);
3132 /* If no matching handle is registered, just free the monitor */
3133 if (*err == -ENOENT)
3139 if (*err == -ENOENT)
3140 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
3142 hci_free_adv_monitor(hdev, monitor);
3148 /* Returns true if request is forwarded (result is pending), false otherwise.
3149 * This function requires the caller holds hdev->lock.
3151 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
3153 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
3161 pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
3162 if (!*err && !pending)
3163 hci_update_background_scan(hdev);
3165 bt_dev_dbg(hdev, "remove monitor handle %d, status %d, %spending",
3166 handle, *err, pending ? "" : "not ");
3171 /* Returns true if request is forwarded (result is pending), false otherwise.
3172 * This function requires the caller holds hdev->lock.
3174 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
3176 struct adv_monitor *monitor;
3177 int idr_next_id = 0;
3178 bool pending = false;
3179 bool update = false;
3183 while (!*err && !pending) {
3184 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
3188 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
3190 if (!*err && !pending)
3195 hci_update_background_scan(hdev);
3197 bt_dev_dbg(hdev, "remove all monitors status %d, %spending",
3198 *err, pending ? "" : "not ");
3203 /* This function requires the caller holds hdev->lock */
3204 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3206 return !idr_is_empty(&hdev->adv_monitors_idr);
3209 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3211 if (msft_monitor_supported(hdev))
3212 return HCI_ADV_MONITOR_EXT_MSFT;
3214 return HCI_ADV_MONITOR_EXT_NONE;
3217 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3218 bdaddr_t *bdaddr, u8 type)
3220 struct bdaddr_list *b;
3222 list_for_each_entry(b, bdaddr_list, list) {
3223 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3230 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3231 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3234 struct bdaddr_list_with_irk *b;
3236 list_for_each_entry(b, bdaddr_list, list) {
3237 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3244 struct bdaddr_list_with_flags *
3245 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3246 bdaddr_t *bdaddr, u8 type)
3248 struct bdaddr_list_with_flags *b;
3250 list_for_each_entry(b, bdaddr_list, list) {
3251 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3258 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3260 struct bdaddr_list *b, *n;
3262 list_for_each_entry_safe(b, n, bdaddr_list, list) {
3268 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3270 struct bdaddr_list *entry;
3272 if (!bacmp(bdaddr, BDADDR_ANY))
3275 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3278 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3282 bacpy(&entry->bdaddr, bdaddr);
3283 entry->bdaddr_type = type;
3285 list_add(&entry->list, list);
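/* Illustrative sketch of adding a peer to the LE accept list and
 * looking it up again (the address value is hypothetical):
 *
 *	err = hci_bdaddr_list_add(&hdev->le_accept_list, &bdaddr,
 *				  ADDR_LE_DEV_PUBLIC);
 *
 *	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &bdaddr,
 *				   ADDR_LE_DEV_PUBLIC))
 *		// entry is present
 *
 * BDADDR_ANY is rejected with -EBADF and duplicates with -EEXIST.
 */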
3290 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3291 u8 type, u8 *peer_irk, u8 *local_irk)
3293 struct bdaddr_list_with_irk *entry;
3295 if (!bacmp(bdaddr, BDADDR_ANY))
3298 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3301 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3305 bacpy(&entry->bdaddr, bdaddr);
3306 entry->bdaddr_type = type;
3309 memcpy(entry->peer_irk, peer_irk, 16);
3312 memcpy(entry->local_irk, local_irk, 16);
3314 list_add(&entry->list, list);
3319 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3322 struct bdaddr_list_with_flags *entry;
3324 if (!bacmp(bdaddr, BDADDR_ANY))
3327 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3330 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3334 bacpy(&entry->bdaddr, bdaddr);
3335 entry->bdaddr_type = type;
3336 entry->current_flags = flags;
3338 list_add(&entry->list, list);
3343 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3345 struct bdaddr_list *entry;
3347 if (!bacmp(bdaddr, BDADDR_ANY)) {
3348 hci_bdaddr_list_clear(list);
3352 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3356 list_del(&entry->list);
3362 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3365 struct bdaddr_list_with_irk *entry;
3367 if (!bacmp(bdaddr, BDADDR_ANY)) {
3368 hci_bdaddr_list_clear(list);
3372 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3376 list_del(&entry->list);
3382 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3385 struct bdaddr_list_with_flags *entry;
3387 if (!bacmp(bdaddr, BDADDR_ANY)) {
3388 hci_bdaddr_list_clear(list);
3392 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3396 list_del(&entry->list);
3402 /* This function requires the caller holds hdev->lock */
3403 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3404 bdaddr_t *addr, u8 addr_type)
3406 struct hci_conn_params *params;
3408 list_for_each_entry(params, &hdev->le_conn_params, list) {
3409 if (bacmp(&params->addr, addr) == 0 &&
3410 params->addr_type == addr_type) {
3418 /* This function requires the caller holds hdev->lock */
3419 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3420 bdaddr_t *addr, u8 addr_type)
3422 struct hci_conn_params *param;
3424 list_for_each_entry(param, list, action) {
3425 if (bacmp(&param->addr, addr) == 0 &&
3426 param->addr_type == addr_type)
3433 /* This function requires the caller holds hdev->lock */
3434 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3435 bdaddr_t *addr, u8 addr_type)
3437 struct hci_conn_params *params;
3439 params = hci_conn_params_lookup(hdev, addr, addr_type);
3443 params = kzalloc(sizeof(*params), GFP_KERNEL);
3445 bt_dev_err(hdev, "out of memory");
3449 bacpy(&params->addr, addr);
3450 params->addr_type = addr_type;
3452 list_add(&params->list, &hdev->le_conn_params);
3453 INIT_LIST_HEAD(&params->action);
3455 params->conn_min_interval = hdev->le_conn_min_interval;
3456 params->conn_max_interval = hdev->le_conn_max_interval;
3457 params->conn_latency = hdev->le_conn_latency;
3458 params->supervision_timeout = hdev->le_supv_timeout;
3459 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3461 BT_DBG("addr %pMR (type %u)", addr, addr_type);
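/* Illustrative sketch of creating (or fetching) connection parameters
 * for a peer and marking it for automatic connection ("addr" is a
 * hypothetical identity address):
 *
 *	struct hci_conn_params *params;
 *
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (!params)
 *		return -ENOMEM;
 *	params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *
 * New entries start with the controller-wide defaults copied above and
 * with auto_connect set to HCI_AUTO_CONN_DISABLED.
 */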
3466 static void hci_conn_params_free(struct hci_conn_params *params)
3469 hci_conn_drop(params->conn);
3470 hci_conn_put(params->conn);
3473 list_del(&params->action);
3474 list_del(&params->list);
3478 /* This function requires the caller holds hdev->lock */
3479 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3481 struct hci_conn_params *params;
3483 params = hci_conn_params_lookup(hdev, addr, addr_type);
3487 hci_conn_params_free(params);
3489 hci_update_background_scan(hdev);
3491 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3494 /* This function requires the caller holds hdev->lock */
3495 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3497 struct hci_conn_params *params, *tmp;
3499 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3500 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3503 /* If trying to establish one time connection to disabled
3504 * device, leave the params, but mark them as just once.
3506 if (params->explicit_connect) {
3507 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3511 list_del(¶ms->list);
3515 BT_DBG("All LE disabled connection parameters were removed");
3518 /* This function requires the caller holds hdev->lock */
3519 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3521 struct hci_conn_params *params, *tmp;
3523 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3524 hci_conn_params_free(params);
3526 BT_DBG("All LE connection parameters were removed");
3529 /* Copy the Identity Address of the controller.
3531 * If the controller has a public BD_ADDR, then by default use that one.
3532 * If this is a LE only controller without a public address, default to
3533 * the static random address.
3535 * For debugging purposes it is possible to force controllers with a
3536 * public address to use the static random address instead.
3538 * In case BR/EDR has been disabled on a dual-mode controller and
3539 * userspace has configured a static address, then that address
3540 * becomes the identity address instead of the public BR/EDR address.
3542 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3545 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3546 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3547 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3548 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3549 bacpy(bdaddr, &hdev->static_addr);
3550 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3552 bacpy(bdaddr, &hdev->bdaddr);
3553 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
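/* Two examples of the selection above:
 *
 *	- Dual-mode controller with a public BD_ADDR and BR/EDR enabled:
 *	  the public address is returned as ADDR_LE_DEV_PUBLIC.
 *	- LE-only controller with no public address but a configured
 *	  static address: the static address is returned as
 *	  ADDR_LE_DEV_RANDOM.
 *
 * HCI_FORCE_STATIC_ADDR overrides the first case for debugging.
 */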
3557 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3561 for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3562 clear_bit(i, hdev->suspend_tasks);
3564 wake_up(&hdev->suspend_wait_q);
3567 static int hci_suspend_wait_event(struct hci_dev *hdev)
3570 (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \
3571 __SUSPEND_NUM_TASKS)
3574 int ret = wait_event_timeout(hdev->suspend_wait_q,
3575 WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3578 bt_dev_err(hdev, "Timed out waiting for suspend events");
3579 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3580 if (test_bit(i, hdev->suspend_tasks))
3581 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3582 clear_bit(i, hdev->suspend_tasks);
3593 static void hci_prepare_suspend(struct work_struct *work)
3595 struct hci_dev *hdev =
3596 container_of(work, struct hci_dev, suspend_prepare);
3599 hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3600 hci_dev_unlock(hdev);
3603 static int hci_change_suspend_state(struct hci_dev *hdev,
3604 enum suspended_state next)
3606 hdev->suspend_state_next = next;
3607 set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3608 queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3609 return hci_suspend_wait_event(hdev);
3612 static void hci_clear_wake_reason(struct hci_dev *hdev)
3616 hdev->wake_reason = 0;
3617 bacpy(&hdev->wake_addr, BDADDR_ANY);
3618 hdev->wake_addr_type = 0;
3620 hci_dev_unlock(hdev);
3623 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3626 struct hci_dev *hdev =
3627 container_of(nb, struct hci_dev, suspend_notifier);
3630 if (action == PM_SUSPEND_PREPARE)
3631 ret = hci_suspend_dev(hdev);
3632 else if (action == PM_POST_SUSPEND)
3633 ret = hci_resume_dev(hdev);
3636 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3642 /* Alloc HCI device */
3643 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
3645 struct hci_dev *hdev;
3646 unsigned int alloc_size;
3648 alloc_size = sizeof(*hdev);
3650 /* Fixme: May need ALIGN-ment? */
3651 alloc_size += sizeof_priv;
3654 hdev = kzalloc(alloc_size, GFP_KERNEL);
3658 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3659 hdev->esco_type = (ESCO_HV1);
3660 hdev->link_mode = (HCI_LM_ACCEPT);
3661 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3662 hdev->io_capability = 0x03; /* No Input No Output */
3663 hdev->manufacturer = 0xffff; /* Default to internal use */
3664 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3665 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3666 hdev->adv_instance_cnt = 0;
3667 hdev->cur_adv_instance = 0x00;
3668 hdev->adv_instance_timeout = 0;
3670 hdev->advmon_allowlist_duration = 300;
3671 hdev->advmon_no_filter_duration = 500;
3672 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
3674 hdev->sniff_max_interval = 800;
3675 hdev->sniff_min_interval = 80;
3677 hdev->le_adv_channel_map = 0x07;
3678 hdev->le_adv_min_interval = 0x0800;
3679 hdev->le_adv_max_interval = 0x0800;
3680 hdev->le_scan_interval = 0x0060;
3681 hdev->le_scan_window = 0x0030;
3682 hdev->le_scan_int_suspend = 0x0400;
3683 hdev->le_scan_window_suspend = 0x0012;
3684 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3685 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3686 hdev->le_scan_int_adv_monitor = 0x0060;
3687 hdev->le_scan_window_adv_monitor = 0x0030;
3688 hdev->le_scan_int_connect = 0x0060;
3689 hdev->le_scan_window_connect = 0x0060;
3690 hdev->le_conn_min_interval = 0x0018;
3691 hdev->le_conn_max_interval = 0x0028;
3692 hdev->le_conn_latency = 0x0000;
3693 hdev->le_supv_timeout = 0x002a;
3694 hdev->le_def_tx_len = 0x001b;
3695 hdev->le_def_tx_time = 0x0148;
3696 hdev->le_max_tx_len = 0x001b;
3697 hdev->le_max_tx_time = 0x0148;
3698 hdev->le_max_rx_len = 0x001b;
3699 hdev->le_max_rx_time = 0x0148;
3700 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3701 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3702 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3703 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3704 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3705 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3706 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3707 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3708 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
3710 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3711 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3712 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3713 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3714 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3715 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3717 /* default 1.28 sec page scan */
3718 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3719 hdev->def_page_scan_int = 0x0800;
3720 hdev->def_page_scan_window = 0x0012;
3722 mutex_init(&hdev->lock);
3723 mutex_init(&hdev->req_lock);
3725 INIT_LIST_HEAD(&hdev->mgmt_pending);
3726 INIT_LIST_HEAD(&hdev->reject_list);
3727 INIT_LIST_HEAD(&hdev->accept_list);
3728 INIT_LIST_HEAD(&hdev->uuids);
3729 INIT_LIST_HEAD(&hdev->link_keys);
3730 INIT_LIST_HEAD(&hdev->long_term_keys);
3731 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3732 INIT_LIST_HEAD(&hdev->remote_oob_data);
3733 INIT_LIST_HEAD(&hdev->le_accept_list);
3734 INIT_LIST_HEAD(&hdev->le_resolv_list);
3735 INIT_LIST_HEAD(&hdev->le_conn_params);
3736 INIT_LIST_HEAD(&hdev->pend_le_conns);
3737 INIT_LIST_HEAD(&hdev->pend_le_reports);
3738 INIT_LIST_HEAD(&hdev->conn_hash.list);
3739 INIT_LIST_HEAD(&hdev->adv_instances);
3740 INIT_LIST_HEAD(&hdev->blocked_keys);
3742 INIT_LIST_HEAD(&hdev->local_codecs);
3743 INIT_WORK(&hdev->rx_work, hci_rx_work);
3744 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3745 INIT_WORK(&hdev->tx_work, hci_tx_work);
3746 INIT_WORK(&hdev->power_on, hci_power_on);
3747 INIT_WORK(&hdev->error_reset, hci_error_reset);
3748 INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3750 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3752 skb_queue_head_init(&hdev->rx_q);
3753 skb_queue_head_init(&hdev->cmd_q);
3754 skb_queue_head_init(&hdev->raw_q);
3756 init_waitqueue_head(&hdev->req_wait_q);
3757 init_waitqueue_head(&hdev->suspend_wait_q);
3759 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3760 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
3762 hci_request_setup(hdev);
3764 hci_init_sysfs(hdev);
3765 discovery_init(hdev);
3769 EXPORT_SYMBOL(hci_alloc_dev_priv);
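/* Illustrative sketch of the usual driver-side sequence around the
 * allocator above (my_open/my_close/my_send are hypothetical driver
 * callbacks):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * hci_register_dev() below rejects any device that is missing one of
 * the open/close/send callbacks.
 */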
3771 /* Free HCI device */
3772 void hci_free_dev(struct hci_dev *hdev)
3774 /* will free via device release */
3775 put_device(&hdev->dev);
3777 EXPORT_SYMBOL(hci_free_dev);
3779 /* Register HCI device */
3780 int hci_register_dev(struct hci_dev *hdev)
3784 if (!hdev->open || !hdev->close || !hdev->send)
3787 /* Do not allow HCI_AMP devices to register at index 0,
3788 * so the index can be used as the AMP controller ID.
3790 switch (hdev->dev_type) {
3792 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3795 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3804 sprintf(hdev->name, "hci%d", id);
3807 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3809 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3810 if (!hdev->workqueue) {
3815 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3817 if (!hdev->req_workqueue) {
3818 destroy_workqueue(hdev->workqueue);
3823 if (!IS_ERR_OR_NULL(bt_debugfs))
3824 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3826 dev_set_name(&hdev->dev, "%s", hdev->name);
3828 error = device_add(&hdev->dev);
3832 hci_leds_init(hdev);
3834 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3835 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3838 if (rfkill_register(hdev->rfkill) < 0) {
3839 rfkill_destroy(hdev->rfkill);
3840 hdev->rfkill = NULL;
3844 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3845 hci_dev_set_flag(hdev, HCI_RFKILLED);
3847 hci_dev_set_flag(hdev, HCI_SETUP);
3848 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3850 if (hdev->dev_type == HCI_PRIMARY) {
3851 /* Assume BR/EDR support until proven otherwise (such as
3852 * through reading supported features during init.
3854 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3857 write_lock(&hci_dev_list_lock);
3858 list_add(&hdev->list, &hci_dev_list);
3859 write_unlock(&hci_dev_list_lock);
3861 /* Devices that are marked for raw-only usage are unconfigured
3862 * and should not be included in normal operation.
3864 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3865 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3867 hci_sock_dev_event(hdev, HCI_DEV_REG);
3870 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3871 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3872 error = register_pm_notifier(&hdev->suspend_notifier);
3877 queue_work(hdev->req_workqueue, &hdev->power_on);
3879 idr_init(&hdev->adv_monitors_idr);
3880 msft_register(hdev);
3885 destroy_workqueue(hdev->workqueue);
3886 destroy_workqueue(hdev->req_workqueue);
3888 ida_simple_remove(&hci_index_ida, hdev->id);
3892 EXPORT_SYMBOL(hci_register_dev);
3894 /* Unregister HCI device */
3895 void hci_unregister_dev(struct hci_dev *hdev)
3897 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3899 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3901 write_lock(&hci_dev_list_lock);
3902 list_del(&hdev->list);
3903 write_unlock(&hci_dev_list_lock);
3905 cancel_work_sync(&hdev->power_on);
3907 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3908 hci_suspend_clear_tasks(hdev);
3909 unregister_pm_notifier(&hdev->suspend_notifier);
3910 cancel_work_sync(&hdev->suspend_prepare);
3913 msft_unregister(hdev);
3915 hci_dev_do_close(hdev);
3917 if (!test_bit(HCI_INIT, &hdev->flags) &&
3918 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3919 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3921 mgmt_index_removed(hdev);
3922 hci_dev_unlock(hdev);
3925 /* mgmt_index_removed should take care of emptying the
3927 BUG_ON(!list_empty(&hdev->mgmt_pending));
3929 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3932 rfkill_unregister(hdev->rfkill);
3933 rfkill_destroy(hdev->rfkill);
3936 device_del(&hdev->dev);
3937 /* Actual cleanup is deferred until hci_release_dev(). */
3940 EXPORT_SYMBOL(hci_unregister_dev);
3942 /* Release HCI device */
3943 void hci_release_dev(struct hci_dev *hdev)
3945 debugfs_remove_recursive(hdev->debugfs);
3946 kfree_const(hdev->hw_info);
3947 kfree_const(hdev->fw_info);
3949 destroy_workqueue(hdev->workqueue);
3950 destroy_workqueue(hdev->req_workqueue);
3953 hci_bdaddr_list_clear(&hdev->reject_list);
3954 hci_bdaddr_list_clear(&hdev->accept_list);
3955 hci_uuids_clear(hdev);
3956 hci_link_keys_clear(hdev);
3957 hci_smp_ltks_clear(hdev);
3958 hci_smp_irks_clear(hdev);
3959 hci_remote_oob_data_clear(hdev);
3960 hci_adv_instances_clear(hdev);
3961 hci_adv_monitors_clear(hdev);
3962 hci_bdaddr_list_clear(&hdev->le_accept_list);
3963 hci_bdaddr_list_clear(&hdev->le_resolv_list);
3964 hci_conn_params_clear_all(hdev);
3965 hci_discovery_filter_clear(hdev);
3966 hci_blocked_keys_clear(hdev);
3967 hci_dev_unlock(hdev);
3969 ida_simple_remove(&hci_index_ida, hdev->id);
3972 EXPORT_SYMBOL(hci_release_dev);
3974 /* Suspend HCI device */
3975 int hci_suspend_dev(struct hci_dev *hdev)
3978 u8 state = BT_RUNNING;
3980 bt_dev_dbg(hdev, "");
3982 /* Suspend should only act when powered. */
3983 if (!hdev_is_powered(hdev) ||
3984 hci_dev_test_flag(hdev, HCI_UNREGISTER))
3987 /* If powering down, wait for completion. */
3988 if (mgmt_powering_down(hdev)) {
3989 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3990 ret = hci_suspend_wait_event(hdev);
3995 /* Suspend consists of two actions:
3996 * - First, disconnect everything and make the controller not
3997 * connectable (disabling scanning)
3998 * - Second, program event filter/accept list and enable scan
4000 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
4004 state = BT_SUSPEND_DISCONNECT;
4006 /* Only configure accept list if device may wakeup. */
4007 if (hdev->wakeup && hdev->wakeup(hdev)) {
4008 ret = hci_change_suspend_state(hdev, BT_SUSPEND_CONFIGURE_WAKE);
4010 state = BT_SUSPEND_CONFIGURE_WAKE;
4014 hci_clear_wake_reason(hdev);
4015 mgmt_suspending(hdev, state);
4018 /* We always allow suspend even if suspend preparation failed and
4019 * attempt to recover in resume.
4021 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
4024 EXPORT_SYMBOL(hci_suspend_dev);
4026 /* Resume HCI device */
4027 int hci_resume_dev(struct hci_dev *hdev)
4031 bt_dev_dbg(hdev, "");
4033 /* Resume should only act when powered. */
4034 if (!hdev_is_powered(hdev) ||
4035 hci_dev_test_flag(hdev, HCI_UNREGISTER))
4038 /* If powering down don't attempt to resume */
4039 if (mgmt_powering_down(hdev))
4042 ret = hci_change_suspend_state(hdev, BT_RUNNING);
4044 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
4045 hdev->wake_addr_type);
4047 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
4050 EXPORT_SYMBOL(hci_resume_dev);
4052 /* Reset HCI device */
4053 int hci_reset_dev(struct hci_dev *hdev)
4055 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4056 struct sk_buff *skb;
4058 skb = bt_skb_alloc(3, GFP_ATOMIC);
4062 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
4063 skb_put_data(skb, hw_err, 3);
4065 bt_dev_err(hdev, "Injecting HCI hardware error event");
4067 /* Send Hardware Error to upper stack */
4068 return hci_recv_frame(hdev, skb);
4070 EXPORT_SYMBOL(hci_reset_dev);
4072 /* Receive frame from HCI drivers */
4073 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4075 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4076 && !test_bit(HCI_INIT, &hdev->flags))) {
4081 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4082 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
4083 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4084 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
4090 bt_cb(skb)->incoming = 1;
4093 __net_timestamp(skb);
4095 skb_queue_tail(&hdev->rx_q, skb);
4096 queue_work(hdev->workqueue, &hdev->rx_work);
4100 EXPORT_SYMBOL(hci_recv_frame);
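/* Illustrative sketch of a driver handing a received HCI event to the
 * core ("buf"/"len" stand in for data read from the transport):
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, buf, len);
 *	return hci_recv_frame(hdev, skb);
 *
 * Frames are dropped unless the device is up or still initializing,
 * and only event, ACL, SCO and ISO packet types are accepted.
 */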
4102 /* Receive diagnostic message from HCI drivers */
4103 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
4105 /* Mark as diagnostic packet */
4106 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
4109 __net_timestamp(skb);
4111 skb_queue_tail(&hdev->rx_q, skb);
4112 queue_work(hdev->workqueue, &hdev->rx_work);
4116 EXPORT_SYMBOL(hci_recv_diag);
4118 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
4122 va_start(vargs, fmt);
4123 kfree_const(hdev->hw_info);
4124 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4127 EXPORT_SYMBOL(hci_set_hw_info);
4129 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
4133 va_start(vargs, fmt);
4134 kfree_const(hdev->fw_info);
4135 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4138 EXPORT_SYMBOL(hci_set_fw_info);
4140 /* ---- Interface to upper protocols ---- */
4142 int hci_register_cb(struct hci_cb *cb)
4144 BT_DBG("%p name %s", cb, cb->name);
4146 mutex_lock(&hci_cb_list_lock);
4147 list_add_tail(&cb->list, &hci_cb_list);
4148 mutex_unlock(&hci_cb_list_lock);
4152 EXPORT_SYMBOL(hci_register_cb);
4154 int hci_unregister_cb(struct hci_cb *cb)
4156 BT_DBG("%p name %s", cb, cb->name);
4158 mutex_lock(&hci_cb_list_lock);
4159 list_del(&cb->list);
4160 mutex_unlock(&hci_cb_list_lock);
4164 EXPORT_SYMBOL(hci_unregister_cb);
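/* Illustrative sketch of an upper protocol hooking into connection
 * events (my_connect_cfm is a hypothetical callback):
 *
 *	static struct hci_cb my_cb = {
 *		.name		= "my_proto",
 *		.connect_cfm	= my_connect_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 *
 * hci_cb_list_lock is a mutex, so registration may sleep.
 */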
4166 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4170 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4174 __net_timestamp(skb);
4176 /* Send copy to monitor */
4177 hci_send_to_monitor(hdev, skb);
4179 if (atomic_read(&hdev->promisc)) {
4180 /* Send copy to the sockets */
4181 hci_send_to_sock(hdev, skb);
4184 /* Get rid of skb owner, prior to sending to the driver. */
4187 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4192 err = hdev->send(hdev, skb);
4194 bt_dev_err(hdev, "sending frame failed (%d)", err);
4199 /* Send HCI command */
4200 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4203 struct sk_buff *skb;
4205 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4207 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4209 bt_dev_err(hdev, "no memory for command");
4213 /* Stand-alone HCI commands must be flagged as
4214 * single-command requests.
4216 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4218 skb_queue_tail(&hdev->cmd_q, skb);
4219 queue_work(hdev->workqueue, &hdev->cmd_work);
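/* Illustrative sketches of queueing commands; the completion arrives
 * later as a Command Complete/Status event. Without parameters:
 *
 *	err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *
 * With parameters:
 *
 *	struct hci_cp_write_le_host_supported cp = { .le = 1 };
 *
 *	err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
 *			   sizeof(cp), &cp);
 */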
4224 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4227 struct sk_buff *skb;
4229 if (hci_opcode_ogf(opcode) != 0x3f) {
4230 /* A controller receiving a command shall respond with either
4231 * a Command Status Event or a Command Complete Event.
4232 * Therefore, all standard HCI commands must be sent via the
4233 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4234 * Some vendors do not comply with this rule for vendor-specific
4235 * commands and do not return any event. We want to support
4236 * unresponded commands for such cases only.
4238 bt_dev_err(hdev, "unresponded command not supported");
4242 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4244 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4249 hci_send_frame(hdev, skb);
4253 EXPORT_SYMBOL(__hci_cmd_send);
4255 /* Get data from the previously sent command */
4256 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4258 struct hci_command_hdr *hdr;
4260 if (!hdev->sent_cmd)
4263 hdr = (void *) hdev->sent_cmd->data;
4265 if (hdr->opcode != cpu_to_le16(opcode))
4268 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4270 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4273 /* Send HCI command and wait for command complete event */
4274 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4275 const void *param, u32 timeout)
4277 struct sk_buff *skb;
4279 if (!test_bit(HCI_UP, &hdev->flags))
4280 return ERR_PTR(-ENETDOWN);
4282 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4284 hci_req_sync_lock(hdev);
4285 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4286 hci_req_sync_unlock(hdev);
4290 EXPORT_SYMBOL(hci_cmd_sync);
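/* Illustrative sketch of a synchronous command whose Command Complete
 * parameters are returned in an skb:
 *
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	// skb->data holds struct hci_rp_read_bd_addr
 *	kfree_skb(skb);
 */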
4293 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4295 struct hci_acl_hdr *hdr;
4298 skb_push(skb, HCI_ACL_HDR_SIZE);
4299 skb_reset_transport_header(skb);
4300 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4301 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4302 hdr->dlen = cpu_to_le16(len);
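/* hci_handle_pack() stores the 12-bit connection handle in the low
 * bits and the 2-bit Packet Boundary plus 2-bit Broadcast flags above
 * it. For example, handle 0x002a with ACL_START (0x02) packs to
 * 0x202a; the receive path splits it back with hci_handle() and
 * hci_flags().
 */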
4305 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4306 struct sk_buff *skb, __u16 flags)
4308 struct hci_conn *conn = chan->conn;
4309 struct hci_dev *hdev = conn->hdev;
4310 struct sk_buff *list;
4312 skb->len = skb_headlen(skb);
4315 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4317 switch (hdev->dev_type) {
4319 hci_add_acl_hdr(skb, conn->handle, flags);
4322 hci_add_acl_hdr(skb, chan->handle, flags);
4325 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4329 list = skb_shinfo(skb)->frag_list;
4331 /* Non-fragmented */
4332 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4334 skb_queue_tail(queue, skb);
4337 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4339 skb_shinfo(skb)->frag_list = NULL;
4341 /* Queue all fragments atomically. We need to use spin_lock_bh
4342 * here because of 6LoWPAN links, as there this function is
4343 * called from softirq and using normal spin lock could cause
4346 spin_lock_bh(&queue->lock);
4348 __skb_queue_tail(queue, skb);
4350 flags &= ~ACL_START;
4353 skb = list; list = list->next;
4355 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4356 hci_add_acl_hdr(skb, conn->handle, flags);
4358 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4360 __skb_queue_tail(queue, skb);
4363 spin_unlock_bh(&queue->lock);
4367 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4369 struct hci_dev *hdev = chan->conn->hdev;
4371 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4373 hci_queue_acl(chan, &chan->data_q, skb, flags);
4375 queue_work(hdev->workqueue, &hdev->tx_work);
4379 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4381 struct hci_dev *hdev = conn->hdev;
4382 struct hci_sco_hdr hdr;
4384 BT_DBG("%s len %d", hdev->name, skb->len);
4386 hdr.handle = cpu_to_le16(conn->handle);
4387 hdr.dlen = skb->len;
4389 skb_push(skb, HCI_SCO_HDR_SIZE);
4390 skb_reset_transport_header(skb);
4391 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4393 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4395 skb_queue_tail(&conn->data_q, skb);
4396 queue_work(hdev->workqueue, &hdev->tx_work);
4399 /* ---- HCI TX task (outgoing data) ---- */
4401 /* HCI Connection scheduler */
4402 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4405 struct hci_conn_hash *h = &hdev->conn_hash;
4406 struct hci_conn *conn = NULL, *c;
4407 unsigned int num = 0, min = ~0;
4409 /* We don't have to lock device here. Connections are always
4410 * added and removed with TX task disabled. */
4414 list_for_each_entry_rcu(c, &h->list, list) {
4415 if (c->type != type || skb_queue_empty(&c->data_q))
4418 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4423 if (c->sent < min) {
4428 if (hci_conn_num(hdev, type) == num)
4437 switch (conn->type) {
4439 cnt = hdev->acl_cnt;
4443 cnt = hdev->sco_cnt;
4446 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4450 bt_dev_err(hdev, "unknown link type %d", conn->type);
4458 BT_DBG("conn %p quote %d", conn, *quote);
4462 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4464 struct hci_conn_hash *h = &hdev->conn_hash;
4467 bt_dev_err(hdev, "link tx timeout");
4471 /* Kill stalled connections */
4472 list_for_each_entry_rcu(c, &h->list, list) {
4473 if (c->type == type && c->sent) {
4474 bt_dev_err(hdev, "killing stalled connection %pMR",
4476 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4483 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4486 struct hci_conn_hash *h = &hdev->conn_hash;
4487 struct hci_chan *chan = NULL;
4488 unsigned int num = 0, min = ~0, cur_prio = 0;
4489 struct hci_conn *conn;
4490 int cnt, q, conn_num = 0;
4492 BT_DBG("%s", hdev->name);
4496 list_for_each_entry_rcu(conn, &h->list, list) {
4497 struct hci_chan *tmp;
4499 if (conn->type != type)
4502 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4507 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4508 struct sk_buff *skb;
4510 if (skb_queue_empty(&tmp->data_q))
4513 skb = skb_peek(&tmp->data_q);
4514 if (skb->priority < cur_prio)
4517 if (skb->priority > cur_prio) {
4520 cur_prio = skb->priority;
4525 if (conn->sent < min) {
4531 if (hci_conn_num(hdev, type) == conn_num)
4540 switch (chan->conn->type) {
4542 cnt = hdev->acl_cnt;
4545 cnt = hdev->block_cnt;
4549 cnt = hdev->sco_cnt;
4552 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4556 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4561 BT_DBG("chan %p quote %d", chan, *quote);
4565 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4567 struct hci_conn_hash *h = &hdev->conn_hash;
4568 struct hci_conn *conn;
4571 BT_DBG("%s", hdev->name);
4575 list_for_each_entry_rcu(conn, &h->list, list) {
4576 struct hci_chan *chan;
4578 if (conn->type != type)
4581 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4586 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4587 struct sk_buff *skb;
4594 if (skb_queue_empty(&chan->data_q))
4597 skb = skb_peek(&chan->data_q);
4598 if (skb->priority >= HCI_PRIO_MAX - 1)
4601 skb->priority = HCI_PRIO_MAX - 1;
4603 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4607 if (hci_conn_num(hdev, type) == num)
4615 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4617 /* Calculate count of blocks used by this packet */
4618 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
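/* Worked example of the block math above: with a controller block
 * length of 339 bytes, a 1021-byte ACL packet (1017 bytes of payload
 * after the 4-byte ACL header) occupies DIV_ROUND_UP(1017, 339) = 3
 * data blocks.
 */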
4621 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4623 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4624 /* ACL tx timeout must be longer than maximum
4625 * link supervision timeout (40.9 seconds) */
4626 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4627 HCI_ACL_TX_TIMEOUT))
4628 hci_link_tx_to(hdev, ACL_LINK);
4633 static void hci_sched_sco(struct hci_dev *hdev)
4635 struct hci_conn *conn;
4636 struct sk_buff *skb;
4639 BT_DBG("%s", hdev->name);
4641 if (!hci_conn_num(hdev, SCO_LINK))
4644 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
4645 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4646 BT_DBG("skb %p len %d", skb, skb->len);
4647 hci_send_frame(hdev, skb);
4650 if (conn->sent == ~0)
4656 static void hci_sched_esco(struct hci_dev *hdev)
4658 struct hci_conn *conn;
4659 struct sk_buff *skb;
4662 BT_DBG("%s", hdev->name);
4664 if (!hci_conn_num(hdev, ESCO_LINK))
4667 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4669 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4670 BT_DBG("skb %p len %d", skb, skb->len);
4671 hci_send_frame(hdev, skb);
4674 if (conn->sent == ~0)
4680 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4682 unsigned int cnt = hdev->acl_cnt;
4683 struct hci_chan *chan;
4684 struct sk_buff *skb;
4687 __check_timeout(hdev, cnt);
4689 while (hdev->acl_cnt &&
4690 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
4691 u32 priority = (skb_peek(&chan->data_q))->priority;
4692 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4693 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4694 skb->len, skb->priority);
4696 /* Stop if priority has changed */
4697 if (skb->priority < priority)
4700 skb = skb_dequeue(&chan->data_q);
4702 hci_conn_enter_active_mode(chan->conn,
4703 bt_cb(skb)->force_active);
4705 hci_send_frame(hdev, skb);
4706 hdev->acl_last_tx = jiffies;
4712 /* Send pending SCO packets right away */
4713 hci_sched_sco(hdev);
4714 hci_sched_esco(hdev);
4718 if (cnt != hdev->acl_cnt)
4719 hci_prio_recalculate(hdev, ACL_LINK);
4722 static void hci_sched_acl_blk(struct hci_dev *hdev)
4724 unsigned int cnt = hdev->block_cnt;
4725 struct hci_chan *chan;
4726 struct sk_buff *skb;
4730 __check_timeout(hdev, cnt);
4732 BT_DBG("%s", hdev->name);
4734 if (hdev->dev_type == HCI_AMP)
4739 while (hdev->block_cnt > 0 &&
4740 (chan = hci_chan_sent(hdev, type, "e))) {
4741 u32 priority = (skb_peek(&chan->data_q))->priority;
4742 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4745 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4746 skb->len, skb->priority);
4748 /* Stop if priority has changed */
4749 if (skb->priority < priority)
4752 skb = skb_dequeue(&chan->data_q);
4754 blocks = __get_blocks(hdev, skb);
4755 if (blocks > hdev->block_cnt)
4758 hci_conn_enter_active_mode(chan->conn,
4759 bt_cb(skb)->force_active);
4761 hci_send_frame(hdev, skb);
4762 hdev->acl_last_tx = jiffies;
4764 hdev->block_cnt -= blocks;
4767 chan->sent += blocks;
4768 chan->conn->sent += blocks;
4772 if (cnt != hdev->block_cnt)
4773 hci_prio_recalculate(hdev, type);
4776 static void hci_sched_acl(struct hci_dev *hdev)
4778 BT_DBG("%s", hdev->name);
4780 /* No ACL link over BR/EDR controller */
4781 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4784 /* No AMP link over AMP controller */
4785 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4788 switch (hdev->flow_ctl_mode) {
4789 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4790 hci_sched_acl_pkt(hdev);
4793 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4794 hci_sched_acl_blk(hdev);
4799 static void hci_sched_le(struct hci_dev *hdev)
4801 struct hci_chan *chan;
4802 struct sk_buff *skb;
4803 int quote, cnt, tmp;
4805 BT_DBG("%s", hdev->name);
4807 if (!hci_conn_num(hdev, LE_LINK))
4810 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4812 __check_timeout(hdev, cnt);
4815 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
4816 u32 priority = (skb_peek(&chan->data_q))->priority;
4817 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4818 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4819 skb->len, skb->priority);
4821 /* Stop if priority has changed */
4822 if (skb->priority < priority)
4825 skb = skb_dequeue(&chan->data_q);
4827 hci_send_frame(hdev, skb);
4828 hdev->le_last_tx = jiffies;
4834 /* Send pending SCO packets right away */
4835 hci_sched_sco(hdev);
4836 hci_sched_esco(hdev);
4843 hdev->acl_cnt = cnt;
4846 hci_prio_recalculate(hdev, LE_LINK);
4849 static void hci_tx_work(struct work_struct *work)
4851 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4852 struct sk_buff *skb;
4854 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4855 hdev->sco_cnt, hdev->le_cnt);
4857 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4858 /* Schedule queues and send stuff to HCI driver */
4859 hci_sched_sco(hdev);
4860 hci_sched_esco(hdev);
4861 hci_sched_acl(hdev);
4865 /* Send next queued raw (unknown type) packet */
4866 while ((skb = skb_dequeue(&hdev->raw_q)))
4867 hci_send_frame(hdev, skb);
4870 /* ----- HCI RX task (incoming data processing) ----- */
4872 /* ACL data packet */
4873 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4875 struct hci_acl_hdr *hdr = (void *) skb->data;
4876 struct hci_conn *conn;
4877 __u16 handle, flags;
4879 skb_pull(skb, HCI_ACL_HDR_SIZE);
4881 handle = __le16_to_cpu(hdr->handle);
4882 flags = hci_flags(handle);
4883 handle = hci_handle(handle);
4885 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4888 hdev->stat.acl_rx++;
4891 conn = hci_conn_hash_lookup_handle(hdev, handle);
4892 hci_dev_unlock(hdev);
4895 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4897 /* Send to upper protocol */
4898 l2cap_recv_acldata(conn, skb, flags);
4901 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4908 /* SCO data packet */
4909 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4911 struct hci_sco_hdr *hdr = (void *) skb->data;
4912 struct hci_conn *conn;
4913 __u16 handle, flags;
4915 skb_pull(skb, HCI_SCO_HDR_SIZE);
4917 handle = __le16_to_cpu(hdr->handle);
4918 flags = hci_flags(handle);
4919 handle = hci_handle(handle);
4921 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4924 hdev->stat.sco_rx++;
4927 conn = hci_conn_hash_lookup_handle(hdev, handle);
4928 hci_dev_unlock(hdev);
4931 /* Send to upper protocol */
4932 bt_cb(skb)->sco.pkt_status = flags & 0x03;
4933 sco_recv_scodata(conn, skb);
4936 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4943 static bool hci_req_is_complete(struct hci_dev *hdev)
4945 struct sk_buff *skb;
4947 skb = skb_peek(&hdev->cmd_q);
4951 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4954 static void hci_resend_last(struct hci_dev *hdev)
4956 struct hci_command_hdr *sent;
4957 struct sk_buff *skb;
4960 if (!hdev->sent_cmd)
4963 sent = (void *) hdev->sent_cmd->data;
4964 opcode = __le16_to_cpu(sent->opcode);
4965 if (opcode == HCI_OP_RESET)
4968 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4972 skb_queue_head(&hdev->cmd_q, skb);
4973 queue_work(hdev->workqueue, &hdev->cmd_work);
4976 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4977 hci_req_complete_t *req_complete,
4978 hci_req_complete_skb_t *req_complete_skb)
4980 struct sk_buff *skb;
4981 unsigned long flags;
4983 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4985 /* If the completed command doesn't match the last one that was
4986 * sent we need to do special handling of it.
4988 if (!hci_sent_cmd_data(hdev, opcode)) {
4989 /* Some CSR based controllers generate a spontaneous
4990 * reset complete event during init and any pending
4991 * command will never be completed. In such a case we
4992 * need to resend whatever was the last sent
4995 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4996 hci_resend_last(hdev);
5001 /* If we reach this point this event matches the last command sent */
5002 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
5004 /* If the command succeeded and there's still more commands in
5005 * this request the request is not yet complete.
5007 if (!status && !hci_req_is_complete(hdev))
5010 /* If this was the last command in a request the complete
5011 * callback would be found in hdev->sent_cmd instead of the
5012 * command queue (hdev->cmd_q).
5014 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
5015 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
5019 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
5020 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
5024 /* Remove all pending commands belonging to this request */
5025 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5026 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5027 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
5028 __skb_queue_head(&hdev->cmd_q, skb);
5032 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
5033 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
5035 *req_complete = bt_cb(skb)->hci.req_complete;
5038 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5041 static void hci_rx_work(struct work_struct *work)
5043 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5044 struct sk_buff *skb;
5046 BT_DBG("%s", hdev->name);
5048 while ((skb = skb_dequeue(&hdev->rx_q))) {
5049 /* Send copy to monitor */
5050 hci_send_to_monitor(hdev, skb);
5052 if (atomic_read(&hdev->promisc)) {
5053 /* Send copy to the sockets */
5054 hci_send_to_sock(hdev, skb);
5057 /* If the device has been opened in HCI_USER_CHANNEL,
5058 * userspace has exclusive access to the device.
5059 * While the device is in HCI_INIT, we still need to pass
5060 * the data packets to the driver in order
5061 * to complete its setup().
5063 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5064 !test_bit(HCI_INIT, &hdev->flags)) {
5069 if (test_bit(HCI_INIT, &hdev->flags)) {
5070 /* Don't process data packets in this state. */
5071 switch (hci_skb_pkt_type(skb)) {
5072 case HCI_ACLDATA_PKT:
5073 case HCI_SCODATA_PKT:
5074 case HCI_ISODATA_PKT:
5081 switch (hci_skb_pkt_type(skb)) {
5083 BT_DBG("%s Event packet", hdev->name);
5084 hci_event_packet(hdev, skb);
5087 case HCI_ACLDATA_PKT:
5088 BT_DBG("%s ACL data packet", hdev->name);
5089 hci_acldata_packet(hdev, skb);
5092 case HCI_SCODATA_PKT:
5093 BT_DBG("%s SCO data packet", hdev->name);
5094 hci_scodata_packet(hdev, skb);
5104 static void hci_cmd_work(struct work_struct *work)
5106 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5107 struct sk_buff *skb;
5109 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5110 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5112 /* Send queued commands */
5113 if (atomic_read(&hdev->cmd_cnt)) {
5114 skb = skb_dequeue(&hdev->cmd_q);
5118 kfree_skb(hdev->sent_cmd);
5120 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5121 if (hdev->sent_cmd) {
5122 if (hci_req_status_pend(hdev))
5123 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
5124 atomic_dec(&hdev->cmd_cnt);
5125 hci_send_frame(hdev, skb);
5126 if (test_bit(HCI_RESET, &hdev->flags))
5127 cancel_delayed_work(&hdev->cmd_timer);
5129 schedule_delayed_work(&hdev->cmd_timer,
5132 skb_queue_head(&hdev->cmd_q, skb);
5133 queue_work(hdev->workqueue, &hdev->cmd_work);