2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
/* Interface version/revision reported to userspace via
 * MGMT_OP_READ_VERSION (see mgmt_fill_version_info() below).
 */
43 #define MGMT_VERSION 1
44 #define MGMT_REVISION 21
/* Opcodes a trusted (privileged) management socket may issue; reported
 * verbatim by read_commands() for HCI_SOCK_TRUSTED sockets.
 * NOTE(review): several interior lines of this table are missing from
 * this view of the file (line numbering has gaps) -- verify the entry
 * list against the complete source.
 */
46 static const u16 mgmt_commands[] = {
47 MGMT_OP_READ_INDEX_LIST,
50 MGMT_OP_SET_DISCOVERABLE,
51 MGMT_OP_SET_CONNECTABLE,
52 MGMT_OP_SET_FAST_CONNECTABLE,
54 MGMT_OP_SET_LINK_SECURITY,
58 MGMT_OP_SET_DEV_CLASS,
59 MGMT_OP_SET_LOCAL_NAME,
62 MGMT_OP_LOAD_LINK_KEYS,
63 MGMT_OP_LOAD_LONG_TERM_KEYS,
65 MGMT_OP_GET_CONNECTIONS,
66 MGMT_OP_PIN_CODE_REPLY,
67 MGMT_OP_PIN_CODE_NEG_REPLY,
68 MGMT_OP_SET_IO_CAPABILITY,
70 MGMT_OP_CANCEL_PAIR_DEVICE,
71 MGMT_OP_UNPAIR_DEVICE,
72 MGMT_OP_USER_CONFIRM_REPLY,
73 MGMT_OP_USER_CONFIRM_NEG_REPLY,
74 MGMT_OP_USER_PASSKEY_REPLY,
75 MGMT_OP_USER_PASSKEY_NEG_REPLY,
76 MGMT_OP_READ_LOCAL_OOB_DATA,
77 MGMT_OP_ADD_REMOTE_OOB_DATA,
78 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
79 MGMT_OP_START_DISCOVERY,
80 MGMT_OP_STOP_DISCOVERY,
83 MGMT_OP_UNBLOCK_DEVICE,
84 MGMT_OP_SET_DEVICE_ID,
85 MGMT_OP_SET_ADVERTISING,
87 MGMT_OP_SET_STATIC_ADDRESS,
88 MGMT_OP_SET_SCAN_PARAMS,
89 MGMT_OP_SET_SECURE_CONN,
90 MGMT_OP_SET_DEBUG_KEYS,
93 MGMT_OP_GET_CONN_INFO,
94 MGMT_OP_GET_CLOCK_INFO,
96 MGMT_OP_REMOVE_DEVICE,
97 MGMT_OP_LOAD_CONN_PARAM,
98 MGMT_OP_READ_UNCONF_INDEX_LIST,
99 MGMT_OP_READ_CONFIG_INFO,
100 MGMT_OP_SET_EXTERNAL_CONFIG,
101 MGMT_OP_SET_PUBLIC_ADDRESS,
102 MGMT_OP_START_SERVICE_DISCOVERY,
103 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
104 MGMT_OP_READ_EXT_INDEX_LIST,
105 MGMT_OP_READ_ADV_FEATURES,
106 MGMT_OP_ADD_ADVERTISING,
107 MGMT_OP_REMOVE_ADVERTISING,
108 MGMT_OP_GET_ADV_SIZE_INFO,
109 MGMT_OP_START_LIMITED_DISCOVERY,
110 MGMT_OP_READ_EXT_INFO,
111 MGMT_OP_SET_APPEARANCE,
112 MGMT_OP_GET_PHY_CONFIGURATION,
113 MGMT_OP_SET_PHY_CONFIGURATION,
114 MGMT_OP_SET_BLOCKED_KEYS,
115 MGMT_OP_SET_WIDEBAND_SPEECH,
116 MGMT_OP_READ_CONTROLLER_CAP,
117 MGMT_OP_READ_EXP_FEATURES_INFO,
118 MGMT_OP_SET_EXP_FEATURE,
119 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
120 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
121 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
122 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
123 MGMT_OP_GET_DEVICE_FLAGS,
124 MGMT_OP_SET_DEVICE_FLAGS,
125 MGMT_OP_READ_ADV_MONITOR_FEATURES,
126 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
127 MGMT_OP_REMOVE_ADV_MONITOR,
128 MGMT_OP_ADD_EXT_ADV_PARAMS,
129 MGMT_OP_ADD_EXT_ADV_DATA,
130 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
/* Events a trusted management socket may receive; reported by
 * read_commands() alongside mgmt_commands[].
 * NOTE(review): this view of the table has gaps in the original line
 * numbering -- some entries may be missing here.
 */
133 static const u16 mgmt_events[] = {
134 MGMT_EV_CONTROLLER_ERROR,
136 MGMT_EV_INDEX_REMOVED,
137 MGMT_EV_NEW_SETTINGS,
138 MGMT_EV_CLASS_OF_DEV_CHANGED,
139 MGMT_EV_LOCAL_NAME_CHANGED,
140 MGMT_EV_NEW_LINK_KEY,
141 MGMT_EV_NEW_LONG_TERM_KEY,
142 MGMT_EV_DEVICE_CONNECTED,
143 MGMT_EV_DEVICE_DISCONNECTED,
144 MGMT_EV_CONNECT_FAILED,
145 MGMT_EV_PIN_CODE_REQUEST,
146 MGMT_EV_USER_CONFIRM_REQUEST,
147 MGMT_EV_USER_PASSKEY_REQUEST,
149 MGMT_EV_DEVICE_FOUND,
151 MGMT_EV_DEVICE_BLOCKED,
152 MGMT_EV_DEVICE_UNBLOCKED,
153 MGMT_EV_DEVICE_UNPAIRED,
154 MGMT_EV_PASSKEY_NOTIFY,
157 MGMT_EV_DEVICE_ADDED,
158 MGMT_EV_DEVICE_REMOVED,
159 MGMT_EV_NEW_CONN_PARAM,
160 MGMT_EV_UNCONF_INDEX_ADDED,
161 MGMT_EV_UNCONF_INDEX_REMOVED,
162 MGMT_EV_NEW_CONFIG_OPTIONS,
163 MGMT_EV_EXT_INDEX_ADDED,
164 MGMT_EV_EXT_INDEX_REMOVED,
165 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
166 MGMT_EV_ADVERTISING_ADDED,
167 MGMT_EV_ADVERTISING_REMOVED,
168 MGMT_EV_EXT_INFO_CHANGED,
169 MGMT_EV_PHY_CONFIGURATION_CHANGED,
170 MGMT_EV_EXP_FEATURE_CHANGED,
171 MGMT_EV_DEVICE_FLAGS_CHANGED,
172 MGMT_EV_ADV_MONITOR_ADDED,
173 MGMT_EV_ADV_MONITOR_REMOVED,
174 MGMT_EV_CONTROLLER_SUSPEND,
175 MGMT_EV_CONTROLLER_RESUME,
/* Subset of opcodes allowed for untrusted (non-privileged) sockets:
 * read-only information commands, no state-changing operations.
 */
178 static const u16 mgmt_untrusted_commands[] = {
179 MGMT_OP_READ_INDEX_LIST,
181 MGMT_OP_READ_UNCONF_INDEX_LIST,
182 MGMT_OP_READ_CONFIG_INFO,
183 MGMT_OP_READ_EXT_INDEX_LIST,
184 MGMT_OP_READ_EXT_INFO,
185 MGMT_OP_READ_CONTROLLER_CAP,
186 MGMT_OP_READ_EXP_FEATURES_INFO,
187 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
188 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
/* Subset of events delivered to untrusted sockets; excludes anything
 * carrying keys, pairing data or remote device identity.
 */
191 static const u16 mgmt_untrusted_events[] = {
193 MGMT_EV_INDEX_REMOVED,
194 MGMT_EV_NEW_SETTINGS,
195 MGMT_EV_CLASS_OF_DEV_CHANGED,
196 MGMT_EV_LOCAL_NAME_CHANGED,
197 MGMT_EV_UNCONF_INDEX_ADDED,
198 MGMT_EV_UNCONF_INDEX_REMOVED,
199 MGMT_EV_NEW_CONFIG_OPTIONS,
200 MGMT_EV_EXT_INDEX_ADDED,
201 MGMT_EV_EXT_INDEX_REMOVED,
202 MGMT_EV_EXT_INFO_CHANGED,
203 MGMT_EV_EXP_FEATURE_CHANGED,
/* Delay (2s, in jiffies) before the UUID/service cache is flushed --
 * see service_cache_off() below.
 */
206 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* 16 octets of zeroes; presumably used to detect blank keys -- its use
 * is not visible in this chunk, confirm against the full file.
 */
208 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
209 "\x00\x00\x00\x00\x00\x00\x00\x00"
211 /* HCI to MGMT error code conversion table */
/* Indexed directly by the raw HCI status byte; consumed only through
 * mgmt_status() below, which range-checks the index.
 */
212 static const u8 mgmt_status_table[] = {
214 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
215 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
216 MGMT_STATUS_FAILED, /* Hardware Failure */
217 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
218 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
219 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
220 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
221 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
222 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
223 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
224 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
225 MGMT_STATUS_BUSY, /* Command Disallowed */
226 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
227 MGMT_STATUS_REJECTED, /* Rejected Security */
228 MGMT_STATUS_REJECTED, /* Rejected Personal */
229 MGMT_STATUS_TIMEOUT, /* Host Timeout */
230 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
231 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
232 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
233 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
234 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
235 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
236 MGMT_STATUS_BUSY, /* Repeated Attempts */
237 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
238 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
239 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
240 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
241 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
242 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
243 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
244 MGMT_STATUS_FAILED, /* Unspecified Error */
245 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
246 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
247 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
248 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
249 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
250 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
251 MGMT_STATUS_FAILED, /* Unit Link Key Used */
252 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
253 MGMT_STATUS_TIMEOUT, /* Instant Passed */
254 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
255 MGMT_STATUS_FAILED, /* Transaction Collision */
256 MGMT_STATUS_FAILED, /* Reserved for future use */
257 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
258 MGMT_STATUS_REJECTED, /* QoS Rejected */
259 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
260 MGMT_STATUS_REJECTED, /* Insufficient Security */
261 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
262 MGMT_STATUS_FAILED, /* Reserved for future use */
263 MGMT_STATUS_BUSY, /* Role Switch Pending */
264 MGMT_STATUS_FAILED, /* Reserved for future use */
265 MGMT_STATUS_FAILED, /* Slot Violation */
266 MGMT_STATUS_FAILED, /* Role Switch Failed */
267 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
268 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
269 MGMT_STATUS_BUSY, /* Host Busy Pairing */
270 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
271 MGMT_STATUS_BUSY, /* Controller Busy */
272 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
273 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
274 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
275 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
276 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
/* Translate a raw HCI status byte into an MGMT status code via
 * mgmt_status_table[]; any value beyond the table falls back to
 * MGMT_STATUS_FAILED.
 */
279 static u8 mgmt_status(u8 hci_status)
281 if (hci_status < ARRAY_SIZE(mgmt_status_table))
282 return mgmt_status_table[hci_status];
284 return MGMT_STATUS_FAILED;
/* Broadcast an index-related event on the control channel.
 * NOTE(review): the trailing flag/skip arguments of the underlying
 * mgmt_send_event() call are on a line not visible in this chunk --
 * confirm against the full file.
 */
287 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
290 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
/* Broadcast an event on the control channel, limited to sockets that
 * have the given HCI socket flag set and excluding @skip_sk.
 * NOTE(review): the line passing @flag into mgmt_send_event() is not
 * visible in this chunk -- confirm against the full file.
 */
294 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
295 u16 len, int flag, struct sock *skip_sk)
297 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
/* Broadcast an event on the control channel to trusted sockets only,
 * excluding @skip_sk (typically the command's originator).
 */
301 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
302 struct sock *skip_sk)
304 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
305 HCI_SOCK_TRUSTED, skip_sk);
/* Map a mgmt BDADDR_LE_* address type to the HCI-layer ADDR_LE_DEV_*
 * value; anything other than public is treated as random.
 */
308 static u8 le_addr_type(u8 mgmt_addr_type)
310 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
311 return ADDR_LE_DEV_PUBLIC;
313 return ADDR_LE_DEV_RANDOM;
/* Fill a mgmt_rp_read_version reply with the compile-time interface
 * version and revision. Exported (non-static) for use outside this file.
 */
316 void mgmt_fill_version_info(void *ver)
318 struct mgmt_rp_read_version *rp = ver;
320 rp->version = MGMT_VERSION;
321 rp->revision = cpu_to_le16(MGMT_REVISION);
/* MGMT_OP_READ_VERSION handler: reply with the interface version.
 * Controller-independent (MGMT_INDEX_NONE); @hdev is used for logging only.
 */
324 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
327 struct mgmt_rp_read_version rp;
329 bt_dev_dbg(hdev, "sock %p", sk);
331 mgmt_fill_version_info(&rp);
333 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
/* MGMT_OP_READ_COMMANDS handler: reply with the list of supported
 * command opcodes and event codes. Trusted sockets get the full
 * tables; untrusted sockets get the read-only subsets. The reply is
 * a heap buffer sized for sizeof(*rp) plus one u16 per entry.
 * NOTE(review): the declarations of rp_size/i/err and the kmalloc
 * failure check are on lines not visible in this chunk.
 */
337 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
340 struct mgmt_rp_read_commands *rp;
341 u16 num_commands, num_events;
345 bt_dev_dbg(hdev, "sock %p", sk);
347 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
348 num_commands = ARRAY_SIZE(mgmt_commands);
349 num_events = ARRAY_SIZE(mgmt_events);
351 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
352 num_events = ARRAY_SIZE(mgmt_untrusted_events);
355 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
357 rp = kmalloc(rp_size, GFP_KERNEL);
361 rp->num_commands = cpu_to_le16(num_commands);
362 rp->num_events = cpu_to_le16(num_events);
364 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
365 __le16 *opcode = rp->opcodes;
/* put_unaligned_le16: rp->opcodes is a packed wire-format buffer */
367 for (i = 0; i < num_commands; i++, opcode++)
368 put_unaligned_le16(mgmt_commands[i], opcode);
370 for (i = 0; i < num_events; i++, opcode++)
371 put_unaligned_le16(mgmt_events[i], opcode);
373 __le16 *opcode = rp->opcodes;
375 for (i = 0; i < num_commands; i++, opcode++)
376 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
378 for (i = 0; i < num_events; i++, opcode++)
379 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
382 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all
 * configured HCI_PRIMARY controllers. Counts eligible devices first,
 * allocates the reply (GFP_ATOMIC, since hci_dev_list_lock is held),
 * then fills it, skipping devices in setup/config, user-channel
 * devices and raw-only (HCI_QUIRK_RAW_DEVICE) devices.
 * NOTE(review): the count reset between the two passes and the
 * kmalloc failure return are on lines not visible in this chunk.
 */
389 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
392 struct mgmt_rp_read_index_list *rp;
398 bt_dev_dbg(hdev, "sock %p", sk);
400 read_lock(&hci_dev_list_lock);
/* First pass: count configured primary controllers for sizing */
403 list_for_each_entry(d, &hci_dev_list, list) {
404 if (d->dev_type == HCI_PRIMARY &&
405 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
409 rp_len = sizeof(*rp) + (2 * count);
410 rp = kmalloc(rp_len, GFP_ATOMIC);
412 read_unlock(&hci_dev_list_lock);
/* Second pass: fill in the indices that pass the same filters */
417 list_for_each_entry(d, &hci_dev_list, list) {
418 if (hci_dev_test_flag(d, HCI_SETUP) ||
419 hci_dev_test_flag(d, HCI_CONFIG) ||
420 hci_dev_test_flag(d, HCI_USER_CHANNEL))
423 /* Devices marked as raw-only are neither configured
424 * nor unconfigured controllers.
426 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
429 if (d->dev_type == HCI_PRIMARY &&
430 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
431 rp->index[count++] = cpu_to_le16(d->id);
432 bt_dev_dbg(hdev, "Added hci%u", d->id);
/* Recompute length: second pass may have skipped some devices */
436 rp->num_controllers = cpu_to_le16(count);
437 rp_len = sizeof(*rp) + (2 * count);
439 read_unlock(&hci_dev_list_lock);
441 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: same two-pass structure as
 * read_index_list() above, but selects HCI_PRIMARY controllers that
 * DO have the HCI_UNCONFIGURED flag set.
 */
449 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
450 void *data, u16 data_len)
452 struct mgmt_rp_read_unconf_index_list *rp;
458 bt_dev_dbg(hdev, "sock %p", sk);
460 read_lock(&hci_dev_list_lock);
/* First pass: count unconfigured primary controllers */
463 list_for_each_entry(d, &hci_dev_list, list) {
464 if (d->dev_type == HCI_PRIMARY &&
465 hci_dev_test_flag(d, HCI_UNCONFIGURED))
469 rp_len = sizeof(*rp) + (2 * count);
470 rp = kmalloc(rp_len, GFP_ATOMIC);
472 read_unlock(&hci_dev_list_lock);
/* Second pass: fill indices, skipping setup/config/user-channel */
477 list_for_each_entry(d, &hci_dev_list, list) {
478 if (hci_dev_test_flag(d, HCI_SETUP) ||
479 hci_dev_test_flag(d, HCI_CONFIG) ||
480 hci_dev_test_flag(d, HCI_USER_CHANNEL))
483 /* Devices marked as raw-only are neither configured
484 * nor unconfigured controllers.
486 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
489 if (d->dev_type == HCI_PRIMARY &&
490 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
491 rp->index[count++] = cpu_to_le16(d->id);
492 bt_dev_dbg(hdev, "Added hci%u", d->id);
496 rp->num_controllers = cpu_to_le16(count);
497 rp_len = sizeof(*rp) + (2 * count);
499 read_unlock(&hci_dev_list_lock);
501 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
502 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
/* MGMT_OP_READ_EXT_INDEX_LIST handler: list primary and AMP
 * controllers with a per-entry type (0x00 configured primary,
 * 0x01 unconfigured primary, 0x02 AMP) and bus. Calling this once
 * switches the socket to extended index events only.
 */
509 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
510 void *data, u16 data_len)
512 struct mgmt_rp_read_ext_index_list *rp;
517 bt_dev_dbg(hdev, "sock %p", sk);
519 read_lock(&hci_dev_list_lock);
/* First pass: count primary + AMP controllers for struct_size() */
522 list_for_each_entry(d, &hci_dev_list, list) {
523 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
527 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
529 read_unlock(&hci_dev_list_lock);
/* Second pass: fill entries, skipping setup/config/user-channel */
534 list_for_each_entry(d, &hci_dev_list, list) {
535 if (hci_dev_test_flag(d, HCI_SETUP) ||
536 hci_dev_test_flag(d, HCI_CONFIG) ||
537 hci_dev_test_flag(d, HCI_USER_CHANNEL))
540 /* Devices marked as raw-only are neither configured
541 * nor unconfigured controllers.
543 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
546 if (d->dev_type == HCI_PRIMARY) {
547 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
548 rp->entry[count].type = 0x01;
550 rp->entry[count].type = 0x00;
551 } else if (d->dev_type == HCI_AMP) {
552 rp->entry[count].type = 0x02;
557 rp->entry[count].bus = d->bus;
558 rp->entry[count++].index = cpu_to_le16(d->id);
559 bt_dev_dbg(hdev, "Added hci%u", d->id);
562 rp->num_controllers = cpu_to_le16(count);
564 read_unlock(&hci_dev_list_lock);
566 /* If this command is called at least once, then all the
567 * default index and unconfigured index events are disabled
568 * and from now on only extended index events are used.
570 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
571 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
572 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
574 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
575 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
576 struct_size(rp, entry, count));
/* A controller counts as configured when neither of its pending
 * configuration requirements is outstanding: external config (if the
 * quirk demands it) and a valid public address (if the BDADDR quirks
 * demand one). The visible branches return on the unmet cases; the
 * success return is on a line not shown in this chunk.
 */
583 static bool is_configured(struct hci_dev *hdev)
585 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
586 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
589 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
590 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
591 !bacmp(&hdev->public_addr, BDADDR_ANY))
/* Build the little-endian bitmask of configuration options still
 * missing for @hdev -- the same two conditions checked by
 * is_configured(), expressed as MGMT_OPTION_* bits.
 */
597 static __le32 get_missing_options(struct hci_dev *hdev)
601 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
602 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
603 options |= MGMT_OPTION_EXTERNAL_CONFIG;
605 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
606 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
607 !bacmp(&hdev->public_addr, BDADDR_ANY))
608 options |= MGMT_OPTION_PUBLIC_ADDRESS;
610 return cpu_to_le32(options);
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-option
 * mask to sockets subscribed to option events, excluding @skip.
 */
613 static int new_options(struct hci_dev *hdev, struct sock *skip)
615 __le32 options = get_missing_options(hdev);
617 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
618 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
/* Complete @opcode successfully with the missing-options mask as the
 * reply payload.
 */
621 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
623 __le32 options = get_missing_options(hdev);
625 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: reply with manufacturer id plus
 * the supported and currently-missing configuration options.
 * Public-address configuration is supported only when the driver
 * provides a set_bdaddr callback.
 * NOTE(review): the matching hci_dev_lock() call is on a line not
 * visible in this chunk (only the unlock is shown).
 */
629 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
630 void *data, u16 data_len)
632 struct mgmt_rp_read_config_info rp;
635 bt_dev_dbg(hdev, "sock %p", sk);
639 memset(&rp, 0, sizeof(rp));
640 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
642 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
643 options |= MGMT_OPTION_EXTERNAL_CONFIG;
645 if (hdev->set_bdaddr)
646 options |= MGMT_OPTION_PUBLIC_ADDRESS;
648 rp.supported_options = cpu_to_le32(options);
649 rp.missing_options = get_missing_options(hdev);
651 hci_dev_unlock(hdev);
653 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
/* Build the MGMT_PHY_* bitmask of PHYs this controller can use, from
 * its LMP feature bits (BR/EDR slot and EDR rate capabilities) and LE
 * feature bits (2M and Coded PHY support).
 */
657 static u32 get_supported_phys(struct hci_dev *hdev)
659 u32 supported_phys = 0;
661 if (lmp_bredr_capable(hdev)) {
/* Basic rate 1-slot is mandatory for any BR/EDR controller */
662 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
664 if (hdev->features[0][0] & LMP_3SLOT)
665 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
667 if (hdev->features[0][0] & LMP_5SLOT)
668 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
670 if (lmp_edr_2m_capable(hdev)) {
671 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
673 if (lmp_edr_3slot_capable(hdev))
674 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
676 if (lmp_edr_5slot_capable(hdev))
677 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
679 if (lmp_edr_3m_capable(hdev)) {
680 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
682 if (lmp_edr_3slot_capable(hdev))
683 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
685 if (lmp_edr_5slot_capable(hdev))
686 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
691 if (lmp_le_capable(hdev)) {
/* LE 1M is mandatory for any LE controller */
692 supported_phys |= MGMT_PHY_LE_1M_TX;
693 supported_phys |= MGMT_PHY_LE_1M_RX;
695 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
696 supported_phys |= MGMT_PHY_LE_2M_TX;
697 supported_phys |= MGMT_PHY_LE_2M_RX;
700 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
701 supported_phys |= MGMT_PHY_LE_CODED_TX;
702 supported_phys |= MGMT_PHY_LE_CODED_RX;
706 return supported_phys;
/* Build the MGMT_PHY_* bitmask of PHYs currently selected. For BR/EDR
 * the hdev->pkt_type bits are exclusion flags: an EDR packet type is
 * selected when its HCI_xDHn bit is NOT set, while the DM/DH 3/5-slot
 * basic-rate types are selected when their bits ARE set. For LE the
 * default TX/RX PHY masks are consulted directly.
 */
709 static u32 get_selected_phys(struct hci_dev *hdev)
711 u32 selected_phys = 0;
713 if (lmp_bredr_capable(hdev)) {
714 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
716 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
717 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
719 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
720 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
722 if (lmp_edr_2m_capable(hdev)) {
723 if (!(hdev->pkt_type & HCI_2DH1))
724 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
726 if (lmp_edr_3slot_capable(hdev) &&
727 !(hdev->pkt_type & HCI_2DH3))
728 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
730 if (lmp_edr_5slot_capable(hdev) &&
731 !(hdev->pkt_type & HCI_2DH5))
732 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
734 if (lmp_edr_3m_capable(hdev)) {
735 if (!(hdev->pkt_type & HCI_3DH1))
736 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
738 if (lmp_edr_3slot_capable(hdev) &&
739 !(hdev->pkt_type & HCI_3DH3))
740 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
742 if (lmp_edr_5slot_capable(hdev) &&
743 !(hdev->pkt_type & HCI_3DH5))
744 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
749 if (lmp_le_capable(hdev)) {
750 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
751 selected_phys |= MGMT_PHY_LE_1M_TX;
753 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
754 selected_phys |= MGMT_PHY_LE_1M_RX;
756 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
757 selected_phys |= MGMT_PHY_LE_2M_TX;
759 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
760 selected_phys |= MGMT_PHY_LE_2M_RX;
762 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
763 selected_phys |= MGMT_PHY_LE_CODED_TX;
765 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
766 selected_phys |= MGMT_PHY_LE_CODED_RX;
769 return selected_phys;
/* PHYs userspace may toggle: everything supported except the mandatory
 * BR 1M 1-slot and LE 1M TX/RX PHYs, which are always enabled.
 */
772 static u32 get_configurable_phys(struct hci_dev *hdev)
774 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
775 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
/* Build the MGMT_SETTING_* bitmask of settings this controller can
 * support, derived from its LMP/LE capabilities and quirks. This is
 * what a controller *could* do; get_current_settings() below reports
 * what is actually enabled.
 */
778 static u32 get_supported_settings(struct hci_dev *hdev)
782 settings |= MGMT_SETTING_POWERED;
783 settings |= MGMT_SETTING_BONDABLE;
784 settings |= MGMT_SETTING_DEBUG_KEYS;
785 settings |= MGMT_SETTING_CONNECTABLE;
786 settings |= MGMT_SETTING_DISCOVERABLE;
788 if (lmp_bredr_capable(hdev)) {
789 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
790 settings |= MGMT_SETTING_FAST_CONNECTABLE;
791 settings |= MGMT_SETTING_BREDR;
792 settings |= MGMT_SETTING_LINK_SECURITY;
794 if (lmp_ssp_capable(hdev)) {
795 settings |= MGMT_SETTING_SSP;
/* High Speed (AMP) only when compiled in */
796 if (IS_ENABLED(CONFIG_BT_HS))
797 settings |= MGMT_SETTING_HS;
800 if (lmp_sc_capable(hdev))
801 settings |= MGMT_SETTING_SECURE_CONN;
803 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
805 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
808 if (lmp_le_capable(hdev)) {
809 settings |= MGMT_SETTING_LE;
810 settings |= MGMT_SETTING_SECURE_CONN;
811 settings |= MGMT_SETTING_PRIVACY;
812 settings |= MGMT_SETTING_STATIC_ADDRESS;
814 /* When the experimental feature for LL Privacy support is
815 * enabled, then advertising is no longer supported.
817 if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
818 settings |= MGMT_SETTING_ADVERTISING;
821 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
823 settings |= MGMT_SETTING_CONFIGURATION;
825 settings |= MGMT_SETTING_PHY_CONFIGURATION;
/* Build the MGMT_SETTING_* bitmask of settings currently in effect,
 * by sampling the corresponding HCI dev flags. Counterpart of
 * get_supported_settings() above.
 */
830 static u32 get_current_settings(struct hci_dev *hdev)
834 if (hdev_is_powered(hdev))
835 settings |= MGMT_SETTING_POWERED;
837 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
838 settings |= MGMT_SETTING_CONNECTABLE;
840 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
841 settings |= MGMT_SETTING_FAST_CONNECTABLE;
843 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
844 settings |= MGMT_SETTING_DISCOVERABLE;
846 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
847 settings |= MGMT_SETTING_BONDABLE;
849 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
850 settings |= MGMT_SETTING_BREDR;
852 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
853 settings |= MGMT_SETTING_LE;
855 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
856 settings |= MGMT_SETTING_LINK_SECURITY;
858 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
859 settings |= MGMT_SETTING_SSP;
861 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
862 settings |= MGMT_SETTING_HS;
864 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
865 settings |= MGMT_SETTING_ADVERTISING;
867 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
868 settings |= MGMT_SETTING_SECURE_CONN;
870 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
871 settings |= MGMT_SETTING_DEBUG_KEYS;
873 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
874 settings |= MGMT_SETTING_PRIVACY;
876 /* The current setting for static address has two purposes. The
877 * first is to indicate if the static address will be used and
878 * the second is to indicate if it is actually set.
880 * This means if the static address is not configured, this flag
881 * will never be set. If the address is configured, then if the
882 * address is actually used decides if the flag is set or not.
884 * For single mode LE only controllers and dual-mode controllers
885 * with BR/EDR disabled, the existence of the static address will
888 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
889 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
890 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
891 if (bacmp(&hdev->static_addr, BDADDR_ANY))
892 settings |= MGMT_SETTING_STATIC_ADDRESS;
895 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
896 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
/* Convenience wrapper: look up a pending mgmt command on the control
 * channel by opcode.
 */
901 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
903 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
/* Like pending_find(), but also matches on the command's user data
 * pointer.
 */
906 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
907 struct hci_dev *hdev,
910 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
/* Return the LE advertising discoverability flags (LE_AD_GENERAL /
 * LE_AD_LIMITED) that should currently be advertised. A pending
 * SET_DISCOVERABLE command takes precedence over the dev flags since
 * the flags have not reached their final value yet. Exported for use
 * by the advertising code.
 * NOTE(review): the `if (cp->val)` test preceding the LE_AD_GENERAL
 * return is on a line not visible in this chunk.
 */
913 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
915 struct mgmt_pending_cmd *cmd;
917 /* If there's a pending mgmt command the flags will not yet have
918 * their final values, so check for this first.
920 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
922 struct mgmt_mode *cp = cmd->param;
924 return LE_AD_GENERAL;
925 else if (cp->val == 0x02)
926 return LE_AD_LIMITED;
928 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
929 return LE_AD_LIMITED;
930 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
931 return LE_AD_GENERAL;
/* Return whether the controller should currently be treated as
 * connectable; a pending SET_CONNECTABLE command overrides the
 * HCI_CONNECTABLE flag. Exported for use by the request code.
 * NOTE(review): the return statement inside the pending-command branch
 * is on a line not visible in this chunk.
 */
937 bool mgmt_get_connectable(struct hci_dev *hdev)
939 struct mgmt_pending_cmd *cmd;
941 /* If there's a pending mgmt command the flag will not yet have
942 * it's final value, so check for this first.
944 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
946 struct mgmt_mode *cp = cmd->param;
951 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* Delayed-work handler (scheduled with CACHE_TIMEOUT) that clears the
 * HCI_SERVICE_CACHE flag and pushes the now-final EIR data and class
 * of device to the controller. No-op if the flag was already clear.
 */
954 static void service_cache_off(struct work_struct *work)
956 struct hci_dev *hdev = container_of(work, struct hci_dev,
958 struct hci_request req;
960 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
963 hci_req_init(&req, hdev);
967 __hci_req_update_eir(&req);
968 __hci_req_update_class(&req);
970 hci_dev_unlock(hdev);
972 hci_req_run(&req, NULL);
/* Delayed-work handler: mark the resolvable private address expired
 * and, if advertising is active, re-issue the advertising request so
 * a fresh RPA gets generated and programmed.
 */
975 static void rpa_expired(struct work_struct *work)
977 struct hci_dev *hdev = container_of(work, struct hci_dev,
979 struct hci_request req;
981 bt_dev_dbg(hdev, "");
983 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
985 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
988 /* The generation of a new RPA and programming it into the
989 * controller happens in the hci_req_enable_advertising()
992 hci_req_init(&req, hdev);
/* Extended advertising controllers restart the current instance */
993 if (ext_adv_capable(hdev))
994 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
996 __hci_req_enable_advertising(&req);
997 hci_req_run(&req, NULL);
/* One-time per-device mgmt initialization: set HCI_MGMT (returning
 * early if it was already set), wire up the delayed work handlers,
 * and clear the implicit bondable default so userspace must opt in.
 */
1000 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1002 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1005 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1006 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1008 /* Non-mgmt controlled devices get this bit set
1009 * implicitly so that pairing works for them, however
1010 * for mgmt we require user-space to explicitly enable
1013 hci_dev_clear_flag(hdev, HCI_BONDABLE);
/* MGMT_OP_READ_INFO handler: reply with address, HCI version,
 * manufacturer, supported/current settings, class of device and
 * names for this controller.
 * NOTE(review): the matching hci_dev_lock() call is on a line not
 * visible in this chunk (only the unlock is shown).
 */
1016 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1017 void *data, u16 data_len)
1019 struct mgmt_rp_read_info rp;
1021 bt_dev_dbg(hdev, "sock %p", sk);
1025 memset(&rp, 0, sizeof(rp));
1027 bacpy(&rp.bdaddr, &hdev->bdaddr);
1029 rp.version = hdev->hci_ver;
1030 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1032 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1033 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1035 memcpy(rp.dev_class, hdev->dev_class, 3);
1037 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1038 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1040 hci_dev_unlock(hdev);
1042 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Serialize the controller's EIR data (class of device when BR/EDR is
 * enabled, appearance when LE is enabled, complete and short local
 * names) into @eir; returns the number of bytes written.
 */
1046 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1051 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1052 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1053 hdev->dev_class, 3);
1055 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1056 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1059 name_len = strlen(hdev->dev_name);
1060 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1061 hdev->dev_name, name_len);
1063 name_len = strlen(hdev->short_name);
1064 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1065 hdev->short_name, name_len);
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info() but the
 * class/name data is carried as variable-length EIR. Calling this once
 * disables the separate class-of-device and local-name change events
 * for the socket in favour of MGMT_EV_EXT_INFO_CHANGED.
 * NOTE(review): the declaration of the stack `buf` backing rp and the
 * matching hci_dev_lock() are on lines not visible in this chunk.
 */
1070 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1071 void *data, u16 data_len)
1074 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1077 bt_dev_dbg(hdev, "sock %p", sk);
1079 memset(&buf, 0, sizeof(buf));
1083 bacpy(&rp->bdaddr, &hdev->bdaddr);
1085 rp->version = hdev->hci_ver;
1086 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1088 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1089 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1092 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1093 rp->eir_len = cpu_to_le16(eir_len);
1095 hci_dev_unlock(hdev);
1097 /* If this command is called at least once, then the events
1098 * for class of device and local name changes are disabled
1099 * and only the new extended controller information event
1102 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1103 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1104 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1106 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1107 sizeof(*rp) + eir_len);
/* Broadcast MGMT_EV_EXT_INFO_CHANGED (current EIR blob) to sockets
 * that opted into extended info events, excluding @skip.
 * NOTE(review): the declaration of the stack `buf` backing ev is on a
 * line not visible in this chunk.
 */
1110 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1113 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1116 memset(buf, 0, sizeof(buf));
1118 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1119 ev->eir_len = cpu_to_le16(eir_len);
1121 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1122 sizeof(*ev) + eir_len,
1123 HCI_MGMT_EXT_INFO_EVENTS, skip);
/* Complete @opcode successfully with the current settings bitmask as
 * the reply payload.
 */
1126 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1128 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1130 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Completion callback for clean_up_hci_state(): once no connections
 * remain, run the power-off work immediately instead of waiting for
 * its scheduled delay.
 */
1134 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1136 bt_dev_dbg(hdev, "status 0x%02x", status);
1138 if (hci_conn_count(hdev) == 0) {
1139 cancel_delayed_work(&hdev->power_off);
1140 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Broadcast MGMT_EV_ADVERTISING_ADDED for @instance to trusted
 * sockets, excluding the originating socket @sk. Exported.
 */
1144 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1146 struct mgmt_ev_advertising_added ev;
1148 ev.instance = instance;
1150 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Broadcast MGMT_EV_ADVERTISING_REMOVED for @instance to trusted
 * sockets, excluding the originating socket @sk. Exported.
 */
1153 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1156 struct mgmt_ev_advertising_removed ev;
1158 ev.instance = instance;
1160 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Clear any pending advertising-instance expiry: zero the recorded
 * timeout and cancel the delayed work.
 */
1163 static void cancel_adv_timeout(struct hci_dev *hdev)
1165 if (hdev->adv_instance_timeout) {
1166 hdev->adv_instance_timeout = 0;
1167 cancel_delayed_work(&hdev->adv_instance_expire);
/* Queue the HCI commands needed before powering a controller off:
 * disable page/inquiry scan, clear advertising instances, disable
 * advertising, stop discovery and abort every connection (reason
 * 0x15, power off). Returns the result of hci_req_run() with
 * clean_up_hci_complete() as completion.
 * NOTE(review): the declaration of `scan` (and its value) passed to
 * HCI_OP_WRITE_SCAN_ENABLE is on a line not visible in this chunk.
 */
1171 static int clean_up_hci_state(struct hci_dev *hdev)
1173 struct hci_request req;
1174 struct hci_conn *conn;
1175 bool discov_stopped;
1178 hci_req_init(&req, hdev);
1180 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1181 test_bit(HCI_PSCAN, &hdev->flags)) {
1183 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1186 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1188 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1189 __hci_req_disable_advertising(&req);
1191 discov_stopped = hci_req_stop_discovery(&req);
1193 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1194 /* 0x15 == Terminated due to Power Off */
1195 __hci_abort_conn(&req, conn, 0x15);
1198 err = hci_req_run(&req, clean_up_hci_complete);
1199 if (!err && discov_stopped)
1200 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler. Validates val is 0/1, rejects a second
 * concurrent SET_POWERED (MGMT_STATUS_BUSY, per the pending_find
 * check), short-circuits when the requested state already holds, and
 * otherwise registers a pending command and queues power_on work or a
 * delayed power_off preceded by clean_up_hci_state().
 * NOTE(review): several interior lines (status constant, error
 * handling after mgmt_pending_add, the if/else around power on/off)
 * are not visible in this chunk.
 */
1205 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1208 struct mgmt_mode *cp = data;
1209 struct mgmt_pending_cmd *cmd;
1212 bt_dev_dbg(hdev, "sock %p", sk);
1214 if (cp->val != 0x00 && cp->val != 0x01)
1215 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1216 MGMT_STATUS_INVALID_PARAMS);
1220 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1221 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1226 if (!!cp->val == hdev_is_powered(hdev)) {
1227 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1231 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1238 queue_work(hdev->req_workqueue, &hdev->power_on);
1241 /* Disconnect connections, stop scans, etc */
1242 err = clean_up_hci_state(hdev);
1244 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1245 HCI_POWER_OFF_TIMEOUT);
1247 /* ENODATA means there were no HCI commands queued */
1248 if (err == -ENODATA) {
1249 cancel_delayed_work(&hdev->power_off);
1250 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1256 hci_dev_unlock(hdev);
1260 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1262 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1264 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1265 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Public wrapper: broadcast New Settings to all subscribers, skipping
 * no socket.
 */
1268 int mgmt_new_settings(struct hci_dev *hdev)
1270 return new_settings(hdev, NULL);
1275 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer the pending command with the
 * current settings, unlink and free it, and remember (with a held
 * reference) the first responder's socket so the caller can skip it
 * when broadcasting New Settings.
 */
1279 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1281 struct cmd_lookup *match = data;
1283 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1285 list_del(&cmd->list);
/* Hold only the first socket encountered; caller releases it. */
1287 if (match->sk == NULL) {
1288 match->sk = cmd->sk;
1289 sock_hold(match->sk);
1292 mgmt_pending_free(cmd);
/* Fail a pending command with the status value @data points to and
 * remove the command from the pending list.
 */
1295 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1299 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1300 mgmt_pending_remove(cmd);
/* Complete a pending command through its cmd_complete handler when one
 * was installed; otherwise fall back to a plain status response.
 */
1303 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1305 if (cmd->cmd_complete) {
1308 cmd->cmd_complete(cmd, *status);
1309 mgmt_pending_remove(cmd);
/* No dedicated handler: reply with just the status code. */
1314 cmd_status_rsp(cmd, data);
1317 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1319 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1320 cmd->param, cmd->param_len);
1323 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1325 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1326 cmd->param, sizeof(struct mgmt_addr_info));
1329 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1331 if (!lmp_bredr_capable(hdev))
1332 return MGMT_STATUS_NOT_SUPPORTED;
1333 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1334 return MGMT_STATUS_REJECTED;
1336 return MGMT_STATUS_SUCCESS;
1339 static u8 mgmt_le_support(struct hci_dev *hdev)
1341 if (!lmp_le_capable(hdev))
1342 return MGMT_STATUS_NOT_SUPPORTED;
1343 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1344 return MGMT_STATUS_REJECTED;
1346 return MGMT_STATUS_SUCCESS;
/* Completion hook for the Set Discoverable HCI work.  On failure the
 * limited-discoverable flag is rolled back; on success the discoverable
 * timeout is (re)armed and New Settings is broadcast.
 */
1349 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1351 struct mgmt_pending_cmd *cmd;
1353 bt_dev_dbg(hdev, "status 0x%02x", status);
1357 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1362 u8 mgmt_err = mgmt_status(status);
1363 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1364 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Arm the discoverable timeout if one was configured. */
1368 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1369 hdev->discov_timeout > 0) {
1370 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1371 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1374 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1375 new_settings(hdev, cmd->sk);
1378 mgmt_pending_remove(cmd);
1381 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler.  cp->val: 0x00 off, 0x01 general,
 * 0x02 limited discoverable; cp->timeout must be 0 when disabling and
 * non-zero for limited mode.  Requires the device to be connectable,
 * rejects the call while advertising is paused, and handles the
 * powered-off case by just toggling flags.
 */
1384 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1387 struct mgmt_cp_set_discoverable *cp = data;
1388 struct mgmt_pending_cmd *cmd;
1392 bt_dev_dbg(hdev, "sock %p", sk);
/* At least one of BR/EDR or LE has to be enabled. */
1394 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1395 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1396 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1397 MGMT_STATUS_REJECTED);
1399 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1400 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1401 MGMT_STATUS_INVALID_PARAMS);
1403 timeout = __le16_to_cpu(cp->timeout);
1405 /* Disabling discoverable requires that no timeout is set,
1406 * and enabling limited discoverable requires a timeout.
1408 if ((cp->val == 0x00 && timeout > 0) ||
1409 (cp->val == 0x02 && timeout == 0))
1410 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1411 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be armed while the controller is powered off. */
1415 if (!hdev_is_powered(hdev) && timeout > 0) {
1416 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1417 MGMT_STATUS_NOT_POWERED);
1421 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1422 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1423 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1428 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1429 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1430 MGMT_STATUS_REJECTED);
1434 if (hdev->advertising_paused) {
1435 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Powered off: only flags change, no HCI traffic needed. */
1440 if (!hdev_is_powered(hdev)) {
1441 bool changed = false;
1443 /* Setting limited discoverable when powered off is
1444 * not a valid operation since it requires a timeout
1445 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1447 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1448 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1452 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1457 err = new_settings(hdev, sk);
1462 /* If the current mode is the same, then just update the timeout
1463 * value with the new value. And if only the timeout gets updated,
1464 * then no need for any HCI transactions.
1466 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1467 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1468 HCI_LIMITED_DISCOVERABLE)) {
1469 cancel_delayed_work(&hdev->discov_off);
1470 hdev->discov_timeout = timeout;
1472 if (cp->val && hdev->discov_timeout > 0) {
1473 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1474 queue_delayed_work(hdev->req_workqueue,
1475 &hdev->discov_off, to);
1478 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1482 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1488 /* Cancel any potential discoverable timeout that might be
1489 * still active and store new timeout value. The arming of
1490 * the timeout happens in the complete handler.
1492 cancel_delayed_work(&hdev->discov_off);
1493 hdev->discov_timeout = timeout;
1496 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1498 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1500 /* Limited discoverable mode */
1501 if (cp->val == 0x02)
1502 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1504 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Actual HCI updates are performed by the discoverable_update work. */
1506 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1510 hci_dev_unlock(hdev);
/* Completion hook for the Set Connectable HCI work: report the final
 * status to the pending command and broadcast New Settings on success.
 */
1514 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1516 struct mgmt_pending_cmd *cmd;
1518 bt_dev_dbg(hdev, "status 0x%02x", status);
1522 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1527 u8 mgmt_err = mgmt_status(status);
1528 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1532 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1533 new_settings(hdev, cmd->sk);
1536 mgmt_pending_remove(cmd);
1539 hci_dev_unlock(hdev);
/* Flag-only path of Set Connectable (used when the controller is not
 * powered): toggle HCI_CONNECTABLE, clear HCI_DISCOVERABLE when
 * turning connectable off, reply and broadcast if anything changed.
 */
1542 static int set_connectable_update_settings(struct hci_dev *hdev,
1543 struct sock *sk, u8 val)
1545 bool changed = false;
1548 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1552 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Non-connectable implies non-discoverable as well. */
1554 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1555 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1558 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1563 hci_req_update_scan(hdev);
1564 hci_update_background_scan(hdev);
1565 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler.  cp->val must be 0x00/0x01.  When
 * powered off only the settings flags are updated; otherwise the
 * change is applied via the connectable_update work.
 */
1571 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1574 struct mgmt_mode *cp = data;
1575 struct mgmt_pending_cmd *cmd;
1578 bt_dev_dbg(hdev, "sock %p", sk);
1580 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1581 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1582 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1583 MGMT_STATUS_REJECTED);
1585 if (cp->val != 0x00 && cp->val != 0x01)
1586 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1587 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flags-only update, no HCI traffic. */
1591 if (!hdev_is_powered(hdev)) {
1592 err = set_connectable_update_settings(hdev, sk, cp->val);
1596 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1597 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1598 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1603 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1610 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Turning connectable off cancels any discoverable timeout and
 * clears both discoverable flags.
 */
1612 if (hdev->discov_timeout > 0)
1613 cancel_delayed_work(&hdev->discov_off);
1615 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1616 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1617 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1620 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1624 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: toggle HCI_BONDABLE.  A pure flag
 * change, except that in limited-privacy mode with active advertising
 * the discoverable state is refreshed because the local advertising
 * address may change.
 */
1628 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1631 struct mgmt_mode *cp = data;
1635 bt_dev_dbg(hdev, "sock %p", sk);
1637 if (cp->val != 0x00 && cp->val != 0x01)
1638 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1639 MGMT_STATUS_INVALID_PARAMS);
1644 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1646 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1648 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1653 /* In limited privacy mode the change of bondable mode
1654 * may affect the local advertising address.
1656 if (hdev_is_powered(hdev) &&
1657 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1658 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1659 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1660 queue_work(hdev->req_workqueue,
1661 &hdev->discoverable_update);
1663 err = new_settings(hdev, sk);
1667 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: enable/disable BR/EDR link-level
 * authentication.  When powered off only the flag changes; when
 * powered the change is sent as HCI Write Auth Enable.
 */
1671 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1674 struct mgmt_mode *cp = data;
1675 struct mgmt_pending_cmd *cmd;
1679 bt_dev_dbg(hdev, "sock %p", sk);
1681 status = mgmt_bredr_support(hdev);
1683 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1686 if (cp->val != 0x00 && cp->val != 0x01)
1687 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1688 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: just toggle the flag and notify. */
1692 if (!hdev_is_powered(hdev)) {
1693 bool changed = false;
1695 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1696 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1700 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1705 err = new_settings(hdev, sk);
1710 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1711 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested state: short-circuit. */
1718 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1719 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1723 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1729 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1731 mgmt_pending_remove(cmd);
1736 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: enable/disable Secure Simple Pairing.
 * Requires BR/EDR support and an SSP-capable controller.  Disabling
 * SSP also drops High Speed support; when powered, the change goes out
 * as HCI Write SSP Mode (with debug mode turned off first if needed).
 */
1740 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1742 struct mgmt_mode *cp = data;
1743 struct mgmt_pending_cmd *cmd;
1747 bt_dev_dbg(hdev, "sock %p", sk);
1749 status = mgmt_bredr_support(hdev);
1751 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1753 if (!lmp_ssp_capable(hdev))
1754 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1755 MGMT_STATUS_NOT_SUPPORTED);
1757 if (cp->val != 0x00 && cp->val != 0x01)
1758 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1759 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag-only changes. */
1763 if (!hdev_is_powered(hdev)) {
1767 changed = !hci_dev_test_and_set_flag(hdev,
1770 changed = hci_dev_test_and_clear_flag(hdev,
1773 changed = hci_dev_test_and_clear_flag(hdev,
/* HS depends on SSP; clear it when SSP goes away. */
1776 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1779 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1784 err = new_settings(hdev, sk);
1789 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1790 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1795 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1796 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1800 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Turning SSP off also disables SSP debug mode when active. */
1806 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1807 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1808 sizeof(cp->val), &cp->val);
1810 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1812 mgmt_pending_remove(cmd);
1817 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: enable/disable High Speed (AMP) support.
 * Only valid when CONFIG_BT_HS is built in, BR/EDR works and SSP is
 * enabled.  This is a host flag; disabling is rejected while powered.
 */
1821 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1823 struct mgmt_mode *cp = data;
1828 bt_dev_dbg(hdev, "sock %p", sk);
1830 if (!IS_ENABLED(CONFIG_BT_HS))
1831 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1832 MGMT_STATUS_NOT_SUPPORTED);
1834 status = mgmt_bredr_support(hdev);
1836 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1838 if (!lmp_ssp_capable(hdev))
1839 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1840 MGMT_STATUS_NOT_SUPPORTED);
/* HS requires SSP to be enabled first. */
1842 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1843 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1844 MGMT_STATUS_REJECTED);
1846 if (cp->val != 0x00 && cp->val != 0x01)
1847 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1848 MGMT_STATUS_INVALID_PARAMS);
/* Reject while a Set SSP operation is still in flight. */
1852 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1853 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1859 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
/* Disabling HS is only allowed while powered off. */
1861 if (hdev_is_powered(hdev)) {
1862 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1863 MGMT_STATUS_REJECTED);
1867 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1870 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1875 err = new_settings(hdev, sk);
1878 hci_dev_unlock(hdev);
/* HCI completion callback for Set LE: on error fail all pending SET_LE
 * commands; on success answer them with the new settings, broadcast
 * New Settings and, when LE just got enabled, refresh the default
 * advertising/scan-response data.
 */
1882 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1884 struct cmd_lookup match = { NULL, hdev };
1889 u8 mgmt_err = mgmt_status(status);
1891 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1896 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1898 new_settings(hdev, match.sk);
1903 /* Make sure the controller has a good default for
1904 * advertising data. Restrict the update to when LE
1905 * has actually been enabled. During power on, the
1906 * update in powered_update_hci will take care of it.
1908 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1909 struct hci_request req;
1910 hci_req_init(&req, hdev);
/* Extended advertising needs the instance set up before the
 * scan response data can be programmed.
 */
1911 if (ext_adv_capable(hdev)) {
1914 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1916 __hci_req_update_scan_rsp_data(&req, 0x00);
1918 __hci_req_update_adv_data(&req, 0x00);
1919 __hci_req_update_scan_rsp_data(&req, 0x00);
1921 hci_req_run(&req, NULL);
1922 hci_update_background_scan(hdev);
1926 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: enable/disable LE on a dual-mode controller.
 * LE-only controllers cannot switch LE off.  When powered and the host
 * LE state really changes, HCI Write LE Host Supported is issued after
 * stopping any active advertising.
 */
1929 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1931 struct mgmt_mode *cp = data;
1932 struct hci_cp_write_le_host_supported hci_cp;
1933 struct mgmt_pending_cmd *cmd;
1934 struct hci_request req;
1938 bt_dev_dbg(hdev, "sock %p", sk);
1940 if (!lmp_le_capable(hdev))
1941 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1942 MGMT_STATUS_NOT_SUPPORTED);
1944 if (cp->val != 0x00 && cp->val != 0x01)
1945 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1946 MGMT_STATUS_INVALID_PARAMS);
1948 /* Bluetooth single mode LE only controllers or dual-mode
1949 * controllers configured as LE only devices, do not allow
1950 * switching LE off. These have either LE enabled explicitly
1951 * or BR/EDR has been previously switched off.
1953 * When trying to enable an already enabled LE, then gracefully
1954 * send a positive response. Trying to disable it however will
1955 * result into rejection.
1957 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1958 if (cp->val == 0x01)
1959 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1961 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1962 MGMT_STATUS_REJECTED);
1968 enabled = lmp_host_le_capable(hdev);
/* Disabling LE drops all advertising instances. */
1971 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
/* Flag-only path: powered off, or host LE state unchanged. */
1973 if (!hdev_is_powered(hdev) || val == enabled) {
1974 bool changed = false;
1976 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1977 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1981 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1982 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1986 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1991 err = new_settings(hdev, sk);
1996 if (pending_find(MGMT_OP_SET_LE, hdev) ||
1997 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1998 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2003 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2009 hci_req_init(&req, hdev);
2011 memset(&hci_cp, 0, sizeof(hci_cp));
2015 hci_cp.simul = 0x00;
/* Stop legacy advertising / clear extended sets before toggling. */
2017 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2018 __hci_req_disable_advertising(&req);
2020 if (ext_adv_capable(hdev))
2021 __hci_req_clear_ext_adv_sets(&req);
2024 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2027 err = hci_req_run(&req, le_enable_complete);
2029 mgmt_pending_remove(cmd);
2032 hci_dev_unlock(hdev);
2036 /* This is a helper function to test for pending mgmt commands that can
2037 * cause CoD or EIR HCI commands. We can only allow one such pending
2038 * mgmt command at a time since otherwise we cannot easily track what
2039 * the current values are, will be, and based on that calculate if a new
2040 * HCI command needs to be sent and if yes with what value.
2042 static bool pending_eir_or_class(struct hci_dev *hdev)
2044 struct mgmt_pending_cmd *cmd;
/* Scan the pending list for any opcode that may touch CoD or EIR. */
2046 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2047 switch (cmd->opcode) {
2048 case MGMT_OP_ADD_UUID:
2049 case MGMT_OP_REMOVE_UUID:
2050 case MGMT_OP_SET_DEV_CLASS:
2051 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used to detect 16/32-bit short-form UUIDs.
 */
2059 static const u8 bluetooth_base_uuid[] = {
2060 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2061 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Determine the size in bits of a 128-bit little-endian UUID: UUIDs
 * not derived from the Bluetooth Base UUID are full 128-bit values;
 * otherwise the top 32 bits decide between the short forms.
 */
2064 static u8 get_uuid_size(const u8 *uuid)
/* First 12 bytes must match the base UUID for a short form. */
2068 if (memcmp(uuid, bluetooth_base_uuid, 12))
2071 val = get_unaligned_le32(&uuid[12]);
/* Complete the pending command for @mgmt_op (if any) with the mapped
 * status and the current 3-byte device class as response payload.
 */
2078 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2080 struct mgmt_pending_cmd *cmd;
2084 cmd = pending_find(mgmt_op, hdev);
2088 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2089 mgmt_status(status), hdev->dev_class, 3);
2091 mgmt_pending_remove(cmd);
2094 hci_dev_unlock(hdev);
/* HCI request callback for Add UUID: forward the status to the pending
 * MGMT_OP_ADD_UUID command.
 */
2097 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2099 bt_dev_dbg(hdev, "status 0x%02x", status);
2101 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: record a new service UUID and refresh the
 * device class and EIR data.  Rejected while another CoD/EIR-affecting
 * command is pending.
 */
2104 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2106 struct mgmt_cp_add_uuid *cp = data;
2107 struct mgmt_pending_cmd *cmd;
2108 struct hci_request req;
2109 struct bt_uuid *uuid;
2112 bt_dev_dbg(hdev, "sock %p", sk);
2116 if (pending_eir_or_class(hdev)) {
2117 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2122 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2128 memcpy(uuid->uuid, cp->uuid, 16);
2129 uuid->svc_hint = cp->svc_hint;
2130 uuid->size = get_uuid_size(cp->uuid);
2132 list_add_tail(&uuid->list, &hdev->uuids);
2134 hci_req_init(&req, hdev);
2136 __hci_req_update_class(&req);
2137 __hci_req_update_eir(&req);
2139 err = hci_req_run(&req, add_uuid_complete);
/* -ENODATA: nothing to send, so complete immediately with the
 * current device class.
 */
2141 if (err != -ENODATA)
2144 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2145 hdev->dev_class, 3);
2149 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2158 hci_dev_unlock(hdev);
/* Re-enable the service cache (powered devices only) and schedule the
 * delayed flush if the cache was not already active.
 */
2162 static bool enable_service_cache(struct hci_dev *hdev)
2164 if (!hdev_is_powered(hdev))
2167 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2168 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request callback for Remove UUID: forward the status to the
 * pending MGMT_OP_REMOVE_UUID command.
 */
2176 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2178 bt_dev_dbg(hdev, "status 0x%02x", status);
2180 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID (or all, when
 * the all-zero wildcard UUID is given) and refresh device class/EIR.
 */
2183 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2186 struct mgmt_cp_remove_uuid *cp = data;
2187 struct mgmt_pending_cmd *cmd;
2188 struct bt_uuid *match, *tmp;
2189 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2190 struct hci_request req;
2193 bt_dev_dbg(hdev, "sock %p", sk);
2197 if (pending_eir_or_class(hdev)) {
2198 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID: wipe the whole list.  If the service cache got
 * re-armed, the delayed work will flush CoD/EIR for us.
 */
2203 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2204 hci_uuids_clear(hdev);
2206 if (enable_service_cache(hdev)) {
2207 err = mgmt_cmd_complete(sk, hdev->id,
2208 MGMT_OP_REMOVE_UUID,
2209 0, hdev->dev_class, 3);
/* Otherwise remove every entry matching the given UUID. */
2218 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2219 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2222 list_del(&match->list);
/* Nothing matched: the UUID was never registered. */
2228 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2229 MGMT_STATUS_INVALID_PARAMS);
2234 hci_req_init(&req, hdev);
2236 __hci_req_update_class(&req);
2237 __hci_req_update_eir(&req);
2239 err = hci_req_run(&req, remove_uuid_complete);
2241 if (err != -ENODATA)
2244 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2245 hdev->dev_class, 3);
2249 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2258 hci_dev_unlock(hdev);
/* HCI request callback for Set Device Class: forward the status to the
 * pending MGMT_OP_SET_DEV_CLASS command.
 */
2262 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2264 bt_dev_dbg(hdev, "status 0x%02x", status);
2266 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: set major/minor device class.  The
 * low two minor bits and high three major bits are reserved and must
 * be zero.  When powered, the class (and possibly EIR) is written to
 * the controller.
 */
2269 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2272 struct mgmt_cp_set_dev_class *cp = data;
2273 struct mgmt_pending_cmd *cmd;
2274 struct hci_request req;
2277 bt_dev_dbg(hdev, "sock %p", sk);
2279 if (!lmp_bredr_capable(hdev))
2280 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2281 MGMT_STATUS_NOT_SUPPORTED);
2285 if (pending_eir_or_class(hdev)) {
2286 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved bits of minor/major class must not be set. */
2291 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2292 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2293 MGMT_STATUS_INVALID_PARAMS);
2297 hdev->major_class = cp->major;
2298 hdev->minor_class = cp->minor;
2300 if (!hdev_is_powered(hdev)) {
2301 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2302 hdev->dev_class, 3);
2306 hci_req_init(&req, hdev);
/* Flush the service cache first; it influences the EIR data.
 * The lock is dropped around the synchronous cancel to avoid
 * deadlocking against the service_cache work.
 */
2308 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2309 hci_dev_unlock(hdev);
2310 cancel_delayed_work_sync(&hdev->service_cache);
2312 __hci_req_update_eir(&req);
2315 __hci_req_update_class(&req);
2317 err = hci_req_run(&req, set_class_complete);
2319 if (err != -ENODATA)
2322 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2323 hdev->dev_class, 3);
2327 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2336 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the supplied list.  Validates key count against the maximum
 * that fits in a u16-sized message, the exact payload length, the
 * debug_keys flag and every key's address type before touching state.
 */
2340 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2343 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound so expected_len below cannot overflow u16. */
2344 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2345 sizeof(struct mgmt_link_key_info));
2346 u16 key_count, expected_len;
2350 bt_dev_dbg(hdev, "sock %p", sk);
2352 if (!lmp_bredr_capable(hdev))
2353 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2354 MGMT_STATUS_NOT_SUPPORTED);
2356 key_count = __le16_to_cpu(cp->key_count);
2357 if (key_count > max_key_count) {
2358 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2360 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2361 MGMT_STATUS_INVALID_PARAMS);
/* Message length must match the declared key count exactly. */
2364 expected_len = struct_size(cp, keys, key_count);
2365 if (expected_len != len) {
2366 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2368 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2369 MGMT_STATUS_INVALID_PARAMS);
2372 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2373 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2374 MGMT_STATUS_INVALID_PARAMS);
2376 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* Validate all keys before clearing the existing store. */
2379 for (i = 0; i < key_count; i++) {
2380 struct mgmt_link_key_info *key = &cp->keys[i];
2382 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2383 return mgmt_cmd_status(sk, hdev->id,
2384 MGMT_OP_LOAD_LINK_KEYS,
2385 MGMT_STATUS_INVALID_PARAMS);
2390 hci_link_keys_clear(hdev);
2393 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2395 changed = hci_dev_test_and_clear_flag(hdev,
2396 HCI_KEEP_DEBUG_KEYS);
2399 new_settings(hdev, NULL);
2401 for (i = 0; i < key_count; i++) {
2402 struct mgmt_link_key_info *key = &cp->keys[i];
/* Silently skip keys the administrator has block-listed. */
2404 if (hci_is_blocked_key(hdev,
2405 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2407 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2412 /* Always ignore debug keys and require a new pairing if
2413 * the user wants to use them.
2415 if (key->type == HCI_LK_DEBUG_COMBINATION)
2418 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2419 key->type, key->pin_len, NULL);
2422 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2424 hci_dev_unlock(hdev);
2429 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2430 u8 addr_type, struct sock *skip_sk)
2432 struct mgmt_ev_device_unpaired ev;
2434 bacpy(&ev.addr.bdaddr, bdaddr);
2435 ev.addr.type = addr_type;
2437 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: remove pairing data (link key, or
 * SMP LTK/IRK for LE) for one device, optionally disconnecting it.
 * Replies immediately when no disconnect is needed; otherwise the
 * reply is deferred until the link termination completes.
 */
2441 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2444 struct mgmt_cp_unpair_device *cp = data;
2445 struct mgmt_rp_unpair_device rp;
2446 struct hci_conn_params *params;
2447 struct mgmt_pending_cmd *cmd;
2448 struct hci_conn *conn;
2452 memset(&rp, 0, sizeof(rp));
2453 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2454 rp.addr.type = cp->addr.type;
2456 if (!bdaddr_type_is_valid(cp->addr.type))
2457 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2458 MGMT_STATUS_INVALID_PARAMS,
2461 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2462 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2463 MGMT_STATUS_INVALID_PARAMS,
2468 if (!hdev_is_powered(hdev)) {
2469 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2470 MGMT_STATUS_NOT_POWERED, &rp,
2475 if (cp->addr.type == BDADDR_BREDR) {
2476 /* If disconnection is requested, then look up the
2477 * connection. If the remote device is connected, it
2478 * will be later used to terminate the link.
2480 * Setting it to NULL explicitly will cause no
2481 * termination of the link.
2484 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2489 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2491 err = mgmt_cmd_complete(sk, hdev->id,
2492 MGMT_OP_UNPAIR_DEVICE,
2493 MGMT_STATUS_NOT_PAIRED, &rp,
2501 /* LE address type */
2502 addr_type = le_addr_type(cp->addr.type);
2504 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2505 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2507 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2508 MGMT_STATUS_NOT_PAIRED, &rp,
2513 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2515 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2520 /* Defer clearing up the connection parameters until closing to
2521 * give a chance of keeping them if a repairing happens.
2523 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2525 /* Disable auto-connection parameters if present */
2526 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2528 if (params->explicit_connect)
2529 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2531 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2534 /* If disconnection is not requested, then clear the connection
2535 * variable so that the link is not terminated.
2537 if (!cp->disconnect)
2541 /* If the connection variable is set, then termination of the
2542 * link is requested.
2545 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2547 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Disconnect path: defer the reply until hci_abort_conn done. */
2551 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2558 cmd->cmd_complete = addr_cmd_complete;
2560 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2562 mgmt_pending_remove(cmd);
2565 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: terminate the BR/EDR or LE connection to
 * the given address.  The reply is deferred to the disconnect
 * completion via the pending command's generic_cmd_complete.
 */
2569 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2572 struct mgmt_cp_disconnect *cp = data;
2573 struct mgmt_rp_disconnect rp;
2574 struct mgmt_pending_cmd *cmd;
2575 struct hci_conn *conn;
2578 bt_dev_dbg(hdev, "sock %p", sk);
2580 memset(&rp, 0, sizeof(rp));
2581 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2582 rp.addr.type = cp->addr.type;
2584 if (!bdaddr_type_is_valid(cp->addr.type))
2585 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2586 MGMT_STATUS_INVALID_PARAMS,
2591 if (!test_bit(HCI_UP, &hdev->flags)) {
2592 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2593 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one disconnect may be pending at a time. */
2598 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2599 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2600 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2604 if (cp->addr.type == BDADDR_BREDR)
2605 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2608 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2609 le_addr_type(cp->addr.type));
2611 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2612 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2613 MGMT_STATUS_NOT_CONNECTED, &rp,
2618 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2624 cmd->cmd_complete = generic_cmd_complete;
2626 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2628 mgmt_pending_remove(cmd);
2631 hci_dev_unlock(hdev);
2635 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2637 switch (link_type) {
2639 switch (addr_type) {
2640 case ADDR_LE_DEV_PUBLIC:
2641 return BDADDR_LE_PUBLIC;
2644 /* Fallback to LE Random address type */
2645 return BDADDR_LE_RANDOM;
2649 /* Fallback to BR/EDR type */
2650 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: return the address of every
 * mgmt-visible connection, excluding SCO/eSCO links.
 */
2654 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2657 struct mgmt_rp_get_connections *rp;
2662 bt_dev_dbg(hdev, "sock %p", sk);
2666 if (!hdev_is_powered(hdev)) {
2667 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2668 MGMT_STATUS_NOT_POWERED);
/* First pass: count eligible connections to size the reply. */
2673 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2674 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2678 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
/* Second pass: fill the address list, skipping SCO/eSCO. */
2685 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2686 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2688 bacpy(&rp->addr[i].bdaddr, &c->dst);
2689 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2690 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2695 rp->conn_count = cpu_to_le16(i);
2697 /* Recalculate length in case of filtered SCO connections, etc */
2698 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2699 struct_size(rp, addr, i));
2704 hci_dev_unlock(hdev);
/* Queue a PIN Code Negative Reply HCI command with a matching pending
 * mgmt command; the mgmt reply is sent from the HCI completion.
 */
2708 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2709 struct mgmt_cp_pin_code_neg_reply *cp)
2711 struct mgmt_pending_cmd *cmd;
2714 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2719 cmd->cmd_complete = addr_cmd_complete;
2721 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2722 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2724 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward the user-supplied PIN code
 * to the controller.  A high-security connection requires a full
 * 16-byte PIN; otherwise a negative reply is sent instead.
 */
2729 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2732 struct hci_conn *conn;
2733 struct mgmt_cp_pin_code_reply *cp = data;
2734 struct hci_cp_pin_code_reply reply;
2735 struct mgmt_pending_cmd *cmd;
2738 bt_dev_dbg(hdev, "sock %p", sk);
2742 if (!hdev_is_powered(hdev)) {
2743 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2744 MGMT_STATUS_NOT_POWERED);
2748 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2750 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2751 MGMT_STATUS_NOT_CONNECTED);
/* Security level HIGH demands a 16-digit PIN; reject shorter
 * ones with a negative reply to the controller.
 */
2755 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2756 struct mgmt_cp_pin_code_neg_reply ncp;
2758 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2760 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2762 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2764 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2765 MGMT_STATUS_INVALID_PARAMS);
2770 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2776 cmd->cmd_complete = addr_cmd_complete;
2778 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2779 reply.pin_len = cp->pin_len;
2780 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2782 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2784 mgmt_pending_remove(cmd);
2787 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_IO_CAPABILITY.
 *
 * Rejects values above SMP_IO_KEYBOARD_DISPLAY, otherwise stores the
 * new IO capability in hdev under the device lock and returns a
 * command-complete with no parameters.
 */
2791 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2794 struct mgmt_cp_set_io_capability *cp = data;
2796 bt_dev_dbg(hdev, "sock %p", sk);
2798 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2799 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2800 MGMT_STATUS_INVALID_PARAMS);
2804 hdev->io_capability = cp->io_capability;
2806 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2808 hci_dev_unlock(hdev);
2810 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending MGMT_OP_PAIR_DEVICE command (if any) whose
 * user_data points at the given connection.  Returns NULL when no
 * matching pending command exists.
 */
2814 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2816 struct hci_dev *hdev = conn->hdev;
2817 struct mgmt_pending_cmd *cmd;
2819 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2820 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2823 if (cmd->user_data != conn)
/* Completion handler for a Pair Device command.
 *
 * Sends the Pair Device reply with the peer address and the given
 * status, detaches all confirm callbacks from the connection so no
 * further notifications arrive for it, drops the reference taken when
 * pairing started and keeps the stored connection parameters (the
 * device is now paired).
 */
2832 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2834 struct mgmt_rp_pair_device rp;
2835 struct hci_conn *conn = cmd->user_data;
2838 bacpy(&rp.addr.bdaddr, &conn->dst);
2839 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2841 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2842 status, &rp, sizeof(rp));
2844 /* So we don't get further callbacks for this connection */
2845 conn->connect_cfm_cb = NULL;
2846 conn->security_cfm_cb = NULL;
2847 conn->disconn_cfm_cb = NULL;
2849 hci_conn_drop(conn);
2851 /* The device is paired so there is no need to remove
2852 * its connection parameters anymore.
2854 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing over SMP finishes.  Completes
 * (and removes) the matching pending Pair Device command with SUCCESS
 * or FAILED depending on @complete.
 */
2861 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2863 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2864 struct mgmt_pending_cmd *cmd;
2866 cmd = find_pairing(conn);
2868 cmd->cmd_complete(cmd, status);
2869 mgmt_pending_remove(cmd);
/* BR/EDR connection/security/disconnect callback used while a Pair
 * Device command is pending.  Looks up the pending command for this
 * connection and completes it with the (HCI status translated) result.
 */
2873 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2875 struct mgmt_pending_cmd *cmd;
2877 BT_DBG("status %u", status);
2879 cmd = find_pairing(conn);
2881 BT_DBG("Unable to find a pending command");
2885 cmd->cmd_complete(cmd, mgmt_status(status));
2886 mgmt_pending_remove(cmd);
/* LE variant of pairing_complete_cb.  For LE an extra filter is
 * applied before completing (the early-return condition between the
 * debug print and find_pairing() is not visible in this view —
 * upstream it skips success status), then the pending Pair Device
 * command is completed and removed.
 */
2889 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2891 struct mgmt_pending_cmd *cmd;
2893 BT_DBG("status %u", status);
2898 cmd = find_pairing(conn);
2900 BT_DBG("Unable to find a pending command");
2904 cmd->cmd_complete(cmd, mgmt_status(status));
2905 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_PAIR_DEVICE.
 *
 * Validates the address type and IO capability, checks power state and
 * that the device is not already paired, then initiates a connection:
 * an ACL connection for BR/EDR, or a connect-by-scan for LE (after
 * pre-seeding the connection parameters for the peer).  On success a
 * pending command is queued and pairing callbacks are attached to the
 * connection; if the link is already up and secure enough the command
 * completes immediately.
 */
2908 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2911 struct mgmt_cp_pair_device *cp = data;
2912 struct mgmt_rp_pair_device rp;
2913 struct mgmt_pending_cmd *cmd;
2914 u8 sec_level, auth_type;
2915 struct hci_conn *conn;
2918 bt_dev_dbg(hdev, "sock %p", sk);
/* The reply always carries back the requested peer address. */
2920 memset(&rp, 0, sizeof(rp));
2921 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2922 rp.addr.type = cp->addr.type;
2924 if (!bdaddr_type_is_valid(cp->addr.type))
2925 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2926 MGMT_STATUS_INVALID_PARAMS,
2929 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2930 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2931 MGMT_STATUS_INVALID_PARAMS,
2936 if (!hdev_is_powered(hdev)) {
2937 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2938 MGMT_STATUS_NOT_POWERED, &rp,
2943 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2944 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2945 MGMT_STATUS_ALREADY_PAIRED, &rp,
/* Pairing always targets medium security with dedicated bonding. */
2950 sec_level = BT_SECURITY_MEDIUM;
2951 auth_type = HCI_AT_DEDICATED_BONDING;
2953 if (cp->addr.type == BDADDR_BREDR) {
2954 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2955 auth_type, CONN_REASON_PAIR_DEVICE);
2957 u8 addr_type = le_addr_type(cp->addr.type);
2958 struct hci_conn_params *p;
2960 /* When pairing a new device, it is expected to remember
2961 * this device for future connections. Adding the connection
2962 * parameter information ahead of time allows tracking
2963 * of the peripheral preferred values and will speed up any
2964 * further connection establishment.
2966 * If connection parameters already exist, then they
2967 * will be kept and this function does nothing.
2969 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type)
2971 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2972 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2974 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2975 sec_level, HCI_LE_CONN_TIMEOUT,
2976 CONN_REASON_PAIR_DEVICE);
/* Map connect errors onto mgmt status codes. */
2982 if (PTR_ERR(conn) == -EBUSY)
2983 status = MGMT_STATUS_BUSY;
2984 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2985 status = MGMT_STATUS_NOT_SUPPORTED;
2986 else if (PTR_ERR(conn) == -ECONNREFUSED)
2987 status = MGMT_STATUS_REJECTED;
2989 status = MGMT_STATUS_CONNECT_FAILED;
2991 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2992 status, &rp, sizeof(rp));
/* A connection that already has confirm callbacks attached is
 * being paired by someone else — reject with BUSY.
 */
2996 if (conn->connect_cfm_cb) {
2997 hci_conn_drop(conn);
2998 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2999 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3003 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3006 hci_conn_drop(conn);
3010 cmd->cmd_complete = pairing_complete;
3012 /* For LE, just connecting isn't a proof that the pairing finished */
3013 if (cp->addr.type == BDADDR_BREDR) {
3014 conn->connect_cfm_cb = pairing_complete_cb;
3015 conn->security_cfm_cb = pairing_complete_cb;
3016 conn->disconn_cfm_cb = pairing_complete_cb;
3018 conn->connect_cfm_cb = le_pairing_complete_cb;
3019 conn->security_cfm_cb = le_pairing_complete_cb;
3020 conn->disconn_cfm_cb = le_pairing_complete_cb;
3023 conn->io_capability = cp->io_cap;
/* Hold a reference on the connection for the pending command. */
3024 cmd->user_data = hci_conn_get(conn);
/* Already connected and secure enough: finish right away. */
3026 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3027 hci_conn_security(conn, sec_level, auth_type, true)) {
3028 cmd->cmd_complete(cmd, 0);
3029 mgmt_pending_remove(cmd);
3035 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE.
 *
 * Looks up the pending Pair Device command, verifies the supplied
 * address matches its connection, completes it with CANCELLED and
 * replies to the caller.  Then cleans up: removes any link key /
 * SMP pairing state for the peer and aborts the link if it was only
 * created for the pairing attempt.
 */
3039 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3042 struct mgmt_addr_info *addr = data;
3043 struct mgmt_pending_cmd *cmd;
3044 struct hci_conn *conn;
3047 bt_dev_dbg(hdev, "sock %p", sk);
3051 if (!hdev_is_powered(hdev)) {
3052 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3053 MGMT_STATUS_NOT_POWERED);
3057 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3059 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3060 MGMT_STATUS_INVALID_PARAMS);
3064 conn = cmd->user_data;
/* The cancel must name the same peer the pending pairing targets. */
3066 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3067 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3068 MGMT_STATUS_INVALID_PARAMS);
3072 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3073 mgmt_pending_remove(cmd);
3075 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3076 addr, sizeof(*addr));
3078 /* Since user doesn't want to proceed with the connection, abort any
3079 * ongoing pairing and then terminate the link if it was created
3080 * because of the pair device action.
3082 if (addr->type == BDADDR_BREDR)
3083 hci_remove_link_key(hdev, &addr->bdaddr);
3085 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3086 le_addr_type(addr->type));
3088 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3089 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3092 hci_dev_unlock(hdev);
/* Common backend for all user pairing responses (PIN neg reply,
 * user confirm (neg) reply, passkey (neg) reply).
 *
 * @mgmt_op:  the mgmt opcode being answered (used in replies).
 * @hci_op:   the HCI command to forward for BR/EDR links.
 * @passkey:  only used when @hci_op is HCI_OP_USER_PASSKEY_REPLY.
 *
 * LE responses are routed through SMP; BR/EDR responses queue a
 * pending command and forward the reply to the controller.
 */
3096 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3097 struct mgmt_addr_info *addr, u16 mgmt_op,
3098 u16 hci_op, __le32 passkey)
3100 struct mgmt_pending_cmd *cmd;
3101 struct hci_conn *conn;
3106 if (!hdev_is_powered(hdev)) {
3107 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3108 MGMT_STATUS_NOT_POWERED, addr,
/* Locate the live connection for the address (ACL or LE). */
3113 if (addr->type == BDADDR_BREDR)
3114 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3116 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3117 le_addr_type(addr->type));
3120 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3121 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are handled entirely by the SMP layer. */
3126 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3127 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3129 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3130 MGMT_STATUS_SUCCESS, addr,
3133 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3134 MGMT_STATUS_FAILED, addr,
3140 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3146 cmd->cmd_complete = addr_cmd_complete;
3148 /* Continue with pairing via HCI */
3149 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3150 struct hci_cp_user_passkey_reply cp;
3152 bacpy(&cp.bdaddr, &addr->bdaddr);
3153 cp.passkey = passkey;
3154 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
/* All other replies only carry the bdaddr. */
3156 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3160 mgmt_pending_remove(cmd);
3163 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper around
 * user_pairing_resp() with the matching HCI opcode and no passkey.
 */
3167 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3168 void *data, u16 len)
3170 struct mgmt_cp_pin_code_neg_reply *cp = data;
3172 bt_dev_dbg(hdev, "sock %p", sk);
3174 return user_pairing_resp(sk, hdev, &cp->addr,
3175 MGMT_OP_PIN_CODE_NEG_REPLY,
3176 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* Handler for MGMT_OP_USER_CONFIRM_REPLY.  Unlike the other wrappers
 * this one validates the parameter length explicitly before delegating
 * to user_pairing_resp().
 */
3179 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3182 struct mgmt_cp_user_confirm_reply *cp = data;
3184 bt_dev_dbg(hdev, "sock %p", sk);
3186 if (len != sizeof(*cp))
3187 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3188 MGMT_STATUS_INVALID_PARAMS);
3190 return user_pairing_resp(sk, hdev, &cp->addr,
3191 MGMT_OP_USER_CONFIRM_REPLY,
3192 HCI_OP_USER_CONFIRM_REPLY, 0);
/* Handler for MGMT_OP_USER_CONFIRM_NEG_REPLY: wrapper around
 * user_pairing_resp() with the negative-reply HCI opcode.
 */
3195 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3196 void *data, u16 len)
3198 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3200 bt_dev_dbg(hdev, "sock %p", sk);
3202 return user_pairing_resp(sk, hdev, &cp->addr,
3203 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3204 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* Handler for MGMT_OP_USER_PASSKEY_REPLY: wrapper around
 * user_pairing_resp() forwarding the user-supplied passkey.
 */
3207 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3210 struct mgmt_cp_user_passkey_reply *cp = data;
3212 bt_dev_dbg(hdev, "sock %p", sk);
3214 return user_pairing_resp(sk, hdev, &cp->addr,
3215 MGMT_OP_USER_PASSKEY_REPLY,
3216 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* Handler for MGMT_OP_USER_PASSKEY_NEG_REPLY: wrapper around
 * user_pairing_resp() with the negative-reply HCI opcode, no passkey.
 */
3219 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3220 void *data, u16 len)
3222 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3224 bt_dev_dbg(hdev, "sock %p", sk);
3226 return user_pairing_resp(sk, hdev, &cp->addr,
3227 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3228 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Expire the current advertising instance when a property it uses
 * (matched by @flags, e.g. local name or appearance) has changed:
 * cancel the advertising timeout and reschedule the next instance so
 * fresh data gets advertised.
 */
3231 static void adv_expire(struct hci_dev *hdev, u32 flags)
3233 struct adv_info *adv_instance;
3234 struct hci_request req;
3237 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3241 /* stop if current instance doesn't need to be changed */
3242 if (!(adv_instance->flags & flags))
3245 cancel_adv_timeout(hdev);
3247 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3251 hci_req_init(&req, hdev);
3252 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
/* Fire-and-forget: no completion callback needed here. */
3257 hci_req_run(&req, NULL);
/* HCI request completion callback for Set Local Name.
 *
 * Completes the pending MGMT_OP_SET_LOCAL_NAME command with either an
 * error status or the resulting name, and expires any advertising
 * instance that embeds the local name so it gets refreshed.
 */
3260 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3262 struct mgmt_cp_set_local_name *cp;
3263 struct mgmt_pending_cmd *cmd;
3265 bt_dev_dbg(hdev, "status 0x%02x", status);
3269 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3276 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3277 mgmt_status(status));
3279 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* While advertising, the name change must propagate into the
 * advertising/scan-response data.
 */
3282 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3283 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME)
3286 mgmt_pending_remove(cmd);
3289 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LOCAL_NAME.
 *
 * Short-circuits when the name and short name are unchanged.  When
 * powered off the names are just stored and a LOCAL_NAME_CHANGED event
 * is emitted; when powered a pending command is queued and an HCI
 * request updates the controller name, EIR data and (if advertising)
 * the scan response data, completing via set_name_complete().
 */
3292 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3295 struct mgmt_cp_set_local_name *cp = data;
3296 struct mgmt_pending_cmd *cmd;
3297 struct hci_request req;
3300 bt_dev_dbg(hdev, "sock %p", sk);
3304 /* If the old values are the same as the new ones just return a
3305 * direct command complete event.
3307 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3308 !memcmp(hdev->short_name, cp->short_name,
3309 sizeof(hdev->short_name))) {
3310 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* The short name never goes to the controller, store it now. */
3315 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3317 if (!hdev_is_powered(hdev)) {
3318 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3320 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3325 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3326 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3327 ext_info_changed(hdev, sk);
3332 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3338 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3340 hci_req_init(&req, hdev);
3342 if (lmp_bredr_capable(hdev)) {
3343 __hci_req_update_name(&req);
3344 __hci_req_update_eir(&req);
3347 /* The name is stored in the scan response data and so
3348 * no need to update the advertising data here.
3350 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3351 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3353 err = hci_req_run(&req, set_name_complete);
3355 mgmt_pending_remove(cmd);
3358 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_APPEARANCE (LE only).
 *
 * Stores the new appearance value; if it changed while advertising,
 * expires instances that embed the appearance and notifies extended
 * info listeners.
 */
3362 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3365 struct mgmt_cp_set_appearance *cp = data;
3369 bt_dev_dbg(hdev, "sock %p", sk);
3371 if (!lmp_le_capable(hdev))
3372 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3373 MGMT_STATUS_NOT_SUPPORTED);
3375 appearance = le16_to_cpu(cp->appearance);
3379 if (hdev->appearance != appearance) {
3380 hdev->appearance = appearance;
3382 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3383 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3385 ext_info_changed(hdev, sk);
3388 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3391 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_PHY_CONFIGURATION.  Replies with the
 * supported, selected and configurable PHY bitmasks, read under the
 * device lock.
 */
3396 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3397 void *data, u16 len)
3399 struct mgmt_rp_get_phy_configuration rp;
3401 bt_dev_dbg(hdev, "sock %p", sk);
3405 memset(&rp, 0, sizeof(rp));
3407 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3408 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3409 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3411 hci_dev_unlock(hdev);
3413 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast a PHY Configuration Changed event carrying the currently
 * selected PHYs to all mgmt sockets except @skip.
 */
3417 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3419 struct mgmt_ev_phy_configuration_changed ev;
3421 memset(&ev, 0, sizeof(ev));
3423 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3425 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* HCI request completion callback for LE Set Default PHY.  Completes
 * the pending MGMT_OP_SET_PHY_CONFIGURATION command and, on success,
 * broadcasts the PHY configuration change to other sockets.
 */
3429 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3430 u16 opcode, struct sk_buff *skb)
3432 struct mgmt_pending_cmd *cmd;
3434 bt_dev_dbg(hdev, "status 0x%02x", status);
3438 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3443 mgmt_cmd_status(cmd->sk, hdev->id,
3444 MGMT_OP_SET_PHY_CONFIGURATION,
3445 mgmt_status(status));
3447 mgmt_cmd_complete(cmd->sk, hdev->id,
3448 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3451 mgmt_phy_configuration_changed(hdev, cmd->sk);
3454 mgmt_pending_remove(cmd);
3457 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PHY_CONFIGURATION.
 *
 * Validates the requested PHY mask against the supported and
 * non-configurable PHYs, translates the BR/EDR portion into the ACL
 * packet-type bitmask stored in hdev->pkt_type, and — when the LE
 * portion changed — queues a pending command and issues an
 * HCI_OP_LE_SET_DEFAULT_PHY request that completes via
 * set_default_phy_complete().
 */
3460 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3461 void *data, u16 len)
3463 struct mgmt_cp_set_phy_configuration *cp = data;
3464 struct hci_cp_le_set_default_phy cp_phy;
3465 struct mgmt_pending_cmd *cmd;
3466 struct hci_request req;
3467 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3468 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3469 bool changed = false;
3472 bt_dev_dbg(hdev, "sock %p", sk);
3474 configurable_phys = get_configurable_phys(hdev);
3475 supported_phys = get_supported_phys(hdev);
3476 selected_phys = __le32_to_cpu(cp->selected_phys);
/* Reject any PHY bit the controller doesn't support at all. */
3478 if (selected_phys & ~supported_phys)
3479 return mgmt_cmd_status(sk, hdev->id,
3480 MGMT_OP_SET_PHY_CONFIGURATION,
3481 MGMT_STATUS_INVALID_PARAMS);
/* Non-configurable supported PHYs must remain selected. */
3483 unconfigure_phys = supported_phys & ~configurable_phys;
3485 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3486 return mgmt_cmd_status(sk, hdev->id,
3487 MGMT_OP_SET_PHY_CONFIGURATION,
3488 MGMT_STATUS_INVALID_PARAMS);
3490 if (selected_phys == get_selected_phys(hdev))
3491 return mgmt_cmd_complete(sk, hdev->id,
3492 MGMT_OP_SET_PHY_CONFIGURATION,
3497 if (!hdev_is_powered(hdev)) {
3498 err = mgmt_cmd_status(sk, hdev->id,
3499 MGMT_OP_SET_PHY_CONFIGURATION,
3500 MGMT_STATUS_REJECTED);
3504 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3505 err = mgmt_cmd_status(sk, hdev->id,
3506 MGMT_OP_SET_PHY_CONFIGURATION,
/* Map the BR/EDR PHY selections onto ACL packet types.  Note the
 * EDR bits are inverted: the HCI 2-DHx/3-DHx bits *disable* the
 * corresponding EDR packet type when set.
 */
3511 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3512 pkt_type |= (HCI_DH3 | HCI_DM3);
3514 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3516 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3517 pkt_type |= (HCI_DH5 | HCI_DM5);
3519 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3521 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3522 pkt_type &= ~HCI_2DH1;
3524 pkt_type |= HCI_2DH1;
3526 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3527 pkt_type &= ~HCI_2DH3;
3529 pkt_type |= HCI_2DH3;
3531 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3532 pkt_type &= ~HCI_2DH5;
3534 pkt_type |= HCI_2DH5;
3536 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3537 pkt_type &= ~HCI_3DH1;
3539 pkt_type |= HCI_3DH1;
3541 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3542 pkt_type &= ~HCI_3DH3;
3544 pkt_type |= HCI_3DH3;
3546 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3547 pkt_type &= ~HCI_3DH5;
3549 pkt_type |= HCI_3DH5;
3551 if (pkt_type != hdev->pkt_type) {
3552 hdev->pkt_type = pkt_type;
/* If only BR/EDR PHYs changed there is no HCI command to issue;
 * broadcast the change (if any) and complete immediately.
 */
3556 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3557 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3559 mgmt_phy_configuration_changed(hdev, sk);
3561 err = mgmt_cmd_complete(sk, hdev->id,
3562 MGMT_OP_SET_PHY_CONFIGURATION,
3568 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3575 hci_req_init(&req, hdev);
3577 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller "no preference" per direction. */
3579 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3580 cp_phy.all_phys |= 0x01;
3582 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3583 cp_phy.all_phys |= 0x02;
3585 if (selected_phys & MGMT_PHY_LE_1M_TX)
3586 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3588 if (selected_phys & MGMT_PHY_LE_2M_TX)
3589 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3591 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3592 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3594 if (selected_phys & MGMT_PHY_LE_1M_RX)
3595 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3597 if (selected_phys & MGMT_PHY_LE_2M_RX)
3598 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3600 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3601 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3603 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3605 err = hci_req_run_skb(&req, set_default_phy_complete);
3607 mgmt_pending_remove(cmd);
3610 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_BLOCKED_KEYS.
 *
 * Validates the key count against the maximum that fits in a u16-sized
 * request and against the actual payload length, then atomically
 * replaces hdev->blocked_keys with the supplied list.  Allocation
 * failure is reported as NO_RESOURCES (partial list handling on
 * failure is elided from this view).
 */
3615 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3618 int err = MGMT_STATUS_SUCCESS;
3619 struct mgmt_cp_set_blocked_keys *keys = data;
/* Upper bound so that expected_len below cannot overflow u16. */
3620 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3621 sizeof(struct mgmt_blocked_key_info));
3622 u16 key_count, expected_len;
3625 bt_dev_dbg(hdev, "sock %p", sk);
3627 key_count = __le16_to_cpu(keys->key_count);
3628 if (key_count > max_key_count) {
3629 bt_dev_err(hdev, "too big key_count value %u", key_count);
3630 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3631 MGMT_STATUS_INVALID_PARAMS);
3634 expected_len = struct_size(keys, keys, key_count);
3635 if (expected_len != len) {
3636 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3638 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3639 MGMT_STATUS_INVALID_PARAMS);
/* Replace the whole list: clear, then add each new entry. */
3644 hci_blocked_keys_clear(hdev);
3646 for (i = 0; i < keys->key_count; ++i) {
3647 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3650 err = MGMT_STATUS_NO_RESOURCES;
3654 b->type = keys->keys[i].type;
3655 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3656 list_add_rcu(&b->list, &hdev->blocked_keys);
3658 hci_dev_unlock(hdev);
3660 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* Handler for MGMT_OP_SET_WIDEBAND_SPEECH.
 *
 * Only available on controllers with the wideband-speech quirk.
 * Toggles the HCI_WIDEBAND_SPEECH_ENABLED flag; changing the value
 * while the adapter is powered is rejected.  Emits New Settings when
 * the flag actually changed.
 */
3664 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3665 void *data, u16 len)
3667 struct mgmt_mode *cp = data;
3669 bool changed = false;
3671 bt_dev_dbg(hdev, "sock %p", sk);
3673 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3674 return mgmt_cmd_status(sk, hdev->id,
3675 MGMT_OP_SET_WIDEBAND_SPEECH,
3676 MGMT_STATUS_NOT_SUPPORTED);
3678 if (cp->val != 0x00 && cp->val != 0x01)
3679 return mgmt_cmd_status(sk, hdev->id,
3680 MGMT_OP_SET_WIDEBAND_SPEECH,
3681 MGMT_STATUS_INVALID_PARAMS);
3685 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3686 err = mgmt_cmd_status(sk, hdev->id,
3687 MGMT_OP_SET_WIDEBAND_SPEECH,
/* The setting can only be flipped while powered off. */
3692 if (hdev_is_powered(hdev) &&
3693 !!cp->val != hci_dev_test_flag(hdev,
3694 HCI_WIDEBAND_SPEECH_ENABLED)) {
3695 err = mgmt_cmd_status(sk, hdev->id,
3696 MGMT_OP_SET_WIDEBAND_SPEECH,
3697 MGMT_STATUS_REJECTED);
3702 changed = !hci_dev_test_and_set_flag(hdev,
3703 HCI_WIDEBAND_SPEECH_ENABLED);
3705 changed = hci_dev_test_and_clear_flag(hdev,
3706 HCI_WIDEBAND_SPEECH_ENABLED);
3708 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3713 err = new_settings(hdev, sk);
3716 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_READ_CONTROLLER_CAP.
 *
 * Builds an EIR-style capability list in a stack buffer: security
 * flags (public-key validation, key-size enforcement), the maximum
 * BR/EDR and SMP encryption key sizes, and — when the controller
 * supports LE Read Transmit Power — the min/max LE TX power range.
 * Support is detected from the controller's supported-commands
 * bitmask (hdev->commands).
 */
3720 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3721 void *data, u16 data_len)
3724 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3727 u8 tx_power_range[2];
3729 bt_dev_dbg(hdev, "sock %p", sk);
3731 memset(&buf, 0, sizeof(buf));
3735 /* When the Read Simple Pairing Options command is supported, then
3736 * the remote public key validation is supported.
3738 * Alternatively, when Microsoft extensions are available, they can
3739 * indicate support for public key validation as well.
3741 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
3742 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3744 flags |= 0x02; /* Remote public key validation (LE) */
3746 /* When the Read Encryption Key Size command is supported, then the
3747 * encryption key size is enforced.
3749 if (hdev->commands[20] & 0x10)
3750 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3752 flags |= 0x08; /* Encryption key size enforcement (LE) */
3754 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3757 /* When the Read Simple Pairing Options command is supported, then
3758 * also max encryption key size information is provided.
3760 if (hdev->commands[41] & 0x08)
3761 cap_len = eir_append_le16(rp->cap, cap_len,
3762 MGMT_CAP_MAX_ENC_KEY_SIZE,
3763 hdev->max_enc_key_size);
3765 cap_len = eir_append_le16(rp->cap, cap_len,
3766 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3767 SMP_MAX_ENC_KEY_SIZE);
3769 /* Append the min/max LE tx power parameters if we were able to fetch
3770 * it from the controller
3772 if (hdev->commands[38] & 0x80) {
3773 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3774 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3775 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
/* Reply length is the fixed header plus the appended EIR data. */
3779 rp->cap_len = cpu_to_le16(cap_len);
3781 hci_dev_unlock(hdev);
3783 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3784 rp, sizeof(*rp) + cap_len);
/* UUIDs identifying the experimental features exposed over the mgmt
 * Experimental Features interface.  Each array stores the UUID shown
 * in the comment above it in reversed (little-endian) byte order, as
 * required by the mgmt protocol.
 */
3787 #ifdef CONFIG_BT_FEATURE_DEBUG
3788 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3789 static const u8 debug_uuid[16] = {
3790 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3791 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3795 /* 330859bc-7506-492d-9370-9a6f0614037f */
3796 static const u8 quality_report_uuid[16] = {
3797 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
3798 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
3801 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
3802 static const u8 offload_codecs_uuid[16] = {
3803 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
3804 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
3807 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3808 static const u8 simult_central_periph_uuid[16] = {
3809 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3810 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3813 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3814 static const u8 rpa_resolution_uuid[16] = {
3815 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3816 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
/* Handler for MGMT_OP_READ_EXP_FEATURES_INFO.
 *
 * Works both with (@hdev set) and without (@hdev NULL) a controller
 * index.  Fills the reply with one entry per available experimental
 * feature (debug, simultaneous central/peripheral, LL privacy,
 * quality report, offload codecs), each with its UUID and current
 * flag bits, then subscribes the socket to future feature-change
 * events.
 */
3819 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3820 void *data, u16 data_len)
3822 char buf[102]; /* Enough space for 5 features: 2 + 20 * 5 */
3823 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3827 bt_dev_dbg(hdev, "sock %p", sk);
3829 memset(&buf, 0, sizeof(buf));
3831 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Debug feature is global (no controller), bit 0 = enabled. */
3833 flags = bt_dbg_get() ? BIT(0) : 0;
3835 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3836 rp->features[idx].flags = cpu_to_le32(flags);
/* Simultaneous central/peripheral support is derived from the
 * controller's supported LE states bitmap.
 */
3842 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3843 (hdev->le_states[4] & 0x08) && /* Central */
3844 (hdev->le_states[4] & 0x40) && /* Peripheral */
3845 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3850 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3851 rp->features[idx].flags = cpu_to_le32(flags);
3855 if (hdev && use_ll_privacy(hdev)) {
3856 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3857 flags = BIT(0) | BIT(1);
3861 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3862 rp->features[idx].flags = cpu_to_le32(flags);
3866 if (hdev && hdev->set_quality_report) {
3867 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
3872 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
3873 rp->features[idx].flags = cpu_to_le32(flags);
3877 if (hdev && hdev->get_data_path_id) {
3878 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
3883 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
3884 rp->features[idx].flags = cpu_to_le32(flags);
3888 rp->feature_count = cpu_to_le16(idx);
3890 /* After reading the experimental features information, enable
3891 * the events to update client on any future change.
3893 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3895 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3896 MGMT_OP_READ_EXP_FEATURES_INFO,
3897 0, rp, sizeof(*rp) + (20 * idx));
/* Emit an Experimental Feature Changed event for the LL privacy
 * feature.  Bit 1 (settings changed) is always set; bit 0 reflects
 * the enabled state.  Sent only to sockets that subscribed to
 * experimental feature events, skipping the originator.
 */
3900 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3903 struct mgmt_ev_exp_feature_changed ev;
3905 memset(&ev, 0, sizeof(ev));
3906 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3907 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3909 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3911 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Emit an Experimental Feature Changed event for the (global, no
 * controller index) debug feature to subscribed sockets, skipping
 * the originator.
 */
3915 #ifdef CONFIG_BT_FEATURE_DEBUG
3916 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3918 struct mgmt_ev_exp_feature_changed ev;
3920 memset(&ev, 0, sizeof(ev));
3921 memcpy(ev.uuid, debug_uuid, 16);
3922 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3924 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3926 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Emit an Experimental Feature Changed event for the quality report
 * feature to subscribed sockets, skipping the originator.
 */
3930 static int exp_quality_report_feature_changed(bool enabled, struct sock *skip)
3932 struct mgmt_ev_exp_feature_changed ev;
3934 memset(&ev, 0, sizeof(ev));
3935 memcpy(ev.uuid, quality_report_uuid, 16);
3936 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3938 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3940 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Table-entry helper pairing an experimental-feature UUID with its
 * set handler.  NOTE(review): part of the macro body (the .uuid
 * initializer line) is not visible in this view — confirm against
 * the full source before editing.
 */
3943 #define EXP_FEAT(_uuid, _set_func) \
3946 .set_func = _set_func, \
3950 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
3951 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
3953 struct mgmt_rp_set_exp_feature rp;
3955 memset(rp.uuid, 0, 16);
3956 rp.flags = cpu_to_le32(0);
3958 #ifdef CONFIG_BT_FEATURE_DEBUG
3960 bool changed = bt_dbg_get();
3965 exp_debug_feature_changed(false, sk);
3969 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3970 bool changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3972 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3975 exp_ll_privacy_feature_changed(false, hdev, sk);
3978 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3980 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3981 MGMT_OP_SET_EXP_FEATURE, 0,
/* Set handler for the debug experimental feature (global: must be
 * invoked without a controller index).  Accepts a single boolean
 * octet, updates the bt_dbg state and notifies other subscribed
 * sockets when the value changed.
 */
3985 #ifdef CONFIG_BT_FEATURE_DEBUG
3986 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
3987 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
3989 struct mgmt_rp_set_exp_feature rp;
3994 /* Command requires to use the non-controller index */
3996 return mgmt_cmd_status(sk, hdev->id,
3997 MGMT_OP_SET_EXP_FEATURE,
3998 MGMT_STATUS_INVALID_INDEX);
4000 /* Parameters are limited to a single octet */
4001 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4002 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4003 MGMT_OP_SET_EXP_FEATURE,
4004 MGMT_STATUS_INVALID_PARAMS);
4006 /* Only boolean on/off is supported */
4007 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4008 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4009 MGMT_OP_SET_EXP_FEATURE,
4010 MGMT_STATUS_INVALID_PARAMS);
4012 val = !!cp->param[0];
4013 changed = val ? !bt_dbg_get() : bt_dbg_get();
4016 memcpy(rp.uuid, debug_uuid, 16);
4017 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4019 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4021 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4022 MGMT_OP_SET_EXP_FEATURE, 0,
/* Only broadcast when the value actually changed. */
4026 exp_debug_feature_changed(val, sk);
/* Set handler for the LL privacy (RPA resolution) experimental
 * feature.  Requires a controller index and a powered-down adapter;
 * accepts a single boolean octet.  Toggles HCI_ENABLE_LL_PRIVACY
 * (clearing HCI_ADVERTISING when enabling) and notifies subscribed
 * sockets when the flag changed.
 */
4032 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4033 struct mgmt_cp_set_exp_feature *cp,
4036 struct mgmt_rp_set_exp_feature rp;
4041 /* Command requires to use the controller index */
4043 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4044 MGMT_OP_SET_EXP_FEATURE,
4045 MGMT_STATUS_INVALID_INDEX);
4047 /* Changes can only be made when controller is powered down */
4048 if (hdev_is_powered(hdev))
4049 return mgmt_cmd_status(sk, hdev->id,
4050 MGMT_OP_SET_EXP_FEATURE,
4051 MGMT_STATUS_REJECTED);
4053 /* Parameters are limited to a single octet */
4054 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4055 return mgmt_cmd_status(sk, hdev->id,
4056 MGMT_OP_SET_EXP_FEATURE,
4057 MGMT_STATUS_INVALID_PARAMS);
4059 /* Only boolean on/off is supported */
4060 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4061 return mgmt_cmd_status(sk, hdev->id,
4062 MGMT_OP_SET_EXP_FEATURE,
4063 MGMT_STATUS_INVALID_PARAMS);
4065 val = !!cp->param[0];
4068 changed = !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);
4069 hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
/* LL privacy and software-based advertising are mutually
 * exclusive, so advertising is switched off here.
 */
4070 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4072 /* Enable LL privacy + supported settings changed */
4073 flags = BIT(0) | BIT(1);
4075 changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);
4076 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
4078 /* Disable LL privacy + supported settings changed */
4082 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4083 rp.flags = cpu_to_le32(flags);
4085 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4087 err = mgmt_cmd_complete(sk, hdev->id,
4088 MGMT_OP_SET_EXP_FEATURE, 0,
4092 exp_ll_privacy_feature_changed(val, hdev, sk);
/* Set handler for the quality report experimental feature.
 *
 * Requires a controller index and a single boolean octet.  Under the
 * request-sync lock it invokes the driver's set_quality_report()
 * callback (NOT_SUPPORTED when the driver lacks one, FAILED if the
 * callback errors), updates HCI_QUALITY_REPORT accordingly and
 * notifies subscribed sockets when the value changed.
 */
4097 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4098 struct mgmt_cp_set_exp_feature *cp,
4101 struct mgmt_rp_set_exp_feature rp;
4105 /* Command requires to use a valid controller index */
4107 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4108 MGMT_OP_SET_EXP_FEATURE,
4109 MGMT_STATUS_INVALID_INDEX);
4111 /* Parameters are limited to a single octet */
4112 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4113 return mgmt_cmd_status(sk, hdev->id,
4114 MGMT_OP_SET_EXP_FEATURE,
4115 MGMT_STATUS_INVALID_PARAMS);
4117 /* Only boolean on/off is supported */
4118 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4119 return mgmt_cmd_status(sk, hdev->id,
4120 MGMT_OP_SET_EXP_FEATURE,
4121 MGMT_STATUS_INVALID_PARAMS);
4123 hci_req_sync_lock(hdev);
4125 val = !!cp->param[0];
4126 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4128 if (!hdev->set_quality_report) {
4129 err = mgmt_cmd_status(sk, hdev->id,
4130 MGMT_OP_SET_EXP_FEATURE,
4131 MGMT_STATUS_NOT_SUPPORTED);
4132 goto unlock_quality_report;
/* Driver callback does the actual controller-side toggle. */
4136 err = hdev->set_quality_report(hdev, val);
4138 err = mgmt_cmd_status(sk, hdev->id,
4139 MGMT_OP_SET_EXP_FEATURE,
4140 MGMT_STATUS_FAILED);
4141 goto unlock_quality_report;
4144 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4146 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4149 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4151 memcpy(rp.uuid, quality_report_uuid, 16);
4152 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4153 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4154 err = mgmt_cmd_complete(sk, hdev->id,
4155 MGMT_OP_SET_EXP_FEATURE, 0,
4159 exp_quality_report_feature_changed(val, sk);
4161 unlock_quality_report:
4162 hci_req_sync_unlock(hdev);
/* Broadcast an MGMT_EV_EXP_FEATURE_CHANGED event for the offload-codecs
 * experimental feature to all sockets that opted in via
 * HCI_MGMT_EXP_FEATURE_EVENTS, excluding @skip (the socket that issued
 * the change and already got a command response).
 */
4166 static int exp_offload_codec_feature_changed(bool enabled, struct sock *skip)
4168 struct mgmt_ev_exp_feature_changed ev;
4170 memset(&ev, 0, sizeof(ev));
4171 memcpy(ev.uuid, offload_codecs_uuid, 16);
/* BIT(0) in flags encodes the enabled state of the feature. */
4172 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4174 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
4176 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Experimental-feature handler: enable/disable use of vendor codec
 * offload (HCI_OFFLOAD_CODECS_ENABLED flag).  Requires the driver to
 * provide a get_data_path_id hook; otherwise the feature is unsupported.
 * NOTE(review): interior lines are elided in this excerpt (original line
 * numbers jump), so some conditions/braces are not visible.
 */
4179 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4180 struct mgmt_cp_set_exp_feature *cp,
4185 struct mgmt_rp_set_exp_feature rp;
4187 /* Command requires to use a valid controller index */
4189 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4190 MGMT_OP_SET_EXP_FEATURE,
4191 MGMT_STATUS_INVALID_INDEX);
4193 /* Parameters are limited to a single octet */
4194 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4195 return mgmt_cmd_status(sk, hdev->id,
4196 MGMT_OP_SET_EXP_FEATURE,
4197 MGMT_STATUS_INVALID_PARAMS);
4199 /* Only boolean on/off is supported */
4200 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4201 return mgmt_cmd_status(sk, hdev->id,
4202 MGMT_OP_SET_EXP_FEATURE,
4203 MGMT_STATUS_INVALID_PARAMS);
4205 val = !!cp->param[0];
4206 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
/* Codec offload requires driver support to discover data path IDs. */
4208 if (!hdev->get_data_path_id) {
4209 return mgmt_cmd_status(sk, hdev->id,
4210 MGMT_OP_SET_EXP_FEATURE,
4211 MGMT_STATUS_NOT_SUPPORTED);
/* Toggle the flag to match the requested value (branch condition on
 * 'val' is elided in this excerpt).
 */
4216 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4218 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4221 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4224 memcpy(rp.uuid, offload_codecs_uuid, 16);
4225 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4226 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4227 err = mgmt_cmd_complete(sk, hdev->id,
4228 MGMT_OP_SET_EXP_FEATURE, 0,
/* Notify other listeners; the issuing socket is skipped. */
4232 exp_offload_codec_feature_changed(val, sk);
/* Dispatch table mapping experimental-feature UUIDs to their setter
 * functions.  The ZERO_KEY entry handles the all-zero UUID (global
 * debug-feature toggle); the table is terminated by a NULL sentinel so
 * set_exp_feature() can iterate without a length.
 */
4237 static const struct mgmt_exp_feature {
4239 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4240 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4241 } exp_features[] = {
4242 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4243 #ifdef CONFIG_BT_FEATURE_DEBUG
4244 EXP_FEAT(debug_uuid, set_debug_func),
4246 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4247 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4248 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4250 /* end with a null feature */
4251 EXP_FEAT(NULL, NULL)
/* MGMT_OP_SET_EXP_FEATURE entry point: look up the UUID from the command
 * parameters in exp_features[] and delegate to the matching setter.
 * Unknown UUIDs are rejected with MGMT_STATUS_NOT_SUPPORTED.  hdev may
 * be NULL (global features), hence the MGMT_INDEX_NONE fallback.
 */
4254 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4255 void *data, u16 data_len)
4257 struct mgmt_cp_set_exp_feature *cp = data;
4260 bt_dev_dbg(hdev, "sock %p", sk);
4262 for (i = 0; exp_features[i].uuid; i++) {
4263 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4264 return exp_features[i].set_func(sk, hdev, cp, data_len);
4267 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4268 MGMT_OP_SET_EXP_FEATURE,
4269 MGMT_STATUS_NOT_SUPPORTED);
/* Bitmask with every currently-defined per-device connection flag set
 * (all bits below HCI_CONN_FLAG_MAX).
 */
4272 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
/* MGMT_OP_GET_DEVICE_FLAGS: report the supported and current per-device
 * flags for one address.  BR/EDR addresses are looked up in the accept
 * list; LE addresses in the connection-parameter list.  status stays
 * INVALID_PARAMS if the device is not found.
 * NOTE(review): interior lines (lock call, lookup conditionals) are
 * elided in this excerpt.
 */
4277 struct mgmt_cp_get_device_flags *cp = data;
4278 struct mgmt_rp_get_device_flags rp;
4279 struct bdaddr_list_with_flags *br_params;
4280 struct hci_conn_params *params;
4281 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4282 u32 current_flags = 0;
4283 u8 status = MGMT_STATUS_INVALID_PARAMS;
4285 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4286 &cp->addr.bdaddr, cp->addr.type);
4290 memset(&rp, 0, sizeof(rp));
4292 if (cp->addr.type == BDADDR_BREDR) {
4293 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4299 current_flags = br_params->current_flags;
/* LE: flags live on the connection parameters entry instead. */
4301 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4302 le_addr_type(cp->addr.type));
4307 current_flags = params->current_flags;
4310 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4311 rp.addr.type = cp->addr.type;
4312 rp.supported_flags = cpu_to_le32(supported_flags);
4313 rp.current_flags = cpu_to_le32(current_flags);
4315 status = MGMT_STATUS_SUCCESS;
4318 hci_dev_unlock(hdev);
4320 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Emit MGMT_EV_DEVICE_FLAGS_CHANGED to all management sockets except
 * @sk (the socket that issued the change).
 */
4324 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4325 bdaddr_t *bdaddr, u8 bdaddr_type,
4326 u32 supported_flags, u32 current_flags)
4328 struct mgmt_ev_device_flags_changed ev;
4330 bacpy(&ev.addr.bdaddr, bdaddr);
4331 ev.addr.type = bdaddr_type;
4332 ev.supported_flags = cpu_to_le32(supported_flags);
4333 ev.current_flags = cpu_to_le32(current_flags);
4335 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_SET_DEVICE_FLAGS: update the current per-device flags for one
 * address after validating them against the supported mask.  On success
 * a DEVICE_FLAGS_CHANGED event is broadcast to other sockets.
 * NOTE(review): interior lines (lock call, lookup conditionals) are
 * elided in this excerpt.
 */
4338 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4341 struct mgmt_cp_set_device_flags *cp = data;
4342 struct bdaddr_list_with_flags *br_params;
4343 struct hci_conn_params *params;
4344 u8 status = MGMT_STATUS_INVALID_PARAMS;
4345 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4346 u32 current_flags = __le32_to_cpu(cp->current_flags);
4348 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4349 &cp->addr.bdaddr, cp->addr.type,
4350 __le32_to_cpu(current_flags));
/* Reject any flag bit outside the supported mask. */
4352 if ((supported_flags | current_flags) != supported_flags) {
4353 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4354 current_flags, supported_flags);
4360 if (cp->addr.type == BDADDR_BREDR) {
4361 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4366 br_params->current_flags = current_flags;
4367 status = MGMT_STATUS_SUCCESS;
4369 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4370 &cp->addr.bdaddr, cp->addr.type);
/* LE: flags are stored on the connection-parameter entry. */
4373 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4374 le_addr_type(cp->addr.type));
4376 params->current_flags = current_flags;
4377 status = MGMT_STATUS_SUCCESS;
4379 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4381 le_addr_type(cp->addr.type));
4386 hci_dev_unlock(hdev);
4388 if (status == MGMT_STATUS_SUCCESS)
4389 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4390 supported_flags, current_flags);
4392 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4393 &cp->addr, sizeof(cp->addr));
/* Emit MGMT_EV_ADV_MONITOR_ADDED for @handle to all sockets except @sk. */
4396 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4399 struct mgmt_ev_adv_monitor_added ev;
4401 ev.monitor_handle = cpu_to_le16(handle);
4403 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emit MGMT_EV_ADV_MONITOR_REMOVED for @handle.  If a REMOVE_ADV_MONITOR
 * command for a specific handle is pending, its issuing socket is skipped
 * (that socket gets the command response instead of the event).
 * NOTE(review): the lines assigning cp/sk_skip from the pending command
 * are elided in this excerpt.
 */
4406 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4408 struct mgmt_ev_adv_monitor_removed ev;
4409 struct mgmt_pending_cmd *cmd;
4410 struct sock *sk_skip = NULL;
4411 struct mgmt_cp_remove_adv_monitor *cp;
4413 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
/* monitor_handle == 0 means "remove all"; only skip for a specific
 * handle removal (assignment to sk_skip not visible here).
 */
4417 if (cp->monitor_handle)
4421 ev.monitor_handle = cpu_to_le16(handle);
4423 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES: report supported/enabled monitor
 * features, per-controller limits, and the handles of all registered
 * advertisement monitors.  The handle list is snapshotted under the
 * device lock, then the reply is built and sent unlocked.
 */
4426 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4427 void *data, u16 len)
4429 struct adv_monitor *monitor = NULL;
4430 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4433 __u32 supported = 0;
4435 __u16 num_handles = 0;
4436 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4438 BT_DBG("request for %s", hdev->name);
/* OR-patterns are only advertised when the MSFT extension supports
 * monitoring on this controller.
 */
4442 if (msft_monitor_supported(hdev))
4443 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4445 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4446 handles[num_handles++] = monitor->handle;
4448 hci_dev_unlock(hdev)
4450 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4451 rp = kmalloc(rp_size, GFP_KERNEL);
4455 /* All supported features are currently enabled */
4456 enabled = supported;
4458 rp->supported_features = cpu_to_le32(supported);
4459 rp->enabled_features = cpu_to_le32(enabled);
4460 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4461 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4462 rp->num_handles = cpu_to_le16(num_handles);
4464 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4466 err = mgmt_cmd_complete(sk, hdev->id,
4467 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4468 MGMT_STATUS_SUCCESS, rp, rp_size);
/* Completion handler for an asynchronous (controller-offloaded) add of
 * an advertisement-pattern monitor.  Finds the pending ADD command
 * (RSSI variant first), and on success registers the monitor, announces
 * it, and refreshes background scanning before replying to the caller.
 * NOTE(review): error/NULL-check branches are elided in this excerpt.
 */
4475 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4477 struct mgmt_rp_add_adv_patterns_monitor rp;
4478 struct mgmt_pending_cmd *cmd;
4479 struct adv_monitor *monitor;
4484 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4486 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4491 monitor = cmd->user_data;
4492 rp.monitor_handle = cpu_to_le16(monitor->handle);
4495 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4496 hdev->adv_monitors_cnt++;
4497 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4498 monitor->state = ADV_MONITOR_STATE_REGISTERED;
/* The new monitor may change what background scanning should do. */
4499 hci_update_background_scan(hdev);
4502 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4503 mgmt_status(status), &rp, sizeof(rp));
4504 mgmt_pending_remove(cmd);
4507 hci_dev_unlock(hdev);
4508 bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4509 rp.monitor_handle, status);
/* Shared implementation for ADD_ADV_PATTERNS_MONITOR and its RSSI
 * variant.  Rejects the request if a conflicting command is pending,
 * then hands the monitor to hci_add_adv_monitor().  If the add needs a
 * round-trip to the controller ("pending"), the reply is deferred to
 * mgmt_add_adv_patterns_monitor_complete(); otherwise the monitor is
 * registered and answered immediately.  On any failure the monitor @m
 * is freed here.
 * NOTE(review): several branch/label lines are elided in this excerpt.
 */
4514 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4515 struct adv_monitor *m, u8 status,
4516 void *data, u16 len, u16 op)
4518 struct mgmt_rp_add_adv_patterns_monitor rp;
4519 struct mgmt_pending_cmd *cmd;
/* Only one monitor/LE-state operation may be in flight at a time. */
4528 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4529 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4530 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4531 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4532 status = MGMT_STATUS_BUSY;
4536 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4538 status = MGMT_STATUS_NO_RESOURCES;
4543 pending = hci_add_adv_monitor(hdev, m, &err);
/* Map the core-layer errno to a management status code. */
4545 if (err == -ENOSPC || err == -ENOMEM)
4546 status = MGMT_STATUS_NO_RESOURCES;
4547 else if (err == -EINVAL)
4548 status = MGMT_STATUS_INVALID_PARAMS;
4550 status = MGMT_STATUS_FAILED;
4552 mgmt_pending_remove(cmd);
/* Synchronous path: no controller round-trip needed, finish now. */
4557 mgmt_pending_remove(cmd);
4558 rp.monitor_handle = cpu_to_le16(m->handle);
4559 mgmt_adv_monitor_added(sk, hdev, m->handle);
4560 m->state = ADV_MONITOR_STATE_REGISTERED;
4561 hdev->adv_monitors_cnt++;
4563 hci_dev_unlock(hdev);
4564 return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
4568 hci_dev_unlock(hdev);
/* Error path: the monitor was never registered, so free it here. */
4573 hci_free_adv_monitor(hdev, m);
4574 hci_dev_unlock(hdev);
4575 return mgmt_cmd_status(sk, hdev->id, op, status);
/* Copy RSSI thresholds from the command parameters into monitor @m,
 * converting the 16-bit timeouts from little-endian.  When @rssi is
 * NULL, fill in permissive defaults so the MSFT monitor behaves as if
 * no RSSI constraint were given.
 * NOTE(review): the if/else around the two halves is elided in this
 * excerpt.
 */
4578 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4579 struct mgmt_adv_rssi_thresholds *rssi)
4582 m->rssi.low_threshold = rssi->low_threshold;
4583 m->rssi.low_threshold_timeout =
4584 __le16_to_cpu(rssi->low_threshold_timeout);
4585 m->rssi.high_threshold = rssi->high_threshold;
4586 m->rssi.high_threshold_timeout =
4587 __le16_to_cpu(rssi->high_threshold_timeout);
4588 m->rssi.sampling_period = rssi->sampling_period;
4590 /* Default values. These numbers are the least constricting
4591 * parameters for MSFT API to work, so it behaves as if there
4592 * are no rssi parameter to consider. May need to be changed
4593 * if other API are to be supported.
4595 m->rssi.low_threshold = -127;
4596 m->rssi.low_threshold_timeout = 60;
4597 m->rssi.high_threshold = -127;
4598 m->rssi.high_threshold_timeout = 0;
4599 m->rssi.sampling_period = 0;
/* Validate and copy @pattern_count advertisement patterns into monitor
 * @m's pattern list.  Each pattern's offset/length must stay within
 * HCI_MAX_AD_LENGTH.  Returns a MGMT_STATUS_* code; on early failure,
 * patterns already added remain on m->patterns (freed with the monitor).
 */
4603 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4604 struct mgmt_adv_pattern *patterns)
4606 u8 offset = 0, length = 0;
4607 struct adv_pattern *p = NULL;
4610 for (i = 0; i < pattern_count; i++) {
4611 offset = patterns[i].offset;
4612 length = patterns[i].length;
/* Reject patterns that would read past the AD data buffer. */
4613 if (offset >= HCI_MAX_AD_LENGTH ||
4614 length > HCI_MAX_AD_LENGTH ||
4615 (offset + length) > HCI_MAX_AD_LENGTH)
4616 return MGMT_STATUS_INVALID_PARAMS;
4618 p = kmalloc(sizeof(*p), GFP_KERNEL);
4620 return MGMT_STATUS_NO_RESOURCES;
4622 p->ad_type = patterns[i].ad_type;
4623 p->offset = patterns[i].offset;
4624 p->length = patterns[i].length;
4625 memcpy(p->value, patterns[i].value, p->length);
4627 INIT_LIST_HEAD(&p->list);
4628 list_add(&p->list, &m->patterns);
4631 return MGMT_STATUS_SUCCESS;
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR: validate the variable-length command
 * (header plus pattern_count patterns), allocate a monitor with default
 * RSSI parameters, parse the patterns, and hand off to
 * __add_adv_patterns_monitor() which also handles any error status
 * accumulated here.
 */
4634 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4635 void *data, u16 len)
4637 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4638 struct adv_monitor *m = NULL;
4639 u8 status = MGMT_STATUS_SUCCESS;
4640 size_t expected_size = sizeof(*cp);
4642 BT_DBG("request for %s", hdev->name);
/* Must carry at least one pattern beyond the fixed header. */
4644 if (len <= sizeof(*cp)) {
4645 status = MGMT_STATUS_INVALID_PARAMS;
4649 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4650 if (len != expected_size) {
4651 status = MGMT_STATUS_INVALID_PARAMS;
4655 m = kzalloc(sizeof(*m), GFP_KERNEL);
4657 status = MGMT_STATUS_NO_RESOURCES;
4661 INIT_LIST_HEAD(&m->patterns);
/* NULL => permissive default RSSI thresholds. */
4663 parse_adv_monitor_rssi(m, NULL);
4664 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4667 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4668 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI: same as
 * add_adv_patterns_monitor() but the command carries explicit RSSI
 * thresholds, which are parsed into the monitor before the patterns.
 */
4671 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4672 void *data, u16 len)
4674 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4675 struct adv_monitor *m = NULL;
4676 u8 status = MGMT_STATUS_SUCCESS;
4677 size_t expected_size = sizeof(*cp);
4679 BT_DBG("request for %s", hdev->name);
4681 if (len <= sizeof(*cp)) {
4682 status = MGMT_STATUS_INVALID_PARAMS;
4686 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4687 if (len != expected_size) {
4688 status = MGMT_STATUS_INVALID_PARAMS;
4692 m = kzalloc(sizeof(*m), GFP_KERNEL);
4694 status = MGMT_STATUS_NO_RESOURCES;
4698 INIT_LIST_HEAD(&m->patterns);
/* Caller-supplied RSSI thresholds, unlike the non-RSSI variant. */
4700 parse_adv_monitor_rssi(m, &cp->rssi);
4701 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4704 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4705 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
/* Completion handler for an asynchronous monitor removal: reply to the
 * pending REMOVE_ADV_MONITOR command, echoing back the handle it asked
 * for, and refresh background scanning.
 * NOTE(review): NULL-check/success branches are elided in this excerpt.
 */
4708 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4710 struct mgmt_rp_remove_adv_monitor rp;
4711 struct mgmt_cp_remove_adv_monitor *cp;
4712 struct mgmt_pending_cmd *cmd;
4717 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
/* Echo the requested handle (already little-endian in the command). */
4722 rp.monitor_handle = cp->monitor_handle;
4725 hci_update_background_scan(hdev);
4727 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4728 mgmt_status(status), &rp, sizeof(rp));
4729 mgmt_pending_remove(cmd);
4732 hci_dev_unlock(hdev);
4733 bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4734 rp.monitor_handle, status);
/* MGMT_OP_REMOVE_ADV_MONITOR: remove one monitor (handle != 0) or all
 * monitors (handle == 0).  If the removal requires a controller
 * round-trip the reply is deferred to
 * mgmt_remove_adv_monitor_complete(); otherwise it is sent immediately.
 * NOTE(review): several branch lines are elided in this excerpt.
 */
4739 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4740 void *data, u16 len)
4742 struct mgmt_cp_remove_adv_monitor *cp = data;
4743 struct mgmt_rp_remove_adv_monitor rp;
4744 struct mgmt_pending_cmd *cmd;
4745 u16 handle = __le16_to_cpu(cp->monitor_handle);
4749 BT_DBG("request for %s", hdev->name);
4750 rp.monitor_handle = cp->monitor_handle;
/* Reject if any conflicting monitor/LE operation is already pending. */
4754 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4755 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
4756 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4757 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
4758 status = MGMT_STATUS_BUSY;
4762 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
4764 status = MGMT_STATUS_NO_RESOURCES;
/* handle == 0 is the wildcard meaning "remove every monitor". */
4769 pending = hci_remove_single_adv_monitor(hdev, handle, &err);
4771 pending = hci_remove_all_adv_monitor(hdev, &err);
4774 mgmt_pending_remove(cmd);
4777 status = MGMT_STATUS_INVALID_INDEX;
4779 status = MGMT_STATUS_FAILED;
4784 /* monitor can be removed without forwarding request to controller */
4786 mgmt_pending_remove(cmd);
4787 hci_dev_unlock(hdev);
4789 return mgmt_cmd_complete(sk, hdev->id,
4790 MGMT_OP_REMOVE_ADV_MONITOR,
4791 MGMT_STATUS_SUCCESS,
4795 hci_dev_unlock(hdev);
4799 hci_dev_unlock(hdev);
4800 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
/* HCI completion callback for READ_LOCAL_OOB_DATA: translate the HCI
 * reply skb into a management reply.  Handles both the legacy
 * (P-192-only) and the extended (P-192 + P-256) HCI variants; for the
 * legacy variant the 256-bit fields are dropped from the reply size.
 * NOTE(review): NULL-check and skb-len bail-out lines are elided in this
 * excerpt.
 */
4804 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
4805 u16 opcode, struct sk_buff *skb)
4807 struct mgmt_rp_read_local_oob_data mgmt_rp;
4808 size_t rp_size = sizeof(mgmt_rp);
4809 struct mgmt_pending_cmd *cmd;
4811 bt_dev_dbg(hdev, "status %u", status);
4813 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4817 if (status || !skb) {
4818 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4819 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
4823 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
4825 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
4826 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a truncated HCI reply before reading fields. */
4828 if (skb->len < sizeof(*rp)) {
4829 mgmt_cmd_status(cmd->sk, hdev->id,
4830 MGMT_OP_READ_LOCAL_OOB_DATA,
4831 MGMT_STATUS_FAILED);
4835 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
4836 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Legacy reply carries no P-256 data: shrink the mgmt reply. */
4838 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
4840 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
4842 if (skb->len < sizeof(*rp)) {
4843 mgmt_cmd_status(cmd->sk, hdev->id,
4844 MGMT_OP_READ_LOCAL_OOB_DATA,
4845 MGMT_STATUS_FAILED);
4849 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
4850 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
4852 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
4853 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
4856 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4857 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
4860 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_DATA: validate the controller state (powered,
 * SSP-capable, no duplicate request pending) and queue the appropriate
 * HCI read (extended if BR/EDR Secure Connections is enabled).  The
 * reply is produced by read_local_oob_data_complete().
 */
4863 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4864 void *data, u16 data_len)
4866 struct mgmt_pending_cmd *cmd;
4867 struct hci_request req;
4870 bt_dev_dbg(hdev, "sock %p", sk);
4874 if (!hdev_is_powered(hdev)) {
4875 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4876 MGMT_STATUS_NOT_POWERED);
4880 if (!lmp_ssp_capable(hdev)) {
4881 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4882 MGMT_STATUS_NOT_SUPPORTED);
4886 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4887 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4892 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4898 hci_req_init(&req, hdev);
/* Secure Connections controllers provide both P-192 and P-256 data. */
4900 if (bredr_sc_enabled(hdev))
4901 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4903 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4905 err = hci_req_run_skb(&req, read_local_oob_data_complete);
/* If the request could not even be queued, drop the pending entry. */
4907 mgmt_pending_remove(cmd);
4910 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA: store remote out-of-band pairing data.
 * Two command sizes are accepted: the legacy one with only P-192
 * hash/rand (BR/EDR only), and the extended one that also carries P-256
 * values.  All-zero hash/rand pairs mean "no data for that curve".
 * NOTE(review): interior lines (lock call, some else branches and
 * NULL-assignments) are elided in this excerpt.
 */
4914 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4915 void *data, u16 len)
4917 struct mgmt_addr_info *addr = data;
4920 bt_dev_dbg(hdev, "sock %p", sk);
4922 if (!bdaddr_type_is_valid(addr->type))
4923 return mgmt_cmd_complete(sk, hdev->id,
4924 MGMT_OP_ADD_REMOTE_OOB_DATA,
4925 MGMT_STATUS_INVALID_PARAMS,
4926 addr, sizeof(*addr));
/* Legacy command layout: P-192 data only, BR/EDR addresses only. */
4930 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4931 struct mgmt_cp_add_remote_oob_data *cp = data;
4934 if (cp->addr.type != BDADDR_BREDR) {
4935 err = mgmt_cmd_complete(sk, hdev->id,
4936 MGMT_OP_ADD_REMOTE_OOB_DATA,
4937 MGMT_STATUS_INVALID_PARAMS,
4938 &cp->addr, sizeof(cp->addr));
4942 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4943 cp->addr.type, cp->hash,
4944 cp->rand, NULL, NULL);
4946 status = MGMT_STATUS_FAILED;
4948 status = MGMT_STATUS_SUCCESS;
4950 err = mgmt_cmd_complete(sk, hdev->id,
4951 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4952 &cp->addr, sizeof(cp->addr));
4953 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4954 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4955 u8 *rand192, *hash192, *rand256, *hash256;
4958 if (bdaddr_type_is_le(cp->addr.type)) {
4959 /* Enforce zero-valued 192-bit parameters as
4960 * long as legacy SMP OOB isn't implemented.
4962 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4963 memcmp(cp->hash192, ZERO_KEY, 16)) {
4964 err = mgmt_cmd_complete(sk, hdev->id,
4965 MGMT_OP_ADD_REMOTE_OOB_DATA,
4966 MGMT_STATUS_INVALID_PARAMS,
4967 addr, sizeof(*addr));
4974 /* In case one of the P-192 values is set to zero,
4975 * then just disable OOB data for P-192.
4977 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4978 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4982 rand192 = cp->rand192;
4983 hash192 = cp->hash192;
4987 /* In case one of the P-256 values is set to zero, then just
4988 * disable OOB data for P-256.
4990 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4991 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4995 rand256 = cp->rand256;
4996 hash256 = cp->hash256;
4999 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5000 cp->addr.type, hash192, rand192,
5003 status = MGMT_STATUS_FAILED;
5005 status = MGMT_STATUS_SUCCESS;
5007 err = mgmt_cmd_complete(sk, hdev->id,
5008 MGMT_OP_ADD_REMOTE_OOB_DATA,
5009 status, &cp->addr, sizeof(cp->addr));
/* Any other length is malformed. */
5011 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5013 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5014 MGMT_STATUS_INVALID_PARAMS);
5018 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA: delete stored remote OOB data for one
 * BR/EDR address, or clear all entries when the address is BDADDR_ANY.
 */
5022 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5023 void *data, u16 len)
5025 struct mgmt_cp_remove_remote_oob_data *cp = data;
5029 bt_dev_dbg(hdev, "sock %p", sk);
5031 if (cp->addr.type != BDADDR_BREDR)
5032 return mgmt_cmd_complete(sk, hdev->id,
5033 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5034 MGMT_STATUS_INVALID_PARAMS,
5035 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY is the wildcard: wipe the whole OOB data store. */
5039 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5040 hci_remote_oob_data_clear(hdev);
5041 status = MGMT_STATUS_SUCCESS;
5045 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5047 status = MGMT_STATUS_INVALID_PARAMS;
5049 status = MGMT_STATUS_SUCCESS;
5052 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5053 status, &cp->addr, sizeof(cp->addr));
5055 hci_dev_unlock(hdev);
/* Called by the HCI core when a start-discovery request finishes:
 * complete whichever of the three discovery-start commands is pending,
 * and wake the suspend machinery if it was waiting for discovery to
 * unpause.
 */
5059 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5061 struct mgmt_pending_cmd *cmd;
5063 bt_dev_dbg(hdev, "status %u", status);
/* Any of the three start variants may be the pending command. */
5067 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5069 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5072 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5075 cmd->cmd_complete(cmd, mgmt_status(status));
5076 mgmt_pending_remove(cmd);
5079 hci_dev_unlock(hdev);
5081 /* Handle suspend notifier */
5082 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
5083 hdev->suspend_tasks)) {
5084 bt_dev_dbg(hdev, "Unpaused discovery");
5085 wake_up(&hdev->suspend_wait_q);
/* Check whether discovery @type is usable on this controller.  Writes
 * the MGMT status explaining any failure into *mgmt_status (LE types
 * need LE support, BR/EDR needs BR/EDR support; anything else is
 * invalid).
 */
5089 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5090 uint8_t *mgmt_status)
5093 case DISCOV_TYPE_LE:
5094 *mgmt_status = mgmt_le_support(hdev);
5098 case DISCOV_TYPE_INTERLEAVED:
5099 *mgmt_status = mgmt_le_support(hdev);
5103 case DISCOV_TYPE_BREDR:
5104 *mgmt_status = mgmt_bredr_support(hdev);
5109 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Shared implementation for START_DISCOVERY and START_LIMITED_DISCOVERY:
 * validate power state, current discovery state, type, and pause state,
 * then queue the discovery-update work.  The command completes later via
 * mgmt_start_discovery_complete().
 */
5116 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5117 u16 op, void *data, u16 len)
5119 struct mgmt_cp_start_discovery *cp = data;
5120 struct mgmt_pending_cmd *cmd;
5124 bt_dev_dbg(hdev, "sock %p", sk);
5128 if (!hdev_is_powered(hdev)) {
5129 err = mgmt_cmd_complete(sk, hdev->id, op,
5130 MGMT_STATUS_NOT_POWERED,
5131 &cp->type, sizeof(cp->type));
5135 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5136 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5137 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5138 &cp->type, sizeof(cp->type));
5142 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5143 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5144 &cp->type, sizeof(cp->type));
5148 /* Can't start discovery when it is paused */
5149 if (hdev->discovery_paused) {
5150 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5151 &cp->type, sizeof(cp->type));
5155 /* Clear the discovery filter first to free any previously
5156 * allocated memory for the UUID list.
5158 hci_discovery_filter_clear(hdev);
5160 hdev->discovery.type = cp->type;
5161 hdev->discovery.report_invalid_rssi = false;
/* The limited variant restricts results to limited-discoverable
 * devices.
 */
5162 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5163 hdev->discovery.limited = true;
5165 hdev->discovery.limited = false;
5167 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5173 cmd->cmd_complete = generic_cmd_complete;
5175 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5176 queue_work(hdev->req_workqueue, &hdev->discov_update);
5180 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY: thin wrapper over start_discovery_internal(). */
5184 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5185 void *data, u16 len)
5187 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY: thin wrapper over
 * start_discovery_internal() with the limited opcode.
 */
5191 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5192 void *data, u16 len)
5194 return start_discovery_internal(sk, hdev,
5195 MGMT_OP_START_LIMITED_DISCOVERY,
/* cmd_complete hook for START_SERVICE_DISCOVERY pending commands:
 * echo back part of the stored command parameters with the given status.
 */
5199 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
5202 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY: like start_discovery but with a
 * result filter (RSSI threshold plus an optional list of service UUIDs).
 * Validates the variable-length UUID list against both an overflow bound
 * and the exact command length before copying it into hdev->discovery.
 */
5206 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5207 void *data, u16 len)
5209 struct mgmt_cp_start_service_discovery *cp = data;
5210 struct mgmt_pending_cmd *cmd;
/* Upper bound that keeps sizeof(*cp) + count*16 from exceeding U16_MAX. */
5211 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5212 u16 uuid_count, expected_len;
5216 bt_dev_dbg(hdev, "sock %p", sk);
5220 if (!hdev_is_powered(hdev)) {
5221 err = mgmt_cmd_complete(sk, hdev->id,
5222 MGMT_OP_START_SERVICE_DISCOVERY,
5223 MGMT_STATUS_NOT_POWERED,
5224 &cp->type, sizeof(cp->type));
5228 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5229 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5230 err = mgmt_cmd_complete(sk, hdev->id,
5231 MGMT_OP_START_SERVICE_DISCOVERY,
5232 MGMT_STATUS_BUSY, &cp->type,
5237 if (hdev->discovery_paused) {
5238 err = mgmt_cmd_complete(sk, hdev->id,
5239 MGMT_OP_START_SERVICE_DISCOVERY,
5240 MGMT_STATUS_BUSY, &cp->type,
5245 uuid_count = __le16_to_cpu(cp->uuid_count);
5246 if (uuid_count > max_uuid_count) {
5247 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5249 err = mgmt_cmd_complete(sk, hdev->id,
5250 MGMT_OP_START_SERVICE_DISCOVERY,
5251 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* The command length must match the declared UUID count exactly. */
5256 expected_len = sizeof(*cp) + uuid_count * 16;
5257 if (expected_len != len) {
5258 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5260 err = mgmt_cmd_complete(sk, hdev->id,
5261 MGMT_OP_START_SERVICE_DISCOVERY,
5262 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5267 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5268 err = mgmt_cmd_complete(sk, hdev->id,
5269 MGMT_OP_START_SERVICE_DISCOVERY,
5270 status, &cp->type, sizeof(cp->type));
5274 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5281 cmd->cmd_complete = service_discovery_cmd_complete;
5283 /* Clear the discovery filter first to free any previously
5284 * allocated memory for the UUID list.
5286 hci_discovery_filter_clear(hdev);
5288 hdev->discovery.result_filtering = true;
5289 hdev->discovery.type = cp->type;
5290 hdev->discovery.rssi = cp->rssi;
5291 hdev->discovery.uuid_count = uuid_count;
5293 if (uuid_count > 0) {
5294 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5296 if (!hdev->discovery.uuids) {
5297 err = mgmt_cmd_complete(sk, hdev->id,
5298 MGMT_OP_START_SERVICE_DISCOVERY,
5300 &cp->type, sizeof(cp->type));
5301 mgmt_pending_remove(cmd);
5306 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5307 queue_work(hdev->req_workqueue, &hdev->discov_update);
5311 hci_dev_unlock(hdev);
/* Called by the HCI core when a stop-discovery request finishes:
 * complete the pending STOP_DISCOVERY command and wake the suspend
 * machinery if it was waiting for discovery to pause.
 */
5315 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5317 struct mgmt_pending_cmd *cmd;
5319 bt_dev_dbg(hdev, "status %u", status);
5323 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5325 cmd->cmd_complete(cmd, mgmt_status(status));
5326 mgmt_pending_remove(cmd);
5329 hci_dev_unlock(hdev);
5331 /* Handle suspend notifier */
5332 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
5333 bt_dev_dbg(hdev, "Paused discovery");
5334 wake_up(&hdev->suspend_wait_q);
/* MGMT_OP_STOP_DISCOVERY: stop an active discovery of the given type.
 * Rejected when no discovery is running or the type does not match the
 * one in progress; otherwise queues the discovery-update work and
 * completes later via mgmt_stop_discovery_complete().
 */
5338 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5341 struct mgmt_cp_stop_discovery *mgmt_cp = data;
5342 struct mgmt_pending_cmd *cmd;
5345 bt_dev_dbg(hdev, "sock %p", sk);
5349 if (!hci_discovery_active(hdev)) {
5350 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5351 MGMT_STATUS_REJECTED, &mgmt_cp->type,
5352 sizeof(mgmt_cp->type));
5356 if (hdev->discovery.type != mgmt_cp->type) {
5357 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5358 MGMT_STATUS_INVALID_PARAMS,
5359 &mgmt_cp->type, sizeof(mgmt_cp->type));
5363 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5369 cmd->cmd_complete = generic_cmd_complete;
5371 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5372 queue_work(hdev->req_workqueue, &hdev->discov_update);
5376 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME: userspace tells us whether the name of a device
 * found during discovery is already known.  Unknown names are marked
 * NAME_NEEDED so the inquiry cache schedules a remote name resolve.
 */
5380 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5383 struct mgmt_cp_confirm_name *cp = data;
5384 struct inquiry_entry *e;
5387 bt_dev_dbg(hdev, "sock %p", sk);
/* Only meaningful while a discovery session is active. */
5391 if (!hci_discovery_active(hdev)) {
5392 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5393 MGMT_STATUS_FAILED, &cp->addr,
5398 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5400 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5401 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5406 if (cp->name_known) {
5407 e->name_state = NAME_KNOWN;
5410 e->name_state = NAME_NEEDED;
5411 hci_inquiry_cache_update_resolve(hdev, e);
5414 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5415 &cp->addr, sizeof(cp->addr));
5418 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE: add an address to the reject list and notify
 * other management sockets with DEVICE_BLOCKED on success.
 */
5422 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5425 struct mgmt_cp_block_device *cp = data;
5429 bt_dev_dbg(hdev, "sock %p", sk);
5431 if (!bdaddr_type_is_valid(cp->addr.type))
5432 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5433 MGMT_STATUS_INVALID_PARAMS,
5434 &cp->addr, sizeof(cp->addr));
5438 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5441 status = MGMT_STATUS_FAILED;
/* Issuing socket is skipped; it gets the command response instead. */
5445 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5447 status = MGMT_STATUS_SUCCESS;
5450 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5451 &cp->addr, sizeof(cp->addr));
5453 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE: remove an address from the reject list and
 * notify other management sockets with DEVICE_UNBLOCKED on success.
 * Mirrors block_device() above.
 */
5458 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5461 struct mgmt_cp_unblock_device *cp = data;
5465 bt_dev_dbg(hdev, "sock %p", sk);
5467 if (!bdaddr_type_is_valid(cp->addr.type))
5468 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5469 MGMT_STATUS_INVALID_PARAMS,
5470 &cp->addr, sizeof(cp->addr));
5474 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
/* A failed delete means the entry was never blocked. */
5477 status = MGMT_STATUS_INVALID_PARAMS;
5481 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5483 status = MGMT_STATUS_SUCCESS;
5486 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5487 &cp->addr, sizeof(cp->addr));
5489 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID: store the Device ID (source, vendor, product,
 * version) used in the EIR data, then refresh the EIR on the controller.
 * Source must be 0x0000-0x0002 per the Device ID profile.
 */
5494 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5497 struct mgmt_cp_set_device_id *cp = data;
5498 struct hci_request req;
5502 bt_dev_dbg(hdev, "sock %p", sk);
5504 source = __le16_to_cpu(cp->source);
5506 if (source > 0x0002)
5507 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5508 MGMT_STATUS_INVALID_PARAMS);
5512 hdev->devid_source = source;
5513 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5514 hdev->devid_product = __le16_to_cpu(cp->product);
5515 hdev->devid_version = __le16_to_cpu(cp->version);
5517 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
/* Push the new Device ID into the controller's EIR data. */
5520 hci_req_init(&req, hdev);
5521 __hci_req_update_eir(&req);
5522 hci_req_run(&req, NULL);
5524 hci_dev_unlock(hdev);
/* Request-completion callback used when re-enabling instance advertising
 * from set_advertising_complete(); only logs the HCI status.
 */
5529 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5532 bt_dev_dbg(hdev, "status %u", status);
/* HCI request completion for Set Advertising: sync the HCI_ADVERTISING
 * mgmt flag with the controller's HCI_LE_ADV state, answer all pending
 * Set Advertising commands, wake any suspend/resume waiter, and — if the
 * setting was just turned off while advertising instances exist —
 * restart multi-instance advertising.
 * NOTE(review): lines elided by extraction (error path, goto labels,
 * braces missing); comments only, code byte-identical.
 */
5535 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
5538 struct cmd_lookup match = { NULL, hdev };
5539 struct hci_request req;
5541 struct adv_info *adv_instance;
/* On HCI failure: fail every pending SET_ADVERTISING command with the
 * translated mgmt status.
 */
5547 u8 mgmt_err = mgmt_status(status);
5549 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5550 cmd_status_rsp, &mgmt_err);
/* Mirror the controller's actual LE advertising state into the mgmt
 * setting flag.
 */
5554 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5555 hci_dev_set_flag(hdev, HCI_ADVERTISING)
5557 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5559 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5562 new_settings(hdev, match.sk);
5567 /* Handle suspend notifier */
5568 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
5569 hdev->suspend_tasks)) {
5570 bt_dev_dbg(hdev, "Paused advertising");
5571 wake_up(&hdev->suspend_wait_q);
5572 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
5573 hdev->suspend_tasks)) {
5574 bt_dev_dbg(hdev, "Unpaused advertising");
5575 wake_up(&hdev->suspend_wait_q);
5578 /* If "Set Advertising" was just disabled and instance advertising was
5579 * set up earlier, then re-enable multi-instance advertising.
/* Nothing to re-enable when the setting is on or no instances exist. */
5581 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5582 list_empty(&hdev->adv_instances))
5585 instance = hdev->cur_adv_instance;
/* With no current instance, fall back to the first configured one. */
5587 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5588 struct adv_info, list);
5592 instance = adv_instance->instance;
5595 hci_req_init(&req, hdev);
5597 err = __hci_req_schedule_adv_instance(&req, instance, true);
5600 err = hci_req_run(&req, enable_advertising_instance);
5603 bt_dev_err(hdev, "failed to re-configure advertising");
5606 hci_dev_unlock(hdev);
/* MGMT Set Advertising command handler. val semantics: 0x00 off,
 * 0x01 on, 0x02 on + connectable. Fast-paths (flag toggle + immediate
 * response) when no HCI traffic is needed; otherwise queues a pending
 * command and drives the controller via an hci_request completed by
 * set_advertising_complete().
 * NOTE(review): lines elided by extraction (locals, goto labels,
 * unlock/return paths missing); comments only, code byte-identical.
 */
5609 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5612 struct mgmt_mode *cp = data;
5613 struct mgmt_pending_cmd *cmd;
5614 struct hci_request req;
5618 bt_dev_dbg(hdev, "sock %p", sk);
5620 status = mgmt_le_support(hdev);
5622 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5625 /* Enabling the experimental LL Privay support disables support for
/* (typo in original: "Privay" = Privacy; advertising is handled by the
 * controller's resolving list path in that mode.)
 */
5628 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5629 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5630 MGMT_STATUS_NOT_SUPPORTED);
5632 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5633 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5634 MGMT_STATUS_INVALID_PARAMS);
/* While advertising is paused for system suspend, refuse changes. */
5636 if (hdev->advertising_paused)
5637 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5644 /* The following conditions are ones which mean that we should
5645 * not do any HCI communication but directly send a mgmt
5646 * response to user space (after toggling the flag if
/* Fast path: powered off, no effective change, LE links active, or an
 * active LE scan in progress — only flip flags and reply.
 */
5649 if (!hdev_is_powered(hdev) ||
5650 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5651 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5652 hci_conn_num(hdev, LE_LINK) > 0 ||
5653 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5654 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5658 hdev->cur_adv_instance = 0x00;
5659 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5660 if (cp->val == 0x02)
5661 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5663 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5665 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5666 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5669 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
5674 err = new_settings(hdev, sk);
/* Only one advertising/LE state change may be in flight at a time. */
5679 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5680 pending_find(MGMT_OP_SET_LE, hdev)) {
5681 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5686 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5692 hci_req_init(&req, hdev);
5694 if (cp->val == 0x02)
5695 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5697 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5699 cancel_adv_timeout(hdev);
5702 /* Switch to instance "0" for the Set Advertising setting.
5703 * We cannot use update_[adv|scan_rsp]_data() here as the
5704 * HCI_ADVERTISING flag is not yet set.
5706 hdev->cur_adv_instance = 0x00;
/* Extended advertising capable controllers use the ext-adv request
 * helpers; legacy ones program adv/scan-rsp data then enable.
 */
5708 if (ext_adv_capable(hdev)) {
5709 __hci_req_start_ext_adv(&req, 0x00);
5711 __hci_req_update_adv_data(&req, 0x00);
5712 __hci_req_update_scan_rsp_data(&req, 0x00);
5713 __hci_req_enable_advertising(&req);
5716 __hci_req_disable_advertising(&req);
5719 err = hci_req_run(&req, set_advertising_complete);
5721 mgmt_pending_remove(cmd);
5724 hci_dev_unlock(hdev);
/* MGMT Set Static Address command handler: store a static random LE
 * identity address. Only allowed while powered off; BDADDR_ANY clears
 * the address, otherwise the address must be a valid static random
 * address (two most-significant bits set, not BDADDR_NONE).
 * NOTE(review): lines elided by extraction; comments only.
 */
5728 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5729 void *data, u16 len)
5731 struct mgmt_cp_set_static_address *cp = data;
5734 bt_dev_dbg(hdev, "sock %p", sk);
5736 if (!lmp_le_capable(hdev))
5737 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5738 MGMT_STATUS_NOT_SUPPORTED);
5740 if (hdev_is_powered(hdev))
5741 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5742 MGMT_STATUS_REJECTED);
5744 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5745 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5746 return mgmt_cmd_status(sk, hdev->id,
5747 MGMT_OP_SET_STATIC_ADDRESS,
5748 MGMT_STATUS_INVALID_PARAMS);
5750 /* Two most significant bits shall be set */
5751 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5752 return mgmt_cmd_status(sk, hdev->id,
5753 MGMT_OP_SET_STATIC_ADDRESS,
5754 MGMT_STATUS_INVALID_PARAMS);
5759 bacpy(&hdev->static_addr, &cp->bdaddr);
5761 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5765 err = new_settings(hdev, sk);
5768 hci_dev_unlock(hdev);
/* MGMT Set Scan Parameters command handler: validate and store the LE
 * scan interval/window (units of 0.625 ms, range 0x0004-0x4000, window
 * must not exceed interval), then restart passive background scanning
 * so the new parameters take effect immediately.
 * NOTE(review): lines elided by extraction; comments only.
 */
5772 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5773 void *data, u16 len)
5775 struct mgmt_cp_set_scan_params *cp = data;
5776 __u16 interval, window;
5779 bt_dev_dbg(hdev, "sock %p", sk);
5781 if (!lmp_le_capable(hdev))
5782 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5783 MGMT_STATUS_NOT_SUPPORTED);
5785 interval = __le16_to_cpu(cp->interval);
5787 if (interval < 0x0004 || interval > 0x4000)
5788 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5789 MGMT_STATUS_INVALID_PARAMS);
5791 window = __le16_to_cpu(cp->window);
5793 if (window < 0x0004 || window > 0x4000)
5794 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5795 MGMT_STATUS_INVALID_PARAMS);
5797 if (window > interval)
5798 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5799 MGMT_STATUS_INVALID_PARAMS);
5803 hdev->le_scan_interval = interval;
5804 hdev->le_scan_window = window;
5806 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5809 /* If background scan is running, restart it so new parameters are
/* Only the passive background scan is restarted — an active discovery
 * (state != STOPPED) keeps its own parameters.
 */
5812 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5813 hdev->discovery.state == DISCOVERY_STOPPED) {
5814 struct hci_request req;
5816 hci_req_init(&req, hdev);
5818 hci_req_add_le_scan_disable(&req, false);
5819 hci_req_add_le_passive_scan(&req);
5821 hci_req_run(&req, NULL);
5824 hci_dev_unlock(hdev);
/* HCI completion for Set Fast Connectable: on failure report the status
 * to the pending command's socket; on success flip HCI_FAST_CONNECTABLE
 * to match the requested mode and broadcast the new settings.
 * NOTE(review): lines elided by extraction; comments only.
 */
5829 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5832 struct mgmt_pending_cmd *cmd;
5834 bt_dev_dbg(hdev, "status 0x%02x", status);
5838 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5843 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5844 mgmt_status(status));
/* Success path: cmd->param holds the original mgmt_mode request. */
5846 struct mgmt_mode *cp = cmd->param;
5849 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5851 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5853 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5854 new_settings(hdev, cmd->sk);
5857 mgmt_pending_remove(cmd);
5860 hci_dev_unlock(hdev);
/* MGMT Set Fast Connectable command handler: toggles BR/EDR fast
 * connectable page-scan parameters. Requires BR/EDR enabled and a
 * controller of at least Bluetooth 1.2. Powered-off devices get the
 * flag flipped directly; powered devices go through an hci_request
 * completed by fast_connectable_complete().
 * NOTE(review): lines elided by extraction; comments only.
 */
5863 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5864 void *data, u16 len)
5866 struct mgmt_mode *cp = data;
5867 struct mgmt_pending_cmd *cmd;
5868 struct hci_request req;
5871 bt_dev_dbg(hdev, "sock %p", sk);
5873 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5874 hdev->hci_ver < BLUETOOTH_VER_1_2)
5875 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5876 MGMT_STATUS_NOT_SUPPORTED);
5878 if (cp->val != 0x00 && cp->val != 0x01)
5879 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5880 MGMT_STATUS_INVALID_PARAMS);
5884 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5885 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op request: current flag already matches the requested value. */
5890 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5891 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5896 if (!hdev_is_powered(hdev)) {
5897 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5898 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5900 new_settings(hdev, sk);
5904 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5911 hci_req_init(&req, hdev);
5913 __hci_req_write_fast_connectable(&req, cp->val);
5915 err = hci_req_run(&req, fast_connectable_complete);
5917 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5918 MGMT_STATUS_FAILED);
5919 mgmt_pending_remove(cmd);
5923 hci_dev_unlock(hdev);
/* HCI completion for Set BR/EDR: on failure roll back the eagerly-set
 * HCI_BREDR_ENABLED flag and report the error; on success respond with
 * the current settings and broadcast them.
 * NOTE(review): lines elided by extraction; comments only.
 */
5928 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5930 struct mgmt_pending_cmd *cmd;
5932 bt_dev_dbg(hdev, "status 0x%02x", status);
5936 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5941 u8 mgmt_err = mgmt_status(status);
5943 /* We need to restore the flag if related HCI commands
/* set_bredr() sets HCI_BREDR_ENABLED before issuing HCI commands so
 * adv data is generated correctly; undo that here on failure.
 */
5946 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5948 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5950 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5951 new_settings(hdev, cmd->sk);
5954 mgmt_pending_remove(cmd);
5957 hci_dev_unlock(hdev);
/* MGMT Set BR/EDR command handler: enable/disable the BR/EDR transport
 * on a dual-mode controller. Disabling while powered on is rejected, as
 * is re-enabling when the device operates LE-only with a static address
 * or with Secure Connections enabled. When powered off only flags are
 * flipped; when powered on an hci_request updates scan/adv state and
 * set_bredr_complete() finishes the command.
 * NOTE(review): lines elided by extraction (else branches, goto labels,
 * unlock/return missing); comments only, code byte-identical.
 */
5960 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5962 struct mgmt_mode *cp = data;
5963 struct mgmt_pending_cmd *cmd;
5964 struct hci_request req;
5967 bt_dev_dbg(hdev, "sock %p", sk);
5969 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5970 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5971 MGMT_STATUS_NOT_SUPPORTED);
5973 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5974 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5975 MGMT_STATUS_REJECTED);
5977 if (cp->val != 0x00 && cp->val != 0x01)
5978 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5979 MGMT_STATUS_INVALID_PARAMS);
5983 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5984 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5988 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR clears every BR/EDR-only setting with it. */
5990 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5991 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5992 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5993 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5994 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5997 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5999 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6003 err = new_settings(hdev, sk);
6007 /* Reject disabling when powered on */
6009 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6010 MGMT_STATUS_REJECTED);
6013 /* When configuring a dual-mode controller to operate
6014 * with LE only and using a static address, then switching
6015 * BR/EDR back on is not allowed.
6017 * Dual-mode controllers shall operate with the public
6018 * address as its identity address for BR/EDR and LE. So
6019 * reject the attempt to create an invalid configuration.
6021 * The same restrictions applies when secure connections
6022 * has been enabled. For BR/EDR this is a controller feature
6023 * while for LE it is a host stack feature. This means that
6024 * switching BR/EDR back on when secure connections has been
6025 * enabled is not a supported transaction.
6027 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6028 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6029 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6030 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6031 MGMT_STATUS_REJECTED);
6036 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
6037 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6042 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6048 /* We need to flip the bit already here so that
6049 * hci_req_update_adv_data generates the correct flags.
6051 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6053 hci_req_init(&req, hdev);
6055 __hci_req_write_fast_connectable(&req, false);
6056 __hci_req_update_scan(&req);
6058 /* Since only the advertising data flags will change, there
6059 * is no need to update the scan response data.
6061 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
6063 err = hci_req_run(&req, set_bredr_complete);
6065 mgmt_pending_remove(cmd);
6068 hci_dev_unlock(hdev);
/* HCI completion for Set Secure Connections: translate the requested
 * mode (stored in cmd->param) into the HCI_SC_ENABLED/HCI_SC_ONLY flag
 * pair — 0x00 clears both, 0x01 sets enabled only, 0x02 sets both —
 * then respond and broadcast settings.
 * NOTE(review): the switch/case lines appear elided by extraction;
 * comments only, code byte-identical.
 */
6072 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6074 struct mgmt_pending_cmd *cmd;
6075 struct mgmt_mode *cp;
6077 bt_dev_dbg(hdev, "status %u", status);
6081 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
6086 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
6087 mgmt_status(status));
/* cp->val == 0x00: SC fully disabled. */
6095 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6096 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* cp->val == 0x01: SC enabled, legacy pairing still allowed. */
6099 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6100 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* cp->val == 0x02: SC-only mode. */
6103 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6104 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6108 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
6109 new_settings(hdev, cmd->sk);
6112 mgmt_pending_remove(cmd);
6114 hci_dev_unlock(hdev);
/* MGMT Set Secure Connections command handler: val 0x00 off, 0x01 on,
 * 0x02 SC-only. When the controller cannot act on it right now (powered
 * off, not SC capable, or BR/EDR disabled) only host flags are flipped;
 * otherwise HCI_OP_WRITE_SC_SUPPORT is issued and sc_enable_complete()
 * finishes the command.
 * NOTE(review): lines elided by extraction; comments only.
 */
6117 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6118 void *data, u16 len)
6120 struct mgmt_mode *cp = data;
6121 struct mgmt_pending_cmd *cmd;
6122 struct hci_request req;
6126 bt_dev_dbg(hdev, "sock %p", sk);
6128 if (!lmp_sc_capable(hdev) &&
6129 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6130 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6131 MGMT_STATUS_NOT_SUPPORTED);
/* SC over BR/EDR requires SSP; reject when SSP is off. */
6133 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6134 lmp_sc_capable(hdev) &&
6135 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6136 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6137 MGMT_STATUS_REJECTED);
6139 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6140 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6141 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: nothing to tell the controller. */
6145 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6146 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6150 changed = !hci_dev_test_and_set_flag(hdev,
6152 if (cp->val == 0x02)
6153 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6155 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6157 changed = hci_dev_test_and_clear_flag(hdev,
6159 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6162 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6167 err = new_settings(hdev, sk);
6172 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
6173 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* No-op: both the enabled and SC-only states already match. */
6180 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6181 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6182 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6186 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6192 hci_req_init(&req, hdev);
6193 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
6194 err = hci_req_run(&req, sc_enable_complete);
6196 mgmt_pending_remove(cmd);
6201 hci_dev_unlock(hdev);
/* MGMT Set Debug Keys command handler: val 0x00 discard debug keys,
 * 0x01 keep them (HCI_KEEP_DEBUG_KEYS), 0x02 additionally use the SSP
 * debug key pair (HCI_USE_DEBUG_KEYS). If powered and SSP is on, push
 * the debug-mode change to the controller immediately.
 * NOTE(review): lines elided by extraction; comments only.
 */
6205 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6206 void *data, u16 len)
6208 struct mgmt_mode *cp = data;
6209 bool changed, use_changed;
6212 bt_dev_dbg(hdev, "sock %p", sk);
6214 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6215 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6216 MGMT_STATUS_INVALID_PARAMS);
6221 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6223 changed = hci_dev_test_and_clear_flag(hdev,
6224 HCI_KEEP_DEBUG_KEYS);
6226 if (cp->val == 0x02)
6227 use_changed = !hci_dev_test_and_set_flag(hdev,
6228 HCI_USE_DEBUG_KEYS);
6230 use_changed = hci_dev_test_and_clear_flag(hdev,
6231 HCI_USE_DEBUG_KEYS);
/* Only talk to the controller when the use-mode actually changed. */
6233 if (hdev_is_powered(hdev) && use_changed &&
6234 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6235 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6236 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6237 sizeof(mode), &mode);
6240 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6245 err = new_settings(hdev, sk);
6248 hci_dev_unlock(hdev);
/* MGMT Set Privacy command handler: enable (0x01), enable limited
 * privacy (0x02) or disable (0x00) LE privacy. Stores/clears the local
 * IRK, marks the RPA as expired so a fresh one is generated, and flags
 * RPA resolving support. Only allowed while powered off.
 * NOTE(review): lines elided by extraction; comments only.
 */
6252 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6255 struct mgmt_cp_set_privacy *cp = cp_data;
6259 bt_dev_dbg(hdev, "sock %p", sk);
6261 if (!lmp_le_capable(hdev))
6262 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6263 MGMT_STATUS_NOT_SUPPORTED);
6265 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6266 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6267 MGMT_STATUS_INVALID_PARAMS);
6269 if (hdev_is_powered(hdev))
6270 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6271 MGMT_STATUS_REJECTED);
6275 /* If user space supports this command it is also expected to
6276 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6278 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
/* Enable path: store the caller-supplied IRK and force RPA rotation. */
6281 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6282 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6283 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6284 hci_adv_instances_set_rpa_expired(hdev, true);
6285 if (cp->privacy == 0x02)
6286 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6288 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disable path: wipe the IRK and all privacy-related flags. */
6290 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6291 memset(hdev->irk, 0, sizeof(hdev->irk));
6292 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6293 hci_adv_instances_set_rpa_expired(hdev, false);
6294 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6297 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6302 err = new_settings(hdev, sk);
6305 hci_dev_unlock(hdev);
/* Validate a single IRK entry from Load IRKs: public LE addresses are
 * always acceptable; random LE addresses must be static random (two
 * most-significant bits set). Other address types are rejected.
 * NOTE(review): the return statements are elided by extraction.
 */
6309 static bool irk_is_valid(struct mgmt_irk_info *irk)
6311 switch (irk->addr.type) {
6312 case BDADDR_LE_PUBLIC:
6315 case BDADDR_LE_RANDOM:
6316 /* Two most significant bits shall be set */
6317 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT Load IRKs command handler: replace the kernel's IRK store with
 * the list supplied by user space. Validates the count against the
 * maximum that fits in a u16-sized payload and against the exact
 * struct_size() of the message, validates each entry, skips blocked
 * keys, then marks RPA resolving as supported.
 * NOTE(review): lines elided by extraction; comments only.
 */
6325 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6328 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound on irk_count so expected_len below cannot overflow u16. */
6329 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6330 sizeof(struct mgmt_irk_info));
6331 u16 irk_count, expected_len;
6334 bt_dev_dbg(hdev, "sock %p", sk);
6336 if (!lmp_le_capable(hdev))
6337 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6338 MGMT_STATUS_NOT_SUPPORTED);
6340 irk_count = __le16_to_cpu(cp->irk_count);
6341 if (irk_count > max_irk_count) {
6342 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6344 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6345 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared count exactly. */
6348 expected_len = struct_size(cp, irks, irk_count);
6349 if (expected_len != len) {
6350 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6352 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6353 MGMT_STATUS_INVALID_PARAMS);
6356 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Validate everything before mutating state — all-or-nothing load. */
6358 for (i = 0; i < irk_count; i++) {
6359 struct mgmt_irk_info *key = &cp->irks[i];
6361 if (!irk_is_valid(key))
6362 return mgmt_cmd_status(sk, hdev->id,
6364 MGMT_STATUS_INVALID_PARAMS);
6369 hci_smp_irks_clear(hdev);
6371 for (i = 0; i < irk_count; i++) {
6372 struct mgmt_irk_info *irk = &cp->irks[i];
/* Administratively blocked keys are logged and skipped, not failed. */
6374 if (hci_is_blocked_key(hdev,
6375 HCI_BLOCKED_KEY_TYPE_IRK,
6377 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
6382 hci_add_irk(hdev, &irk->addr.bdaddr,
6383 le_addr_type(irk->addr.type), irk->val,
6387 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6389 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
6391 hci_dev_unlock(hdev);
/* Validate a single LTK entry from Load Long Term Keys: the initiator
 * field must be a strict boolean, and the address must be LE public or
 * static random (two most-significant bits set).
 * NOTE(review): the return statements are elided by extraction.
 */
6396 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6398 if (key->initiator != 0x00 && key->initiator != 0x01)
6401 switch (key->addr.type) {
6402 case BDADDR_LE_PUBLIC:
6405 case BDADDR_LE_RANDOM:
6406 /* Two most significant bits shall be set */
6407 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT Load Long Term Keys command handler: replace the SMP LTK store
 * with the user-space supplied list. Same validation pattern as
 * load_irks(): bounded count, exact struct_size() length, per-entry
 * validation before any mutation, blocked keys skipped. The mgmt key
 * type is mapped onto the SMP key type + authenticated flag.
 * NOTE(review): break statements and the default case appear elided
 * by extraction; comments only, code byte-identical.
 */
6415 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
6416 void *cp_data, u16 len)
6418 struct mgmt_cp_load_long_term_keys *cp = cp_data;
6419 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
6420 sizeof(struct mgmt_ltk_info));
6421 u16 key_count, expected_len;
6424 bt_dev_dbg(hdev, "sock %p", sk);
6426 if (!lmp_le_capable(hdev))
6427 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6428 MGMT_STATUS_NOT_SUPPORTED);
6430 key_count = __le16_to_cpu(cp->key_count);
6431 if (key_count > max_key_count) {
6432 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
6434 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6435 MGMT_STATUS_INVALID_PARAMS);
6438 expected_len = struct_size(cp, keys, key_count);
6439 if (expected_len != len) {
6440 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
6442 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6443 MGMT_STATUS_INVALID_PARAMS);
6446 bt_dev_dbg(hdev, "key_count %u", key_count);
/* Validate all keys first so the store is never half-replaced. */
6448 for (i = 0; i < key_count; i++) {
6449 struct mgmt_ltk_info *key = &cp->keys[i];
6451 if (!ltk_is_valid(key))
6452 return mgmt_cmd_status(sk, hdev->id,
6453 MGMT_OP_LOAD_LONG_TERM_KEYS,
6454 MGMT_STATUS_INVALID_PARAMS);
6459 hci_smp_ltks_clear(hdev);
6461 for (i = 0; i < key_count; i++) {
6462 struct mgmt_ltk_info *key = &cp->keys[i];
6463 u8 type, authenticated;
6465 if (hci_is_blocked_key(hdev,
6466 HCI_BLOCKED_KEY_TYPE_LTK,
6468 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Map mgmt LTK type to SMP key type; legacy (pre-P256) keys are
 * role-dependent: initiator keys become SMP_LTK, responder keys
 * SMP_LTK_RESPONDER.
 */
6473 switch (key->type) {
6474 case MGMT_LTK_UNAUTHENTICATED:
6475 authenticated = 0x00;
6476 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6478 case MGMT_LTK_AUTHENTICATED:
6479 authenticated = 0x01;
6480 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6482 case MGMT_LTK_P256_UNAUTH:
6483 authenticated = 0x00;
6484 type = SMP_LTK_P256;
6486 case MGMT_LTK_P256_AUTH:
6487 authenticated = 0x01;
6488 type = SMP_LTK_P256;
6490 case MGMT_LTK_P256_DEBUG:
6491 authenticated = 0x00;
6492 type = SMP_LTK_P256_DEBUG;
6498 hci_add_ltk(hdev, &key->addr.bdaddr,
6499 le_addr_type(key->addr.type), type, authenticated,
6500 key->val, key->enc_size, key->ediv, key->rand);
6503 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
6506 hci_dev_unlock(hdev);
/* Pending-command completer for Get Connection Info: build the reply
 * from the cached values on the hci_conn (or invalid sentinels on
 * failure), send it, and drop the connection reference taken when the
 * command was queued.
 * NOTE(review): lines elided by extraction; comments only.
 */
6511 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6513 struct hci_conn *conn = cmd->user_data;
6514 struct mgmt_rp_get_conn_info rp;
/* cmd->param starts with the mgmt_addr_info the caller sent. */
6517 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6519 if (status == MGMT_STATUS_SUCCESS) {
6520 rp.rssi = conn->rssi;
6521 rp.tx_power = conn->tx_power;
6522 rp.max_tx_power = conn->max_tx_power;
6524 rp.rssi = HCI_RSSI_INVALID;
6525 rp.tx_power = HCI_TX_POWER_INVALID;
6526 rp.max_tx_power = HCI_TX_POWER_INVALID;
6529 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
6530 status, &rp, sizeof(rp));
/* Balance the hci_conn_hold() done in get_conn_info(). */
6532 hci_conn_drop(conn);
/* HCI request completion for the RSSI/TX-power refresh issued by
 * get_conn_info(): recover the connection handle from the last sent
 * command, look up the matching pending command, and complete it.
 * NOTE(review): lines elided by extraction; comments only.
 */
6538 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
6541 struct hci_cp_read_rssi *cp;
6542 struct mgmt_pending_cmd *cmd;
6543 struct hci_conn *conn;
6547 bt_dev_dbg(hdev, "status 0x%02x", hci_status);
6551 /* Commands sent in request are either Read RSSI or Read Transmit Power
6552 * Level so we check which one was last sent to retrieve connection
6553 * handle. Both commands have handle as first parameter so it's safe to
6554 * cast data on the same command struct.
6556 * First command sent is always Read RSSI and we fail only if it fails.
6557 * In other case we simply override error to indicate success as we
6558 * already remembered if TX power value is actually valid.
6560 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
6562 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
6563 status = MGMT_STATUS_SUCCESS;
6565 status = mgmt_status(hci_status);
6569 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
6573 handle = __le16_to_cpu(cp->handle);
6574 conn = hci_conn_hash_lookup_handle(hdev, handle);
6576 bt_dev_err(hdev, "unknown handle (%u) in conn_info response",
/* Pending command was keyed by the conn pointer in get_conn_info(). */
6581 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
6585 cmd->cmd_complete(cmd, status);
6586 mgmt_pending_remove(cmd);
6589 hci_dev_unlock(hdev);
/* MGMT Get Connection Info command handler: report RSSI / TX power /
 * max TX power for a connected peer. Serves cached values when they are
 * fresh enough; otherwise builds an hci_request reading RSSI (and TX
 * power where still unknown) and completes asynchronously through
 * conn_info_refresh_complete().
 * NOTE(review): lines elided by extraction (goto labels, error checks,
 * unlock/return missing); comments only, code byte-identical.
 */
6592 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
6595 struct mgmt_cp_get_conn_info *cp = data;
6596 struct mgmt_rp_get_conn_info rp;
6597 struct hci_conn *conn;
6598 unsigned long conn_info_age;
6601 bt_dev_dbg(hdev, "sock %p", sk);
6603 memset(&rp, 0, sizeof(rp));
6604 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6605 rp.addr.type = cp->addr.type;
6607 if (!bdaddr_type_is_valid(cp->addr.type))
6608 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6609 MGMT_STATUS_INVALID_PARAMS,
6614 if (!hdev_is_powered(hdev)) {
6615 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6616 MGMT_STATUS_NOT_POWERED, &rp,
/* Resolve the address to a live connection on the right transport. */
6621 if (cp->addr.type == BDADDR_BREDR)
6622 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6625 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
6627 if (!conn || conn->state != BT_CONNECTED) {
6628 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6629 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one refresh per connection may be in flight. */
6634 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
6635 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6636 MGMT_STATUS_BUSY, &rp, sizeof(rp));
6640 /* To avoid client trying to guess when to poll again for information we
6641 * calculate conn info age as random value between min/max set in hdev.
6643 conn_info_age = hdev->conn_info_min_age +
6644 prandom_u32_max(hdev->conn_info_max_age -
6645 hdev->conn_info_min_age);
6647 /* Query controller to refresh cached values if they are too old or were
/* timestamp == 0 means the values were never read for this conn. */
6650 if (time_after(jiffies, conn->conn_info_timestamp +
6651 msecs_to_jiffies(conn_info_age)) ||
6652 !conn->conn_info_timestamp) {
6653 struct hci_request req;
6654 struct hci_cp_read_tx_power req_txp_cp;
6655 struct hci_cp_read_rssi req_rssi_cp;
6656 struct mgmt_pending_cmd *cmd;
6658 hci_req_init(&req, hdev);
6659 req_rssi_cp.handle = cpu_to_le16(conn->handle);
6660 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
6663 /* For LE links TX power does not change thus we don't need to
6664 * query for it once value is known.
6666 if (!bdaddr_type_is_le(cp->addr.type) ||
6667 conn->tx_power == HCI_TX_POWER_INVALID) {
6668 req_txp_cp.handle = cpu_to_le16(conn->handle);
6669 req_txp_cp.type = 0x00;
6670 hci_req_add(&req, HCI_OP_READ_TX_POWER,
6671 sizeof(req_txp_cp), &req_txp_cp);
6674 /* Max TX power needs to be read only once per connection */
6675 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
6676 req_txp_cp.handle = cpu_to_le16(conn->handle);
6677 req_txp_cp.type = 0x01;
6678 hci_req_add(&req, HCI_OP_READ_TX_POWER,
6679 sizeof(req_txp_cp), &req_txp_cp);
6682 err = hci_req_run(&req, conn_info_refresh_complete);
6686 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold + get: released again in conn_info_cmd_complete(). */
6693 hci_conn_hold(conn);
6694 cmd->user_data = hci_conn_get(conn);
6695 cmd->cmd_complete = conn_info_cmd_complete;
6697 conn->conn_info_timestamp = jiffies;
6699 /* Cache is valid, just reply with values cached in hci_conn */
6700 rp.rssi = conn->rssi;
6701 rp.tx_power = conn->tx_power;
6702 rp.max_tx_power = conn->max_tx_power;
6704 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6705 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6709 hci_dev_unlock(hdev);
/* Pending-command completer for Get Clock Info: fill the response with
 * the local clock and, when a connection was involved, the piconet
 * clock and accuracy, then release the references taken earlier.
 * NOTE(review): lines elided by extraction (null checks, hci_dev_put,
 * return missing); comments only, code byte-identical.
 */
6713 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6715 struct hci_conn *conn = cmd->user_data;
6716 struct mgmt_rp_get_clock_info rp;
6717 struct hci_dev *hdev;
6720 memset(&rp, 0, sizeof(rp));
6721 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
/* Re-acquire hdev by index — cmd does not keep an hdev pointer. */
6726 hdev = hci_dev_get(cmd->index);
6728 rp.local_clock = cpu_to_le32(hdev->clock);
6733 rp.piconet_clock = cpu_to_le32(conn->clock);
6734 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6738 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6742 hci_conn_drop(conn);
/* HCI request completion for Get Clock Info: recover the connection
 * (if the piconet clock was requested, which == 0x01) from the sent
 * Read Clock command, find the matching pending command and complete it.
 * NOTE(review): lines elided by extraction; comments only.
 */
6749 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6751 struct hci_cp_read_clock *hci_cp;
6752 struct mgmt_pending_cmd *cmd;
6753 struct hci_conn *conn;
6755 bt_dev_dbg(hdev, "status %u", status);
6759 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means the piconet clock of a specific connection. */
6763 if (hci_cp->which) {
6764 u16 handle = __le16_to_cpu(hci_cp->handle);
6765 conn = hci_conn_hash_lookup_handle(hdev, handle);
6770 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
6774 cmd->cmd_complete(cmd, mgmt_status(status));
6775 mgmt_pending_remove(cmd);
6778 hci_dev_unlock(hdev);
/* MGMT Get Clock Info command handler: read the local Bluetooth clock
 * and, when a peer address is given, the piconet clock of that BR/EDR
 * connection. Queues two HCI Read Clock commands (which=0x00 local,
 * which=0x01 piconet) completed by get_clock_info_complete().
 * NOTE(review): lines elided by extraction; comments only.
 */
6781 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
6784 struct mgmt_cp_get_clock_info *cp = data;
6785 struct mgmt_rp_get_clock_info rp;
6786 struct hci_cp_read_clock hci_cp;
6787 struct mgmt_pending_cmd *cmd;
6788 struct hci_request req;
6789 struct hci_conn *conn;
6792 bt_dev_dbg(hdev, "sock %p", sk);
6794 memset(&rp, 0, sizeof(rp));
6795 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6796 rp.addr.type = cp->addr.type;
/* Piconet clocks only exist on BR/EDR — reject LE address types. */
6798 if (cp->addr.type != BDADDR_BREDR)
6799 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6800 MGMT_STATUS_INVALID_PARAMS,
6805 if (!hdev_is_powered(hdev)) {
6806 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6807 MGMT_STATUS_NOT_POWERED, &rp,
/* Non-ANY address: the peer must be currently connected. */
6812 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6813 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6815 if (!conn || conn->state != BT_CONNECTED) {
6816 err = mgmt_cmd_complete(sk, hdev->id,
6817 MGMT_OP_GET_CLOCK_INFO,
6818 MGMT_STATUS_NOT_CONNECTED,
6826 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
6832 cmd->cmd_complete = clock_info_cmd_complete;
6834 hci_req_init(&req, hdev);
/* which=0x00 in the zeroed struct: read the local clock first. */
6836 memset(&hci_cp, 0, sizeof(hci_cp));
6837 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* References released in clock_info_cmd_complete(). */
6840 hci_conn_hold(conn);
6841 cmd->user_data = hci_conn_get(conn);
6843 hci_cp.handle = cpu_to_le16(conn->handle);
6844 hci_cp.which = 0x01; /* Piconet clock */
6845 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
6848 err = hci_req_run(&req, get_clock_info_complete);
6850 mgmt_pending_remove(cmd);
6853 hci_dev_unlock(hdev);
/* Check whether an LE connection to the given address/type exists and
 * is fully established (BT_CONNECTED).
 * NOTE(review): the return statements are elided by extraction.
 */
6857 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6859 struct hci_conn *conn;
6861 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6865 if (conn->dst_type != type)
6868 if (conn->state != BT_CONNECTED)
6874 /* This function requires the caller holds hdev->lock */
/* Set the auto-connect policy for an LE peer, creating conn_params on
 * demand, and move the params onto the matching action list:
 * pend_le_conns for connect policies, pend_le_reports for REPORT.
 * NOTE(review): break statements and error returns are elided by
 * extraction; comments only, code byte-identical.
 */
6875 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
6876 u8 addr_type, u8 auto_connect)
6878 struct hci_conn_params *params;
6880 params = hci_conn_params_add(hdev, addr, addr_type);
/* No change requested — nothing to do. */
6884 if (params->auto_connect == auto_connect)
/* Detach from whichever action list the params are currently on. */
6887 list_del_init(&params->action);
6889 switch (auto_connect) {
6890 case HCI_AUTO_CONN_DISABLED:
6891 case HCI_AUTO_CONN_LINK_LOSS:
6892 /* If auto connect is being disabled when we're trying to
6893 * connect to device, keep connecting.
6895 if (params->explicit_connect)
6896 list_add(&params->action, &hdev->pend_le_conns);
6898 case HCI_AUTO_CONN_REPORT:
6899 if (params->explicit_connect)
6900 list_add(&params->action, &hdev->pend_le_conns);
6902 list_add(&params->action, &hdev->pend_le_reports);
6904 case HCI_AUTO_CONN_DIRECT:
6905 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection if not already connected. */
6906 if (!is_connected(hdev, addr, addr_type))
6907 list_add(&params->action, &hdev->pend_le_conns);
6911 params->auto_connect = auto_connect;
6913 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
6914 addr, addr_type, auto_connect);
/* Emit a Device Added mgmt event for the given address/type/action,
 * excluding the originating socket sk from the broadcast.
 * NOTE(review): the ev.action assignment line is elided by extraction.
 */
6919 static void device_added(struct sock *sk, struct hci_dev *hdev,
6920 bdaddr_t *bdaddr, u8 type, u8 action)
6922 struct mgmt_ev_device_added ev;
6924 bacpy(&ev.addr.bdaddr, bdaddr);
6925 ev.addr.type = type;
6928 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* Handler for the MGMT Add Device command.
 *
 * Validates the address type and action (0x00 background-scan report,
 * 0x01 allow-incoming/direct, 0x02 auto-connect), then either adds the
 * device to the BR/EDR accept list or installs LE connection parameters
 * with the corresponding auto-connect policy.
 *
 * NOTE(review): partial extraction — goto targets, braces and several
 * statements between the visible lines are missing from this view.
 */
6931 static int add_device(struct sock *sk, struct hci_dev *hdev,
6932 void *data, u16 len)
6934 struct mgmt_cp_add_device *cp = data;
6935 u8 auto_conn, addr_type;
6936 struct hci_conn_params *params;
6938 u32 current_flags = 0;
6940 bt_dev_dbg(hdev, "sock %p", sk);
/* Reject invalid address types and the all-zero BDADDR_ANY address. */
6942 if (!bdaddr_type_is_valid(cp->addr.type) ||
6943 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
6944 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6945 MGMT_STATUS_INVALID_PARAMS,
6946 &cp->addr, sizeof(cp->addr));
6948 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
6949 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6950 MGMT_STATUS_INVALID_PARAMS,
6951 &cp->addr, sizeof(cp->addr));
/* BR/EDR branch: only the allow-incoming action (0x01) is accepted. */
6955 if (cp->addr.type == BDADDR_BREDR) {
6956 /* Only incoming connections action is supported for now */
6957 if (cp->action != 0x01) {
6958 err = mgmt_cmd_complete(sk, hdev->id,
6960 MGMT_STATUS_INVALID_PARAMS,
6961 &cp->addr, sizeof(cp->addr));
6965 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
6971 hci_req_update_scan(hdev);
/* LE branch: translate the MGMT action into an auto-connect policy. */
6976 addr_type = le_addr_type(cp->addr.type);
6978 if (cp->action == 0x02)
6979 auto_conn = HCI_AUTO_CONN_ALWAYS;
6980 else if (cp->action == 0x01)
6981 auto_conn = HCI_AUTO_CONN_DIRECT;
6983 auto_conn = HCI_AUTO_CONN_REPORT;
6985 /* Kernel internally uses conn_params with resolvable private
6986 * address, but Add Device allows only identity addresses.
6987 * Make sure it is enforced before calling
6988 * hci_conn_params_lookup.
6990 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6991 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6992 MGMT_STATUS_INVALID_PARAMS,
6993 &cp->addr, sizeof(cp->addr));
6997 /* If the connection parameters don't exist for this device,
6998 * they will be created and configured with defaults.
7000 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7002 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7003 MGMT_STATUS_FAILED, &cp->addr,
7007 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7010 current_flags = params->current_flags;
7013 hci_update_background_scan(hdev);
/* Notify listeners: device added + its (initial) device flags. */
7016 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7017 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7018 SUPPORTED_DEVICE_FLAGS(), current_flags);
7020 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7021 MGMT_STATUS_SUCCESS, &cp->addr,
7025 hci_dev_unlock(hdev);
/* Emit the MGMT_EV_DEVICE_REMOVED event carrying the removed device's
 * address and address type.  @sk is the originating socket.
 */
7029 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7030 bdaddr_t *bdaddr, u8 type)
7032 struct mgmt_ev_device_removed ev;
7034 bacpy(&ev.addr.bdaddr, bdaddr);
7035 ev.addr.type = type;
7037 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Handler for the MGMT Remove Device command.
 *
 * A specific (non-BDADDR_ANY) address removes that one device from the
 * BR/EDR accept list or deletes its LE connection parameters; BDADDR_ANY
 * clears the whole accept list and all removable LE conn params.
 *
 * Fix: restored "&params" on the two list_del() lines below where the
 * '&' had been mis-decoded as the HTML entity "&para;" ("¶ms").
 *
 * NOTE(review): partial extraction — goto targets, braces and several
 * statements between the visible lines are missing from this view.
 */
7040 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7041 void *data, u16 len)
7043 struct mgmt_cp_remove_device *cp = data;
7046 bt_dev_dbg(hdev, "sock %p", sk);
/* Non-zero address: remove a single device. */
7050 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7051 struct hci_conn_params *params;
7054 if (!bdaddr_type_is_valid(cp->addr.type)) {
7055 err = mgmt_cmd_complete(sk, hdev->id,
7056 MGMT_OP_REMOVE_DEVICE,
7057 MGMT_STATUS_INVALID_PARAMS,
7058 &cp->addr, sizeof(cp->addr));
7062 if (cp->addr.type == BDADDR_BREDR) {
7063 err = hci_bdaddr_list_del(&hdev->accept_list,
7067 err = mgmt_cmd_complete(sk, hdev->id,
7068 MGMT_OP_REMOVE_DEVICE,
7069 MGMT_STATUS_INVALID_PARAMS,
7075 hci_req_update_scan(hdev);
7077 device_removed(sk, hdev, &cp->addr.bdaddr,
7082 addr_type = le_addr_type(cp->addr.type);
7084 /* Kernel internally uses conn_params with resolvable private
7085 * address, but Remove Device allows only identity addresses.
7086 * Make sure it is enforced before calling
7087 * hci_conn_params_lookup.
7089 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7090 err = mgmt_cmd_complete(sk, hdev->id,
7091 MGMT_OP_REMOVE_DEVICE,
7092 MGMT_STATUS_INVALID_PARAMS,
7093 &cp->addr, sizeof(cp->addr));
7097 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7100 err = mgmt_cmd_complete(sk, hdev->id,
7101 MGMT_OP_REMOVE_DEVICE,
7102 MGMT_STATUS_INVALID_PARAMS,
7103 &cp->addr, sizeof(cp->addr));
/* Params in DISABLED/EXPLICIT state were not installed via Add Device,
 * so refuse to remove them through this command.
 */
7107 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7108 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7109 err = mgmt_cmd_complete(sk, hdev->id,
7110 MGMT_OP_REMOVE_DEVICE,
7111 MGMT_STATUS_INVALID_PARAMS,
7112 &cp->addr, sizeof(cp->addr));
7116 list_del(&params->action);
7117 list_del(&params->list);
7119 hci_update_background_scan(hdev);
7121 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: wholesale removal; address type must be 0 here. */
7123 struct hci_conn_params *p, *tmp;
7124 struct bdaddr_list *b, *btmp;
7126 if (cp->addr.type) {
7127 err = mgmt_cmd_complete(sk, hdev->id,
7128 MGMT_OP_REMOVE_DEVICE,
7129 MGMT_STATUS_INVALID_PARAMS,
7130 &cp->addr, sizeof(cp->addr));
7134 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7135 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7140 hci_req_update_scan(hdev);
7142 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7143 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7145 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Keep explicitly-connecting entries alive, just demote them. */
7146 if (p->explicit_connect) {
7147 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7150 list_del(&p->action);
7155 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7157 hci_update_background_scan(hdev);
7161 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7162 MGMT_STATUS_SUCCESS, &cp->addr,
7165 hci_dev_unlock(hdev);
/* Handler for the MGMT Load Connection Parameters command.
 *
 * Validates the parameter count against the payload length, clears any
 * previously-disabled conn params, then installs each supplied set of LE
 * connection parameters (interval min/max, latency, supervision timeout),
 * skipping entries with invalid address types or out-of-range values.
 *
 * Fix: restored "&param" on two lines below where the '&' had been
 * mis-decoded as the HTML entity "&para;" ("¶m").
 *
 * NOTE(review): partial extraction — braces and several statements
 * between the visible lines are missing from this view.
 */
7169 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7172 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound on param_count so expected_len below cannot overflow u16. */
7173 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7174 sizeof(struct mgmt_conn_param));
7175 u16 param_count, expected_len;
7178 if (!lmp_le_capable(hdev))
7179 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7180 MGMT_STATUS_NOT_SUPPORTED);
7182 param_count = __le16_to_cpu(cp->param_count);
7183 if (param_count > max_param_count) {
7184 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7186 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7187 MGMT_STATUS_INVALID_PARAMS);
7190 expected_len = struct_size(cp, params, param_count);
7191 if (expected_len != len) {
7192 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7194 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7195 MGMT_STATUS_INVALID_PARAMS);
7198 bt_dev_dbg(hdev, "param_count %u", param_count);
7202 hci_conn_params_clear_disabled(hdev);
7204 for (i = 0; i < param_count; i++) {
7205 struct mgmt_conn_param *param = &cp->params[i];
7206 struct hci_conn_params *hci_param;
7207 u16 min, max, latency, timeout;
7210 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7213 if (param->addr.type == BDADDR_LE_PUBLIC) {
7214 addr_type = ADDR_LE_DEV_PUBLIC;
7215 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7216 addr_type = ADDR_LE_DEV_RANDOM;
7218 bt_dev_err(hdev, "ignoring invalid connection parameters");
7222 min = le16_to_cpu(param->min_interval);
7223 max = le16_to_cpu(param->max_interval);
7224 latency = le16_to_cpu(param->latency);
7225 timeout = le16_to_cpu(param->timeout);
7227 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7228 min, max, latency, timeout);
7230 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7231 bt_dev_err(hdev, "ignoring invalid connection parameters");
7235 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7238 bt_dev_err(hdev, "failed to add connection parameters");
7242 hci_param->conn_min_interval = min;
7243 hci_param->conn_max_interval = max;
7244 hci_param->conn_latency = latency;
7245 hci_param->supervision_timeout = timeout;
7248 hci_dev_unlock(hdev);
7250 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* Handler for the MGMT Set External Configuration command.
 *
 * Only valid while the controller is powered off and only for
 * controllers with the EXTERNAL_CONFIG quirk.  Toggles the
 * HCI_EXT_CONFIGURED flag and, when the configured state changes,
 * re-registers the index and kicks off power-on or raw mode.
 */
7254 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7255 void *data, u16 len)
7257 struct mgmt_cp_set_external_config *cp = data;
7261 bt_dev_dbg(hdev, "sock %p", sk);
7263 if (hdev_is_powered(hdev))
7264 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7265 MGMT_STATUS_REJECTED);
7267 if (cp->config != 0x00 && cp->config != 0x01)
7268 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7269 MGMT_STATUS_INVALID_PARAMS);
7271 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7272 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7273 MGMT_STATUS_NOT_SUPPORTED);
/* test_and_set / test_and_clear so 'changed' reflects a real flip. */
7278 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7280 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7282 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7289 err = new_options(hdev, sk);
/* If the configured state flipped, cycle the index registration. */
7291 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7292 mgmt_index_removed(hdev);
7294 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7295 hci_dev_set_flag(hdev, HCI_CONFIG);
7296 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7298 queue_work(hdev->req_workqueue, &hdev->power_on);
7300 set_bit(HCI_RAW, &hdev->flags);
7301 mgmt_index_added(hdev);
7306 hci_dev_unlock(hdev);
/* Handler for the MGMT Set Public Address command.
 *
 * Only valid while powered off and when the driver provides a
 * set_bdaddr callback.  Stores the new public address and, if the
 * controller thereby becomes configured, re-registers the index and
 * schedules power-on.
 */
7310 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7311 void *data, u16 len)
7313 struct mgmt_cp_set_public_address *cp = data;
7317 bt_dev_dbg(hdev, "sock %p", sk);
7319 if (hdev_is_powered(hdev))
7320 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7321 MGMT_STATUS_REJECTED);
7323 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7324 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7325 MGMT_STATUS_INVALID_PARAMS);
7327 if (!hdev->set_bdaddr)
7328 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7329 MGMT_STATUS_NOT_SUPPORTED);
/* Track whether the address actually changed before overwriting it. */
7333 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7334 bacpy(&hdev->public_addr, &cp->bdaddr);
7336 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7343 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7344 err = new_options(hdev, sk);
7346 if (is_configured(hdev)) {
7347 mgmt_index_removed(hdev);
7349 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
7351 hci_dev_set_flag(hdev, HCI_CONFIG);
7352 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7354 queue_work(hdev->req_workqueue, &hdev->power_on);
7358 hci_dev_unlock(hdev);
/* HCI request completion callback for Read Local OOB Extended Data.
 *
 * Parses the HCI reply (legacy OOB data or extended SC OOB data),
 * builds an EIR-formatted management response, completes the pending
 * command, and broadcasts MGMT_EV_LOCAL_OOB_DATA_UPDATED to other
 * sockets that opted into OOB data events.
 *
 * NOTE(review): partial extraction — the h192/r192/h256/r256 pointer
 * assignments and some branches are missing from this view.
 */
7362 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
7363 u16 opcode, struct sk_buff *skb)
7365 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
7366 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
7367 u8 *h192, *r192, *h256, *r256;
7368 struct mgmt_pending_cmd *cmd;
7372 bt_dev_dbg(hdev, "status %u", status);
7374 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
7378 mgmt_cp = cmd->param;
7381 status = mgmt_status(status);
/* Legacy (P-192 only) OOB data reply. */
7388 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
7389 struct hci_rp_read_local_oob_data *rp;
7391 if (skb->len != sizeof(*rp)) {
7392 status = MGMT_STATUS_FAILED;
7395 status = MGMT_STATUS_SUCCESS;
7396 rp = (void *)skb->data;
/* EIR: class-of-device (5) + hash (18) + randomizer (18). */
7398 eir_len = 5 + 18 + 18;
/* Extended (P-192 + P-256) OOB data reply. */
7405 struct hci_rp_read_local_oob_ext_data *rp;
7407 if (skb->len != sizeof(*rp)) {
7408 status = MGMT_STATUS_FAILED;
7411 status = MGMT_STATUS_SUCCESS;
7412 rp = (void *)skb->data;
7414 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
7415 eir_len = 5 + 18 + 18;
7419 eir_len = 5 + 18 + 18 + 18 + 18;
7429 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
7436 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
7437 hdev->dev_class, 3);
7440 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7441 EIR_SSP_HASH_C192, h192, 16);
7442 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7443 EIR_SSP_RAND_R192, r192, 16);
7447 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7448 EIR_SSP_HASH_C256, h256, 16);
7449 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7450 EIR_SSP_RAND_R256, r256, 16);
7454 mgmt_rp->type = mgmt_cp->type;
7455 mgmt_rp->eir_len = cpu_to_le16(eir_len);
7457 err = mgmt_cmd_complete(cmd->sk, hdev->id,
7458 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
7459 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
7460 if (err < 0 || status)
/* Broadcast the fresh OOB data to other interested sockets. */
7463 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
7465 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7466 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
7467 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
7470 mgmt_pending_remove(cmd);
/* Queue an HCI request reading local SSP OOB data for the BR/EDR case of
 * Read Local OOB Extended Data.  Uses the extended (P-256 capable) HCI
 * command when BR/EDR secure connections are enabled, the legacy one
 * otherwise; completion is handled by read_local_oob_ext_data_complete().
 */
7473 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7474 struct mgmt_cp_read_local_oob_ext_data *cp)
7476 struct mgmt_pending_cmd *cmd;
7477 struct hci_request req;
7480 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7485 hci_req_init(&req, hdev);
7487 if (bredr_sc_enabled(hdev))
7488 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7490 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7492 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
7494 mgmt_pending_remove(cmd);
/* Handler for the MGMT Read Local OOB Extended Data command.
 *
 * For BR/EDR with SSP enabled the work is deferred to an HCI request
 * (read_local_ssp_oob_req); for LE the EIR payload (address, role,
 * optional SC confirm/random values, flags) is built synchronously.
 * On success the data is also broadcast via LOCAL_OOB_DATA_UPDATED.
 *
 * NOTE(review): partial extraction — goto targets, braces and some
 * assignments (e.g. the role value) are missing from this view.
 */
7501 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
7502 void *data, u16 data_len)
7504 struct mgmt_cp_read_local_oob_ext_data *cp = data;
7505 struct mgmt_rp_read_local_oob_ext_data *rp;
7508 u8 status, flags, role, addr[7], hash[16], rand[16];
7511 bt_dev_dbg(hdev, "sock %p", sk);
/* Pre-compute support status and EIR length per requested transport. */
7513 if (hdev_is_powered(hdev)) {
7515 case BIT(BDADDR_BREDR):
7516 status = mgmt_bredr_support(hdev);
7522 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7523 status = mgmt_le_support(hdev);
7527 eir_len = 9 + 3 + 18 + 18 + 3;
7530 status = MGMT_STATUS_INVALID_PARAMS;
7535 status = MGMT_STATUS_NOT_POWERED;
7539 rp_len = sizeof(*rp) + eir_len;
7540 rp = kmalloc(rp_len, GFP_ATOMIC);
7544 if (!status && !lmp_ssp_capable(hdev)) {
7545 status = MGMT_STATUS_NOT_SUPPORTED;
7556 case BIT(BDADDR_BREDR):
/* SSP enabled: fetch OOB data asynchronously via HCI. */
7557 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7558 err = read_local_ssp_oob_req(hdev, sk, cp);
7559 hci_dev_unlock(hdev);
7563 status = MGMT_STATUS_FAILED;
7566 eir_len = eir_append_data(rp->eir, eir_len,
7568 hdev->dev_class, 3);
7571 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7572 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
7573 smp_generate_oob(hdev, hash, rand) < 0) {
7574 hci_dev_unlock(hdev);
7575 status = MGMT_STATUS_FAILED;
7579 /* This should return the active RPA, but since the RPA
7580 * is only programmed on demand, it is really hard to fill
7581 * this in at the moment. For now disallow retrieving
7582 * local out-of-band data when privacy is in use.
7584 * Returning the identity address will not help here since
7585 * pairing happens before the identity resolving key is
7586 * known and thus the connection establishment happens
7587 * based on the RPA and not the identity address.
7589 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
7590 hci_dev_unlock(hdev);
7591 status = MGMT_STATUS_REJECTED;
/* Pick static vs public address the same way advertising does. */
7595 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
7596 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
7597 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
7598 bacmp(&hdev->static_addr, BDADDR_ANY))) {
7599 memcpy(addr, &hdev->static_addr, 6);
7602 memcpy(addr, &hdev->bdaddr, 6);
7606 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
7607 addr, sizeof(addr));
7609 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7614 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
7615 &role, sizeof(role));
7617 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
7618 eir_len = eir_append_data(rp->eir, eir_len,
7620 hash, sizeof(hash));
7622 eir_len = eir_append_data(rp->eir, eir_len,
7624 rand, sizeof(rand));
7627 flags = mgmt_get_adv_discov_flags(hdev);
7629 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
7630 flags |= LE_AD_NO_BREDR;
7632 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
7633 &flags, sizeof(flags));
7637 hci_dev_unlock(hdev);
7639 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
7641 status = MGMT_STATUS_SUCCESS;
7644 rp->type = cp->type;
7645 rp->eir_len = cpu_to_le16(eir_len);
7647 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
7648 status, rp, sizeof(*rp) + eir_len);
7649 if (err < 0 || status)
7652 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7653 rp, sizeof(*rp) + eir_len,
7654 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Build the bitmask of MGMT advertising flags this controller supports.
 * The base set is always offered; TX power and the secondary-PHY flags
 * depend on controller capabilities (extended advertising, 2M/Coded PHY).
 */
7662 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7666 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7667 flags |= MGMT_ADV_FLAG_DISCOV;
7668 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7669 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7670 flags |= MGMT_ADV_FLAG_APPEARANCE;
7671 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7672 flags |= MGMT_ADV_PARAM_DURATION;
7673 flags |= MGMT_ADV_PARAM_TIMEOUT;
7674 flags |= MGMT_ADV_PARAM_INTERVALS;
7675 flags |= MGMT_ADV_PARAM_TX_POWER;
7676 flags |= MGMT_ADV_PARAM_SCAN_RSP;
7678 /* In extended adv TX_POWER returned from Set Adv Param
7679 * will be always valid.
7681 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7682 ext_adv_capable(hdev))
7683 flags |= MGMT_ADV_FLAG_TX_POWER;
7685 if (ext_adv_capable(hdev)) {
7686 flags |= MGMT_ADV_FLAG_SEC_1M;
7687 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7688 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
/* Secondary PHY flags require the corresponding LE feature bits. */
7690 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7691 flags |= MGMT_ADV_FLAG_SEC_2M;
7693 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7694 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* Handler for the MGMT Read Advertising Features command.
 *
 * Returns the supported advertising flags, data-length limits, the
 * maximum number of instances, and the list of currently registered
 * instance IDs.  Rejected when LE is unsupported or the experimental
 * LL privacy mode is enabled.
 */
7700 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7701 void *data, u16 data_len)
7703 struct mgmt_rp_read_adv_features *rp;
7706 struct adv_info *adv_instance;
7707 u32 supported_flags;
7710 bt_dev_dbg(hdev, "sock %p", sk);
7712 if (!lmp_le_capable(hdev))
7713 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7714 MGMT_STATUS_REJECTED);
7716 /* Enabling the experimental LL Privacy support disables support for
7719 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7720 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7721 MGMT_STATUS_NOT_SUPPORTED);
/* One trailing byte per registered instance for the ID list. */
7725 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7726 rp = kmalloc(rp_len, GFP_ATOMIC);
7728 hci_dev_unlock(hdev);
7732 supported_flags = get_supported_adv_flags(hdev);
7734 rp->supported_flags = cpu_to_le32(supported_flags);
7735 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7736 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7737 rp->max_instances = hdev->le_num_of_adv_sets;
7738 rp->num_instances = hdev->adv_instance_cnt;
7740 instance = rp->instance;
7741 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7742 *instance = adv_instance->instance;
7746 hci_dev_unlock(hdev);
7748 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7749 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Return the number of EIR bytes the local name would occupy, by
 * rendering it into a scratch buffer (short name + type/len overhead).
 */
7756 static u8 calculate_name_len(struct hci_dev *hdev)
7758 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7760 return eir_append_local_name(hdev, buf, 0);
/* Compute how many bytes of caller-supplied TLV data fit in an
 * advertising (or scan-response) PDU after subtracting the space that
 * kernel-managed fields (flags, TX power, local name, appearance)
 * will consume for the given @adv_flags.
 */
7763 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7766 u8 max_len = HCI_MAX_AD_LENGTH;
7769 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7770 MGMT_ADV_FLAG_LIMITED_DISCOV |
7771 MGMT_ADV_FLAG_MANAGED_FLAGS))
7774 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7777 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7778 max_len -= calculate_name_len(hdev);
7780 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
/* True if the kernel manages the EIR Flags field for these adv flags. */
7787 static bool flags_managed(u32 adv_flags)
7789 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7790 MGMT_ADV_FLAG_LIMITED_DISCOV |
7791 MGMT_ADV_FLAG_MANAGED_FLAGS);
/* True if the kernel manages the EIR TX-power field for these flags. */
7794 static bool tx_power_managed(u32 adv_flags)
7796 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
/* True if the kernel manages the local-name EIR field for these flags. */
7799 static bool name_managed(u32 adv_flags)
7801 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
/* True if the kernel manages the appearance EIR field for these flags. */
7804 static bool appearance_managed(u32 adv_flags)
7806 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate caller-supplied advertising/scan-response TLV data: it must
 * fit within the computed maximum length, be well-formed (each field's
 * length stays inside the buffer), and must not contain EIR fields that
 * the kernel manages itself for the given @adv_flags.
 *
 * NOTE(review): partial extraction — the length check against max_len
 * and the cur_len assignment are missing from this view.
 */
7809 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7810 u8 len, bool is_adv_data)
7815 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7820 /* Make sure that the data is correctly formatted. */
7821 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
/* Reject fields the kernel generates itself (flags, TX power,
 * name, appearance) so they cannot appear twice in the PDU.
 */
7827 if (data[i + 1] == EIR_FLAGS &&
7828 (!is_adv_data || flags_managed(adv_flags)))
7831 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7834 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7837 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7840 if (data[i + 1] == EIR_APPEARANCE &&
7841 appearance_managed(adv_flags))
7844 /* If the current field length would exceed the total data
7845 * length, then it's invalid.
7847 if (i + cur_len >= len)
/* Check that @adv_flags only contains supported bits and that at most
 * one secondary-PHY (SEC_*) flag is set.  The power-of-two test
 * "phy_flags ^ (phy_flags & -phy_flags)" is non-zero exactly when more
 * than one SEC bit is set.
 */
7854 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7856 u32 supported_flags, phy_flags;
7858 /* The current implementation only supports a subset of the specified
7859 * flags. Also need to check mutual exclusiveness of sec flags.
7861 supported_flags = get_supported_adv_flags(hdev);
7862 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
7863 if (adv_flags & ~supported_flags ||
7864 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
/* True if any advertising-related MGMT command (or Set LE) is still
 * pending on @hdev, in which case new advertising commands are rejected
 * as busy.
 */
7870 static bool adv_busy(struct hci_dev *hdev)
7872 return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7873 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7874 pending_find(MGMT_OP_SET_LE, hdev) ||
7875 pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
7876 pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
/* HCI request completion callback shared by Add Advertising and Add
 * Extended Advertising Data.
 *
 * On failure, removes any instances still marked pending (cancelling
 * the rotation timer if the current instance is affected) and signals
 * their removal; then completes the pending MGMT command with the
 * translated status.
 */
7879 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
7882 struct mgmt_pending_cmd *cmd;
7883 struct mgmt_cp_add_advertising *cp;
7884 struct mgmt_rp_add_advertising rp;
7885 struct adv_info *adv_instance, *n;
7888 bt_dev_dbg(hdev, "status %u", status);
/* Either command may have triggered this request; find whichever. */
7892 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
7894 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);
7896 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
7897 if (!adv_instance->pending)
7901 adv_instance->pending = false;
7905 instance = adv_instance->instance;
7907 if (hdev->cur_adv_instance == instance)
7908 cancel_adv_timeout(hdev);
7910 hci_remove_adv_instance(hdev, instance);
7911 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
7918 rp.instance = cp->instance;
7921 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
7922 mgmt_status(status));
7924 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
7925 mgmt_status(status), &rp, sizeof(rp));
7927 mgmt_pending_remove(cmd);
7930 hci_dev_unlock(hdev);
/* Handler for the MGMT Add Advertising command.
 *
 * Validates the instance ID, payload length, flags and TLV data,
 * registers (or replaces) the advertising instance, and schedules it
 * via an HCI request when the controller is powered and no legacy
 * HCI_ADVERTISING mode is active.
 */
7933 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
7934 void *data, u16 data_len)
7936 struct mgmt_cp_add_advertising *cp = data;
7937 struct mgmt_rp_add_advertising rp;
7940 u16 timeout, duration;
7941 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
7942 u8 schedule_instance = 0;
7943 struct adv_info *next_instance;
7945 struct mgmt_pending_cmd *cmd;
7946 struct hci_request req;
7948 bt_dev_dbg(hdev, "sock %p", sk);
7950 status = mgmt_le_support(hdev);
7952 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7955 /* Enabling the experimental LL Privacy support disables support for
7958 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7959 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7960 MGMT_STATUS_NOT_SUPPORTED);
7962 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7963 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7964 MGMT_STATUS_INVALID_PARAMS);
/* Payload must exactly cover header + adv data + scan-rsp data. */
7966 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
7967 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7968 MGMT_STATUS_INVALID_PARAMS);
7970 flags = __le32_to_cpu(cp->flags);
7971 timeout = __le16_to_cpu(cp->timeout);
7972 duration = __le16_to_cpu(cp->duration);
7974 if (!requested_adv_flags_are_valid(hdev, flags))
7975 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7976 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense on a powered controller. */
7980 if (timeout && !hdev_is_powered(hdev)) {
7981 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7982 MGMT_STATUS_REJECTED);
7986 if (adv_busy(hdev)) {
7987 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7992 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
7993 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
7994 cp->scan_rsp_len, false)) {
7995 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7996 MGMT_STATUS_INVALID_PARAMS);
8000 err = hci_add_adv_instance(hdev, cp->instance, flags,
8001 cp->adv_data_len, cp->data,
8003 cp->data + cp->adv_data_len,
8005 HCI_ADV_TX_POWER_NO_PREFERENCE,
8006 hdev->le_adv_min_interval,
8007 hdev->le_adv_max_interval);
8009 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8010 MGMT_STATUS_FAILED);
8014 /* Only trigger an advertising added event if a new instance was
8017 if (hdev->adv_instance_cnt > prev_instance_cnt)
8018 mgmt_advertising_added(sk, hdev, cp->instance);
8020 if (hdev->cur_adv_instance == cp->instance) {
8021 /* If the currently advertised instance is being changed then
8022 * cancel the current advertising and schedule the next
8023 * instance. If there is only one instance then the overridden
8024 * advertising data will be visible right away.
8026 cancel_adv_timeout(hdev);
8028 next_instance = hci_get_next_instance(hdev, cp->instance);
8030 schedule_instance = next_instance->instance;
8031 } else if (!hdev->adv_instance_timeout) {
8032 /* Immediately advertise the new instance if no other
8033 * instance is currently being advertised.
8035 schedule_instance = cp->instance;
8038 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8039 * there is no instance to be advertised then we have no HCI
8040 * communication to make. Simply return.
8042 if (!hdev_is_powered(hdev) ||
8043 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8044 !schedule_instance) {
8045 rp.instance = cp->instance;
8046 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8047 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8051 /* We're good to go, update advertising data, parameters, and start
8054 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8061 hci_req_init(&req, hdev);
8063 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
8066 err = hci_req_run(&req, add_advertising_complete);
8069 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8070 MGMT_STATUS_FAILED);
8071 mgmt_pending_remove(cmd);
8075 hci_dev_unlock(hdev);
/* HCI request completion callback for Add Extended Advertising
 * Parameters.
 *
 * On success, replies with the instance's TX power and the data-length
 * limits implied by its flags.  On failure, removes the instance
 * (signalling removal if it was previously live) and replies with the
 * translated error status.
 */
8080 static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
8083 struct mgmt_pending_cmd *cmd;
8084 struct mgmt_cp_add_ext_adv_params *cp;
8085 struct mgmt_rp_add_ext_adv_params rp;
8086 struct adv_info *adv_instance;
8089 BT_DBG("%s", hdev->name);
8093 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
8098 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8102 rp.instance = cp->instance;
8103 rp.tx_power = adv_instance->tx_power;
8105 /* While we're at it, inform userspace of the available space for this
8106 * advertisement, given the flags that will be used.
8108 flags = __le32_to_cpu(cp->flags);
8109 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8110 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8113 /* If this advertisement was previously advertising and we
8114 * failed to update it, we signal that it has been removed and
8115 * delete its structure
8117 if (!adv_instance->pending)
8118 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8120 hci_remove_adv_instance(hdev, cp->instance);
8122 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8123 mgmt_status(status));
8126 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8127 mgmt_status(status), &rp, sizeof(rp));
8132 mgmt_pending_remove(cmd);
8134 hci_dev_unlock(hdev);
/* Handler for the MGMT Add Extended Advertising Parameters command.
 *
 * First half of the two-step extended advertising registration (params,
 * then data).  Validates instance and flags, creates a data-less
 * advertising instance with the requested or default parameters, and —
 * when the controller supports extended advertising — programs the
 * parameters via an HCI request, deferring the reply to
 * add_ext_adv_params_complete().  Otherwise replies immediately.
 *
 * Fix: the minimum-size check below replied with MGMT_OP_ADD_ADVERTISING
 * (copy-paste from add_advertising); it now uses
 * MGMT_OP_ADD_EXT_ADV_PARAMS like every other reply in this handler, so
 * userspace can match the status to the command it sent.
 */
8137 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8138 void *data, u16 data_len)
8140 struct mgmt_cp_add_ext_adv_params *cp = data;
8141 struct mgmt_rp_add_ext_adv_params rp;
8142 struct mgmt_pending_cmd *cmd = NULL;
8143 struct adv_info *adv_instance;
8144 struct hci_request req;
8145 u32 flags, min_interval, max_interval;
8146 u16 timeout, duration;
8151 BT_DBG("%s", hdev->name);
8153 status = mgmt_le_support(hdev);
8155 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8158 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8159 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8160 MGMT_STATUS_INVALID_PARAMS);
8162 /* The purpose of breaking add_advertising into two separate MGMT calls
8163 * for params and data is to allow more parameters to be added to this
8164 * structure in the future. For this reason, we verify that we have the
8165 * bare minimum structure we know of when the interface was defined. Any
8166 * extra parameters we don't know about will be ignored in this request.
8168 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8169 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8170 MGMT_STATUS_INVALID_PARAMS);
8172 flags = __le32_to_cpu(cp->flags);
8174 if (!requested_adv_flags_are_valid(hdev, flags))
8175 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8176 MGMT_STATUS_INVALID_PARAMS);
8180 /* In new interface, we require that we are powered to register */
8181 if (!hdev_is_powered(hdev)) {
8182 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8183 MGMT_STATUS_REJECTED);
8187 if (adv_busy(hdev)) {
8188 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8193 /* Parse defined parameters from request, use defaults otherwise */
8194 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8195 __le16_to_cpu(cp->timeout) : 0;
8197 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8198 __le16_to_cpu(cp->duration) :
8199 hdev->def_multi_adv_rotation_duration;
8201 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8202 __le32_to_cpu(cp->min_interval) :
8203 hdev->le_adv_min_interval;
8205 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8206 __le32_to_cpu(cp->max_interval) :
8207 hdev->le_adv_max_interval;
8209 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8211 HCI_ADV_TX_POWER_NO_PREFERENCE;
8213 /* Create advertising instance with no advertising or response data */
8214 err = hci_add_adv_instance(hdev, cp->instance, flags,
8215 0, NULL, 0, NULL, timeout, duration,
8216 tx_power, min_interval, max_interval);
8219 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8220 MGMT_STATUS_FAILED);
8224 /* Submit request for advertising params if ext adv available */
8225 if (ext_adv_capable(hdev)) {
8226 hci_req_init(&req, hdev);
8227 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8229 /* Updating parameters of an active instance will return a
8230 * Command Disallowed error, so we must first disable the
8231 * instance if it is active.
8233 if (!adv_instance->pending)
8234 __hci_req_disable_ext_adv_instance(&req, cp->instance);
8236 __hci_req_setup_ext_adv_instance(&req, cp->instance);
8238 err = hci_req_run(&req, add_ext_adv_params_complete);
8241 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
8242 hdev, data, data_len);
8245 hci_remove_adv_instance(hdev, cp->instance);
/* No extended advertising: reply right away with defaults. */
8250 rp.instance = cp->instance;
8251 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8252 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8253 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8254 err = mgmt_cmd_complete(sk, hdev->id,
8255 MGMT_OP_ADD_EXT_ADV_PARAMS,
8256 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8260 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_EXT_ADV_DATA handler: attaches advertising and scan-response
 * data to an instance previously created by Add Extended Advertising
 * Parameters, then updates or (for software rotation) schedules advertising.
 *
 * NOTE(review): this extract dropped lines (the embedded original line
 * numbers are non-contiguous) and left stray line-number tokens at the
 * start of each line. Error/cleanup paths ("clear_new_instance", "unlock",
 * return) are only partially visible; do not treat this fragment as
 * complete or compilable.
 */
8265 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8268 struct mgmt_cp_add_ext_adv_data *cp = data;
8269 struct mgmt_rp_add_ext_adv_data rp;
8270 u8 schedule_instance = 0;
8271 struct adv_info *next_instance;
8272 struct adv_info *adv_instance;
8274 struct mgmt_pending_cmd *cmd;
8275 struct hci_request req;
8277 BT_DBG("%s", hdev->name);
/* The instance must already exist (created by Add Ext Adv Params). */
8281 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8283 if (!adv_instance) {
8284 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8285 MGMT_STATUS_INVALID_PARAMS);
8289 /* In new interface, we require that we are powered to register */
8290 if (!hdev_is_powered(hdev)) {
8291 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8292 MGMT_STATUS_REJECTED);
8293 goto clear_new_instance;
/* Reject while another advertising operation is pending; the status
 * argument line was dropped by the extraction.
 */
8296 if (adv_busy(hdev)) {
8297 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8299 goto clear_new_instance;
8302 /* Validate new data */
8303 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8304 cp->adv_data_len, true) ||
8305 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
8306 cp->adv_data_len, cp->scan_rsp_len, false)) {
8307 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8308 MGMT_STATUS_INVALID_PARAMS);
8309 goto clear_new_instance;
8312 /* Set the data in the advertising instance */
8313 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
8314 cp->data, cp->scan_rsp_len,
8315 cp->data + cp->adv_data_len);
8317 /* We're good to go, update advertising data, parameters, and start
8321 hci_req_init(&req, hdev);
8323 hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
8325 if (ext_adv_capable(hdev)) {
8326 __hci_req_update_adv_data(&req, cp->instance);
8327 __hci_req_update_scan_rsp_data(&req, cp->instance);
8328 __hci_req_enable_ext_advertising(&req, cp->instance);
8331 /* If using software rotation, determine next instance to use */
8333 if (hdev->cur_adv_instance == cp->instance) {
8334 /* If the currently advertised instance is being changed
8335 * then cancel the current advertising and schedule the
8336 * next instance. If there is only one instance then the
8337 * overridden advertising data will be visible right
8340 cancel_adv_timeout(hdev);
8342 next_instance = hci_get_next_instance(hdev,
8345 schedule_instance = next_instance->instance;
8346 } else if (!hdev->adv_instance_timeout) {
8347 /* Immediately advertise the new instance if no other
8348 * instance is currently being advertised.
8350 schedule_instance = cp->instance;
8353 /* If the HCI_ADVERTISING flag is set or there is no instance to
8354 * be advertised then we have no HCI communication to make.
8357 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8358 !schedule_instance) {
8359 if (adv_instance->pending) {
8360 mgmt_advertising_added(sk, hdev, cp->instance);
8361 adv_instance->pending = false;
8363 rp.instance = cp->instance;
8364 err = mgmt_cmd_complete(sk, hdev->id,
8365 MGMT_OP_ADD_EXT_ADV_DATA,
8366 MGMT_STATUS_SUCCESS, &rp,
8371 err = __hci_req_schedule_adv_instance(&req, schedule_instance,
8375 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
8379 goto clear_new_instance;
8383 err = hci_req_run(&req, add_advertising_complete);
8386 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8387 MGMT_STATUS_FAILED);
8388 mgmt_pending_remove(cmd);
8389 goto clear_new_instance;
8392 /* We were successful in updating data, so trigger advertising_added
8393 * event if this is an instance that wasn't previously advertising. If
8394 * a failure occurs in the requests we initiated, we will remove the
8395 * instance again in add_advertising_complete
8397 if (adv_instance->pending)
8398 mgmt_advertising_added(sk, hdev, cp->instance);
/* Error path: a failed add tears the half-built instance down again. */
8403 hci_remove_adv_instance(hdev, cp->instance);
8406 hci_dev_unlock(hdev);
8411 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
8414 struct mgmt_pending_cmd *cmd;
8415 struct mgmt_cp_remove_advertising *cp;
8416 struct mgmt_rp_remove_advertising rp;
8418 bt_dev_dbg(hdev, "status %u", status);
8422 /* A failure status here only means that we failed to disable
8423 * advertising. Otherwise, the advertising instance has been removed,
8424 * so report success.
8426 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
8431 rp.instance = cp->instance;
8433 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
8435 mgmt_pending_remove(cmd);
8438 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_ADVERTISING handler: removes one advertising instance
 * (or all, when cp->instance == 0) and disables advertising when no
 * instance remains.
 *
 * NOTE(review): extraction dropped lines here (embedded original line
 * numbers are non-contiguous): the opening brace, "int err;", the lock
 * call, several "goto unlock;" statements, the "unlock:" label and the
 * final return are missing. Treat as an incomplete fragment.
 */
8441 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
8442 void *data, u16 data_len)
8444 struct mgmt_cp_remove_advertising *cp = data;
8445 struct mgmt_rp_remove_advertising rp;
8446 struct mgmt_pending_cmd *cmd;
8447 struct hci_request req;
8450 bt_dev_dbg(hdev, "sock %p", sk);
8452 /* Enabling the experimental LL Privay support disables support for
8455 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
8456 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8457 MGMT_STATUS_NOT_SUPPORTED);
/* Instance 0 means "remove all"; a non-zero instance must exist. */
8461 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
8462 err = mgmt_cmd_status(sk, hdev->id,
8463 MGMT_OP_REMOVE_ADVERTISING,
8464 MGMT_STATUS_INVALID_PARAMS);
/* Serialize against other pending advertising / SET_LE operations. */
8468 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
8469 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
8470 pending_find(MGMT_OP_SET_LE, hdev)) {
8471 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8476 if (list_empty(&hdev->adv_instances)) {
8477 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8478 MGMT_STATUS_INVALID_PARAMS);
8482 hci_req_init(&req, hdev);
8484 /* If we use extended advertising, instance is disabled and removed */
8485 if (ext_adv_capable(hdev)) {
8486 __hci_req_disable_ext_adv_instance(&req, cp->instance);
8487 __hci_req_remove_ext_adv_instance(&req, cp->instance);
8490 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
8492 if (list_empty(&hdev->adv_instances))
8493 __hci_req_disable_advertising(&req);
8495 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
8496 * flag is set or the device isn't powered then we have no HCI
8497 * communication to make. Simply return.
8499 if (skb_queue_empty(&req.cmd_q) ||
8500 !hdev_is_powered(hdev) ||
8501 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
8502 hci_req_purge(&req);
8503 rp.instance = cp->instance;
8504 err = mgmt_cmd_complete(sk, hdev->id,
8505 MGMT_OP_REMOVE_ADVERTISING,
8506 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8510 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8517 err = hci_req_run(&req, remove_advertising_complete);
8519 mgmt_pending_remove(cmd);
8522 hci_dev_unlock(hdev);
8527 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8528 void *data, u16 data_len)
8530 struct mgmt_cp_get_adv_size_info *cp = data;
8531 struct mgmt_rp_get_adv_size_info rp;
8532 u32 flags, supported_flags;
8535 bt_dev_dbg(hdev, "sock %p", sk);
8537 if (!lmp_le_capable(hdev))
8538 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8539 MGMT_STATUS_REJECTED);
8541 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8542 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8543 MGMT_STATUS_INVALID_PARAMS);
8545 flags = __le32_to_cpu(cp->flags);
8547 /* The current implementation only supports a subset of the specified
8550 supported_flags = get_supported_adv_flags(hdev);
8551 if (flags & ~supported_flags)
8552 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8553 MGMT_STATUS_INVALID_PARAMS);
8555 rp.instance = cp->instance;
8556 rp.flags = cp->flags;
8557 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8558 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8560 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8561 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for Management (mgmt) commands, indexed by opcode: each
 * entry gives the handler function, the expected (or minimum) parameter
 * size, and optional HCI_MGMT_* flags (UNTRUSTED, UNCONFIGURED,
 * VAR_LEN, HDEV_OPTIONAL).
 *
 * NOTE(review): extraction dropped many continuation lines that carry
 * flags (e.g. the HCI_MGMT_VAR_LEN markers after load_link_keys,
 * load_long_term_keys, add_advertising, etc.) as well as the closing
 * "};" — the embedded original line numbers show the gaps. Do not edit
 * this fragment as if it were the complete table.
 */
8566 static const struct hci_mgmt_handler mgmt_handlers[] = {
8567 { NULL }, /* 0x0000 (no command) */
8568 { read_version, MGMT_READ_VERSION_SIZE,
8570 HCI_MGMT_UNTRUSTED },
8571 { read_commands, MGMT_READ_COMMANDS_SIZE,
8573 HCI_MGMT_UNTRUSTED },
8574 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
8576 HCI_MGMT_UNTRUSTED },
8577 { read_controller_info, MGMT_READ_INFO_SIZE,
8578 HCI_MGMT_UNTRUSTED },
8579 { set_powered, MGMT_SETTING_SIZE },
8580 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
8581 { set_connectable, MGMT_SETTING_SIZE },
8582 { set_fast_connectable, MGMT_SETTING_SIZE },
8583 { set_bondable, MGMT_SETTING_SIZE },
8584 { set_link_security, MGMT_SETTING_SIZE },
8585 { set_ssp, MGMT_SETTING_SIZE },
8586 { set_hs, MGMT_SETTING_SIZE },
8587 { set_le, MGMT_SETTING_SIZE },
8588 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
8589 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
8590 { add_uuid, MGMT_ADD_UUID_SIZE },
8591 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
8592 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
8594 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
8596 { disconnect, MGMT_DISCONNECT_SIZE },
8597 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
8598 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
8599 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
8600 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
8601 { pair_device, MGMT_PAIR_DEVICE_SIZE },
8602 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
8603 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
8604 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
8605 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
8606 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
8607 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
8608 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
8609 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
8611 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
8612 { start_discovery, MGMT_START_DISCOVERY_SIZE },
8613 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
8614 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
8615 { block_device, MGMT_BLOCK_DEVICE_SIZE },
8616 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
8617 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
8618 { set_advertising, MGMT_SETTING_SIZE },
8619 { set_bredr, MGMT_SETTING_SIZE },
8620 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
8621 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
8622 { set_secure_conn, MGMT_SETTING_SIZE },
8623 { set_debug_keys, MGMT_SETTING_SIZE },
8624 { set_privacy, MGMT_SET_PRIVACY_SIZE },
8625 { load_irks, MGMT_LOAD_IRKS_SIZE,
8627 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
8628 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
8629 { add_device, MGMT_ADD_DEVICE_SIZE },
8630 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
8631 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
8633 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
8635 HCI_MGMT_UNTRUSTED },
8636 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
8637 HCI_MGMT_UNCONFIGURED |
8638 HCI_MGMT_UNTRUSTED },
8639 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
8640 HCI_MGMT_UNCONFIGURED },
8641 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
8642 HCI_MGMT_UNCONFIGURED },
8643 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
8645 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
8646 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
8648 HCI_MGMT_UNTRUSTED },
8649 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
8650 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
8652 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
8653 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
8654 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
8655 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
8656 HCI_MGMT_UNTRUSTED },
8657 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
8658 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
8659 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
8660 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
8662 { set_wideband_speech, MGMT_SETTING_SIZE },
8663 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
8664 HCI_MGMT_UNTRUSTED },
8665 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
8666 HCI_MGMT_UNTRUSTED |
8667 HCI_MGMT_HDEV_OPTIONAL },
8668 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
8670 HCI_MGMT_HDEV_OPTIONAL },
8671 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
8672 HCI_MGMT_UNTRUSTED },
8673 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
8675 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
8676 HCI_MGMT_UNTRUSTED },
8677 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
8679 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
8680 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
8681 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
8682 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
8684 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
8685 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
8687 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
8689 { add_adv_patterns_monitor_rssi,
8690 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
8694 void mgmt_index_added(struct hci_dev *hdev)
8696 struct mgmt_ev_ext_index ev;
8698 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8701 switch (hdev->dev_type) {
8703 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8704 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8705 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8708 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8709 HCI_MGMT_INDEX_EVENTS);
8722 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8723 HCI_MGMT_EXT_INDEX_EVENTS);
8726 void mgmt_index_removed(struct hci_dev *hdev)
8728 struct mgmt_ev_ext_index ev;
8729 u8 status = MGMT_STATUS_INVALID_INDEX;
8731 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8734 switch (hdev->dev_type) {
8736 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8738 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8739 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8740 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8743 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8744 HCI_MGMT_INDEX_EVENTS);
8757 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8758 HCI_MGMT_EXT_INDEX_EVENTS);
8761 /* This function requires the caller holds hdev->lock */
8762 static void restart_le_actions(struct hci_dev *hdev)
8764 struct hci_conn_params *p;
8766 list_for_each_entry(p, &hdev->le_conn_params, list) {
8767 /* Needed for AUTO_OFF case where might not "really"
8768 * have been powered off.
8770 list_del_init(&p->action);
8772 switch (p->auto_connect) {
8773 case HCI_AUTO_CONN_DIRECT:
8774 case HCI_AUTO_CONN_ALWAYS:
8775 list_add(&p->action, &hdev->pend_le_conns);
8777 case HCI_AUTO_CONN_REPORT:
8778 list_add(&p->action, &hdev->pend_le_reports);
8786 void mgmt_power_on(struct hci_dev *hdev, int err)
8788 struct cmd_lookup match = { NULL, hdev };
8790 bt_dev_dbg(hdev, "err %d", err);
8795 restart_le_actions(hdev);
8796 hci_update_background_scan(hdev);
8799 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8801 new_settings(hdev, match.sk);
8806 hci_dev_unlock(hdev);
8809 void __mgmt_power_off(struct hci_dev *hdev)
8811 struct cmd_lookup match = { NULL, hdev };
8812 u8 status, zero_cod[] = { 0, 0, 0 };
8814 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8816 /* If the power off is because of hdev unregistration let
8817 * use the appropriate INVALID_INDEX status. Otherwise use
8818 * NOT_POWERED. We cover both scenarios here since later in
8819 * mgmt_index_removed() any hci_conn callbacks will have already
8820 * been triggered, potentially causing misleading DISCONNECTED
8823 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
8824 status = MGMT_STATUS_INVALID_INDEX;
8826 status = MGMT_STATUS_NOT_POWERED;
8828 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8830 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
8831 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8832 zero_cod, sizeof(zero_cod),
8833 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8834 ext_info_changed(hdev, NULL);
8837 new_settings(hdev, match.sk);
8843 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8845 struct mgmt_pending_cmd *cmd;
8848 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8852 if (err == -ERFKILL)
8853 status = MGMT_STATUS_RFKILLED;
8855 status = MGMT_STATUS_FAILED;
8857 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8859 mgmt_pending_remove(cmd);
8862 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8865 struct mgmt_ev_new_link_key ev;
8867 memset(&ev, 0, sizeof(ev));
8869 ev.store_hint = persistent;
8870 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8871 ev.key.addr.type = BDADDR_BREDR;
8872 ev.key.type = key->type;
8873 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8874 ev.key.pin_len = key->pin_len;
8876 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
8879 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8881 switch (ltk->type) {
8883 case SMP_LTK_RESPONDER:
8884 if (ltk->authenticated)
8885 return MGMT_LTK_AUTHENTICATED;
8886 return MGMT_LTK_UNAUTHENTICATED;
8888 if (ltk->authenticated)
8889 return MGMT_LTK_P256_AUTH;
8890 return MGMT_LTK_P256_UNAUTH;
8891 case SMP_LTK_P256_DEBUG:
8892 return MGMT_LTK_P256_DEBUG;
8895 return MGMT_LTK_UNAUTHENTICATED;
8898 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
8900 struct mgmt_ev_new_long_term_key ev;
8902 memset(&ev, 0, sizeof(ev));
8904 /* Devices using resolvable or non-resolvable random addresses
8905 * without providing an identity resolving key don't require
8906 * to store long term keys. Their addresses will change the
8909 * Only when a remote device provides an identity address
8910 * make sure the long term key is stored. If the remote
8911 * identity is known, the long term keys are internally
8912 * mapped to the identity address. So allow static random
8913 * and public addresses here.
8915 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8916 (key->bdaddr.b[5] & 0xc0) != 0xc0)
8917 ev.store_hint = 0x00;
8919 ev.store_hint = persistent;
8921 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8922 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
8923 ev.key.type = mgmt_ltk_type(key);
8924 ev.key.enc_size = key->enc_size;
8925 ev.key.ediv = key->ediv;
8926 ev.key.rand = key->rand;
8928 if (key->type == SMP_LTK)
8929 ev.key.initiator = 1;
8931 /* Make sure we copy only the significant bytes based on the
8932 * encryption key size, and set the rest of the value to zeroes.
8934 memcpy(ev.key.val, key->val, key->enc_size);
8935 memset(ev.key.val + key->enc_size, 0,
8936 sizeof(ev.key.val) - key->enc_size);
8938 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
8941 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
8943 struct mgmt_ev_new_irk ev;
8945 memset(&ev, 0, sizeof(ev));
8947 ev.store_hint = persistent;
8949 bacpy(&ev.rpa, &irk->rpa);
8950 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
8951 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
8952 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
8954 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
8957 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
8960 struct mgmt_ev_new_csrk ev;
8962 memset(&ev, 0, sizeof(ev));
8964 /* Devices using resolvable or non-resolvable random addresses
8965 * without providing an identity resolving key don't require
8966 * to store signature resolving keys. Their addresses will change
8967 * the next time around.
8969 * Only when a remote device provides an identity address
8970 * make sure the signature resolving key is stored. So allow
8971 * static random and public addresses here.
8973 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8974 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
8975 ev.store_hint = 0x00;
8977 ev.store_hint = persistent;
8979 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
8980 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
8981 ev.key.type = csrk->type;
8982 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
8984 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
8987 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
8988 u8 bdaddr_type, u8 store_hint, u16 min_interval,
8989 u16 max_interval, u16 latency, u16 timeout)
8991 struct mgmt_ev_new_conn_param ev;
8993 if (!hci_is_identity_address(bdaddr, bdaddr_type))
8996 memset(&ev, 0, sizeof(ev));
8997 bacpy(&ev.addr.bdaddr, bdaddr);
8998 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
8999 ev.store_hint = store_hint;
9000 ev.min_interval = cpu_to_le16(min_interval);
9001 ev.max_interval = cpu_to_le16(max_interval);
9002 ev.latency = cpu_to_le16(latency);
9003 ev.timeout = cpu_to_le16(timeout);
9005 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9008 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9009 u8 *name, u8 name_len)
9012 struct mgmt_ev_device_connected *ev = (void *) buf;
9016 bacpy(&ev->addr.bdaddr, &conn->dst);
9017 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9020 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9022 ev->flags = __cpu_to_le32(flags);
9024 /* We must ensure that the EIR Data fields are ordered and
9025 * unique. Keep it simple for now and avoid the problem by not
9026 * adding any BR/EDR data to the LE adv.
9028 if (conn->le_adv_data_len > 0) {
9029 memcpy(&ev->eir[eir_len],
9030 conn->le_adv_data, conn->le_adv_data_len);
9031 eir_len = conn->le_adv_data_len;
9034 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
9037 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
9038 eir_len = eir_append_data(ev->eir, eir_len,
9040 conn->dev_class, 3);
9043 ev->eir_len = cpu_to_le16(eir_len);
9045 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
9046 sizeof(*ev) + eir_len, NULL);
9049 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9051 struct sock **sk = data;
9053 cmd->cmd_complete(cmd, 0);
9058 mgmt_pending_remove(cmd);
9061 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9063 struct hci_dev *hdev = data;
9064 struct mgmt_cp_unpair_device *cp = cmd->param;
9066 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9068 cmd->cmd_complete(cmd, 0);
9069 mgmt_pending_remove(cmd);
9072 bool mgmt_powering_down(struct hci_dev *hdev)
9074 struct mgmt_pending_cmd *cmd;
9075 struct mgmt_mode *cp;
9077 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9088 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9089 u8 link_type, u8 addr_type, u8 reason,
9090 bool mgmt_connected)
9092 struct mgmt_ev_device_disconnected ev;
9093 struct sock *sk = NULL;
9095 /* The connection is still in hci_conn_hash so test for 1
9096 * instead of 0 to know if this is the last one.
9098 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9099 cancel_delayed_work(&hdev->power_off);
9100 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9103 if (!mgmt_connected)
9106 if (link_type != ACL_LINK && link_type != LE_LINK)
9109 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9111 bacpy(&ev.addr.bdaddr, bdaddr);
9112 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9115 /* Report disconnects due to suspend */
9116 if (hdev->suspended)
9117 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9119 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9124 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9128 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9129 u8 link_type, u8 addr_type, u8 status)
9131 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9132 struct mgmt_cp_disconnect *cp;
9133 struct mgmt_pending_cmd *cmd;
9135 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9138 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9144 if (bacmp(bdaddr, &cp->addr.bdaddr))
9147 if (cp->addr.type != bdaddr_type)
9150 cmd->cmd_complete(cmd, mgmt_status(status));
9151 mgmt_pending_remove(cmd);
9154 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9155 u8 addr_type, u8 status)
9157 struct mgmt_ev_connect_failed ev;
9159 /* The connection is still in hci_conn_hash so test for 1
9160 * instead of 0 to know if this is the last one.
9162 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9163 cancel_delayed_work(&hdev->power_off);
9164 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9167 bacpy(&ev.addr.bdaddr, bdaddr);
9168 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9169 ev.status = mgmt_status(status);
9171 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9174 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9176 struct mgmt_ev_pin_code_request ev;
9178 bacpy(&ev.addr.bdaddr, bdaddr);
9179 ev.addr.type = BDADDR_BREDR;
9182 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9185 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9188 struct mgmt_pending_cmd *cmd;
9190 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9194 cmd->cmd_complete(cmd, mgmt_status(status));
9195 mgmt_pending_remove(cmd);
9198 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9201 struct mgmt_pending_cmd *cmd;
9203 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9207 cmd->cmd_complete(cmd, mgmt_status(status));
9208 mgmt_pending_remove(cmd);
9211 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9212 u8 link_type, u8 addr_type, u32 value,
9215 struct mgmt_ev_user_confirm_request ev;
9217 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9219 bacpy(&ev.addr.bdaddr, bdaddr);
9220 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9221 ev.confirm_hint = confirm_hint;
9222 ev.value = cpu_to_le32(value);
9224 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9228 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9229 u8 link_type, u8 addr_type)
9231 struct mgmt_ev_user_passkey_request ev;
9233 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9235 bacpy(&ev.addr.bdaddr, bdaddr);
9236 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9238 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9242 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9243 u8 link_type, u8 addr_type, u8 status,
9246 struct mgmt_pending_cmd *cmd;
9248 cmd = pending_find(opcode, hdev);
9252 cmd->cmd_complete(cmd, mgmt_status(status));
9253 mgmt_pending_remove(cmd);
9258 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9259 u8 link_type, u8 addr_type, u8 status)
9261 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9262 status, MGMT_OP_USER_CONFIRM_REPLY);
9265 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9266 u8 link_type, u8 addr_type, u8 status)
9268 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9270 MGMT_OP_USER_CONFIRM_NEG_REPLY);
9273 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9274 u8 link_type, u8 addr_type, u8 status)
9276 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9277 status, MGMT_OP_USER_PASSKEY_REPLY);
9280 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9281 u8 link_type, u8 addr_type, u8 status)
9283 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9285 MGMT_OP_USER_PASSKEY_NEG_REPLY);
9288 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9289 u8 link_type, u8 addr_type, u32 passkey,
9292 struct mgmt_ev_passkey_notify ev;
9294 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9296 bacpy(&ev.addr.bdaddr, bdaddr);
9297 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9298 ev.passkey = __cpu_to_le32(passkey);
9299 ev.entered = entered;
9301 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9304 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9306 struct mgmt_ev_auth_failed ev;
9307 struct mgmt_pending_cmd *cmd;
9308 u8 status = mgmt_status(hci_status);
9310 bacpy(&ev.addr.bdaddr, &conn->dst);
9311 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9314 cmd = find_pairing(conn);
9316 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9317 cmd ? cmd->sk : NULL);
9320 cmd->cmd_complete(cmd, status);
9321 mgmt_pending_remove(cmd);
9325 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9327 struct cmd_lookup match = { NULL, hdev };
9331 u8 mgmt_err = mgmt_status(status);
9332 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9333 cmd_status_rsp, &mgmt_err);
9337 if (test_bit(HCI_AUTH, &hdev->flags))
9338 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9340 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9342 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9346 new_settings(hdev, match.sk);
9352 static void clear_eir(struct hci_request *req)
9354 struct hci_dev *hdev = req->hdev;
9355 struct hci_cp_write_eir cp;
9357 if (!lmp_ext_inq_capable(hdev))
9360 memset(hdev->eir, 0, sizeof(hdev->eir));
9362 memset(&cp, 0, sizeof(cp));
9364 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
9367 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
9369 struct cmd_lookup match = { NULL, hdev };
9370 struct hci_request req;
9371 bool changed = false;
9374 u8 mgmt_err = mgmt_status(status);
9376 if (enable && hci_dev_test_and_clear_flag(hdev,
9378 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
9379 new_settings(hdev, NULL);
9382 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
9388 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
9390 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
9392 changed = hci_dev_test_and_clear_flag(hdev,
9395 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
9398 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
9401 new_settings(hdev, match.sk);
9406 hci_req_init(&req, hdev);
9408 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
9409 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
9410 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
9411 sizeof(enable), &enable);
9412 __hci_req_update_eir(&req);
9417 hci_req_run(&req, NULL);
9420 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9422 struct cmd_lookup *match = data;
9424 if (match->sk == NULL) {
9425 match->sk = cmd->sk;
9426 sock_hold(match->sk);
9430 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9433 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9435 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9436 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9437 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9440 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9441 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9442 ext_info_changed(hdev, NULL);
9449 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9451 struct mgmt_cp_set_local_name ev;
9452 struct mgmt_pending_cmd *cmd;
9457 memset(&ev, 0, sizeof(ev));
9458 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9459 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9461 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
9463 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9465 /* If this is a HCI command related to powering on the
9466 * HCI dev don't send any mgmt signals.
9468 if (pending_find(MGMT_OP_SET_POWERED, hdev))
9472 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
9473 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
9474 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
9477 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9481 for (i = 0; i < uuid_count; i++) {
9482 if (!memcmp(uuid, uuids[i], 16))
9489 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
9493 while (parsed < eir_len) {
9494 u8 field_len = eir[0];
9501 if (eir_len - parsed < field_len + 1)
9505 case EIR_UUID16_ALL:
9506 case EIR_UUID16_SOME:
9507 for (i = 0; i + 3 <= field_len; i += 2) {
9508 memcpy(uuid, bluetooth_base_uuid, 16);
9509 uuid[13] = eir[i + 3];
9510 uuid[12] = eir[i + 2];
9511 if (has_uuid(uuid, uuid_count, uuids))
9515 case EIR_UUID32_ALL:
9516 case EIR_UUID32_SOME:
9517 for (i = 0; i + 5 <= field_len; i += 4) {
9518 memcpy(uuid, bluetooth_base_uuid, 16);
9519 uuid[15] = eir[i + 5];
9520 uuid[14] = eir[i + 4];
9521 uuid[13] = eir[i + 3];
9522 uuid[12] = eir[i + 2];
9523 if (has_uuid(uuid, uuid_count, uuids))
9527 case EIR_UUID128_ALL:
9528 case EIR_UUID128_SOME:
9529 for (i = 0; i + 17 <= field_len; i += 16) {
9530 memcpy(uuid, eir + i + 2, 16);
9531 if (has_uuid(uuid, uuid_count, uuids))
9537 parsed += field_len + 1;
9538 eir += field_len + 1;
9544 static void restart_le_scan(struct hci_dev *hdev)
9546 /* If controller is not scanning we are done. */
9547 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9550 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9551 hdev->discovery.scan_start +
9552 hdev->discovery.scan_duration))
9555 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9556 DISCOV_LE_RESTART_DELAY);
9559 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9560 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9562 /* If a RSSI threshold has been specified, and
9563 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9564 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9565 * is set, let it through for further processing, as we might need to
9568 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9569 * the results are also dropped.
9571 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9572 (rssi == HCI_RSSI_INVALID ||
9573 (rssi < hdev->discovery.rssi &&
9574 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9577 if (hdev->discovery.uuid_count != 0) {
9578 /* If a list of UUIDs is provided in filter, results with no
9579 * matching UUID should be dropped.
9581 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9582 hdev->discovery.uuids) &&
9583 !eir_has_uuids(scan_rsp, scan_rsp_len,
9584 hdev->discovery.uuid_count,
9585 hdev->discovery.uuids))
9589 /* If duplicate filtering does not report RSSI changes, then restart
9590 * scanning to ensure updated result with updated RSSI values.
9592 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9593 restart_le_scan(hdev);
9595 /* Validate RSSI value against the RSSI threshold once more. */
9596 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9597 rssi < hdev->discovery.rssi)
9604 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9605 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
9606 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9609 struct mgmt_ev_device_found *ev = (void *)buf;
9612 /* Don't send events for a non-kernel initiated discovery. With
9613 * LE one exception is if we have pend_le_reports > 0 in which
9614 * case we're doing passive scanning and want these events.
9616 if (!hci_discovery_active(hdev)) {
9617 if (link_type == ACL_LINK)
9619 if (link_type == LE_LINK &&
9620 list_empty(&hdev->pend_le_reports) &&
9621 !hci_is_adv_monitoring(hdev)) {
9626 if (hdev->discovery.result_filtering) {
9627 /* We are using service discovery */
9628 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
9633 if (hdev->discovery.limited) {
9634 /* Check for limited discoverable bit */
9636 if (!(dev_class[1] & 0x20))
9639 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
9640 if (!flags || !(flags[0] & LE_AD_LIMITED))
9645 /* Make sure that the buffer is big enough. The 5 extra bytes
9646 * are for the potential CoD field.
9648 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
9651 memset(buf, 0, sizeof(buf));
9653 /* In case of device discovery with BR/EDR devices (pre 1.2), the
9654 * RSSI value was reported as 0 when not available. This behavior
9655 * is kept when using device discovery. This is required for full
9656 * backwards compatibility with the API.
9658 * However when using service discovery, the value 127 will be
9659 * returned when the RSSI is not available.
9661 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
9662 link_type == ACL_LINK)
9665 bacpy(&ev->addr.bdaddr, bdaddr);
9666 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9668 ev->flags = cpu_to_le32(flags);
9671 /* Copy EIR or advertising data into event */
9672 memcpy(ev->eir, eir, eir_len);
9674 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9676 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9679 if (scan_rsp_len > 0)
9680 /* Append scan response data to event */
9681 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
9683 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9684 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
9686 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
9689 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9690 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9692 struct mgmt_ev_device_found *ev;
9693 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
9696 ev = (struct mgmt_ev_device_found *) buf;
9698 memset(buf, 0, sizeof(buf));
9700 bacpy(&ev->addr.bdaddr, bdaddr);
9701 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9704 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9707 ev->eir_len = cpu_to_le16(eir_len);
9709 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
9712 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9714 struct mgmt_ev_discovering ev;
9716 bt_dev_dbg(hdev, "discovering %u", discovering);
9718 memset(&ev, 0, sizeof(ev));
9719 ev.type = hdev->discovery.type;
9720 ev.discovering = discovering;
9722 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9725 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9727 struct mgmt_ev_controller_suspend ev;
9729 ev.suspend_state = state;
9730 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9733 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9736 struct mgmt_ev_controller_resume ev;
9738 ev.wake_reason = reason;
9740 bacpy(&ev.addr.bdaddr, bdaddr);
9741 ev.addr.type = addr_type;
9743 memset(&ev.addr, 0, sizeof(ev.addr));
9746 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9749 static struct hci_mgmt_chan chan = {
9750 .channel = HCI_CHANNEL_CONTROL,
9751 .handler_count = ARRAY_SIZE(mgmt_handlers),
9752 .handlers = mgmt_handlers,
9753 .hdev_init = mgmt_init_hdev,
9758 return hci_mgmt_chan_register(&chan);
9761 void mgmt_exit(void)
9763 hci_mgmt_chan_unregister(&chan);