/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	22
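
/* The command and event tables below are what gets advertised to user
 * space via MGMT_OP_READ_COMMANDS; read_commands() reports the opcodes
 * back in exactly this array order.
 */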
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,		/* Success */
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
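
/* Map a negative errno, as produced by the hci_sync machinery, onto the
 * closest MGMT status code. Anything not handled below falls through to
 * MGMT_STATUS_FAILED.
 */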
static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}
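
/* mgmt_status() accepts both error domains: a negative value is treated
 * as an errno and converted via mgmt_errno_status(), while a non-negative
 * value is interpreted as an HCI status code and looked up in
 * mgmt_status_table above.
 */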
static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}
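
/* Helpers for broadcasting events on the control channel:
 * mgmt_index_event() matches an explicit socket flag, mgmt_limited_event()
 * additionally skips one socket, and mgmt_event() restricts delivery to
 * trusted sockets.
 */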
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
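
/* A controller counts as configured once any required external
 * configuration has completed and, where the quirks demand it, a valid
 * public address has been set. The same two checks drive the missing
 * options reported by get_missing_options() below.
 */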
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
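
/* Note the inverted sense of the EDR bits in hdev->pkt_type below: for
 * EDR packet types the HCI_2DHx/HCI_3DHx bits mean "do not use", so a
 * cleared bit indicates that the PHY and slot combination is selected.
 */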
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
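
/* BR 1M 1-slot and LE 1M TX/RX are mandatory PHYs and therefore cannot be
 * de-selected; everything else reported by get_supported_phys() is
 * configurable.
 */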
static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then whether
	 * the address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	return settings;
}
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}
bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
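
/* Delayed-work handler pair for the service cache: when the cache timer
 * fires, HCI_SERVICE_CACHE is cleared and service_cache_sync() pushes the
 * pending EIR and class-of-device updates to the controller.
 */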
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_update_discoverable(hdev);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}
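
/* One-time per-controller initialization, performed lazily when the first
 * mgmt command arrives for a device: sets up the delayed work items above
 * and flips the device into mgmt-controlled mode (HCI_MGMT).
 */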
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where the device might not
		 * "really" have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
				 mgmt_set_powered_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
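
/* Generic per-command response callbacks used with mgmt_pending_foreach():
 * cmd_status_rsp() answers with a plain status, while cmd_complete_rsp()
 * prefers a command-specific cmd_complete handler when one is set.
 */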
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
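
/* Mesh receiver support (experimental): the handlers below implement
 * MGMT_OP_SET_MESH_RECEIVER, MGMT_OP_MESH_READ_FEATURES, MGMT_OP_MESH_SEND
 * and MGMT_OP_MESH_SEND_CANCEL, all gated on HCI_MESH_EXPERIMENTAL.
 */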
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;
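
	/* The mesh packet is carried by a short-lived advertising instance;
	 * the instance number one past le_num_of_adv_sets is used here,
	 * seemingly to keep it out of the range used by regular
	 * advertising instances.
	 */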
	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
{
	struct mgmt_rp_mesh_read_features *rp = data;

	if (rp->used_handles >= rp->max_handles)
		return;

	rp->handles[rp->used_handles++] = mesh_tx->handle;
}
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		if (mesh_tx && sending)
			mgmt_mesh_remove(mesh_tx);
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}
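/* Worked example: the 16-bit Heart Rate UUID 0x180d expands to
 * 0000180d-0000-1000-8000-00805f9b34fb. Stored little endian, its first
 * 12 bytes match bluetooth_base_uuid and the le32 at offset 12 is
 * 0x0000180d <= 0xffff, so get_uuid_size() reports 16.
 */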
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}
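/* Re-arming the service cache defers the Class of Device and EIR update
 * so that a burst of UUID changes (typical while a daemon registers its
 * services at startup) can be folded into a single HCI update when the
 * delayed work runs.
 */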
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int set_class_sync(struct hci_dev *hdev, void *data)
{
	int err = 0;

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		cancel_delayed_work_sync(&hdev->service_cache);
		err = hci_update_eir_sync(hdev);
	}

	if (err)
		return err;

	return hci_update_class_sync(hdev);
}
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}
static int unpair_device_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct hci_conn *conn;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn)
		return 0;

	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
}
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

done:
	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	switch (link_type) {
	case LE_LINK:
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
			return BDADDR_LE_PUBLIC;

		default:
			/* Fallback to LE Random address type */
			return BDADDR_LE_RANDOM;
		}

	default:
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;
	}
}
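/* link_to_bdaddr() folds the HCI link type and LE address type into the
 * single address-type byte used by the mgmt API; anything that is not an
 * LE link is reported as BR/EDR.
 */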
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
	struct mgmt_pending_cmd *cmd;

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int abort_conn_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn;
	u16 handle = PTR_ERR(data);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		return 0;

	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
}
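/* Note: callers pack the connection handle into the data pointer with
 * ERR_PTR(), so PTR_ERR() above merely unpacks it; no allocation is
 * needed to queue this sync callback.
 */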
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
				   NULL);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}

static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
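/* All five reply handlers above funnel into user_pairing_resp(); only
 * User Passkey Reply carries a payload (the passkey), the confirmations
 * and negative replies just echo the address back over HCI.
 */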
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return 0;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return 0;

	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);

	return 0;
}
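/* adv_expire_sync() is the common helper behind name_changed_sync() and
 * appearance_changed_sync() below: when the advertised local name or
 * appearance changes, the current instance is expired and rescheduled so
 * the new value goes on air.
 */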
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}

static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}

static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}

int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
static int set_default_phy_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
	struct hci_cp_le_set_default_phy cp_phy;
	u32 selected_phys = __le32_to_cpu(cp->selected_phys);

	memset(&cp_phy, 0, sizeof(cp_phy));

	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				  sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);

	return 0;
}
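/* Per the HCI LE Set Default PHY command, all_phys bit 0 means "no TX
 * preference" and bit 1 "no RX preference", which is why those bits are
 * set above when the corresponding direction mask is absent from
 * selected_phys.
 */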
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
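/* Note the inverted sense of the EDR bits in the packet-type mapping
 * above: HCI_2DHx/HCI_3DHx are "shall not use" bits in the BR/EDR
 * packet-type bitmask, so selecting an EDR PHY clears the bit while
 * deselecting it sets the bit.
 */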
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
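/* The capability reply is built as EIR-style TLV entries (length, type,
 * value) via the eir_append_* helpers, so new capabilities can be added
 * without changing the fixed part of the reply structure.
 */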
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		flags = hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES) ?
			BIT(0) : 0;
		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		flags = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) ?
			BIT(0) | BIT(1) : BIT(1);
		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		flags = hci_dev_test_flag(hdev, HCI_QUALITY_REPORT) ?
			BIT(0) : 0;
		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		flags = hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED) ?
			BIT(0) : 0;
		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		flags = hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL) ?
			BIT(0) : 0;
		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));

	// Do we need to be atomic with the conn_flags?
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}

static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
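/* In the exp-feature flag words above, BIT(0) reports whether the feature
 * is enabled; for the RPA resolution feature BIT(1) additionally signals
 * that toggling it also changes the supported settings.
 */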
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}

/* The zero key uuid is special. Multiple exp features are set through it. */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
#ifdef CONFIG_BT_FEATURE_DEBUG
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}
4877 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4878 struct mgmt_cp_set_exp_feature *cp,
4883 struct mgmt_rp_set_exp_feature rp;
4885 /* Command requires to use a valid controller index */
4887 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4888 MGMT_OP_SET_EXP_FEATURE,
4889 MGMT_STATUS_INVALID_INDEX);
4891 /* Parameters are limited to a single octet */
4892 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4893 return mgmt_cmd_status(sk, hdev->id,
4894 MGMT_OP_SET_EXP_FEATURE,
4895 MGMT_STATUS_INVALID_PARAMS);
4897 /* Only boolean on/off is supported */
4898 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4899 return mgmt_cmd_status(sk, hdev->id,
4900 MGMT_OP_SET_EXP_FEATURE,
4901 MGMT_STATUS_INVALID_PARAMS);
4903 val = !!cp->param[0];
4904 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4906 if (!hci_dev_le_state_simultaneous(hdev)) {
4907 return mgmt_cmd_status(sk, hdev->id,
4908 MGMT_OP_SET_EXP_FEATURE,
4909 MGMT_STATUS_NOT_SUPPORTED);
4913 if (val)
4914 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4915 else
4916 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4919 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4920 val, changed);
4922 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4923 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4924 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4925 err = mgmt_cmd_complete(sk, hdev->id,
4926 MGMT_OP_SET_EXP_FEATURE, 0,
4927 &rp, sizeof(rp));
4929 if (changed)
4930 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4936 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4937 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4939 struct mgmt_rp_set_exp_feature rp;
4940 bool val, changed = false;
4943 /* This command must use the non-controller index */
4944 if (hdev)
4945 return mgmt_cmd_status(sk, hdev->id,
4946 MGMT_OP_SET_EXP_FEATURE,
4947 MGMT_STATUS_INVALID_INDEX);
4949 /* Parameters are limited to a single octet */
4950 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4951 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4952 MGMT_OP_SET_EXP_FEATURE,
4953 MGMT_STATUS_INVALID_PARAMS);
4955 /* Only boolean on/off is supported */
4956 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4957 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4958 MGMT_OP_SET_EXP_FEATURE,
4959 MGMT_STATUS_INVALID_PARAMS);
4961 val = !!cp->param[0];
4970 memcpy(rp.uuid, iso_socket_uuid, 16);
4971 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4973 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4975 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4976 MGMT_OP_SET_EXP_FEATURE, 0,
4977 &rp, sizeof(rp));
4979 if (changed)
4980 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4986 static const struct mgmt_exp_feature {
4987 const u8 *uuid;
4988 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4989 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4990 } exp_features[] = {
4991 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4992 #ifdef CONFIG_BT_FEATURE_DEBUG
4993 EXP_FEAT(debug_uuid, set_debug_func),
4994 #endif
4995 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4996 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4997 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4998 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4999 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5000 #ifdef CONFIG_BT_LE
5001 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5002 #endif
5004 /* end with a null feature */
5005 EXP_FEAT(NULL, NULL)
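/* set_exp_feature() below dispatches on the UUID that starts the command
 * parameters. A minimal sketch of the wire format, assuming the usual
 * definition from mgmt.h:
 *
 *	struct mgmt_cp_set_exp_feature {
 *		__u8 uuid[16];	// matched against the table above
 *		__u8 param[];	// feature specific; a single on/off octet here
 *	} __packed;
 */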
5008 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5009 void *data, u16 data_len)
5011 struct mgmt_cp_set_exp_feature *cp = data;
5014 bt_dev_dbg(hdev, "sock %p", sk);
5016 for (i = 0; exp_features[i].uuid; i++) {
5017 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5018 return exp_features[i].set_func(sk, hdev, cp, data_len);
5021 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5022 MGMT_OP_SET_EXP_FEATURE,
5023 MGMT_STATUS_NOT_SUPPORTED);
5026 static u32 get_params_flags(struct hci_dev *hdev,
5027 struct hci_conn_params *params)
5029 u32 flags = hdev->conn_flags;
5031 /* Devices using RPAs can only be programmed into the accept list if
5032 * LL Privacy has been enabled; otherwise they cannot set
5033 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5035 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5036 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5037 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
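/* Get Device Flags: look up the device in the BR/EDR accept list or the LE
 * connection parameters and reply with its supported and current flags.
 */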
5042 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5045 struct mgmt_cp_get_device_flags *cp = data;
5046 struct mgmt_rp_get_device_flags rp;
5047 struct bdaddr_list_with_flags *br_params;
5048 struct hci_conn_params *params;
5049 u32 supported_flags;
5050 u32 current_flags = 0;
5051 u8 status = MGMT_STATUS_INVALID_PARAMS;
5053 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
5054 &cp->addr.bdaddr, cp->addr.type);
5058 supported_flags = hdev->conn_flags;
5060 memset(&rp, 0, sizeof(rp));
5062 if (cp->addr.type == BDADDR_BREDR) {
5063 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5064 &cp->addr.bdaddr,
5065 cp->addr.type);
5066 if (!br_params)
5067 goto done;
5069 current_flags = br_params->flags;
5071 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5072 le_addr_type(cp->addr.type));
5073 if (!params)
5074 goto done;
5076 supported_flags = get_params_flags(hdev, params);
5077 current_flags = params->flags;
5080 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5081 rp.addr.type = cp->addr.type;
5082 rp.supported_flags = cpu_to_le32(supported_flags);
5083 rp.current_flags = cpu_to_le32(current_flags);
5085 status = MGMT_STATUS_SUCCESS;
5088 hci_dev_unlock(hdev);
5090 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5091 &rp, sizeof(rp));
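/* Send a Device Flags Changed event to every mgmt socket except the one
 * that issued the change (passed in as sk).
 */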
5094 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5095 bdaddr_t *bdaddr, u8 bdaddr_type,
5096 u32 supported_flags, u32 current_flags)
5098 struct mgmt_ev_device_flags_changed ev;
5100 bacpy(&ev.addr.bdaddr, bdaddr);
5101 ev.addr.type = bdaddr_type;
5102 ev.supported_flags = cpu_to_le32(supported_flags);
5103 ev.current_flags = cpu_to_le32(current_flags);
5105 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
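/* Set Device Flags: reject flags outside the supported mask, store the new
 * value on the matching accept list entry (BR/EDR) or conn params (LE) and
 * notify other sockets via device_flags_changed() on success.
 */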
5108 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5111 struct mgmt_cp_set_device_flags *cp = data;
5112 struct bdaddr_list_with_flags *br_params;
5113 struct hci_conn_params *params;
5114 u8 status = MGMT_STATUS_INVALID_PARAMS;
5115 u32 supported_flags;
5116 u32 current_flags = __le32_to_cpu(cp->current_flags);
5118 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5119 &cp->addr.bdaddr, cp->addr.type, current_flags);
5121 /* Ideally hci_dev_lock() would be taken before reading conn_flags, since they can change concurrently */
5122 supported_flags = hdev->conn_flags;
5124 if ((supported_flags | current_flags) != supported_flags) {
5125 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
5126 current_flags, supported_flags);
5132 if (cp->addr.type == BDADDR_BREDR) {
5133 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5134 &cp->addr.bdaddr,
5135 cp->addr.type);
5137 if (br_params) {
5138 br_params->flags = current_flags;
5139 status = MGMT_STATUS_SUCCESS;
5140 } else {
5141 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5142 &cp->addr.bdaddr, cp->addr.type);
5148 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5149 le_addr_type(cp->addr.type));
5150 if (!params) {
5151 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5152 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5156 supported_flags = get_params_flags(hdev, params);
5158 if ((supported_flags | current_flags) != supported_flags) {
5159 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
5160 current_flags, supported_flags);
5164 params->flags = current_flags;
5165 status = MGMT_STATUS_SUCCESS;
5167 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5168 * has been set.
5169 */
5170 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5171 hci_update_passive_scan(hdev);
5174 hci_dev_unlock(hdev);
5177 if (status == MGMT_STATUS_SUCCESS)
5178 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5179 supported_flags, current_flags);
5181 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5182 &cp->addr, sizeof(cp->addr));
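/* Notify mgmt sockets that an advertisement monitor has been registered */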
5185 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5188 struct mgmt_ev_adv_monitor_added ev;
5190 ev.monitor_handle = cpu_to_le16(handle);
5192 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5195 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5197 struct mgmt_ev_adv_monitor_removed ev;
5198 struct mgmt_pending_cmd *cmd;
5199 struct sock *sk_skip = NULL;
5200 struct mgmt_cp_remove_adv_monitor *cp;
5202 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5206 if (cp->monitor_handle)
5207 sk_skip = cmd->sk;
5210 ev.monitor_handle = cpu_to_le16(handle);
5212 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5215 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5216 void *data, u16 len)
5218 struct adv_monitor *monitor = NULL;
5219 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5222 __u32 supported = 0;
5224 __u16 num_handles = 0;
5225 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5227 BT_DBG("request for %s", hdev->name);
5231 if (msft_monitor_supported(hdev))
5232 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5234 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5235 handles[num_handles++] = monitor->handle;
5237 hci_dev_unlock(hdev);
5239 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5240 rp = kmalloc(rp_size, GFP_KERNEL);
5241 if (!rp)
5242 return -ENOMEM;
5244 /* All supported features are currently enabled */
5245 enabled = supported;
5247 rp->supported_features = cpu_to_le32(supported);
5248 rp->enabled_features = cpu_to_le32(enabled);
5249 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5250 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5251 rp->num_handles = cpu_to_le16(num_handles);
5253 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5255 err = mgmt_cmd_complete(sk, hdev->id,
5256 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5257 MGMT_STATUS_SUCCESS, rp, rp_size);
5264 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5265 void *data, int status)
5267 struct mgmt_rp_add_adv_patterns_monitor rp;
5268 struct mgmt_pending_cmd *cmd = data;
5269 struct adv_monitor *monitor = cmd->user_data;
5273 rp.monitor_handle = cpu_to_le16(monitor->handle);
5275 if (!status) {
5276 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5277 hdev->adv_monitors_cnt++;
5278 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5279 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5280 hci_update_passive_scan(hdev);
5283 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5284 mgmt_status(status), &rp, sizeof(rp));
5285 mgmt_pending_remove(cmd);
5287 hci_dev_unlock(hdev);
5288 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5289 rp.monitor_handle, status);
5292 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5294 struct mgmt_pending_cmd *cmd = data;
5295 struct adv_monitor *monitor = cmd->user_data;
5297 return hci_add_adv_monitor(hdev, monitor);
5300 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5301 struct adv_monitor *m, u8 status,
5302 void *data, u16 len, u16 op)
5304 struct mgmt_pending_cmd *cmd;
5312 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5313 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5314 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5315 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5316 status = MGMT_STATUS_BUSY;
5320 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5322 status = MGMT_STATUS_NO_RESOURCES;
5327 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5328 mgmt_add_adv_patterns_monitor_complete);
5331 status = MGMT_STATUS_NO_RESOURCES;
5332 else
5333 status = MGMT_STATUS_FAILED;
5338 hci_dev_unlock(hdev);
5343 hci_free_adv_monitor(hdev, m);
5344 hci_dev_unlock(hdev);
5345 return mgmt_cmd_status(sk, hdev->id, op, status);
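/* Copy caller supplied RSSI thresholds into the monitor, or fall back to
 * permissive defaults when rssi is NULL.
 */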
5348 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5349 struct mgmt_adv_rssi_thresholds *rssi)
5352 m->rssi.low_threshold = rssi->low_threshold;
5353 m->rssi.low_threshold_timeout =
5354 __le16_to_cpu(rssi->low_threshold_timeout);
5355 m->rssi.high_threshold = rssi->high_threshold;
5356 m->rssi.high_threshold_timeout =
5357 __le16_to_cpu(rssi->high_threshold_timeout);
5358 m->rssi.sampling_period = rssi->sampling_period;
5360 /* Default values. These numbers are the least restrictive
5361 * parameters for the MSFT API to work, so it behaves as if there
5362 * are no RSSI parameters to consider. May need to be changed
5363 * if other APIs are to be supported.
5365 m->rssi.low_threshold = -127;
5366 m->rssi.low_threshold_timeout = 60;
5367 m->rssi.high_threshold = -127;
5368 m->rssi.high_threshold_timeout = 0;
5369 m->rssi.sampling_period = 0;
5373 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5374 struct mgmt_adv_pattern *patterns)
5376 u8 offset = 0, length = 0;
5377 struct adv_pattern *p = NULL;
5380 for (i = 0; i < pattern_count; i++) {
5381 offset = patterns[i].offset;
5382 length = patterns[i].length;
5383 if (offset >= HCI_MAX_AD_LENGTH ||
5384 length > HCI_MAX_AD_LENGTH ||
5385 (offset + length) > HCI_MAX_AD_LENGTH)
5386 return MGMT_STATUS_INVALID_PARAMS;
5388 p = kmalloc(sizeof(*p), GFP_KERNEL);
5389 if (!p)
5390 return MGMT_STATUS_NO_RESOURCES;
5392 p->ad_type = patterns[i].ad_type;
5393 p->offset = patterns[i].offset;
5394 p->length = patterns[i].length;
5395 memcpy(p->value, patterns[i].value, p->length);
5397 INIT_LIST_HEAD(&p->list);
5398 list_add(&p->list, &m->patterns);
5401 return MGMT_STATUS_SUCCESS;
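/* Add Adv Patterns Monitor: a monitor matches when its patterns are found
 * in the advertising data. For illustration only (hypothetical values), a
 * pattern for Service Data of the 16-bit UUID 0xfe2c at the start of that
 * AD element could look like:
 *
 *	struct mgmt_adv_pattern p = {
 *		.ad_type = 0x16,	   // Service Data - 16-bit UUID
 *		.offset	 = 0x00,
 *		.length	 = 0x02,
 *		.value	 = { 0x2c, 0xfe }, // UUID in little-endian order
 *	};
 */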
5404 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5405 void *data, u16 len)
5407 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5408 struct adv_monitor *m = NULL;
5409 u8 status = MGMT_STATUS_SUCCESS;
5410 size_t expected_size = sizeof(*cp);
5412 BT_DBG("request for %s", hdev->name);
5414 if (len <= sizeof(*cp)) {
5415 status = MGMT_STATUS_INVALID_PARAMS;
5419 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5420 if (len != expected_size) {
5421 status = MGMT_STATUS_INVALID_PARAMS;
5425 m = kzalloc(sizeof(*m), GFP_KERNEL);
5426 if (!m) {
5427 status = MGMT_STATUS_NO_RESOURCES;
5431 INIT_LIST_HEAD(&m->patterns);
5433 parse_adv_monitor_rssi(m, NULL);
5434 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5437 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5438 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5441 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5442 void *data, u16 len)
5444 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5445 struct adv_monitor *m = NULL;
5446 u8 status = MGMT_STATUS_SUCCESS;
5447 size_t expected_size = sizeof(*cp);
5449 BT_DBG("request for %s", hdev->name);
5451 if (len <= sizeof(*cp)) {
5452 status = MGMT_STATUS_INVALID_PARAMS;
5456 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5457 if (len != expected_size) {
5458 status = MGMT_STATUS_INVALID_PARAMS;
5462 m = kzalloc(sizeof(*m), GFP_KERNEL);
5463 if (!m) {
5464 status = MGMT_STATUS_NO_RESOURCES;
5468 INIT_LIST_HEAD(&m->patterns);
5470 parse_adv_monitor_rssi(m, &cp->rssi);
5471 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5474 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5475 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
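/* Completion for Remove Adv Monitor: echo the handle from the original
 * request back to user space and refresh passive scanning on success.
 */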
5478 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5479 void *data, int status)
5481 struct mgmt_rp_remove_adv_monitor rp;
5482 struct mgmt_pending_cmd *cmd = data;
5483 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5487 rp.monitor_handle = cp->monitor_handle;
5489 if (!status)
5490 hci_update_passive_scan(hdev);
5492 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5493 mgmt_status(status), &rp, sizeof(rp));
5494 mgmt_pending_remove(cmd);
5496 hci_dev_unlock(hdev);
5497 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5498 rp.monitor_handle, status);
5501 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5503 struct mgmt_pending_cmd *cmd = data;
5504 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5505 u16 handle = __le16_to_cpu(cp->monitor_handle);
5507 if (!handle)
5508 return hci_remove_all_adv_monitor(hdev);
5510 return hci_remove_single_adv_monitor(hdev, handle);
5513 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5514 void *data, u16 len)
5516 struct mgmt_pending_cmd *cmd;
5521 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5522 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5523 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5524 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5525 status = MGMT_STATUS_BUSY;
5529 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5531 status = MGMT_STATUS_NO_RESOURCES;
5535 err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5536 mgmt_remove_adv_monitor_complete);
5539 mgmt_pending_remove(cmd);
5542 status = MGMT_STATUS_NO_RESOURCES;
5543 else
5544 status = MGMT_STATUS_FAILED;
5549 hci_dev_unlock(hdev);
5554 hci_dev_unlock(hdev);
5555 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5556 status);
5559 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5561 struct mgmt_rp_read_local_oob_data mgmt_rp;
5562 size_t rp_size = sizeof(mgmt_rp);
5563 struct mgmt_pending_cmd *cmd = data;
5564 struct sk_buff *skb = cmd->skb;
5565 u8 status = mgmt_status(err);
5567 if (!status) {
5568 if (!skb)
5569 status = MGMT_STATUS_FAILED;
5570 else if (IS_ERR(skb))
5571 status = mgmt_status(PTR_ERR(skb));
5572 else
5573 status = mgmt_status(skb->data[0]);
5576 bt_dev_dbg(hdev, "status %d", status);
5579 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5583 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5585 if (!bredr_sc_enabled(hdev)) {
5586 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5588 if (skb->len < sizeof(*rp)) {
5589 mgmt_cmd_status(cmd->sk, hdev->id,
5590 MGMT_OP_READ_LOCAL_OOB_DATA,
5591 MGMT_STATUS_FAILED);
5595 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5596 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5598 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5600 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5602 if (skb->len < sizeof(*rp)) {
5603 mgmt_cmd_status(cmd->sk, hdev->id,
5604 MGMT_OP_READ_LOCAL_OOB_DATA,
5605 MGMT_STATUS_FAILED);
5609 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5610 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5612 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5613 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5616 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5617 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5620 if (skb && !IS_ERR(skb))
5621 kfree_skb(skb);
5623 mgmt_pending_free(cmd);
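/* Issue the HCI Read Local OOB (Extended) Data request synchronously; the
 * boolean argument presumably selects the extended variant, used when
 * BR/EDR Secure Connections is enabled.
 */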
5626 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5628 struct mgmt_pending_cmd *cmd = data;
5630 if (bredr_sc_enabled(hdev))
5631 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5633 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5635 if (IS_ERR(cmd->skb))
5636 return PTR_ERR(cmd->skb);
5641 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5642 void *data, u16 data_len)
5644 struct mgmt_pending_cmd *cmd;
5647 bt_dev_dbg(hdev, "sock %p", sk);
5651 if (!hdev_is_powered(hdev)) {
5652 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5653 MGMT_STATUS_NOT_POWERED);
5657 if (!lmp_ssp_capable(hdev)) {
5658 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5659 MGMT_STATUS_NOT_SUPPORTED);
5663 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5667 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5668 read_local_oob_data_complete);
5671 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5672 MGMT_STATUS_FAILED);
5675 mgmt_pending_free(cmd);
5679 hci_dev_unlock(hdev);
5683 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5684 void *data, u16 len)
5686 struct mgmt_addr_info *addr = data;
5689 bt_dev_dbg(hdev, "sock %p", sk);
5691 if (!bdaddr_type_is_valid(addr->type))
5692 return mgmt_cmd_complete(sk, hdev->id,
5693 MGMT_OP_ADD_REMOTE_OOB_DATA,
5694 MGMT_STATUS_INVALID_PARAMS,
5695 addr, sizeof(*addr));
5699 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5700 struct mgmt_cp_add_remote_oob_data *cp = data;
5703 if (cp->addr.type != BDADDR_BREDR) {
5704 err = mgmt_cmd_complete(sk, hdev->id,
5705 MGMT_OP_ADD_REMOTE_OOB_DATA,
5706 MGMT_STATUS_INVALID_PARAMS,
5707 &cp->addr, sizeof(cp->addr));
5711 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5712 cp->addr.type, cp->hash,
5713 cp->rand, NULL, NULL);
5714 if (err < 0)
5715 status = MGMT_STATUS_FAILED;
5716 else
5717 status = MGMT_STATUS_SUCCESS;
5719 err = mgmt_cmd_complete(sk, hdev->id,
5720 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5721 &cp->addr, sizeof(cp->addr));
5722 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5723 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5724 u8 *rand192, *hash192, *rand256, *hash256;
5727 if (bdaddr_type_is_le(cp->addr.type)) {
5728 /* Enforce zero-valued 192-bit parameters as
5729 * long as legacy SMP OOB isn't implemented.
5731 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5732 memcmp(cp->hash192, ZERO_KEY, 16)) {
5733 err = mgmt_cmd_complete(sk, hdev->id,
5734 MGMT_OP_ADD_REMOTE_OOB_DATA,
5735 MGMT_STATUS_INVALID_PARAMS,
5736 addr, sizeof(*addr));
5743 /* In case one of the P-192 values is set to zero,
5744 * then just disable OOB data for P-192.
5746 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5747 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5748 rand192 = NULL;
5749 hash192 = NULL;
5750 } else {
5751 rand192 = cp->rand192;
5752 hash192 = cp->hash192;
5756 /* In case one of the P-256 values is set to zero, then just
5757 * disable OOB data for P-256.
5759 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5760 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5761 rand256 = NULL;
5762 hash256 = NULL;
5763 } else {
5764 rand256 = cp->rand256;
5765 hash256 = cp->hash256;
5768 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5769 cp->addr.type, hash192, rand192,
5770 hash256, rand256);
5771 if (err < 0)
5772 status = MGMT_STATUS_FAILED;
5773 else
5774 status = MGMT_STATUS_SUCCESS;
5776 err = mgmt_cmd_complete(sk, hdev->id,
5777 MGMT_OP_ADD_REMOTE_OOB_DATA,
5778 status, &cp->addr, sizeof(cp->addr));
5780 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5782 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5783 MGMT_STATUS_INVALID_PARAMS);
5787 hci_dev_unlock(hdev);
5791 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5792 void *data, u16 len)
5794 struct mgmt_cp_remove_remote_oob_data *cp = data;
5798 bt_dev_dbg(hdev, "sock %p", sk);
5800 if (cp->addr.type != BDADDR_BREDR)
5801 return mgmt_cmd_complete(sk, hdev->id,
5802 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5803 MGMT_STATUS_INVALID_PARAMS,
5804 &cp->addr, sizeof(cp->addr));
5808 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5809 hci_remote_oob_data_clear(hdev);
5810 status = MGMT_STATUS_SUCCESS;
5814 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5815 if (err < 0)
5816 status = MGMT_STATUS_INVALID_PARAMS;
5817 else
5818 status = MGMT_STATUS_SUCCESS;
5821 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5822 status, &cp->addr, sizeof(cp->addr));
5824 hci_dev_unlock(hdev);
5828 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5830 struct mgmt_pending_cmd *cmd;
5832 bt_dev_dbg(hdev, "status %u", status);
5836 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5837 if (!cmd)
5838 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5840 if (!cmd)
5841 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5843 if (cmd) {
5844 cmd->cmd_complete(cmd, mgmt_status(status));
5845 mgmt_pending_remove(cmd);
5848 hci_dev_unlock(hdev);
5851 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5852 uint8_t *mgmt_status)
5854 switch (type) {
5855 case DISCOV_TYPE_LE:
5856 *mgmt_status = mgmt_le_support(hdev);
5860 case DISCOV_TYPE_INTERLEAVED:
5861 *mgmt_status = mgmt_le_support(hdev);
5865 case DISCOV_TYPE_BREDR:
5866 *mgmt_status = mgmt_bredr_support(hdev);
5870 default:
5871 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5878 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5880 struct mgmt_pending_cmd *cmd = data;
5882 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5883 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5884 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5885 return;
5887 bt_dev_dbg(hdev, "err %d", err);
5889 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5890 cmd->param, 1);
5891 mgmt_pending_remove(cmd);
5893 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5894 DISCOVERY_FINDING);
5897 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5899 return hci_start_discovery_sync(hdev);
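/* Common implementation behind Start Discovery and Start Limited
 * Discovery: validate power state, discovery state and discovery type,
 * then queue start_discovery_sync() on the cmd_sync machinery.
 */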
5902 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5903 u16 op, void *data, u16 len)
5905 struct mgmt_cp_start_discovery *cp = data;
5906 struct mgmt_pending_cmd *cmd;
5910 bt_dev_dbg(hdev, "sock %p", sk);
5914 if (!hdev_is_powered(hdev)) {
5915 err = mgmt_cmd_complete(sk, hdev->id, op,
5916 MGMT_STATUS_NOT_POWERED,
5917 &cp->type, sizeof(cp->type));
5921 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5922 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5923 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5924 &cp->type, sizeof(cp->type));
5928 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5929 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5930 &cp->type, sizeof(cp->type));
5934 /* Can't start discovery when it is paused */
5935 if (hdev->discovery_paused) {
5936 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5937 &cp->type, sizeof(cp->type));
5941 /* Clear the discovery filter first to free any previously
5942 * allocated memory for the UUID list.
5944 hci_discovery_filter_clear(hdev);
5946 hdev->discovery.type = cp->type;
5947 hdev->discovery.report_invalid_rssi = false;
5948 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5949 hdev->discovery.limited = true;
5950 else
5951 hdev->discovery.limited = false;
5953 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5959 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5960 start_discovery_complete);
5962 mgmt_pending_remove(cmd);
5966 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5969 hci_dev_unlock(hdev);
5973 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5974 void *data, u16 len)
5976 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5977 data, len);
5980 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5981 void *data, u16 len)
5983 return start_discovery_internal(sk, hdev,
5984 MGMT_OP_START_LIMITED_DISCOVERY,
5985 data, len);
5988 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5989 void *data, u16 len)
5991 struct mgmt_cp_start_service_discovery *cp = data;
5992 struct mgmt_pending_cmd *cmd;
5993 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5994 u16 uuid_count, expected_len;
5998 bt_dev_dbg(hdev, "sock %p", sk);
6002 if (!hdev_is_powered(hdev)) {
6003 err = mgmt_cmd_complete(sk, hdev->id,
6004 MGMT_OP_START_SERVICE_DISCOVERY,
6005 MGMT_STATUS_NOT_POWERED,
6006 &cp->type, sizeof(cp->type));
6010 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6011 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6012 err = mgmt_cmd_complete(sk, hdev->id,
6013 MGMT_OP_START_SERVICE_DISCOVERY,
6014 MGMT_STATUS_BUSY, &cp->type,
6019 if (hdev->discovery_paused) {
6020 err = mgmt_cmd_complete(sk, hdev->id,
6021 MGMT_OP_START_SERVICE_DISCOVERY,
6022 MGMT_STATUS_BUSY, &cp->type,
6027 uuid_count = __le16_to_cpu(cp->uuid_count);
6028 if (uuid_count > max_uuid_count) {
6029 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6031 err = mgmt_cmd_complete(sk, hdev->id,
6032 MGMT_OP_START_SERVICE_DISCOVERY,
6033 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6038 expected_len = sizeof(*cp) + uuid_count * 16;
6039 if (expected_len != len) {
6040 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6042 err = mgmt_cmd_complete(sk, hdev->id,
6043 MGMT_OP_START_SERVICE_DISCOVERY,
6044 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6049 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6050 err = mgmt_cmd_complete(sk, hdev->id,
6051 MGMT_OP_START_SERVICE_DISCOVERY,
6052 status, &cp->type, sizeof(cp->type));
6056 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6057 hdev, data, len);
6063 /* Clear the discovery filter first to free any previously
6064 * allocated memory for the UUID list.
6066 hci_discovery_filter_clear(hdev);
6068 hdev->discovery.result_filtering = true;
6069 hdev->discovery.type = cp->type;
6070 hdev->discovery.rssi = cp->rssi;
6071 hdev->discovery.uuid_count = uuid_count;
6073 if (uuid_count > 0) {
6074 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6075 GFP_KERNEL);
6076 if (!hdev->discovery.uuids) {
6077 err = mgmt_cmd_complete(sk, hdev->id,
6078 MGMT_OP_START_SERVICE_DISCOVERY,
6079 MGMT_STATUS_NO_RESOURCES,
6080 &cp->type, sizeof(cp->type));
6081 mgmt_pending_remove(cmd);
6086 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6087 start_discovery_complete);
6089 mgmt_pending_remove(cmd);
6093 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6096 hci_dev_unlock(hdev);
6100 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6102 struct mgmt_pending_cmd *cmd;
6104 bt_dev_dbg(hdev, "status %u", status);
6108 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6109 if (cmd) {
6110 cmd->cmd_complete(cmd, mgmt_status(status));
6111 mgmt_pending_remove(cmd);
6114 hci_dev_unlock(hdev);
6117 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6119 struct mgmt_pending_cmd *cmd = data;
6121 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6122 return;
6124 bt_dev_dbg(hdev, "err %d", err);
6126 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6127 cmd->param, 1);
6128 mgmt_pending_remove(cmd);
6131 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6134 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6136 return hci_stop_discovery_sync(hdev);
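/* Stop Discovery: only allowed while discovery is active, and the type
 * must match the one the discovery was started with.
 */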
6139 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6142 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6143 struct mgmt_pending_cmd *cmd;
6146 bt_dev_dbg(hdev, "sock %p", sk);
6150 if (!hci_discovery_active(hdev)) {
6151 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6152 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6153 sizeof(mgmt_cp->type));
6157 if (hdev->discovery.type != mgmt_cp->type) {
6158 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6159 MGMT_STATUS_INVALID_PARAMS,
6160 &mgmt_cp->type, sizeof(mgmt_cp->type));
6164 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6170 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6171 stop_discovery_complete);
6173 mgmt_pending_remove(cmd);
6177 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6180 hci_dev_unlock(hdev);
6184 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6187 struct mgmt_cp_confirm_name *cp = data;
6188 struct inquiry_entry *e;
6191 bt_dev_dbg(hdev, "sock %p", sk);
6195 if (!hci_discovery_active(hdev)) {
6196 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6197 MGMT_STATUS_FAILED, &cp->addr,
6198 sizeof(cp->addr));
6202 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6204 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6205 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6206 sizeof(cp->addr));
6210 if (cp->name_known) {
6211 e->name_state = NAME_KNOWN;
6212 list_del(&e->list);
6213 } else {
6214 e->name_state = NAME_NEEDED;
6215 hci_inquiry_cache_update_resolve(hdev, e);
6218 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6219 &cp->addr, sizeof(cp->addr));
6222 hci_dev_unlock(hdev);
6226 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6229 struct mgmt_cp_block_device *cp = data;
6233 bt_dev_dbg(hdev, "sock %p", sk);
6235 if (!bdaddr_type_is_valid(cp->addr.type))
6236 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6237 MGMT_STATUS_INVALID_PARAMS,
6238 &cp->addr, sizeof(cp->addr));
6242 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6243 cp->addr.type);
6244 if (err < 0) {
6245 status = MGMT_STATUS_FAILED;
6249 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6250 sk);
6251 status = MGMT_STATUS_SUCCESS;
6254 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6255 &cp->addr, sizeof(cp->addr));
6257 hci_dev_unlock(hdev);
6262 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6265 struct mgmt_cp_unblock_device *cp = data;
6269 bt_dev_dbg(hdev, "sock %p", sk);
6271 if (!bdaddr_type_is_valid(cp->addr.type))
6272 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6273 MGMT_STATUS_INVALID_PARAMS,
6274 &cp->addr, sizeof(cp->addr));
6278 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6279 cp->addr.type);
6280 if (err < 0) {
6281 status = MGMT_STATUS_INVALID_PARAMS;
6285 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6286 sk);
6287 status = MGMT_STATUS_SUCCESS;
6290 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6291 &cp->addr, sizeof(cp->addr));
6293 hci_dev_unlock(hdev);
6298 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6300 return hci_update_eir_sync(hdev);
6303 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6306 struct mgmt_cp_set_device_id *cp = data;
6310 bt_dev_dbg(hdev, "sock %p", sk);
6312 source = __le16_to_cpu(cp->source);
6314 if (source > 0x0002)
6315 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6316 MGMT_STATUS_INVALID_PARAMS);
6320 hdev->devid_source = source;
6321 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6322 hdev->devid_product = __le16_to_cpu(cp->product);
6323 hdev->devid_version = __le16_to_cpu(cp->version);
6325 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6326 NULL, 0);
6328 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6330 hci_dev_unlock(hdev);
6335 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6338 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6340 bt_dev_dbg(hdev, "status %d", err);
6343 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6345 struct cmd_lookup match = { NULL, hdev };
6347 struct adv_info *adv_instance;
6348 u8 status = mgmt_status(err);
6350 if (status) {
6351 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6352 cmd_status_rsp, &status);
6356 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6357 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6358 else
6359 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6361 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6362 &match);
6364 new_settings(hdev, match.sk);
6369 /* If "Set Advertising" was just disabled and instance advertising was
6370 * set up earlier, then re-enable multi-instance advertising.
6372 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6373 list_empty(&hdev->adv_instances))
6376 instance = hdev->cur_adv_instance;
6377 if (!instance) {
6378 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6379 struct adv_info, list);
6380 if (!adv_instance)
6381 return;
6383 instance = adv_instance->instance;
6386 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6387 if (err)
6388 enable_advertising_instance(hdev, err);
6391 static int set_adv_sync(struct hci_dev *hdev, void *data)
6393 struct mgmt_pending_cmd *cmd = data;
6394 struct mgmt_mode *cp = cmd->param;
6397 if (cp->val == 0x02)
6398 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6399 else
6400 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6402 cancel_adv_timeout(hdev);
6405 /* Switch to instance "0" for the Set Advertising setting.
6406 * We cannot use update_[adv|scan_rsp]_data() here as the
6407 * HCI_ADVERTISING flag is not yet set.
6409 hdev->cur_adv_instance = 0x00;
6411 if (ext_adv_capable(hdev)) {
6412 hci_start_ext_adv_sync(hdev, 0x00);
6413 } else {
6414 hci_update_adv_data_sync(hdev, 0x00);
6415 hci_update_scan_rsp_data_sync(hdev, 0x00);
6416 hci_enable_advertising_sync(hdev);
6418 } else {
6419 hci_disable_advertising_sync(hdev);
6425 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6428 struct mgmt_mode *cp = data;
6429 struct mgmt_pending_cmd *cmd;
6433 bt_dev_dbg(hdev, "sock %p", sk);
6435 status = mgmt_le_support(hdev);
6436 if (status)
6437 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6438 status);
6440 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6441 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6442 MGMT_STATUS_INVALID_PARAMS);
6444 if (hdev->advertising_paused)
6445 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6446 MGMT_STATUS_BUSY);
6452 /* The following conditions are ones which mean that we should
6453 * not do any HCI communication but directly send a mgmt
6454 * response to user space (after toggling the flag if
6455 * needed).
6456 */
6457 if (!hdev_is_powered(hdev) ||
6458 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6459 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6460 hci_dev_test_flag(hdev, HCI_MESH) ||
6461 hci_conn_num(hdev, LE_LINK) > 0 ||
6462 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6463 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6464 bool changed;
6466 if (cp->val) {
6467 hdev->cur_adv_instance = 0x00;
6468 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6469 if (cp->val == 0x02)
6470 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6471 else
6472 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6473 } else {
6474 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6475 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6478 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6479 if (err < 0)
6480 goto unlock;
6482 if (changed)
6483 err = new_settings(hdev, sk);
6488 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6489 pending_find(MGMT_OP_SET_LE, hdev)) {
6490 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6491 MGMT_STATUS_BUSY);
6495 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6499 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6500 set_advertising_complete);
6503 mgmt_pending_remove(cmd);
6506 hci_dev_unlock(hdev);
6510 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6511 void *data, u16 len)
6513 struct mgmt_cp_set_static_address *cp = data;
6516 bt_dev_dbg(hdev, "sock %p", sk);
6518 if (!lmp_le_capable(hdev))
6519 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6520 MGMT_STATUS_NOT_SUPPORTED);
6522 if (hdev_is_powered(hdev))
6523 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6524 MGMT_STATUS_REJECTED);
6526 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6527 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6528 return mgmt_cmd_status(sk, hdev->id,
6529 MGMT_OP_SET_STATIC_ADDRESS,
6530 MGMT_STATUS_INVALID_PARAMS);
6532 /* Two most significant bits shall be set */
6533 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6534 return mgmt_cmd_status(sk, hdev->id,
6535 MGMT_OP_SET_STATIC_ADDRESS,
6536 MGMT_STATUS_INVALID_PARAMS);
6541 bacpy(&hdev->static_addr, &cp->bdaddr);
6543 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6544 if (err < 0)
6545 goto unlock;
6547 err = new_settings(hdev, sk);
6550 hci_dev_unlock(hdev);
6554 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6555 void *data, u16 len)
6557 struct mgmt_cp_set_scan_params *cp = data;
6558 __u16 interval, window;
6561 bt_dev_dbg(hdev, "sock %p", sk);
6563 if (!lmp_le_capable(hdev))
6564 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6565 MGMT_STATUS_NOT_SUPPORTED);
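/* Interval and window are in units of 0.625 ms, so the range checked
 * below (0x0004 to 0x4000) corresponds to 2.5 ms through 10.24 s, and
 * the scan window may never be larger than the scan interval.
 */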
6567 interval = __le16_to_cpu(cp->interval);
6569 if (interval < 0x0004 || interval > 0x4000)
6570 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6571 MGMT_STATUS_INVALID_PARAMS);
6573 window = __le16_to_cpu(cp->window);
6575 if (window < 0x0004 || window > 0x4000)
6576 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6577 MGMT_STATUS_INVALID_PARAMS);
6579 if (window > interval)
6580 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6581 MGMT_STATUS_INVALID_PARAMS);
6585 hdev->le_scan_interval = interval;
6586 hdev->le_scan_window = window;
6588 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6591 /* If background scan is running, restart it so new parameters are
6592 * loaded.
6593 */
6594 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6595 hdev->discovery.state == DISCOVERY_STOPPED)
6596 hci_update_passive_scan(hdev);
6598 hci_dev_unlock(hdev);
6603 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6605 struct mgmt_pending_cmd *cmd = data;
6607 bt_dev_dbg(hdev, "err %d", err);
6610 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6613 struct mgmt_mode *cp = cmd->param;
6615 if (cp->val)
6616 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6617 else
6618 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6620 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6621 new_settings(hdev, cmd->sk);
6624 mgmt_pending_free(cmd);
6627 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6629 struct mgmt_pending_cmd *cmd = data;
6630 struct mgmt_mode *cp = cmd->param;
6632 return hci_write_fast_connectable_sync(hdev, cp->val);
6635 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6636 void *data, u16 len)
6638 struct mgmt_mode *cp = data;
6639 struct mgmt_pending_cmd *cmd;
6642 bt_dev_dbg(hdev, "sock %p", sk);
6644 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6645 hdev->hci_ver < BLUETOOTH_VER_1_2)
6646 return mgmt_cmd_status(sk, hdev->id,
6647 MGMT_OP_SET_FAST_CONNECTABLE,
6648 MGMT_STATUS_NOT_SUPPORTED);
6650 if (cp->val != 0x00 && cp->val != 0x01)
6651 return mgmt_cmd_status(sk, hdev->id,
6652 MGMT_OP_SET_FAST_CONNECTABLE,
6653 MGMT_STATUS_INVALID_PARAMS);
6657 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6658 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6662 if (!hdev_is_powered(hdev)) {
6663 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6664 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6665 new_settings(hdev, sk);
6669 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6670 len);
6674 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6675 fast_connectable_complete);
6677 if (err) {
6678 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6679 MGMT_STATUS_FAILED);
6682 mgmt_pending_free(cmd);
6686 hci_dev_unlock(hdev);
6691 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6693 struct mgmt_pending_cmd *cmd = data;
6695 bt_dev_dbg(hdev, "err %d", err);
6698 u8 mgmt_err = mgmt_status(err);
6700 /* We need to restore the flag if related HCI commands
6701 * failed.
6702 */
6703 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6705 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6707 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6708 new_settings(hdev, cmd->sk);
6711 mgmt_pending_free(cmd);
6714 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6718 status = hci_write_fast_connectable_sync(hdev, false);
6721 status = hci_update_scan_sync(hdev);
6723 /* Since only the advertising data flags will change, there
6724 * is no need to update the scan response data.
6727 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6732 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6734 struct mgmt_mode *cp = data;
6735 struct mgmt_pending_cmd *cmd;
6738 bt_dev_dbg(hdev, "sock %p", sk);
6740 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6741 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6742 MGMT_STATUS_NOT_SUPPORTED);
6744 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6745 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6746 MGMT_STATUS_REJECTED);
6748 if (cp->val != 0x00 && cp->val != 0x01)
6749 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6750 MGMT_STATUS_INVALID_PARAMS);
6754 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6755 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6759 if (!hdev_is_powered(hdev)) {
6761 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6762 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6763 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6764 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6765 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6768 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6770 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6771 if (err < 0)
6772 goto unlock;
6774 err = new_settings(hdev, sk);
6778 /* Reject disabling when powered on */
6780 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6781 MGMT_STATUS_REJECTED);
6784 /* When a dual-mode controller is configured to operate
6785 * LE-only with a static address, switching
6786 * BR/EDR back on is not allowed.
6788 * Dual-mode controllers shall operate with the public
6789 * address as their identity address for BR/EDR and LE. So
6790 * reject the attempt to create an invalid configuration.
6792 * The same restriction applies when secure connections
6793 * have been enabled. For BR/EDR this is a controller feature
6794 * while for LE it is a host stack feature. This means that
6795 * switching BR/EDR back on when secure connections have been
6796 * enabled is not a supported transaction.
6798 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6799 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6800 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6801 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6802 MGMT_STATUS_REJECTED);
6807 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6811 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6812 set_bredr_complete);
6814 if (err) {
6815 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6816 MGMT_STATUS_FAILED);
6818 mgmt_pending_free(cmd);
6823 /* We need to flip the bit already here so that
6824 * hci_req_update_adv_data generates the correct flags.
6826 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6829 hci_dev_unlock(hdev);
6833 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6835 struct mgmt_pending_cmd *cmd = data;
6836 struct mgmt_mode *cp;
6838 bt_dev_dbg(hdev, "err %d", err);
6841 u8 mgmt_err = mgmt_status(err);
6843 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6851 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6852 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6855 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6856 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6859 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6860 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6864 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6865 new_settings(hdev, cmd->sk);
6868 mgmt_pending_free(cmd);
6871 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6873 struct mgmt_pending_cmd *cmd = data;
6874 struct mgmt_mode *cp = cmd->param;
6877 /* Force write of val */
6878 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6880 return hci_write_sc_support_sync(hdev, val);
6883 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6884 void *data, u16 len)
6886 struct mgmt_mode *cp = data;
6887 struct mgmt_pending_cmd *cmd;
6891 bt_dev_dbg(hdev, "sock %p", sk);
6893 if (!lmp_sc_capable(hdev) &&
6894 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6895 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6896 MGMT_STATUS_NOT_SUPPORTED);
6898 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6899 lmp_sc_capable(hdev) &&
6900 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6901 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6902 MGMT_STATUS_REJECTED);
6904 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6905 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6906 MGMT_STATUS_INVALID_PARAMS);
6910 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6911 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6915 changed = !hci_dev_test_and_set_flag(hdev,
6917 if (cp->val == 0x02)
6918 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6919 else
6920 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6922 changed = hci_dev_test_and_clear_flag(hdev,
6924 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6927 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6928 if (err < 0)
6929 goto failed;
6931 if (changed)
6932 err = new_settings(hdev, sk);
6939 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6940 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6941 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6945 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6949 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6950 set_secure_conn_complete);
6952 if (err) {
6953 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6954 MGMT_STATUS_FAILED);
6956 mgmt_pending_free(cmd);
6960 hci_dev_unlock(hdev);
6964 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6965 void *data, u16 len)
6967 struct mgmt_mode *cp = data;
6968 bool changed, use_changed;
6971 bt_dev_dbg(hdev, "sock %p", sk);
6973 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6974 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6975 MGMT_STATUS_INVALID_PARAMS);
6980 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6982 changed = hci_dev_test_and_clear_flag(hdev,
6983 HCI_KEEP_DEBUG_KEYS);
6985 if (cp->val == 0x02)
6986 use_changed = !hci_dev_test_and_set_flag(hdev,
6987 HCI_USE_DEBUG_KEYS);
6988 else
6989 use_changed = hci_dev_test_and_clear_flag(hdev,
6990 HCI_USE_DEBUG_KEYS);
6992 if (hdev_is_powered(hdev) && use_changed &&
6993 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6994 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6995 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6996 sizeof(mode), &mode);
6999 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7000 if (err < 0)
7001 goto unlock;
7003 if (changed)
7004 err = new_settings(hdev, sk);
7007 hci_dev_unlock(hdev);
7011 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7014 struct mgmt_cp_set_privacy *cp = cp_data;
7018 bt_dev_dbg(hdev, "sock %p", sk);
7020 if (!lmp_le_capable(hdev))
7021 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7022 MGMT_STATUS_NOT_SUPPORTED);
7024 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7025 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7026 MGMT_STATUS_INVALID_PARAMS);
7028 if (hdev_is_powered(hdev))
7029 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7030 MGMT_STATUS_REJECTED);
7034 /* If user space supports this command it is also expected to
7035 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7037 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7040 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7041 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7042 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7043 hci_adv_instances_set_rpa_expired(hdev, true);
7044 if (cp->privacy == 0x02)
7045 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7046 else
7047 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7049 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7050 memset(hdev->irk, 0, sizeof(hdev->irk));
7051 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7052 hci_adv_instances_set_rpa_expired(hdev, false);
7053 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7056 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7057 if (err < 0)
7058 goto unlock;
7060 if (changed)
7061 err = new_settings(hdev, sk);
7064 hci_dev_unlock(hdev);
7068 static bool irk_is_valid(struct mgmt_irk_info *irk)
7070 switch (irk->addr.type) {
7071 case BDADDR_LE_PUBLIC:
7074 case BDADDR_LE_RANDOM:
7075 /* Two most significant bits shall be set */
7076 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7084 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7087 struct mgmt_cp_load_irks *cp = cp_data;
7088 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7089 sizeof(struct mgmt_irk_info));
7090 u16 irk_count, expected_len;
7093 bt_dev_dbg(hdev, "sock %p", sk);
7095 if (!lmp_le_capable(hdev))
7096 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7097 MGMT_STATUS_NOT_SUPPORTED);
7099 irk_count = __le16_to_cpu(cp->irk_count);
7100 if (irk_count > max_irk_count) {
7101 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7103 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7104 MGMT_STATUS_INVALID_PARAMS);
7107 expected_len = struct_size(cp, irks, irk_count);
7108 if (expected_len != len) {
7109 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7111 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7112 MGMT_STATUS_INVALID_PARAMS);
7115 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7117 for (i = 0; i < irk_count; i++) {
7118 struct mgmt_irk_info *key = &cp->irks[i];
7120 if (!irk_is_valid(key))
7121 return mgmt_cmd_status(sk, hdev->id,
7122 MGMT_OP_LOAD_IRKS,
7123 MGMT_STATUS_INVALID_PARAMS);
7128 hci_smp_irks_clear(hdev);
7130 for (i = 0; i < irk_count; i++) {
7131 struct mgmt_irk_info *irk = &cp->irks[i];
7133 if (hci_is_blocked_key(hdev,
7134 HCI_BLOCKED_KEY_TYPE_IRK,
7135 irk->val)) {
7136 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7137 &irk->addr.bdaddr);
7138 continue;
7141 hci_add_irk(hdev, &irk->addr.bdaddr,
7142 le_addr_type(irk->addr.type), irk->val,
7143 BDADDR_ANY);
7146 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7148 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7150 hci_dev_unlock(hdev);
7155 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7157 if (key->initiator != 0x00 && key->initiator != 0x01)
7160 switch (key->addr.type) {
7161 case BDADDR_LE_PUBLIC:
7164 case BDADDR_LE_RANDOM:
7165 /* Two most significant bits shall be set */
7166 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7174 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7175 void *cp_data, u16 len)
7177 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7178 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7179 sizeof(struct mgmt_ltk_info));
7180 u16 key_count, expected_len;
7183 bt_dev_dbg(hdev, "sock %p", sk);
7185 if (!lmp_le_capable(hdev))
7186 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7187 MGMT_STATUS_NOT_SUPPORTED);
7189 key_count = __le16_to_cpu(cp->key_count);
7190 if (key_count > max_key_count) {
7191 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7193 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7194 MGMT_STATUS_INVALID_PARAMS);
7197 expected_len = struct_size(cp, keys, key_count);
7198 if (expected_len != len) {
7199 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7201 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7202 MGMT_STATUS_INVALID_PARAMS);
7205 bt_dev_dbg(hdev, "key_count %u", key_count);
7207 for (i = 0; i < key_count; i++) {
7208 struct mgmt_ltk_info *key = &cp->keys[i];
7210 if (!ltk_is_valid(key))
7211 return mgmt_cmd_status(sk, hdev->id,
7212 MGMT_OP_LOAD_LONG_TERM_KEYS,
7213 MGMT_STATUS_INVALID_PARAMS);
7218 hci_smp_ltks_clear(hdev);
7220 for (i = 0; i < key_count; i++) {
7221 struct mgmt_ltk_info *key = &cp->keys[i];
7222 u8 type, authenticated;
7224 if (hci_is_blocked_key(hdev,
7225 HCI_BLOCKED_KEY_TYPE_LTK,
7226 key->val)) {
7227 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7228 &key->addr.bdaddr);
7229 continue;
7232 switch (key->type) {
7233 case MGMT_LTK_UNAUTHENTICATED:
7234 authenticated = 0x00;
7235 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7237 case MGMT_LTK_AUTHENTICATED:
7238 authenticated = 0x01;
7239 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7241 case MGMT_LTK_P256_UNAUTH:
7242 authenticated = 0x00;
7243 type = SMP_LTK_P256;
7245 case MGMT_LTK_P256_AUTH:
7246 authenticated = 0x01;
7247 type = SMP_LTK_P256;
7249 case MGMT_LTK_P256_DEBUG:
7250 authenticated = 0x00;
7251 type = SMP_LTK_P256_DEBUG;
7257 hci_add_ltk(hdev, &key->addr.bdaddr,
7258 le_addr_type(key->addr.type), type, authenticated,
7259 key->val, key->enc_size, key->ediv, key->rand);
7262 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7263 NULL, 0);
7265 hci_dev_unlock(hdev);
7270 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7272 struct mgmt_pending_cmd *cmd = data;
7273 struct hci_conn *conn = cmd->user_data;
7274 struct mgmt_cp_get_conn_info *cp = cmd->param;
7275 struct mgmt_rp_get_conn_info rp;
7278 bt_dev_dbg(hdev, "err %d", err);
7280 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7282 status = mgmt_status(err);
7283 if (status == MGMT_STATUS_SUCCESS) {
7284 rp.rssi = conn->rssi;
7285 rp.tx_power = conn->tx_power;
7286 rp.max_tx_power = conn->max_tx_power;
7288 rp.rssi = HCI_RSSI_INVALID;
7289 rp.tx_power = HCI_TX_POWER_INVALID;
7290 rp.max_tx_power = HCI_TX_POWER_INVALID;
7293 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7294 &rp, sizeof(rp));
7296 mgmt_pending_free(cmd);
7299 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7301 struct mgmt_pending_cmd *cmd = data;
7302 struct mgmt_cp_get_conn_info *cp = cmd->param;
7303 struct hci_conn *conn;
7307 /* Make sure we are still connected */
7308 if (cp->addr.type == BDADDR_BREDR)
7309 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7310 &cp->addr.bdaddr);
7311 else
7312 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7314 if (!conn || conn->state != BT_CONNECTED)
7315 return MGMT_STATUS_NOT_CONNECTED;
7317 cmd->user_data = conn;
7318 handle = cpu_to_le16(conn->handle);
7320 /* Refresh RSSI each time */
7321 err = hci_read_rssi_sync(hdev, handle);
7323 /* For LE links the TX power does not change, thus we don't need to
7324 * query for it once the value is known.
7326 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7327 conn->tx_power == HCI_TX_POWER_INVALID))
7328 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7330 /* Max TX power needs to be read only once per connection */
7331 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7332 err = hci_read_tx_power_sync(hdev, handle, 0x01);
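/* Get Connection Info: answer from the cached values in hci_conn when
 * they are fresh enough, otherwise queue get_conn_info_sync() to re-read
 * RSSI and TX power from the controller first.
 */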
7337 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7340 struct mgmt_cp_get_conn_info *cp = data;
7341 struct mgmt_rp_get_conn_info rp;
7342 struct hci_conn *conn;
7343 unsigned long conn_info_age;
7346 bt_dev_dbg(hdev, "sock %p", sk);
7348 memset(&rp, 0, sizeof(rp));
7349 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7350 rp.addr.type = cp->addr.type;
7352 if (!bdaddr_type_is_valid(cp->addr.type))
7353 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7354 MGMT_STATUS_INVALID_PARAMS,
7355 &rp, sizeof(rp));
7359 if (!hdev_is_powered(hdev)) {
7360 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7361 MGMT_STATUS_NOT_POWERED, &rp,
7366 if (cp->addr.type == BDADDR_BREDR)
7367 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7370 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7372 if (!conn || conn->state != BT_CONNECTED) {
7373 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7374 MGMT_STATUS_NOT_CONNECTED, &rp,
7379 /* To avoid client trying to guess when to poll again for information we
7380 * calculate conn info age as random value between min/max set in hdev.
7382 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7383 hdev->conn_info_max_age - 1);
7385 /* Query controller to refresh cached values if they are too old or were
7388 if (time_after(jiffies, conn->conn_info_timestamp +
7389 msecs_to_jiffies(conn_info_age)) ||
7390 !conn->conn_info_timestamp) {
7391 struct mgmt_pending_cmd *cmd;
7393 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7398 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7399 cmd, get_conn_info_complete);
7403 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7404 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7407 mgmt_pending_free(cmd);
7412 conn->conn_info_timestamp = jiffies;
7414 /* Cache is valid, just reply with values cached in hci_conn */
7415 rp.rssi = conn->rssi;
7416 rp.tx_power = conn->tx_power;
7417 rp.max_tx_power = conn->max_tx_power;
7419 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7420 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7424 hci_dev_unlock(hdev);
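/* Illustrative only (not part of mgmt.c, never compiled here): a minimal
 * userspace sketch of driving the handler above over the HCI control
 * channel. The AF_BLUETOOTH/BTPROTO_HCI/HCI_CHANNEL_CONTROL socket and
 * the 6-byte little-endian mgmt header are the documented transport; the
 * opcode value 0x0031 is taken from the published mgmt API docs and
 * should be cross-checked against your copy of mgmt.h. Assumes a
 * little-endian host for brevity.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <unistd.h>

struct demo_mgmt_hdr {		/* mirrors struct mgmt_hdr (all __le16) */
	uint16_t opcode;
	uint16_t index;
	uint16_t len;
} __attribute__((packed));

static int demo_get_conn_info(int mgmt_sk, uint16_t hci_index,
			      const uint8_t bdaddr[6], uint8_t addr_type)
{
	uint8_t buf[sizeof(struct demo_mgmt_hdr) + 7];
	struct demo_mgmt_hdr hdr = {
		.opcode = 0x0031,	/* Get Connection Information */
		.index = hci_index,
		.len = 7,		/* mgmt_addr_info: bdaddr + type */
	};

	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), bdaddr, 6);	/* little-endian address */
	buf[sizeof(hdr) + 6] = addr_type;

	/* The reply (status, RSSI, TX power, max TX power) comes back as
	 * a Command Complete event on the same socket.
	 */
	return write(mgmt_sk, buf, sizeof(buf)) == sizeof(buf) ? 0 : -1;
}
#endif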
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}
/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;
	ev.action = action;

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					addr_type);
	if (params)
		current_flags = params->flags;

	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
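/* Illustrative only (not compiled here): the 8-byte payload the handler
 * above parses, i.e. a mgmt_addr_info followed by the action byte that
 * selects HCI_AUTO_CONN_REPORT (0x00), _DIRECT (0x01) or _ALWAYS (0x02).
 * The struct mirrors mgmt_cp_add_device for a self-contained sketch; the
 * opcode value 0x0033 is taken from the published mgmt API docs and
 * should be verified against your mgmt.h.
 */
#if 0
#include <stdint.h>

struct demo_add_device {	/* mirrors struct mgmt_cp_add_device */
	uint8_t bdaddr[6];	/* little-endian device address */
	uint8_t addr_type;	/* BDADDR_BREDR / _LE_PUBLIC / _LE_RANDOM */
	uint8_t action;		/* 0x00 report, 0x01 direct, 0x02 always */
} __attribute__((packed));

/* Sent with opcode 0x0033 (Add Device) in the same mgmt header framing
 * shown in the Get Connection Information sketch earlier in this file.
 */
#endif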
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

complete:
	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
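/* Worked example (illustrative, not compiled): how the expected_len check
 * above pans out on the wire. struct mgmt_cp_load_conn_param is a 2-byte
 * count followed by an array of mgmt_conn_param entries, each a 7-byte
 * address info plus four le16 values (15 bytes), so
 * struct_size(cp, params, n) == 2 + n * 15. The hand-rolled structs below
 * merely mirror the UAPI layout for a self-contained sketch.
 */
#if 0
#include <stdint.h>

struct demo_conn_param {	/* mirrors struct mgmt_conn_param */
	uint8_t  bdaddr[6];
	uint8_t  addr_type;
	uint16_t min_interval;
	uint16_t max_interval;
	uint16_t latency;
	uint16_t timeout;
} __attribute__((packed));

struct demo_load_cp {		/* mirrors struct mgmt_cp_load_conn_param */
	uint16_t param_count;
	struct demo_conn_param params[];
} __attribute__((packed));

/* A request carrying 3 parameter blocks must be exactly
 * 2 + 3 * 15 == 47 bytes; any other length is rejected with
 * MGMT_STATUS_INVALID_PARAMS.
 */
#endif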
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
				  struct mgmt_cp_read_local_oob_ext_data *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
			       cp, sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
				 read_local_oob_ext_data_complete);

	if (err < 0) {
		mgmt_pending_remove(cmd);
		return err;
	}

	return 0;
}
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
static u32 get_supported_adv_flags(struct hci_dev *hdev)
{
	u32 flags = 0;

	flags |= MGMT_ADV_FLAG_CONNECTABLE;
	flags |= MGMT_ADV_FLAG_DISCOV;
	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
	flags |= MGMT_ADV_FLAG_APPEARANCE;
	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
	flags |= MGMT_ADV_PARAM_DURATION;
	flags |= MGMT_ADV_PARAM_TIMEOUT;
	flags |= MGMT_ADV_PARAM_INTERVALS;
	flags |= MGMT_ADV_PARAM_TX_POWER;
	flags |= MGMT_ADV_PARAM_SCAN_RSP;

	/* In extended adv TX_POWER returned from Set Adv Param
	 * will be always valid.
	 */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
		flags |= MGMT_ADV_FLAG_TX_POWER;

	if (ext_adv_capable(hdev)) {
		flags |= MGMT_ADV_FLAG_SEC_1M;
		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;

		if (hdev->le_features[1] & HCI_LE_PHY_2M)
			flags |= MGMT_ADV_FLAG_SEC_2M;

		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
			flags |= MGMT_ADV_FLAG_SEC_CODED;
	}

	return flags;
}
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			rp->num_instances--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];

	return eir_append_local_name(hdev, buf, 0);
}

static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
			   bool is_adv_data)
{
	u8 max_len = HCI_MAX_AD_LENGTH;

	if (is_adv_data) {
		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
				 MGMT_ADV_FLAG_LIMITED_DISCOV |
				 MGMT_ADV_FLAG_MANAGED_FLAGS))
			max_len -= 3;

		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
			max_len -= 3;
	} else {
		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
			max_len -= calculate_name_len(hdev);

		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
			max_len -= 4;
	}

	return max_len;
}
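/* Worked example (illustrative, not compiled): with MGMT_ADV_FLAG_DISCOV
 * and MGMT_ADV_FLAG_TX_POWER requested, the kernel reserves 3 bytes for
 * the managed Flags field and 3 bytes for the TX power field, so of the
 * 31-byte legacy advertising PDU (HCI_MAX_AD_LENGTH) only 25 bytes
 * remain for caller-supplied adv data; scan response space is instead
 * reduced by the managed local name and appearance fields. The same
 * arithmetic, restated with plain ints:
 */
#if 0
static int demo_adv_space(int managed_flags, int managed_tx_power)
{
	int max_len = 31;		/* HCI_MAX_AD_LENGTH */

	if (managed_flags)
		max_len -= 3;		/* 02 01 xx - Flags AD field */
	if (managed_tx_power)
		max_len -= 3;		/* 02 0a xx - TX Power AD field */

	return max_len;			/* 25 when both are managed */
}
#endif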
static bool flags_managed(u32 adv_flags)
{
	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
			    MGMT_ADV_FLAG_LIMITED_DISCOV |
			    MGMT_ADV_FLAG_MANAGED_FLAGS);
}

static bool tx_power_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
}

static bool name_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
}

static bool appearance_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
}
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;

		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 */
		if (i + cur_len >= len)
			return false;
	}

	return true;
}
static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
{
	u32 supported_flags, phy_flags;

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
	if (adv_flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return false;

	return true;
}
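/* The expression above relies on (x & -x) isolating the lowest set bit
 * of x, so x ^ (x & -x) is non-zero exactly when x has two or more bits
 * set - i.e. at most one secondary-PHY flag may be requested at a time.
 * A minimal standalone restatement of the same test (illustrative only,
 * not compiled here):
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool at_most_one_bit_set(uint32_t x)
{
	/* equivalent to !(x ^ (x & -x)) for non-zero x */
	return (x & (x - 1)) == 0;
}
#endif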
static bool adv_busy(struct hci_dev *hdev)
{
	return pending_find(MGMT_OP_SET_LE, hdev);
}
static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance;

		if (!adv->pending)
			continue;

		if (!err) {
			adv->pending = false;
			continue;
		}

		instance = adv->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(sk, hdev, instance);
	}

	hci_dev_unlock(hdev);
}
static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	mgmt_pending_free(cmd);
}
static int add_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}

static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
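/* Illustrative only (not compiled here): the wire layout the handler
 * above expects, i.e. data_len == sizeof(cp) + adv_data_len +
 * scan_rsp_len, with the advertising data immediately followed by the
 * scan response data in one flat buffer. The struct below mirrors
 * mgmt_cp_add_advertising purely for a self-contained sketch.
 */
#if 0
#include <stdint.h>

struct demo_add_adv {		/* mirrors struct mgmt_cp_add_advertising */
	uint8_t  instance;	/* 1..le_num_of_adv_sets */
	uint32_t flags;		/* MGMT_ADV_FLAG_* bits (LE32) */
	uint16_t duration;	/* per-instance rotation time (LE16) */
	uint16_t timeout;	/* seconds until removal, 0 = keep */
	uint8_t  adv_data_len;
	uint8_t  scan_rsp_len;
	uint8_t  data[];	/* adv_data_len bytes, then scan_rsp_len */
} __attribute__((packed));
#endif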
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;

	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
}

static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	int err;

	if (ext_adv_capable(hdev)) {
		err = hci_update_adv_data_sync(hdev, cp->instance);
		if (err)
			return err;

		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
		if (err)
			return err;

		return hci_enable_ext_advertising_sync(hdev, cp->instance);
	}

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
static void remove_advertising_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_advertising *cp = cmd->param;
	struct mgmt_rp_remove_advertising rp;

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int remove_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_advertising *cp = cmd->param;
	int err;

	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
	if (err)
		return err;

	if (list_empty(&hdev->adv_instances))
		err = hci_disable_advertising_sync(hdev);

	return err;
}

static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags, supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,          MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,     MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
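/* On the (b[5] & 0xc0) != 0xc0 test above: bdaddr_t is stored
 * little-endian, so b[5] is the most significant address byte, and a
 * static random address must have its two top bits set (0b11). Anything
 * else under ADDR_LE_DEV_RANDOM is a resolvable or non-resolvable
 * private address that will change, so the key is not worth storing. A
 * self-contained restatement (illustrative only, not compiled here):
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool is_static_random_addr(const uint8_t bdaddr[6])
{
	/* bdaddr[6] in little-endian order; index 5 holds the MSB */
	return (bdaddr[5] & 0xc0) == 0xc0;
}
#endif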
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	/* allocate buff for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
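
/* Check whether a power-down is in progress, i.e. whether there is a
 * pending Set Powered command that is turning the controller off.
 */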
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
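
/* Common completion handler for the four User Confirm/Passkey reply
 * commands below: resolve the pending command matching the given
 * opcode and complete it with the translated HCI status.
 */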
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
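
/* On failure, reply with the translated error to every pending Set
 * Link Security command. On success, sync the HCI_LINK_SECURITY flag
 * with the controller's HCI_AUTH state and emit a New Settings event
 * if the setting actually changed.
 */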
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}
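
/* Check whether the given EIR/advertising data contains any of the
 * given UUIDs. 16-bit and 32-bit UUIDs are expanded into 128-bit form
 * using the Bluetooth Base UUID before comparison; for example, the
 * 16-bit UUID 0x110b expands to
 * 0000110b-0000-1000-8000-00805f9b34fb.
 */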
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
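
/* Controllers with HCI_QUIRK_STRICT_DUPLICATE_FILTER do not report a
 * device again when only its RSSI changes, so the scan has to be
 * restarted to obtain fresh RSSI values. Skip the restart when the
 * scan window is about to expire anyway.
 */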
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
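
/* Notify userspace that a device previously reported for the given
 * Advertisement Monitor handle is no longer being seen.
 */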
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
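
/* For mesh, only forward advertisements that carry at least one of the
 * AD types configured in hdev->mesh_ad_types; everything else is
 * dropped before an event is even allocated.
 */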
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
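
/* Report the result of a remote name request as a Device Found event,
 * either carrying the name as an EIR field or flagged as a failed
 * name request.
 */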
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
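
/* Called when a management socket is closed: cancel any pending mesh
 * transmissions that were queued by that socket on any controller.
 */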
void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}