/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum.  All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI connection handling. */

#include <linux/export.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/iso.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
struct conn_handle_t {
	struct hci_conn *conn;
	__u16 handle;
};
static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
};
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
};
static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
};
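
/* Each sco_param entry is { pkt_type mask, max_latency, retrans_effort }.
 * find_next_esco_param() below walks a table from the top using
 * conn->attempt as a 1-based index, so the preferred (highest quality)
 * parameter set comes first and later entries act as fallbacks on retry.
 */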
/* This function requires the caller holds hdev->lock */
static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
{
	struct hci_conn_params *params;
	struct hci_dev *hdev = conn->hdev;
	struct smp_irk *irk;
	bdaddr_t *bdaddr;
	u8 bdaddr_type;

	bdaddr = &conn->dst;
	bdaddr_type = conn->dst_type;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
					   bdaddr_type);
	if (!params)
		return;

	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	if (!params->explicit_connect)
		return;

	/* If the status indicates successful cancellation of
	 * the attempt (i.e. Unknown Connection Id) there's no point in
	 * notifying failure since we'll go back to keep trying to
	 * connect. The only exception is explicit connect requests
	 * where a timeout + cancel does indicate an actual failure.
	 */
	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);

	/* The connection attempt was doing scan for new RPA, and is
	 * in scan phase. If params are not associated with any other
	 * autoconnect action, remove them completely. If they are, just unmark
	 * them as waiting for connection, by clearing explicit_connect field.
	 */
	params->explicit_connect = false;

	list_del_init(&params->action);

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_EXPLICIT:
		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
		/* return instead of break to avoid duplicate scan update */
		return;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		break;
	default:
		break;
	}

	hci_update_passive_scan(hdev);
}
static void hci_conn_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);

	if (conn->cleanup)
		conn->cleanup(conn);

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
		case SCO_AIRMODE_TRANSP:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
			break;
		}
	} else {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
	}

	hci_conn_del_sysfs(conn);

	debugfs_remove_recursive(conn->debugfs);

	hci_dev_put(hdev);

	hci_conn_put(conn);
}
static void le_scan_cleanup(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_scan_cleanup);
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn *c = NULL;

	BT_DBG("%s hcon %p", hdev->name, conn);

	hci_dev_lock(hdev);

	/* Check that the hci_conn is still around */
	rcu_read_lock();
	list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
		if (c == conn)
			break;
	}
	rcu_read_unlock();

	if (c == conn) {
		hci_connect_le_scan_cleanup(conn, 0x00);
		hci_conn_cleanup(conn);
	}

	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	hci_conn_put(conn);
}
static void hci_connect_le_scan_remove(struct hci_conn *conn)
{
	BT_DBG("%s hcon %p", conn->hdev->name, conn);

	/* We can't call hci_conn_del/hci_conn_cleanup here since that
	 * could deadlock with another hci_conn_del() call that's holding
	 * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
	 * Instead, grab temporary extra references to the hci_dev and
	 * hci_conn and perform the necessary cleanup in a separate work
	 * callback.
	 */

	hci_dev_hold(conn->hdev);
	hci_conn_get(conn);

	/* Even though we hold a reference to the hdev, many other
	 * things might get cleaned up meanwhile, including the hdev's
	 * own workqueue, so we can't use that for scheduling.
	 */
	schedule_work(&conn->le_scan_cleanup);
}
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	/* Many controllers disallow HCI Create Connection while it is doing
	 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
	 * Connection. This may cause the MGMT discovering state to become false
	 * without user space's request but it is okay since the MGMT Discovery
	 * APIs do not promise that discovery should be done forever. Instead,
	 * the user space monitors the status of MGMT discovering and it may
	 * request for discovery again when this flag becomes false.
	 */
	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
		/* Put this connection to "pending" state so that it will be
		 * executed after the inquiry cancel command complete event.
		 */
		conn->state = BT_CONNECT2;
		hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		return;
	}

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			cp.clock_offset   = ie->data.clock_offset |
					    cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	BT_DBG("hcon %p", conn);

	/* When we are central of an established connection and it enters
	 * the disconnect timeout, then go ahead and try to read the
	 * current clock offset.  Processing of the result is done
	 * within the event handling and hci_clock_offset_evt function.
	 */
	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
		struct hci_dev *hdev = conn->hdev;
		struct hci_cp_read_clock_offset clkoff_cp;

		clkoff_cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
			     &clkoff_cp);
	}

	return hci_abort_conn(conn, reason);
}
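
/* Typical caller sketch (illustrative): tear down an established link with
 * the standard "remote user terminated connection" reason code.
 *
 *	hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
 */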
static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
static bool find_next_esco_param(struct hci_conn *conn,
				 const struct sco_param *esco_param, int size)
{
	if (!conn->parent)
		return false;

	for (; conn->attempt <= size; conn->attempt++) {
		if (lmp_esco_2m_capable(conn->parent) ||
		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
			break;
		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
		       conn, conn->attempt);
	}

	return conn->attempt <= size;
}
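
/* Illustration (assumption, not from the original source): with CVSD and a
 * peer that lacks eSCO 2M support, attempts 1 and 2 (S3/S2, which permit
 * 2-EV3 packets) are skipped and attempt 3 settles on S1, whose pkt_type
 * sets the ESCO_2EV3 exclusion bit.
 */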
static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
{
	int err;
	__u8 vnd_len, *vnd_data = NULL;
	struct hci_op_configure_data_path *cmd = NULL;

	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
					  &vnd_data);
	if (err < 0)
		goto error;

	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
	if (!cmd) {
		err = -ENOMEM;
		goto error;
	}

	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
	if (err < 0)
		goto error;

	cmd->vnd_len = vnd_len;
	memcpy(cmd->vnd_data, vnd_data, vnd_len);

	cmd->direction = 0x00;
	__hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
			      sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);

	cmd->direction = 0x01;
	err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
				    sizeof(*cmd) + vnd_len, cmd,
				    HCI_CMD_TIMEOUT);
error:
	kfree(cmd);
	kfree(vnd_data);
	return err;
}
static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
{
	struct conn_handle_t *conn_handle = data;
	struct hci_conn *conn = conn_handle->conn;
	__u16 handle = conn_handle->handle;
	struct hci_cp_enhanced_setup_sync_conn cp;
	const struct sco_param *param;

	kfree(conn_handle);

	bt_dev_dbg(hdev, "hcon %p", conn);

	/* for offload use case, codec needs to be configured before opening SCO */
	if (conn->codec.data_path)
		configure_datapath_sync(hdev, &conn->codec);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	memset(&cp, 0x00, sizeof(cp));

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);

	switch (conn->codec.id) {
	case BT_CODEC_MSBC:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return -EINVAL;

		param = &esco_param_msbc[conn->attempt - 1];
		cp.tx_coding_format.id = 0x05;
		cp.rx_coding_format.id = 0x05;
		cp.tx_codec_frame_size = __cpu_to_le16(60);
		cp.rx_codec_frame_size = __cpu_to_le16(60);
		cp.in_bandwidth = __cpu_to_le32(32000);
		cp.out_bandwidth = __cpu_to_le32(32000);
		cp.in_coding_format.id = 0x04;
		cp.out_coding_format.id = 0x04;
		cp.in_coded_data_size = __cpu_to_le16(16);
		cp.out_coded_data_size = __cpu_to_le16(16);
		cp.in_pcm_data_format = 2;
		cp.out_pcm_data_format = 2;
		cp.in_pcm_sample_payload_msb_pos = 0;
		cp.out_pcm_sample_payload_msb_pos = 0;
		cp.in_data_path = conn->codec.data_path;
		cp.out_data_path = conn->codec.data_path;
		cp.in_transport_unit_size = 1;
		cp.out_transport_unit_size = 1;
		break;

	case BT_CODEC_TRANSPARENT:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return -EINVAL;
		param = &esco_param_msbc[conn->attempt - 1];
		cp.tx_coding_format.id = 0x03;
		cp.rx_coding_format.id = 0x03;
		cp.tx_codec_frame_size = __cpu_to_le16(60);
		cp.rx_codec_frame_size = __cpu_to_le16(60);
		cp.in_bandwidth = __cpu_to_le32(0x1f40);
		cp.out_bandwidth = __cpu_to_le32(0x1f40);
		cp.in_coding_format.id = 0x03;
		cp.out_coding_format.id = 0x03;
		cp.in_coded_data_size = __cpu_to_le16(16);
		cp.out_coded_data_size = __cpu_to_le16(16);
		cp.in_pcm_data_format = 2;
		cp.out_pcm_data_format = 2;
		cp.in_pcm_sample_payload_msb_pos = 0;
		cp.out_pcm_sample_payload_msb_pos = 0;
		cp.in_data_path = conn->codec.data_path;
		cp.out_data_path = conn->codec.data_path;
		cp.in_transport_unit_size = 1;
		cp.out_transport_unit_size = 1;
		break;

	case BT_CODEC_CVSD:
		if (conn->parent && lmp_esco_capable(conn->parent)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return -EINVAL;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return -EINVAL;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		cp.tx_coding_format.id = 2;
		cp.rx_coding_format.id = 2;
		cp.tx_codec_frame_size = __cpu_to_le16(60);
		cp.rx_codec_frame_size = __cpu_to_le16(60);
		cp.in_bandwidth = __cpu_to_le32(16000);
		cp.out_bandwidth = __cpu_to_le32(16000);
		cp.in_coding_format.id = 4;
		cp.out_coding_format.id = 4;
		cp.in_coded_data_size = __cpu_to_le16(16);
		cp.out_coded_data_size = __cpu_to_le16(16);
		cp.in_pcm_data_format = 2;
		cp.out_pcm_data_format = 2;
		cp.in_pcm_sample_payload_msb_pos = 0;
		cp.out_pcm_sample_payload_msb_pos = 0;
		cp.in_data_path = conn->codec.data_path;
		cp.out_data_path = conn->codec.data_path;
		cp.in_transport_unit_size = 16;
		cp.out_transport_unit_size = 16;
		break;

	default:
		return -EINVAL;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return -EIO;

	return 0;
}
static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	bt_dev_dbg(hdev, "hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		if (conn->parent && lmp_esco_capable(conn->parent)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	int result;
	struct conn_handle_t *conn_handle;

	if (enhanced_sync_conn_capable(conn->hdev)) {
		conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
		if (!conn_handle)
			return false;

		conn_handle->conn = conn;
		conn_handle->handle = handle;
		result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
					    conn_handle, NULL);
		if (result < 0)
			kfree(conn_handle);

		return result == 0;
	}

	return hci_setup_sync_conn(conn, handle);
}
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0000);
	cp.max_ce_len		= cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}
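
/* Units sketch (from the HCI specification, for orientation): min/max
 * interval are in 1.25 ms steps and the supervision timeout in 10 ms
 * steps, so for example
 *
 *	hci_le_conn_update(conn, 0x0018, 0x0028, 0x0000, 0x002a);
 *
 * requests a 30-50 ms interval, no peripheral latency and a 420 ms timeout.
 */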
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16], __u8 key_size)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.rand = rand;
	cp.ediv = ediv;
	memcpy(cp.ltk, ltk, key_size);

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_link *link;

	link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
	if (!link || !link->conn)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(link->conn, conn->handle);
		else
			hci_add_sco(link->conn, conn->handle);
	} else {
		hci_connect_cfm(link->conn, status);
		hci_conn_del(link->conn);
	}
}
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher than 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	/* LE connections in scanning state need special handling */
	if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
	    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
		hci_connect_le_scan_remove(conn);
		return;
	}

	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}
/* Enter sniff mode */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
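
/* Note (HCI spec orientation, not from the original source): sniff min/max
 * interval are expressed in 0.625 ms baseband slots, so a stored
 * hdev->sniff_max_interval of 0x0320 (800 slots) is a 500 ms sniff interval.
 */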
static void hci_conn_auto_accept(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     auto_accept_work.work);

	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}
static void le_disable_advertising(struct hci_dev *hdev)
{
	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_enable cp;

		cp.enable = 0x00;
		cp.num_of_sets = 0x00;

		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
			     &cp);
	} else {
		u8 enable = 0x00;

		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			     &enable);
	}
}
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* Disable LE Advertising */
		le_disable_advertising(hdev);
		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_unlock(hdev);

	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}
struct iso_list_data {
	union {
		u8  cig;
		u8  big;
	};
	union {
		u8  cis;
		u8  bis;
		u16 sync_handle;
	};
	int count;
	struct {
		struct hci_cp_le_set_cig_params cp;
		struct hci_cis_params cis[0x11];
	} pdu;
};
static void bis_list(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Skip if not broadcast/ANY address */
	if (bacmp(&conn->dst, BDADDR_ANY))
		return;

	if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
	    d->bis != conn->iso_qos.bcast.bis)
		return;

	d->count++;
}
static void find_bis(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Ignore unicast */
	if (bacmp(&conn->dst, BDADDR_ANY))
		return;

	d->count++;
}
static int terminate_big_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);

	hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);

	/* Check if ISO connection is a BIS and terminate BIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
	if (d->count)
		return 0;

	return hci_le_terminate_big_sync(hdev, d->big,
					 HCI_ERROR_LOCAL_HOST_TERM);
}
static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);
}
static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", big, bis);

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = big;
	d->bis = bis;

	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
				 terminate_big_destroy);
	if (ret)
		kfree(d);

	return ret;
}
static int big_terminate_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
		   d->sync_handle);

	/* Check if ISO connection is a BIS and terminate BIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
	if (d->count)
		return 0;

	hci_le_big_terminate_sync(hdev, d->big);

	return hci_le_pa_terminate_sync(hdev, d->sync_handle);
}
static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, sync_handle);

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = big;
	d->sync_handle = sync_handle;

	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
				 terminate_big_destroy);
	if (ret)
		kfree(d);

	return ret;
}
/* Cleanup BIS connection
 *
 * Detects if there are any BIS left connected in a BIG
 * broadcaster: Remove advertising instance and terminate BIG.
 * broadcast receiver: Terminate BIG sync and terminate PA sync.
 */
static void bis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (conn->role == HCI_ROLE_MASTER) {
		if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
			return;

		hci_le_terminate_big(hdev, conn->iso_qos.bcast.big,
				     conn->iso_qos.bcast.bis);
	} else {
		hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
				     conn->sync_handle);
	}
}
static int remove_cig_sync(struct hci_dev *hdev, void *data)
{
	u8 handle = PTR_ERR(data);

	return hci_le_remove_cig_sync(hdev, handle);
}
static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
{
	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);

	return hci_cmd_sync_queue(hdev, remove_cig_sync, ERR_PTR(handle), NULL);
}
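
/* Design note: the CIG handle is smuggled through the opaque void *data
 * argument using the ERR_PTR()/PTR_ERR() round trip instead of a heap
 * allocation, which is safe here because the handle is a small integer
 * and the pointer is never dereferenced.
 */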
static void find_cis(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Ignore broadcast */
	if (!bacmp(&conn->dst, BDADDR_ANY))
		return;

	d->count++;
}
/* Cleanup CIS connection:
 *
 * Detects if there are any CIS left connected in a CIG and, if none
 * remain, removes the CIG.
 */
static void cis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data d;

	memset(&d, 0, sizeof(d));
	d.cig = conn->iso_qos.ucast.cig;

	/* Check if ISO connection is a CIS and remove CIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
	if (d.count)
		return;

	hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
}
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->handle = HCI_CONN_HANDLE_UNSET;
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Set Default Authenticated payload timeout to 30s */
	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case ISO_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);

		/* set proper cleanup function */
		if (!bacmp(dst, BDADDR_ANY))
			conn->cleanup = bis_cleanup;
		else if (conn->role == HCI_ROLE_MASTER)
			conn->cleanup = cis_cleanup;

		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);
	INIT_LIST_HEAD(&conn->link_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
	INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);

	/* The SCO and eSCO connections will only be notified when their
	 * setup has been completed. This is different to ACL links which
	 * can be notified right away.
	 */
	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
	}

	hci_conn_init_sysfs(conn);

	return conn;
}
static void hci_conn_unlink(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "hcon %p", conn);

	if (!conn->parent) {
		struct hci_link *link, *t;

		list_for_each_entry_safe(link, t, &conn->link_list, list)
			hci_conn_unlink(link->conn);

		return;
	}

	if (!conn->link)
		return;

	hci_conn_put(conn->parent);
	conn->parent = NULL;

	list_del_rcu(&conn->link->list);
	synchronize_rcu();

	kfree(conn->link);
	conn->link = NULL;

	/* Due to race, SCO connection might not be established
	 * yet at this point. Delete it now, otherwise it is
	 * possible for it to be stuck and can't be deleted.
	 */
	if (conn->handle == HCI_CONN_HANDLE_UNSET)
		hci_conn_del(conn);
}
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		hci_conn_unlink(conn);
		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work(&conn->le_conn_timeout);

		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->parent;

		if (acl) {
			hci_conn_unlink(conn);
			hci_conn_drop(acl);
		}

		/* Unacked ISO frames */
		if (conn->type == ISO_LINK) {
			if (hdev->iso_pkts)
				hdev->iso_cnt += conn->sent;
			else if (hdev->le_pkts)
				hdev->le_cnt += conn->sent;
			else
				hdev->acl_cnt += conn->sent;
		}
	}

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	skb_queue_purge(&conn->data_q);

	/* Remove the connection from the list and cleanup its remaining
	 * state. This is a separate function since for some cases like
	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
	 * rest of hci_conn_del.
	 */
	hci_conn_cleanup(conn);

	return 0;
}
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
		    d->dev_type != HCI_PRIMARY)
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			bdaddr_t id_addr;
			u8 id_addr_type;

			if (src_type == BDADDR_BREDR) {
				if (!lmp_bredr_capable(d))
					continue;
				bacpy(&id_addr, &d->bdaddr);
				id_addr_type = BDADDR_BREDR;
			} else {
				if (!lmp_le_capable(d))
					continue;

				hci_copy_identity_address(d, &id_addr,
							  &id_addr_type);

				/* Convert from HCI to three-value type */
				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
					id_addr_type = BDADDR_LE_PUBLIC;
				else
					id_addr_type = BDADDR_LE_RANDOM;
			}

			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
				hdev = d;
				break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d;
				break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);
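
/* Caller-side sketch (illustrative): pick any powered adapter that can
 * reach dst over BR/EDR, then release the reference once done.
 *
 *	struct hci_dev *hdev = hci_get_route(&dst, BDADDR_ANY, BDADDR_BREDR);
 *	if (hdev) {
 *		... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */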
/* This function requires the caller holds hdev->lock */
static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;

	hci_connect_le_scan_cleanup(conn, status);

	/* Enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	hci_enable_advertising(hdev);
}
/* This function requires the caller holds hdev->lock */
void hci_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	switch (conn->type) {
	case LE_LINK:
		hci_le_conn_failed(conn, status);
		break;
	case ACL_LINK:
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);
		break;
	}

	conn->state = BT_CLOSED;
	hci_connect_cfm(conn, status);
	hci_conn_del(conn);
}
static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		hci_connect_le_scan_cleanup(conn, 0x00);
		goto done;
	}

	/* Check if connection is still pending */
	if (conn != hci_lookup_le_connect(hdev))
		goto done;

	/* Flush to make sure we send create conn cancel command if needed */
	flush_delayed_work(&conn->le_conn_timeout);
	hci_conn_failed(conn, bt_status(err));

done:
	hci_dev_unlock(hdev);
}
static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "conn %p", conn);

	return hci_le_create_conn_sync(hdev, conn);
}
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, bool dst_resolved, u8 sec_level,
				u16 conn_timeout, u8 role)
{
	struct hci_conn *conn;
	struct smp_irk *irk;
	int err;

	/* Let's make sure that LE is enabled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	if (hci_lookup_le_connect(hdev))
		return ERR_PTR(-EBUSY);

	/* If there's already a connection object but it's not in
	 * scanning state it means it must already be established, in
	 * which case we can't do anything else except report a failure
	 * to connect.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags))
		return ERR_PTR(-EBUSY);

	/* Check if the destination address has been resolved by the controller
	 * since if it did then the identity address shall be used.
	 */
	if (!dst_resolved) {
		/* When given an identity address with existing identity
		 * resolving key, the connection needs to be established
		 * to a resolvable random address.
		 *
		 * Storing the resolvable random address is required here
		 * to handle connection failures. The address will later
		 * be resolved back into the original identity address
		 * from the connect request.
		 */
		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
			dst = &irk->rpa;
			dst_type = ADDR_LE_DEV_RANDOM;
		}
	}

	if (conn) {
		bacpy(&conn->dst, dst);
	} else {
		conn = hci_conn_add(hdev, LE_LINK, dst, role);
		if (!conn)
			return ERR_PTR(-ENOMEM);
		hci_conn_hold(conn);
		conn->pending_sec_level = sec_level;
	}

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->conn_timeout = conn_timeout;

	conn->state = BT_CONNECT;
	clear_bit(HCI_CONN_SCANNING, &conn->flags);

	err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, conn,
				 create_le_conn_complete);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

	return conn;
}
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, addr, type);
	if (!conn)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}
/* This function requires the caller holds hdev->lock */
static int hci_explicit_conn_params_set(struct hci_dev *hdev,
					bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (is_connected(hdev, addr, addr_type))
		return -EISCONN;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params) {
		params = hci_conn_params_add(hdev, addr, addr_type);
		if (!params)
			return -ENOMEM;

		/* If we created new params, mark them to be deleted in
		 * hci_connect_le_scan_cleanup. It's different case than
		 * existing disabled params, those will stay after cleanup.
		 */
		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
	}

	/* We're trying to connect, so make sure params are at pend_le_conns */
	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
		list_del_init(&params->action);
		list_add(&params->action, &hdev->pend_le_conns);
	}

	params->explicit_connect = true;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       params->auto_connect);

	return 0;
}
static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
{
	struct iso_list_data data;

	/* Allocate a BIG if not set */
	if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
		for (data.big = 0x00; data.big < 0xef; data.big++) {
			data.count = 0;
			data.bis = 0xff;

			hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
						 BT_BOUND, &data);
			if (!data.count)
				break;
		}

		if (data.big == 0xef)
			return -EADDRNOTAVAIL;

		/* Update BIG */
		qos->bcast.big = data.big;
	}

	return 0;
}
static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
{
	struct iso_list_data data;

	/* Allocate BIS if not set */
	if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
		/* Find an unused adv set to advertise BIS, skip instance 0x00
		 * since it is reserved as general purpose set.
		 */
		for (data.bis = 0x01; data.bis < hdev->le_num_of_adv_sets;
		     data.bis++) {
			data.count = 0;

			hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
						 BT_BOUND, &data);
			if (!data.count)
				break;
		}

		if (data.bis == hdev->le_num_of_adv_sets)
			return -EADDRNOTAVAIL;

		/* Update BIS */
		qos->bcast.bis = data.bis;
	}

	return 0;
}
/* This function requires the caller holds hdev->lock */
static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
				    struct bt_iso_qos *qos)
{
	struct hci_conn *conn;
	struct iso_list_data data;
	int err;

	/* Let's make sure that LE is enabled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);
		return ERR_PTR(-EOPNOTSUPP);
	}

	err = qos_set_big(hdev, qos);
	if (err)
		return ERR_PTR(err);

	err = qos_set_bis(hdev, qos);
	if (err)
		return ERR_PTR(err);

	data.big = qos->bcast.big;
	data.bis = qos->bcast.bis;
	data.count = 0;

	/* Check if there is already a matching BIG/BIS */
	hci_conn_hash_list_state(hdev, bis_list, ISO_LINK, BT_BOUND, &data);
	if (data.count)
		return ERR_PTR(-EADDRINUSE);

	conn = hci_conn_hash_lookup_bis(hdev, dst, qos->bcast.big, qos->bcast.bis);
	if (conn)
		return ERR_PTR(-EADDRINUSE);

	conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	set_bit(HCI_CONN_PER_ADV, &conn->flags);
	conn->state = BT_CONNECT;

	hci_conn_hold(conn);
	return conn;
}
/* This function requires the caller holds hdev->lock */
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
				     u8 dst_type, u8 sec_level,
				     u16 conn_timeout,
				     enum conn_reasons conn_reason)
{
	struct hci_conn *conn;

	/* Let's make sure that LE is enabled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn) {
		if (conn->pending_sec_level < sec_level)
			conn->pending_sec_level = sec_level;
		goto done;
	}

	BT_DBG("requesting refresh of dst_addr");

	conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
		hci_conn_del(conn);
		return ERR_PTR(-EBUSY);
	}

	conn->state = BT_CONNECT;
	set_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;
	conn->conn_reason = conn_reason;

	hci_update_passive_scan(hdev);

done:
	hci_conn_hold(conn);
	return conn;
}
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type,
				 enum conn_reasons conn_reason)
{
	struct hci_conn *acl;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (lmp_bredr_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	acl->conn_reason = conn_reason;
	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}
static struct hci_link *hci_conn_link(struct hci_conn *parent,
				      struct hci_conn *conn)
{
	struct hci_dev *hdev = parent->hdev;
	struct hci_link *link;

	bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);

	if (conn->link)
		return conn->link;

	if (conn->parent)
		return NULL;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return NULL;

	link->conn = hci_conn_hold(conn);
	conn->link = link;
	conn->parent = hci_conn_get(parent);

	/* Use list_add_tail_rcu append to the list */
	list_add_tail_rcu(&link->list, &parent->link_list);

	return link;
}
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting, struct bt_codec *codec)
{
	struct hci_conn *acl;
	struct hci_conn *sco;
	struct hci_link *link;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
			      CONN_REASON_SCO_CONNECT);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
		if (!sco) {
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	link = hci_conn_link(acl, sco);
	if (!link) {
		hci_conn_drop(acl);
		hci_conn_drop(sco);
		return ERR_PTR(-ENOLINK);
	}

	sco->setting = setting;
	sco->codec = *codec;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
static void cis_add(struct iso_list_data *d, struct bt_iso_qos *qos)
{
	struct hci_cis_params *cis = &d->pdu.cis[d->pdu.cp.num_cis];

	cis->cis_id = qos->ucast.cis;
	cis->c_sdu  = cpu_to_le16(qos->ucast.out.sdu);
	cis->p_sdu  = cpu_to_le16(qos->ucast.in.sdu);
	cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy : qos->ucast.in.phy;
	cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy : qos->ucast.out.phy;
	cis->c_rtn  = qos->ucast.out.rtn;
	cis->p_rtn  = qos->ucast.in.rtn;

	d->pdu.cp.num_cis++;
}
static void cis_list(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Skip if broadcast/ANY address */
	if (!bacmp(&conn->dst, BDADDR_ANY))
		return;

	if (d->cig != conn->iso_qos.ucast.cig || d->cis == BT_ISO_QOS_CIS_UNSET ||
	    d->cis != conn->iso_qos.ucast.cis)
		return;

	d->count++;

	if (d->pdu.cp.cig_id == BT_ISO_QOS_CIG_UNSET ||
	    d->count >= ARRAY_SIZE(d->pdu.cis))
		return;

	cis_add(d, &conn->iso_qos);
}
static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_big cp;

	memset(&cp, 0, sizeof(cp));

	cp.handle = qos->bcast.big;
	cp.adv_handle = qos->bcast.bis;
	cp.num_bis  = 0x01;
	hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
	cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
	cp.bis.latency = cpu_to_le16(qos->bcast.out.latency);
	cp.bis.rtn  = qos->bcast.out.rtn;
	cp.bis.phy  = qos->bcast.out.phy;
	cp.bis.packing = qos->bcast.packing;
	cp.bis.framing = qos->bcast.framing;
	cp.bis.encryption = qos->bcast.encryption;
	memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));

	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
}
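
/* Units note (from the HCI specification, for orientation): bis.sdu_interval
 * is expressed in microseconds while bis.latency is in milliseconds, so a
 * 10 ms stream would carry an interval of 10000 and a latency on the order
 * of 10-20.
 */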
static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data data;

	memset(&data, 0, sizeof(data));

	/* Allocate a CIG if not set */
	if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
		for (data.cig = 0x00; data.cig < 0xff; data.cig++) {
			data.count = 0;
			data.cis = 0xff;

			hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
						 BT_BOUND, &data);
			if (data.count)
				continue;

			hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
						 BT_CONNECTED, &data);
			if (!data.count)
				break;
		}

		if (data.cig == 0xff)
			return false;

		/* Update CIG */
		qos->ucast.cig = data.cig;
	}

	data.pdu.cp.cig_id = qos->ucast.cig;
	hci_cpu_to_le24(qos->ucast.out.interval, data.pdu.cp.c_interval);
	hci_cpu_to_le24(qos->ucast.in.interval, data.pdu.cp.p_interval);
	data.pdu.cp.sca = qos->ucast.sca;
	data.pdu.cp.packing = qos->ucast.packing;
	data.pdu.cp.framing = qos->ucast.framing;
	data.pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
	data.pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);

	if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
		data.count = 0;
		data.cig = qos->ucast.cig;
		data.cis = qos->ucast.cis;

		hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
					 &data);
		if (data.count)
			return false;

		cis_add(&data, qos);
	}

	/* Reprogram all CIS(s) with the same CIG */
	for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0x11;
	     data.cis++) {
		data.count = 0;

		hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
					 &data);
		if (data.count)
			continue;

		/* Allocate a CIS if not set */
		if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET) {
			/* Update CIS */
			qos->ucast.cis = data.cis;
			cis_add(&data, qos);
		}
	}

	if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis)
		return false;

	if (hci_send_cmd(hdev, HCI_OP_LE_SET_CIG_PARAMS,
			 sizeof(data.pdu.cp) +
			 (data.pdu.cp.num_cis * sizeof(*data.pdu.cis)),
			 &data.pdu) < 0)
		return false;

	return true;
}
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
			      __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *cis;

	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
				       qos->ucast.cis);
	if (!cis) {
		cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
		if (!cis)
			return ERR_PTR(-ENOMEM);
		cis->cleanup = cis_cleanup;
		cis->dst_type = dst_type;
	}

	if (cis->state == BT_CONNECTED)
		return cis;

	/* Check if CIS has been set and the settings matches */
	if (cis->state == BT_BOUND &&
	    !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
		return cis;

	/* Update LINK PHYs according to QoS preference */
	cis->le_tx_phy = qos->ucast.out.phy;
	cis->le_rx_phy = qos->ucast.in.phy;

	/* If output interval is not set use the input interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->ucast.out.interval)
		qos->ucast.out.interval = qos->ucast.in.interval;

	/* If input interval is not set use the output interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->ucast.in.interval)
		qos->ucast.in.interval = qos->ucast.out.interval;

	/* If output latency is not set use the input latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->ucast.out.latency)
		qos->ucast.out.latency = qos->ucast.in.latency;

	/* If input latency is not set use the output latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->ucast.in.latency)
		qos->ucast.in.latency = qos->ucast.out.latency;

	if (!hci_le_set_cig_params(cis, qos)) {
		hci_conn_drop(cis);
		return ERR_PTR(-EINVAL);
	}

	cis->iso_qos = *qos;
	cis->state = BT_BOUND;

	return cis;
}
bool hci_iso_setup_path(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_setup_iso_path cmd;

	memset(&cmd, 0, sizeof(cmd));

	if (conn->iso_qos.ucast.out.sdu) {
		cmd.handle = cpu_to_le16(conn->handle);
		cmd.direction = 0x00; /* Input (Host to Controller) */
		cmd.path = 0x00; /* HCI path if enabled */
		cmd.codec = 0x03; /* Transparent Data */

		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
				 &cmd) < 0)
			return false;
	}

	if (conn->iso_qos.ucast.in.sdu) {
		cmd.handle = cpu_to_le16(conn->handle);
		cmd.direction = 0x01; /* Output (Controller to Host) */
		cmd.path = 0x00; /* HCI path if enabled */
		cmd.codec = 0x03; /* Transparent Data */

		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
				 &cmd) < 0)
			return false;
	}

	return true;
}
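
/* Spec cross-reference (informational): direction 0x00 configures the path
 * for data flowing into the controller (host transmit) and 0x01 for data
 * flowing out of it (host receive); codec id 0x03 selects the transparent
 * coding format so the host ships raw SDUs over HCI.
 */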
static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
{
	return hci_le_create_cis_sync(hdev, data);
}
int hci_le_create_cis(struct hci_conn *conn)
{
	struct hci_conn *cis;
	struct hci_link *link, *t;
	struct hci_dev *hdev = conn->hdev;
	int err;

	bt_dev_dbg(hdev, "hcon %p", conn);

	switch (conn->type) {
	case LE_LINK:
		if (conn->state != BT_CONNECTED || list_empty(&conn->link_list))
			return -EINVAL;

		cis = NULL;

		/* hci_conn_link uses list_add_tail_rcu so the list is in
		 * the same order as the connections are requested.
		 */
		list_for_each_entry_safe(link, t, &conn->link_list, list) {
			if (link->conn->state == BT_BOUND) {
				err = hci_le_create_cis(link->conn);
				if (err)
					return err;

				cis = link->conn;
			}
		}

		return cis ? 0 : -EINVAL;
	case ISO_LINK:
		cis = conn;
		break;
	default:
		return -EINVAL;
	}

	if (cis->state == BT_CONNECT)
		return 0;

	/* Queue Create CIS */
	err = hci_cmd_sync_queue(hdev, hci_create_cis_sync, cis, NULL);
	if (err)
		return err;

	cis->state = BT_CONNECT;

	return 0;
}
static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
			      struct bt_iso_io_qos *qos, __u8 phy)
{
	/* Only set MTU if PHY is enabled */
	if (!qos->sdu && qos->phy) {
		if (hdev->iso_mtu > 0)
			qos->sdu = hdev->iso_mtu;
		else if (hdev->le_mtu > 0)
			qos->sdu = hdev->le_mtu;
		else
			qos->sdu = hdev->acl_mtu;
	}

	/* Use the same PHY as ACL if set to any */
	if (qos->phy == BT_ISO_PHY_ANY)
		qos->phy = phy;

	/* Use LE ACL connection interval if not set */
	if (!qos->interval)
		/* ACL interval unit in 1.25 ms to us */
		qos->interval = conn->le_conn_interval * 1250;

	/* Use LE ACL connection latency if not set */
	if (!qos->latency)
		qos->latency = conn->le_conn_latency;
}
static void hci_bind_bis(struct hci_conn *conn,
			 struct bt_iso_qos *qos)
{
	/* Update LINK PHY according to QoS preference */
	conn->le_tx_phy = qos->bcast.out.phy;
	conn->iso_qos = *qos;
	conn->state = BT_BOUND;
}
static int create_big_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;
	struct bt_iso_qos *qos = &conn->iso_qos;
	u16 interval, sync_interval = 0;
	u32 flags = 0;
	int err;

	if (qos->bcast.out.phy == 0x02)
		flags |= MGMT_ADV_FLAG_SEC_2M;

	/* Align intervals */
	interval = qos->bcast.out.interval / 1250;

	if (qos->bcast.bis)
		sync_interval = qos->bcast.sync_interval * 1600;

	err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
				     conn->le_per_adv_data, flags, interval,
				     interval, sync_interval);
	if (err)
		return err;

	return hci_le_create_big(conn, &conn->iso_qos);
}
static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_cp_le_pa_create_sync *cp = data;

	bt_dev_dbg(hdev, "");

	if (err)
		bt_dev_err(hdev, "Unable to create PA: %d", err);

	kfree(cp);
}
static int create_pa_sync(struct hci_dev *hdev, void *data)
{
	struct hci_cp_le_pa_create_sync *cp = data;
	int err;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
				    sizeof(*cp), cp, HCI_CMD_TIMEOUT);
	if (err) {
		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
		return err;
	}

	return hci_update_passive_scan_sync(hdev);
}
int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
		       __u8 sid, struct bt_iso_qos *qos)
{
	struct hci_cp_le_pa_create_sync *cp;

	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
		return -EBUSY;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp) {
		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
		return -ENOMEM;
	}

	cp->options = qos->bcast.options;
	cp->sid = sid;
	cp->addr_type = dst_type;
	bacpy(&cp->addr, dst);
	cp->skip = cpu_to_le16(qos->bcast.skip);
	cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
	cp->sync_cte_type = qos->bcast.sync_cte_type;

	/* Queue start pa_create_sync and scan */
	return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
}
int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos,
			   __u16 sync_handle, __u8 num_bis, __u8 bis[])
{
	struct {
		struct hci_cp_le_big_create_sync cp;
		__u8  bis[0x11];
	} __packed pdu;
	int err;

	if (num_bis > sizeof(pdu.bis))
		return -EINVAL;

	err = qos_set_big(hdev, qos);
	if (err)
		return err;

	memset(&pdu, 0, sizeof(pdu));
	pdu.cp.handle = qos->bcast.big;
	pdu.cp.sync_handle = cpu_to_le16(sync_handle);
	pdu.cp.encryption = qos->bcast.encryption;
	memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode));
	pdu.cp.mse = qos->bcast.mse;
	pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout);
	pdu.cp.num_bis = num_bis;
	memcpy(pdu.bis, bis, num_bis);

	return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
			    sizeof(pdu.cp) + num_bis, &pdu);
}
static void create_big_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (err) {
		bt_dev_err(hdev, "Unable to create BIG: %d", err);
		hci_connect_cfm(conn, err);
		hci_conn_del(conn);
	}
}
struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos,
				 __u8 base_len, __u8 *base)
{
	struct hci_conn *conn;
	int err;

	/* We need hci_conn object using the BDADDR_ANY as dst */
	conn = hci_add_bis(hdev, dst, qos);
	if (IS_ERR(conn))
		return conn;

	hci_bind_bis(conn, qos);

	/* Add Basic Announcement into Periodic Adv Data if BASE is set */
	if (base_len && base) {
		base_len = eir_append_service_data(conn->le_per_adv_data, 0,
						   0x1851, base, base_len);
		conn->le_per_adv_data_len = base_len;
	}

	/* Queue start periodic advertising and create BIG */
	err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
				 create_big_complete);
	if (err < 0) {
		hci_conn_drop(conn);
		return ERR_PTR(err);
	}

	hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
			  conn->le_tx_phy ? conn->le_tx_phy :
			  hdev->le_tx_def_phys);

	return conn;
}
struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *le;
	struct hci_conn *cis;
	struct hci_link *link;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		le = hci_connect_le(hdev, dst, dst_type, false,
				    BT_SECURITY_LOW,
				    HCI_LE_CONN_TIMEOUT,
				    HCI_ROLE_SLAVE);
	else
		le = hci_connect_le_scan(hdev, dst, dst_type,
					 BT_SECURITY_LOW,
					 HCI_LE_CONN_TIMEOUT,
					 CONN_REASON_ISO_CONNECT);
	if (IS_ERR(le))
		return le;

	hci_iso_qos_setup(hdev, le, &qos->ucast.out,
			  le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
	hci_iso_qos_setup(hdev, le, &qos->ucast.in,
			  le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);

	cis = hci_bind_cis(hdev, dst, dst_type, qos);
	if (IS_ERR(cis)) {
		hci_conn_drop(le);
		return cis;
	}

	link = hci_conn_link(le, cis);
	if (!link) {
		hci_conn_drop(le);
		hci_conn_drop(cis);
		return ERR_PTR(-ENOLINK);
	}

	/* If LE is already connected and CIS handle is already set proceed to
	 * Create CIS immediately.
	 */
	if (le->state == BT_CONNECTED && cis->handle != HCI_CONN_HANDLE_UNSET)
		hci_le_create_cis(cis);

	return cis;
}
/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	/* In Secure Connections Only mode, it is required that Secure
	 * Connections is used and the link is encrypted with AES-CCM
	 * using a P-256 authenticated combination key.
	 */
	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
		if (!hci_conn_sc_enabled(conn) ||
		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
			return 0;
	}

	/* AES encryption is required for Level 4:
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C:
	 *
	 * 128-bit equivalent strength for link and encryption keys
	 * required using FIPS approved algorithms (E0 not allowed,
	 * SAFER+ not allowed, and P-192 not allowed; encryption key
	 * not shortened)
	 */
	if (conn->sec_level == BT_SECURITY_FIPS &&
	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
		bt_dev_err(conn->hdev,
			   "Invalid security: Missing AES-CCM usage");
		return 0;
	}

	if (hci_conn_ssp_enabled(conn) &&
	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		return 0;

	return 1;
}
/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
		else
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle  = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			     &cp);
	}
}
/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	   security level 3. */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security levels 1 and 2. */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
		/* Ensure that the encryption key size has been read,
		 * otherwise stall the upper layer responses.
		 */
		if (!conn->enc_key_size)
			return 0;

		/* Nothing else needed, all requirements are met */
		return 1;
	}

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
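
/* Caller-side sketch (illustrative, mirroring how upper layers such as
 * L2CAP drive this): request medium security as the initiating side; a
 * return of 0 means the upgrade is pending and completes via HCI events.
 *
 *	if (!hci_conn_security(conn, BT_SECURITY_MEDIUM,
 *			       HCI_AT_GENERAL_BONDING, true))
 *		... defer, wait for auth/encrypt change events ...
 */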
/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
	BT_DBG("hcon %p", conn);

	/* Accept if non-secure or higher security level is required */
	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
		return 1;

	/* Accept if secure or higher security level is already present */
	if (conn->sec_level == BT_SECURITY_HIGH ||
	    conn->sec_level == BT_SECURITY_FIPS)
		return 1;

	/* Reject not secure link */
	return 0;
}
EXPORT_SYMBOL(hci_conn_check_secure);
/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("hcon %p", conn);

	if (role == conn->role)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);
/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		queue_delayed_work(hdev->workqueue, &conn->idle_work,
				   msecs_to_jiffies(hdev->idle_timeout));
}
/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c, *n;

	BT_DBG("hdev %s", hdev->name);

	list_for_each_entry_safe(c, n, &h->list, list) {
		c->state = BT_CLOSED;

		hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);

		/* Unlink before deleting, otherwise it is possible that
		 * hci_conn_del removes the link which may cause the list to
		 * contain items already freed.
		 */
		hci_conn_unlink(c);
		hci_conn_del(c);
	}
}
/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_create_connection(conn);

	hci_dev_unlock(hdev);
}
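
/* Translate a connection's role and security flags into the HCI_LM_*
 * link mode bitmask exposed to userspace by the connection ioctl
 * helpers below.
 */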
static u32 get_link_mode(struct hci_conn *conn)
{
	u32 link_mode = 0;

	if (conn->role == HCI_ROLE_MASTER)
		link_mode |= HCI_LM_MASTER;

	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		link_mode |= HCI_LM_ENCRYPT;

	if (test_bit(HCI_CONN_AUTH, &conn->flags))
		link_mode |= HCI_LM_AUTH;

	if (test_bit(HCI_CONN_SECURE, &conn->flags))
		link_mode |= HCI_LM_SECURE;

	if (test_bit(HCI_CONN_FIPS, &conn->flags))
		link_mode |= HCI_LM_FIPS;

	return link_mode;
}
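
/* HCIGETCONNLIST ioctl helper: copy a snapshot of the device's connection
 * list to userspace, bounded by the conn_num sanity check below (at most
 * two pages worth of entries).
 */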
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn *c;
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = get_link_mode(c);
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}
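
/* HCIGETCONNINFO ioctl helper: report the state of a single connection,
 * looked up by remote address and link type.
 */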
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = get_link_mode(conn);
	}
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOTCONN;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}
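
/* HCIGETAUTHINFO ioctl helper: report the authentication type of the ACL
 * connection to the given remote address.
 */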
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOTCONN;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}
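
/* Create an hci_chan on a connection and add it to the RCU-protected
 * channel list. Takes a reference on the hci_conn which is dropped again
 * in hci_chan_del.
 */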
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s hcon %p", hdev->name, conn);

	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
		BT_DBG("Refusing to create new hci_chan");
		return NULL;
	}

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->conn = hci_conn_get(conn);
	skb_queue_head_init(&chan->data_q);
	chan->state = BT_CONNECTED;

	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}
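
/* Unlink a channel from its connection and free it once an RCU grace
 * period has passed, dropping the hci_conn reference taken at creation.
 */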
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	synchronize_rcu();

	/* Prevent new hci_chans from being created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
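
/* Delete every channel still attached to a connection; used when the
 * connection itself is being torn down.
 */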
void hci_chan_list_flush(struct hci_conn *conn)
{
	struct hci_chan *chan, *n;

	BT_DBG("hcon %p", conn);

	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
		hci_chan_del(chan);
}
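
/* Look up a channel by handle within a single connection. The caller is
 * expected to guard the channel list; the device-wide lookup below runs
 * this under the RCU read lock.
 */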
static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
						 __u16 handle)
{
	struct hci_chan *hchan;

	list_for_each_entry(hchan, &hcon->chan_list, list) {
		if (hchan->handle == handle)
			return hchan;
	}

	return NULL;
}
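
/* Look up a channel by handle across all connections on the device. */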
struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *hcon;
	struct hci_chan *hchan = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(hcon, &h->list, list) {
		hchan = __hci_chan_lookup_handle(hcon, handle);
		if (hchan)
			break;
	}

	rcu_read_unlock();

	return hchan;
}
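
/* Report the PHYs and slot counts usable by this connection as a BT_PHY_*
 * bitmask, derived from the negotiated packet types (BR/EDR) or the LE
 * PHYs currently in use.
 */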
u32 hci_conn_get_phy(struct hci_conn *conn)
{
	u32 phys = 0;

	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
	 * Table 6.2: Packets defined for synchronous, asynchronous, and
	 * CPB logical transport types.
	 */
	switch (conn->type) {
	case SCO_LINK:
		/* SCO logical transport (1 Mb/s):
		 * HV1, HV2, HV3 and DV.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		break;

	case ACL_LINK:
		/* ACL logical transport (1 Mb/s) ptt=0:
		 * DH1, DM3, DH3, DM5 and DH5.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
			phys |= BT_PHY_BR_1M_3SLOT;

		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
			phys |= BT_PHY_BR_1M_5SLOT;

		/* ACL logical transport (2 Mb/s) ptt=1:
		 * 2-DH1, 2-DH3 and 2-DH5.
		 */
		if (!(conn->pkt_type & HCI_2DH1))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & HCI_2DH3))
			phys |= BT_PHY_EDR_2M_3SLOT;

		if (!(conn->pkt_type & HCI_2DH5))
			phys |= BT_PHY_EDR_2M_5SLOT;

		/* ACL logical transport (3 Mb/s) ptt=1:
		 * 3-DH1, 3-DH3 and 3-DH5.
		 */
		if (!(conn->pkt_type & HCI_3DH1))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & HCI_3DH3))
			phys |= BT_PHY_EDR_3M_3SLOT;

		if (!(conn->pkt_type & HCI_3DH5))
			phys |= BT_PHY_EDR_3M_5SLOT;

		break;

	case ESCO_LINK:
		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
			phys |= BT_PHY_BR_1M_3SLOT;

		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
		if (!(conn->pkt_type & ESCO_2EV3))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & ESCO_2EV5))
			phys |= BT_PHY_EDR_2M_3SLOT;

		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
		if (!(conn->pkt_type & ESCO_3EV3))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & ESCO_3EV5))
			phys |= BT_PHY_EDR_3M_3SLOT;

		break;

	case LE_LINK:
		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_RX;

		break;
	}

	return phys;
}
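
/* Abort a connection according to its current state: disconnect if the
 * link is established, cancel the creation if it is still outgoing, or
 * reject it if it is an incoming request. Returns the status of the HCI
 * command submission, if one was sent.
 */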
int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	int r = 0;

	if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
		return 0;

	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			r = hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
					 sizeof(cp), &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			r = hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT,
					 sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;
		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			r = hci_send_cmd(conn->hdev,
					 HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			r = hci_send_cmd(conn->hdev,
					 HCI_OP_CREATE_CONN_CANCEL,
					 6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			r = hci_send_cmd(conn->hdev,
					 HCI_OP_REJECT_CONN_REQ,
					 sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			r = hci_send_cmd(conn->hdev,
					 HCI_OP_REJECT_SYNC_CONN_REQ,
					 sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}

	return r;
}