net/bluetooth/hci_conn.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023-2024 NXP
5
6    Written 2000,2001 by Maxim Krasnyansky <[email protected]>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI connection handling. */
27
28 #include <linux/export.h>
29 #include <linux/debugfs.h>
30
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/iso.h>
35 #include <net/bluetooth/mgmt.h>
36
37 #include "smp.h"
38 #include "eir.h"
39
40 struct sco_param {
41         u16 pkt_type;
42         u16 max_latency;
43         u8  retrans_effort;
44 };
45
46 struct conn_handle_t {
47         struct hci_conn *conn;
48         __u16 handle;
49 };
50
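/* Fallback parameter tables for synchronous connections. Each table is
 * ordered from the most to the least preferred setting and is indexed by
 * conn->attempt - 1, so every retry falls back to the next entry (see
 * find_next_esco_param() below). The per-entry labels (S3..D0, T2/T1)
 * appear to follow the settings recommended by the HFP specification.
 */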
51 static const struct sco_param esco_param_cvsd[] = {
52         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,   0x01 }, /* S3 */
53         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,   0x01 }, /* S2 */
54         { EDR_ESCO_MASK | ESCO_EV3,   0x0007,   0x01 }, /* S1 */
55         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0x01 }, /* D1 */
56         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0x01 }, /* D0 */
57 };
58
59 static const struct sco_param sco_param_cvsd[] = {
60         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0xff }, /* D1 */
61         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0xff }, /* D0 */
62 };
63
64 static const struct sco_param esco_param_msbc[] = {
65         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,   0x02 }, /* T2 */
66         { EDR_ESCO_MASK | ESCO_EV3,   0x0008,   0x02 }, /* T1 */
67 };
68
69 /* This function requires the caller holds hdev->lock */
70 void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
71 {
72         struct hci_conn_params *params;
73         struct hci_dev *hdev = conn->hdev;
74         struct smp_irk *irk;
75         bdaddr_t *bdaddr;
76         u8 bdaddr_type;
77
78         bdaddr = &conn->dst;
79         bdaddr_type = conn->dst_type;
80
81         /* Check if we need to convert to identity address */
82         irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
83         if (irk) {
84                 bdaddr = &irk->bdaddr;
85                 bdaddr_type = irk->addr_type;
86         }
87
88         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
89                                            bdaddr_type);
90         if (!params)
91                 return;
92
93         if (params->conn) {
94                 hci_conn_drop(params->conn);
95                 hci_conn_put(params->conn);
96                 params->conn = NULL;
97         }
98
99         if (!params->explicit_connect)
100                 return;
101
102         /* If the status indicates successful cancellation of
103          * the attempt (i.e. Unknown Connection Id) there's no point in
104          * notifying failure since we'll go back to keep trying to
105          * connect. The only exception is explicit connect requests
106          * where a timeout + cancel does indicate an actual failure.
107          */
108         if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
109                 mgmt_connect_failed(hdev, conn, status);
110
111         /* The connection attempt was doing a scan for a new RPA and is
112          * in the scan phase. If the params are not associated with any other
113          * autoconnect action, remove them completely. If they are, just unmark
114          * them as waiting for a connection by clearing the explicit_connect field.
115          */
116         params->explicit_connect = false;
117
118         hci_pend_le_list_del_init(params);
119
120         switch (params->auto_connect) {
121         case HCI_AUTO_CONN_EXPLICIT:
122                 hci_conn_params_del(hdev, bdaddr, bdaddr_type);
123                 /* return instead of break to avoid duplicate scan update */
124                 return;
125         case HCI_AUTO_CONN_DIRECT:
126         case HCI_AUTO_CONN_ALWAYS:
127                 hci_pend_le_list_add(params, &hdev->pend_le_conns);
128                 break;
129         case HCI_AUTO_CONN_REPORT:
130                 hci_pend_le_list_add(params, &hdev->pend_le_reports);
131                 break;
132         default:
133                 break;
134         }
135
136         hci_update_passive_scan(hdev);
137 }
138
139 static void hci_conn_cleanup(struct hci_conn *conn)
140 {
141         struct hci_dev *hdev = conn->hdev;
142
143         if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
144                 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
145
146         if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
147                 hci_remove_link_key(hdev, &conn->dst);
148
149         hci_chan_list_flush(conn);
150
151         hci_conn_hash_del(hdev, conn);
152
153         if (HCI_CONN_HANDLE_UNSET(conn->handle))
154                 ida_free(&hdev->unset_handle_ida, conn->handle);
155
156         if (conn->cleanup)
157                 conn->cleanup(conn);
158
159         if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
160                 switch (conn->setting & SCO_AIRMODE_MASK) {
161                 case SCO_AIRMODE_CVSD:
162                 case SCO_AIRMODE_TRANSP:
163                         if (hdev->notify)
164                                 hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
165                         break;
166                 }
167         } else {
168                 if (hdev->notify)
169                         hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
170         }
171
172         debugfs_remove_recursive(conn->debugfs);
173
174         hci_conn_del_sysfs(conn);
175
176         hci_dev_put(hdev);
177 }
178
179 int hci_disconnect(struct hci_conn *conn, __u8 reason)
180 {
181         BT_DBG("hcon %p", conn);
182
183         /* When we are the central of an established connection and it
184          * enters the disconnect timeout, go ahead and try to read the
185          * current clock offset. Processing of the result is done within
186          * the event handling and the hci_clock_offset_evt function.
187          */
188         if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
189             (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
190                 struct hci_dev *hdev = conn->hdev;
191                 struct hci_cp_read_clock_offset clkoff_cp;
192
193                 clkoff_cp.handle = cpu_to_le16(conn->handle);
194                 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
195                              &clkoff_cp);
196         }
197
198         return hci_abort_conn(conn, reason);
199 }
200
201 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
202 {
203         struct hci_dev *hdev = conn->hdev;
204         struct hci_cp_add_sco cp;
205
206         BT_DBG("hcon %p", conn);
207
208         conn->state = BT_CONNECT;
209         conn->out = true;
210
211         conn->attempt++;
212
213         cp.handle   = cpu_to_le16(handle);
214         cp.pkt_type = cpu_to_le16(conn->pkt_type);
215
216         hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
217 }
218
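/* Advance conn->attempt past table entries that rely on eSCO 2-EV3
 * packets when the parent ACL peer does not support eSCO 2 Mbps.
 * Returns true while a usable entry (esco_param[conn->attempt - 1])
 * remains, false when the table is exhausted or there is no parent.
 */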
219 static bool find_next_esco_param(struct hci_conn *conn,
220                                  const struct sco_param *esco_param, int size)
221 {
222         if (!conn->parent)
223                 return false;
224
225         for (; conn->attempt <= size; conn->attempt++) {
226                 if (lmp_esco_2m_capable(conn->parent) ||
227                     (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
228                         break;
229                 BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
230                        conn, conn->attempt);
231         }
232
233         return conn->attempt <= size;
234 }
235
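/* Configure the offload data path for an upcoming (e)SCO connection.
 * The vendor driver supplies the codec configuration blob through
 * get_codec_config_data() and the data path id through
 * get_data_path_id(); the Configure Data Path command is then issued
 * once for each direction (0x00 and 0x01).
 */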
236 static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
237 {
238         int err;
239         __u8 vnd_len, *vnd_data = NULL;
240         struct hci_op_configure_data_path *cmd = NULL;
241
242         /* Do not treat the two checks below as errors: the first means the
243          * user does not want to use HFP offload mode and the second means the
244          * vendor controller does not need the HCI command below for offload mode.
245          */
246         if (!codec->data_path || !hdev->get_codec_config_data)
247                 return 0;
248
249         err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
250                                           &vnd_data);
251         if (err < 0)
252                 goto error;
253
254         cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
255         if (!cmd) {
256                 err = -ENOMEM;
257                 goto error;
258         }
259
260         err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
261         if (err < 0)
262                 goto error;
263
264         cmd->vnd_len = vnd_len;
265         memcpy(cmd->vnd_data, vnd_data, vnd_len);
266
267         cmd->direction = 0x00;
268         __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
269                               sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);
270
271         cmd->direction = 0x01;
272         err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
273                                     sizeof(*cmd) + vnd_len, cmd,
274                                     HCI_CMD_TIMEOUT);
275 error:
276
277         kfree(cmd);
278         kfree(vnd_data);
279         return err;
280 }
281
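/* hci_cmd_sync callback: configure the data path and send the Enhanced
 * Setup Synchronous Connection command with parameters matching the
 * codec negotiated for the connection (mSBC, transparent or CVSD),
 * falling back through the parameter tables above on each retry. The
 * conn_handle_t wrapper passed in as data is freed here.
 */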
282 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
283 {
284         struct conn_handle_t *conn_handle = data;
285         struct hci_conn *conn = conn_handle->conn;
286         __u16 handle = conn_handle->handle;
287         struct hci_cp_enhanced_setup_sync_conn cp;
288         const struct sco_param *param;
289
290         kfree(conn_handle);
291
292         if (!hci_conn_valid(hdev, conn))
293                 return -ECANCELED;
294
295         bt_dev_dbg(hdev, "hcon %p", conn);
296
297         configure_datapath_sync(hdev, &conn->codec);
298
299         conn->state = BT_CONNECT;
300         conn->out = true;
301
302         conn->attempt++;
303
304         memset(&cp, 0x00, sizeof(cp));
305
306         cp.handle   = cpu_to_le16(handle);
307
308         cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
309         cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
310
311         switch (conn->codec.id) {
312         case BT_CODEC_MSBC:
313                 if (!find_next_esco_param(conn, esco_param_msbc,
314                                           ARRAY_SIZE(esco_param_msbc)))
315                         return -EINVAL;
316
317                 param = &esco_param_msbc[conn->attempt - 1];
318                 cp.tx_coding_format.id = 0x05;
319                 cp.rx_coding_format.id = 0x05;
320                 cp.tx_codec_frame_size = __cpu_to_le16(60);
321                 cp.rx_codec_frame_size = __cpu_to_le16(60);
322                 cp.in_bandwidth = __cpu_to_le32(32000);
323                 cp.out_bandwidth = __cpu_to_le32(32000);
324                 cp.in_coding_format.id = 0x04;
325                 cp.out_coding_format.id = 0x04;
326                 cp.in_coded_data_size = __cpu_to_le16(16);
327                 cp.out_coded_data_size = __cpu_to_le16(16);
328                 cp.in_pcm_data_format = 2;
329                 cp.out_pcm_data_format = 2;
330                 cp.in_pcm_sample_payload_msb_pos = 0;
331                 cp.out_pcm_sample_payload_msb_pos = 0;
332                 cp.in_data_path = conn->codec.data_path;
333                 cp.out_data_path = conn->codec.data_path;
334                 cp.in_transport_unit_size = 1;
335                 cp.out_transport_unit_size = 1;
336                 break;
337
338         case BT_CODEC_TRANSPARENT:
339                 if (!find_next_esco_param(conn, esco_param_msbc,
340                                           ARRAY_SIZE(esco_param_msbc)))
341                         return -EINVAL;
342                 param = &esco_param_msbc[conn->attempt - 1];
343                 cp.tx_coding_format.id = 0x03;
344                 cp.rx_coding_format.id = 0x03;
345                 cp.tx_codec_frame_size = __cpu_to_le16(60);
346                 cp.rx_codec_frame_size = __cpu_to_le16(60);
347                 cp.in_bandwidth = __cpu_to_le32(0x1f40);
348                 cp.out_bandwidth = __cpu_to_le32(0x1f40);
349                 cp.in_coding_format.id = 0x03;
350                 cp.out_coding_format.id = 0x03;
351                 cp.in_coded_data_size = __cpu_to_le16(16);
352                 cp.out_coded_data_size = __cpu_to_le16(16);
353                 cp.in_pcm_data_format = 2;
354                 cp.out_pcm_data_format = 2;
355                 cp.in_pcm_sample_payload_msb_pos = 0;
356                 cp.out_pcm_sample_payload_msb_pos = 0;
357                 cp.in_data_path = conn->codec.data_path;
358                 cp.out_data_path = conn->codec.data_path;
359                 cp.in_transport_unit_size = 1;
360                 cp.out_transport_unit_size = 1;
361                 break;
362
363         case BT_CODEC_CVSD:
364                 if (conn->parent && lmp_esco_capable(conn->parent)) {
365                         if (!find_next_esco_param(conn, esco_param_cvsd,
366                                                   ARRAY_SIZE(esco_param_cvsd)))
367                                 return -EINVAL;
368                         param = &esco_param_cvsd[conn->attempt - 1];
369                 } else {
370                         if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
371                                 return -EINVAL;
372                         param = &sco_param_cvsd[conn->attempt - 1];
373                 }
374                 cp.tx_coding_format.id = 2;
375                 cp.rx_coding_format.id = 2;
376                 cp.tx_codec_frame_size = __cpu_to_le16(60);
377                 cp.rx_codec_frame_size = __cpu_to_le16(60);
378                 cp.in_bandwidth = __cpu_to_le32(16000);
379                 cp.out_bandwidth = __cpu_to_le32(16000);
380                 cp.in_coding_format.id = 4;
381                 cp.out_coding_format.id = 4;
382                 cp.in_coded_data_size = __cpu_to_le16(16);
383                 cp.out_coded_data_size = __cpu_to_le16(16);
384                 cp.in_pcm_data_format = 2;
385                 cp.out_pcm_data_format = 2;
386                 cp.in_pcm_sample_payload_msb_pos = 0;
387                 cp.out_pcm_sample_payload_msb_pos = 0;
388                 cp.in_data_path = conn->codec.data_path;
389                 cp.out_data_path = conn->codec.data_path;
390                 cp.in_transport_unit_size = 16;
391                 cp.out_transport_unit_size = 16;
392                 break;
393         default:
394                 return -EINVAL;
395         }
396
397         cp.retrans_effort = param->retrans_effort;
398         cp.pkt_type = __cpu_to_le16(param->pkt_type);
399         cp.max_latency = __cpu_to_le16(param->max_latency);
400
401         if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
402                 return -EIO;
403
404         return 0;
405 }
406
407 static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
408 {
409         struct hci_dev *hdev = conn->hdev;
410         struct hci_cp_setup_sync_conn cp;
411         const struct sco_param *param;
412
413         bt_dev_dbg(hdev, "hcon %p", conn);
414
415         conn->state = BT_CONNECT;
416         conn->out = true;
417
418         conn->attempt++;
419
420         cp.handle   = cpu_to_le16(handle);
421
422         cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
423         cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
424         cp.voice_setting  = cpu_to_le16(conn->setting);
425
426         switch (conn->setting & SCO_AIRMODE_MASK) {
427         case SCO_AIRMODE_TRANSP:
428                 if (!find_next_esco_param(conn, esco_param_msbc,
429                                           ARRAY_SIZE(esco_param_msbc)))
430                         return false;
431                 param = &esco_param_msbc[conn->attempt - 1];
432                 break;
433         case SCO_AIRMODE_CVSD:
434                 if (conn->parent && lmp_esco_capable(conn->parent)) {
435                         if (!find_next_esco_param(conn, esco_param_cvsd,
436                                                   ARRAY_SIZE(esco_param_cvsd)))
437                                 return false;
438                         param = &esco_param_cvsd[conn->attempt - 1];
439                 } else {
440                         if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
441                                 return false;
442                         param = &sco_param_cvsd[conn->attempt - 1];
443                 }
444                 break;
445         default:
446                 return false;
447         }
448
449         cp.retrans_effort = param->retrans_effort;
450         cp.pkt_type = __cpu_to_le16(param->pkt_type);
451         cp.max_latency = __cpu_to_le16(param->max_latency);
452
453         if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
454                 return false;
455
456         return true;
457 }
458
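/* Set up a synchronous (SCO/eSCO) connection on top of an existing ACL
 * link: queue hci_enhanced_setup_sync() when the controller supports
 * the enhanced command, otherwise fall back to the legacy Setup
 * Synchronous Connection command.
 */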
459 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
460 {
461         int result;
462         struct conn_handle_t *conn_handle;
463
464         if (enhanced_sync_conn_capable(conn->hdev)) {
465                 conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
466
467                 if (!conn_handle)
468                         return false;
469
470                 conn_handle->conn = conn;
471                 conn_handle->handle = handle;
472                 result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
473                                             conn_handle, NULL);
474                 if (result < 0)
475                         kfree(conn_handle);
476
477                 return result == 0;
478         }
479
480         return hci_setup_sync_conn(conn, handle);
481 }
482
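/* Send an LE Connection Update request and, when connection parameters
 * are stored for this peer, update the stored values as well. Returns
 * 0x01 if stored parameters were found and 0x00 otherwise, which
 * callers may use as a store hint.
 */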
483 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
484                       u16 to_multiplier)
485 {
486         struct hci_dev *hdev = conn->hdev;
487         struct hci_conn_params *params;
488         struct hci_cp_le_conn_update cp;
489
490         hci_dev_lock(hdev);
491
492         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
493         if (params) {
494                 params->conn_min_interval = min;
495                 params->conn_max_interval = max;
496                 params->conn_latency = latency;
497                 params->supervision_timeout = to_multiplier;
498         }
499
500         hci_dev_unlock(hdev);
501
502         memset(&cp, 0, sizeof(cp));
503         cp.handle               = cpu_to_le16(conn->handle);
504         cp.conn_interval_min    = cpu_to_le16(min);
505         cp.conn_interval_max    = cpu_to_le16(max);
506         cp.conn_latency         = cpu_to_le16(latency);
507         cp.supervision_timeout  = cpu_to_le16(to_multiplier);
508         cp.min_ce_len           = cpu_to_le16(0x0000);
509         cp.max_ce_len           = cpu_to_le16(0x0000);
510
511         hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
512
513         if (params)
514                 return 0x01;
515
516         return 0x00;
517 }
518
519 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
520                       __u8 ltk[16], __u8 key_size)
521 {
522         struct hci_dev *hdev = conn->hdev;
523         struct hci_cp_le_start_enc cp;
524
525         BT_DBG("hcon %p", conn);
526
527         memset(&cp, 0, sizeof(cp));
528
529         cp.handle = cpu_to_le16(conn->handle);
530         cp.rand = rand;
531         cp.ediv = ediv;
532         memcpy(cp.ltk, ltk, key_size);
533
534         hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
535 }
536
537 /* Device _must_ be locked */
538 void hci_sco_setup(struct hci_conn *conn, __u8 status)
539 {
540         struct hci_link *link;
541
542         link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
543         if (!link || !link->conn)
544                 return;
545
546         BT_DBG("hcon %p", conn);
547
548         if (!status) {
549                 if (lmp_esco_capable(conn->hdev))
550                         hci_setup_sync(link->conn, conn->handle);
551                 else
552                         hci_add_sco(link->conn, conn->handle);
553         } else {
554                 hci_connect_cfm(link->conn, status);
555                 hci_conn_del(link->conn);
556         }
557 }
558
559 static void hci_conn_timeout(struct work_struct *work)
560 {
561         struct hci_conn *conn = container_of(work, struct hci_conn,
562                                              disc_work.work);
563         int refcnt = atomic_read(&conn->refcnt);
564
565         BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
566
567         WARN_ON(refcnt < 0);
568
569         /* FIXME: It was observed that in pairing failed scenario, refcnt
570          * drops below 0. Probably this is because l2cap_conn_del calls
571          * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
572          * dropped. After that loop hci_chan_del is called which also drops
573          * conn. For now make sure that ACL is alive if refcnt is higher then 0,
574          * conn. For now make sure that ACL is alive if refcnt is higher than 0,
575          */
576         if (refcnt > 0)
577                 return;
578
579         hci_abort_conn(conn, hci_proto_disconn_ind(conn));
580 }
581
582 /* Enter sniff mode */
583 static void hci_conn_idle(struct work_struct *work)
584 {
585         struct hci_conn *conn = container_of(work, struct hci_conn,
586                                              idle_work.work);
587         struct hci_dev *hdev = conn->hdev;
588
589         BT_DBG("hcon %p mode %d", conn, conn->mode);
590
591         if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
592                 return;
593
594         if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
595                 return;
596
597         if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
598                 struct hci_cp_sniff_subrate cp;
599                 cp.handle             = cpu_to_le16(conn->handle);
600                 cp.max_latency        = cpu_to_le16(0);
601                 cp.min_remote_timeout = cpu_to_le16(0);
602                 cp.min_local_timeout  = cpu_to_le16(0);
603                 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
604         }
605
606         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
607                 struct hci_cp_sniff_mode cp;
608                 cp.handle       = cpu_to_le16(conn->handle);
609                 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
610                 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
611                 cp.attempt      = cpu_to_le16(4);
612                 cp.timeout      = cpu_to_le16(1);
613                 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
614         }
615 }
616
617 static void hci_conn_auto_accept(struct work_struct *work)
618 {
619         struct hci_conn *conn = container_of(work, struct hci_conn,
620                                              auto_accept_work.work);
621
622         hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
623                      &conn->dst);
624 }
625
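/* Disable advertising using the extended command set when available,
 * otherwise the legacy LE Set Advertising Enable command.
 */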
626 static void le_disable_advertising(struct hci_dev *hdev)
627 {
628         if (ext_adv_capable(hdev)) {
629                 struct hci_cp_le_set_ext_adv_enable cp;
630
631                 cp.enable = 0x00;
632                 cp.num_of_sets = 0x00;
633
634                 hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
635                              &cp);
636         } else {
637                 u8 enable = 0x00;
638                 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
639                              &enable);
640         }
641 }
642
643 static void le_conn_timeout(struct work_struct *work)
644 {
645         struct hci_conn *conn = container_of(work, struct hci_conn,
646                                              le_conn_timeout.work);
647         struct hci_dev *hdev = conn->hdev;
648
649         BT_DBG("");
650
651         /* We could end up here due to having done directed advertising,
652          * so clean up the state if necessary. This should however only
653          * happen with broken hardware or if low duty cycle was used
654          * (which doesn't have a timeout of its own).
655          */
656         if (conn->role == HCI_ROLE_SLAVE) {
657                 /* Disable LE Advertising */
658                 le_disable_advertising(hdev);
659                 hci_dev_lock(hdev);
660                 hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
661                 hci_dev_unlock(hdev);
662                 return;
663         }
664
665         hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
666 }
667
668 struct iso_list_data {
669         union {
670                 u8  cig;
671                 u8  big;
672         };
673         union {
674                 u8  cis;
675                 u8  bis;
676                 u16 sync_handle;
677         };
678         int count;
679         bool big_term;
680         bool pa_sync_term;
681         bool big_sync_term;
682 };
683
684 static void bis_list(struct hci_conn *conn, void *data)
685 {
686         struct iso_list_data *d = data;
687
688         /* Skip if not broadcast/ANY address */
689         if (bacmp(&conn->dst, BDADDR_ANY))
690                 return;
691
692         if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
693             d->bis != conn->iso_qos.bcast.bis)
694                 return;
695
696         d->count++;
697 }
698
699 static int terminate_big_sync(struct hci_dev *hdev, void *data)
700 {
701         struct iso_list_data *d = data;
702
703         bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);
704
705         hci_disable_per_advertising_sync(hdev, d->bis);
706         hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);
707
708         /* Only terminate BIG if it has been created */
709         if (!d->big_term)
710                 return 0;
711
712         return hci_le_terminate_big_sync(hdev, d->big,
713                                          HCI_ERROR_LOCAL_HOST_TERM);
714 }
715
716 static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
717 {
718         kfree(data);
719 }
720
721 static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
722 {
723         struct iso_list_data *d;
724         int ret;
725
726         bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
727                    conn->iso_qos.bcast.bis);
728
729         d = kzalloc(sizeof(*d), GFP_KERNEL);
730         if (!d)
731                 return -ENOMEM;
732
733         d->big = conn->iso_qos.bcast.big;
734         d->bis = conn->iso_qos.bcast.bis;
735         d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);
736
737         ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
738                                  terminate_big_destroy);
739         if (ret)
740                 kfree(d);
741
742         return ret;
743 }
744
745 static int big_terminate_sync(struct hci_dev *hdev, void *data)
746 {
747         struct iso_list_data *d = data;
748
749         bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
750                    d->sync_handle);
751
752         if (d->big_sync_term)
753                 hci_le_big_terminate_sync(hdev, d->big);
754
755         if (d->pa_sync_term)
756                 return hci_le_pa_terminate_sync(hdev, d->sync_handle);
757
758         return 0;
759 }
760
761 static void find_bis(struct hci_conn *conn, void *data)
762 {
763         struct iso_list_data *d = data;
764
765         /* Ignore if BIG doesn't match */
766         if (d->big != conn->iso_qos.bcast.big)
767                 return;
768
769         d->count++;
770 }
771
772 static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
773 {
774         struct iso_list_data *d;
775         int ret;
776
777         bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);
778
779         d = kzalloc(sizeof(*d), GFP_KERNEL);
780         if (!d)
781                 return -ENOMEM;
782
783         d->big = big;
784         d->sync_handle = conn->sync_handle;
785
786         if (test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags)) {
787                 hci_conn_hash_list_flag(hdev, find_bis, ISO_LINK,
788                                         HCI_CONN_PA_SYNC, d);
789
790                 if (!d->count)
791                         d->pa_sync_term = true;
792
793                 d->count = 0;
794         }
795
796         if (test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags)) {
797                 hci_conn_hash_list_flag(hdev, find_bis, ISO_LINK,
798                                         HCI_CONN_BIG_SYNC, d);
799
800                 if (!d->count)
801                         d->big_sync_term = true;
802         }
803
804         ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
805                                  terminate_big_destroy);
806         if (ret)
807                 kfree(d);
808
809         return ret;
810 }
811
812 /* Cleanup BIS connection
813  *
814  * Detects if there are any BIS left connected in a BIG.
815  * Broadcaster: remove the advertising instance and terminate the BIG.
816  * Broadcast receiver: terminate the BIG sync and terminate the PA sync.
817  */
818 static void bis_cleanup(struct hci_conn *conn)
819 {
820         struct hci_dev *hdev = conn->hdev;
821         struct hci_conn *bis;
822
823         bt_dev_dbg(hdev, "conn %p", conn);
824
825         if (conn->role == HCI_ROLE_MASTER) {
826                 if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
827                         return;
828
829                 /* Check if ISO connection is a BIS and terminate advertising
830                  * set and BIG if there are no other connections using it.
831                  */
832                 bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
833                 if (bis)
834                         return;
835
836                 hci_le_terminate_big(hdev, conn);
837         } else {
838                 hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
839                                      conn);
840         }
841 }
842
843 static int remove_cig_sync(struct hci_dev *hdev, void *data)
844 {
845         u8 handle = PTR_UINT(data);
846
847         return hci_le_remove_cig_sync(hdev, handle);
848 }
849
850 static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
851 {
852         bt_dev_dbg(hdev, "handle 0x%2.2x", handle);
853
854         return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle),
855                                   NULL);
856 }
857
858 static void find_cis(struct hci_conn *conn, void *data)
859 {
860         struct iso_list_data *d = data;
861
862         /* Ignore broadcast or if the CIG doesn't match */
863         if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
864                 return;
865
866         d->count++;
867 }
868
869 /* Cleanup CIS connection:
870  *
871  * Detects if there are any CIS left connected in a CIG and removes the CIG if not.
872  */
873 static void cis_cleanup(struct hci_conn *conn)
874 {
875         struct hci_dev *hdev = conn->hdev;
876         struct iso_list_data d;
877
878         if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
879                 return;
880
881         memset(&d, 0, sizeof(d));
882         d.cig = conn->iso_qos.ucast.cig;
883
884         /* Check if ISO connection is a CIS and remove CIG if there are
885          * no other connections using it.
886          */
887         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
888         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
889         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
890         if (d.count)
891                 return;
892
893         hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
894 }
895
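/* Allocate a placeholder handle above HCI_CONN_HANDLE_MAX for a
 * connection whose real handle is not known yet. It is released in
 * hci_conn_cleanup() or when hci_conn_set_handle() assigns the real
 * handle.
 */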
896 static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
897 {
898         return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
899                                U16_MAX, GFP_ATOMIC);
900 }
901
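/* Common allocation helper for hci_conn_add() and hci_conn_add_unset():
 * checks that the controller has usable buffers for the requested link
 * type, initialises the defaults, per-type packet types and MTU, and
 * adds the connection to the connection hash.
 */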
902 static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
903                                        u8 role, u16 handle)
904 {
905         struct hci_conn *conn;
906
907         switch (type) {
908         case ACL_LINK:
909                 if (!hdev->acl_mtu)
910                         return ERR_PTR(-ECONNREFUSED);
911                 break;
912         case ISO_LINK:
913                 if (hdev->iso_mtu)
914                         /* Dedicated ISO Buffer exists */
915                         break;
916                 fallthrough;
917         case LE_LINK:
918                 if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
919                         return ERR_PTR(-ECONNREFUSED);
920                 if (!hdev->le_mtu && hdev->acl_mtu < HCI_MIN_LE_MTU)
921                         return ERR_PTR(-ECONNREFUSED);
922                 break;
923         case SCO_LINK:
924         case ESCO_LINK:
925                 if (!hdev->sco_pkts)
926                         /* Controller does not support SCO or eSCO over HCI */
927                         return ERR_PTR(-ECONNREFUSED);
928                 break;
929         default:
930                 return ERR_PTR(-ECONNREFUSED);
931         }
932
933         bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);
934
935         conn = kzalloc(sizeof(*conn), GFP_KERNEL);
936         if (!conn)
937                 return ERR_PTR(-ENOMEM);
938
939         bacpy(&conn->dst, dst);
940         bacpy(&conn->src, &hdev->bdaddr);
941         conn->handle = handle;
942         conn->hdev  = hdev;
943         conn->type  = type;
944         conn->role  = role;
945         conn->mode  = HCI_CM_ACTIVE;
946         conn->state = BT_OPEN;
947         conn->auth_type = HCI_AT_GENERAL_BONDING;
948         conn->io_capability = hdev->io_capability;
949         conn->remote_auth = 0xff;
950         conn->key_type = 0xff;
951         conn->rssi = HCI_RSSI_INVALID;
952         conn->tx_power = HCI_TX_POWER_INVALID;
953         conn->max_tx_power = HCI_TX_POWER_INVALID;
954         conn->sync_handle = HCI_SYNC_HANDLE_INVALID;
955         conn->sid = HCI_SID_INVALID;
956
957         set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
958         conn->disc_timeout = HCI_DISCONN_TIMEOUT;
959
960         /* Set Default Authenticated payload timeout to 30s */
961         conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
962
963         if (conn->role == HCI_ROLE_MASTER)
964                 conn->out = true;
965
966         switch (type) {
967         case ACL_LINK:
968                 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
969                 conn->mtu = hdev->acl_mtu;
970                 break;
971         case LE_LINK:
972                 /* conn->src should reflect the local identity address */
973                 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
974                 conn->mtu = hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
975                 break;
976         case ISO_LINK:
977                 /* conn->src should reflect the local identity address */
978                 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
979
980                 /* set proper cleanup function */
981                 if (!bacmp(dst, BDADDR_ANY))
982                         conn->cleanup = bis_cleanup;
983                 else if (conn->role == HCI_ROLE_MASTER)
984                         conn->cleanup = cis_cleanup;
985
986                 conn->mtu = hdev->iso_mtu ? hdev->iso_mtu :
987                             hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
988                 break;
989         case SCO_LINK:
990                 if (lmp_esco_capable(hdev))
991                         conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
992                                         (hdev->esco_type & EDR_ESCO_MASK);
993                 else
994                         conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
995
996                 conn->mtu = hdev->sco_mtu;
997                 break;
998         case ESCO_LINK:
999                 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
1000                 conn->mtu = hdev->sco_mtu;
1001                 break;
1002         }
1003
1004         skb_queue_head_init(&conn->data_q);
1005
1006         INIT_LIST_HEAD(&conn->chan_list);
1007         INIT_LIST_HEAD(&conn->link_list);
1008
1009         INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
1010         INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
1011         INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
1012         INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
1013
1014         atomic_set(&conn->refcnt, 0);
1015
1016         hci_dev_hold(hdev);
1017
1018         hci_conn_hash_add(hdev, conn);
1019
1020         /* The SCO and eSCO connections will only be notified when their
1021          * setup has been completed. This is different from ACL links which
1022          * can be notified right away.
1023          */
1024         if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
1025                 if (hdev->notify)
1026                         hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
1027         }
1028
1029         hci_conn_init_sysfs(conn);
1030
1031         return conn;
1032 }
1033
1034 struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
1035                                     bdaddr_t *dst, u8 role)
1036 {
1037         int handle;
1038
1039         bt_dev_dbg(hdev, "dst %pMR", dst);
1040
1041         handle = hci_conn_hash_alloc_unset(hdev);
1042         if (unlikely(handle < 0))
1043                 return ERR_PTR(-ECONNREFUSED);
1044
1045         return __hci_conn_add(hdev, type, dst, role, handle);
1046 }
1047
1048 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
1049                               u8 role, u16 handle)
1050 {
1051         if (handle > HCI_CONN_HANDLE_MAX)
1052                 return ERR_PTR(-EINVAL);
1053
1054         return __hci_conn_add(hdev, type, dst, role, handle);
1055 }
1056
1057 static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
1058 {
1059         if (!reason)
1060                 reason = HCI_ERROR_REMOTE_USER_TERM;
1061
1062         /* Due to a race, the SCO/ISO conn might not be established yet at this point,
1063          * and nothing else will clean it up. In other cases it is done via HCI
1064          * events.
1065          */
1066         switch (conn->type) {
1067         case SCO_LINK:
1068         case ESCO_LINK:
1069                 if (HCI_CONN_HANDLE_UNSET(conn->handle))
1070                         hci_conn_failed(conn, reason);
1071                 break;
1072         case ISO_LINK:
1073                 if ((conn->state != BT_CONNECTED &&
1074                     !test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) ||
1075                     test_bit(HCI_CONN_BIG_CREATED, &conn->flags))
1076                         hci_conn_failed(conn, reason);
1077                 break;
1078         }
1079 }
1080
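/* Detach a connection from its parent/child links. For a parent ACL
 * this recursively unlinks every child SCO/ISO connection and, while
 * the device is still up, fails children that were never fully
 * established (see hci_conn_cleanup_child()); for a child it removes
 * the link entry and drops the reference held on the parent.
 */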
1081 static void hci_conn_unlink(struct hci_conn *conn)
1082 {
1083         struct hci_dev *hdev = conn->hdev;
1084
1085         bt_dev_dbg(hdev, "hcon %p", conn);
1086
1087         if (!conn->parent) {
1088                 struct hci_link *link, *t;
1089
1090                 list_for_each_entry_safe(link, t, &conn->link_list, list) {
1091                         struct hci_conn *child = link->conn;
1092
1093                         hci_conn_unlink(child);
1094
1095                         /* If hdev is down it means
1096                          * hci_dev_close_sync/hci_conn_hash_flush is in progress
1097                          * and links don't need to be cleaned up as all connections
1098                          * will be cleaned up.
1099                          */
1100                         if (!test_bit(HCI_UP, &hdev->flags))
1101                                 continue;
1102
1103                         hci_conn_cleanup_child(child, conn->abort_reason);
1104                 }
1105
1106                 return;
1107         }
1108
1109         if (!conn->link)
1110                 return;
1111
1112         list_del_rcu(&conn->link->list);
1113         synchronize_rcu();
1114
1115         hci_conn_drop(conn->parent);
1116         hci_conn_put(conn->parent);
1117         conn->parent = NULL;
1118
1119         kfree(conn->link);
1120         conn->link = NULL;
1121 }
1122
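/* Tear down a connection object: unlink it from parent/children, stop
 * its delayed work, return unacked packet credits to the matching
 * per-type counter and remove it from the hash via hci_conn_cleanup().
 */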
1123 void hci_conn_del(struct hci_conn *conn)
1124 {
1125         struct hci_dev *hdev = conn->hdev;
1126
1127         BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
1128
1129         hci_conn_unlink(conn);
1130
1131         disable_delayed_work_sync(&conn->disc_work);
1132         disable_delayed_work_sync(&conn->auto_accept_work);
1133         disable_delayed_work_sync(&conn->idle_work);
1134
1135         if (conn->type == ACL_LINK) {
1136                 /* Unacked frames */
1137                 hdev->acl_cnt += conn->sent;
1138         } else if (conn->type == LE_LINK) {
1139                 cancel_delayed_work(&conn->le_conn_timeout);
1140
1141                 if (hdev->le_pkts)
1142                         hdev->le_cnt += conn->sent;
1143                 else
1144                         hdev->acl_cnt += conn->sent;
1145         } else {
1146                 /* Unacked ISO frames */
1147                 if (conn->type == ISO_LINK) {
1148                         if (hdev->iso_pkts)
1149                                 hdev->iso_cnt += conn->sent;
1150                         else if (hdev->le_pkts)
1151                                 hdev->le_cnt += conn->sent;
1152                         else
1153                                 hdev->acl_cnt += conn->sent;
1154                 }
1155         }
1156
1157         skb_queue_purge(&conn->data_q);
1158
1159         /* Remove the connection from the list and cleanup its remaining
1160          * state. This is a separate function since for some cases like
1161          * BT_CONNECT_SCAN we *only* want the cleanup part without the
1162          * rest of hci_conn_del.
1163          */
1164         hci_conn_cleanup(conn);
1165
1166         /* Dequeue callbacks using connection pointer as data */
1167         hci_cmd_sync_dequeue(hdev, NULL, conn, NULL);
1168 }
1169
1170 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
1171 {
1172         int use_src = bacmp(src, BDADDR_ANY);
1173         struct hci_dev *hdev = NULL, *d;
1174
1175         BT_DBG("%pMR -> %pMR", src, dst);
1176
1177         read_lock(&hci_dev_list_lock);
1178
1179         list_for_each_entry(d, &hci_dev_list, list) {
1180                 if (!test_bit(HCI_UP, &d->flags) ||
1181                     hci_dev_test_flag(d, HCI_USER_CHANNEL))
1182                         continue;
1183
1184                 /* Simple routing:
1185                  *   No source address - find interface with bdaddr != dst
1186                  *   Source address    - find interface with bdaddr == src
1187                  */
1188
1189                 if (use_src) {
1190                         bdaddr_t id_addr;
1191                         u8 id_addr_type;
1192
1193                         if (src_type == BDADDR_BREDR) {
1194                                 if (!lmp_bredr_capable(d))
1195                                         continue;
1196                                 bacpy(&id_addr, &d->bdaddr);
1197                                 id_addr_type = BDADDR_BREDR;
1198                         } else {
1199                                 if (!lmp_le_capable(d))
1200                                         continue;
1201
1202                                 hci_copy_identity_address(d, &id_addr,
1203                                                           &id_addr_type);
1204
1205                                 /* Convert from HCI to three-value type */
1206                                 if (id_addr_type == ADDR_LE_DEV_PUBLIC)
1207                                         id_addr_type = BDADDR_LE_PUBLIC;
1208                                 else
1209                                         id_addr_type = BDADDR_LE_RANDOM;
1210                         }
1211
1212                         if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
1213                                 hdev = d; break;
1214                         }
1215                 } else {
1216                         if (bacmp(&d->bdaddr, dst)) {
1217                                 hdev = d; break;
1218                         }
1219                 }
1220         }
1221
1222         if (hdev)
1223                 hdev = hci_dev_hold(hdev);
1224
1225         read_unlock(&hci_dev_list_lock);
1226         return hdev;
1227 }
1228 EXPORT_SYMBOL(hci_get_route);
1229
1230 /* This function requires the caller holds hdev->lock */
1231 static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
1232 {
1233         struct hci_dev *hdev = conn->hdev;
1234
1235         hci_connect_le_scan_cleanup(conn, status);
1236
1237         /* Enable advertising in case this was a failed connection
1238          * attempt as a peripheral.
1239          */
1240         hci_enable_advertising(hdev);
1241 }
1242
1243 /* This function requires the caller holds hdev->lock */
1244 void hci_conn_failed(struct hci_conn *conn, u8 status)
1245 {
1246         struct hci_dev *hdev = conn->hdev;
1247
1248         bt_dev_dbg(hdev, "status 0x%2.2x", status);
1249
1250         switch (conn->type) {
1251         case LE_LINK:
1252                 hci_le_conn_failed(conn, status);
1253                 break;
1254         case ACL_LINK:
1255                 mgmt_connect_failed(hdev, conn, status);
1256                 break;
1257         }
1258
1259         /* In case BIG/PA sync failed, clear the conn flags so that
1260          * the conns will be correctly cleaned up by the ISO layer
1261          */
1262         test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags);
1263         test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags);
1264
1265         conn->state = BT_CLOSED;
1266         hci_connect_cfm(conn, status);
1267         hci_conn_del(conn);
1268 }
1269
1270 /* This function requires the caller holds hdev->lock */
1271 u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
1272 {
1273         struct hci_dev *hdev = conn->hdev;
1274
1275         bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);
1276
1277         if (conn->handle == handle)
1278                 return 0;
1279
1280         if (handle > HCI_CONN_HANDLE_MAX) {
1281                 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
1282                            handle, HCI_CONN_HANDLE_MAX);
1283                 return HCI_ERROR_INVALID_PARAMETERS;
1284         }
1285
1286         /* If abort_reason has been set it means the connection is being
1287          * aborted and the handle shall not be changed.
1288          */
1289         if (conn->abort_reason)
1290                 return conn->abort_reason;
1291
1292         if (HCI_CONN_HANDLE_UNSET(conn->handle))
1293                 ida_free(&hdev->unset_handle_ida, conn->handle);
1294
1295         conn->handle = handle;
1296
1297         return 0;
1298 }
1299
1300 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1301                                 u8 dst_type, bool dst_resolved, u8 sec_level,
1302                                 u16 conn_timeout, u8 role, u8 phy, u8 sec_phy)
1303 {
1304         struct hci_conn *conn;
1305         struct smp_irk *irk;
1306         int err;
1307
1308         /* Let's make sure that LE is enabled. */
1309         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1310                 if (lmp_le_capable(hdev))
1311                         return ERR_PTR(-ECONNREFUSED);
1312
1313                 return ERR_PTR(-EOPNOTSUPP);
1314         }
1315
1316         /* Since the controller supports only one LE connection attempt at a
1317          * time, we return -EBUSY if there is any connection attempt running.
1318          */
1319         if (hci_lookup_le_connect(hdev))
1320                 return ERR_PTR(-EBUSY);
1321
1322         /* If there's already a connection object but it's not in
1323          * scanning state it means it must already be established, in
1324          * which case we can't do anything else except report a failure
1325          * to connect.
1326          */
1327         conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1328         if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1329                 return ERR_PTR(-EBUSY);
1330         }
1331
1332         /* Check if the destination address has been resolved by the controller
1333          * since if it did then the identity address shall be used.
1334          */
1335         if (!dst_resolved) {
1336                 /* When given an identity address with existing identity
1337                  * resolving key, the connection needs to be established
1338                  * to a resolvable random address.
1339                  *
1340                  * Storing the resolvable random address is required here
1341                  * to handle connection failures. The address will later
1342                  * be resolved back into the original identity address
1343                  * from the connect request.
1344                  */
1345                 irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1346                 if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1347                         dst = &irk->rpa;
1348                         dst_type = ADDR_LE_DEV_RANDOM;
1349                 }
1350         }
1351
1352         if (conn) {
1353                 bacpy(&conn->dst, dst);
1354         } else {
1355                 conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
1356                 if (IS_ERR(conn))
1357                         return conn;
1358                 hci_conn_hold(conn);
1359                 conn->pending_sec_level = sec_level;
1360         }
1361
1362         conn->dst_type = dst_type;
1363         conn->sec_level = BT_SECURITY_LOW;
1364         conn->conn_timeout = conn_timeout;
1365         conn->le_adv_phy = phy;
1366         conn->le_adv_sec_phy = sec_phy;
1367
1368         err = hci_connect_le_sync(hdev, conn);
1369         if (err) {
1370                 hci_conn_del(conn);
1371                 return ERR_PTR(err);
1372         }
1373
1374         return conn;
1375 }
1376
1377 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1378 {
1379         struct hci_conn *conn;
1380
1381         conn = hci_conn_hash_lookup_le(hdev, addr, type);
1382         if (!conn)
1383                 return false;
1384
1385         if (conn->state != BT_CONNECTED)
1386                 return false;
1387
1388         return true;
1389 }
1390
1391 /* This function requires the caller holds hdev->lock */
1392 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1393                                         bdaddr_t *addr, u8 addr_type)
1394 {
1395         struct hci_conn_params *params;
1396
1397         if (is_connected(hdev, addr, addr_type))
1398                 return -EISCONN;
1399
1400         params = hci_conn_params_lookup(hdev, addr, addr_type);
1401         if (!params) {
1402                 params = hci_conn_params_add(hdev, addr, addr_type);
1403                 if (!params)
1404                         return -ENOMEM;
1405
1406                 /* If we created new params, mark them to be deleted in
1407                  * hci_connect_le_scan_cleanup. It's a different case from
1408                  * existing disabled params; those will stay after cleanup.
1409                  */
1410                 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1411         }
1412
1413         /* We're trying to connect, so make sure params are on pend_le_conns */
1414         if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1415             params->auto_connect == HCI_AUTO_CONN_REPORT ||
1416             params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1417                 hci_pend_le_list_del_init(params);
1418                 hci_pend_le_list_add(params, &hdev->pend_le_conns);
1419         }
1420
1421         params->explicit_connect = true;
1422
1423         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1424                params->auto_connect);
1425
1426         return 0;
1427 }
1428
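/* Pick a free BIG handle (0x00-0xEE) when the caller left
 * qos->bcast.big unset.
 */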
1429 static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1430 {
1431         struct hci_conn *conn;
1432         u8  big;
1433
1434         /* Allocate a BIG if not set */
1435         if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
1436                 for (big = 0x00; big < 0xef; big++) {
1437
1438                         conn = hci_conn_hash_lookup_big(hdev, big);
1439                         if (!conn)
1440                                 break;
1441                 }
1442
1443                 if (big == 0xef)
1444                         return -EADDRNOTAVAIL;
1445
1446                 /* Update BIG */
1447                 qos->bcast.big = big;
1448         }
1449
1450         return 0;
1451 }
1452
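/* Pick the advertising instance used to carry the BIS when the caller
 * left qos->bcast.bis unset: reuse the instance of an existing BIG with
 * the same handle, otherwise take the first unused advertising set
 * (instance 0x00 is skipped as it is the general purpose set).
 */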
1453 static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1454 {
1455         struct hci_conn *conn;
1456         u8  bis;
1457
1458         /* Allocate BIS if not set */
1459         if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
1460                 if (qos->bcast.big != BT_ISO_QOS_BIG_UNSET) {
1461                         conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1462
1463                         if (conn) {
1464                                 /* If the BIG handle is already matched to an advertising
1465                                  * handle, do not allocate a new one.
1466                                  */
1467                                 qos->bcast.bis = conn->iso_qos.bcast.bis;
1468                                 return 0;
1469                         }
1470                 }
1471
1472                 /* Find an unused adv set to advertise BIS, skip instance 0x00
1473                  * since it is reserved as the general purpose set.
1474                  */
1475                 for (bis = 0x01; bis < hdev->le_num_of_adv_sets;
1476                      bis++) {
1477
1478                         conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis);
1479                         if (!conn)
1480                                 break;
1481                 }
1482
1483                 if (bis == hdev->le_num_of_adv_sets)
1484                         return -EADDRNOTAVAIL;
1485
1486                 /* Update BIS */
1487                 qos->bcast.bis = bis;
1488         }
1489
1490         return 0;
1491 }
1492
1493 /* This function requires the caller holds hdev->lock */
1494 static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
1495                                     struct bt_iso_qos *qos, __u8 base_len,
1496                                     __u8 *base)
1497 {
1498         struct hci_conn *conn;
1499         int err;
1500
1501         /* Let's make sure that LE is enabled. */
1502         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1503                 if (lmp_le_capable(hdev))
1504                         return ERR_PTR(-ECONNREFUSED);
1505                 return ERR_PTR(-EOPNOTSUPP);
1506         }
1507
1508         err = qos_set_big(hdev, qos);
1509         if (err)
1510                 return ERR_PTR(err);
1511
1512         err = qos_set_bis(hdev, qos);
1513         if (err)
1514                 return ERR_PTR(err);
1515
1516         /* Check if the LE Create BIG command has already been sent */
1517         conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
1518                                                 qos->bcast.big);
1519         if (conn)
1520                 return ERR_PTR(-EADDRINUSE);
1521
1522         /* Check BIS settings against other bound BISes, since all
1523          * BISes in a BIG must have the same value for all parameters
1524          */
1525         conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1526
1527         if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
1528                      base_len != conn->le_per_adv_data_len ||
1529                      memcmp(conn->le_per_adv_data, base, base_len)))
1530                 return ERR_PTR(-EADDRINUSE);
1531
1532         conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1533         if (IS_ERR(conn))
1534                 return conn;
1535
1536         conn->state = BT_CONNECT;
1537
1538         hci_conn_hold(conn);
1539         return conn;
1540 }
1541
1542 /* This function requires the caller holds hdev->lock */
1543 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1544                                      u8 dst_type, u8 sec_level,
1545                                      u16 conn_timeout,
1546                                      enum conn_reasons conn_reason)
1547 {
1548         struct hci_conn *conn;
1549
1550         /* Let's make sure that LE is enabled. */
1551         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1552                 if (lmp_le_capable(hdev))
1553                         return ERR_PTR(-ECONNREFUSED);
1554
1555                 return ERR_PTR(-EOPNOTSUPP);
1556         }
1557
1558         /* Some devices send ATT messages as soon as the physical link is
1559          * established. To be able to handle these ATT messages, the user-
1560          * space first establishes the connection and then starts the pairing
1561          * process.
1562          *
1563          * So if a hci_conn object already exists for the following connection
1564          * attempt, we simply update pending_sec_level and auth_type fields
1565          * and return the object found.
1566          */
1567         conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1568         if (conn) {
1569                 if (conn->pending_sec_level < sec_level)
1570                         conn->pending_sec_level = sec_level;
1571                 goto done;
1572         }
1573
1574         BT_DBG("requesting refresh of dst_addr");
1575
1576         conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1577         if (IS_ERR(conn))
1578                 return conn;
1579
1580         if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
1581                 hci_conn_del(conn);
1582                 return ERR_PTR(-EBUSY);
1583         }
1584
1585         conn->state = BT_CONNECT;
1586         set_bit(HCI_CONN_SCANNING, &conn->flags);
1587         conn->dst_type = dst_type;
1588         conn->sec_level = BT_SECURITY_LOW;
1589         conn->pending_sec_level = sec_level;
1590         conn->conn_timeout = conn_timeout;
1591         conn->conn_reason = conn_reason;
1592
1593         hci_update_passive_scan(hdev);
1594
1595 done:
1596         hci_conn_hold(conn);
1597         return conn;
1598 }
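
/* Minimal usage sketch (illustrative only; the security level, timeout and
 * connection reason are placeholders): a caller holding hdev->lock connects
 * through passive scanning with e.g.
 *
 *	conn = hci_connect_le_scan(hdev, &dst, ADDR_LE_DEV_RANDOM,
 *				   BT_SECURITY_MEDIUM, HCI_LE_CONN_TIMEOUT,
 *				   CONN_REASON_L2CAP_CHAN);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 *
 * and is expected to balance the hci_conn_hold() taken above with
 * hci_conn_drop() once it is done with the reference.
 */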
1599
1600 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1601                                  u8 sec_level, u8 auth_type,
1602                                  enum conn_reasons conn_reason, u16 timeout)
1603 {
1604         struct hci_conn *acl;
1605
1606         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1607                 if (lmp_bredr_capable(hdev))
1608                         return ERR_PTR(-ECONNREFUSED);
1609
1610                 return ERR_PTR(-EOPNOTSUPP);
1611         }
1612
1613         /* Reject outgoing connection to a device with the same BD_ADDR to
1614          * guard against CVE-2020-26555.
1615          */
1616         if (!bacmp(&hdev->bdaddr, dst)) {
1617                 bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
1618                            dst);
1619                 return ERR_PTR(-ECONNREFUSED);
1620         }
1621
1622         acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1623         if (!acl) {
1624                 acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1625                 if (IS_ERR(acl))
1626                         return acl;
1627         }
1628
1629         hci_conn_hold(acl);
1630
1631         acl->conn_reason = conn_reason;
1632         if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1633                 int err;
1634
1635                 acl->sec_level = BT_SECURITY_LOW;
1636                 acl->pending_sec_level = sec_level;
1637                 acl->auth_type = auth_type;
1638                 acl->conn_timeout = timeout;
1639
1640                 err = hci_connect_acl_sync(hdev, acl);
1641                 if (err) {
1642                         hci_conn_del(acl);
1643                         return ERR_PTR(err);
1644                 }
1645         }
1646
1647         return acl;
1648 }
1649
1650 static struct hci_link *hci_conn_link(struct hci_conn *parent,
1651                                       struct hci_conn *conn)
1652 {
1653         struct hci_dev *hdev = parent->hdev;
1654         struct hci_link *link;
1655
1656         bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);
1657
1658         if (conn->link)
1659                 return conn->link;
1660
1661         if (conn->parent)
1662                 return NULL;
1663
1664         link = kzalloc(sizeof(*link), GFP_KERNEL);
1665         if (!link)
1666                 return NULL;
1667
1668         link->conn = hci_conn_hold(conn);
1669         conn->link = link;
1670         conn->parent = hci_conn_get(parent);
1671
1672         /* Use list_add_tail_rcu to append to the list */
1673         list_add_tail_rcu(&link->list, &parent->link_list);
1674
1675         return link;
1676 }
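
/* The link created above takes its own reference on the child via
 * hci_conn_hold() and on the parent via hci_conn_get(); callers that only
 * needed the linking therefore drop their own child reference right after
 * (see hci_connect_cis() and hci_bind_bis() below).
 */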
1677
1678 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1679                                  __u16 setting, struct bt_codec *codec,
1680                                  u16 timeout)
1681 {
1682         struct hci_conn *acl;
1683         struct hci_conn *sco;
1684         struct hci_link *link;
1685
1686         acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
1687                               CONN_REASON_SCO_CONNECT, timeout);
1688         if (IS_ERR(acl))
1689                 return acl;
1690
1691         sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1692         if (!sco) {
1693                 sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
1694                 if (IS_ERR(sco)) {
1695                         hci_conn_drop(acl);
1696                         return sco;
1697                 }
1698         }
1699
1700         link = hci_conn_link(acl, sco);
1701         if (!link) {
1702                 hci_conn_drop(acl);
1703                 hci_conn_drop(sco);
1704                 return ERR_PTR(-ENOLINK);
1705         }
1706
1707         sco->setting = setting;
1708         sco->codec = *codec;
1709
1710         if (acl->state == BT_CONNECTED &&
1711             (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1712                 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1713                 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1714
1715                 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1716                         /* defer SCO setup until mode change completed */
1717                         set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1718                         return sco;
1719                 }
1720
1721                 hci_sco_setup(acl, 0x00);
1722         }
1723
1724         return sco;
1725 }
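
/* Usage sketch (illustrative only; setting, codec and timeout come from the
 * caller): the SCO socket layer would request an eSCO link roughly as
 *
 *	sco = hci_connect_sco(hdev, ESCO_LINK, &dst, setting, &codec,
 *			      timeout);
 *	if (IS_ERR(sco))
 *		return PTR_ERR(sco);
 *
 * where the underlying ACL is created (or reused) first, and the SCO/eSCO
 * setup itself may be deferred until a pending mode change completes.
 */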
1726
1727 static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
1728 {
1729         struct hci_dev *hdev = conn->hdev;
1730         struct hci_cp_le_create_big cp;
1731         struct iso_list_data data;
1732
1733         memset(&cp, 0, sizeof(cp));
1734
1735         data.big = qos->bcast.big;
1736         data.bis = qos->bcast.bis;
1737         data.count = 0;
1738
1739         /* Create a BIS for each bound connection */
1740         hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1741                                  BT_BOUND, &data);
1742
1743         cp.handle = qos->bcast.big;
1744         cp.adv_handle = qos->bcast.bis;
1745         cp.num_bis  = data.count;
1746         hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
1747         cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
1748         cp.bis.latency = cpu_to_le16(qos->bcast.out.latency);
1749         cp.bis.rtn  = qos->bcast.out.rtn;
1750         cp.bis.phy  = qos->bcast.out.phy;
1751         cp.bis.packing = qos->bcast.packing;
1752         cp.bis.framing = qos->bcast.framing;
1753         cp.bis.encryption = qos->bcast.encryption;
1754         memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));
1755
1756         return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
1757 }
1758
1759 static int set_cig_params_sync(struct hci_dev *hdev, void *data)
1760 {
1761         DEFINE_FLEX(struct hci_cp_le_set_cig_params, pdu, cis, num_cis, 0x1f);
1762         u8 cig_id = PTR_UINT(data);
1763         struct hci_conn *conn;
1764         struct bt_iso_qos *qos;
1765         u8 aux_num_cis = 0;
1766         u8 cis_id;
1767
1768         conn = hci_conn_hash_lookup_cig(hdev, cig_id);
1769         if (!conn)
1770                 return 0;
1771
1772         qos = &conn->iso_qos;
1773         pdu->cig_id = cig_id;
1774         hci_cpu_to_le24(qos->ucast.out.interval, pdu->c_interval);
1775         hci_cpu_to_le24(qos->ucast.in.interval, pdu->p_interval);
1776         pdu->sca = qos->ucast.sca;
1777         pdu->packing = qos->ucast.packing;
1778         pdu->framing = qos->ucast.framing;
1779         pdu->c_latency = cpu_to_le16(qos->ucast.out.latency);
1780         pdu->p_latency = cpu_to_le16(qos->ucast.in.latency);
1781
1782         /* Reprogram all CISes with the same CIG; the valid ranges are:
1783          * num_cis: 0x00 to 0x1F
1784          * cis_id: 0x00 to 0xEF
1785          */
1786         for (cis_id = 0x00; cis_id < 0xf0 &&
1787              aux_num_cis < pdu->num_cis; cis_id++) {
1788                 struct hci_cis_params *cis;
1789
1790                 conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
1791                 if (!conn)
1792                         continue;
1793
1794                 qos = &conn->iso_qos;
1795
1796                 cis = &pdu->cis[aux_num_cis++];
1797                 cis->cis_id = cis_id;
1798                 cis->c_sdu  = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
1799                 cis->p_sdu  = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
1800                 cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy :
1801                               qos->ucast.in.phy;
1802                 cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy :
1803                               qos->ucast.out.phy;
1804                 cis->c_rtn  = qos->ucast.out.rtn;
1805                 cis->p_rtn  = qos->ucast.in.rtn;
1806         }
1807         pdu->num_cis = aux_num_cis;
1808
1809         if (!pdu->num_cis)
1810                 return 0;
1811
1812         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
1813                                      struct_size(pdu, cis, pdu->num_cis),
1814                                      pdu, HCI_CMD_TIMEOUT);
1815 }
1816
1817 static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
1818 {
1819         struct hci_dev *hdev = conn->hdev;
1820         struct iso_list_data data;
1821
1822         memset(&data, 0, sizeof(data));
1823
1824         /* Allocate the first still-reconfigurable CIG if not already set */
1825         if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
1826                 for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
1827                         data.count = 0;
1828
1829                         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
1830                                                  BT_CONNECT, &data);
1831                         if (data.count)
1832                                 continue;
1833
1834                         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
1835                                                  BT_CONNECTED, &data);
1836                         if (!data.count)
1837                                 break;
1838                 }
1839
1840                 if (data.cig == 0xf0)
1841                         return false;
1842
1843                 /* Update CIG */
1844                 qos->ucast.cig = data.cig;
1845         }
1846
1847         if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
1848                 if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
1849                                              qos->ucast.cis))
1850                         return false;
1851                 goto done;
1852         }
1853
1854         /* Allocate first available CIS if not set */
1855         for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
1856              data.cis++) {
1857                 if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
1858                                               data.cis)) {
1859                         /* Update CIS */
1860                         qos->ucast.cis = data.cis;
1861                         break;
1862                 }
1863         }
1864
1865         if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
1866                 return false;
1867
1868 done:
1869         if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
1870                                UINT_PTR(qos->ucast.cig), NULL) < 0)
1871                 return false;
1872
1873         return true;
1874 }
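
/* CIG/CIS allocation summary: CIG IDs 0x00-0xEF are scanned for one with no
 * CIS in BT_CONNECT or BT_CONNECTED state (i.e. one that can still be
 * reconfigured), then the first CIS ID not already in use within that CIG
 * is picked. The actual HCI_OP_LE_SET_CIG_PARAMS command is built and sent
 * from set_cig_params_sync() on the cmd_sync queue.
 */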
1875
1876 struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
1877                               __u8 dst_type, struct bt_iso_qos *qos)
1878 {
1879         struct hci_conn *cis;
1880
1881         cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
1882                                        qos->ucast.cis);
1883         if (!cis) {
1884                 cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1885                 if (IS_ERR(cis))
1886                         return cis;
1887                 cis->cleanup = cis_cleanup;
1888                 cis->dst_type = dst_type;
1889                 cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
1890                 cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;
1891         }
1892
1893         if (cis->state == BT_CONNECTED)
1894                 return cis;
1895
1896         /* Check if the CIS has been set and the settings match */
1897         if (cis->state == BT_BOUND &&
1898             !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
1899                 return cis;
1900
1901         /* Update LINK PHYs according to QoS preference */
1902         cis->le_tx_phy = qos->ucast.out.phy;
1903         cis->le_rx_phy = qos->ucast.in.phy;
1904
1905         /* If output interval is not set use the input interval as it cannot be
1906          * 0x000000.
1907          */
1908         if (!qos->ucast.out.interval)
1909                 qos->ucast.out.interval = qos->ucast.in.interval;
1910
1911         /* If input interval is not set use the output interval as it cannot be
1912          * 0x000000.
1913          */
1914         if (!qos->ucast.in.interval)
1915                 qos->ucast.in.interval = qos->ucast.out.interval;
1916
1917         /* If output latency is not set use the input latency as it cannot be
1918          * 0x0000.
1919          */
1920         if (!qos->ucast.out.latency)
1921                 qos->ucast.out.latency = qos->ucast.in.latency;
1922
1923         /* If input latency is not set use the output latency as it cannot be
1924          * 0x0000.
1925          */
1926         if (!qos->ucast.in.latency)
1927                 qos->ucast.in.latency = qos->ucast.out.latency;
1928
1929         if (!hci_le_set_cig_params(cis, qos)) {
1930                 hci_conn_drop(cis);
1931                 return ERR_PTR(-EINVAL);
1932         }
1933
1934         hci_conn_hold(cis);
1935
1936         cis->iso_qos = *qos;
1937         cis->state = BT_BOUND;
1938
1939         return cis;
1940 }
1941
1942 bool hci_iso_setup_path(struct hci_conn *conn)
1943 {
1944         struct hci_dev *hdev = conn->hdev;
1945         struct hci_cp_le_setup_iso_path cmd;
1946
1947         memset(&cmd, 0, sizeof(cmd));
1948
1949         if (conn->iso_qos.ucast.out.sdu) {
1950                 cmd.handle = cpu_to_le16(conn->handle);
1951                 cmd.direction = 0x00; /* Input (Host to Controller) */
1952                 cmd.path = 0x00; /* HCI path if enabled */
1953                 cmd.codec = 0x03; /* Transparent Data */
1954
1955                 if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1956                                  &cmd) < 0)
1957                         return false;
1958         }
1959
1960         if (conn->iso_qos.ucast.in.sdu) {
1961                 cmd.handle = cpu_to_le16(conn->handle);
1962                 cmd.direction = 0x01; /* Output (Controller to Host) */
1963                 cmd.path = 0x00; /* HCI path if enabled */
1964                 cmd.codec = 0x03; /* Transparent Data */
1965
1966                 if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1967                                  &cmd) < 0)
1968                         return false;
1969         }
1970
1971         return true;
1972 }
1973
1974 int hci_conn_check_create_cis(struct hci_conn *conn)
1975 {
1976         if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY))
1977                 return -EINVAL;
1978
1979         if (!conn->parent || conn->parent->state != BT_CONNECTED ||
1980             conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
1981                 return 1;
1982
1983         return 0;
1984 }
1985
1986 static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
1987 {
1988         return hci_le_create_cis_sync(hdev);
1989 }
1990
1991 int hci_le_create_cis_pending(struct hci_dev *hdev)
1992 {
1993         struct hci_conn *conn;
1994         bool pending = false;
1995
1996         rcu_read_lock();
1997
1998         list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
1999                 if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
2000                         rcu_read_unlock();
2001                         return -EBUSY;
2002                 }
2003
2004                 if (!hci_conn_check_create_cis(conn))
2005                         pending = true;
2006         }
2007
2008         rcu_read_unlock();
2009
2010         if (!pending)
2011                 return 0;
2012
2013         /* Queue Create CIS */
2014         return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
2015 }
2016
2017 static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
2018                               struct bt_iso_io_qos *qos, __u8 phy)
2019 {
2020         /* Only set the SDU (from the connection MTU) if the PHY is enabled */
2021         if (!qos->sdu && qos->phy)
2022                 qos->sdu = conn->mtu;
2023
2024         /* Use the same PHY as ACL if set to any */
2025         if (qos->phy == BT_ISO_PHY_ANY)
2026                 qos->phy = phy;
2027
2028         /* Use LE ACL connection interval if not set */
2029         if (!qos->interval)
2030                 /* Convert the ACL interval from 1.25 ms units to us */
2031                 qos->interval = conn->le_conn_interval * 1250;
2032
2033         /* Use LE ACL connection latency if not set */
2034         if (!qos->latency)
2035                 qos->latency = conn->le_conn_latency;
2036 }
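
/* Worked example of the interval fallback above: an LE ACL connection
 * interval of 24 units corresponds to 24 * 1.25 ms = 30 ms, so an unset
 * ISO SDU interval defaults to 24 * 1250 = 30000 us.
 */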
2037
2038 static int create_big_sync(struct hci_dev *hdev, void *data)
2039 {
2040         struct hci_conn *conn = data;
2041         struct bt_iso_qos *qos = &conn->iso_qos;
2042         u16 interval, sync_interval = 0;
2043         u32 flags = 0;
2044         int err;
2045
2046         if (qos->bcast.out.phy == 0x02)
2047                 flags |= MGMT_ADV_FLAG_SEC_2M;
2048
2049         /* Align intervals */
2050         interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor;
2051
2052         if (qos->bcast.bis)
2053                 sync_interval = interval * 4;
2054
2055         err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
2056                                      conn->le_per_adv_data, flags, interval,
2057                                      interval, sync_interval);
2058         if (err)
2059                 return err;
2060
2061         return hci_le_create_big(conn, &conn->iso_qos);
2062 }
2063
2064 static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
2065 {
2066         bt_dev_dbg(hdev, "");
2067
2068         if (err)
2069                 bt_dev_err(hdev, "Unable to create PA: %d", err);
2070 }
2071
2072 static bool hci_conn_check_create_pa_sync(struct hci_conn *conn)
2073 {
2074         if (conn->type != ISO_LINK || conn->sid == HCI_SID_INVALID)
2075                 return false;
2076
2077         return true;
2078 }
2079
2080 static int create_pa_sync(struct hci_dev *hdev, void *data)
2081 {
2082         struct hci_cp_le_pa_create_sync cp = {0};
2083         struct hci_conn *conn;
2084         int err = 0;
2085
2086         hci_dev_lock(hdev);
2087
2088         rcu_read_lock();
2089
2090         /* The spec allows only one pending LE Periodic Advertising Create
2091          * Sync command at a time. If the command is pending now, don't do
2092          * anything. We check for pending connections after each PA Sync
2093          * Established event.
2094          *
2095          * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
2096          * page 2493:
2097          *
2098          * If the Host issues this command when another HCI_LE_Periodic_
2099          * Advertising_Create_Sync command is pending, the Controller shall
2100          * return the error code Command Disallowed (0x0C).
2101          */
2102         list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
2103                 if (test_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags))
2104                         goto unlock;
2105         }
2106
2107         list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
2108                 if (hci_conn_check_create_pa_sync(conn)) {
2109                         struct bt_iso_qos *qos = &conn->iso_qos;
2110
2111                         cp.options = qos->bcast.options;
2112                         cp.sid = conn->sid;
2113                         cp.addr_type = conn->dst_type;
2114                         bacpy(&cp.addr, &conn->dst);
2115                         cp.skip = cpu_to_le16(qos->bcast.skip);
2116                         cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
2117                         cp.sync_cte_type = qos->bcast.sync_cte_type;
2118
2119                         break;
2120                 }
2121         }
2122
2123 unlock:
2124         rcu_read_unlock();
2125
2126         hci_dev_unlock(hdev);
2127
2128         if (bacmp(&cp.addr, BDADDR_ANY)) {
2129                 hci_dev_set_flag(hdev, HCI_PA_SYNC);
2130                 set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
2131
2132                 err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
2133                                             sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2134                 if (!err)
2135                         err = hci_update_passive_scan_sync(hdev);
2136
2137                 if (err) {
2138                         hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2139                         clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
2140                 }
2141         }
2142
2143         return err;
2144 }
2145
2146 int hci_pa_create_sync_pending(struct hci_dev *hdev)
2147 {
2148         /* Queue create_pa_sync, which also updates passive scanning */
2149         return hci_cmd_sync_queue(hdev, create_pa_sync,
2150                                   NULL, create_pa_complete);
2151 }
2152
2153 struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
2154                                     __u8 dst_type, __u8 sid,
2155                                     struct bt_iso_qos *qos)
2156 {
2157         struct hci_conn *conn;
2158
2159         conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_SLAVE);
2160         if (IS_ERR(conn))
2161                 return conn;
2162
2163         conn->iso_qos = *qos;
2164         conn->dst_type = dst_type;
2165         conn->sid = sid;
2166         conn->state = BT_LISTEN;
2167
2168         hci_conn_hold(conn);
2169
2170         hci_pa_create_sync_pending(hdev);
2171
2172         return conn;
2173 }
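
/* Usage sketch (illustrative only; dst_type, sid and qos come from the
 * caller): a broadcast sink typically creates a PA sync listener with
 *
 *	conn = hci_pa_create_sync(hdev, &dst, dst_type, sid, &qos);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 *
 * The returned hci_conn sits in BT_LISTEN until the PA Sync Established
 * event arrives; create_pa_sync() above only issues the command for one
 * pending listener at a time, as required by the spec.
 */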
2174
2175 static bool hci_conn_check_create_big_sync(struct hci_conn *conn)
2176 {
2177         if (!conn->num_bis)
2178                 return false;
2179
2180         return true;
2181 }
2182
2183 static void big_create_sync_complete(struct hci_dev *hdev, void *data, int err)
2184 {
2185         bt_dev_dbg(hdev, "");
2186
2187         if (err)
2188                 bt_dev_err(hdev, "Unable to create BIG sync: %d", err);
2189 }
2190
2191 static int big_create_sync(struct hci_dev *hdev, void *data)
2192 {
2193         DEFINE_FLEX(struct hci_cp_le_big_create_sync, pdu, bis, num_bis, 0x11);
2194         struct hci_conn *conn;
2195
2196         rcu_read_lock();
2197
2198         pdu->num_bis = 0;
2199
2200         /* The spec allows only one pending LE BIG Create Sync command at
2201          * a time. If the command is pending now, don't do anything. We
2202          * check for pending connections after each BIG Sync Established
2203          * event.
2204          *
2205          * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
2206          * page 2586:
2207          *
2208          * If the Host sends this command when the Controller is in the
2209          * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_
2210          * Established event has not been generated, the Controller shall
2211          * return the error code Command Disallowed (0x0C).
2212          */
2213         list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
2214                 if (test_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags))
2215                         goto unlock;
2216         }
2217
2218         list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
2219                 if (hci_conn_check_create_big_sync(conn)) {
2220                         struct bt_iso_qos *qos = &conn->iso_qos;
2221
2222                         set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
2223
2224                         pdu->handle = qos->bcast.big;
2225                         pdu->sync_handle = cpu_to_le16(conn->sync_handle);
2226                         pdu->encryption = qos->bcast.encryption;
2227                         memcpy(pdu->bcode, qos->bcast.bcode,
2228                                sizeof(pdu->bcode));
2229                         pdu->mse = qos->bcast.mse;
2230                         pdu->timeout = cpu_to_le16(qos->bcast.timeout);
2231                         pdu->num_bis = conn->num_bis;
2232                         memcpy(pdu->bis, conn->bis, conn->num_bis);
2233
2234                         break;
2235                 }
2236         }
2237
2238 unlock:
2239         rcu_read_unlock();
2240
2241         if (!pdu->num_bis)
2242                 return 0;
2243
2244         return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
2245                             struct_size(pdu, bis, pdu->num_bis), pdu);
2246 }
2247
2248 int hci_le_big_create_sync_pending(struct hci_dev *hdev)
2249 {
2250         /* Queue big_create_sync */
2251         return hci_cmd_sync_queue_once(hdev, big_create_sync,
2252                                        NULL, big_create_sync_complete);
2253 }
2254
2255 int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
2256                            struct bt_iso_qos *qos,
2257                            __u16 sync_handle, __u8 num_bis, __u8 bis[])
2258 {
2259         int err;
2260
2261         if (num_bis < 0x01 || num_bis > ISO_MAX_NUM_BIS)
2262                 return -EINVAL;
2263
2264         err = qos_set_big(hdev, qos);
2265         if (err)
2266                 return err;
2267
2268         if (hcon) {
2269                 /* Update hcon QoS */
2270                 hcon->iso_qos = *qos;
2271
2272                 hcon->num_bis = num_bis;
2273                 memcpy(hcon->bis, bis, num_bis);
2274         }
2275
2276         return hci_le_big_create_sync_pending(hdev);
2277 }
2278
2279 static void create_big_complete(struct hci_dev *hdev, void *data, int err)
2280 {
2281         struct hci_conn *conn = data;
2282
2283         bt_dev_dbg(hdev, "conn %p", conn);
2284
2285         if (err) {
2286                 bt_dev_err(hdev, "Unable to create BIG: %d", err);
2287                 hci_connect_cfm(conn, err);
2288                 hci_conn_del(conn);
2289         }
2290 }
2291
2292 struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
2293                               struct bt_iso_qos *qos,
2294                               __u8 base_len, __u8 *base)
2295 {
2296         struct hci_conn *conn;
2297         struct hci_conn *parent;
2298         __u8 eir[HCI_MAX_PER_AD_LENGTH];
2299         struct hci_link *link;
2300
2301         /* Look for any BIS that is open for rebinding */
2302         conn = hci_conn_hash_lookup_big_state(hdev, qos->bcast.big, BT_OPEN);
2303         if (conn) {
2304                 memcpy(qos, &conn->iso_qos, sizeof(*qos));
2305                 conn->state = BT_CONNECTED;
2306                 return conn;
2307         }
2308
2309         if (base_len && base)
2310                 base_len = eir_append_service_data(eir, 0,  0x1851,
2311                                                    base, base_len);
2312
2313         /* We need a hci_conn object using BDADDR_ANY as dst */
2314         conn = hci_add_bis(hdev, dst, qos, base_len, eir);
2315         if (IS_ERR(conn))
2316                 return conn;
2317
2318         /* Update LINK PHYs according to QoS preference */
2319         conn->le_tx_phy = qos->bcast.out.phy;
2320
2321
2322         /* Add Basic Audio Announcement into Periodic Adv Data if BASE is set */
2323         if (base_len && base) {
2324                 memcpy(conn->le_per_adv_data,  eir, sizeof(eir));
2325                 conn->le_per_adv_data_len = base_len;
2326         }
2327
2328         hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
2329                           conn->le_tx_phy ? conn->le_tx_phy :
2330                           hdev->le_tx_def_phys);
2331
2332         conn->iso_qos = *qos;
2333         conn->state = BT_BOUND;
2334
2335         /* Link BISes together */
2336         parent = hci_conn_hash_lookup_big(hdev,
2337                                           conn->iso_qos.bcast.big);
2338         if (parent && parent != conn) {
2339                 link = hci_conn_link(parent, conn);
2340                 hci_conn_drop(conn);
2341                 if (!link)
2342                         return ERR_PTR(-ENOLINK);
2343         }
2344
2345         return conn;
2346 }
2347
2348 static void bis_mark_per_adv(struct hci_conn *conn, void *data)
2349 {
2350         struct iso_list_data *d = data;
2351
2352         /* Skip if not broadcast/ANY address */
2353         if (bacmp(&conn->dst, BDADDR_ANY))
2354                 return;
2355
2356         if (d->big != conn->iso_qos.bcast.big ||
2357             d->bis == BT_ISO_QOS_BIS_UNSET ||
2358             d->bis != conn->iso_qos.bcast.bis)
2359                 return;
2360
2361         set_bit(HCI_CONN_PER_ADV, &conn->flags);
2362 }
2363
2364 struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
2365                                  __u8 dst_type, struct bt_iso_qos *qos,
2366                                  __u8 base_len, __u8 *base)
2367 {
2368         struct hci_conn *conn;
2369         int err;
2370         struct iso_list_data data;
2371
2372         conn = hci_bind_bis(hdev, dst, qos, base_len, base);
2373         if (IS_ERR(conn))
2374                 return conn;
2375
2376         if (conn->state == BT_CONNECTED)
2377                 return conn;
2378
2379         data.big = qos->bcast.big;
2380         data.bis = qos->bcast.bis;
2381
2382         /* Set HCI_CONN_PER_ADV for all bound connections, to mark that
2383          * the start periodic advertising and create BIG commands have
2384          * been queued
2385          */
2386         hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK,
2387                                  BT_BOUND, &data);
2388
2389         /* Queue start periodic advertising and create BIG */
2390         err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
2391                                  create_big_complete);
2392         if (err < 0) {
2393                 hci_conn_drop(conn);
2394                 return ERR_PTR(err);
2395         }
2396
2397         return conn;
2398 }
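
/* Usage sketch (illustrative only; dst_type, qos and the BASE buffer are
 * placeholders supplied by the caller): a broadcast source binds and starts
 * a BIS with
 *
 *	conn = hci_connect_bis(hdev, BDADDR_ANY, dst_type, &qos,
 *			       base_len, base);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 *
 * which queues both the periodic advertising start and the LE Create BIG
 * command via create_big_sync().
 */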
2399
2400 struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
2401                                  __u8 dst_type, struct bt_iso_qos *qos)
2402 {
2403         struct hci_conn *le;
2404         struct hci_conn *cis;
2405         struct hci_link *link;
2406
2407         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2408                 le = hci_connect_le(hdev, dst, dst_type, false,
2409                                     BT_SECURITY_LOW,
2410                                     HCI_LE_CONN_TIMEOUT,
2411                                     HCI_ROLE_SLAVE, 0, 0);
2412         else
2413                 le = hci_connect_le_scan(hdev, dst, dst_type,
2414                                          BT_SECURITY_LOW,
2415                                          HCI_LE_CONN_TIMEOUT,
2416                                          CONN_REASON_ISO_CONNECT);
2417         if (IS_ERR(le))
2418                 return le;
2419
2420         hci_iso_qos_setup(hdev, le, &qos->ucast.out,
2421                           le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
2422         hci_iso_qos_setup(hdev, le, &qos->ucast.in,
2423                           le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);
2424
2425         cis = hci_bind_cis(hdev, dst, dst_type, qos);
2426         if (IS_ERR(cis)) {
2427                 hci_conn_drop(le);
2428                 return cis;
2429         }
2430
2431         link = hci_conn_link(le, cis);
2432         hci_conn_drop(cis);
2433         if (!link) {
2434                 hci_conn_drop(le);
2435                 return ERR_PTR(-ENOLINK);
2436         }
2437
2438         cis->state = BT_CONNECT;
2439
2440         hci_le_create_cis_pending(hdev);
2441
2442         return cis;
2443 }
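
/* The CIS connect flow above is therefore: create (or reuse) the LE ACL,
 * derive default ISO QoS from the ACL PHY/interval, bind a CIS on a free
 * CIG/CIS pair, link it to the ACL as parent, and finally queue the LE
 * Create CIS command once the ACL reaches BT_CONNECTED
 * (hci_le_create_cis_pending / hci_conn_check_create_cis).
 */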
2444
2445 /* Check link security requirement */
2446 int hci_conn_check_link_mode(struct hci_conn *conn)
2447 {
2448         BT_DBG("hcon %p", conn);
2449
2450         /* In Secure Connections Only mode, it is required that Secure
2451          * Connections is used and the link is encrypted with AES-CCM
2452          * using a P-256 authenticated combination key.
2453          */
2454         if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2455                 if (!hci_conn_sc_enabled(conn) ||
2456                     !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2457                     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2458                         return 0;
2459         }
2460
2461          /* AES encryption is required for Level 4:
2462           *
2463           * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2464           * page 1319:
2465           *
2466           * 128-bit equivalent strength for link and encryption keys
2467           * required using FIPS approved algorithms (E0 not allowed,
2468           * SAFER+ not allowed, and P-192 not allowed; encryption key
2469           * not shortened)
2470           */
2471         if (conn->sec_level == BT_SECURITY_FIPS &&
2472             !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2473                 bt_dev_err(conn->hdev,
2474                            "Invalid security: Missing AES-CCM usage");
2475                 return 0;
2476         }
2477
2478         if (hci_conn_ssp_enabled(conn) &&
2479             !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2480                 return 0;
2481
2482         return 1;
2483 }
2484
2485 /* Authenticate remote device */
2486 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
2487 {
2488         BT_DBG("hcon %p", conn);
2489
2490         if (conn->pending_sec_level > sec_level)
2491                 sec_level = conn->pending_sec_level;
2492
2493         if (sec_level > conn->sec_level)
2494                 conn->pending_sec_level = sec_level;
2495         else if (test_bit(HCI_CONN_AUTH, &conn->flags))
2496                 return 1;
2497
2498         /* Make sure we preserve an existing MITM requirement */
2499         auth_type |= (conn->auth_type & 0x01);
2500
2501         conn->auth_type = auth_type;
2502
2503         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2504                 struct hci_cp_auth_requested cp;
2505
2506                 cp.handle = cpu_to_le16(conn->handle);
2507                 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
2508                              sizeof(cp), &cp);
2509
2510                 /* Set the ENCRYPT_PEND to trigger encryption after
2511                  * authentication.
2512                  */
2513                 if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2514                         set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2515         }
2516
2517         return 0;
2518 }
2519
2520 /* Encrypt the link */
2521 static void hci_conn_encrypt(struct hci_conn *conn)
2522 {
2523         BT_DBG("hcon %p", conn);
2524
2525         if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2526                 struct hci_cp_set_conn_encrypt cp;
2527                 cp.handle  = cpu_to_le16(conn->handle);
2528                 cp.encrypt = 0x01;
2529                 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2530                              &cp);
2531         }
2532 }
2533
2534 /* Enable security */
2535 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
2536                       bool initiator)
2537 {
2538         BT_DBG("hcon %p", conn);
2539
2540         if (conn->type == LE_LINK)
2541                 return smp_conn_security(conn, sec_level);
2542
2543         /* For SDP we don't need the link key. */
2544         if (sec_level == BT_SECURITY_SDP)
2545                 return 1;
2546
2547         /* For non-2.1 devices and low security level we don't need the link
2548          * key. */
2549         if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
2550                 return 1;
2551
2552         /* For other security levels we need the link key. */
2553         if (!test_bit(HCI_CONN_AUTH, &conn->flags))
2554                 goto auth;
2555
2556         switch (conn->key_type) {
2557         case HCI_LK_AUTH_COMBINATION_P256:
2558                 /* An authenticated FIPS approved combination key has
2559                  * sufficient security for security level 4 or lower.
2560                  */
2561                 if (sec_level <= BT_SECURITY_FIPS)
2562                         goto encrypt;
2563                 break;
2564         case HCI_LK_AUTH_COMBINATION_P192:
2565                 /* An authenticated combination key has sufficient security for
2566                  * security level 3 or lower.
2567                  */
2568                 if (sec_level <= BT_SECURITY_HIGH)
2569                         goto encrypt;
2570                 break;
2571         case HCI_LK_UNAUTH_COMBINATION_P192:
2572         case HCI_LK_UNAUTH_COMBINATION_P256:
2573                 /* An unauthenticated combination key has sufficient security
2574                  * for security level 2 or lower.
2575                  */
2576                 if (sec_level <= BT_SECURITY_MEDIUM)
2577                         goto encrypt;
2578                 break;
2579         case HCI_LK_COMBINATION:
2580                 /* A combination key always has sufficient security for
2581                  * security level 2 or lower. High security level requires that
2582                  * the combination key was generated using the maximum PIN code
2583                  * length (16). For pre-2.1 units.
2584                  */
2585                 if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
2586                         goto encrypt;
2587                 break;
2588         default:
2589                 break;
2590         }
2591
2592 auth:
2593         if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
2594                 return 0;
2595
2596         if (initiator)
2597                 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2598
2599         if (!hci_conn_auth(conn, sec_level, auth_type))
2600                 return 0;
2601
2602 encrypt:
2603         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
2604                 /* Ensure that the encryption key size has been read,
2605                  * otherwise stall the upper layer responses.
2606                  */
2607                 if (!conn->enc_key_size)
2608                         return 0;
2609
2610                 /* Nothing else needed, all requirements are met */
2611                 return 1;
2612         }
2613
2614         hci_conn_encrypt(conn);
2615         return 0;
2616 }
2617 EXPORT_SYMBOL(hci_conn_security);
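
/* Summary of the key-type checks in hci_conn_security() above (BR/EDR
 * links): an authenticated P-256 combination key satisfies up to
 * BT_SECURITY_FIPS, an authenticated P-192 key up to BT_SECURITY_HIGH,
 * unauthenticated combination keys up to BT_SECURITY_MEDIUM, and a legacy
 * combination key up to BT_SECURITY_MEDIUM unless it was generated from a
 * 16 digit PIN, in which case it also satisfies the higher levels.
 */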
2618
2619 /* Check secure link requirement */
2620 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2621 {
2622         BT_DBG("hcon %p", conn);
2623
2624         /* Accept if the required security level is below high/FIPS */
2625         if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2626                 return 1;
2627
2628         /* Accept if secure or higher security level is already present */
2629         if (conn->sec_level == BT_SECURITY_HIGH ||
2630             conn->sec_level == BT_SECURITY_FIPS)
2631                 return 1;
2632
2633         /* Reject not secure link */
2634         return 0;
2635 }
2636 EXPORT_SYMBOL(hci_conn_check_secure);
2637
2638 /* Switch role */
2639 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2640 {
2641         BT_DBG("hcon %p", conn);
2642
2643         if (role == conn->role)
2644                 return 1;
2645
2646         if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2647                 struct hci_cp_switch_role cp;
2648                 bacpy(&cp.bdaddr, &conn->dst);
2649                 cp.role = role;
2650                 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2651         }
2652
2653         return 0;
2654 }
2655 EXPORT_SYMBOL(hci_conn_switch_role);
2656
2657 /* Enter active mode */
2658 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
2659 {
2660         struct hci_dev *hdev = conn->hdev;
2661
2662         BT_DBG("hcon %p mode %d", conn, conn->mode);
2663
2664         if (conn->mode != HCI_CM_SNIFF)
2665                 goto timer;
2666
2667         if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
2668                 goto timer;
2669
2670         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2671                 struct hci_cp_exit_sniff_mode cp;
2672                 cp.handle = cpu_to_le16(conn->handle);
2673                 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
2674         }
2675
2676 timer:
2677         if (hdev->idle_timeout > 0)
2678                 queue_delayed_work(hdev->workqueue, &conn->idle_work,
2679                                    msecs_to_jiffies(hdev->idle_timeout));
2680 }
2681
2682 /* Drop all connections on the device */
2683 void hci_conn_hash_flush(struct hci_dev *hdev)
2684 {
2685         struct list_head *head = &hdev->conn_hash.list;
2686         struct hci_conn *conn;
2687
2688         BT_DBG("hdev %s", hdev->name);
2689
2690         /* We should not traverse the list here, because hci_conn_del
2691          * can remove extra links, which may cause the list traversal
2692          * to hit items that have already been released.
2693          */
2694         while ((conn = list_first_entry_or_null(head,
2695                                                 struct hci_conn,
2696                                                 list)) != NULL) {
2697                 conn->state = BT_CLOSED;
2698                 hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
2699                 hci_conn_del(conn);
2700         }
2701 }
2702
2703 static u32 get_link_mode(struct hci_conn *conn)
2704 {
2705         u32 link_mode = 0;
2706
2707         if (conn->role == HCI_ROLE_MASTER)
2708                 link_mode |= HCI_LM_MASTER;
2709
2710         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2711                 link_mode |= HCI_LM_ENCRYPT;
2712
2713         if (test_bit(HCI_CONN_AUTH, &conn->flags))
2714                 link_mode |= HCI_LM_AUTH;
2715
2716         if (test_bit(HCI_CONN_SECURE, &conn->flags))
2717                 link_mode |= HCI_LM_SECURE;
2718
2719         if (test_bit(HCI_CONN_FIPS, &conn->flags))
2720                 link_mode |= HCI_LM_FIPS;
2721
2722         return link_mode;
2723 }
2724
2725 int hci_get_conn_list(void __user *arg)
2726 {
2727         struct hci_conn *c;
2728         struct hci_conn_list_req req, *cl;
2729         struct hci_conn_info *ci;
2730         struct hci_dev *hdev;
2731         int n = 0, size, err;
2732
2733         if (copy_from_user(&req, arg, sizeof(req)))
2734                 return -EFAULT;
2735
2736         if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
2737                 return -EINVAL;
2738
2739         size = sizeof(req) + req.conn_num * sizeof(*ci);
2740
2741         cl = kmalloc(size, GFP_KERNEL);
2742         if (!cl)
2743                 return -ENOMEM;
2744
2745         hdev = hci_dev_get(req.dev_id);
2746         if (!hdev) {
2747                 kfree(cl);
2748                 return -ENODEV;
2749         }
2750
2751         ci = cl->conn_info;
2752
2753         hci_dev_lock(hdev);
2754         list_for_each_entry(c, &hdev->conn_hash.list, list) {
2755                 bacpy(&(ci + n)->bdaddr, &c->dst);
2756                 (ci + n)->handle = c->handle;
2757                 (ci + n)->type  = c->type;
2758                 (ci + n)->out   = c->out;
2759                 (ci + n)->state = c->state;
2760                 (ci + n)->link_mode = get_link_mode(c);
2761                 if (++n >= req.conn_num)
2762                         break;
2763         }
2764         hci_dev_unlock(hdev);
2765
2766         cl->dev_id = hdev->id;
2767         cl->conn_num = n;
2768         size = sizeof(req) + n * sizeof(*ci);
2769
2770         hci_dev_put(hdev);
2771
2772         err = copy_to_user(arg, cl, size);
2773         kfree(cl);
2774
2775         return err ? -EFAULT : 0;
2776 }
2777
2778 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
2779 {
2780         struct hci_conn_info_req req;
2781         struct hci_conn_info ci;
2782         struct hci_conn *conn;
2783         char __user *ptr = arg + sizeof(req);
2784
2785         if (copy_from_user(&req, arg, sizeof(req)))
2786                 return -EFAULT;
2787
2788         hci_dev_lock(hdev);
2789         conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
2790         if (conn) {
2791                 bacpy(&ci.bdaddr, &conn->dst);
2792                 ci.handle = conn->handle;
2793                 ci.type  = conn->type;
2794                 ci.out   = conn->out;
2795                 ci.state = conn->state;
2796                 ci.link_mode = get_link_mode(conn);
2797         }
2798         hci_dev_unlock(hdev);
2799
2800         if (!conn)
2801                 return -ENOENT;
2802
2803         return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
2804 }
2805
2806 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
2807 {
2808         struct hci_auth_info_req req;
2809         struct hci_conn *conn;
2810
2811         if (copy_from_user(&req, arg, sizeof(req)))
2812                 return -EFAULT;
2813
2814         hci_dev_lock(hdev);
2815         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
2816         if (conn)
2817                 req.type = conn->auth_type;
2818         hci_dev_unlock(hdev);
2819
2820         if (!conn)
2821                 return -ENOENT;
2822
2823         return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
2824 }
2825
2826 struct hci_chan *hci_chan_create(struct hci_conn *conn)
2827 {
2828         struct hci_dev *hdev = conn->hdev;
2829         struct hci_chan *chan;
2830
2831         BT_DBG("%s hcon %p", hdev->name, conn);
2832
2833         if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2834                 BT_DBG("Refusing to create new hci_chan");
2835                 return NULL;
2836         }
2837
2838         chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2839         if (!chan)
2840                 return NULL;
2841
2842         chan->conn = hci_conn_get(conn);
2843         skb_queue_head_init(&chan->data_q);
2844         chan->state = BT_CONNECTED;
2845
2846         list_add_rcu(&chan->list, &conn->chan_list);
2847
2848         return chan;
2849 }
2850
2851 void hci_chan_del(struct hci_chan *chan)
2852 {
2853         struct hci_conn *conn = chan->conn;
2854         struct hci_dev *hdev = conn->hdev;
2855
2856         BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
2857
2858         list_del_rcu(&chan->list);
2859
2860         synchronize_rcu();
2861
2862         /* Prevent new hci_chan objects from being created for this hci_conn */
2863         set_bit(HCI_CONN_DROP, &conn->flags);
2864
2865         hci_conn_put(conn);
2866
2867         skb_queue_purge(&chan->data_q);
2868         kfree(chan);
2869 }
2870
2871 void hci_chan_list_flush(struct hci_conn *conn)
2872 {
2873         struct hci_chan *chan, *n;
2874
2875         BT_DBG("hcon %p", conn);
2876
2877         list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2878                 hci_chan_del(chan);
2879 }
2880
2881 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
2882                                                  __u16 handle)
2883 {
2884         struct hci_chan *hchan;
2885
2886         list_for_each_entry(hchan, &hcon->chan_list, list) {
2887                 if (hchan->handle == handle)
2888                         return hchan;
2889         }
2890
2891         return NULL;
2892 }
2893
2894 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
2895 {
2896         struct hci_conn_hash *h = &hdev->conn_hash;
2897         struct hci_conn *hcon;
2898         struct hci_chan *hchan = NULL;
2899
2900         rcu_read_lock();
2901
2902         list_for_each_entry_rcu(hcon, &h->list, list) {
2903                 hchan = __hci_chan_lookup_handle(hcon, handle);
2904                 if (hchan)
2905                         break;
2906         }
2907
2908         rcu_read_unlock();
2909
2910         return hchan;
2911 }
2912
2913 u32 hci_conn_get_phy(struct hci_conn *conn)
2914 {
2915         u32 phys = 0;
2916
2917         /* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
2918          * Table 6.2: Packets defined for synchronous, asynchronous, and
2919          * CPB logical transport types.
2920          */
2921         switch (conn->type) {
2922         case SCO_LINK:
2923                 /* SCO logical transport (1 Mb/s):
2924                  * HV1, HV2, HV3 and DV.
2925                  */
2926                 phys |= BT_PHY_BR_1M_1SLOT;
2927
2928                 break;
2929
2930         case ACL_LINK:
2931                 /* ACL logical transport (1 Mb/s) ptt=0:
2932                  * DH1, DM3, DH3, DM5 and DH5.
2933                  */
2934                 phys |= BT_PHY_BR_1M_1SLOT;
2935
2936                 if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
2937                         phys |= BT_PHY_BR_1M_3SLOT;
2938
2939                 if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
2940                         phys |= BT_PHY_BR_1M_5SLOT;
2941
2942                 /* ACL logical transport (2 Mb/s) ptt=1:
2943                  * 2-DH1, 2-DH3 and 2-DH5.
2944                  */
2945                 if (!(conn->pkt_type & HCI_2DH1))
2946                         phys |= BT_PHY_EDR_2M_1SLOT;
2947
2948                 if (!(conn->pkt_type & HCI_2DH3))
2949                         phys |= BT_PHY_EDR_2M_3SLOT;
2950
2951                 if (!(conn->pkt_type & HCI_2DH5))
2952                         phys |= BT_PHY_EDR_2M_5SLOT;
2953
2954                 /* ACL logical transport (3 Mb/s) ptt=1:
2955                  * 3-DH1, 3-DH3 and 3-DH5.
2956                  */
2957                 if (!(conn->pkt_type & HCI_3DH1))
2958                         phys |= BT_PHY_EDR_3M_1SLOT;
2959
2960                 if (!(conn->pkt_type & HCI_3DH3))
2961                         phys |= BT_PHY_EDR_3M_3SLOT;
2962
2963                 if (!(conn->pkt_type & HCI_3DH5))
2964                         phys |= BT_PHY_EDR_3M_5SLOT;
2965
2966                 break;
2967
2968         case ESCO_LINK:
2969                 /* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
2970                 phys |= BT_PHY_BR_1M_1SLOT;
2971
2972                 if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
2973                         phys |= BT_PHY_BR_1M_3SLOT;
2974
2975                 /* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
2976                 if (!(conn->pkt_type & ESCO_2EV3))
2977                         phys |= BT_PHY_EDR_2M_1SLOT;
2978
2979                 if (!(conn->pkt_type & ESCO_2EV5))
2980                         phys |= BT_PHY_EDR_2M_3SLOT;
2981
2982                 /* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
2983                 if (!(conn->pkt_type & ESCO_3EV3))
2984                         phys |= BT_PHY_EDR_3M_1SLOT;
2985
2986                 if (!(conn->pkt_type & ESCO_3EV5))
2987                         phys |= BT_PHY_EDR_3M_3SLOT;
2988
2989                 break;
2990
2991         case LE_LINK:
2992                 if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
2993                         phys |= BT_PHY_LE_1M_TX;
2994
2995                 if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
2996                         phys |= BT_PHY_LE_1M_RX;
2997
2998                 if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
2999                         phys |= BT_PHY_LE_2M_TX;
3000
3001                 if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
3002                         phys |= BT_PHY_LE_2M_RX;
3003
3004                 if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
3005                         phys |= BT_PHY_LE_CODED_TX;
3006
3007                 if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
3008                         phys |= BT_PHY_LE_CODED_RX;
3009
3010                 break;
3011         }
3012
3013         return phys;
3014 }
3015
3016 static int abort_conn_sync(struct hci_dev *hdev, void *data)
3017 {
3018         struct hci_conn *conn = data;
3019
3020         if (!hci_conn_valid(hdev, conn))
3021                 return -ECANCELED;
3022
3023         return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
3024 }
3025
3026 int hci_abort_conn(struct hci_conn *conn, u8 reason)
3027 {
3028         struct hci_dev *hdev = conn->hdev;
3029
3030         /* If abort_reason has already been set it means the connection is
3031          * already being aborted so don't attempt to overwrite it.
3032          */
3033         if (conn->abort_reason)
3034                 return 0;
3035
3036         bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);
3037
3038         conn->abort_reason = reason;
3039
3040         /* If the connection is pending, check the command opcode since it
3041          * might be blocking on hci_cmd_sync_work while waiting for its
3042          * respective event, so we need hci_cmd_sync_cancel to cancel it.
3043          *
3044          * hci_connect_le serializes the connection attempts so only one
3045          * connection can be in BT_CONNECT at a time.
3046          */
3047         if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
3048                 switch (hci_skb_event(hdev->sent_cmd)) {
3049                 case HCI_EV_CONN_COMPLETE:
3050                 case HCI_EV_LE_CONN_COMPLETE:
3051                 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
3052                 case HCI_EVT_LE_CIS_ESTABLISHED:
3053                         hci_cmd_sync_cancel(hdev, ECANCELED);
3054                         break;
3055                 }
3056         /* Cancel connect attempt if still queued/pending */
3057         } else if (!hci_cancel_connect_sync(hdev, conn)) {
3058                 return 0;
3059         }
3060
3061         /* Run immediately if on cmd_sync_work since this may be called
3062          * as a result of MGMT_OP_DISCONNECT/MGMT_OP_UNPAIR, which already
3063          * queues its callback on cmd_sync_work.
3064          */
3065         return hci_cmd_sync_run_once(hdev, abort_conn_sync, conn, NULL);
3066 }