// SPDX-License-Identifier: GPL-2.0
/*
 * BlueZ - Bluetooth protocol stack for Linux
 *
 * Copyright (C) 2021 Intel Corporation
 * Copyright 2023 NXP
 */

#include <linux/property.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_codec.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "eir.h"
#include "msft.h"
#include "aosp.h"
#include "leds.h"

static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        bt_dev_dbg(hdev, "result 0x%2.2x", result);

        if (hdev->req_status != HCI_REQ_PEND)
                return;

        hdev->req_result = result;
        hdev->req_status = HCI_REQ_DONE;

        /* Free the request command so it is not used as the response */
        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;

        if (skb) {
                struct sock *sk = hci_skb_sk(skb);

                /* Drop sk reference if set */
                if (sk)
                        sock_put(sk);

                hdev->req_rsp = skb_get(skb);
        }

        wake_up_interruptible(&hdev->req_wait_q);
}

struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen,
                                   const void *param, struct sock *sk)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                skb_put_data(skb, param, plen);

        bt_dev_dbg(hdev, "skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        /* Grab a reference if the command needs to be associated with a sock
         * (e.g. the mgmt socket that initiated the command).
         */
        if (sk) {
                hci_skb_sk(skb) = sk;
                sock_hold(sk);
        }

        return skb;
}

static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
                             const void *param, u8 event, struct sock *sk)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
        if (!skb) {
                bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
                           opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        hci_skb_event(skb) = event;

        skb_queue_tail(&req->cmd_q, skb);
}

static int hci_req_sync_run(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
        bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

static void hci_request_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout,
                                  struct sock *sk)
{
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);

        hci_request_init(&req, hdev);

        hci_cmd_sync_add(&req, opcode, plen, param, event, sk);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_sync_run(&req);
        if (err < 0)
                return ERR_PTR(err);

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                                               hdev->req_status != HCI_REQ_PEND,
                                               timeout);

        if (err == -ERESTARTSYS)
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = 0;
        hdev->req_result = 0;
        skb = hdev->req_rsp;
        hdev->req_rsp = NULL;

        bt_dev_dbg(hdev, "end: err %d", err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_sk);

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                             const void *param, u32 timeout)
{
        struct sk_buff *skb;

        if (!test_bit(HCI_UP, &hdev->flags))
                return ERR_PTR(-ENETDOWN);

        bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

        hci_req_sync_lock(hdev);
        skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
        hci_req_sync_unlock(hdev);

        return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
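
/* Usage sketch (illustrative only, not part of the original file): a caller
 * can issue a command with hci_cmd_sync() and parse the response payload.
 * The opcode and response struct are real; the surrounding function is a
 * hypothetical example.
 */
#if 0
static int example_read_local_version(struct hci_dev *hdev)
{
        struct hci_rp_read_local_version *rp;
        struct sk_buff *skb;

        skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
                           HCI_CMD_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* A status-only response yields a NULL skb (see
         * __hci_cmd_sync_status_sk() below); treat missing or short
         * parameters as a protocol error. kfree_skb(NULL) is safe.
         */
        if (!skb || skb->len < sizeof(*rp)) {
                kfree_skb(skb);
                return -EILSEQ;
        }

        rp = (void *)skb->data;
        bt_dev_info(hdev, "HCI version %u", rp->hci_ver);

        kfree_skb(skb);
        return 0;
}
#endif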

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
                                 NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

/* This function requires the caller holds hdev->req_lock. */
int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
                             const void *param, u8 event, u32 timeout,
                             struct sock *sk)
{
        struct sk_buff *skb;
        u8 status;

        skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
        if (IS_ERR(skb)) {
                if (!event)
                        bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
                                   PTR_ERR(skb));
                return PTR_ERR(skb);
        }

        /* If the command returns a status event, skb will be set to NULL as
         * there are no parameters. In case of failure, IS_ERR(skb) would have
         * been true and the actual error could be found with PTR_ERR(skb).
         */
        if (!skb)
                return 0;

        status = skb->data[0];

        kfree_skb(skb);

        return status;
}
EXPORT_SYMBOL(__hci_cmd_sync_status_sk);

int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
                          const void *param, u32 timeout)
{
        return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
                                        NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_status);

int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
                        const void *param, u32 timeout)
{
        int err;

        hci_req_sync_lock(hdev);
        err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout);
        hci_req_sync_unlock(hdev);

        return err;
}
EXPORT_SYMBOL(hci_cmd_sync_status);
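
/* Usage sketch (illustrative, hypothetical wrapper): when only the command
 * outcome matters, hci_cmd_sync_status() avoids dealing with the skb.
 */
#if 0
static int example_write_scan_enable(struct hci_dev *hdev, u8 scan)
{
        /* Returns 0 on success, a positive HCI status on command failure,
         * or a negative errno on transport failure.
         */
        return hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
                                   sizeof(scan), &scan, HCI_CMD_TIMEOUT);
}
#endif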

static void hci_cmd_sync_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);

        bt_dev_dbg(hdev, "");

        /* Dequeue all entries and run them */
        while (1) {
                struct hci_cmd_sync_work_entry *entry;

                mutex_lock(&hdev->cmd_sync_work_lock);
                entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
                                                 struct hci_cmd_sync_work_entry,
                                                 list);
                if (entry)
                        list_del(&entry->list);
                mutex_unlock(&hdev->cmd_sync_work_lock);

                if (!entry)
                        break;

                bt_dev_dbg(hdev, "entry %p", entry);

                if (entry->func) {
                        int err;

                        hci_req_sync_lock(hdev);
                        err = entry->func(hdev, entry->data);
                        if (entry->destroy)
                                entry->destroy(hdev, entry->data, err);
                        hci_req_sync_unlock(hdev);
                }

                kfree(entry);
        }
}

static void hci_cmd_sync_cancel_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work);

        cancel_delayed_work_sync(&hdev->cmd_timer);
        cancel_delayed_work_sync(&hdev->ncmd_timer);
        atomic_set(&hdev->cmd_cnt, 1);

        wake_up_interruptible(&hdev->req_wait_q);
}

static int hci_scan_disable_sync(struct hci_dev *hdev);
static int scan_disable_sync(struct hci_dev *hdev, void *data)
{
        return hci_scan_disable_sync(hdev);
}

static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
{
        return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN, 0);
}

static void le_scan_disable(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        int status;

        bt_dev_dbg(hdev, "");
        hci_dev_lock(hdev);

        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                goto _return;

        status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL);
        if (status) {
                bt_dev_err(hdev, "failed to disable LE scan: %d", status);
                goto _return;
        }

        /* If we were running an LE-only scan, change the discovery state. If
         * we were running both LE and BR/EDR inquiry simultaneously, and
         * BR/EDR inquiry is already finished, stop discovery; otherwise
         * BR/EDR inquiry will stop discovery when finished. If we are
         * resolving a remote device name, do not change the discovery state.
         */

        if (hdev->discovery.type == DISCOV_TYPE_LE)
                goto discov_stopped;

        if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
                goto _return;

        if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
                if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
                    hdev->discovery.state != DISCOVERY_RESOLVING)
                        goto discov_stopped;

                goto _return;
        }

        status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL);
        if (status) {
                bt_dev_err(hdev, "inquiry failed: status %d", status);
                goto discov_stopped;
        }

        goto _return;

discov_stopped:
        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

_return:
        hci_dev_unlock(hdev);
}

static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
                                       u8 filter_dup);

static int reenable_adv_sync(struct hci_dev *hdev, void *data)
{
        bt_dev_dbg(hdev, "");

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return 0;

        if (hdev->cur_adv_instance) {
                return hci_schedule_adv_instance_sync(hdev,
                                                      hdev->cur_adv_instance,
                                                      true);
        } else {
                if (ext_adv_capable(hdev)) {
                        hci_start_ext_adv_sync(hdev, 0x00);
                } else {
                        hci_update_adv_data_sync(hdev, 0x00);
                        hci_update_scan_rsp_data_sync(hdev, 0x00);
                        hci_enable_advertising_sync(hdev);
                }
        }

        return 0;
}

static void reenable_adv(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            reenable_adv_work);
        int status;

        bt_dev_dbg(hdev, "");

        hci_dev_lock(hdev);

        status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL);
        if (status)
                bt_dev_err(hdev, "failed to reenable ADV: %d", status);

        hci_dev_unlock(hdev);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk,
                                u8 instance, bool force)
{
        struct adv_info *adv_instance, *n, *next_instance = NULL;
        int err;
        u8 rem_inst;

        /* Cancel any timeout concerning the removed instance(s). */
        if (!instance || hdev->cur_adv_instance == instance)
                cancel_adv_timeout(hdev);

        /* Get the next instance to advertise BEFORE we remove
         * the current one. This can be the same instance again
         * if there is only one instance.
         */
        if (instance && hdev->cur_adv_instance == instance)
                next_instance = hci_get_next_instance(hdev, instance);

        if (instance == 0x00) {
                list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
                                         list) {
                        if (!(force || adv_instance->timeout))
                                continue;

                        rem_inst = adv_instance->instance;
                        err = hci_remove_adv_instance(hdev, rem_inst);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, rem_inst);
                }
        } else {
                adv_instance = hci_find_adv_instance(hdev, instance);

                if (force || (adv_instance && adv_instance->timeout &&
                              !adv_instance->remaining_time)) {
                        /* Don't advertise a removed instance. */
                        if (next_instance &&
                            next_instance->instance == instance)
                                next_instance = NULL;

                        err = hci_remove_adv_instance(hdev, instance);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, instance);
                }
        }

        if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return 0;

        if (next_instance && !ext_adv_capable(hdev))
                return hci_schedule_adv_instance_sync(hdev,
                                                      next_instance->instance,
                                                      false);

        return 0;
}

static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data)
{
        u8 instance = *(u8 *)data;

        kfree(data);

        hci_clear_adv_instance_sync(hdev, NULL, instance, false);

        if (list_empty(&hdev->adv_instances))
                return hci_disable_advertising_sync(hdev);

        return 0;
}

static void adv_timeout_expire(struct work_struct *work)
{
        u8 *inst_ptr;
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);

        bt_dev_dbg(hdev, "");

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        if (hdev->cur_adv_instance == 0x00)
                goto unlock;

        inst_ptr = kmalloc(1, GFP_KERNEL);
        if (!inst_ptr)
                goto unlock;

        *inst_ptr = hdev->cur_adv_instance;
        hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL);

unlock:
        hci_dev_unlock(hdev);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
        return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static int hci_passive_scan_sync(struct hci_dev *hdev);

static void interleave_scan_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            interleave_scan.work);
        unsigned long timeout;

        if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
                timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
        } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
                timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
        } else {
                bt_dev_err(hdev, "unexpected error");
                return;
        }

        hci_passive_scan_sync(hdev);

        hci_dev_lock(hdev);

        switch (hdev->interleave_scan_state) {
        case INTERLEAVE_SCAN_ALLOWLIST:
                bt_dev_dbg(hdev, "next state: allowlist");
                hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
                break;
        case INTERLEAVE_SCAN_NO_FILTER:
                bt_dev_dbg(hdev, "next state: no filter");
                hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
                break;
        case INTERLEAVE_SCAN_NONE:
                bt_dev_err(hdev, "unexpected error");
        }

        hci_dev_unlock(hdev);

        /* Don't continue interleaving if it was canceled */
        if (is_interleave_scanning(hdev))
                queue_delayed_work(hdev->req_workqueue,
                                   &hdev->interleave_scan, timeout);
}

void hci_cmd_sync_init(struct hci_dev *hdev)
{
        INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
        INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
        mutex_init(&hdev->cmd_sync_work_lock);
        mutex_init(&hdev->unregister_lock);

        INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
        INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable);
        INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
        INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}

static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
                                       struct hci_cmd_sync_work_entry *entry,
                                       int err)
{
        if (entry->destroy)
                entry->destroy(hdev, entry->data, err);

        list_del(&entry->list);
        kfree(entry);
}

void hci_cmd_sync_clear(struct hci_dev *hdev)
{
        struct hci_cmd_sync_work_entry *entry, *tmp;

        cancel_work_sync(&hdev->cmd_sync_work);
        cancel_work_sync(&hdev->reenable_adv_work);

        mutex_lock(&hdev->cmd_sync_work_lock);
        list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
                _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
        mutex_unlock(&hdev->cmd_sync_work_lock);
}

void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
{
        bt_dev_dbg(hdev, "err 0x%2.2x", err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;

                queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
        }
}
EXPORT_SYMBOL(hci_cmd_sync_cancel);

/* Cancel ongoing command request synchronously:
 *
 * - Set result and mark status to HCI_REQ_CANCELED
 * - Wakeup command sync thread
 */
void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
{
        bt_dev_dbg(hdev, "err 0x%2.2x", err);

        if (hdev->req_status == HCI_REQ_PEND) {
                /* req_result is __u32 so error must be positive to be properly
                 * propagated.
                 */
                hdev->req_result = err < 0 ? -err : err;
                hdev->req_status = HCI_REQ_CANCELED;

                wake_up_interruptible(&hdev->req_wait_q);
        }
}
EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);
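
/* Usage sketch (illustrative, not part of the original file): a transport
 * driver's command-timeout handler could abort a pending synchronous
 * request. The function name below is hypothetical.
 */
#if 0
static void example_cmd_timeout(struct hci_dev *hdev)
{
        /* req_result is __u32, so pass a positive errno (see
         * hci_cmd_sync_cancel_sync() above); this marks the request
         * canceled and wakes up __hci_cmd_sync_sk().
         */
        hci_cmd_sync_cancel(hdev, ETIMEDOUT);
}
#endif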

/* Submit HCI command to be run in cmd_sync_work:
 *
 * - hdev must _not_ be unregistered
 */
int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
                        void *data, hci_cmd_sync_work_destroy_t destroy)
{
        struct hci_cmd_sync_work_entry *entry;
        int err = 0;

        mutex_lock(&hdev->unregister_lock);
        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                err = -ENODEV;
                goto unlock;
        }

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                err = -ENOMEM;
                goto unlock;
        }
        entry->func = func;
        entry->data = data;
        entry->destroy = destroy;

        mutex_lock(&hdev->cmd_sync_work_lock);
        list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
        mutex_unlock(&hdev->cmd_sync_work_lock);

        queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);

unlock:
        mutex_unlock(&hdev->unregister_lock);
        return err;
}
EXPORT_SYMBOL(hci_cmd_sync_submit);

/* Queue HCI command:
 *
 * - hdev must be running
 */
int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
                       void *data, hci_cmd_sync_work_destroy_t destroy)
{
        /* Only queue command if hdev is running which means it has been opened
         * and is either in the init phase or already up.
         */
        if (!test_bit(HCI_RUNNING, &hdev->flags))
                return -ENETDOWN;

        return hci_cmd_sync_submit(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_queue);
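
/* Usage sketch (illustrative, not part of the original file): queueing a
 * callback with heap-allocated data and a destroy handler that frees the
 * data whether or not the callback ran. All "example_" names are
 * hypothetical.
 */
#if 0
static int example_dbg_sync(struct hci_dev *hdev, void *data)
{
        bt_dev_dbg(hdev, "name %s", (char *)data);
        return 0;
}

static void example_dbg_destroy(struct hci_dev *hdev, void *data, int err)
{
        kfree(data);
}

static int example_queue_dbg(struct hci_dev *hdev, const char *name)
{
        char *data = kstrdup(name, GFP_KERNEL);
        int err;

        if (!data)
                return -ENOMEM;

        err = hci_cmd_sync_queue(hdev, example_dbg_sync, data,
                                 example_dbg_destroy);
        /* On failure the entry was never queued, so destroy never runs
         * and the data must be freed here.
         */
        if (err)
                kfree(data);

        return err;
}
#endif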

static struct hci_cmd_sync_work_entry *
_hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
                           void *data, hci_cmd_sync_work_destroy_t destroy)
{
        struct hci_cmd_sync_work_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
                if (func && entry->func != func)
                        continue;

                if (data && entry->data != data)
                        continue;

                if (destroy && entry->destroy != destroy)
                        continue;

                return entry;
        }

        return NULL;
}

/* Queue HCI command entry once:
 *
 * - Look up whether an entry already exists and, only if it doesn't, create
 *   a new entry and queue it.
 */
int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
                            void *data, hci_cmd_sync_work_destroy_t destroy)
{
        if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
                return 0;

        return hci_cmd_sync_queue(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_queue_once);

/* Run HCI command:
 *
 * - hdev must be running
 * - if on cmd_sync_work then run immediately otherwise queue
 */
int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
                     void *data, hci_cmd_sync_work_destroy_t destroy)
{
        /* Only queue command if hdev is running which means it has been opened
         * and is either in the init phase or already up.
         */
        if (!test_bit(HCI_RUNNING, &hdev->flags))
                return -ENETDOWN;

        /* If on cmd_sync_work then run immediately otherwise queue */
        if (current_work() == &hdev->cmd_sync_work)
                return func(hdev, data);

        return hci_cmd_sync_submit(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_run);

/* Run HCI command entry once:
 *
 * - Look up whether an entry already exists and, only if it doesn't, create
 *   a new entry and run it.
 * - if on cmd_sync_work then run immediately otherwise queue
 */
int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
                          void *data, hci_cmd_sync_work_destroy_t destroy)
{
        if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
                return 0;

        return hci_cmd_sync_run(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_run_once);

/* Lookup HCI command entry:
 *
 * - Return first entry that matches by function callback or data or
 *   destroy callback.
 */
struct hci_cmd_sync_work_entry *
hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
                          void *data, hci_cmd_sync_work_destroy_t destroy)
{
        struct hci_cmd_sync_work_entry *entry;

        mutex_lock(&hdev->cmd_sync_work_lock);
        entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
        mutex_unlock(&hdev->cmd_sync_work_lock);

        return entry;
}
EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);

/* Cancel HCI command entry */
void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
                               struct hci_cmd_sync_work_entry *entry)
{
        mutex_lock(&hdev->cmd_sync_work_lock);
        _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
        mutex_unlock(&hdev->cmd_sync_work_lock);
}
EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);

/* Dequeue one HCI command entry:
 *
 * - Lookup and cancel first entry that matches.
 */
bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
                               hci_cmd_sync_work_func_t func,
                               void *data, hci_cmd_sync_work_destroy_t destroy)
{
        struct hci_cmd_sync_work_entry *entry;

        entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
        if (!entry)
                return false;

        hci_cmd_sync_cancel_entry(hdev, entry);

        return true;
}
EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);

/* Dequeue HCI command entry:
 *
 * - Lookup and cancel any entry that matches by function callback or data or
 *   destroy callback.
 */
bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
                          void *data, hci_cmd_sync_work_destroy_t destroy)
{
        struct hci_cmd_sync_work_entry *entry;
        bool ret = false;

        mutex_lock(&hdev->cmd_sync_work_lock);
        while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
                                                   destroy))) {
                _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
                ret = true;
        }
        mutex_unlock(&hdev->cmd_sync_work_lock);

        return ret;
}
EXPORT_SYMBOL(hci_cmd_sync_dequeue);

int hci_update_eir_sync(struct hci_dev *hdev)
{
        struct hci_cp_write_eir cp;

        bt_dev_dbg(hdev, "");

        if (!hdev_is_powered(hdev))
                return 0;

        if (!lmp_ext_inq_capable(hdev))
                return 0;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return 0;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return 0;

        memset(&cp, 0, sizeof(cp));

        eir_create(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return 0;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
                                     HCI_CMD_TIMEOUT);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
        struct bt_uuid *uuid;
        u8 val = 0;

        list_for_each_entry(uuid, &hdev->uuids, list)
                val |= uuid->svc_hint;

        return val;
}

int hci_update_class_sync(struct hci_dev *hdev)
{
        u8 cod[3];

        bt_dev_dbg(hdev, "");

        if (!hdev_is_powered(hdev))
                return 0;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return 0;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return 0;

        cod[0] = hdev->minor_class;
        cod[1] = hdev->major_class;
        cod[2] = get_service_classes(hdev);

        if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                cod[1] |= 0x20;

        if (memcmp(cod, hdev->dev_class, 3) == 0)
                return 0;

        return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV,
                                     sizeof(cod), cod, HCI_CMD_TIMEOUT);
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
        /* If there is no connection we are OK to advertise. */
        if (hci_conn_num(hdev, LE_LINK) == 0)
                return true;

        /* Check le_states if there is any connection in peripheral role. */
        if (hdev->conn_hash.le_num_peripheral > 0) {
                /* Peripheral connection state and non connectable mode
                 * bit 20.
                 */
                if (!connectable && !(hdev->le_states[2] & 0x10))
                        return false;

                /* Peripheral connection state and connectable mode bit 38
                 * and scannable bit 21.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x40) ||
                                    !(hdev->le_states[2] & 0x20)))
                        return false;
        }

        /* Check le_states if there is any connection in central role. */
        if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
                /* Central connection state and non connectable mode bit 18. */
                if (!connectable && !(hdev->le_states[2] & 0x02))
                        return false;

                /* Central connection state and connectable mode bit 35 and
                 * scannable bit 19.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x08) ||
                                    !(hdev->le_states[2] & 0x08)))
                        return false;
        }

        return true;
}
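
/* The le_states checks above index individual bits of the controller's
 * 8-byte "LE Supported States" mask: bit N lives in byte N / 8 at bit
 * position N % 8, so e.g. bit 20 is le_states[2] & 0x10 and bit 38 is
 * le_states[4] & 0x40. A generic helper would look like this (illustrative
 * sketch only, not part of the original file):
 */
#if 0
static bool example_le_state_supported(struct hci_dev *hdev, unsigned int bit)
{
        return hdev->le_states[bit / 8] & BIT(bit % 8);
}
#endif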

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}

static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
{
        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                bt_dev_dbg(hdev, "Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return 0;
        }

        return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR,
                                     6, rpa, HCI_CMD_TIMEOUT);
}

int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
                                   bool rpa, u8 *own_addr_type)
{
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * the current RPA has expired or there is something other than
         * the current RPA in use, then generate a new one.
         */
        if (rpa) {
                /* If the controller supports LL Privacy use own address type
                 * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
                 */
                if (use_ll_privacy(hdev))
                        *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
                else
                        *own_addr_type = ADDR_LE_DEV_RANDOM;

                /* Check if RPA is valid */
                if (rpa_valid(hdev))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        bt_dev_err(hdev, "failed to generate new RPA");
                        return err;
                }

                err = hci_set_random_addr_sync(hdev, &hdev->rpa);
                if (err)
                        return err;

                return 0;
        }

        /* In case of required privacy without resolvable private address,
         * use a non-resolvable private address. This is useful for active
         * scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from random six bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                return hci_set_random_addr_sync(hdev, &nrpa);
        }

        /* If forcing static address is in use or there is no public
         * address use the static address as random address (but skip
         * the HCI command if the current random address is already the
         * static one).
         *
         * In case BR/EDR has been disabled on a dual-mode controller
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        return hci_set_random_addr_sync(hdev,
                                                        &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor static address is being used so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}

static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
{
        struct hci_cp_le_set_ext_adv_enable *cp;
        struct hci_cp_ext_adv_set *set;
        u8 data[sizeof(*cp) + sizeof(*set) * 1];
        u8 size;
        struct adv_info *adv = NULL;

        /* If request specifies an instance that doesn't exist, fail */
        if (instance > 0) {
                adv = hci_find_adv_instance(hdev, instance);
                if (!adv)
                        return -EINVAL;

                /* If not enabled there is nothing to do */
                if (!adv->enabled)
                        return 0;
        }

        memset(data, 0, sizeof(data));

        cp = (void *)data;
        set = (void *)cp->data;

        /* Instance 0x00 indicates all advertising instances will be disabled */
        cp->num_of_sets = !!instance;
        cp->enable = 0x00;

        set->handle = adv ? adv->handle : instance;

        size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;

        return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
                                     size, data, HCI_CMD_TIMEOUT);
}

static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
                                            bdaddr_t *random_addr)
{
        struct hci_cp_le_set_adv_set_rand_addr cp;
        int err;

        if (!instance) {
                /* Instance 0x00 doesn't have an adv_info; instead it uses
                 * hdev->random_addr to track its address, so whenever it needs
                 * to be updated this also sets the random address, since
                 * hdev->random_addr is shared with the scan state machine.
                 */
                err = hci_set_random_addr_sync(hdev, random_addr);
                if (err)
                        return err;
        }

        memset(&cp, 0, sizeof(cp));

        cp.handle = instance;
        bacpy(&cp.bdaddr, random_addr);

        return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
                                     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
{
        struct hci_cp_le_set_ext_adv_params cp;
        bool connectable;
        u32 flags;
        bdaddr_t random_addr;
        u8 own_addr_type;
        int err;
        struct adv_info *adv;
        bool secondary_adv;

        if (instance > 0) {
                adv = hci_find_adv_instance(hdev, instance);
                if (!adv)
                        return -EINVAL;
        } else {
                adv = NULL;
        }

        /* Updating parameters of an active instance will return a
         * Command Disallowed error, so we must first disable the
         * instance if it is active.
         */
        if (adv && !adv->pending) {
                err = hci_disable_ext_adv_instance_sync(hdev, instance);
                if (err)
                        return err;
        }

        flags = hci_adv_instance_flags(hdev, instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        if (!is_advertising_allowed(hdev, connectable))
                return -EPERM;

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        err = hci_get_random_address(hdev, !connectable,
                                     adv_use_rpa(hdev, flags), adv,
                                     &own_addr_type, &random_addr);
        if (err < 0)
                return err;

        memset(&cp, 0, sizeof(cp));

        if (adv) {
                hci_cpu_to_le24(adv->min_interval, cp.min_interval);
                hci_cpu_to_le24(adv->max_interval, cp.max_interval);
                cp.tx_power = adv->tx_power;
        } else {
                hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
                hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
                cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
        }

        secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

        if (connectable) {
                if (secondary_adv)
                        cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
                else
                        cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
        } else if (hci_adv_instance_is_scannable(hdev, instance) ||
                   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
                if (secondary_adv)
                        cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
                else
                        cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
        } else {
                if (secondary_adv)
                        cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
                else
                        cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
        }

        /* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter
         * contains the peer’s Identity Address and the Peer_Address_Type
         * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01).
         * These parameters are used to locate the corresponding local IRK in
         * the resolving list; this IRK is used to generate the own address
         * used in the advertisement.
         */
        if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED)
                hci_copy_identity_address(hdev, &cp.peer_addr,
                                          &cp.peer_addr_type);

        cp.own_addr_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;
        cp.handle = adv ? adv->handle : instance;

        if (flags & MGMT_ADV_FLAG_SEC_2M) {
                cp.primary_phy = HCI_ADV_PHY_1M;
                cp.secondary_phy = HCI_ADV_PHY_2M;
        } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
                cp.primary_phy = HCI_ADV_PHY_CODED;
                cp.secondary_phy = HCI_ADV_PHY_CODED;
        } else {
                /* In all other cases use 1M */
                cp.primary_phy = HCI_ADV_PHY_1M;
                cp.secondary_phy = HCI_ADV_PHY_1M;
        }

        err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
                                    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
        if (err)
                return err;

        if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
             own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
            bacmp(&random_addr, BDADDR_ANY)) {
                /* Check if the random address needs to be updated */
                if (adv) {
                        if (!bacmp(&random_addr, &adv->random_addr))
                                return 0;
                } else {
                        if (!bacmp(&random_addr, &hdev->random_addr))
                                return 0;
                }

                return hci_set_adv_set_random_addr_sync(hdev, instance,
                                                        &random_addr);
        }

        return 0;
}

static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
        DEFINE_FLEX(struct hci_cp_le_set_ext_scan_rsp_data, pdu, data, length,
                    HCI_MAX_EXT_AD_LENGTH);
        u8 len;
        struct adv_info *adv = NULL;
        int err;

        if (instance) {
                adv = hci_find_adv_instance(hdev, instance);
                if (!adv || !adv->scan_rsp_changed)
                        return 0;
        }

        len = eir_create_scan_rsp(hdev, instance, pdu->data);

        pdu->handle = adv ? adv->handle : instance;
        pdu->length = len;
        pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
        pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;

        err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
                                    struct_size(pdu, data, len), pdu,
                                    HCI_CMD_TIMEOUT);
        if (err)
                return err;

        if (adv) {
                adv->scan_rsp_changed = false;
        } else {
                memcpy(hdev->scan_rsp_data, pdu->data, len);
                hdev->scan_rsp_data_len = len;
        }

        return 0;
}

static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
        struct hci_cp_le_set_scan_rsp_data cp;
        u8 len;

        memset(&cp, 0, sizeof(cp));

        len = eir_create_scan_rsp(hdev, instance, cp.data);

        if (hdev->scan_rsp_data_len == len &&
            !memcmp(cp.data, hdev->scan_rsp_data, len))
                return 0;

        memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
        hdev->scan_rsp_data_len = len;

        cp.length = len;

        return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA,
                                     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return 0;

        if (ext_adv_capable(hdev))
                return hci_set_ext_scan_rsp_data_sync(hdev, instance);

        return __hci_set_scan_rsp_data_sync(hdev, instance);
}

int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
{
        struct hci_cp_le_set_ext_adv_enable *cp;
        struct hci_cp_ext_adv_set *set;
        u8 data[sizeof(*cp) + sizeof(*set) * 1];
        struct adv_info *adv;

        if (instance > 0) {
                adv = hci_find_adv_instance(hdev, instance);
                if (!adv)
                        return -EINVAL;
                /* If already enabled there is nothing to do */
                if (adv->enabled)
                        return 0;
        } else {
                adv = NULL;
        }

        cp = (void *)data;
        set = (void *)cp->data;

        memset(cp, 0, sizeof(*cp));

        cp->enable = 0x01;
        cp->num_of_sets = 0x01;

        memset(set, 0, sizeof(*set));

        set->handle = adv ? adv->handle : instance;

        /* Set duration per instance since controller is responsible for
         * scheduling it.
         */
        if (adv && adv->timeout) {
                u16 duration = adv->timeout * MSEC_PER_SEC;

                /* Time = N * 10 ms */
                set->duration = cpu_to_le16(duration / 10);
        }

        return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
                                     sizeof(*cp) +
                                     sizeof(*set) * cp->num_of_sets,
                                     data, HCI_CMD_TIMEOUT);
}
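
/* Worked example for the duration conversion above: adv->timeout = 5 s gives
 * duration = 5 * 1000 = 5000 ms, so set->duration = 5000 / 10 = 500 units of
 * 10 ms and the controller stops the set after 5 seconds.
 */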
1451
1452 int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
1453 {
1454         int err;
1455
1456         err = hci_setup_ext_adv_instance_sync(hdev, instance);
1457         if (err)
1458                 return err;
1459
1460         err = hci_set_ext_scan_rsp_data_sync(hdev, instance);
1461         if (err)
1462                 return err;
1463
1464         return hci_enable_ext_advertising_sync(hdev, instance);
1465 }
1466
1467 int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
1468 {
1469         struct hci_cp_le_set_per_adv_enable cp;
1470         struct adv_info *adv = NULL;
1471
1472         /* If periodic advertising already disabled there is nothing to do. */
1473         adv = hci_find_adv_instance(hdev, instance);
1474         if (!adv || !adv->periodic || !adv->enabled)
1475                 return 0;
1476
1477         memset(&cp, 0, sizeof(cp));
1478
1479         cp.enable = 0x00;
1480         cp.handle = instance;
1481
1482         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
1483                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1484 }
1485
1486 static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance,
1487                                        u16 min_interval, u16 max_interval)
1488 {
1489         struct hci_cp_le_set_per_adv_params cp;
1490
1491         memset(&cp, 0, sizeof(cp));
1492
1493         if (!min_interval)
1494                 min_interval = DISCOV_LE_PER_ADV_INT_MIN;
1495
1496         if (!max_interval)
1497                 max_interval = DISCOV_LE_PER_ADV_INT_MAX;
1498
1499         cp.handle = instance;
1500         cp.min_interval = cpu_to_le16(min_interval);
1501         cp.max_interval = cpu_to_le16(max_interval);
1502         cp.periodic_properties = 0x0000;
1503
1504         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS,
1505                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1506 }
1507
1508 static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
1509 {
1510         DEFINE_FLEX(struct hci_cp_le_set_per_adv_data, pdu, data, length,
1511                     HCI_MAX_PER_AD_LENGTH);
1512         u8 len;
1513         struct adv_info *adv = NULL;
1514
1515         if (instance) {
1516                 adv = hci_find_adv_instance(hdev, instance);
1517                 if (!adv || !adv->periodic)
1518                         return 0;
1519         }
1520
1521         len = eir_create_per_adv_data(hdev, instance, pdu->data);
1522
1523         pdu->length = len;
1524         pdu->handle = adv ? adv->handle : instance;
1525         pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
1526
1527         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA,
1528                                      struct_size(pdu, data, len), pdu,
1529                                      HCI_CMD_TIMEOUT);
1530 }
1531
1532 static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
1533 {
1534         struct hci_cp_le_set_per_adv_enable cp;
1535         struct adv_info *adv = NULL;
1536
1537         /* If periodic advertising already enabled there is nothing to do. */
1538         adv = hci_find_adv_instance(hdev, instance);
1539         if (adv && adv->periodic && adv->enabled)
1540                 return 0;
1541
1542         memset(&cp, 0, sizeof(cp));
1543
1544         cp.enable = 0x01;
1545         cp.handle = instance;
1546
1547         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
1548                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1549 }
1550
1551 /* Checks if the periodic advertising data contains a Basic Audio Announcement
1552  * and, if it does, generates a Broadcast ID and adds a Broadcast Announcement.
1553  */
1554 static int hci_adv_bcast_announcement(struct hci_dev *hdev, struct adv_info *adv)
1555 {
1556         u8 bid[3];
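        /* Service Data AD: 4 header bytes (length, type and the 16-bit UUID)
         * followed by the 3 byte Broadcast ID, hence 4 + 3.
         */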
1557         u8 ad[4 + 3];
1558
1559         /* Skip if adv is NULL, as instance 0x00 is used for general purpose
1560          * advertising and so cannot be used for the likes of a Broadcast
1561          * Announcement, since it can be overwritten at any point.
1562          */
1563         if (!adv)
1564                 return 0;
1565
1566         /* If the PA data doesn't contain a Basic Audio Announcement there
1567          * is nothing to do.
1568          */
1569         if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len,
1570                                   0x1851, NULL))
1571                 return 0;
1572
1573         /* Check if the advertising data already has a Broadcast Announcement,
1574          * since the process may want to control the Broadcast ID directly and
1575          * in that case the kernel shall not interfere.
1576          */
1577         if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852,
1578                                  NULL))
1579                 return 0;
1580
1581         /* Generate Broadcast ID */
1582         get_random_bytes(bid, sizeof(bid));
1583         eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid));
1584         hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL);
1585
1586         return hci_update_adv_data_sync(hdev, adv->instance);
1587 }
1588
1589 int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
1590                            u8 *data, u32 flags, u16 min_interval,
1591                            u16 max_interval, u16 sync_interval)
1592 {
1593         struct adv_info *adv = NULL;
1594         int err;
1595         bool added = false;
1596
1597         hci_disable_per_advertising_sync(hdev, instance);
1598
1599         if (instance) {
1600                 adv = hci_find_adv_instance(hdev, instance);
1601                 /* Create an instance if one could not be found */
1602                 if (!adv) {
1603                         adv = hci_add_per_instance(hdev, instance, flags,
1604                                                    data_len, data,
1605                                                    sync_interval,
1606                                                    sync_interval);
1607                         if (IS_ERR(adv))
1608                                 return PTR_ERR(adv);
1609                         adv->pending = false;
1610                         added = true;
1611                 }
1612         }
1613
1614         /* Start advertising */
1615         err = hci_start_ext_adv_sync(hdev, instance);
1616         if (err < 0)
1617                 goto fail;
1618
1619         err = hci_adv_bcast_announcement(hdev, adv);
1620         if (err < 0)
1621                 goto fail;
1622
1623         err = hci_set_per_adv_params_sync(hdev, instance, min_interval,
1624                                           max_interval);
1625         if (err < 0)
1626                 goto fail;
1627
1628         err = hci_set_per_adv_data_sync(hdev, instance);
1629         if (err < 0)
1630                 goto fail;
1631
1632         err = hci_enable_per_advertising_sync(hdev, instance);
1633         if (err < 0)
1634                 goto fail;
1635
1636         return 0;
1637
1638 fail:
1639         if (added)
1640                 hci_remove_adv_instance(hdev, instance);
1641
1642         return err;
1643 }
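
/* Editor's sketch (hypothetical, not part of the original file): a minimal
 * caller that starts periodic advertising on instance 0x01. Zero intervals
 * are replaced with the DISCOV_LE_PER_ADV_INT_MIN/MAX defaults above; the
 * AD payload is illustrative only.
 */
static int __maybe_unused example_start_per_adv(struct hci_dev *hdev)
{
        u8 data[] = { 0x02, 0x01, 0x06 }; /* AD: Flags, LE General Discoverable */

        return hci_start_per_adv_sync(hdev, 0x01, sizeof(data), data,
                                      0 /* flags */, 0 /* min_interval */,
                                      0 /* max_interval */,
                                      0 /* sync_interval */);
}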
1644
1645 static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance)
1646 {
1647         int err;
1648
1649         if (ext_adv_capable(hdev))
1650                 return hci_start_ext_adv_sync(hdev, instance);
1651
1652         err = hci_update_adv_data_sync(hdev, instance);
1653         if (err)
1654                 return err;
1655
1656         err = hci_update_scan_rsp_data_sync(hdev, instance);
1657         if (err)
1658                 return err;
1659
1660         return hci_enable_advertising_sync(hdev);
1661 }
1662
1663 int hci_enable_advertising_sync(struct hci_dev *hdev)
1664 {
1665         struct adv_info *adv_instance;
1666         struct hci_cp_le_set_adv_param cp;
1667         u8 own_addr_type, enable = 0x01;
1668         bool connectable;
1669         u16 adv_min_interval, adv_max_interval;
1670         u32 flags;
1671         u8 status;
1672
1673         if (ext_adv_capable(hdev))
1674                 return hci_enable_ext_advertising_sync(hdev,
1675                                                        hdev->cur_adv_instance);
1676
1677         flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
1678         adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1679
1680         /* If the "connectable" instance flag was not set, then choose between
1681          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1682          */
1683         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1684                       mgmt_get_connectable(hdev);
1685
1686         if (!is_advertising_allowed(hdev, connectable))
1687                 return -EINVAL;
1688
1689         status = hci_disable_advertising_sync(hdev);
1690         if (status)
1691                 return status;
1692
1693         /* Clear the HCI_LE_ADV bit temporarily so that the
1694          * hci_update_random_address knows that it's safe to go ahead
1695          * and write a new random address. The flag will be set back on
1696          * as soon as the SET_ADV_ENABLE HCI command completes.
1697          */
1698         hci_dev_clear_flag(hdev, HCI_LE_ADV);
1699
1700         /* Set require_privacy to true only when non-connectable
1701          * advertising is used. In that case it is fine to use a
1702          * non-resolvable private address.
1703          */
1704         status = hci_update_random_address_sync(hdev, !connectable,
1705                                                 adv_use_rpa(hdev, flags),
1706                                                 &own_addr_type);
1707         if (status)
1708                 return status;
1709
1710         memset(&cp, 0, sizeof(cp));
1711
1712         if (adv_instance) {
1713                 adv_min_interval = adv_instance->min_interval;
1714                 adv_max_interval = adv_instance->max_interval;
1715         } else {
1716                 adv_min_interval = hdev->le_adv_min_interval;
1717                 adv_max_interval = hdev->le_adv_max_interval;
1718         }
1719
1720         if (connectable) {
1721                 cp.type = LE_ADV_IND;
1722         } else {
1723                 if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance))
1724                         cp.type = LE_ADV_SCAN_IND;
1725                 else
1726                         cp.type = LE_ADV_NONCONN_IND;
1727
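                /* When not discoverable, or when only limited discoverable,
                 * fall back to the fast advertising interval.
                 */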
1728                 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1729                     hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1730                         adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1731                         adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1732                 }
1733         }
1734
1735         cp.min_interval = cpu_to_le16(adv_min_interval);
1736         cp.max_interval = cpu_to_le16(adv_max_interval);
1737         cp.own_address_type = own_addr_type;
1738         cp.channel_map = hdev->le_adv_channel_map;
1739
1740         status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
1741                                        sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1742         if (status)
1743                 return status;
1744
1745         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
1746                                      sizeof(enable), &enable, HCI_CMD_TIMEOUT);
1747 }
1748
1749 static int enable_advertising_sync(struct hci_dev *hdev, void *data)
1750 {
1751         return hci_enable_advertising_sync(hdev);
1752 }
1753
1754 int hci_enable_advertising(struct hci_dev *hdev)
1755 {
1756         if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1757             list_empty(&hdev->adv_instances))
1758                 return 0;
1759
1760         return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL);
1761 }
1762
1763 int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
1764                                      struct sock *sk)
1765 {
1766         int err;
1767
1768         if (!ext_adv_capable(hdev))
1769                 return 0;
1770
1771         err = hci_disable_ext_adv_instance_sync(hdev, instance);
1772         if (err)
1773                 return err;
1774
1775         /* If request specifies an instance that doesn't exist, fail */
1776         if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1777                 return -EINVAL;
1778
1779         return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET,
1780                                         sizeof(instance), &instance, 0,
1781                                         HCI_CMD_TIMEOUT, sk);
1782 }
1783
1784 static int remove_ext_adv_sync(struct hci_dev *hdev, void *data)
1785 {
1786         struct adv_info *adv = data;
1787         u8 instance = 0;
1788
1789         if (adv)
1790                 instance = adv->instance;
1791
1792         return hci_remove_ext_adv_instance_sync(hdev, instance, NULL);
1793 }
1794
1795 int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance)
1796 {
1797         struct adv_info *adv = NULL;
1798
1799         if (instance) {
1800                 adv = hci_find_adv_instance(hdev, instance);
1801                 if (!adv)
1802                         return -EINVAL;
1803         }
1804
1805         return hci_cmd_sync_queue(hdev, remove_ext_adv_sync, adv, NULL);
1806 }
1807
1808 int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
1809 {
1810         struct hci_cp_le_term_big cp;
1811
1812         memset(&cp, 0, sizeof(cp));
1813         cp.handle = handle;
1814         cp.reason = reason;
1815
1816         return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG,
1817                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1818 }
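
/* Editor's sketch (hypothetical, not part of the original file): terminating
 * BIG handle 0x01 with the common "remote user terminated connection" reason.
 */
static int __maybe_unused example_terminate_big(struct hci_dev *hdev)
{
        return hci_le_terminate_big_sync(hdev, 0x01,
                                         HCI_ERROR_REMOTE_USER_TERM);
}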
1819
1820 static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
1821 {
1822         DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length,
1823                     HCI_MAX_EXT_AD_LENGTH);
1824         u8 len;
1825         struct adv_info *adv = NULL;
1826         int err;
1827
1828         if (instance) {
1829                 adv = hci_find_adv_instance(hdev, instance);
1830                 if (!adv || !adv->adv_data_changed)
1831                         return 0;
1832         }
1833
1834         len = eir_create_adv_data(hdev, instance, pdu->data);
1835
1836         pdu->length = len;
1837         pdu->handle = adv ? adv->handle : instance;
1838         pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
1839         pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1840
1841         err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
1842                                     struct_size(pdu, data, len), pdu,
1843                                     HCI_CMD_TIMEOUT);
1844         if (err)
1845                 return err;
1846
1847         /* Update data if the command succeeded */
1848         if (adv) {
1849                 adv->adv_data_changed = false;
1850         } else {
1851                 memcpy(hdev->adv_data, pdu->data, len);
1852                 hdev->adv_data_len = len;
1853         }
1854
1855         return 0;
1856 }
1857
1858 static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
1859 {
1860         struct hci_cp_le_set_adv_data cp;
1861         u8 len;
1862
1863         memset(&cp, 0, sizeof(cp));
1864
1865         len = eir_create_adv_data(hdev, instance, cp.data);
1866
1867         /* There's nothing to do if the data hasn't changed */
1868         if (hdev->adv_data_len == len &&
1869             memcmp(cp.data, hdev->adv_data, len) == 0)
1870                 return 0;
1871
1872         memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1873         hdev->adv_data_len = len;
1874
1875         cp.length = len;
1876
1877         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
1878                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1879 }
1880
1881 int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
1882 {
1883         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1884                 return 0;
1885
1886         if (ext_adv_capable(hdev))
1887                 return hci_set_ext_adv_data_sync(hdev, instance);
1888
1889         return hci_set_adv_data_sync(hdev, instance);
1890 }
1891
1892 int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
1893                                    bool force)
1894 {
1895         struct adv_info *adv = NULL;
1896         u16 timeout;
1897
1898         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev))
1899                 return -EPERM;
1900
1901         if (hdev->adv_instance_timeout)
1902                 return -EBUSY;
1903
1904         adv = hci_find_adv_instance(hdev, instance);
1905         if (!adv)
1906                 return -ENOENT;
1907
1908         /* A zero timeout means unlimited advertising. As long as there is
1909          * only one instance, duration should be ignored. We still set a timeout
1910          * in case further instances are being added later on.
1911          *
1912          * If the remaining lifetime of the instance is more than the duration
1913          * then the timeout corresponds to the duration, otherwise it will be
1914          * reduced to the remaining instance lifetime.
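         *
         * For example, with a duration of 10 seconds and a remaining
         * lifetime of 25 seconds, the timeout is 10 seconds and the
         * remaining lifetime drops to 15 seconds.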
1915          */
1916         if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
1917                 timeout = adv->duration;
1918         else
1919                 timeout = adv->remaining_time;
1920
1921         /* The remaining time is being reduced unless the instance is being
1922          * advertised without time limit.
1923          */
1924         if (adv->timeout)
1925                 adv->remaining_time = adv->remaining_time - timeout;
1926
1927         /* Only use work for scheduling instances with legacy advertising */
1928         if (!ext_adv_capable(hdev)) {
1929                 hdev->adv_instance_timeout = timeout;
1930                 queue_delayed_work(hdev->req_workqueue,
1931                                    &hdev->adv_instance_expire,
1932                                    msecs_to_jiffies(timeout * 1000));
1933         }
1934
1935         /* If we're just re-scheduling the same instance again then do not
1936          * execute any HCI commands. This happens when a single instance is
1937          * being advertised.
1938          */
1939         if (!force && hdev->cur_adv_instance == instance &&
1940             hci_dev_test_flag(hdev, HCI_LE_ADV))
1941                 return 0;
1942
1943         hdev->cur_adv_instance = instance;
1944
1945         return hci_start_adv_sync(hdev, instance);
1946 }
1947
1948 static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
1949 {
1950         int err;
1951
1952         if (!ext_adv_capable(hdev))
1953                 return 0;
1954
1955         /* Disable instance 0x00 to disable all instances */
1956         err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
1957         if (err)
1958                 return err;
1959
1960         return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS,
1961                                         0, NULL, 0, HCI_CMD_TIMEOUT, sk);
1962 }
1963
1964 static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
1965 {
1966         struct adv_info *adv, *n;
1967
1968         /* Remove all existing sets */
1969         if (ext_adv_capable(hdev))
1970                 return hci_clear_adv_sets_sync(hdev, sk);
1974
1975         /* This is safe as long as no command is sent while the lock is
1976          * held.
1977          */
1978         hci_dev_lock(hdev);
1979
1980         /* Cleanup non-ext instances */
1981         list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1982                 u8 instance = adv->instance;
1983                 int err;
1984
1985                 if (!(force || adv->timeout))
1986                         continue;
1987
1988                 err = hci_remove_adv_instance(hdev, instance);
1989                 if (!err)
1990                         mgmt_advertising_removed(sk, hdev, instance);
1991         }
1992
1993         hci_dev_unlock(hdev);
1994
1995         return 0;
1996 }
1997
1998 static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
1999                                struct sock *sk)
2000 {
2001         int err = 0;
2002
2003         /* If we use extended advertising, the instance has to be removed first. */
2004         if (ext_adv_capable(hdev))
2005                 return hci_remove_ext_adv_instance_sync(hdev, instance, sk);
2008
2009         /* This is safe as long as no command is sent while the lock is
2010          * held.
2011          */
2012         hci_dev_lock(hdev);
2013
2014         err = hci_remove_adv_instance(hdev, instance);
2015         if (!err)
2016                 mgmt_advertising_removed(sk, hdev, instance);
2017
2018         hci_dev_unlock(hdev);
2019
2020         return err;
2021 }
2022
2023 /* For a single instance:
2024  * - force == true: The instance will be removed even when its remaining
2025  *   lifetime is not zero.
2026  * - force == false: The instance will be deactivated but kept stored unless
2027  *   the remaining lifetime is zero.
2028  *
2029  * For instance == 0x00:
2030  * - force == true: All instances will be removed regardless of their timeout
2031  *   setting.
2032  * - force == false: Only instances that have a timeout will be removed.
2033  */
2034 int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
2035                                 u8 instance, bool force)
2036 {
2037         struct adv_info *next = NULL;
2038         int err;
2039
2040         /* Cancel any timeout concerning the removed instance(s). */
2041         if (!instance || hdev->cur_adv_instance == instance)
2042                 cancel_adv_timeout(hdev);
2043
2044         /* Get the next instance to advertise BEFORE we remove
2045          * the current one. This can be the same instance again
2046          * if there is only one instance.
2047          */
2048         if (hdev->cur_adv_instance == instance)
2049                 next = hci_get_next_instance(hdev, instance);
2050
2051         if (!instance) {
2052                 err = hci_clear_adv_sync(hdev, sk, force);
2053                 if (err)
2054                         return err;
2055         } else {
2056                 struct adv_info *adv = hci_find_adv_instance(hdev, instance);
2057
2058                 if (force || (adv && adv->timeout && !adv->remaining_time)) {
2059                         /* Don't advertise a removed instance. */
2060                         if (next && next->instance == instance)
2061                                 next = NULL;
2062
2063                         err = hci_remove_adv_sync(hdev, instance, sk);
2064                         if (err)
2065                                 return err;
2066                 }
2067         }
2068
2069         if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
2070                 return 0;
2071
2072         if (next && !ext_adv_capable(hdev))
2073                 hci_schedule_adv_instance_sync(hdev, next->instance, false);
2074
2075         return 0;
2076 }
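
/* Editor's sketch (hypothetical, not part of the original file): the two
 * removal shapes described in the comment above, run from the same sync
 * context as the other *_sync helpers.
 */
static int __maybe_unused example_remove_advertising(struct hci_dev *hdev,
                                                     struct sock *sk)
{
        int err;

        /* Deactivate instance 0x01 but keep it stored unless it expired */
        err = hci_remove_advertising_sync(hdev, sk, 0x01, false);
        if (err)
                return err;

        /* Remove all instances regardless of their timeout setting */
        return hci_remove_advertising_sync(hdev, sk, 0x00, true);
}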
2077
2078 int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle)
2079 {
2080         struct hci_cp_read_rssi cp;
2081
2082         cp.handle = handle;
2083         return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI,
2084                                         sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2085 }
2086
2087 int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp)
2088 {
2089         return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK,
2090                                         sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2091 }
2092
2093 int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
2094 {
2095         struct hci_cp_read_tx_power cp;
2096
2097         cp.handle = handle;
2098         cp.type = type;
2099         return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER,
2100                                         sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2101 }
2102
2103 int hci_disable_advertising_sync(struct hci_dev *hdev)
2104 {
2105         u8 enable = 0x00;
2107
2108         /* If controller is not advertising we are done. */
2109         if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
2110                 return 0;
2111
2112         if (ext_adv_capable(hdev))
2113                 return hci_disable_ext_adv_instance_sync(hdev, 0x00);
2116
2117         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
2118                                      sizeof(enable), &enable, HCI_CMD_TIMEOUT);
2119 }
2120
2121 static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val,
2122                                            u8 filter_dup)
2123 {
2124         struct hci_cp_le_set_ext_scan_enable cp;
2125
2126         memset(&cp, 0, sizeof(cp));
2127         cp.enable = val;
2128
2129         if (hci_dev_test_flag(hdev, HCI_MESH))
2130                 cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2131         else
2132                 cp.filter_dup = filter_dup;
2133
2134         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2135                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2136 }
2137
2138 static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
2139                                        u8 filter_dup)
2140 {
2141         struct hci_cp_le_set_scan_enable cp;
2142
2143         if (use_ext_scan(hdev))
2144                 return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup);
2145
2146         memset(&cp, 0, sizeof(cp));
2147         cp.enable = val;
2148
2149         if (val && hci_dev_test_flag(hdev, HCI_MESH))
2150                 cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2151         else
2152                 cp.filter_dup = filter_dup;
2153
2154         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
2155                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2156 }
2157
2158 static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val)
2159 {
2160         if (!use_ll_privacy(hdev))
2161                 return 0;
2162
2163         /* If the controller is already in the requested resolving state we are done. */
2164         if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
2165                 return 0;
2166
2167         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
2168                                      sizeof(val), &val, HCI_CMD_TIMEOUT);
2169 }
2170
2171 static int hci_scan_disable_sync(struct hci_dev *hdev)
2172 {
2173         int err;
2174
2175         /* If controller is not scanning we are done. */
2176         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2177                 return 0;
2178
2179         if (hdev->scanning_paused) {
2180                 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2181                 return 0;
2182         }
2183
2184         err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
2185         if (err)
2186                 bt_dev_err(hdev, "Unable to disable scanning: %d", err);
2187
2188         return err;
2191 }
2192
2193 static bool scan_use_rpa(struct hci_dev *hdev)
2194 {
2195         return hci_dev_test_flag(hdev, HCI_PRIVACY);
2196 }
2197
2198 static void hci_start_interleave_scan(struct hci_dev *hdev)
2199 {
2200         hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
2201         queue_delayed_work(hdev->req_workqueue,
2202                            &hdev->interleave_scan, 0);
2203 }
2204
2205 static void cancel_interleave_scan(struct hci_dev *hdev)
2206 {
2207         bt_dev_dbg(hdev, "cancelling interleave scan");
2208
2209         cancel_delayed_work_sync(&hdev->interleave_scan);
2210
2211         hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
2212 }
2213
2214 /* Return true if an interleaved scan was started by this function,
2215  * otherwise return false.
2216  */
2217 static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
2218 {
2219         /* Do interleaved scan only if all of the following are true:
2220          * - There is at least one ADV monitor
2221          * - At least one pending LE connection or one device to be scanned for
2222          * - Monitor offloading is not supported
2223          * If so, we should alternate between allowlist scan and one without
2224          * any filters to save power.
2225          */
2226         bool use_interleaving = hci_is_adv_monitoring(hdev) &&
2227                                 !(list_empty(&hdev->pend_le_conns) &&
2228                                   list_empty(&hdev->pend_le_reports)) &&
2229                                 hci_get_adv_monitor_offload_ext(hdev) ==
2230                                     HCI_ADV_MONITOR_EXT_NONE;
2231         bool is_interleaving = is_interleave_scanning(hdev);
2232
2233         if (use_interleaving && !is_interleaving) {
2234                 hci_start_interleave_scan(hdev);
2235                 bt_dev_dbg(hdev, "starting interleave scan");
2236                 return true;
2237         }
2238
2239         if (!use_interleaving && is_interleaving)
2240                 cancel_interleave_scan(hdev);
2241
2242         return false;
2243 }
2244
2245 /* Removes the device from the resolving list if needed. */
2246 static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
2247                                         bdaddr_t *bdaddr, u8 bdaddr_type)
2248 {
2249         struct hci_cp_le_del_from_resolv_list cp;
2250         struct bdaddr_list_with_irk *entry;
2251
2252         if (!use_ll_privacy(hdev))
2253                 return 0;
2254
2255         /* Check if the IRK has been programmed */
2256         entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
2257                                                 bdaddr_type);
2258         if (!entry)
2259                 return 0;
2260
2261         cp.bdaddr_type = bdaddr_type;
2262         bacpy(&cp.bdaddr, bdaddr);
2263
2264         return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
2265                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2266 }
2267
2268 static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
2269                                        bdaddr_t *bdaddr, u8 bdaddr_type)
2270 {
2271         struct hci_cp_le_del_from_accept_list cp;
2272         int err;
2273
2274         /* Check if device is on accept list before removing it */
2275         if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
2276                 return 0;
2277
2278         cp.bdaddr_type = bdaddr_type;
2279         bacpy(&cp.bdaddr, bdaddr);
2280
2281         /* Ignore errors when removing from the resolving list, as it is
2282          * likely that the device was never added.
2283          */
2284         hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2285
2286         err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
2287                                     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2288         if (err) {
2289                 bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
2290                 return err;
2291         }
2292
2293         bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
2294                    cp.bdaddr_type);
2295
2296         return 0;
2297 }
2298
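/* Snapshot of the hci_conn_params fields needed while programming the
 * controller; filled by conn_params_copy() under RCU so that the live list
 * may keep changing while the copy is being processed.
 */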
2299 struct conn_params {
2300         bdaddr_t addr;
2301         u8 addr_type;
2302         hci_conn_flags_t flags;
2303         u8 privacy_mode;
2304 };
2305
2306 /* Adds the device to the resolving list if needed.
2307  * Setting params to NULL programs the local hdev->irk.
2308  */
2309 static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
2310                                         struct conn_params *params)
2311 {
2312         struct hci_cp_le_add_to_resolv_list cp;
2313         struct smp_irk *irk;
2314         struct bdaddr_list_with_irk *entry;
2315         struct hci_conn_params *p;
2316
2317         if (!use_ll_privacy(hdev))
2318                 return 0;
2319
2320         /* Attempt to program local identity address, type and irk if params is
2321          * NULL.
2322          */
2323         if (!params) {
2324                 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
2325                         return 0;
2326
2327                 hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
2328                 memcpy(cp.peer_irk, hdev->irk, 16);
2329                 goto done;
2330         }
2331
2332         irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2333         if (!irk)
2334                 return 0;
2335
2336         /* Check if the IRK has _not_ been programmed yet. */
2337         entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
2338                                                 &params->addr,
2339                                                 params->addr_type);
2340         if (entry)
2341                 return 0;
2342
2343         cp.bdaddr_type = params->addr_type;
2344         bacpy(&cp.bdaddr, &params->addr);
2345         memcpy(cp.peer_irk, irk->val, 16);
2346
2347         /* Default privacy mode is always Network */
2348         params->privacy_mode = HCI_NETWORK_PRIVACY;
2349
2350         rcu_read_lock();
2351         p = hci_pend_le_action_lookup(&hdev->pend_le_conns,
2352                                       &params->addr, params->addr_type);
2353         if (!p)
2354                 p = hci_pend_le_action_lookup(&hdev->pend_le_reports,
2355                                               &params->addr, params->addr_type);
2356         if (p)
2357                 WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY);
2358         rcu_read_unlock();
2359
2360 done:
2361         if (hci_dev_test_flag(hdev, HCI_PRIVACY))
2362                 memcpy(cp.local_irk, hdev->irk, 16);
2363         else
2364                 memset(cp.local_irk, 0, 16);
2365
2366         return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
2367                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2368 }
2369
2370 /* Set Device Privacy Mode. */
2371 static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
2372                                         struct conn_params *params)
2373 {
2374         struct hci_cp_le_set_privacy_mode cp;
2375         struct smp_irk *irk;
2376
2377         /* If device privacy mode has already been set there is nothing to do */
2378         if (params->privacy_mode == HCI_DEVICE_PRIVACY)
2379                 return 0;
2380
2381         /* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also
2382          * indicates that LL Privacy has been enabled and
2383          * HCI_OP_LE_SET_PRIVACY_MODE is supported.
2384          */
2385         if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY))
2386                 return 0;
2387
2388         irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2389         if (!irk)
2390                 return 0;
2391
2392         memset(&cp, 0, sizeof(cp));
2393         cp.bdaddr_type = irk->addr_type;
2394         bacpy(&cp.bdaddr, &irk->bdaddr);
2395         cp.mode = HCI_DEVICE_PRIVACY;
2396
2397         /* Note: params->privacy_mode is not updated since it is a copy */
2398
2399         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE,
2400                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2401 }
2402
2403 /* Adds the device to the allow list if needed; if the device uses an RPA
2404  * (has an IRK) this attempts to program the device into the resolving list
2405  * as well and properly sets the privacy mode.
2406  */
2407 static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
2408                                        struct conn_params *params,
2409                                        u8 *num_entries)
2410 {
2411         struct hci_cp_le_add_to_accept_list cp;
2412         int err;
2413
2414         /* During suspend, only wakeable devices can be in acceptlist */
2415         if (hdev->suspended &&
2416             !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) {
2417                 hci_le_del_accept_list_sync(hdev, &params->addr,
2418                                             params->addr_type);
2419                 return 0;
2420         }
2421
2422         /* If the accept list is full, the caller falls back to a filter
2423          * policy that accepts all advertising.
2424          */
2423         if (*num_entries >= hdev->le_accept_list_size)
2424                 return -ENOSPC;
2425
2426         /* The accept list cannot be used with RPAs */
2427         if (!use_ll_privacy(hdev) &&
2428             hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
2429                 return -EINVAL;
2430
2431         /* Attempt to program the device into the resolving list first, to
2432          * avoid having to roll back if it fails, since the resolving list is
2433          * dynamic and may be smaller than the accept list.
2434          */
2435         err = hci_le_add_resolve_list_sync(hdev, params);
2436         if (err) {
2437                 bt_dev_err(hdev, "Unable to add to resolve list: %d", err);
2438                 return err;
2439         }
2440
2441         /* Set Privacy Mode */
2442         err = hci_le_set_privacy_mode_sync(hdev, params);
2443         if (err) {
2444                 bt_dev_err(hdev, "Unable to set privacy mode: %d", err);
2445                 return err;
2446         }
2447
2448         /* Check if already in accept list */
2449         if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
2450                                    params->addr_type))
2451                 return 0;
2452
2453         *num_entries += 1;
2454         cp.bdaddr_type = params->addr_type;
2455         bacpy(&cp.bdaddr, &params->addr);
2456
2457         err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST,
2458                                     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2459         if (err) {
2460                 bt_dev_err(hdev, "Unable to add to allow list: %d", err);
2461                 /* Rollback the device from the resolving list */
2462                 hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2463                 return err;
2464         }
2465
2466         bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr,
2467                    cp.bdaddr_type);
2468
2469         return 0;
2470 }
2471
2472 /* This function disables/pauses all advertising instances */
2473 static int hci_pause_advertising_sync(struct hci_dev *hdev)
2474 {
2475         int err;
2476         int old_state;
2477
2478         /* If advertising has already been paused there is nothing to do. */
2479         if (hdev->advertising_paused)
2480                 return 0;
2481
2482         bt_dev_dbg(hdev, "Pausing directed advertising");
2483
2484         /* Stop directed advertising */
2485         old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
2486         if (old_state) {
2487                 /* When discoverable timeout triggers, then just make sure
2488                  * the limited discoverable flag is cleared. Even in the case
2489                  * of a timeout triggered from general discoverable, it is
2490                  * safe to unconditionally clear the flag.
2491                  */
2492                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2493                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2494                 hdev->discov_timeout = 0;
2495         }
2496
2497         bt_dev_dbg(hdev, "Pausing advertising instances");
2498
2499         /* Call to disable any advertisements active on the controller.
2500          * This will succeed even if no advertisements are configured.
2501          */
2502         err = hci_disable_advertising_sync(hdev);
2503         if (err)
2504                 return err;
2505
2506         /* If we are using software rotation, pause the loop */
2507         if (!ext_adv_capable(hdev))
2508                 cancel_adv_timeout(hdev);
2509
2510         hdev->advertising_paused = true;
2511         hdev->advertising_old_state = old_state;
2512
2513         return 0;
2514 }
2515
2516 /* This function enables all user advertising instances */
2517 static int hci_resume_advertising_sync(struct hci_dev *hdev)
2518 {
2519         struct adv_info *adv, *tmp;
2520         int err = 0;
2521
2522         /* If advertising has not been paused there is nothing to do. */
2523         if (!hdev->advertising_paused)
2524                 return 0;
2525
2526         /* Resume directed advertising */
2527         hdev->advertising_paused = false;
2528         if (hdev->advertising_old_state) {
2529                 hci_dev_set_flag(hdev, HCI_ADVERTISING);
2530                 hdev->advertising_old_state = 0;
2531         }
2532
2533         bt_dev_dbg(hdev, "Resuming advertising instances");
2534
2535         if (ext_adv_capable(hdev)) {
2536                 /* Call for each tracked instance to be re-enabled */
2537                 list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) {
2538                         err = hci_enable_ext_advertising_sync(hdev,
2539                                                               adv->instance);
2540                         if (!err)
2541                                 continue;
2542
2543                         /* If the instance cannot be resumed remove it */
2544                         hci_remove_ext_adv_instance_sync(hdev, adv->instance,
2545                                                          NULL);
2546                 }
2547         } else {
2548                 /* Schedule for most recent instance to be restarted and begin
2549                  * the software rotation loop
2550                  */
2551                 err = hci_schedule_adv_instance_sync(hdev,
2552                                                      hdev->cur_adv_instance,
2553                                                      true);
2554         }
2555
2556         hdev->advertising_paused = false;
2557
2558         return err;
2559 }
2560
2561 static int hci_pause_addr_resolution(struct hci_dev *hdev)
2562 {
2563         int err;
2564
2565         if (!use_ll_privacy(hdev))
2566                 return 0;
2567
2568         if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
2569                 return 0;
2570
2571         /* Cannot disable addr resolution if scanning is enabled or
2572          * when initiating an LE connection.
2573          */
2574         if (hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2575             hci_lookup_le_connect(hdev)) {
2576                 bt_dev_err(hdev, "Command not allowed when scan/LE connect");
2577                 return -EPERM;
2578         }
2579
2580         /* Cannot disable addr resolution if advertising is enabled. */
2581         err = hci_pause_advertising_sync(hdev);
2582         if (err) {
2583                 bt_dev_err(hdev, "Pause advertising failed: %d", err);
2584                 return err;
2585         }
2586
2587         err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2588         if (err)
2589                 bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
2590                            err);
2591
2592         /* Return, keeping advertising paused, if address resolution was
2593          * disabled and RPA is in use.
2594          */
2593         if (!err && scan_use_rpa(hdev))
2594                 return 0;
2595
2596         hci_resume_advertising_sync(hdev);
2597         return err;
2598 }
2599
2600 struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
2601                                              bool extended, struct sock *sk)
2602 {
2603         u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
2604                                         HCI_OP_READ_LOCAL_OOB_DATA;
2605
2606         return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
2607 }
2608
2609 static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
2610 {
2611         struct hci_conn_params *params;
2612         struct conn_params *p;
2613         size_t i;
2614
2615         rcu_read_lock();
2616
2617         i = 0;
2618         list_for_each_entry_rcu(params, list, action)
2619                 ++i;
2620         *n = i;
2621
2622         rcu_read_unlock();
2623
2624         p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL);
2625         if (!p)
2626                 return NULL;
2627
2628         rcu_read_lock();
2629
2630         i = 0;
2631         list_for_each_entry_rcu(params, list, action) {
2632                 /* Racing adds are handled in next scan update */
2633                 if (i >= *n)
2634                         break;
2635
2636                 /* No hdev->lock, but: addr, addr_type are immutable.
2637                  * privacy_mode is only written by us or in
2638                  * hci_cc_le_set_privacy_mode that we wait for.
2639                  * We should be idempotent so MGMT updating flags
2640                  * while we are processing is OK.
2641                  */
2642                 bacpy(&p[i].addr, &params->addr);
2643                 p[i].addr_type = params->addr_type;
2644                 p[i].flags = READ_ONCE(params->flags);
2645                 p[i].privacy_mode = READ_ONCE(params->privacy_mode);
2646                 ++i;
2647         }
2648
2649         rcu_read_unlock();
2650
2651         *n = i;
2652         return p;
2653 }
2654
2655 /* Clear LE Accept List */
2656 static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
2657 {
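        /* Bit 7 of supported-commands octet 26 indicates support for
         * HCI_OP_LE_CLEAR_ACCEPT_LIST.
         */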
2658         if (!(hdev->commands[26] & 0x80))
2659                 return 0;
2660
2661         return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
2662                                      HCI_CMD_TIMEOUT);
2663 }
2664
2665 /* Device must not be scanning when updating the accept list.
2666  *
2667  * Update is done using the following sequence:
2668  *
2669  * use_ll_privacy((Disable Advertising) -> Disable Resolving List) ->
2670  * Remove Devices From Accept List ->
2671  * (has IRK && use_ll_privacy(Remove Devices From Resolving List))->
2672  * Add Devices to Accept List ->
2673  * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) ->
2674  * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) ->
2675  * Enable Scanning
2676  *
2677  * In case of failure advertising shall be restored to its original state and
2678  * the returned filter policy shall disable the accept list, since either the
2679  * accept list or the resolving list could not be programmed.
2680  */
2682 static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
2683 {
2684         struct conn_params *params;
2685         struct bdaddr_list *b, *t;
2686         u8 num_entries = 0;
2687         bool pend_conn, pend_report;
2688         u8 filter_policy;
2689         size_t i, n;
2690         int err;
2691
2692         /* Pause advertising if resolving list can be used as controllers
2693          * cannot accept resolving list modifications while advertising.
2694          */
2695         if (use_ll_privacy(hdev)) {
2696                 err = hci_pause_advertising_sync(hdev);
2697                 if (err) {
2698                         bt_dev_err(hdev, "pause advertising failed: %d", err);
2699                         return 0x00;
2700                 }
2701         }
2702
2703         /* Disable address resolution while reprogramming accept list since
2704          * devices that do have an IRK will be programmed in the resolving list
2705          * when LL Privacy is enabled.
2706          */
2707         err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2708         if (err) {
2709                 bt_dev_err(hdev, "Unable to disable LL privacy: %d", err);
2710                 goto done;
2711         }
2712
2713         /* Force address filtering if PA Sync is in progress */
2714         if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
2715                 struct hci_cp_le_pa_create_sync *sent;
2716
2717                 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_PA_CREATE_SYNC);
2718                 if (sent) {
2719                         struct conn_params pa;
2720
2721                         memset(&pa, 0, sizeof(pa));
2722
2723                         bacpy(&pa.addr, &sent->addr);
2724                         pa.addr_type = sent->addr_type;
2725
2726                         /* Clear first since there could be addresses left
2727                          * behind.
2728                          */
2729                         hci_le_clear_accept_list_sync(hdev);
2730
2731                         num_entries = 1;
2732                         err = hci_le_add_accept_list_sync(hdev, &pa,
2733                                                           &num_entries);
2734                         goto done;
2735                 }
2736         }
2737
2738         /* Go through the current accept list programmed into the
2739          * controller one by one and check if that address is connected or is
2740          * still in the list of pending connections or list of devices to
2741          * report. If not present in either list, then remove it from
2742          * the controller.
2743          */
2744         list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
2745                 if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type))
2746                         continue;
2747
2748                 /* Pointers not dereferenced, no locks needed */
2749                 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
2750                                                       &b->bdaddr,
2751                                                       b->bdaddr_type);
2752                 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
2753                                                         &b->bdaddr,
2754                                                         b->bdaddr_type);
2755
2756                 /* If the device is not likely to connect or report,
2757                  * remove it from the acceptlist.
2758                  */
2759                 if (!pend_conn && !pend_report) {
2760                         hci_le_del_accept_list_sync(hdev, &b->bdaddr,
2761                                                     b->bdaddr_type);
2762                         continue;
2763                 }
2764
2765                 num_entries++;
2766         }
2767
2768         /* Since all no-longer-valid accept list entries have been
2769          * removed, walk through the list of pending connections
2770          * and ensure that any new device gets programmed into
2771          * the controller.
2772          *
2773          * If the list of devices is larger than the number of
2774          * available accept list entries in the controller, then
2775          * just abort and return a filter policy value that does not
2776          * use the accept list.
2777          *
2778          * The list and params may be mutated while we wait for events,
2779          * so make a copy and iterate it.
2780          */
2781
2782         params = conn_params_copy(&hdev->pend_le_conns, &n);
2783         if (!params) {
2784                 err = -ENOMEM;
2785                 goto done;
2786         }
2787
2788         for (i = 0; i < n; ++i) {
2789                 err = hci_le_add_accept_list_sync(hdev, &params[i],
2790                                                   &num_entries);
2791                 if (err) {
2792                         kvfree(params);
2793                         goto done;
2794                 }
2795         }
2796
2797         kvfree(params);
2798
2799         /* After adding all new pending connections, walk through
2800          * the list of pending reports and also add these to the
2801          * accept list if there is still space. Abort if space runs out.
2802          */
2803
2804         params = conn_params_copy(&hdev->pend_le_reports, &n);
2805         if (!params) {
2806                 err = -ENOMEM;
2807                 goto done;
2808         }
2809
2810         for (i = 0; i < n; ++i) {
2811                 err = hci_le_add_accept_list_sync(hdev, &params[i],
2812                                                   &num_entries);
2813                 if (err) {
2814                         kvfree(params);
2815                         goto done;
2816                 }
2817         }
2818
2819         kvfree(params);
2820
2821         /* Use the allowlist unless the following conditions are all true:
2822          * - We are not currently suspending
2823          * - There are 1 or more ADV monitors registered and it's not offloaded
2824          * - Interleaved scanning is not currently using the allowlist
2825          */
2826         if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
2827             hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
2828             hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
2829                 err = -EINVAL;
2830
2831 done:
2832         filter_policy = err ? 0x00 : 0x01;
2833
2834         /* Enable address resolution when LL Privacy is enabled. */
2835         err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
2836         if (err)
2837                 bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);
2838
2839         /* Resume advertising if it was paused */
2840         if (use_ll_privacy(hdev))
2841                 hci_resume_advertising_sync(hdev);
2842
2843         /* Select filter policy to use accept list */
2844         return filter_policy;
2845 }
2846
2847 static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp,
2848                                    u8 type, u16 interval, u16 window)
2849 {
2850         cp->type = type;
2851         cp->interval = cpu_to_le16(interval);
2852         cp->window = cpu_to_le16(window);
2853 }
2854
2855 static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
2856                                           u16 interval, u16 window,
2857                                           u8 own_addr_type, u8 filter_policy)
2858 {
2859         struct hci_cp_le_set_ext_scan_params *cp;
2860         struct hci_cp_le_scan_phy_params *phy;
2861         u8 data[sizeof(*cp) + sizeof(*phy) * 2];
2862         u8 num_phy = 0x00;
2863
2864         cp = (void *)data;
2865         phy = (void *)cp->data;
2866
2867         memset(data, 0, sizeof(data));
2868
2869         cp->own_addr_type = own_addr_type;
2870         cp->filter_policy = filter_policy;
2871
2872         /* If PA Sync is in progress, select the PHY based on the
2873          * hci_conn.iso_qos.
2874          */
2875         if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
2876                 struct hci_cp_le_add_to_accept_list *sent;
2877
2878                 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
2879                 if (sent) {
2880                         struct hci_conn *conn;
2881
2882                         conn = hci_conn_hash_lookup_ba(hdev, ISO_LINK,
2883                                                        &sent->bdaddr);
2884                         if (conn) {
2885                                 struct bt_iso_qos *qos = &conn->iso_qos;
2886
2887                                 if (qos->bcast.in.phy & BT_ISO_PHY_1M ||
2888                                     qos->bcast.in.phy & BT_ISO_PHY_2M) {
2889                                         cp->scanning_phys |= LE_SCAN_PHY_1M;
2890                                         hci_le_scan_phy_params(phy, type,
2891                                                                interval,
2892                                                                window);
2893                                         num_phy++;
2894                                         phy++;
2895                                 }
2896
2897                                 if (qos->bcast.in.phy & BT_ISO_PHY_CODED) {
2898                                         cp->scanning_phys |= LE_SCAN_PHY_CODED;
2899                                         hci_le_scan_phy_params(phy, type,
2900                                                                interval * 3,
2901                                                                window * 3);
2902                                         num_phy++;
2903                                         phy++;
2904                                 }
2905
2906                                 if (num_phy)
2907                                         goto done;
2908                         }
2909                 }
2910         }
2911
2912         if (scan_1m(hdev) || scan_2m(hdev)) {
2913                 cp->scanning_phys |= LE_SCAN_PHY_1M;
2914                 hci_le_scan_phy_params(phy, type, interval, window);
2915                 num_phy++;
2916                 phy++;
2917         }
2918
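        /* As in the PA sync path above, the coded PHY uses a 3x scaled
         * interval and window.
         */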
2919         if (scan_coded(hdev)) {
2920                 cp->scanning_phys |= LE_SCAN_PHY_CODED;
2921                 hci_le_scan_phy_params(phy, type, interval * 3, window * 3);
2922                 num_phy++;
2923                 phy++;
2924         }
2925
2926 done:
2927         if (!num_phy)
2928                 return -EINVAL;
2929
2930         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
2931                                      sizeof(*cp) + sizeof(*phy) * num_phy,
2932                                      data, HCI_CMD_TIMEOUT);
2933 }
2934
2935 static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type,
2936                                       u16 interval, u16 window,
2937                                       u8 own_addr_type, u8 filter_policy)
2938 {
2939         struct hci_cp_le_set_scan_param cp;
2940
2941         if (use_ext_scan(hdev))
2942                 return hci_le_set_ext_scan_param_sync(hdev, type, interval,
2943                                                       window, own_addr_type,
2944                                                       filter_policy);
2945
2946         memset(&cp, 0, sizeof(cp));
2947         cp.type = type;
2948         cp.interval = cpu_to_le16(interval);
2949         cp.window = cpu_to_le16(window);
2950         cp.own_address_type = own_addr_type;
2951         cp.filter_policy = filter_policy;
2952
2953         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM,
2954                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2955 }
2956
2957 static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval,
2958                                u16 window, u8 own_addr_type, u8 filter_policy,
2959                                u8 filter_dup)
2960 {
2961         int err;
2962
2963         if (hdev->scanning_paused) {
2964                 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2965                 return 0;
2966         }
2967
2968         err = hci_le_set_scan_param_sync(hdev, type, interval, window,
2969                                          own_addr_type, filter_policy);
2970         if (err)
2971                 return err;
2972
2973         return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup);
2974 }
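
/* Editor's sketch (hypothetical, not part of the original file): starting an
 * active scan with illustrative interval/window values; own_addr_type would
 * come from hci_update_random_address_sync() as in hci_passive_scan_sync()
 * below.
 */
static int __maybe_unused example_start_active_scan(struct hci_dev *hdev,
                                                    u8 own_addr_type)
{
        return hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, 0x0060, 0x0030,
                                   own_addr_type, 0x00 /* accept all */,
                                   LE_SCAN_FILTER_DUP_ENABLE);
}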
2975
2976 static int hci_passive_scan_sync(struct hci_dev *hdev)
2977 {
2978         u8 own_addr_type;
2979         u8 filter_policy;
2980         u16 window, interval;
2981         u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE;
2982         int err;
2983
2984         if (hdev->scanning_paused) {
2985                 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2986                 return 0;
2987         }
2988
2989         err = hci_scan_disable_sync(hdev);
2990         if (err) {
2991                 bt_dev_err(hdev, "disable scanning failed: %d", err);
2992                 return err;
2993         }
2994
2995         /* Set require_privacy to false since no SCAN_REQ PDUs are sent
2996          * during passive scanning. Not using a non-resolvable address
2997          * here is important so that peer devices using directed
2998          * advertising with our address will be correctly reported
2999          * by the controller.
3000          */
3001         if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev),
3002                                            &own_addr_type))
3003                 return 0;
3004
3005         if (hdev->enable_advmon_interleave_scan &&
3006             hci_update_interleaved_scan_sync(hdev))
3007                 return 0;
3008
3009         bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
3010
3011         /* Adding or removing entries from the accept list must
3012          * happen before enabling scanning. The controller does
3013          * not allow accept list modification while scanning.
3014          */
3015         filter_policy = hci_update_accept_list_sync(hdev);
3016
3017         /* If suspended and filter_policy is set to 0x00 (no acceptlist),
3018          * then passive scanning cannot be started since that would require
3019          * the host to be woken up to process the advertising reports.
3020          */
3021         if (hdev->suspended && !filter_policy) {
3022                 /* If the accept list is empty there is no need to scan
3023                  * while suspended.
3024                  */
3025                 if (list_empty(&hdev->le_accept_list))
3026                         return 0;
3027
3028                 /* If there are devices in the accept_list it means some
3029                  * devices could not be programmed, which in the non-suspended
3030                  * case means filter_policy needs to be set to 0x00 so the host
3031                  * does the filtering. Since this is the suspended case, ignore
3032                  * the devices that need host filtering so that the devices in
3033                  * the acceptlist can still wake up the system.
3034                  */
3035                 filter_policy = 0x01;
3036         }
3037
3038         /* When the controller is using random resolvable addresses, i.e.
3039          * has LE privacy enabled, controllers that support Extended
3040          * Scanner Filter Policies can additionally enable support
3041          * for handling directed advertising.
3042          *
3043          * So instead of using filter policies 0x00 (no acceptlist)
3044          * and 0x01 (acceptlist enabled) use the new filter policies
3045          * 0x02 (no acceptlist) and 0x03 (acceptlist enabled).
3046          */
3047         if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
3048             (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
3049                 filter_policy |= 0x02;
3050
3051         if (hdev->suspended) {
3052                 window = hdev->le_scan_window_suspend;
3053                 interval = hdev->le_scan_int_suspend;
3054         } else if (hci_is_le_conn_scanning(hdev)) {
3055                 window = hdev->le_scan_window_connect;
3056                 interval = hdev->le_scan_int_connect;
3057         } else if (hci_is_adv_monitoring(hdev)) {
3058                 window = hdev->le_scan_window_adv_monitor;
3059                 interval = hdev->le_scan_int_adv_monitor;
3060
3061                 /* Disable the duplicates filter when scanning for an
3062                  * advertisement monitor, for the following reasons.
3063                  *
3064                  * For HW pattern filtering (e.g. MSFT), Realtek and Qualcomm
3065                  * controllers ignore RSSI_Sampling_Period when the duplicates
3066                  * filter is enabled.
3067                  *
3068                  * For SW pattern filtering, when we're not doing interleaved
3069                  * scanning, it is necessary to disable the duplicates filter,
3070                  * otherwise the host can only receive one advertisement and
3071                  * it's impossible to know if a peer is still in range.
3072                  */
3073                 filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
3074         } else {
3075                 window = hdev->le_scan_window;
3076                 interval = hdev->le_scan_interval;
3077         }
3078
3079         /* Disable all filtering for Mesh */
3080         if (hci_dev_test_flag(hdev, HCI_MESH)) {
3081                 filter_policy = 0;
3082                 filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
3083         }
3084
3085         bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);
3086
3087         return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
3088                                    own_addr_type, filter_policy, filter_dups);
3089 }
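
/* Illustrative sketch, not part of this file: the interval/window values
 * chosen above are in units of 0.625 ms (baseband slots). A hypothetical
 * helper like the one below shows the conversion; the name scan_slots_to_ms
 * is an assumption for illustration only.
 */
static inline u32 __maybe_unused scan_slots_to_ms(u16 slots)
{
	/* 1 slot = 0.625 ms = 625 us; round to the nearest millisecond */
	return DIV_ROUND_CLOSEST(slots * 625u, 1000u);
}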
3090
3091 /* This function controls the passive scanning based on the
3092  * hdev->pend_le_conns list. If there are pending LE connections we start
3093  * the background scanning, otherwise we stop it, in the following sequence:
3094  *
3095  * If there are devices to scan:
3096  *
3097  * Disable Scanning -> Update Accept List ->
3098  * use_ll_privacy((Disable Advertising) -> Disable Resolving List ->
3099  * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
3100  * Enable Scanning
3101  *
3102  * Otherwise:
3103  *
3104  * Disable Scanning
3105  */
3106 int hci_update_passive_scan_sync(struct hci_dev *hdev)
3107 {
3108         int err;
3109
3110         if (!test_bit(HCI_UP, &hdev->flags) ||
3111             test_bit(HCI_INIT, &hdev->flags) ||
3112             hci_dev_test_flag(hdev, HCI_SETUP) ||
3113             hci_dev_test_flag(hdev, HCI_CONFIG) ||
3114             hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
3115             hci_dev_test_flag(hdev, HCI_UNREGISTER))
3116                 return 0;
3117
3118         /* No point in doing scanning if LE support hasn't been enabled */
3119         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
3120                 return 0;
3121
3122         /* If discovery is active don't interfere with it */
3123         if (hdev->discovery.state != DISCOVERY_STOPPED)
3124                 return 0;
3125
3126         /* Reset RSSI and UUID filters when starting background scanning
3127          * since these filters are meant for service discovery only.
3128          *
3129          * The Start Discovery and Start Service Discovery operations
3130          * make sure to set proper values for the RSSI threshold and
3131          * the UUID filter list, so it is safe to just reset them here.
3132          */
3133         hci_discovery_filter_clear(hdev);
3134
3135         bt_dev_dbg(hdev, "ADV monitoring is %s",
3136                    hci_is_adv_monitoring(hdev) ? "on" : "off");
3137
3138         if (!hci_dev_test_flag(hdev, HCI_MESH) &&
3139             list_empty(&hdev->pend_le_conns) &&
3140             list_empty(&hdev->pend_le_reports) &&
3141             !hci_is_adv_monitoring(hdev) &&
3142             !hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
3143                 /* If there are no pending LE connections, no devices
3144                  * to be scanned for and no ADV monitors, we should stop
3145                  * the background scanning.
3146                  */
3147
3148                 bt_dev_dbg(hdev, "stopping background scanning");
3149
3150                 err = hci_scan_disable_sync(hdev);
3151                 if (err)
3152                         bt_dev_err(hdev, "stop background scanning failed: %d",
3153                                    err);
3154         } else {
3155                 /* If there is at least one pending LE connection, we should
3156                  * keep the background scan running.
3157                  */
3158
3159                 /* If the controller is connecting, we should not start scanning
3160                  * since some controllers are not able to scan and connect at
3161                  * the same time.
3162                  */
3163                 if (hci_lookup_le_connect(hdev))
3164                         return 0;
3165
3166                 bt_dev_dbg(hdev, "start background scanning");
3167
3168                 err = hci_passive_scan_sync(hdev);
3169                 if (err)
3170                         bt_dev_err(hdev, "start background scanning failed: %d",
3171                                    err);
3172         }
3173
3174         return err;
3175 }
3176
3177 static int update_scan_sync(struct hci_dev *hdev, void *data)
3178 {
3179         return hci_update_scan_sync(hdev);
3180 }
3181
3182 int hci_update_scan(struct hci_dev *hdev)
3183 {
3184         return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL);
3185 }
3186
3187 static int update_passive_scan_sync(struct hci_dev *hdev, void *data)
3188 {
3189         return hci_update_passive_scan_sync(hdev);
3190 }
3191
3192 int hci_update_passive_scan(struct hci_dev *hdev)
3193 {
3194         /* Only queue if it would have any effect */
3195         if (!test_bit(HCI_UP, &hdev->flags) ||
3196             test_bit(HCI_INIT, &hdev->flags) ||
3197             hci_dev_test_flag(hdev, HCI_SETUP) ||
3198             hci_dev_test_flag(hdev, HCI_CONFIG) ||
3199             hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
3200             hci_dev_test_flag(hdev, HCI_UNREGISTER))
3201                 return 0;
3202
3203         return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
3204                                        NULL);
3205 }
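
/* Illustrative usage sketch (an assumption, not a call site in this file):
 * code that adds a pending LE connection would re-evaluate background
 * scanning afterwards, e.g.:
 *
 *	hci_pend_le_list_add(params, &hdev->pend_le_conns);
 *	hci_update_passive_scan(hdev);
 */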
3206
3207 int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
3208 {
3209         int err;
3210
3211         if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev))
3212                 return 0;
3213
3214         err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
3215                                     sizeof(val), &val, HCI_CMD_TIMEOUT);
3216
3217         if (!err) {
3218                 if (val) {
3219                         hdev->features[1][0] |= LMP_HOST_SC;
3220                         hci_dev_set_flag(hdev, HCI_SC_ENABLED);
3221                 } else {
3222                         hdev->features[1][0] &= ~LMP_HOST_SC;
3223                         hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
3224                 }
3225         }
3226
3227         return err;
3228 }
3229
3230 int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode)
3231 {
3232         int err;
3233
3234         if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
3235             lmp_host_ssp_capable(hdev))
3236                 return 0;
3237
3238         if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
3239                 __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
3240                                       sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3241         }
3242
3243         err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
3244                                     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3245         if (err)
3246                 return err;
3247
3248         return hci_write_sc_support_sync(hdev, 0x01);
3249 }
3250
3251 int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul)
3252 {
3253         struct hci_cp_write_le_host_supported cp;
3254
3255         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
3256             !lmp_bredr_capable(hdev))
3257                 return 0;
3258
3259         /* Check first if we already have the right host state
3260          * (host features set)
3261          */
3262         if (le == lmp_host_le_capable(hdev) &&
3263             simul == lmp_host_le_br_capable(hdev))
3264                 return 0;
3265
3266         memset(&cp, 0, sizeof(cp));
3267
3268         cp.le = le;
3269         cp.simul = simul;
3270
3271         return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3272                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3273 }
3274
3275 static int hci_powered_update_adv_sync(struct hci_dev *hdev)
3276 {
3277         struct adv_info *adv, *tmp;
3278         int err;
3279
3280         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
3281                 return 0;
3282
3283         /* If RPA Resolution has not been enabled yet it means the
3284          * resolving list is empty and we should attempt to program the
3285          * local IRK in order to support using own_addr_type
3286          * ADDR_LE_DEV_RANDOM_RESOLVED (0x03).
3287          */
3288         if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
3289                 hci_le_add_resolve_list_sync(hdev, NULL);
3290                 hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
3291         }
3292
3293         /* Make sure the controller has a good default for
3294          * advertising data. This also applies to the case
3295          * where BR/EDR was toggled during the AUTO_OFF phase.
3296          */
3297         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3298             list_empty(&hdev->adv_instances)) {
3299                 if (ext_adv_capable(hdev)) {
3300                         err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
3301                         if (!err)
3302                                 hci_update_scan_rsp_data_sync(hdev, 0x00);
3303                 } else {
3304                         err = hci_update_adv_data_sync(hdev, 0x00);
3305                         if (!err)
3306                                 hci_update_scan_rsp_data_sync(hdev, 0x00);
3307                 }
3308
3309                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
3310                         hci_enable_advertising_sync(hdev);
3311         }
3312
3313         /* Schedule each tracked advertising instance */
3314         list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list)
3315                 hci_schedule_adv_instance_sync(hdev, adv->instance, true);
3316
3317         return 0;
3318 }
3319
3320 static int hci_write_auth_enable_sync(struct hci_dev *hdev)
3321 {
3322         u8 link_sec;
3323
3324         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3325         if (link_sec == test_bit(HCI_AUTH, &hdev->flags))
3326                 return 0;
3327
3328         return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
3329                                      sizeof(link_sec), &link_sec,
3330                                      HCI_CMD_TIMEOUT);
3331 }
3332
3333 int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable)
3334 {
3335         struct hci_cp_write_page_scan_activity cp;
3336         u8 type;
3337         int err = 0;
3338
3339         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3340                 return 0;
3341
3342         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
3343                 return 0;
3344
3345         memset(&cp, 0, sizeof(cp));
3346
3347         if (enable) {
3348                 type = PAGE_SCAN_TYPE_INTERLACED;
3349
3350                 /* 160 msec page scan interval */
3351                 cp.interval = cpu_to_le16(0x0100);
3352         } else {
3353                 type = hdev->def_page_scan_type;
3354                 cp.interval = cpu_to_le16(hdev->def_page_scan_int);
3355         }
3356
3357         cp.window = cpu_to_le16(hdev->def_page_scan_window);
3358
3359         if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval ||
3360             __cpu_to_le16(hdev->page_scan_window) != cp.window) {
3361                 err = __hci_cmd_sync_status(hdev,
3362                                             HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
3363                                             sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3364                 if (err)
3365                         return err;
3366         }
3367
3368         if (hdev->page_scan_type != type)
3369                 err = __hci_cmd_sync_status(hdev,
3370                                             HCI_OP_WRITE_PAGE_SCAN_TYPE,
3371                                             sizeof(type), &type,
3372                                             HCI_CMD_TIMEOUT);
3373
3374         return err;
3375 }
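
/* Worked example: the fast connectable interval above is 0x0100 slots,
 * and with 0.625 ms per slot that is 256 * 0.625 ms = 160 ms, matching
 * the "160 msec page scan interval" comment.
 */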
3376
3377 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
3378 {
3379         struct bdaddr_list *b;
3380
3381         list_for_each_entry(b, &hdev->accept_list, list) {
3382                 struct hci_conn *conn;
3383
3384                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
3385                 if (!conn)
3386                         return true;
3387
3388                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3389                         return true;
3390         }
3391
3392         return false;
3393 }
3394
3395 static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val)
3396 {
3397         return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
3398                                             sizeof(val), &val,
3399                                             HCI_CMD_TIMEOUT);
3400 }
3401
3402 int hci_update_scan_sync(struct hci_dev *hdev)
3403 {
3404         u8 scan;
3405
3406         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3407                 return 0;
3408
3409         if (!hdev_is_powered(hdev))
3410                 return 0;
3411
3412         if (mgmt_powering_down(hdev))
3413                 return 0;
3414
3415         if (hdev->scanning_paused)
3416                 return 0;
3417
3418         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
3419             disconnected_accept_list_entries(hdev))
3420                 scan = SCAN_PAGE;
3421         else
3422                 scan = SCAN_DISABLED;
3423
3424         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
3425                 scan |= SCAN_INQUIRY;
3426
3427         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
3428             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
3429                 return 0;
3430
3431         return hci_write_scan_enable_sync(hdev, scan);
3432 }
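
/* Worked example of the resulting bitmask: SCAN_PAGE is 0x02 and
 * SCAN_INQUIRY is 0x01, so a connectable and discoverable device writes
 * 0x03, while SCAN_DISABLED (0x00) turns both scans off.
 */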
3433
3434 int hci_update_name_sync(struct hci_dev *hdev)
3435 {
3436         struct hci_cp_write_local_name cp;
3437
3438         memset(&cp, 0, sizeof(cp));
3439
3440         memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3441
3442         return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
3443                                             sizeof(cp), &cp,
3444                                             HCI_CMD_TIMEOUT);
3445 }
3446
3447 /* This function performs the powered update HCI command sequence after the
3448  * HCI init sequence, which ends up resetting all states. The sequence is:
3449  *
3450  * HCI_SSP_ENABLED(Enable SSP)
3451  * HCI_LE_ENABLED(Enable LE)
3452  * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) ->
3453  * Update adv data)
3454  * Enable Authentication
3455  * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class ->
3456  * Set Name -> Set EIR)
3457  * HCI_FORCE_STATIC_ADDR | BDADDR_ANY && !HCI_BREDR_ENABLED (Set Static Address)
3458  */
3459 int hci_powered_update_sync(struct hci_dev *hdev)
3460 {
3461         int err;
3462
3463         /* Register the available SMP channels (BR/EDR and LE) only when
3464          * successfully powering on the controller. This late
3465          * registration is required so that LE SMP can clearly decide if
3466          * the public address or static address is used.
3467          */
3468         smp_register(hdev);
3469
3470         err = hci_write_ssp_mode_sync(hdev, 0x01);
3471         if (err)
3472                 return err;
3473
3474         err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00);
3475         if (err)
3476                 return err;
3477
3478         err = hci_powered_update_adv_sync(hdev);
3479         if (err)
3480                 return err;
3481
3482         err = hci_write_auth_enable_sync(hdev);
3483         if (err)
3484                 return err;
3485
3486         if (lmp_bredr_capable(hdev)) {
3487                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3488                         hci_write_fast_connectable_sync(hdev, true);
3489                 else
3490                         hci_write_fast_connectable_sync(hdev, false);
3491                 hci_update_scan_sync(hdev);
3492                 hci_update_class_sync(hdev);
3493                 hci_update_name_sync(hdev);
3494                 hci_update_eir_sync(hdev);
3495         }
3496
3497         /* If forcing the static address is in use or there is no public
3498          * address, use the static address as the random address (but skip
3499          * the HCI command if the current random address is already the
3500          * static one).
3501          *
3502          * In case BR/EDR has been disabled on a dual-mode controller
3503          * and a static address has been configured, then use that
3504          * address instead of the public BR/EDR address.
3505          */
3506         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3507             (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3508             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))) {
3509                 if (bacmp(&hdev->static_addr, BDADDR_ANY))
3510                         return hci_set_random_addr_sync(hdev,
3511                                                         &hdev->static_addr);
3512         }
3513
3514         return 0;
3515 }
3516
3517 /**
3518  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
3519  *                                     (BD_ADDR) for a HCI device from
3520  *                                     a firmware node property.
3521  * @hdev:       The HCI device
3522  *
3523  * Search the firmware node for 'local-bd-address'.
3524  *
3525  * All-zero BD addresses are rejected, because those could be properties
3526  * that exist in the firmware tables but were not updated by the firmware. For
3527  * example, the DTS could define 'local-bd-address' with an all-zero address.
3528  */
3529 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
3530 {
3531         struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
3532         bdaddr_t ba;
3533         int ret;
3534
3535         ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
3536                                             (u8 *)&ba, sizeof(ba));
3537         if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
3538                 return;
3539
3540         if (test_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks))
3541                 baswap(&hdev->public_addr, &ba);
3542         else
3543                 bacpy(&hdev->public_addr, &ba);
3544 }
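
/* Illustrative devicetree sketch (an assumption, not from this file): a
 * firmware node carrying the address 00:1A:7D:DA:71:13 in the expected
 * little-endian byte order would look like:
 *
 *	bluetooth {
 *		local-bd-address = [13 71 da 7d 1a 00];
 *	};
 *
 * Controllers whose firmware stores the property byte-swapped are handled
 * by the HCI_QUIRK_BDADDR_PROPERTY_BROKEN baswap() path above.
 */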
3545
3546 struct hci_init_stage {
3547         int (*func)(struct hci_dev *hdev);
3548 };
3549
3550 /* Run an init stage's NULL-terminated function table */
3551 static int hci_init_stage_sync(struct hci_dev *hdev,
3552                                const struct hci_init_stage *stage)
3553 {
3554         size_t i;
3555
3556         for (i = 0; stage[i].func; i++) {
3557                 int err;
3558
3559                 err = stage[i].func(hdev);
3560                 if (err)
3561                         return err;
3562         }
3563
3564         return 0;
3565 }
3566
3567 /* Read Local Version */
3568 static int hci_read_local_version_sync(struct hci_dev *hdev)
3569 {
3570         return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION,
3571                                      0, NULL, HCI_CMD_TIMEOUT);
3572 }
3573
3574 /* Read BD Address */
3575 static int hci_read_bd_addr_sync(struct hci_dev *hdev)
3576 {
3577         return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR,
3578                                      0, NULL, HCI_CMD_TIMEOUT);
3579 }
3580
3581 #define HCI_INIT(_func) \
3582 { \
3583         .func = _func, \
3584 }
3585
3586 static const struct hci_init_stage hci_init0[] = {
3587         /* HCI_OP_READ_LOCAL_VERSION */
3588         HCI_INIT(hci_read_local_version_sync),
3589         /* HCI_OP_READ_BD_ADDR */
3590         HCI_INIT(hci_read_bd_addr_sync),
3591         {}
3592 };
3593
3594 int hci_reset_sync(struct hci_dev *hdev)
3595 {
3596         int err;
3597
3598         set_bit(HCI_RESET, &hdev->flags);
3599
3600         err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
3601                                     HCI_CMD_TIMEOUT);
3602         if (err)
3603                 return err;
3604
3605         return 0;
3606 }
3607
3608 static int hci_init0_sync(struct hci_dev *hdev)
3609 {
3610         int err;
3611
3612         bt_dev_dbg(hdev, "");
3613
3614         /* Reset */
3615         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
3616                 err = hci_reset_sync(hdev);
3617                 if (err)
3618                         return err;
3619         }
3620
3621         return hci_init_stage_sync(hdev, hci_init0);
3622 }
3623
3624 static int hci_unconf_init_sync(struct hci_dev *hdev)
3625 {
3626         int err;
3627
3628         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3629                 return 0;
3630
3631         err = hci_init0_sync(hdev);
3632         if (err < 0)
3633                 return err;
3634
3635         if (hci_dev_test_flag(hdev, HCI_SETUP))
3636                 hci_debugfs_create_basic(hdev);
3637
3638         return 0;
3639 }
3640
3641 /* Read Local Supported Features. */
3642 static int hci_read_local_features_sync(struct hci_dev *hdev)
3643 {
3644         return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES,
3645                                      0, NULL, HCI_CMD_TIMEOUT);
3646 }
3647
3648 /* BR Controller init stage 1 command sequence */
3649 static const struct hci_init_stage br_init1[] = {
3650         /* HCI_OP_READ_LOCAL_FEATURES */
3651         HCI_INIT(hci_read_local_features_sync),
3652         /* HCI_OP_READ_LOCAL_VERSION */
3653         HCI_INIT(hci_read_local_version_sync),
3654         /* HCI_OP_READ_BD_ADDR */
3655         HCI_INIT(hci_read_bd_addr_sync),
3656         {}
3657 };
3658
3659 /* Read Local Commands */
3660 static int hci_read_local_cmds_sync(struct hci_dev *hdev)
3661 {
3662         /* All Bluetooth 1.2 and later controllers should support the
3663          * HCI command for reading the local supported commands.
3664          *
3665          * Unfortunately some controllers indicate Bluetooth 1.2 support,
3666          * but do not have support for this command. If that is the case,
3667          * the driver can quirk the behavior and skip reading the local
3668          * supported commands.
3669          */
3670         if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
3671             !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
3672                 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS,
3673                                              0, NULL, HCI_CMD_TIMEOUT);
3674
3675         return 0;
3676 }
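
/* Worked example of the supported-commands bitmap used throughout this
 * file: a check such as hdev->commands[6] & 0x20 tests octet 6, bit 5 of
 * the Read Local Supported Commands response, which is how the guards for
 * optional commands (e.g. Read Stored Link Key below) are derived.
 */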
3677
3678 static int hci_init1_sync(struct hci_dev *hdev)
3679 {
3680         int err;
3681
3682         bt_dev_dbg(hdev, "");
3683
3684         /* Reset */
3685         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
3686                 err = hci_reset_sync(hdev);
3687                 if (err)
3688                         return err;
3689         }
3690
3691         return hci_init_stage_sync(hdev, br_init1);
3692 }
3693
3694 /* Read Buffer Size (ACL MTU, max packet count, etc.) */
3695 static int hci_read_buffer_size_sync(struct hci_dev *hdev)
3696 {
3697         return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE,
3698                                      0, NULL, HCI_CMD_TIMEOUT);
3699 }
3700
3701 /* Read Class of Device */
3702 static int hci_read_dev_class_sync(struct hci_dev *hdev)
3703 {
3704         return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV,
3705                                      0, NULL, HCI_CMD_TIMEOUT);
3706 }
3707
3708 /* Read Local Name */
3709 static int hci_read_local_name_sync(struct hci_dev *hdev)
3710 {
3711         return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME,
3712                                      0, NULL, HCI_CMD_TIMEOUT);
3713 }
3714
3715 /* Read Voice Setting */
3716 static int hci_read_voice_setting_sync(struct hci_dev *hdev)
3717 {
3718         return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING,
3719                                      0, NULL, HCI_CMD_TIMEOUT);
3720 }
3721
3722 /* Read Number of Supported IAC */
3723 static int hci_read_num_supported_iac_sync(struct hci_dev *hdev)
3724 {
3725         return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC,
3726                                      0, NULL, HCI_CMD_TIMEOUT);
3727 }
3728
3729 /* Read Current IAC LAP */
3730 static int hci_read_current_iac_lap_sync(struct hci_dev *hdev)
3731 {
3732         return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP,
3733                                      0, NULL, HCI_CMD_TIMEOUT);
3734 }
3735
3736 static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type,
3737                                      u8 cond_type, bdaddr_t *bdaddr,
3738                                      u8 auto_accept)
3739 {
3740         struct hci_cp_set_event_filter cp;
3741
3742         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3743                 return 0;
3744
3745         if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
3746                 return 0;
3747
3748         memset(&cp, 0, sizeof(cp));
3749         cp.flt_type = flt_type;
3750
3751         if (flt_type != HCI_FLT_CLEAR_ALL) {
3752                 cp.cond_type = cond_type;
3753                 bacpy(&cp.addr_conn_flt.bdaddr, bdaddr);
3754                 cp.addr_conn_flt.auto_accept = auto_accept;
3755         }
3756
3757         return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT,
3758                                      flt_type == HCI_FLT_CLEAR_ALL ?
3759                                      sizeof(cp.flt_type) : sizeof(cp), &cp,
3760                                      HCI_CMD_TIMEOUT);
3761 }
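
/* Illustrative sketch (an assumption, not a call site shown here):
 * programming a Connection Setup filter that auto-accepts a given peer
 * would look like:
 *
 *	err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
 *					HCI_CONN_SETUP_ALLOW_BDADDR,
 *					&bdaddr, HCI_CONN_SETUP_AUTO_ON);
 */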
3762
3763 static int hci_clear_event_filter_sync(struct hci_dev *hdev)
3764 {
3765         if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED))
3766                 return 0;
3767
3768         /* In theory the state machine should not reach here unless
3769          * a hci_set_event_filter_sync() call succeeds, but we do
3770          * the check both for parity and as a future reminder.
3771          */
3772         if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
3773                 return 0;
3774
3775         return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
3776                                          BDADDR_ANY, 0x00);
3777 }
3778
3779 /* Connection accept timeout ~20 secs */
3780 static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
3781 {
3782         __le16 param = cpu_to_le16(0x7d00);
3783
3784         return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT,
3785                                      sizeof(param), &param, HCI_CMD_TIMEOUT);
3786 }
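
/* Worked example: 0x7d00 slots * 0.625 ms = 32000 * 0.625 ms = 20000 ms,
 * i.e. the ~20 second connection accept timeout noted above.
 */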
3787
3788 /* BR Controller init stage 2 command sequence */
3789 static const struct hci_init_stage br_init2[] = {
3790         /* HCI_OP_READ_BUFFER_SIZE */
3791         HCI_INIT(hci_read_buffer_size_sync),
3792         /* HCI_OP_READ_CLASS_OF_DEV */
3793         HCI_INIT(hci_read_dev_class_sync),
3794         /* HCI_OP_READ_LOCAL_NAME */
3795         HCI_INIT(hci_read_local_name_sync),
3796         /* HCI_OP_READ_VOICE_SETTING */
3797         HCI_INIT(hci_read_voice_setting_sync),
3798         /* HCI_OP_READ_NUM_SUPPORTED_IAC */
3799         HCI_INIT(hci_read_num_supported_iac_sync),
3800         /* HCI_OP_READ_CURRENT_IAC_LAP */
3801         HCI_INIT(hci_read_current_iac_lap_sync),
3802         /* HCI_OP_SET_EVENT_FLT */
3803         HCI_INIT(hci_clear_event_filter_sync),
3804         /* HCI_OP_WRITE_CA_TIMEOUT */
3805         HCI_INIT(hci_write_ca_timeout_sync),
3806         {}
3807 };
3808
3809 static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev)
3810 {
3811         u8 mode = 0x01;
3812
3813         if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
3814                 return 0;
3815
3816         /* When SSP is available, the host features page
3817          * should also be available. However some
3818          * controllers list the max_page as 0 as long as SSP
3819          * has not been enabled. To achieve proper debugging
3820          * output, force max_page to a minimum of 1.
3821          */
3822         hdev->max_page = 0x01;
3823
3824         return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
3825                                      sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3826 }
3827
3828 static int hci_write_eir_sync(struct hci_dev *hdev)
3829 {
3830         struct hci_cp_write_eir cp;
3831
3832         if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
3833                 return 0;
3834
3835         memset(hdev->eir, 0, sizeof(hdev->eir));
3836         memset(&cp, 0, sizeof(cp));
3837
3838         return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
3839                                      HCI_CMD_TIMEOUT);
3840 }
3841
3842 static int hci_write_inquiry_mode_sync(struct hci_dev *hdev)
3843 {
3844         u8 mode;
3845
3846         if (!lmp_inq_rssi_capable(hdev) &&
3847             !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
3848                 return 0;
3849
3850         /* If Extended Inquiry Result events are supported, then
3851          * they are clearly preferred over Inquiry Result with RSSI
3852          * events.
3853          */
3854         mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
3855
3856         return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE,
3857                                      sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3858 }
3859
3860 static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev)
3861 {
3862         if (!lmp_inq_tx_pwr_capable(hdev))
3863                 return 0;
3864
3865         return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER,
3866                                      0, NULL, HCI_CMD_TIMEOUT);
3867 }
3868
3869 static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page)
3870 {
3871         struct hci_cp_read_local_ext_features cp;
3872
3873         if (!lmp_ext_feat_capable(hdev))
3874                 return 0;
3875
3876         memset(&cp, 0, sizeof(cp));
3877         cp.page = page;
3878
3879         return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
3880                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3881 }
3882
3883 static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev)
3884 {
3885         return hci_read_local_ext_features_sync(hdev, 0x01);
3886 }
3887
3888 /* HCI Controller init stage 2 command sequence */
3889 static const struct hci_init_stage hci_init2[] = {
3890         /* HCI_OP_READ_LOCAL_COMMANDS */
3891         HCI_INIT(hci_read_local_cmds_sync),
3892         /* HCI_OP_WRITE_SSP_MODE */
3893         HCI_INIT(hci_write_ssp_mode_1_sync),
3894         /* HCI_OP_WRITE_EIR */
3895         HCI_INIT(hci_write_eir_sync),
3896         /* HCI_OP_WRITE_INQUIRY_MODE */
3897         HCI_INIT(hci_write_inquiry_mode_sync),
3898         /* HCI_OP_READ_INQ_RSP_TX_POWER */
3899         HCI_INIT(hci_read_inq_rsp_tx_power_sync),
3900         /* HCI_OP_READ_LOCAL_EXT_FEATURES */
3901         HCI_INIT(hci_read_local_ext_features_1_sync),
3902         /* HCI_OP_WRITE_AUTH_ENABLE */
3903         HCI_INIT(hci_write_auth_enable_sync),
3904         {}
3905 };
3906
3907 /* Read LE Buffer Size */
3908 static int hci_le_read_buffer_size_sync(struct hci_dev *hdev)
3909 {
3910         /* Use Read LE Buffer Size V2 if supported */
3911         if (iso_capable(hdev) && hdev->commands[41] & 0x20)
3912                 return __hci_cmd_sync_status(hdev,
3913                                              HCI_OP_LE_READ_BUFFER_SIZE_V2,
3914                                              0, NULL, HCI_CMD_TIMEOUT);
3915
3916         return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE,
3917                                      0, NULL, HCI_CMD_TIMEOUT);
3918 }
3919
3920 /* Read LE Local Supported Features */
3921 static int hci_le_read_local_features_sync(struct hci_dev *hdev)
3922 {
3923         return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES,
3924                                      0, NULL, HCI_CMD_TIMEOUT);
3925 }
3926
3927 /* Read LE Supported States */
3928 static int hci_le_read_supported_states_sync(struct hci_dev *hdev)
3929 {
3930         return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES,
3931                                      0, NULL, HCI_CMD_TIMEOUT);
3932 }
3933
3934 /* LE Controller init stage 2 command sequence */
3935 static const struct hci_init_stage le_init2[] = {
3936         /* HCI_OP_LE_READ_LOCAL_FEATURES */
3937         HCI_INIT(hci_le_read_local_features_sync),
3938         /* HCI_OP_LE_READ_BUFFER_SIZE */
3939         HCI_INIT(hci_le_read_buffer_size_sync),
3940         /* HCI_OP_LE_READ_SUPPORTED_STATES */
3941         HCI_INIT(hci_le_read_supported_states_sync),
3942         {}
3943 };
3944
3945 static int hci_init2_sync(struct hci_dev *hdev)
3946 {
3947         int err;
3948
3949         bt_dev_dbg(hdev, "");
3950
3951         err = hci_init_stage_sync(hdev, hci_init2);
3952         if (err)
3953                 return err;
3954
3955         if (lmp_bredr_capable(hdev)) {
3956                 err = hci_init_stage_sync(hdev, br_init2);
3957                 if (err)
3958                         return err;
3959         } else {
3960                 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
3961         }
3962
3963         if (lmp_le_capable(hdev)) {
3964                 err = hci_init_stage_sync(hdev, le_init2);
3965                 if (err)
3966                         return err;
3967                 /* LE-only controllers have LE implicitly enabled */
3968                 if (!lmp_bredr_capable(hdev))
3969                         hci_dev_set_flag(hdev, HCI_LE_ENABLED);
3970         }
3971
3972         return 0;
3973 }
3974
3975 static int hci_set_event_mask_sync(struct hci_dev *hdev)
3976 {
3977         /* The second byte is 0xff instead of 0x9f (two reserved bits
3978          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
3979          * command otherwise.
3980          */
3981         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
3982
3983         /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
3984          * any event mask for pre-1.2 devices.
3985          */
3986         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
3987                 return 0;
3988
3989         if (lmp_bredr_capable(hdev)) {
3990                 events[4] |= 0x01; /* Flow Specification Complete */
3991
3992                 /* Don't set Disconnect Complete and mode change when
3993                  * suspended as that would wakeup the host when disconnecting
3994                  * due to suspend.
3995                  */
3996                 if (hdev->suspended) {
3997                         events[0] &= 0xef;
3998                         events[2] &= 0xf7;
3999                 }
4000         } else {
4001                 /* Use a different default for LE-only devices */
4002                 memset(events, 0, sizeof(events));
4003                 events[1] |= 0x20; /* Command Complete */
4004                 events[1] |= 0x40; /* Command Status */
4005                 events[1] |= 0x80; /* Hardware Error */
4006
4007                 /* If the controller supports the Disconnect command, enable
4008                  * the corresponding event. In addition enable packet flow
4009                  * control related events.
4010                  */
4011                 if (hdev->commands[0] & 0x20) {
4012                         /* Don't set Disconnect Complete when suspended as that
4013                          * would wakeup the host when disconnecting due to
4014                          * suspend.
4015                          */
4016                         if (!hdev->suspended)
4017                                 events[0] |= 0x10; /* Disconnection Complete */
4018                         events[2] |= 0x04; /* Number of Completed Packets */
4019                         events[3] |= 0x02; /* Data Buffer Overflow */
4020                 }
4021
4022                 /* If the controller supports the Read Remote Version
4023                  * Information command, enable the corresponding event.
4024                  */
4025                 if (hdev->commands[2] & 0x80)
4026                         events[1] |= 0x08; /* Read Remote Version Information
4027                                             * Complete
4028                                             */
4029
4030                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
4031                         events[0] |= 0x80; /* Encryption Change */
4032                         events[5] |= 0x80; /* Encryption Key Refresh Complete */
4033                 }
4034         }
4035
4036         if (lmp_inq_rssi_capable(hdev) ||
4037             test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
4038                 events[4] |= 0x02; /* Inquiry Result with RSSI */
4039
4040         if (lmp_ext_feat_capable(hdev))
4041                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
4042
4043         if (lmp_esco_capable(hdev)) {
4044                 events[5] |= 0x08; /* Synchronous Connection Complete */
4045                 events[5] |= 0x10; /* Synchronous Connection Changed */
4046         }
4047
4048         if (lmp_sniffsubr_capable(hdev))
4049                 events[5] |= 0x20; /* Sniff Subrating */
4050
4051         if (lmp_pause_enc_capable(hdev))
4052                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
4053
4054         if (lmp_ext_inq_capable(hdev))
4055                 events[5] |= 0x40; /* Extended Inquiry Result */
4056
4057         if (lmp_no_flush_capable(hdev))
4058                 events[7] |= 0x01; /* Enhanced Flush Complete */
4059
4060         if (lmp_lsto_capable(hdev))
4061                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
4062
4063         if (lmp_ssp_capable(hdev)) {
4064                 events[6] |= 0x01;      /* IO Capability Request */
4065                 events[6] |= 0x02;      /* IO Capability Response */
4066                 events[6] |= 0x04;      /* User Confirmation Request */
4067                 events[6] |= 0x08;      /* User Passkey Request */
4068                 events[6] |= 0x10;      /* Remote OOB Data Request */
4069                 events[6] |= 0x20;      /* Simple Pairing Complete */
4070                 events[7] |= 0x04;      /* User Passkey Notification */
4071                 events[7] |= 0x08;      /* Keypress Notification */
4072                 events[7] |= 0x10;      /* Remote Host Supported
4073                                          * Features Notification
4074                                          */
4075         }
4076
4077         if (lmp_le_capable(hdev))
4078                 events[7] |= 0x20;      /* LE Meta-Event */
4079
4080         return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK,
4081                                      sizeof(events), events, HCI_CMD_TIMEOUT);
4082 }
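
/* Worked example of the mask layout (bit (N - 1) % 8 of byte (N - 1) / 8
 * enables event code N): Inquiry Result with RSSI has event code 0x22,
 * so it lands in byte 0x21 / 8 = 4 at bit 0x21 % 8 = 1, which is the
 * events[4] |= 0x02 assignment above.
 */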
4083
4084 static int hci_read_stored_link_key_sync(struct hci_dev *hdev)
4085 {
4086         struct hci_cp_read_stored_link_key cp;
4087
4088         if (!(hdev->commands[6] & 0x20) ||
4089             test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
4090                 return 0;
4091
4092         memset(&cp, 0, sizeof(cp));
4093         bacpy(&cp.bdaddr, BDADDR_ANY);
4094         cp.read_all = 0x01;
4095
4096         return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY,
4097                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4098 }
4099
4100 static int hci_setup_link_policy_sync(struct hci_dev *hdev)
4101 {
4102         struct hci_cp_write_def_link_policy cp;
4103         u16 link_policy = 0;
4104
4105         if (!(hdev->commands[5] & 0x10))
4106                 return 0;
4107
4108         memset(&cp, 0, sizeof(cp));
4109
4110         if (lmp_rswitch_capable(hdev))
4111                 link_policy |= HCI_LP_RSWITCH;
4112         if (lmp_hold_capable(hdev))
4113                 link_policy |= HCI_LP_HOLD;
4114         if (lmp_sniff_capable(hdev))
4115                 link_policy |= HCI_LP_SNIFF;
4116         if (lmp_park_capable(hdev))
4117                 link_policy |= HCI_LP_PARK;
4118
4119         cp.policy = cpu_to_le16(link_policy);
4120
4121         return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
4122                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4123 }
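
/* Worked example: a controller that supports role switch and sniff mode
 * but neither hold nor park ends up with link_policy = HCI_LP_RSWITCH |
 * HCI_LP_SNIFF = 0x0001 | 0x0004 = 0x0005, sent as cpu_to_le16(0x0005).
 */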
4124
4125 static int hci_read_page_scan_activity_sync(struct hci_dev *hdev)
4126 {
4127         if (!(hdev->commands[8] & 0x01))
4128                 return 0;
4129
4130         return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY,
4131                                      0, NULL, HCI_CMD_TIMEOUT);
4132 }
4133
4134 static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev)
4135 {
4136         if (!(hdev->commands[18] & 0x04) ||
4137             !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
4138             test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
4139                 return 0;
4140
4141         return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4142                                      0, NULL, HCI_CMD_TIMEOUT);
4143 }
4144
4145 static int hci_read_page_scan_type_sync(struct hci_dev *hdev)
4146 {
4147         /* Some older Broadcom based Bluetooth 1.2 controllers do not
4148          * support the Read Page Scan Type command. Check support for
4149          * this command in the bit mask of supported commands.
4150          */
4151         if (!(hdev->commands[13] & 0x01))
4152                 return 0;
4153
4154         return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE,
4155                                      0, NULL, HCI_CMD_TIMEOUT);
4156 }
4157
4158 /* Read features beyond page 1 if available */
4159 static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev)
4160 {
4161         u8 page;
4162         int err;
4163
4164         if (!lmp_ext_feat_capable(hdev))
4165                 return 0;
4166
4167         for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page;
4168              page++) {
4169                 err = hci_read_local_ext_features_sync(hdev, page);
4170                 if (err)
4171                         return err;
4172         }
4173
4174         return 0;
4175 }
4176
4177 /* HCI Controller init stage 3 command sequence */
4178 static const struct hci_init_stage hci_init3[] = {
4179         /* HCI_OP_SET_EVENT_MASK */
4180         HCI_INIT(hci_set_event_mask_sync),
4181         /* HCI_OP_READ_STORED_LINK_KEY */
4182         HCI_INIT(hci_read_stored_link_key_sync),
4183         /* HCI_OP_WRITE_DEF_LINK_POLICY */
4184         HCI_INIT(hci_setup_link_policy_sync),
4185         /* HCI_OP_READ_PAGE_SCAN_ACTIVITY */
4186         HCI_INIT(hci_read_page_scan_activity_sync),
4187         /* HCI_OP_READ_DEF_ERR_DATA_REPORTING */
4188         HCI_INIT(hci_read_def_err_data_reporting_sync),
4189         /* HCI_OP_READ_PAGE_SCAN_TYPE */
4190         HCI_INIT(hci_read_page_scan_type_sync),
4191         /* HCI_OP_READ_LOCAL_EXT_FEATURES */
4192         HCI_INIT(hci_read_local_ext_features_all_sync),
4193         {}
4194 };
4195
4196 static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
4197 {
4198         u8 events[8];
4199
4200         if (!lmp_le_capable(hdev))
4201                 return 0;
4202
4203         memset(events, 0, sizeof(events));
4204
4205         if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
4206                 events[0] |= 0x10;      /* LE Long Term Key Request */
4207
4208         /* If the controller supports the Connection Parameters Request
4209          * Link Layer Procedure, enable the corresponding event.
4210          */
4211         if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
4212                 /* LE Remote Connection Parameter Request */
4213                 events[0] |= 0x20;
4214
4215         /* If the controller supports the Data Length Extension
4216          * feature, enable the corresponding event.
4217          */
4218         if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
4219                 events[0] |= 0x40;      /* LE Data Length Change */
4220
4221         /* If the controller supports the LL Privacy feature or LE
4222          * Extended Advertising, enable the corresponding event.
4223          */
4224         if (use_enhanced_conn_complete(hdev))
4225                 events[1] |= 0x02;      /* LE Enhanced Connection Complete */
4226
4227         /* If the controller supports Extended Scanner Filter
4228          * Policies, enable the corresponding event.
4229          */
4230         if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
4231                 events[1] |= 0x04;      /* LE Direct Advertising Report */
4232
4233         /* If the controller supports Channel Selection Algorithm #2
4234          * feature, enable the corresponding event.
4235          */
4236         if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
4237                 events[2] |= 0x08;      /* LE Channel Selection Algorithm */
4238
4239         /* If the controller supports the LE Set Scan Enable command,
4240          * enable the corresponding advertising report event.
4241          */
4242         if (hdev->commands[26] & 0x08)
4243                 events[0] |= 0x02;      /* LE Advertising Report */
4244
4245         /* If the controller supports the LE Create Connection
4246          * command, enable the corresponding event.
4247          */
4248         if (hdev->commands[26] & 0x10)
4249                 events[0] |= 0x01;      /* LE Connection Complete */
4250
4251         /* If the controller supports the LE Connection Update
4252          * command, enable the corresponding event.
4253          */
4254         if (hdev->commands[27] & 0x04)
4255                 events[0] |= 0x04;      /* LE Connection Update Complete */
4256
4257         /* If the controller supports the LE Read Remote Used Features
4258          * command, enable the corresponding event.
4259          */
4260         if (hdev->commands[27] & 0x20)
4261                 /* LE Read Remote Used Features Complete */
4262                 events[0] |= 0x08;
4263
4264         /* If the controller supports the LE Read Local P-256
4265          * Public Key command, enable the corresponding event.
4266          */
4267         if (hdev->commands[34] & 0x02)
4268                 /* LE Read Local P-256 Public Key Complete */
4269                 events[0] |= 0x80;
4270
4271         /* If the controller supports the LE Generate DHKey
4272          * command, enable the corresponding event.
4273          */
4274         if (hdev->commands[34] & 0x04)
4275                 events[1] |= 0x01;      /* LE Generate DHKey Complete */
4276
4277         /* If the controller supports the LE Set Default PHY or
4278          * LE Set PHY commands, enable the corresponding event.
4279          */
4280         if (hdev->commands[35] & (0x20 | 0x40))
4281                 events[1] |= 0x08;        /* LE PHY Update Complete */
4282
4283         /* If the controller supports LE Set Extended Scan Parameters
4284          * and LE Set Extended Scan Enable commands, enable the
4285          * corresponding event.
4286          */
4287         if (use_ext_scan(hdev))
4288                 events[1] |= 0x10;      /* LE Extended Advertising Report */
4289
4290         /* If the controller supports the LE Extended Advertising
4291          * command, enable the corresponding event.
4292          */
4293         if (ext_adv_capable(hdev))
4294                 events[2] |= 0x02;      /* LE Advertising Set Terminated */
4295
4296         if (cis_capable(hdev)) {
4297                 events[3] |= 0x01;      /* LE CIS Established */
4298                 if (cis_peripheral_capable(hdev))
4299                         events[3] |= 0x02; /* LE CIS Request */
4300         }
4301
4302         if (bis_capable(hdev)) {
4303                 events[1] |= 0x20;      /* LE PA Report */
4304                 events[1] |= 0x40;      /* LE PA Sync Established */
4305                 events[3] |= 0x04;      /* LE Create BIG Complete */
4306                 events[3] |= 0x08;      /* LE Terminate BIG Complete */
4307                 events[3] |= 0x10;      /* LE BIG Sync Established */
4308                 events[3] |= 0x20;      /* LE BIG Sync Loss */
4309                 events[4] |= 0x02;      /* LE BIG Info Advertising Report */
4310         }
4311
4312         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK,
4313                                      sizeof(events), events, HCI_CMD_TIMEOUT);
4314 }
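
/* Worked example of the LE event mask layout (bit (N - 1) % 8 of byte
 * (N - 1) / 8 enables LE subevent N): LE Connection Complete is subevent
 * 0x01 and maps to events[0] bit 0 (0x01), while LE Advertising Report is
 * subevent 0x02 and maps to events[0] bit 1 (0x02), as set above.
 */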
4315
4316 /* Read LE Advertising Channel TX Power */
4317 static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev)
4318 {
4319         if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
4320                 /* The HCI spec forbids mixing legacy and extended
4321                  * advertising commands, and READ_ADV_TX_POWER belongs to
4322                  * the legacy set. So do not call it if extended advertising
4323                  * is supported, otherwise the controller will return
4324                  * COMMAND_DISALLOWED for the extended commands.
4325                  */
4326                 return __hci_cmd_sync_status(hdev,
4327                                                HCI_OP_LE_READ_ADV_TX_POWER,
4328                                                0, NULL, HCI_CMD_TIMEOUT);
4329         }
4330
4331         return 0;
4332 }
4333
4334 /* Read LE Min/Max Tx Power */
4335 static int hci_le_read_tx_power_sync(struct hci_dev *hdev)
4336 {
4337         if (!(hdev->commands[38] & 0x80) ||
4338             test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks))
4339                 return 0;
4340
4341         return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER,
4342                                      0, NULL, HCI_CMD_TIMEOUT);
4343 }
4344
4345 /* Read LE Accept List Size */
4346 static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev)
4347 {
4348         if (!(hdev->commands[26] & 0x40))
4349                 return 0;
4350
4351         return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4352                                      0, NULL, HCI_CMD_TIMEOUT);
4353 }
4354
4355 /* Read LE Resolving List Size */
4356 static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev)
4357 {
4358         if (!(hdev->commands[34] & 0x40))
4359                 return 0;
4360
4361         return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
4362                                      0, NULL, HCI_CMD_TIMEOUT);
4363 }
4364
4365 /* Clear LE Resolving List */
4366 static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev)
4367 {
4368         if (!(hdev->commands[34] & 0x20))
4369                 return 0;
4370
4371         return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL,
4372                                      HCI_CMD_TIMEOUT);
4373 }
4374
4375 /* Set RPA timeout */
4376 static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev)
4377 {
4378         __le16 timeout = cpu_to_le16(hdev->rpa_timeout);
4379
4380         if (!(hdev->commands[35] & 0x04) ||
4381             test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks))
4382                 return 0;
4383
4384         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT,
4385                                      sizeof(timeout), &timeout,
4386                                      HCI_CMD_TIMEOUT);
4387 }
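
/* Worked example: hdev->rpa_timeout is in seconds, so with the stack's
 * usual 15 minute default this sends cpu_to_le16(900). (The default value
 * is stated here as an assumption about hdev initialization elsewhere.)
 */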
4388
4389 /* Read LE Maximum Data Length */
4390 static int hci_le_read_max_data_len_sync(struct hci_dev *hdev)
4391 {
4392         if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4393                 return 0;
4394
4395         return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL,
4396                                      HCI_CMD_TIMEOUT);
4397 }
4398
4399 /* Read LE Suggested Default Data Length */
4400 static int hci_le_read_def_data_len_sync(struct hci_dev *hdev)
4401 {
4402         if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4403                 return 0;
4404
4405         return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL,
4406                                      HCI_CMD_TIMEOUT);
4407 }
4408
4409 /* Read LE Number of Supported Advertising Sets */
4410 static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev)
4411 {
4412         if (!ext_adv_capable(hdev))
4413                 return 0;
4414
4415         return __hci_cmd_sync_status(hdev,
4416                                      HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4417                                      0, NULL, HCI_CMD_TIMEOUT);
4418 }
4419
4420 /* Write LE Host Supported */
4421 static int hci_set_le_support_sync(struct hci_dev *hdev)
4422 {
4423         struct hci_cp_write_le_host_supported cp;
4424
4425         /* LE-only devices do not support explicit enablement */
4426         if (!lmp_bredr_capable(hdev))
4427                 return 0;
4428
4429         memset(&cp, 0, sizeof(cp));
4430
4431         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
4432                 cp.le = 0x01;
4433                 cp.simul = 0x00;
4434         }
4435
4436         if (cp.le == lmp_host_le_capable(hdev))
4437                 return 0;
4438
4439         return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4440                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4441 }
4442
4443 /* LE Set Host Feature */
4444 static int hci_le_set_host_feature_sync(struct hci_dev *hdev)
4445 {
4446         struct hci_cp_le_set_host_feature cp;
4447
4448         if (!cis_capable(hdev))
4449                 return 0;
4450
4451         memset(&cp, 0, sizeof(cp));
4452
4453         /* Connected Isochronous Channels (Host Support) */
4454         cp.bit_number = 32;
4455         cp.bit_value = 1;
4456
4457         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE,
4458                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4459 }
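
/* For reference: LE Set Host Feature writes a single bit of the
 * host-controlled portion of the LE FeatureSet, with bit_number
 * selecting the feature bit and bit_value enabling (1) or disabling (0)
 * it. Bit 32 is "Connected Isochronous Stream (Host Support)" in the
 * Core spec feature listing, which is why it is hardcoded above for
 * CIS-capable controllers.
 */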
4460
4461 /* LE Controller init stage 3 command sequence */
4462 static const struct hci_init_stage le_init3[] = {
4463         /* HCI_OP_LE_SET_EVENT_MASK */
4464         HCI_INIT(hci_le_set_event_mask_sync),
4465         /* HCI_OP_LE_READ_ADV_TX_POWER */
4466         HCI_INIT(hci_le_read_adv_tx_power_sync),
4467         /* HCI_OP_LE_READ_TRANSMIT_POWER */
4468         HCI_INIT(hci_le_read_tx_power_sync),
4469         /* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */
4470         HCI_INIT(hci_le_read_accept_list_size_sync),
4471         /* HCI_OP_LE_CLEAR_ACCEPT_LIST */
4472         HCI_INIT(hci_le_clear_accept_list_sync),
4473         /* HCI_OP_LE_READ_RESOLV_LIST_SIZE */
4474         HCI_INIT(hci_le_read_resolv_list_size_sync),
4475         /* HCI_OP_LE_CLEAR_RESOLV_LIST */
4476         HCI_INIT(hci_le_clear_resolv_list_sync),
4477         /* HCI_OP_LE_SET_RPA_TIMEOUT */
4478         HCI_INIT(hci_le_set_rpa_timeout_sync),
4479         /* HCI_OP_LE_READ_MAX_DATA_LEN */
4480         HCI_INIT(hci_le_read_max_data_len_sync),
4481         /* HCI_OP_LE_READ_DEF_DATA_LEN */
4482         HCI_INIT(hci_le_read_def_data_len_sync),
4483         /* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */
4484         HCI_INIT(hci_le_read_num_support_adv_sets_sync),
4485         /* HCI_OP_WRITE_LE_HOST_SUPPORTED */
4486         HCI_INIT(hci_set_le_support_sync),
4487         /* HCI_OP_LE_SET_HOST_FEATURE */
4488         HCI_INIT(hci_le_set_host_feature_sync),
4489         {}
4490 };
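
/* Each init-stage table is a {}-terminated array of callbacks that
 * hci_init_stage_sync() (defined earlier in this file) runs in order,
 * aborting on the first non-zero return. Roughly:
 *
 *	for (i = 0; stage[i].func; i++) {
 *		err = stage[i].func(hdev);
 *		if (err)
 *			return err;
 *	}
 *
 * Each callback returns 0 both on success and when it skips a command
 * the controller does not support, so an unsupported command never
 * aborts the init sequence.
 */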
4491
4492 static int hci_init3_sync(struct hci_dev *hdev)
4493 {
4494         int err;
4495
4496         bt_dev_dbg(hdev, "");
4497
4498         err = hci_init_stage_sync(hdev, hci_init3);
4499         if (err)
4500                 return err;
4501
4502         if (lmp_le_capable(hdev))
4503                 return hci_init_stage_sync(hdev, le_init3);
4504
4505         return 0;
4506 }
4507
4508 static int hci_delete_stored_link_key_sync(struct hci_dev *hdev)
4509 {
4510         struct hci_cp_delete_stored_link_key cp;
4511
4512         /* Some Broadcom based Bluetooth controllers do not support the
4513          * Delete Stored Link Key command. They are clearly indicating its
4514          * absence in the bit mask of supported commands.
4515          *
4516          * Check the supported commands and send it only if the command
4517          * is marked as supported. If not supported, assume that the
4518          * controller does not have actual support for stored link keys,
4519          * which makes this command redundant anyway.
4520          *
4521          * Some controllers indicate that they support handling deleting
4522          * stored link keys, but they don't. The quirk lets a driver
4523          * just disable this command.
4524          */
4525         if (!(hdev->commands[6] & 0x80) ||
4526             test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
4527                 return 0;
4528
4529         memset(&cp, 0, sizeof(cp));
4530         bacpy(&cp.bdaddr, BDADDR_ANY);
4531         cp.delete_all = 0x01;
4532
4533         return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
4534                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4535 }
4536
4537 static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
4538 {
4539         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
4540         bool changed = false;
4541
4542         /* Set event mask page 2 if the HCI command for it is supported */
4543         if (!(hdev->commands[22] & 0x04))
4544                 return 0;
4545
4546         /* If Connectionless Peripheral Broadcast central role is supported
4547          * enable all necessary events for it.
4548          */
4549         if (lmp_cpb_central_capable(hdev)) {
4550                 events[1] |= 0x40;      /* Triggered Clock Capture */
4551                 events[1] |= 0x80;      /* Synchronization Train Complete */
4552                 events[2] |= 0x08;      /* Truncated Page Complete */
4553                 events[2] |= 0x20;      /* CPB Channel Map Change */
4554                 changed = true;
4555         }
4556
4557         /* If Connectionless Peripheral Broadcast peripheral role is supported
4558          * enable all necessary events for it.
4559          */
4560         if (lmp_cpb_peripheral_capable(hdev)) {
4561                 events[2] |= 0x01;      /* Synchronization Train Received */
4562                 events[2] |= 0x02;      /* CPB Receive */
4563                 events[2] |= 0x04;      /* CPB Timeout */
4564                 events[2] |= 0x10;      /* Peripheral Page Response Timeout */
4565                 changed = true;
4566         }
4567
4568         /* Enable Authenticated Payload Timeout Expired event if supported */
4569         if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
4570                 events[2] |= 0x80;
4571                 changed = true;
4572         }
4573
4574         /* Some Broadcom based controllers indicate support for Set Event
4575          * Mask Page 2 command, but then actually do not support it. Since
4576          * the default value is all bits set to zero, the command is only
4577          * required if the event mask has to be changed. In case no change
4578          * to the event mask is needed, skip this command.
4579          */
4580         if (!changed)
4581                 return 0;
4582
4583         return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2,
4584                                      sizeof(events), events, HCI_CMD_TIMEOUT);
4585 }
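
/* The events[] array above is the Event Mask Page 2 parameter in
 * little-endian bit order, i.e. events[n] bit m is mask bit n * 8 + m.
 * For example, events[2] |= 0x80 sets bit 23, the Authenticated Payload
 * Timeout Expired event.
 */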
4586
4587 /* Read local codec list if the HCI command is supported */
4588 static int hci_read_local_codecs_sync(struct hci_dev *hdev)
4589 {
4590         if (hdev->commands[45] & 0x04)
4591                 hci_read_supported_codecs_v2(hdev);
4592         else if (hdev->commands[29] & 0x20)
4593                 hci_read_supported_codecs(hdev);
4594
4595         return 0;
4596 }
4597
4598 /* Read local pairing options if the HCI command is supported */
4599 static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev)
4600 {
4601         if (!(hdev->commands[41] & 0x08))
4602                 return 0;
4603
4604         return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS,
4605                                      0, NULL, HCI_CMD_TIMEOUT);
4606 }
4607
4608 /* Get MWS transport configuration if the HCI command is supported */
4609 static int hci_get_mws_transport_config_sync(struct hci_dev *hdev)
4610 {
4611         if (!mws_transport_config_capable(hdev))
4612                 return 0;
4613
4614         return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG,
4615                                      0, NULL, HCI_CMD_TIMEOUT);
4616 }
4617
4618 /* Check for Synchronization Train support */
4619 static int hci_read_sync_train_params_sync(struct hci_dev *hdev)
4620 {
4621         if (!lmp_sync_train_capable(hdev))
4622                 return 0;
4623
4624         return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS,
4625                                      0, NULL, HCI_CMD_TIMEOUT);
4626 }
4627
4628 /* Enable Secure Connections if supported and configured */
4629 static int hci_write_sc_support_1_sync(struct hci_dev *hdev)
4630 {
4631         u8 support = 0x01;
4632
4633         if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
4634             !bredr_sc_enabled(hdev))
4635                 return 0;
4636
4637         return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
4638                                      sizeof(support), &support,
4639                                      HCI_CMD_TIMEOUT);
4640 }
4641
4642 /* Set erroneous data reporting, if supported, to match the wideband
4643  * speech setting value.
4644  */
4645 static int hci_set_err_data_report_sync(struct hci_dev *hdev)
4646 {
4647         struct hci_cp_write_def_err_data_reporting cp;
4648         bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);
4649
4650         if (!(hdev->commands[18] & 0x08) ||
4651             !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
4652             test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
4653                 return 0;
4654
4655         if (enabled == hdev->err_data_reporting)
4656                 return 0;
4657
4658         memset(&cp, 0, sizeof(cp));
4659         cp.err_data_reporting = enabled ? ERR_DATA_REPORTING_ENABLED :
4660                                 ERR_DATA_REPORTING_DISABLED;
4661
4662         return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4663                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4664 }
4665
4666 static const struct hci_init_stage hci_init4[] = {
4667         /* HCI_OP_DELETE_STORED_LINK_KEY */
4668         HCI_INIT(hci_delete_stored_link_key_sync),
4669         /* HCI_OP_SET_EVENT_MASK_PAGE_2 */
4670         HCI_INIT(hci_set_event_mask_page_2_sync),
4671         /* HCI_OP_READ_LOCAL_CODECS */
4672         HCI_INIT(hci_read_local_codecs_sync),
4673         /* HCI_OP_READ_LOCAL_PAIRING_OPTS */
4674         HCI_INIT(hci_read_local_pairing_opts_sync),
4675         /* HCI_OP_GET_MWS_TRANSPORT_CONFIG */
4676         HCI_INIT(hci_get_mws_transport_config_sync),
4677         /* HCI_OP_READ_SYNC_TRAIN_PARAMS */
4678         HCI_INIT(hci_read_sync_train_params_sync),
4679         /* HCI_OP_WRITE_SC_SUPPORT */
4680         HCI_INIT(hci_write_sc_support_1_sync),
4681         /* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */
4682         HCI_INIT(hci_set_err_data_report_sync),
4683         {}
4684 };
4685
4686 /* Set Suggested Default Data Length to maximum if supported */
4687 static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev)
4688 {
4689         struct hci_cp_le_write_def_data_len cp;
4690
4691         if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4692                 return 0;
4693
4694         memset(&cp, 0, sizeof(cp));
4695         cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
4696         cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
4697
4698         return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
4699                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4700 }
4701
4702 /* Set Default PHY parameters if the command is supported; this enables
4703  * all supported PHYs according to the LE Features bits.
4704  */
4705 static int hci_le_set_default_phy_sync(struct hci_dev *hdev)
4706 {
4707         struct hci_cp_le_set_default_phy cp;
4708
4709         if (!(hdev->commands[35] & 0x20)) {
4710                 /* If the command is not supported it means only 1M PHY is
4711                  * supported.
4712                  */
4713                 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
4714                 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
4715                 return 0;
4716         }
4717
4718         memset(&cp, 0, sizeof(cp));
4719         cp.all_phys = 0x00;
4720         cp.tx_phys = HCI_LE_SET_PHY_1M;
4721         cp.rx_phys = HCI_LE_SET_PHY_1M;
4722
4723         /* Enables 2M PHY if supported */
4724         if (le_2m_capable(hdev)) {
4725                 cp.tx_phys |= HCI_LE_SET_PHY_2M;
4726                 cp.rx_phys |= HCI_LE_SET_PHY_2M;
4727         }
4728
4729         /* Enables Coded PHY if supported */
4730         if (le_coded_capable(hdev)) {
4731                 cp.tx_phys |= HCI_LE_SET_PHY_CODED;
4732                 cp.rx_phys |= HCI_LE_SET_PHY_CODED;
4733         }
4734
4735         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4736                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4737 }
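
/* The PHY constants used above are one-hot bits (HCI_LE_SET_PHY_1M is
 * 0x01, _2M is 0x02 and _CODED is 0x04), so a controller supporting
 * every PHY ends up with tx_phys == rx_phys == 0x07, while
 * all_phys == 0x00 tells the controller that the host expresses a
 * preference for both the TX and the RX PHY.
 */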
4738
4739 static const struct hci_init_stage le_init4[] = {
4740         /* HCI_OP_LE_WRITE_DEF_DATA_LEN */
4741         HCI_INIT(hci_le_set_write_def_data_len_sync),
4742         /* HCI_OP_LE_SET_DEFAULT_PHY */
4743         HCI_INIT(hci_le_set_default_phy_sync),
4744         {}
4745 };
4746
4747 static int hci_init4_sync(struct hci_dev *hdev)
4748 {
4749         int err;
4750
4751         bt_dev_dbg(hdev, "");
4752
4753         err = hci_init_stage_sync(hdev, hci_init4);
4754         if (err)
4755                 return err;
4756
4757         if (lmp_le_capable(hdev))
4758                 return hci_init_stage_sync(hdev, le_init4);
4759
4760         return 0;
4761 }
4762
4763 static int hci_init_sync(struct hci_dev *hdev)
4764 {
4765         int err;
4766
4767         err = hci_init1_sync(hdev);
4768         if (err < 0)
4769                 return err;
4770
4771         if (hci_dev_test_flag(hdev, HCI_SETUP))
4772                 hci_debugfs_create_basic(hdev);
4773
4774         err = hci_init2_sync(hdev);
4775         if (err < 0)
4776                 return err;
4777
4778         err = hci_init3_sync(hdev);
4779         if (err < 0)
4780                 return err;
4781
4782         err = hci_init4_sync(hdev);
4783         if (err < 0)
4784                 return err;
4785
4786         /* This function is only called when the controller is actually in
4787          * configured state. When the controller is marked as unconfigured,
4788          * this initialization procedure is not run.
4789          *
4790          * It means that it is possible that a controller runs through its
4791          * setup phase and then discovers missing settings. If that is the
4792          * case, then this function will not be called. It then will only
4793          * be called during the config phase.
4794          *
4795          * So only when in setup phase or config phase, create the debugfs
4796          * entries and register the SMP channels.
4797          */
4798         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4799             !hci_dev_test_flag(hdev, HCI_CONFIG))
4800                 return 0;
4801
4802         if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED))
4803                 return 0;
4804
4805         hci_debugfs_create_common(hdev);
4806
4807         if (lmp_bredr_capable(hdev))
4808                 hci_debugfs_create_bredr(hdev);
4809
4810         if (lmp_le_capable(hdev))
4811                 hci_debugfs_create_le(hdev);
4812
4813         return 0;
4814 }
4815
4816 #define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc }
4817
4818 static const struct {
4819         unsigned long quirk;
4820         const char *desc;
4821 } hci_broken_table[] = {
4822         HCI_QUIRK_BROKEN(LOCAL_COMMANDS,
4823                          "HCI Read Local Supported Commands not supported"),
4824         HCI_QUIRK_BROKEN(STORED_LINK_KEY,
4825                          "HCI Delete Stored Link Key command is advertised, "
4826                          "but not supported."),
4827         HCI_QUIRK_BROKEN(ERR_DATA_REPORTING,
4828                          "HCI Read Default Erroneous Data Reporting command is "
4829                          "advertised, but not supported."),
4830         HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER,
4831                          "HCI Read Transmit Power Level command is advertised, "
4832                          "but not supported."),
4833         HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL,
4834                          "HCI Set Event Filter command not supported."),
4835         HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN,
4836                          "HCI Enhanced Setup Synchronous Connection command is "
4837                          "advertised, but not supported."),
4838         HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT,
4839                          "HCI LE Set Random Private Address Timeout command is "
4840                          "advertised, but not supported."),
4841         HCI_QUIRK_BROKEN(LE_CODED,
4842                          "HCI LE Coded PHY feature bit is set, "
4843                          "but its usage is not supported.")
4844 };
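
/* For illustration, HCI_QUIRK_BROKEN() simply pairs a quirk constant
 * with its warning text, so that, for example:
 *
 *	HCI_QUIRK_BROKEN(LE_CODED, "...")
 *
 * expands to:
 *
 *	{ HCI_QUIRK_BROKEN_LE_CODED, "..." }
 *
 * The table is scanned in hci_dev_setup_sync() below, where every quirk
 * set by the driver produces a bt_dev_warn() with its description.
 */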
4845
4846 /* This function handles hdev setup stage:
4847  *
4848  * Calls hdev->setup
4849  * Sets up the address if HCI_QUIRK_USE_BDADDR_PROPERTY is set.
4850  */
4851 static int hci_dev_setup_sync(struct hci_dev *hdev)
4852 {
4853         int ret = 0;
4854         bool invalid_bdaddr;
4855         size_t i;
4856
4857         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4858             !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks))
4859                 return 0;
4860
4861         bt_dev_dbg(hdev, "");
4862
4863         hci_sock_dev_event(hdev, HCI_DEV_SETUP);
4864
4865         if (hdev->setup)
4866                 ret = hdev->setup(hdev);
4867
4868         for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) {
4869                 if (test_bit(hci_broken_table[i].quirk, &hdev->quirks))
4870                         bt_dev_warn(hdev, "%s", hci_broken_table[i].desc);
4871         }
4872
4873         /* The transport driver can set the quirk to mark the
4874          * BD_ADDR invalid before creating the HCI device or in
4875          * its setup callback.
4876          */
4877         invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
4878                          test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
4879         if (!ret) {
4880                 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) &&
4881                     !bacmp(&hdev->public_addr, BDADDR_ANY))
4882                         hci_dev_get_bd_addr_from_property(hdev);
4883
4884                 if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) &&
4885                     hdev->set_bdaddr) {
4886                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
4887                         if (!ret)
4888                                 invalid_bdaddr = false;
4889                 }
4890         }
4891
4892         /* The transport driver can set these quirks before
4893          * creating the HCI device or in its setup callback.
4894          *
4895          * For the invalid BD_ADDR quirk it is possible that
4896          * it becomes a valid address if the bootloader does
4897          * provide it (see above).
4898          *
4899          * In case any of them is set, the controller has to
4900          * start up as unconfigured.
4901          */
4902         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
4903             invalid_bdaddr)
4904                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
4905
4906         /* For an unconfigured controller it is required to
4907          * read at least the version information provided by
4908          * the Read Local Version Information command.
4909          *
4910          * If the set_bdaddr driver callback is provided, then
4911          * also the original Bluetooth public device address
4912          * will be read using the Read BD Address command.
4913          */
4914         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4915                 return hci_unconf_init_sync(hdev);
4916
4917         return ret;
4918 }
4919
4920 /* This function handles hdev init stage:
4921  *
4922  * Calls hci_dev_setup_sync to perform setup stage
4923  * Calls hci_init_sync to perform HCI command init sequence
4924  */
4925 static int hci_dev_init_sync(struct hci_dev *hdev)
4926 {
4927         int ret;
4928
4929         bt_dev_dbg(hdev, "");
4930
4931         atomic_set(&hdev->cmd_cnt, 1);
4932         set_bit(HCI_INIT, &hdev->flags);
4933
4934         ret = hci_dev_setup_sync(hdev);
4935
4936         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
4937                 /* If public address change is configured, ensure that
4938                  * the address gets programmed. If the driver does not
4939                  * support changing the public address, fail the power
4940                  * on procedure.
4941                  */
4942                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
4943                     hdev->set_bdaddr)
4944                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
4945                 else
4946                         ret = -EADDRNOTAVAIL;
4947         }
4948
4949         if (!ret) {
4950                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
4951                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4952                         ret = hci_init_sync(hdev);
4953                         if (!ret && hdev->post_init)
4954                                 ret = hdev->post_init(hdev);
4955                 }
4956         }
4957
4958         /* If the HCI Reset command is clearing all diagnostic settings,
4959          * then they need to be reprogrammed after the init procedure
4960          * completed.
4961          */
4962         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
4963             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4964             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
4965                 ret = hdev->set_diag(hdev, true);
4966
4967         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4968                 msft_do_open(hdev);
4969                 aosp_do_open(hdev);
4970         }
4971
4972         clear_bit(HCI_INIT, &hdev->flags);
4973
4974         return ret;
4975 }
4976
4977 int hci_dev_open_sync(struct hci_dev *hdev)
4978 {
4979         int ret;
4980
4981         bt_dev_dbg(hdev, "");
4982
4983         if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
4984                 ret = -ENODEV;
4985                 goto done;
4986         }
4987
4988         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4989             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
4990                 /* Check for rfkill but allow the HCI setup stage to
4991                  * proceed (which in itself doesn't cause any RF activity).
4992                  */
4993                 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
4994                         ret = -ERFKILL;
4995                         goto done;
4996                 }
4997
4998                 /* Check for valid public address or a configured static
4999                  * random address, but let the HCI setup proceed to
5000                  * be able to determine if there is a public address
5001                  * or not.
5002                  *
5003                  * In case of user channel usage, it is not important
5004                  * if a public address or static random address is
5005                  * available.
5006                  */
5007                 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5008                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
5009                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
5010                         ret = -EADDRNOTAVAIL;
5011                         goto done;
5012                 }
5013         }
5014
5015         if (test_bit(HCI_UP, &hdev->flags)) {
5016                 ret = -EALREADY;
5017                 goto done;
5018         }
5019
5020         if (hdev->open(hdev)) {
5021                 ret = -EIO;
5022                 goto done;
5023         }
5024
5025         hci_devcd_reset(hdev);
5026
5027         set_bit(HCI_RUNNING, &hdev->flags);
5028         hci_sock_dev_event(hdev, HCI_DEV_OPEN);
5029
5030         ret = hci_dev_init_sync(hdev);
5031         if (!ret) {
5032                 hci_dev_hold(hdev);
5033                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5034                 hci_adv_instances_set_rpa_expired(hdev, true);
5035                 set_bit(HCI_UP, &hdev->flags);
5036                 hci_sock_dev_event(hdev, HCI_DEV_UP);
5037                 hci_leds_update_powered(hdev, true);
5038                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
5039                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
5040                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
5041                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5042                     hci_dev_test_flag(hdev, HCI_MGMT)) {
5043                         ret = hci_powered_update_sync(hdev);
5044                         mgmt_power_on(hdev, ret);
5045                 }
5046         } else {
5047                 /* Init failed, cleanup */
5048                 flush_work(&hdev->tx_work);
5049
5050                 /* Since hci_rx_work() may wake up new cmd_work, it
5051                  * should be flushed first to avoid an unexpected call
5052                  * of hci_cmd_work().
5053                  */
5054                 flush_work(&hdev->rx_work);
5055                 flush_work(&hdev->cmd_work);
5056
5057                 skb_queue_purge(&hdev->cmd_q);
5058                 skb_queue_purge(&hdev->rx_q);
5059
5060                 if (hdev->flush)
5061                         hdev->flush(hdev);
5062
5063                 if (hdev->sent_cmd) {
5064                         cancel_delayed_work_sync(&hdev->cmd_timer);
5065                         kfree_skb(hdev->sent_cmd);
5066                         hdev->sent_cmd = NULL;
5067                 }
5068
5069                 if (hdev->req_skb) {
5070                         kfree_skb(hdev->req_skb);
5071                         hdev->req_skb = NULL;
5072                 }
5073
5074                 clear_bit(HCI_RUNNING, &hdev->flags);
5075                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
5076
5077                 hdev->close(hdev);
5078                 hdev->flags &= BIT(HCI_RAW);
5079         }
5080
5081 done:
5082         return ret;
5083 }
5084
5085 /* This function requires the caller holds hdev->lock */
5086 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
5087 {
5088         struct hci_conn_params *p;
5089
5090         list_for_each_entry(p, &hdev->le_conn_params, list) {
5091                 hci_pend_le_list_del_init(p);
5092                 if (p->conn) {
5093                         hci_conn_drop(p->conn);
5094                         hci_conn_put(p->conn);
5095                         p->conn = NULL;
5096                 }
5097         }
5098
5099         BT_DBG("All LE pending actions cleared");
5100 }
5101
5102 static int hci_dev_shutdown(struct hci_dev *hdev)
5103 {
5104         int err = 0;
5105         /* Similar to how we first do setup and then set the exclusive access
5106          * bit for userspace, we must first unset userchannel and then clean up.
5107          * Otherwise, the kernel can't properly use the hci channel to clean up
5108          * the controller (some shutdown routines require sending additional
5109          * commands to the controller for example).
5110          */
5111         bool was_userchannel =
5112                 hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL);
5113
5114         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
5115             test_bit(HCI_UP, &hdev->flags)) {
5116                 /* Execute vendor specific shutdown routine */
5117                 if (hdev->shutdown)
5118                         err = hdev->shutdown(hdev);
5119         }
5120
5121         if (was_userchannel)
5122                 hci_dev_set_flag(hdev, HCI_USER_CHANNEL);
5123
5124         return err;
5125 }
5126
5127 int hci_dev_close_sync(struct hci_dev *hdev)
5128 {
5129         bool auto_off;
5130         int err = 0;
5131
5132         bt_dev_dbg(hdev, "");
5133
5134         cancel_delayed_work(&hdev->power_off);
5135         cancel_delayed_work(&hdev->ncmd_timer);
5136         cancel_delayed_work(&hdev->le_scan_disable);
5137
5138         hci_cmd_sync_cancel_sync(hdev, ENODEV);
5139
5140         cancel_interleave_scan(hdev);
5141
5142         if (hdev->adv_instance_timeout) {
5143                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
5144                 hdev->adv_instance_timeout = 0;
5145         }
5146
5147         err = hci_dev_shutdown(hdev);
5148
5149         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
5150                 cancel_delayed_work_sync(&hdev->cmd_timer);
5151                 return err;
5152         }
5153
5154         hci_leds_update_powered(hdev, false);
5155
5156         /* Flush RX and TX works */
5157         flush_work(&hdev->tx_work);
5158         flush_work(&hdev->rx_work);
5159
5160         if (hdev->discov_timeout > 0) {
5161                 hdev->discov_timeout = 0;
5162                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5163                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
5164         }
5165
5166         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
5167                 cancel_delayed_work(&hdev->service_cache);
5168
5169         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
5170                 struct adv_info *adv_instance;
5171
5172                 cancel_delayed_work_sync(&hdev->rpa_expired);
5173
5174                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
5175                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
5176         }
5177
5178         /* Avoid potential lockdep warnings from the *_flush() calls by
5179          * ensuring the workqueue is empty up front.
5180          */
5181         drain_workqueue(hdev->workqueue);
5182
5183         hci_dev_lock(hdev);
5184
5185         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
5186
5187         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
5188
5189         if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5190             hci_dev_test_flag(hdev, HCI_MGMT))
5191                 __mgmt_power_off(hdev);
5192
5193         hci_inquiry_cache_flush(hdev);
5194         hci_pend_le_actions_clear(hdev);
5195         hci_conn_hash_flush(hdev);
5196         /* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */
5197         smp_unregister(hdev);
5198         hci_dev_unlock(hdev);
5199
5200         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
5201
5202         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5203                 aosp_do_close(hdev);
5204                 msft_do_close(hdev);
5205         }
5206
5207         if (hdev->flush)
5208                 hdev->flush(hdev);
5209
5210         /* Reset device */
5211         skb_queue_purge(&hdev->cmd_q);
5212         atomic_set(&hdev->cmd_cnt, 1);
5213         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
5214             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
5215                 set_bit(HCI_INIT, &hdev->flags);
5216                 hci_reset_sync(hdev);
5217                 clear_bit(HCI_INIT, &hdev->flags);
5218         }
5219
5220         /* Flush cmd work */
5221         flush_work(&hdev->cmd_work);
5222
5223         /* Drop queues */
5224         skb_queue_purge(&hdev->rx_q);
5225         skb_queue_purge(&hdev->cmd_q);
5226         skb_queue_purge(&hdev->raw_q);
5227
5228         /* Drop last sent command */
5229         if (hdev->sent_cmd) {
5230                 cancel_delayed_work_sync(&hdev->cmd_timer);
5231                 kfree_skb(hdev->sent_cmd);
5232                 hdev->sent_cmd = NULL;
5233         }
5234
5235         /* Drop last request */
5236         if (hdev->req_skb) {
5237                 kfree_skb(hdev->req_skb);
5238                 hdev->req_skb = NULL;
5239         }
5240
5241         clear_bit(HCI_RUNNING, &hdev->flags);
5242         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
5243
5244         /* After this point our queues are empty and no tasks are scheduled. */
5245         hdev->close(hdev);
5246
5247         /* Clear flags */
5248         hdev->flags &= BIT(HCI_RAW);
5249         hci_dev_clear_volatile_flags(hdev);
5250
5251         memset(hdev->eir, 0, sizeof(hdev->eir));
5252         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
5253         bacpy(&hdev->random_addr, BDADDR_ANY);
5254         hci_codec_list_clear(&hdev->local_codecs);
5255
5256         hci_dev_put(hdev);
5257         return err;
5258 }
5259
5260 /* This function performs the power on HCI command sequence as follows:
5261  *
5262  * If the controller is already up (HCI_UP), it performs the
5263  * hci_powered_update_sync sequence; otherwise it runs hci_dev_open_sync,
5264  * which follows up with hci_powered_update_sync after init completes.
5265  */
5266 static int hci_power_on_sync(struct hci_dev *hdev)
5267 {
5268         int err;
5269
5270         if (test_bit(HCI_UP, &hdev->flags) &&
5271             hci_dev_test_flag(hdev, HCI_MGMT) &&
5272             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
5273                 cancel_delayed_work(&hdev->power_off);
5274                 return hci_powered_update_sync(hdev);
5275         }
5276
5277         err = hci_dev_open_sync(hdev);
5278         if (err < 0)
5279                 return err;
5280
5281         /* During the HCI setup phase, a few error conditions are
5282          * ignored and they need to be checked now. If they are still
5283          * valid, it is important to turn the device back off.
5284          */
5285         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
5286             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
5287             (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
5288              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
5289                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
5290                 hci_dev_close_sync(hdev);
5291         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
5292                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
5293                                    HCI_AUTO_OFF_TIMEOUT);
5294         }
5295
5296         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
5297                 /* For unconfigured devices, set the HCI_RAW flag
5298                  * so that userspace can easily identify them.
5299                  */
5300                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5301                         set_bit(HCI_RAW, &hdev->flags);
5302
5303                 /* For fully configured devices, this will send
5304                  * the Index Added event. For unconfigured devices,
5305                  * it will send the Unconfigured Index Added event.
5306                  *
5307                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
5308                  * and no event will be sent.
5309                  */
5310                 mgmt_index_added(hdev);
5311         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
5312                 /* Now that the controller is configured, it is
5313                  * important to clear the HCI_RAW flag.
5314                  */
5315                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5316                         clear_bit(HCI_RAW, &hdev->flags);
5317
5318                 /* Powering on the controller with HCI_CONFIG set only
5319                  * happens with the transition from unconfigured to
5320                  * configured. This will send the Index Added event.
5321                  */
5322                 mgmt_index_added(hdev);
5323         }
5324
5325         return 0;
5326 }
5327
5328 static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr)
5329 {
5330         struct hci_cp_remote_name_req_cancel cp;
5331
5332         memset(&cp, 0, sizeof(cp));
5333         bacpy(&cp.bdaddr, addr);
5334
5335         return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
5336                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5337 }
5338
5339 int hci_stop_discovery_sync(struct hci_dev *hdev)
5340 {
5341         struct discovery_state *d = &hdev->discovery;
5342         struct inquiry_entry *e;
5343         int err;
5344
5345         bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
5346
5347         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
5348                 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
5349                         err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL,
5350                                                     0, NULL, HCI_CMD_TIMEOUT);
5351                         if (err)
5352                                 return err;
5353                 }
5354
5355                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
5356                         cancel_delayed_work(&hdev->le_scan_disable);
5357
5358                         err = hci_scan_disable_sync(hdev);
5359                         if (err)
5360                                 return err;
5361                 }
5362
5363         } else {
5364                 err = hci_scan_disable_sync(hdev);
5365                 if (err)
5366                         return err;
5367         }
5368
5369         /* Resume advertising if it was paused */
5370         if (use_ll_privacy(hdev))
5371                 hci_resume_advertising_sync(hdev);
5372
5373         /* No further actions needed for LE-only discovery */
5374         if (d->type == DISCOV_TYPE_LE)
5375                 return 0;
5376
5377         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
5378                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
5379                                                      NAME_PENDING);
5380                 if (!e)
5381                         return 0;
5382
5383                 /* Ignore cancel errors since they should not interfere
5384                  * with stopping the discovery.
5385                  */
5386                 hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
5387         }
5388
5389         return 0;
5390 }
5391
5392 static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
5393                                u8 reason)
5394 {
5395         struct hci_cp_disconnect cp;
5396
5397         if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) {
5398                 /* This is a BIS connection, hci_conn_del will
5399                  * do the necessary cleanup.
5400                  */
5401                 hci_dev_lock(hdev);
5402                 hci_conn_failed(conn, reason);
5403                 hci_dev_unlock(hdev);
5404
5405                 return 0;
5406         }
5407
5408         memset(&cp, 0, sizeof(cp));
5409         cp.handle = cpu_to_le16(conn->handle);
5410         cp.reason = reason;
5411
5412         /* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
5413          * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
5414          * used when suspending or powering off, where we don't want to wait
5415          * for the peer's response.
5416          */
5417         if (reason != HCI_ERROR_REMOTE_POWER_OFF)
5418                 return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT,
5419                                                 sizeof(cp), &cp,
5420                                                 HCI_EV_DISCONN_COMPLETE,
5421                                                 HCI_CMD_TIMEOUT, NULL);
5422
5423         return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp,
5424                                      HCI_CMD_TIMEOUT);
5425 }
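
/* A note on the two completion modes used above: passing an HCI event
 * (here HCI_EV_DISCONN_COMPLETE) to __hci_cmd_sync_status_sk() makes
 * the sync machinery wait for that specific event instead of the plain
 * command status, which is how the "wait for the peer's response"
 * behaviour described in the comment is implemented.
 */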
5426
5427 static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
5428                                       struct hci_conn *conn, u8 reason)
5429 {
5430         /* Return the reason if scanning, since the connection shall
5431          * probably be cleaned up directly.
5432          */
5433         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
5434                 return reason;
5435
5436         if (conn->role == HCI_ROLE_SLAVE ||
5437             test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
5438                 return 0;
5439
5440         return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
5441                                      0, NULL, HCI_CMD_TIMEOUT);
5442 }
5443
5444 static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
5445                                    u8 reason)
5446 {
5447         if (conn->type == LE_LINK)
5448                 return hci_le_connect_cancel_sync(hdev, conn, reason);
5449
5450         if (conn->type == ISO_LINK) {
5451                 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
5452                  * page 1857:
5453                  *
5454                  * If this command is issued for a CIS on the Central and the
5455                  * CIS is successfully terminated before being established,
5456                  * then an HCI_LE_CIS_Established event shall also be sent for
5457                  * this CIS with the Status Operation Cancelled by Host (0x44).
5458                  */
5459                 if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
5460                         return hci_disconnect_sync(hdev, conn, reason);
5461
5462                 /* A CIS with no Create CIS sent has nothing to cancel */
5463                 if (bacmp(&conn->dst, BDADDR_ANY))
5464                         return HCI_ERROR_LOCAL_HOST_TERM;
5465
5466                 /* There is no way to cancel a BIS without terminating the BIG
5467                  * which is done later on connection cleanup.
5468                  */
5469                 return 0;
5470         }
5471
5472         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
5473                 return 0;
5474
5475         /* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
5476          * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
5477          * used when suspending or powering off, where we don't want to wait
5478          * for the peer's response.
5479          */
5480         if (reason != HCI_ERROR_REMOTE_POWER_OFF)
5481                 return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL,
5482                                                 6, &conn->dst,
5483                                                 HCI_EV_CONN_COMPLETE,
5484                                                 HCI_CMD_TIMEOUT, NULL);
5485
5486         return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL,
5487                                      6, &conn->dst, HCI_CMD_TIMEOUT);
5488 }
5489
5490 static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
5491                                u8 reason)
5492 {
5493         struct hci_cp_reject_sync_conn_req cp;
5494
5495         memset(&cp, 0, sizeof(cp));
5496         bacpy(&cp.bdaddr, &conn->dst);
5497         cp.reason = reason;
5498
5499         /* SCO rejection has its own limited set of
5500          * allowed error values (0x0D-0x0F).
5501          */
5502         if (reason < 0x0d || reason > 0x0f)
5503                 cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
5504
5505         return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ,
5506                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5507 }
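
/* The 0x0D-0x0F window above corresponds to the Core spec error codes
 * Connection Rejected due to Limited Resources (0x0D), due to Security
 * Reasons (0x0E) and due to Unacceptable BD_ADDR (0x0F), the only
 * rejection reasons that are valid for synchronous connections.
 */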
5508
5509 static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn,
5510                                   u8 reason)
5511 {
5512         struct hci_cp_le_reject_cis cp;
5513
5514         memset(&cp, 0, sizeof(cp));
5515         cp.handle = cpu_to_le16(conn->handle);
5516         cp.reason = reason;
5517
5518         return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS,
5519                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5520 }
5521
5522 static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
5523                                 u8 reason)
5524 {
5525         struct hci_cp_reject_conn_req cp;
5526
5527         if (conn->type == ISO_LINK)
5528                 return hci_le_reject_cis_sync(hdev, conn, reason);
5529
5530         if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
5531                 return hci_reject_sco_sync(hdev, conn, reason);
5532
5533         memset(&cp, 0, sizeof(cp));
5534         bacpy(&cp.bdaddr, &conn->dst);
5535         cp.reason = reason;
5536
5537         return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ,
5538                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5539 }
5540
5541 int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
5542 {
5543         int err = 0;
5544         u16 handle = conn->handle;
5545         bool disconnect = false;
5546         struct hci_conn *c;
5547
5548         switch (conn->state) {
5549         case BT_CONNECTED:
5550         case BT_CONFIG:
5551                 err = hci_disconnect_sync(hdev, conn, reason);
5552                 break;
5553         case BT_CONNECT:
5554                 err = hci_connect_cancel_sync(hdev, conn, reason);
5555                 break;
5556         case BT_CONNECT2:
5557                 err = hci_reject_conn_sync(hdev, conn, reason);
5558                 break;
5559         case BT_OPEN:
5560         case BT_BOUND:
5561                 break;
5562         default:
5563                 disconnect = true;
5564                 break;
5565         }
5566
5567         hci_dev_lock(hdev);
5568
5569         /* Check if the connection has been cleaned up concurrently */
5570         c = hci_conn_hash_lookup_handle(hdev, handle);
5571         if (!c || c != conn) {
5572                 err = 0;
5573                 goto unlock;
5574         }
5575
5576         /* Clean up the hci_conn object if it cannot be cancelled, as
5577          * that likely means the controller and host stack are out of
5578          * sync, or, in the LE case, it was still scanning, so it can
5579          * be cleaned up safely.
5580          */
5581         if (disconnect) {
5582                 conn->state = BT_CLOSED;
5583                 hci_disconn_cfm(conn, reason);
5584                 hci_conn_del(conn);
5585         } else {
5586                 hci_conn_failed(conn, reason);
5587         }
5588
5589 unlock:
5590         hci_dev_unlock(hdev);
5591         return err;
5592 }
5593
5594 static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
5595 {
5596         struct list_head *head = &hdev->conn_hash.list;
5597         struct hci_conn *conn;
5598
5599         rcu_read_lock();
5600         while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) {
5601                 /* Make sure the connection is not freed while unlocking */
5602                 conn = hci_conn_get(conn);
5603                 rcu_read_unlock();
5604                 /* Disregard possible errors since hci_conn_del shall
5605                  * have been called even if an error occurred: a failure
5606                  * causes hci_conn_failed to be called, which calls
5607                  * hci_conn_del internally.
5608                  */
5609                 hci_abort_conn_sync(hdev, conn, reason);
5610                 hci_conn_put(conn);
5611                 rcu_read_lock();
5612         }
5613         rcu_read_unlock();
5614
5615         return 0;
5616 }
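
/* The locking pattern above is worth spelling out: the RCU read lock
 * only protects the list_first_or_null_rcu() lookup, so the loop takes
 * its own reference with hci_conn_get() and drops the read lock before
 * calling hci_abort_conn_sync(), which may sleep. The read lock is then
 * re-acquired to pick up the next entry.
 */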
5617
5618 /* This function performs the power off HCI command sequence as follows:
5619  *
5620  * Clear Advertising
5621  * Stop Discovery
5622  * Disconnect all connections
5623  * hci_dev_close_sync
5624  */
5625 static int hci_power_off_sync(struct hci_dev *hdev)
5626 {
5627         int err;
5628
5629         /* If controller is already down there is nothing to do */
5630         if (!test_bit(HCI_UP, &hdev->flags))
5631                 return 0;
5632
5633         hci_dev_set_flag(hdev, HCI_POWERING_DOWN);
5634
5635         if (test_bit(HCI_ISCAN, &hdev->flags) ||
5636             test_bit(HCI_PSCAN, &hdev->flags)) {
5637                 err = hci_write_scan_enable_sync(hdev, 0x00);
5638                 if (err)
5639                         goto out;
5640         }
5641
5642         err = hci_clear_adv_sync(hdev, NULL, false);
5643         if (err)
5644                 goto out;
5645
5646         err = hci_stop_discovery_sync(hdev);
5647         if (err)
5648                 goto out;
5649
5650         /* Terminated due to Power Off */
5651         err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
5652         if (err)
5653                 goto out;
5654
5655         err = hci_dev_close_sync(hdev);
5656
5657 out:
5658         hci_dev_clear_flag(hdev, HCI_POWERING_DOWN);
5659         return err;
5660 }
5661
5662 int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
5663 {
5664         if (val)
5665                 return hci_power_on_sync(hdev);
5666
5667         return hci_power_off_sync(hdev);
5668 }
5669
5670 static int hci_write_iac_sync(struct hci_dev *hdev)
5671 {
5672         struct hci_cp_write_current_iac_lap cp;
5673
5674         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
5675                 return 0;
5676
5677         memset(&cp, 0, sizeof(cp));
5678
5679         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
5680                 /* Limited discoverable mode */
5681                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
5682                 cp.iac_lap[0] = 0x00;   /* LIAC */
5683                 cp.iac_lap[1] = 0x8b;
5684                 cp.iac_lap[2] = 0x9e;
5685                 cp.iac_lap[3] = 0x33;   /* GIAC */
5686                 cp.iac_lap[4] = 0x8b;
5687                 cp.iac_lap[5] = 0x9e;
5688         } else {
5689                 /* General discoverable mode */
5690                 cp.num_iac = 1;
5691                 cp.iac_lap[0] = 0x33;   /* GIAC */
5692                 cp.iac_lap[1] = 0x8b;
5693                 cp.iac_lap[2] = 0x9e;
5694         }
5695
5696         return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP,
5697                                      (cp.num_iac * 3) + 1, &cp,
5698                                      HCI_CMD_TIMEOUT);
5699 }
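
/* The iac_lap bytes above are inquiry access codes in little-endian
 * order: the Limited Inquiry Access Code (LIAC) is LAP 0x9E8B00 and the
 * General Inquiry Access Code (GIAC) is LAP 0x9E8B33, hence the
 * { 0x00, 0x8b, 0x9e } and { 0x33, 0x8b, 0x9e } byte sequences.
 */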
5700
5701 int hci_update_discoverable_sync(struct hci_dev *hdev)
5702 {
5703         int err = 0;
5704
5705         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5706                 err = hci_write_iac_sync(hdev);
5707                 if (err)
5708                         return err;
5709
5710                 err = hci_update_scan_sync(hdev);
5711                 if (err)
5712                         return err;
5713
5714                 err = hci_update_class_sync(hdev);
5715                 if (err)
5716                         return err;
5717         }
5718
5719         /* Advertising instances don't use the global discoverable setting, so
5720          * only update AD if advertising was enabled using Set Advertising.
5721          */
5722         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
5723                 err = hci_update_adv_data_sync(hdev, 0x00);
5724                 if (err)
5725                         return err;
5726
5727                 /* Discoverable mode affects the local advertising
5728                  * address in limited privacy mode.
5729                  */
5730                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
5731                         if (ext_adv_capable(hdev))
5732                                 err = hci_start_ext_adv_sync(hdev, 0x00);
5733                         else
5734                                 err = hci_enable_advertising_sync(hdev);
5735                 }
5736         }
5737
5738         return err;
5739 }
5740
5741 static int update_discoverable_sync(struct hci_dev *hdev, void *data)
5742 {
5743         return hci_update_discoverable_sync(hdev);
5744 }
5745
5746 int hci_update_discoverable(struct hci_dev *hdev)
5747 {
5748         /* Only queue if it would have any effect */
5749         if (hdev_is_powered(hdev) &&
5750             hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5751             hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
5752             hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
5753                 return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL,
5754                                           NULL);
5755
5756         return 0;
5757 }
5758
5759 int hci_update_connectable_sync(struct hci_dev *hdev)
5760 {
5761         int err;
5762
5763         err = hci_update_scan_sync(hdev);
5764         if (err)
5765                 return err;
5766
5767         /* If BR/EDR is not enabled and we disable advertising as a
5768          * by-product of disabling connectable, we need to update the
5769          * advertising flags.
5770          */
5771         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
5772                 err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
5773
5774         /* Update the advertising parameters if necessary */
5775         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5776             !list_empty(&hdev->adv_instances)) {
5777                 if (ext_adv_capable(hdev))
5778                         err = hci_start_ext_adv_sync(hdev,
5779                                                      hdev->cur_adv_instance);
5780                 else
5781                         err = hci_enable_advertising_sync(hdev);
5782
5783                 if (err)
5784                         return err;
5785         }
5786
5787         return hci_update_passive_scan_sync(hdev);
5788 }
5789
5790 int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp)
5791 {
5792         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
5793         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
5794         struct hci_cp_inquiry cp;
5795
5796         bt_dev_dbg(hdev, "");
5797
5798         if (test_bit(HCI_INQUIRY, &hdev->flags))
5799                 return 0;
5800
5801         hci_dev_lock(hdev);
5802         hci_inquiry_cache_flush(hdev);
5803         hci_dev_unlock(hdev);
5804
5805         memset(&cp, 0, sizeof(cp));
5806
5807         if (hdev->discovery.limited)
5808                 memcpy(&cp.lap, liac, sizeof(cp.lap));
5809         else
5810                 memcpy(&cp.lap, giac, sizeof(cp.lap));
5811
5812         cp.length = length;
5813         cp.num_rsp = num_rsp;
5814
5815         return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
5816                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5817 }
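
/* As in hci_write_iac_sync() above, giac/liac hold the inquiry access
 * code LAPs in little-endian byte order. Per the Inquiry command
 * definition, length is expressed in units of 1.28 s and num_rsp == 0
 * requests an unlimited number of responses.
 */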
5818
5819 static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
5820 {
5821         u8 own_addr_type;
5822         /* Accept list is not used for discovery */
5823         u8 filter_policy = 0x00;
5824         /* Default is to enable duplicates filter */
5825         u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5826         int err;
5827
5828         bt_dev_dbg(hdev, "");
5829
5830         /* If the controller is scanning, it means passive scanning is
5831          * running. Thus, we should temporarily stop it in order to set
5832          * the discovery scanning parameters.
5833          */
5834         err = hci_scan_disable_sync(hdev);
5835         if (err) {
5836                 bt_dev_err(hdev, "Unable to disable scanning: %d", err);
5837                 return err;
5838         }
5839
5840         cancel_interleave_scan(hdev);
5841
5842         /* Pause address resolution for active scan and stop advertising if
5843          * privacy is enabled.
5844          */
5845         err = hci_pause_addr_resolution(hdev);
5846         if (err)
5847                 goto failed;
5848
5849         /* All active scans will be done with either a resolvable private
5850          * address (when privacy feature has been enabled) or non-resolvable
5851          * private address.
5852          */
5853         err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev),
5854                                              &own_addr_type);
5855         if (err < 0)
5856                 own_addr_type = ADDR_LE_DEV_PUBLIC;
5857
5858         if (hci_is_adv_monitoring(hdev) ||
5859             (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
5860             hdev->discovery.result_filtering)) {
5861                 /* Duplicate filtering should be disabled when an advertisement
5862                  * monitor is activated, otherwise AdvMon can only receive one
5863                  * advertisement per peer during active scanning, and might
5864                  * wrongly report loss for those peers.
5865                  *
5866                  * If the controller does strict duplicate filtering and the
5867                  * discovery requires result filtering, disable controller-based
5868                  * filtering since it can cause reports that would match the
5869                  * host filter to not be reported.
5870                  */
5871                 filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
5872         }
5873
5874         err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval,
5875                                   hdev->le_scan_window_discovery,
5876                                   own_addr_type, filter_policy, filter_dup);
5877         if (!err)
5878                 return err;
5879
5880 failed:
5881         /* Resume advertising if it was paused */
5882         if (use_ll_privacy(hdev))
5883                 hci_resume_advertising_sync(hdev);
5884
5885         /* Resume passive scanning */
5886         hci_update_passive_scan_sync(hdev);
5887         return err;
5888 }
5889
5890 static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
5891 {
5892         int err;
5893
5894         bt_dev_dbg(hdev, "");
5895
5896         err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2);
5897         if (err)
5898                 return err;
5899
5900         return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
5901 }
5902
5903 int hci_start_discovery_sync(struct hci_dev *hdev)
5904 {
5905         unsigned long timeout;
5906         int err;
5907
5908         bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
5909
5910         switch (hdev->discovery.type) {
5911         case DISCOV_TYPE_BREDR:
5912                 return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
5913         case DISCOV_TYPE_INTERLEAVED:
5914                 /* When running simultaneous discovery, the LE scanning time
5915                  * should occupy the whole discovery time since BR/EDR inquiry
5916                  * and LE scanning are scheduled by the controller.
5917                  *
5918                  * For interleaving discovery in comparison, BR/EDR inquiry
5919                  * and LE scanning are done sequentially with separate
5920                  * timeouts.
5921                  */
5922                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
5923                              &hdev->quirks)) {
5924                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
5925                         /* During simultaneous discovery, we double the LE
5926                          * scan interval to leave some time for the controller
5927                          * to do BR/EDR inquiry.
5928                          */
5929                         err = hci_start_interleaved_discovery_sync(hdev);
5930                         break;
5931                 }
5932
5933                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
5934                 err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
5935                 break;
5936         case DISCOV_TYPE_LE:
5937                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
5938                 err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
5939                 break;
5940         default:
5941                 return -EINVAL;
5942         }
5943
5944         if (err)
5945                 return err;
5946
5947         bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
5948
5949         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
5950                            timeout);
5951         return 0;
5952 }
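
/* Editor's note -- illustrative only, not part of the kernel source: the
 * switch above pairs each discovery type with a scan interval and a
 * disable timeout. A standalone sketch of that selection, where the
 * constants (10240 ms for DISCOV_LE_TIMEOUT, a 5120 ms interleave slice)
 * are assumptions for the example rather than values taken from this
 * file:
 *
 *     #include <stdbool.h>
 *     #include <stdint.h>
 *
 *     struct discov_plan {
 *             uint16_t le_interval;    // in 0.625 ms units
 *             unsigned int timeout_ms; // when to stop LE scanning
 *     };
 *
 *     static struct discov_plan plan_interleaved(bool simultaneous,
 *                                                uint16_t base_interval)
 *     {
 *             struct discov_plan p;
 *
 *             if (simultaneous) {
 *                     // Controller schedules inquiry and LE scan itself:
 *                     // double the interval to leave room for inquiry and
 *                     // let LE scanning span the whole discovery window.
 *                     p.le_interval = base_interval * 2;
 *                     p.timeout_ms = 10240;
 *             } else {
 *                     // Host interleaves sequentially: normal interval,
 *                     // shorter LE phase before switching to inquiry.
 *                     p.le_interval = base_interval;
 *                     p.timeout_ms = 5120;
 *             }
 *             return p;
 *     }
 */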
5953
5954 static void hci_suspend_monitor_sync(struct hci_dev *hdev)
5955 {
5956         switch (hci_get_adv_monitor_offload_ext(hdev)) {
5957         case HCI_ADV_MONITOR_EXT_MSFT:
5958                 msft_suspend_sync(hdev);
5959                 break;
5960         default:
5961                 return;
5962         }
5963 }
5964
5965 /* This function disables discovery and marks it as paused */
5966 static int hci_pause_discovery_sync(struct hci_dev *hdev)
5967 {
5968         int old_state = hdev->discovery.state;
5969         int err;
5970
5971         /* Nothing to do if discovery is already stopped/stopping/paused */
5972         if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING ||
5973             hdev->discovery_paused)
5974                 return 0;
5975
5976         hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5977         err = hci_stop_discovery_sync(hdev);
5978         if (err)
5979                 return err;
5980
5981         hdev->discovery_paused = true;
5982         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
5983
5984         return 0;
5985 }
5986
5987 static int hci_update_event_filter_sync(struct hci_dev *hdev)
5988 {
5989         struct bdaddr_list_with_flags *b;
5990         u8 scan = SCAN_DISABLED;
5991         bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
5992         int err;
5993
5994         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
5995                 return 0;
5996
5997         /* Some fake CSR controllers lock up after setting this type of
5998          * filter, so avoid sending the request altogether.
5999          */
6000         if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
6001                 return 0;
6002
6003         /* Always clear event filter when starting */
6004         hci_clear_event_filter_sync(hdev);
6005
6006         list_for_each_entry(b, &hdev->accept_list, list) {
6007                 if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
6008                         continue;
6009
6010                 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
6011
6012                 err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
6013                                                 HCI_CONN_SETUP_ALLOW_BDADDR,
6014                                                 &b->bdaddr,
6015                                                 HCI_CONN_SETUP_AUTO_ON);
6016                 if (err)
6017                         bt_dev_dbg(hdev, "Failed to set event filter for %pMR",
6018                                    &b->bdaddr);
6019                 else
6020                         scan = SCAN_PAGE;
6021         }
6022
6023         if (scan && !scanning)
6024                 hci_write_scan_enable_sync(hdev, scan);
6025         else if (!scan && scanning)
6026                 hci_write_scan_enable_sync(hdev, scan);
6027
6028         return 0;
6029 }
6030
6031 /* This function disables scanning (BR and LE) and marks it as paused */
6032 static int hci_pause_scan_sync(struct hci_dev *hdev)
6033 {
6034         if (hdev->scanning_paused)
6035                 return 0;
6036
6037         /* Disable page scan if enabled */
6038         if (test_bit(HCI_PSCAN, &hdev->flags))
6039                 hci_write_scan_enable_sync(hdev, SCAN_DISABLED);
6040
6041         hci_scan_disable_sync(hdev);
6042
6043         hdev->scanning_paused = true;
6044
6045         return 0;
6046 }
6047
6048 /* This function performs the HCI suspend procedures in the following order:
6049  *
6050  * Pause discovery (active scanning/inquiry)
6051  * Pause Directed Advertising/Advertising
6052  * Pause Scanning (passive scanning in case discovery was not active)
6053  * Disconnect all connections
6054  * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup
6055  * otherwise:
6056  * Update event mask (only set events that are allowed to wake up the host)
6057  * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP)
6058  * Update passive scanning (lower duty cycle)
6059  * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE
6060  */
6061 int hci_suspend_sync(struct hci_dev *hdev)
6062 {
6063         int err;
6064
6065         /* If already marked as suspended there is nothing to do */
6066         if (hdev->suspended)
6067                 return 0;
6068
6069         /* Mark device as suspended */
6070         hdev->suspended = true;
6071
6072         /* Pause discovery if not already stopped */
6073         hci_pause_discovery_sync(hdev);
6074
6075         /* Pause other advertisements */
6076         hci_pause_advertising_sync(hdev);
6077
6078         /* Suspend monitor filters */
6079         hci_suspend_monitor_sync(hdev);
6080
6081         /* Prevent disconnects from causing scanning to be re-enabled */
6082         hci_pause_scan_sync(hdev);
6083
6084         if (hci_conn_count(hdev)) {
6085                 /* Soft disconnect everything (power off) */
6086                 err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
6087                 if (err) {
6088                         /* Set state to BT_RUNNING so resume doesn't notify */
6089                         hdev->suspend_state = BT_RUNNING;
6090                         hci_resume_sync(hdev);
6091                         return err;
6092                 }
6093
6094                 /* Update the event mask so only the allowed events can wake
6095                  * up the host.
6096                  */
6097                 hci_set_event_mask_sync(hdev);
6098         }
6099
6100         /* Only configure accept list if disconnect succeeded and wake
6101          * isn't being prevented.
6102          */
6103         if (!hdev->wakeup || !hdev->wakeup(hdev)) {
6104                 hdev->suspend_state = BT_SUSPEND_DISCONNECT;
6105                 return 0;
6106         }
6107
6108         /* Unpause to take care of updating scanning params */
6109         hdev->scanning_paused = false;
6110
6111         /* Enable event filter for paired devices */
6112         hci_update_event_filter_sync(hdev);
6113
6114         /* Update LE passive scan if enabled */
6115         hci_update_passive_scan_sync(hdev);
6116
6117         /* Pause scan changes again. */
6118         hdev->scanning_paused = true;
6119
6120         hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE;
6121
6122         return 0;
6123 }
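
/* Editor's note -- illustrative only, not part of the kernel source: the
 * tail of hci_suspend_sync() picks the final suspend state from the
 * driver's wakeup capability. A minimal standalone sketch of that
 * decision (names are invented for the example):
 *
 *     #include <stdbool.h>
 *
 *     enum suspend_state {
 *             SUSPEND_DISCONNECT,     // device cannot wake the host
 *             SUSPEND_CONFIGURE_WAKE, // wake events/filters configured
 *     };
 *
 *     static enum suspend_state pick_suspend_state(bool (*wakeup)(void *dev),
 *                                                  void *dev)
 *     {
 *             // No wakeup hook, or the hook says wake is not possible:
 *             // everything was disconnected, so just stay suspended.
 *             if (!wakeup || !wakeup(dev))
 *                     return SUSPEND_DISCONNECT;
 *             return SUSPEND_CONFIGURE_WAKE;
 *     }
 */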
6124
6125 /* This function resumes discovery */
6126 static int hci_resume_discovery_sync(struct hci_dev *hdev)
6127 {
6128         int err;
6129
6130         /* If discovery was not paused there is nothing to do */
6131         if (!hdev->discovery_paused)
6132                 return 0;
6133
6134         hdev->discovery_paused = false;
6135
6136         hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6137
6138         err = hci_start_discovery_sync(hdev);
6139
6140         hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
6141                                 DISCOVERY_FINDING);
6142
6143         return err;
6144 }
6145
6146 static void hci_resume_monitor_sync(struct hci_dev *hdev)
6147 {
6148         switch (hci_get_adv_monitor_offload_ext(hdev)) {
6149         case HCI_ADV_MONITOR_EXT_MSFT:
6150                 msft_resume_sync(hdev);
6151                 break;
6152         default:
6153                 return;
6154         }
6155 }
6156
6157 /* This function resumes scanning and resets the paused flag */
6158 static int hci_resume_scan_sync(struct hci_dev *hdev)
6159 {
6160         if (!hdev->scanning_paused)
6161                 return 0;
6162
6163         hdev->scanning_paused = false;
6164
6165         hci_update_scan_sync(hdev);
6166
6167         /* Reset passive scanning to normal */
6168         hci_update_passive_scan_sync(hdev);
6169
6170         return 0;
6171 }
6172
6173 /* This function performs the HCI resume procedures in the following order:
6174  *
6175  * Restore event mask
6176  * Clear event filter
6177  * Update passive scanning (normal duty cycle)
6178  * Resume Directed Advertising/Advertising
6179  * Resume discovery (active scanning/inquiry)
6180  */
6181 int hci_resume_sync(struct hci_dev *hdev)
6182 {
6183         /* If not marked as suspended there nothing to do */
6184         if (!hdev->suspended)
6185                 return 0;
6186
6187         hdev->suspended = false;
6188
6189         /* Restore event mask */
6190         hci_set_event_mask_sync(hdev);
6191
6192         /* Clear any event filters and restore scan state */
6193         hci_clear_event_filter_sync(hdev);
6194
6195         /* Resume scanning */
6196         hci_resume_scan_sync(hdev);
6197
6198         /* Resume monitor filters */
6199         hci_resume_monitor_sync(hdev);
6200
6201         /* Resume other advertisements */
6202         hci_resume_advertising_sync(hdev);
6203
6204         /* Resume discovery */
6205         hci_resume_discovery_sync(hdev);
6206
6207         return 0;
6208 }
6209
6210 static bool conn_use_rpa(struct hci_conn *conn)
6211 {
6212         struct hci_dev *hdev = conn->hdev;
6213
6214         return hci_dev_test_flag(hdev, HCI_PRIVACY);
6215 }
6216
6217 static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
6218                                                 struct hci_conn *conn)
6219 {
6220         struct hci_cp_le_set_ext_adv_params cp;
6221         int err;
6222         bdaddr_t random_addr;
6223         u8 own_addr_type;
6224
6225         err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6226                                              &own_addr_type);
6227         if (err)
6228                 return err;
6229
6230         /* Set require_privacy to false so that the remote device has a
6231          * chance of identifying us.
6232          */
6233         err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
6234                                      &own_addr_type, &random_addr);
6235         if (err)
6236                 return err;
6237
6238         memset(&cp, 0, sizeof(cp));
6239
6240         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
6241         cp.channel_map = hdev->le_adv_channel_map;
6242         cp.tx_power = HCI_TX_POWER_INVALID;
6243         cp.primary_phy = HCI_ADV_PHY_1M;
6244         cp.secondary_phy = HCI_ADV_PHY_1M;
6245         cp.handle = 0x00; /* Use instance 0 for directed adv */
6246         cp.own_addr_type = own_addr_type;
6247         cp.peer_addr_type = conn->dst_type;
6248         bacpy(&cp.peer_addr, &conn->dst);
6249
6250         /* As per Core Spec 5.2, Vol 4, Part E, Sec 7.8.53, the
6251          * advertising_event_property LE_LEGACY_ADV_DIRECT_IND
6252          * does not support advertising data. If the advertising set already
6253          * contains some, the controller shall return the error code 'Invalid
6254          * HCI Command Parameters' (0x12).
6255          * It is therefore required to remove the adv set for handle 0x00,
6256          * since we use instance 0 for directed advertising.
6257          */
6258         err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL);
6259         if (err)
6260                 return err;
6261
6262         err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
6263                                     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6264         if (err)
6265                 return err;
6266
6267         /* Check if the random address needs to be updated */
6268         if (own_addr_type == ADDR_LE_DEV_RANDOM &&
6269             bacmp(&random_addr, BDADDR_ANY) &&
6270             bacmp(&random_addr, &hdev->random_addr)) {
6271                 err = hci_set_adv_set_random_addr_sync(hdev, 0x00,
6272                                                        &random_addr);
6273                 if (err)
6274                         return err;
6275         }
6276
6277         return hci_enable_ext_advertising_sync(hdev, 0x00);
6278 }
6279
6280 static int hci_le_directed_advertising_sync(struct hci_dev *hdev,
6281                                             struct hci_conn *conn)
6282 {
6283         struct hci_cp_le_set_adv_param cp;
6284         u8 status;
6285         u8 own_addr_type;
6286         u8 enable;
6287
6288         if (ext_adv_capable(hdev))
6289                 return hci_le_ext_directed_advertising_sync(hdev, conn);
6290
6291         /* Clear the HCI_LE_ADV bit temporarily so that the
6292          * hci_update_random_address knows that it's safe to go ahead
6293          * and write a new random address. The flag will be set back on
6294          * as soon as the SET_ADV_ENABLE HCI command completes.
6295          */
6296         hci_dev_clear_flag(hdev, HCI_LE_ADV);
6297
6298         /* Set require_privacy to false so that the remote device has a
6299          * chance of identifying us.
6300          */
6301         status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6302                                                 &own_addr_type);
6303         if (status)
6304                 return status;
6305
6306         memset(&cp, 0, sizeof(cp));
6307
6308         /* Some controllers might reject the command if intervals are not
6309          * within range for undirected advertising.
6310          * BCM20702A0 is known to be affected by this.
6311          */
6312         cp.min_interval = cpu_to_le16(0x0020);
6313         cp.max_interval = cpu_to_le16(0x0020);
6314
6315         cp.type = LE_ADV_DIRECT_IND;
6316         cp.own_address_type = own_addr_type;
6317         cp.direct_addr_type = conn->dst_type;
6318         bacpy(&cp.direct_addr, &conn->dst);
6319         cp.channel_map = hdev->le_adv_channel_map;
6320
6321         status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
6322                                        sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6323         if (status)
6324                 return status;
6325
6326         enable = 0x01;
6327
6328         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
6329                                      sizeof(enable), &enable, HCI_CMD_TIMEOUT);
6330 }
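
/* Editor's note -- illustrative only, not part of the kernel source: LE
 * advertising intervals are expressed in 0.625 ms units, so the 0x0020
 * programmed above is 32 * 0.625 ms = 20 ms, the lowest value valid for
 * undirected advertising. A standalone helper showing the conversion:
 *
 *     #include <stdint.h>
 *
 *     // Convert an advertising interval in 0.625 ms units to microseconds.
 *     static inline uint32_t adv_interval_to_us(uint16_t interval)
 *     {
 *             return (uint32_t)interval * 625;
 *     }
 *     // adv_interval_to_us(0x0020) == 20000, i.e. 20 ms
 */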
6331
6332 static void set_ext_conn_params(struct hci_conn *conn,
6333                                 struct hci_cp_le_ext_conn_param *p)
6334 {
6335         struct hci_dev *hdev = conn->hdev;
6336
6337         memset(p, 0, sizeof(*p));
6338
6339         p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
6340         p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
6341         p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
6342         p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
6343         p->conn_latency = cpu_to_le16(conn->le_conn_latency);
6344         p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
6345         p->min_ce_len = cpu_to_le16(0x0000);
6346         p->max_ce_len = cpu_to_le16(0x0000);
6347 }
6348
6349 static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
6350                                        struct hci_conn *conn, u8 own_addr_type)
6351 {
6352         struct hci_cp_le_ext_create_conn *cp;
6353         struct hci_cp_le_ext_conn_param *p;
6354         u8 data[sizeof(*cp) + sizeof(*p) * 3];
6355         u32 plen;
6356
6357         cp = (void *)data;
6358         p = (void *)cp->data;
6359
6360         memset(cp, 0, sizeof(*cp));
6361
6362         bacpy(&cp->peer_addr, &conn->dst);
6363         cp->peer_addr_type = conn->dst_type;
6364         cp->own_addr_type = own_addr_type;
6365
6366         plen = sizeof(*cp);
6367
6368         if (scan_1m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_1M ||
6369                               conn->le_adv_sec_phy == HCI_ADV_PHY_1M)) {
6370                 cp->phys |= LE_SCAN_PHY_1M;
6371                 set_ext_conn_params(conn, p);
6372
6373                 p++;
6374                 plen += sizeof(*p);
6375         }
6376
6377         if (scan_2m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_2M ||
6378                               conn->le_adv_sec_phy == HCI_ADV_PHY_2M)) {
6379                 cp->phys |= LE_SCAN_PHY_2M;
6380                 set_ext_conn_params(conn, p);
6381
6382                 p++;
6383                 plen += sizeof(*p);
6384         }
6385
6386         if (scan_coded(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_CODED ||
6387                                  conn->le_adv_sec_phy == HCI_ADV_PHY_CODED)) {
6388                 cp->phys |= LE_SCAN_PHY_CODED;
6389                 set_ext_conn_params(conn, p);
6390
6391                 plen += sizeof(*p);
6392         }
6393
6394         return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN,
6395                                         plen, data,
6396                                         HCI_EV_LE_ENHANCED_CONN_COMPLETE,
6397                                         conn->conn_timeout, NULL);
6398 }
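
/* Editor's note -- illustrative only, not part of the kernel source: the
 * LE Extended Create Connection payload built above is a fixed header
 * followed by one parameter block per PHY bit set in cp->phys, in 1M, 2M,
 * Coded order. A standalone sketch of the size computation; the 10- and
 * 16-byte sizes mirror the Core Spec layout and are stated here as
 * assumptions, not taken from this file:
 *
 *     #include <stdint.h>
 *
 *     static uint32_t ext_create_conn_plen(uint8_t phys)
 *     {
 *             uint32_t nr_phys = 0;
 *             uint8_t m;
 *
 *             for (m = phys; m; m >>= 1)
 *                     nr_phys += m & 1;
 *             // header (filter policy, address types, peer address, phys)
 *             // plus one 16-byte connection parameter block per PHY
 *             return 10 + nr_phys * 16;
 *     }
 */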
6399
6400 static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
6401 {
6402         struct hci_cp_le_create_conn cp;
6403         struct hci_conn_params *params;
6404         u8 own_addr_type;
6405         int err;
6406         struct hci_conn *conn = data;
6407
6408         if (!hci_conn_valid(hdev, conn))
6409                 return -ECANCELED;
6410
6411         bt_dev_dbg(hdev, "conn %p", conn);
6412
6413         clear_bit(HCI_CONN_SCANNING, &conn->flags);
6414         conn->state = BT_CONNECT;
6415
6416         /* If requested to connect as peripheral use directed advertising */
6417         if (conn->role == HCI_ROLE_SLAVE) {
6418                 /* If we're actively scanning and simultaneous roles are not
6419                  * enabled, simply reject the attempt.
6420                  */
6421                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6422                     hdev->le_scan_type == LE_SCAN_ACTIVE &&
6423                     !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) {
6424                         hci_conn_del(conn);
6425                         return -EBUSY;
6426                 }
6427
6428                 /* Pause advertising while doing directed advertising. */
6429                 hci_pause_advertising_sync(hdev);
6430
6431                 err = hci_le_directed_advertising_sync(hdev, conn);
6432                 goto done;
6433         }
6434
6435         /* Disable advertising if simultaneous roles is not in use. */
6436         if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
6437                 hci_pause_advertising_sync(hdev);
6438
6439         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
6440         if (params) {
6441                 conn->le_conn_min_interval = params->conn_min_interval;
6442                 conn->le_conn_max_interval = params->conn_max_interval;
6443                 conn->le_conn_latency = params->conn_latency;
6444                 conn->le_supv_timeout = params->supervision_timeout;
6445         } else {
6446                 conn->le_conn_min_interval = hdev->le_conn_min_interval;
6447                 conn->le_conn_max_interval = hdev->le_conn_max_interval;
6448                 conn->le_conn_latency = hdev->le_conn_latency;
6449                 conn->le_supv_timeout = hdev->le_supv_timeout;
6450         }
6451
6452         /* If controller is scanning, we stop it since some controllers are
6453          * not able to scan and connect at the same time. Also set the
6454          * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
6455          * handler for scan disabling knows to set the correct discovery
6456          * state.
6457          */
6458         if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
6459                 hci_scan_disable_sync(hdev);
6460                 hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
6461         }
6462
6463         /* Update random address, but set require_privacy to false so
6464          * that we never connect with a non-resolvable address.
6465          */
6466         err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6467                                              &own_addr_type);
6468         if (err)
6469                 goto done;
6470
6471         if (use_ext_conn(hdev)) {
6472                 err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
6473                 goto done;
6474         }
6475
6476         memset(&cp, 0, sizeof(cp));
6477
6478         cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
6479         cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
6480
6481         bacpy(&cp.peer_addr, &conn->dst);
6482         cp.peer_addr_type = conn->dst_type;
6483         cp.own_address_type = own_addr_type;
6484         cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
6485         cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
6486         cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
6487         cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
6488         cp.min_ce_len = cpu_to_le16(0x0000);
6489         cp.max_ce_len = cpu_to_le16(0x0000);
6490
6491         /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261:
6492          *
6493          * If this event is unmasked and the HCI_LE_Connection_Complete event
6494          * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is
6495          * sent when a new connection has been created.
6496          */
6497         err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
6498                                        sizeof(cp), &cp,
6499                                        use_enhanced_conn_complete(hdev) ?
6500                                        HCI_EV_LE_ENHANCED_CONN_COMPLETE :
6501                                        HCI_EV_LE_CONN_COMPLETE,
6502                                        conn->conn_timeout, NULL);
6503
6504 done:
6505         if (err == -ETIMEDOUT)
6506                 hci_le_connect_cancel_sync(hdev, conn, 0x00);
6507
6508         /* Re-enable advertising after the connection attempt is finished. */
6509         hci_resume_advertising_sync(hdev);
6510         return err;
6511 }
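
/* Editor's note -- illustrative only, not part of the kernel source: per
 * the spec quote above, when the enhanced event is unmasked it is the
 * only connection-complete event the controller emits, so it is the one
 * the command must wait for. A standalone sketch; the LE Meta subevent
 * codes 0x01 and 0x0a come from the Core Spec:
 *
 *     #include <stdint.h>
 *
 *     #define SUBEV_LE_CONN_COMPLETE          0x01
 *     #define SUBEV_LE_ENHANCED_CONN_COMPLETE 0x0a
 *
 *     static uint8_t conn_complete_subevent(int use_enhanced)
 *     {
 *             return use_enhanced ? SUBEV_LE_ENHANCED_CONN_COMPLETE :
 *                                   SUBEV_LE_CONN_COMPLETE;
 *     }
 */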
6512
6513 int hci_le_create_cis_sync(struct hci_dev *hdev)
6514 {
6515         DEFINE_FLEX(struct hci_cp_le_create_cis, cmd, cis, num_cis, 0x1f);
6516         size_t aux_num_cis = 0;
6517         struct hci_conn *conn;
6518         u8 cig = BT_ISO_QOS_CIG_UNSET;
6519
6520         /* The spec allows only one pending LE Create CIS command at a time. If
6521          * the command is pending now, don't do anything. We check for pending
6522          * connections after each CIS Established event.
6523          *
6524          * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
6525          * page 2566:
6526          *
6527          * If the Host issues this command before all the
6528          * HCI_LE_CIS_Established events from the previous use of the
6529          * command have been generated, the Controller shall return the
6530          * error code Command Disallowed (0x0C).
6531          *
6532          * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
6533          * page 2567:
6534          *
6535          * When the Controller receives the HCI_LE_Create_CIS command, the
6536          * Controller sends the HCI_Command_Status event to the Host. An
6537          * HCI_LE_CIS_Established event will be generated for each CIS when it
6538          * is established or if it is disconnected or considered lost before
6539          * being established; until all the events are generated, the command
6540          * remains pending.
6541          */
6542
6543         hci_dev_lock(hdev);
6544
6545         rcu_read_lock();
6546
6547         /* Wait until previous Create CIS has completed */
6548         list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6549                 if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
6550                         goto done;
6551         }
6552
6553         /* Find CIG with all CIS ready */
6554         list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6555                 struct hci_conn *link;
6556
6557                 if (hci_conn_check_create_cis(conn))
6558                         continue;
6559
6560                 cig = conn->iso_qos.ucast.cig;
6561
6562                 list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) {
6563                         if (hci_conn_check_create_cis(link) > 0 &&
6564                             link->iso_qos.ucast.cig == cig &&
6565                             link->state != BT_CONNECTED) {
6566                                 cig = BT_ISO_QOS_CIG_UNSET;
6567                                 break;
6568                         }
6569                 }
6570
6571                 if (cig != BT_ISO_QOS_CIG_UNSET)
6572                         break;
6573         }
6574
6575         if (cig == BT_ISO_QOS_CIG_UNSET)
6576                 goto done;
6577
6578         list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6579                 struct hci_cis *cis = &cmd->cis[aux_num_cis];
6580
6581                 if (hci_conn_check_create_cis(conn) ||
6582                     conn->iso_qos.ucast.cig != cig)
6583                         continue;
6584
6585                 set_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6586                 cis->acl_handle = cpu_to_le16(conn->parent->handle);
6587                 cis->cis_handle = cpu_to_le16(conn->handle);
6588                 aux_num_cis++;
6589
6590                 if (aux_num_cis >= cmd->num_cis)
6591                         break;
6592         }
6593         cmd->num_cis = aux_num_cis;
6594
6595 done:
6596         rcu_read_unlock();
6597
6598         hci_dev_unlock(hdev);
6599
6600         if (!aux_num_cis)
6601                 return 0;
6602
6603         /* Wait for HCI_LE_CIS_Established */
6604         return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS,
6605                                         struct_size(cmd, cis, cmd->num_cis),
6606                                         cmd, HCI_EVT_LE_CIS_ESTABLISHED,
6607                                         conn->conn_timeout, NULL);
6608 }
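
/* Editor's note -- illustrative only, not part of the kernel source: the
 * Create CIS command ends in a flexible array of CIS entries, and
 * struct_size(cmd, cis, cmd->num_cis) computes the number of bytes to
 * send. A standalone sketch of the same arithmetic (without the kernel
 * macro's overflow checking):
 *
 *     #include <stddef.h>
 *     #include <stdint.h>
 *
 *     struct cis_entry {
 *             uint16_t acl_handle;
 *             uint16_t cis_handle;
 *     };
 *
 *     struct create_cis {
 *             uint8_t num_cis;
 *             struct cis_entry cis[]; // flexible array member
 *     };
 *
 *     static size_t create_cis_size(uint8_t num_cis)
 *     {
 *             return offsetof(struct create_cis, cis) +
 *                    (size_t)num_cis * sizeof(struct cis_entry);
 *     }
 */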
6609
6610 int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle)
6611 {
6612         struct hci_cp_le_remove_cig cp;
6613
6614         memset(&cp, 0, sizeof(cp));
6615         cp.cig_id = handle;
6616
6617         return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp),
6618                                      &cp, HCI_CMD_TIMEOUT);
6619 }
6620
6621 int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle)
6622 {
6623         struct hci_cp_le_big_term_sync cp;
6624
6625         memset(&cp, 0, sizeof(cp));
6626         cp.handle = handle;
6627
6628         return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC,
6629                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6630 }
6631
6632 int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle)
6633 {
6634         struct hci_cp_le_pa_term_sync cp;
6635
6636         memset(&cp, 0, sizeof(cp));
6637         cp.handle = cpu_to_le16(handle);
6638
6639         return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC,
6640                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6641 }
6642
6643 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
6644                            bool use_rpa, struct adv_info *adv_instance,
6645                            u8 *own_addr_type, bdaddr_t *rand_addr)
6646 {
6647         int err;
6648
6649         bacpy(rand_addr, BDADDR_ANY);
6650
6651         /* If privacy is enabled use a resolvable private address. If
6652          * current RPA has expired then generate a new one.
6653          */
6654         if (use_rpa) {
6655                 /* If the controller supports LL Privacy, use own address
6656                  * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED)
6657                  */
6658                 if (use_ll_privacy(hdev))
6659                         *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
6660                 else
6661                         *own_addr_type = ADDR_LE_DEV_RANDOM;
6662
6663                 if (adv_instance) {
6664                         if (adv_rpa_valid(adv_instance))
6665                                 return 0;
6666                 } else {
6667                         if (rpa_valid(hdev))
6668                                 return 0;
6669                 }
6670
6671                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
6672                 if (err < 0) {
6673                         bt_dev_err(hdev, "failed to generate new RPA");
6674                         return err;
6675                 }
6676
6677                 bacpy(rand_addr, &hdev->rpa);
6678
6679                 return 0;
6680         }
6681
6682         /* In case of required privacy without resolvable private address,
6683          * use a non-resolvable private address. This is useful for
6684          * non-connectable advertising.
6685          */
6686         if (require_privacy) {
6687                 bdaddr_t nrpa;
6688
6689                 while (true) {
6690                         /* The non-resolvable private address is generated
6691                          * from six random bytes with the two most significant
6692                          * bits cleared.
6693                          */
6694                         get_random_bytes(&nrpa, 6);
6695                         nrpa.b[5] &= 0x3f;
6696
6697                         /* The non-resolvable private address shall not be
6698                          * equal to the public address.
6699                          */
6700                         if (bacmp(&hdev->bdaddr, &nrpa))
6701                                 break;
6702                 }
6703
6704                 *own_addr_type = ADDR_LE_DEV_RANDOM;
6705                 bacpy(rand_addr, &nrpa);
6706
6707                 return 0;
6708         }
6709
6710         /* No privacy so use a public address. */
6711         *own_addr_type = ADDR_LE_DEV_PUBLIC;
6712
6713         return 0;
6714 }
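
/* Editor's note -- illustrative only, not part of the kernel source: a
 * standalone sketch of the non-resolvable private address generation in
 * the loop above. In the kernel's bdaddr_t layout b[5] is the most
 * significant byte, so masking it with 0x3f clears the two top bits that
 * mark the address type. rand() stands in for get_random_bytes() purely
 * for illustration and is not cryptographically suitable:
 *
 *     #include <stdint.h>
 *     #include <stdlib.h>
 *     #include <string.h>
 *
 *     typedef struct { uint8_t b[6]; } bdaddr;
 *
 *     static bdaddr make_nrpa(const bdaddr *public_addr)
 *     {
 *             bdaddr nrpa;
 *             int i;
 *
 *             do {
 *                     for (i = 0; i < 6; i++)
 *                             nrpa.b[i] = (uint8_t)rand();
 *                     nrpa.b[5] &= 0x3f; // two MSBs cleared => NRPA
 *                     // must not collide with the public address
 *             } while (!memcmp(&nrpa, public_addr, sizeof(nrpa)));
 *             return nrpa;
 *     }
 */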
6715
6716 static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
6717 {
6718         u8 instance = PTR_UINT(data);
6719
6720         return hci_update_adv_data_sync(hdev, instance);
6721 }
6722
6723 int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
6724 {
6725         return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
6726                                   UINT_PTR(instance), NULL);
6727 }
6728
6729 static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data)
6730 {
6731         struct hci_conn *conn = data;
6732         struct inquiry_entry *ie;
6733         struct hci_cp_create_conn cp;
6734         int err;
6735
6736         if (!hci_conn_valid(hdev, conn))
6737                 return -ECANCELED;
6738
6739         /* Many controllers disallow HCI Create Connection while an HCI
6740          * Inquiry is in progress, so cancel the Inquiry first before issuing
6741          * HCI Create Connection. This may cause the MGMT discovering state to
6742          * become false without user space requesting it, but that is okay
6743          * since the MGMT Discovery APIs do not promise that discovery runs
6744          * forever. Instead, user space monitors the MGMT discovering status
6745          * and may request discovery again when the flag becomes false.
6746          */
6747         if (test_bit(HCI_INQUIRY, &hdev->flags)) {
6748                 err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0,
6749                                             NULL, HCI_CMD_TIMEOUT);
6750                 if (err)
6751                         bt_dev_warn(hdev, "Failed to cancel inquiry %d", err);
6752         }
6753
6754         conn->state = BT_CONNECT;
6755         conn->out = true;
6756         conn->role = HCI_ROLE_MASTER;
6757
6758         conn->attempt++;
6759
6760         conn->link_policy = hdev->link_policy;
6761
6762         memset(&cp, 0, sizeof(cp));
6763         bacpy(&cp.bdaddr, &conn->dst);
6764         cp.pscan_rep_mode = 0x02;
6765
6766         ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
6767         if (ie) {
6768                 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
6769                         cp.pscan_rep_mode = ie->data.pscan_rep_mode;
6770                         cp.pscan_mode     = ie->data.pscan_mode;
6771                         cp.clock_offset   = ie->data.clock_offset |
6772                                             cpu_to_le16(0x8000);
6773                 }
6774
6775                 memcpy(conn->dev_class, ie->data.dev_class, 3);
6776         }
6777
6778         cp.pkt_type = cpu_to_le16(conn->pkt_type);
6779         if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
6780                 cp.role_switch = 0x01;
6781         else
6782                 cp.role_switch = 0x00;
6783
6784         return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN,
6785                                         sizeof(cp), &cp,
6786                                         HCI_EV_CONN_COMPLETE,
6787                                         conn->conn_timeout, NULL);
6788 }
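
/* Editor's note -- illustrative only, not part of the kernel source:
 * OR-ing cpu_to_le16(0x8000) into clock_offset above sets the
 * Clock_Offset_Valid flag (bit 15) of HCI_Create_Connection, telling the
 * controller that the lower 15 bits carry a usable offset taken from the
 * inquiry cache. A standalone sketch, ignoring the little-endian
 * conversion for clarity:
 *
 *     #include <stdint.h>
 *
 *     static uint16_t clock_offset_valid(uint16_t cached_offset)
 *     {
 *             return (uint16_t)(cached_offset | 0x8000); // bit 15 = valid
 *     }
 */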
6789
6790 int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn)
6791 {
6792         return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn,
6793                                        NULL);
6794 }
6795
6796 static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
6797 {
6798         struct hci_conn *conn = data;
6799
6800         bt_dev_dbg(hdev, "err %d", err);
6801
6802         if (err == -ECANCELED)
6803                 return;
6804
6805         hci_dev_lock(hdev);
6806
6807         if (!hci_conn_valid(hdev, conn))
6808                 goto done;
6809
6810         if (!err) {
6811                 hci_connect_le_scan_cleanup(conn, 0x00);
6812                 goto done;
6813         }
6814
6815         /* Check if connection is still pending */
6816         if (conn != hci_lookup_le_connect(hdev))
6817                 goto done;
6818
6819         /* Flush to make sure we send create conn cancel command if needed */
6820         flush_delayed_work(&conn->le_conn_timeout);
6821         hci_conn_failed(conn, bt_status(err));
6822
6823 done:
6824         hci_dev_unlock(hdev);
6825 }
6826
6827 int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn)
6828 {
6829         return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn,
6830                                        create_le_conn_complete);
6831 }
6832
6833 int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
6834 {
6835         if (conn->state != BT_OPEN)
6836                 return -EINVAL;
6837
6838         switch (conn->type) {
6839         case ACL_LINK:
6840                 return !hci_cmd_sync_dequeue_once(hdev,
6841                                                   hci_acl_create_conn_sync,
6842                                                   conn, NULL);
6843         case LE_LINK:
6844                 return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync,
6845                                                   conn, create_le_conn_complete);
6846         }
6847
6848         return -ENOENT;
6849 }
6850
6851 int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
6852                             struct hci_conn_params *params)
6853 {
6854         struct hci_cp_le_conn_update cp;
6855
6856         memset(&cp, 0, sizeof(cp));
6857         cp.handle               = cpu_to_le16(conn->handle);
6858         cp.conn_interval_min    = cpu_to_le16(params->conn_min_interval);
6859         cp.conn_interval_max    = cpu_to_le16(params->conn_max_interval);
6860         cp.conn_latency         = cpu_to_le16(params->conn_latency);
6861         cp.supervision_timeout  = cpu_to_le16(params->supervision_timeout);
6862         cp.min_ce_len           = cpu_to_le16(0x0000);
6863         cp.max_ce_len           = cpu_to_le16(0x0000);
6864
6865         return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE,
6866                                      sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6867 }
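
/* Editor's note -- illustrative only, not part of the kernel source: the
 * Core Spec requires the supervision timeout (10 ms units) to be larger
 * than (1 + conn_latency) * conn_interval_max (1.25 ms units) * 2. A
 * standalone check, computed in 0.25 ms units to stay in integers:
 *
 *     #include <stdbool.h>
 *     #include <stdint.h>
 *
 *     static bool conn_params_sane(uint16_t interval_max, uint16_t latency,
 *                                  uint16_t timeout)
 *     {
 *             // timeout * 10 ms > (1 + latency) * interval_max * 1.25 ms * 2
 *             // scaled by 4: timeout * 40 > (1 + latency) * interval_max * 10
 *             return (uint32_t)timeout * 40 >
 *                    (uint32_t)(1 + latency) * interval_max * 10;
 *     }
 */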