[linux.git] net/bluetooth/hci_request.c
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"
#include "eir.h"

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
        skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
        return hdev->req_status == HCI_REQ_PEND;
}

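/* Core of the request machinery: attach the completion callback to the
 * last command in the request, splice the request's command queue onto
 * the device command queue and kick cmd_work so the commands get sent.
 */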
static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                           struct sk_buff *skb)
{
        bt_dev_dbg(hdev, "result 0x%2.2x", result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion; normally called with the
 * request lock held via hci_req_sync_lock() (see hci_req_sync() below).
 */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        int err = 0;

        bt_dev_dbg(hdev, "start");

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        bt_dev_dbg(hdev, "end: err %d", err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        /* Check the state after obtaining the lock to protect HCI_UP
         * against any races from hci_dev_do_close() when the controller
         * gets removed.
         */
        if (test_bit(HCI_UP, &hdev->flags))
                ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        else
                ret = -ENETDOWN;
        hci_req_sync_unlock(hdev);

        return ret;
}

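/* Allocate an skb carrying an HCI command: the command header with
 * opcode and parameter length, followed by the optional parameter
 * payload. Returns NULL on allocation failure.
 */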
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                skb_put_data(skb, param, plen);

        bt_dev_dbg(hdev, "skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
                           opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        hci_skb_event(skb) = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = hdev->def_page_scan_type;
                acp.interval = cpu_to_le16(hdev->def_page_scan_int);
        }

        acp.window = cpu_to_le16(hdev->def_page_scan_window);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static void start_interleave_scan(struct hci_dev *hdev)
{
        hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
        return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
        bt_dev_dbg(hdev, "cancelling interleave scan");

        cancel_delayed_work_sync(&hdev->interleave_scan);

        hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if this function newly starts an interleave scan; return
 * false otherwise (including when a scan was already running).
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
        /* Do interleaved scan only if all of the following are true:
         * - There is at least one ADV monitor
         * - At least one pending LE connection or one device to be scanned for
         * - Monitor offloading is not supported
         * If so, we should alternate between allowlist scan and one without
         * any filters to save power.
         */
        bool use_interleaving = hci_is_adv_monitoring(hdev) &&
                                !(list_empty(&hdev->pend_le_conns) &&
                                  list_empty(&hdev->pend_le_reports)) &&
                                hci_get_adv_monitor_offload_ext(hdev) ==
                                    HCI_ADV_MONITOR_EXT_NONE;
        bool is_interleaving = is_interleave_scanning(hdev);

        if (use_interleaving && !is_interleaving) {
                start_interleave_scan(hdev);
                bt_dev_dbg(hdev, "starting interleave scan");
                return true;
        }

        if (!use_interleaving && is_interleaving)
                cancel_interleave_scan(hdev);

        return false;
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        eir_create(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
                            &cp);
        } else {
                struct hci_cp_le_set_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }

        /* Disable address resolution */
        if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
                __u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }
}

static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
                                 u8 bdaddr_type)
{
        struct hci_cp_le_del_from_accept_list cp;

        cp.bdaddr_type = bdaddr_type;
        bacpy(&cp.bdaddr, bdaddr);

        bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(req->hdev)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
                if (irk) {
                        struct hci_cp_le_del_from_resolv_list cp;

                        cp.bdaddr_type = bdaddr_type;
                        bacpy(&cp.bdaddr, bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }
}

/* Adds the connection to the accept list if needed. Returns -1 when the
 * device cannot be programmed (list full, or RPA in use without LL
 * privacy), 0 otherwise.
 */
static int add_to_accept_list(struct hci_request *req,
                              struct hci_conn_params *params, u8 *num_entries,
                              bool allow_rpa)
{
        struct hci_cp_le_add_to_accept_list cp;
        struct hci_dev *hdev = req->hdev;

        /* Already in accept list */
        if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
                                   params->addr_type))
                return 0;

        /* Accept list is full; signal the caller to select a filter
         * policy that accepts all advertising.
         */
        if (*num_entries >= hdev->le_accept_list_size)
                return -1;

        /* Accept list cannot be used with RPAs */
        if (!allow_rpa &&
            !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
                return -1;
        }

        /* During suspend, only wakeable devices can be in accept list */
        if (hdev->suspended &&
            !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
                return 0;

        *num_entries += 1;
        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(hdev)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(hdev, &params->addr,
                                           params->addr_type);
                if (irk) {
                        struct hci_cp_le_add_to_resolv_list cp;

                        cp.bdaddr_type = params->addr_type;
                        bacpy(&cp.bdaddr, &params->addr);
                        memcpy(cp.peer_irk, irk->val, 16);

                        if (hci_dev_test_flag(hdev, HCI_PRIVACY))
                                memcpy(cp.local_irk, hdev->irk, 16);
                        else
                                memset(cp.local_irk, 0, 16);

                        hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }

        return 0;
}

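/* Returns the filter policy to use for passive scanning: 0x01 when the
 * accept list was programmed and should be used, 0x00 when scanning must
 * run unfiltered (list full, RPAs in use without LL privacy, or an ADV
 * monitor needs to see all advertisements).
 */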
static u8 update_accept_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        u8 num_entries = 0;
        bool pend_conn, pend_report;
        /* We allow use of the accept list even with RPAs in suspend. In
         * the worst case, we won't be able to wake from devices that use
         * the privacy 1.2 features. Additionally, once we support
         * privacy 1.2 and IRK offloading, we can update this to also
         * check for those conditions.
         */
        bool allow_rpa = hdev->suspended;

        if (use_ll_privacy(hdev))
                allow_rpa = true;

        /* Go through the current accept list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_accept_list, list) {
                pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                                      &b->bdaddr,
                                                      b->bdaddr_type);
                pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                                        &b->bdaddr,
                                                        b->bdaddr_type);

                /* If the device is not likely to connect or report,
                 * remove it from the accept list.
                 */
                if (!pend_conn && !pend_report) {
                        del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
                        continue;
                }

                /* Accept list cannot be used with RPAs */
                if (!allow_rpa &&
                    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
                    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        return 0x00;
                }

                num_entries++;
        }

        /* Since all entries that are no longer valid have been removed
         * from the accept list, walk through the list of pending
         * connections and ensure that any new device gets programmed
         * into the controller.
         *
         * If the list of devices is larger than the number of available
         * accept list entries in the controller, then just abort and
         * return a filter policy value that does not use the accept
         * list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (add_to_accept_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * accept list if there is still space. Abort if space runs out.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (add_to_accept_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* Use the allowlist unless all of the following conditions are true:
         * - We are not currently suspending
         * - At least one ADV monitor is registered and offloading is not
         *   supported
         * - Interleaved scanning is not currently using the allowlist
         */
        if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
            hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
            hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
                return 0x00;

        /* Select filter policy to use accept list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

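/* Queue the commands to start scanning with the given parameters, using
 * either the extended (LE Set Extended Scan Parameters/Enable) or the
 * legacy scan commands depending on controller support.
 */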
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
                               u16 window, u8 own_addr_type, u8 filter_policy,
                               bool filter_dup, bool addr_resolv)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (use_ll_privacy(hdev) && addr_resolv) {
                u8 enable = 0x01;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }

        /* Use extended scanning if the controller supports the LE Set
         * Extended Scan Parameters and LE Set Extended Scan Enable commands.
         */
        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_params *ext_param_cp;
                struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
                struct hci_cp_le_scan_phy_params *phy_params;
                u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
                u32 plen;

                ext_param_cp = (void *)data;
                phy_params = (void *)ext_param_cp->data;

                memset(ext_param_cp, 0, sizeof(*ext_param_cp));
                ext_param_cp->own_addr_type = own_addr_type;
                ext_param_cp->filter_policy = filter_policy;

                plen = sizeof(*ext_param_cp);

                if (scan_1m(hdev) || scan_2m(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                if (scan_coded(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
                            plen, ext_param_cp);

                memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
                ext_enable_cp.enable = LE_SCAN_ENABLE;
                ext_enable_cp.filter_dup = filter_dup;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
                            sizeof(ext_enable_cp), &ext_enable_cp);
        } else {
                struct hci_cp_le_set_scan_param param_cp;
                struct hci_cp_le_set_scan_enable enable_cp;

                memset(&param_cp, 0, sizeof(param_cp));
                param_cp.type = type;
                param_cp.interval = cpu_to_le16(interval);
                param_cp.window = cpu_to_le16(window);
                param_cp.own_address_type = own_addr_type;
                param_cp.filter_policy = filter_policy;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                            &param_cp);

                memset(&enable_cp, 0, sizeof(enable_cp));
                enable_cp.enable = LE_SCAN_ENABLE;
                enable_cp.filter_dup = filter_dup;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                            &enable_cp);
        }
}

/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn  *c;

        rcu_read_lock();

        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type == LE_LINK && c->state == BT_CONNECT &&
                    test_bit(HCI_CONN_SCANNING, &c->flags)) {
                        rcu_read_unlock();
                        return true;
                }
        }

        rcu_read_unlock();

        return false;
}

/* Be sure to call hci_req_add_le_scan_disable() first to disable the
 * controller-based address resolution before the resolving list can be
 * reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;
        u16 window, interval;
        /* Default is to enable duplicates filter */
        u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        /* Background scanning should run with address resolution */
        bool addr_resolv = true;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        /* Set require_privacy to false since no SCAN_REQ is sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        if (hdev->enable_advmon_interleave_scan &&
            __hci_update_interleaved_scan(hdev))
                return;

        bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
        /* Adding or removing entries from the accept list must
         * happen before enabling scanning. The controller does
         * not allow accept list modification while scanning.
         */
        filter_policy = update_accept_list(req);

        /* When the controller is using resolvable random addresses, and
         * thus has LE privacy enabled, controllers that support the
         * Extended Scanner Filter Policies can also handle directed
         * advertising.
         *
         * So instead of using filter policies 0x00 (no accept list)
         * and 0x01 (accept list enabled), use the new filter policies
         * 0x02 (no accept list) and 0x03 (accept list enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        if (hdev->suspended) {
                window = hdev->le_scan_window_suspend;
                interval = hdev->le_scan_int_suspend;
        } else if (hci_is_le_conn_scanning(hdev)) {
                window = hdev->le_scan_window_connect;
                interval = hdev->le_scan_int_connect;
        } else if (hci_is_adv_monitoring(hdev)) {
                window = hdev->le_scan_window_adv_monitor;
                interval = hdev->le_scan_int_adv_monitor;

                /* Disable the duplicates filter when scanning for
                 * advertisement monitors for the following reasons.
                 *
                 * For hardware pattern filtering (e.g. MSFT), Realtek and
                 * Qualcomm controllers ignore RSSI_Sampling_Period when the
                 * duplicates filter is enabled.
                 *
                 * For software pattern filtering, when we're not doing
                 * interleaved scanning, it is necessary to disable the
                 * duplicates filter, otherwise hosts can only receive one
                 * advertisement and it's impossible to know if a peer is
                 * still in range.
                 */
                filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
        } else {
                window = hdev->le_scan_window;
                interval = hdev->le_scan_interval;
        }

        bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
                   filter_policy);
        hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
                           own_addr_type, filter_policy, filter_dup,
                           addr_resolv);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
        return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        if (ext_adv_capable(req->hdev)) {
                __hci_req_disable_ext_adv_instance(req, 0x00);
        } else {
                u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
        }
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
        /* If there is no connection we are OK to advertise. */
        if (hci_conn_num(hdev, LE_LINK) == 0)
                return true;

        /* Check le_states if there is any connection in peripheral role. */
        if (hdev->conn_hash.le_num_peripheral > 0) {
                /* Peripheral connection state and non connectable mode bit 20.
                 */
                if (!connectable && !(hdev->le_states[2] & 0x10))
                        return false;

                /* Peripheral connection state and connectable mode bit 38
                 * and scannable bit 21.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x40) ||
                                    !(hdev->le_states[2] & 0x20)))
                        return false;
        }

        /* Check le_states if there is any connection in central role. */
        if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
                /* Central connection state and non connectable mode bit 18. */
                if (!connectable && !(hdev->le_states[2] & 0x02))
                        return false;

                /* Central connection state and connectable mode bit 35 and
                 * scannable 19.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x08) ||
                                    !(hdev->le_states[2] & 0x08)))
                        return false;
        }

        return true;
}

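/* Queue the commands to (re)enable legacy advertising for the current
 * instance: choose the ADV type from the connectable/scannable state,
 * pick the advertising intervals and own address type, then set the
 * parameters and enable advertising.
 */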
void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u16 adv_min_interval, adv_max_interval;
        u32 flags;

        flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
        adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        if (!is_advertising_allowed(hdev, connectable))
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));

        if (adv) {
                adv_min_interval = adv->min_interval;
                adv_max_interval = adv->max_interval;
        } else {
                adv_min_interval = hdev->le_adv_min_interval;
                adv_max_interval = hdev->le_adv_max_interval;
        }

        if (connectable) {
                cp.type = LE_ADV_IND;
        } else {
                if (adv_cur_instance_is_scannable(hdev))
                        cp.type = LE_ADV_SCAN_IND;
                else
                        cp.type = LE_ADV_NONCONN_IND;

                if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
                    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
                        adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
                        adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
                }
        }

        cp.min_interval = cpu_to_le16(adv_min_interval);
        cp.max_interval = cpu_to_le16(adv_max_interval);
        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        if (ext_adv_capable(hdev)) {
                struct {
                        struct hci_cp_le_set_ext_scan_rsp_data cp;
                        u8 data[HCI_MAX_EXT_AD_LENGTH];
                } pdu;

                memset(&pdu, 0, sizeof(pdu));

                len = eir_create_scan_rsp(hdev, instance, pdu.data);

                if (hdev->scan_rsp_data_len == len &&
                    !memcmp(pdu.data, hdev->scan_rsp_data, len))
                        return;

                memcpy(hdev->scan_rsp_data, pdu.data, len);
                hdev->scan_rsp_data_len = len;

                pdu.cp.handle = instance;
                pdu.cp.length = len;
                pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
                pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
                            sizeof(pdu.cp) + len, &pdu.cp);
        } else {
                struct hci_cp_le_set_scan_rsp_data cp;

                memset(&cp, 0, sizeof(cp));

                len = eir_create_scan_rsp(hdev, instance, cp.data);

                if (hdev->scan_rsp_data_len == len &&
                    !memcmp(cp.data, hdev->scan_rsp_data, len))
                        return;

                memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
                hdev->scan_rsp_data_len = len;

                cp.length = len;

                hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
        }
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        if (ext_adv_capable(hdev)) {
                struct {
                        struct hci_cp_le_set_ext_adv_data cp;
                        u8 data[HCI_MAX_EXT_AD_LENGTH];
                } pdu;

                memset(&pdu, 0, sizeof(pdu));

                len = eir_create_adv_data(hdev, instance, pdu.data);

                /* There's nothing to do if the data hasn't changed */
                if (hdev->adv_data_len == len &&
                    memcmp(pdu.data, hdev->adv_data, len) == 0)
                        return;

                memcpy(hdev->adv_data, pdu.data, len);
                hdev->adv_data_len = len;

                pdu.cp.length = len;
                pdu.cp.handle = instance;
                pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
                pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

                hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
                            sizeof(pdu.cp) + len, &pdu.cp);
        } else {
                struct hci_cp_le_set_adv_data cp;

                memset(&cp, 0, sizeof(cp));

                len = eir_create_adv_data(hdev, instance, cp.data);

                /* There's nothing to do if the data hasn't changed */
                if (hdev->adv_data_len == len &&
                    memcmp(cp.data, hdev->adv_data, len) == 0)
                        return;

                memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
                hdev->adv_data_len = len;

                cp.length = len;

                hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
        }
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_update_adv_data(&req, instance);

        return hci_req_run(&req, NULL);
}

static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
                                            u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_disable_address_resolution(struct hci_dev *hdev)
{
        struct hci_request req;
        __u8 enable = 0x00;

        if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
                return;

        hci_req_init(&req, hdev);

        hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);

        hci_req_run(&req, enable_addr_resolution_complete);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        bt_dev_dbg(hdev, "status %u", status);
}

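/* Re-enable advertising after it has been turned off (e.g. once a
 * connection has used up the advertising set): reschedule the current
 * instance if one is set, otherwise fall back to the default
 * instance 0x00.
 */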
void hci_req_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return;

        hci_req_init(&req, hdev);

        if (hdev->cur_adv_instance) {
                __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
                                                true);
        } else {
                if (ext_adv_capable(hdev)) {
                        __hci_req_start_ext_adv(&req, 0x00);
                } else {
                        __hci_req_update_adv_data(&req, 0x00);
                        __hci_req_update_scan_rsp_data(&req, 0x00);
                        __hci_req_enable_advertising(&req);
                }
        }

        hci_req_run(&req, adv_enable_complete);
}

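/* Work callback for the advertising-instance expiry timer: remove the
 * expired instance and, if no instances remain, disable advertising
 * altogether.
 */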
static void adv_timeout_expire(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);

        struct hci_request req;
        u8 instance;

        bt_dev_dbg(hdev, "");

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        instance = hdev->cur_adv_instance;
        if (instance == 0x00)
                goto unlock;

        hci_req_init(&req, hdev);

        hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                __hci_req_disable_advertising(&req);

        hci_req_run(&req, NULL);

unlock:
        hci_dev_unlock(hdev);
}

static int hci_req_add_le_interleaved_scan(struct hci_request *req,
                                           unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        int ret = 0;

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                hci_req_add_le_scan_disable(req, false);
        hci_req_add_le_passive_scan(req);

        switch (hdev->interleave_scan_state) {
        case INTERLEAVE_SCAN_ALLOWLIST:
                bt_dev_dbg(hdev, "next state: allowlist");
                hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
                break;
        case INTERLEAVE_SCAN_NO_FILTER:
                bt_dev_dbg(hdev, "next state: no filter");
                hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
                break;
        case INTERLEAVE_SCAN_NONE:
                BT_ERR("unexpected error");
                ret = -1;
        }

        hci_dev_unlock(hdev);

        return ret;
}

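/* Work callback driving the interleave state machine: run one scan
 * iteration for the current state and re-queue itself with the
 * configured allowlist/no-filter duration until interleaving is
 * cancelled.
 */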
static void interleave_scan_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            interleave_scan.work);
        u8 status;
        unsigned long timeout;

        if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
                timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
        } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
                timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
        } else {
                bt_dev_err(hdev, "unexpected error");
                return;
        }

        hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
                     HCI_CMD_TIMEOUT, &status);

        /* Don't continue interleaving if it was canceled */
        if (is_interleave_scanning(hdev))
                queue_delayed_work(hdev->req_workqueue,
                                   &hdev->interleave_scan, timeout);
}

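/* Pick the own address type (and, where needed, a random address to
 * program) for advertising: an RPA when privacy is in use, a
 * non-resolvable private address when privacy is required for
 * non-connectable advertising, and the public address otherwise.
 */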
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
                           bool use_rpa, struct adv_info *adv_instance,
                           u8 *own_addr_type, bdaddr_t *rand_addr)
{
        int err;

        bacpy(rand_addr, BDADDR_ANY);

        /* If privacy is enabled use a resolvable private address. If
         * current RPA has expired then generate a new one.
         */
        if (use_rpa) {
                /* If the controller supports LL Privacy, use own address
                 * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
                 */
                if (use_ll_privacy(hdev))
                        *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
                else
                        *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (adv_instance) {
                        if (adv_rpa_valid(adv_instance))
                                return 0;
                } else {
                        if (rpa_valid(hdev))
                                return 0;
                }

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        bt_dev_err(hdev, "failed to generate new RPA");
                        return err;
                }

                bacpy(rand_addr, &hdev->rpa);

                return 0;
        }

        /* In case privacy is required without a resolvable private address,
         * use a non-resolvable private address. This is useful for
         * non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from random six bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                bacpy(rand_addr, &nrpa);

                return 0;
        }

        /* No privacy so use a public address. */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}

void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
        hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                bt_dev_dbg(hdev, "Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

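/* Queue the LE Set Extended Advertising Parameters command for the given
 * instance (0x00 is the default instance without an adv_info), deriving
 * the event properties from the connectable/scannable flags and, when a
 * random address is in use, queueing the Set Advertising Set Random
 * Address command as well.
 */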
int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
{
        struct hci_cp_le_set_ext_adv_params cp;
        struct hci_dev *hdev = req->hdev;
        bool connectable;
        u32 flags;
        bdaddr_t random_addr;
        u8 own_addr_type;
        int err;
        struct adv_info *adv;
        bool secondary_adv, require_privacy;

        if (instance > 0) {
                adv = hci_find_adv_instance(hdev, instance);
                if (!adv)
                        return -EINVAL;
        } else {
                adv = NULL;
        }

        flags = hci_adv_instance_flags(hdev, instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        if (!is_advertising_allowed(hdev, connectable))
                return -EPERM;

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        require_privacy = !connectable;

        /* Don't require privacy for periodic adv? */
        if (adv && adv->periodic)
                require_privacy = false;

        err = hci_get_random_address(hdev, require_privacy,
                                     adv_use_rpa(hdev, flags), adv,
                                     &own_addr_type, &random_addr);
        if (err < 0)
                return err;

        memset(&cp, 0, sizeof(cp));

        if (adv) {
                hci_cpu_to_le24(adv->min_interval, cp.min_interval);
                hci_cpu_to_le24(adv->max_interval, cp.max_interval);
                cp.tx_power = adv->tx_power;
        } else {
                hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
                hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
                cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
        }

        secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

        if (connectable) {
                if (secondary_adv)
                        cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
                else
                        cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
        } else if (hci_adv_instance_is_scannable(hdev, instance) ||
                   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
                if (secondary_adv)
                        cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
                else
                        cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
        } else {
                /* Secondary and periodic cannot use legacy PDUs */
                if (secondary_adv || (adv && adv->periodic))
                        cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
                else
                        cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
        }

        cp.own_addr_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;
        cp.handle = instance;

        if (flags & MGMT_ADV_FLAG_SEC_2M) {
                cp.primary_phy = HCI_ADV_PHY_1M;
                cp.secondary_phy = HCI_ADV_PHY_2M;
        } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
                cp.primary_phy = HCI_ADV_PHY_CODED;
                cp.secondary_phy = HCI_ADV_PHY_CODED;
        } else {
                /* In all other cases use 1M */
                cp.primary_phy = HCI_ADV_PHY_1M;
                cp.secondary_phy = HCI_ADV_PHY_1M;
        }

        hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);

        if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
             own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
            bacmp(&random_addr, BDADDR_ANY)) {
                struct hci_cp_le_set_adv_set_rand_addr cp;

                /* Check if the random address needs to be updated */
                if (adv) {
                        if (!bacmp(&random_addr, &adv->random_addr))
                                return 0;
                } else {
                        if (!bacmp(&random_addr, &hdev->random_addr))
                                return 0;
                        /* Instance 0x00 doesn't have an adv_info; instead it
                         * uses hdev->random_addr to track its address, so
                         * whenever it needs to be updated also set the
                         * random address, since hdev->random_addr is shared
                         * with the scan state machine.
                         */
                        set_random_addr(req, &random_addr);
                }

                memset(&cp, 0, sizeof(cp));

                cp.handle = instance;
                bacpy(&cp.bdaddr, &random_addr);

                hci_req_add(req,
                            HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
                            sizeof(cp), &cp);
        }

        return 0;
}

1463 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1464 {
1465         struct hci_dev *hdev = req->hdev;
1466         struct hci_cp_le_set_ext_adv_enable *cp;
1467         struct hci_cp_ext_adv_set *adv_set;
1468         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1469         struct adv_info *adv_instance;
1470
1471         if (instance > 0) {
1472                 adv_instance = hci_find_adv_instance(hdev, instance);
1473                 if (!adv_instance)
1474                         return -EINVAL;
1475         } else {
1476                 adv_instance = NULL;
1477         }
1478
1479         cp = (void *) data;
1480         adv_set = (void *) cp->data;
1481
1482         memset(cp, 0, sizeof(*cp));
1483
1484         cp->enable = 0x01;
1485         cp->num_of_sets = 0x01;
1486
1487         memset(adv_set, 0, sizeof(*adv_set));
1488
1489         adv_set->handle = instance;
1490
1491         /* Set the duration per instance, since the controller is
1492          * responsible for scheduling it.
1493          */
1494         if (adv_instance && adv_instance->duration) {
1495                 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
1496
1497                 /* Time = N * 10 ms */
1498                 adv_set->duration = cpu_to_le16(duration / 10);
1499         }
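        /* For example (illustration only): with a timeout of 5 seconds,
         * duration = 5 * 1000 = 5000 ms and adv_set->duration becomes
         * 5000 / 10 = 500, i.e. N = 500 in the controller's 10 ms units.
         */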
1500
1501         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1502                     sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1503                     data);
1504
1505         return 0;
1506 }
1507
1508 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
1509 {
1510         struct hci_dev *hdev = req->hdev;
1511         struct hci_cp_le_set_ext_adv_enable *cp;
1512         struct hci_cp_ext_adv_set *adv_set;
1513         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1514         u8 req_size;
1515
1516         /* If request specifies an instance that doesn't exist, fail */
1517         if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1518                 return -EINVAL;
1519
1520         memset(data, 0, sizeof(data));
1521
1522         cp = (void *)data;
1523         adv_set = (void *)cp->data;
1524
1525         /* Instance 0x00 indicates all advertising instances will be disabled */
1526         cp->num_of_sets = !!instance;
1527         cp->enable = 0x00;
1528
1529         adv_set->handle = instance;
1530
1531         req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
1532         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
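        /* Note that when instance is 0x00 above, num_of_sets is 0 and
         * req_size shrinks to sizeof(*cp), so no hci_cp_ext_adv_set entry
         * is sent at all; enable = 0x00 with zero sets instructs the
         * controller to disable every advertising set.
         */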
1533
1534         return 0;
1535 }
1536
1537 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
1538 {
1539         struct hci_dev *hdev = req->hdev;
1540
1541         /* If request specifies an instance that doesn't exist, fail */
1542         if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1543                 return -EINVAL;
1544
1545         hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
1546
1547         return 0;
1548 }
1549
1550 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1551 {
1552         struct hci_dev *hdev = req->hdev;
1553         struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
1554         int err;
1555
1556         /* If the instance isn't pending, the controller already knows
1557          * about it, and it's safe to disable it first.
1558          */
1559         if (adv_instance && !adv_instance->pending)
1560                 __hci_req_disable_ext_adv_instance(req, instance);
1561
1562         err = __hci_req_setup_ext_adv_instance(req, instance);
1563         if (err < 0)
1564                 return err;
1565
1566         __hci_req_update_scan_rsp_data(req, instance);
1567         __hci_req_enable_ext_advertising(req, instance);
1568
1569         return 0;
1570 }
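/* Note: the disable-before-setup ordering above matters because the
 * controller rejects LE Set Extended Advertising Parameters with
 * Command Disallowed while the set is enabled, so a known (non-pending)
 * instance is disabled first and re-enabled only after its parameters
 * and scan response data have been updated.
 */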
1571
1572 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1573                                     bool force)
1574 {
1575         struct hci_dev *hdev = req->hdev;
1576         struct adv_info *adv_instance = NULL;
1577         u16 timeout;
1578
1579         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1580             list_empty(&hdev->adv_instances))
1581                 return -EPERM;
1582
1583         if (hdev->adv_instance_timeout)
1584                 return -EBUSY;
1585
1586         adv_instance = hci_find_adv_instance(hdev, instance);
1587         if (!adv_instance)
1588                 return -ENOENT;
1589
1590         /* A zero timeout means unlimited advertising. As long as there is
1591          * only one instance, duration should be ignored. We still set a timeout
1592          * in case further instances are being added later on.
1593          *
1594          * If the remaining lifetime of the instance is more than the duration
1595          * then the timeout corresponds to the duration, otherwise it will be
1596          * reduced to the remaining instance lifetime.
1597          */
1598         if (adv_instance->timeout == 0 ||
1599             adv_instance->duration <= adv_instance->remaining_time)
1600                 timeout = adv_instance->duration;
1601         else
1602                 timeout = adv_instance->remaining_time;
1603
1604         /* The remaining time is reduced unless the instance is being
1605          * advertised without a time limit.
1606          */
1607         if (adv_instance->timeout)
1608                 adv_instance->remaining_time =
1609                                 adv_instance->remaining_time - timeout;
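        /* For example (illustration only): with duration = 10 s and
         * remaining_time = 25 s, the first two passes advertise for 10 s
         * each (remaining_time drops to 15, then 5) and the third pass is
         * clamped to the remaining 5 s, leaving remaining_time at 0.
         */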
1610
1611         /* Only use work for scheduling instances with legacy advertising */
1612         if (!ext_adv_capable(hdev)) {
1613                 hdev->adv_instance_timeout = timeout;
1614                 queue_delayed_work(hdev->req_workqueue,
1615                            &hdev->adv_instance_expire,
1616                            msecs_to_jiffies(timeout * 1000));
1617         }
1618
1619         /* If we're just re-scheduling the same instance again then do not
1620          * execute any HCI commands. This happens when a single instance is
1621          * being advertised.
1622          */
1623         if (!force && hdev->cur_adv_instance == instance &&
1624             hci_dev_test_flag(hdev, HCI_LE_ADV))
1625                 return 0;
1626
1627         hdev->cur_adv_instance = instance;
1628         if (ext_adv_capable(hdev)) {
1629                 __hci_req_start_ext_adv(req, instance);
1630         } else {
1631                 __hci_req_update_adv_data(req, instance);
1632                 __hci_req_update_scan_rsp_data(req, instance);
1633                 __hci_req_enable_advertising(req);
1634         }
1635
1636         return 0;
1637 }
1638
1639 /* For a single instance:
1640  * - force == true: The instance will be removed even when its remaining
1641  *   lifetime is not zero.
1642  * - force == false: The instance will be deactivated but kept stored unless
1643  *   the remaining lifetime is zero.
1644  *
1645  * For instance == 0x00:
1646  * - force == true: All instances will be removed regardless of their timeout
1647  *   setting.
1648  * - force == false: Only instances that have a timeout will be removed.
1649  */
1650 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1651                                 struct hci_request *req, u8 instance,
1652                                 bool force)
1653 {
1654         struct adv_info *adv_instance, *n, *next_instance = NULL;
1655         int err;
1656         u8 rem_inst;
1657
1658         /* Cancel any timeout concerning the removed instance(s). */
1659         if (!instance || hdev->cur_adv_instance == instance)
1660                 cancel_adv_timeout(hdev);
1661
1662         /* Get the next instance to advertise BEFORE we remove
1663          * the current one. This can be the same instance again
1664          * if there is only one instance.
1665          */
1666         if (instance && hdev->cur_adv_instance == instance)
1667                 next_instance = hci_get_next_instance(hdev, instance);
1668
1669         if (instance == 0x00) {
1670                 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1671                                          list) {
1672                         if (!(force || adv_instance->timeout))
1673                                 continue;
1674
1675                         rem_inst = adv_instance->instance;
1676                         err = hci_remove_adv_instance(hdev, rem_inst);
1677                         if (!err)
1678                                 mgmt_advertising_removed(sk, hdev, rem_inst);
1679                 }
1680         } else {
1681                 adv_instance = hci_find_adv_instance(hdev, instance);
1682
1683                 if (force || (adv_instance && adv_instance->timeout &&
1684                               !adv_instance->remaining_time)) {
1685                         /* Don't advertise a removed instance. */
1686                         if (next_instance &&
1687                             next_instance->instance == instance)
1688                                 next_instance = NULL;
1689
1690                         err = hci_remove_adv_instance(hdev, instance);
1691                         if (!err)
1692                                 mgmt_advertising_removed(sk, hdev, instance);
1693                 }
1694         }
1695
1696         if (!req || !hdev_is_powered(hdev) ||
1697             hci_dev_test_flag(hdev, HCI_ADVERTISING))
1698                 return;
1699
1700         if (next_instance && !ext_adv_capable(hdev))
1701                 __hci_req_schedule_adv_instance(req, next_instance->instance,
1702                                                 false);
1703 }
1704
1705 int hci_update_random_address(struct hci_request *req, bool require_privacy,
1706                               bool use_rpa, u8 *own_addr_type)
1707 {
1708         struct hci_dev *hdev = req->hdev;
1709         int err;
1710
1711         /* If privacy is enabled, use a resolvable private address. If
1712          * the current RPA has expired, or something other than the
1713          * current RPA is in use, then generate a new one.
1714          */
1715         if (use_rpa) {
1716                 /* If the controller supports LL Privacy, use own address
1717                  * type 0x03 (RPA resolved by the controller).
1718                  */
1719                 if (use_ll_privacy(hdev))
1720                         *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1721                 else
1722                         *own_addr_type = ADDR_LE_DEV_RANDOM;
1723
1724                 if (rpa_valid(hdev))
1725                         return 0;
1726
1727                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1728                 if (err < 0) {
1729                         bt_dev_err(hdev, "failed to generate new RPA");
1730                         return err;
1731                 }
1732
1733                 set_random_addr(req, &hdev->rpa);
1734
1735                 return 0;
1736         }
1737
1738         /* In case privacy is required without a resolvable private
1739          * address, use a non-resolvable private address. This is useful
1740          * for active scanning and non-connectable advertising.
1741          */
1742         if (require_privacy) {
1743                 bdaddr_t nrpa;
1744
1745                 while (true) {
1746                         /* The non-resolvable private address is generated
1747                          * from six random bytes with the two most significant
1748                          * bits cleared.
1749                          */
1750                         get_random_bytes(&nrpa, 6);
1751                         nrpa.b[5] &= 0x3f;
1752
1753                         /* The non-resolvable private address shall not be
1754                          * equal to the public address.
1755                          */
1756                         if (bacmp(&hdev->bdaddr, &nrpa))
1757                                 break;
1758                 }
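                /* Note: bdaddr_t stores the address least-significant byte
                 * first, so b[5] is the most significant byte; masking it
                 * with 0x3f forces the two top bits of the address to 0b00,
                 * which marks it as non-resolvable.
                 */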
1759
1760                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1761                 set_random_addr(req, &nrpa);
1762                 return 0;
1763         }
1764
1765         /* If forcing of the static address is in use or there is no
1766          * public address, use the static address as the random address
1767          * (but skip the HCI command if the current random address is
1768          * already the static one).
1769          *
1770          * In case BR/EDR has been disabled on a dual-mode controller
1771          * and a static address has been configured, then use that
1772          * address instead of the public BR/EDR address.
1773          */
1774         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1775             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1776             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1777              bacmp(&hdev->static_addr, BDADDR_ANY))) {
1778                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1779                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1780                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1781                                     &hdev->static_addr);
1782                 return 0;
1783         }
1784
1785         /* Neither privacy nor a static address is being used, so use
1786          * the public address.
1787          */
1788         *own_addr_type = ADDR_LE_DEV_PUBLIC;
1789
1790         return 0;
1791 }
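/* Summary of the address selection above, in priority order:
 *
 *   use_rpa         -> RPA (own_addr_type 0x01, or 0x03 with LL privacy)
 *   require_privacy -> freshly generated NRPA (own_addr_type 0x01)
 *   static address  -> hdev->static_addr      (own_addr_type 0x01)
 *   otherwise       -> public address         (own_addr_type 0x00)
 */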
1792
1793 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
1794 {
1795         struct bdaddr_list *b;
1796
1797         list_for_each_entry(b, &hdev->accept_list, list) {
1798                 struct hci_conn *conn;
1799
1800                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1801                 if (!conn)
1802                         return true;
1803
1804                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1805                         return true;
1806         }
1807
1808         return false;
1809 }
1810
1811 void __hci_req_update_scan(struct hci_request *req)
1812 {
1813         struct hci_dev *hdev = req->hdev;
1814         u8 scan;
1815
1816         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1817                 return;
1818
1819         if (!hdev_is_powered(hdev))
1820                 return;
1821
1822         if (mgmt_powering_down(hdev))
1823                 return;
1824
1825         if (hdev->scanning_paused)
1826                 return;
1827
1828         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1829             disconnected_accept_list_entries(hdev))
1830                 scan = SCAN_PAGE;
1831         else
1832                 scan = SCAN_DISABLED;
1833
1834         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1835                 scan |= SCAN_INQUIRY;
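        /* Note: SCAN_PAGE and SCAN_INQUIRY correspond to the Write Scan
         * Enable bits 0x02 and 0x01, so the value built here is typically
         * 0x00 (no scans), 0x02 (page scan only) or 0x03 (page and
         * inquiry scan).
         */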
1836
1837         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1838             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1839                 return;
1840
1841         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1842 }
1843
1844 static u8 get_service_classes(struct hci_dev *hdev)
1845 {
1846         struct bt_uuid *uuid;
1847         u8 val = 0;
1848
1849         list_for_each_entry(uuid, &hdev->uuids, list)
1850                 val |= uuid->svc_hint;
1851
1852         return val;
1853 }
1854
1855 void __hci_req_update_class(struct hci_request *req)
1856 {
1857         struct hci_dev *hdev = req->hdev;
1858         u8 cod[3];
1859
1860         bt_dev_dbg(hdev, "");
1861
1862         if (!hdev_is_powered(hdev))
1863                 return;
1864
1865         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1866                 return;
1867
1868         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1869                 return;
1870
1871         cod[0] = hdev->minor_class;
1872         cod[1] = hdev->major_class;
1873         cod[2] = get_service_classes(hdev);
1874
1875         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1876                 cod[1] |= 0x20;
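        /* Note: 0x20 in the middle octet sets bit 13 of the 24-bit Class
         * of Device, the "Limited Discoverable Mode" service class bit.
         */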
1877
1878         if (memcmp(cod, hdev->dev_class, 3) == 0)
1879                 return;
1880
1881         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1882 }
1883
1884 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1885                       u8 reason)
1886 {
1887         switch (conn->state) {
1888         case BT_CONNECTED:
1889         case BT_CONFIG:
1890                 if (conn->type == AMP_LINK) {
1891                         struct hci_cp_disconn_phy_link cp;
1892
1893                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1894                         cp.reason = reason;
1895                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1896                                     &cp);
1897                 } else {
1898                         struct hci_cp_disconnect dc;
1899
1900                         dc.handle = cpu_to_le16(conn->handle);
1901                         dc.reason = reason;
1902                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1903                 }
1904
1905                 conn->state = BT_DISCONN;
1906
1907                 break;
1908         case BT_CONNECT:
1909                 if (conn->type == LE_LINK) {
1910                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1911                                 break;
1912                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1913                                     0, NULL);
1914                 } else if (conn->type == ACL_LINK) {
1915                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1916                                 break;
1917                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1918                                     6, &conn->dst);
1919                 }
1920                 break;
1921         case BT_CONNECT2:
1922                 if (conn->type == ACL_LINK) {
1923                         struct hci_cp_reject_conn_req rej;
1924
1925                         bacpy(&rej.bdaddr, &conn->dst);
1926                         rej.reason = reason;
1927
1928                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1929                                     sizeof(rej), &rej);
1930                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1931                         struct hci_cp_reject_sync_conn_req rej;
1932
1933                         bacpy(&rej.bdaddr, &conn->dst);
1934
1935                         /* SCO rejection has its own limited set of
1936                          * allowed error values (0x0D-0x0F), which isn't
1937                          * compatible with most values passed to this
1938                          * function. To be safe, hard-code one of the
1939                          * values that's suitable for SCO.
1940                          */
1941                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
1942
1943                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1944                                     sizeof(rej), &rej);
1945                 }
1946                 break;
1947         default:
1948                 conn->state = BT_CLOSED;
1949                 break;
1950         }
1951 }
1952
1953 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1954 {
1955         if (status)
1956                 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
1957 }
1958
1959 int hci_abort_conn(struct hci_conn *conn, u8 reason)
1960 {
1961         struct hci_request req;
1962         int err;
1963
1964         hci_req_init(&req, conn->hdev);
1965
1966         __hci_abort_conn(&req, conn, reason);
1967
1968         err = hci_req_run(&req, abort_conn_complete);
1969         if (err && err != -ENODATA) {
1970                 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
1971                 return err;
1972         }
1973
1974         return 0;
1975 }
1976
1977 static int le_scan_disable(struct hci_request *req, unsigned long opt)
1978 {
1979         hci_req_add_le_scan_disable(req, false);
1980         return 0;
1981 }
1982
1983 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1984 {
1985         u8 length = opt;
1986         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1987         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
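        /* Note: these are the General and Limited Inquiry Access Code
         * LAPs (0x9e8b33 and 0x9e8b00) stored least-significant byte
         * first, the byte order the Inquiry command expects.
         */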
1988         struct hci_cp_inquiry cp;
1989
1990         if (test_bit(HCI_INQUIRY, &req->hdev->flags))
1991                 return 0;
1992
1993         bt_dev_dbg(req->hdev, "");
1994
1995         hci_dev_lock(req->hdev);
1996         hci_inquiry_cache_flush(req->hdev);
1997         hci_dev_unlock(req->hdev);
1998
1999         memset(&cp, 0, sizeof(cp));
2000
2001         if (req->hdev->discovery.limited)
2002                 memcpy(&cp.lap, liac, sizeof(cp.lap));
2003         else
2004                 memcpy(&cp.lap, giac, sizeof(cp.lap));
2005
2006         cp.length = length;
2007
2008         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2009
2010         return 0;
2011 }
2012
2013 static void le_scan_disable_work(struct work_struct *work)
2014 {
2015         struct hci_dev *hdev = container_of(work, struct hci_dev,
2016                                             le_scan_disable.work);
2017         u8 status;
2018
2019         bt_dev_dbg(hdev, "");
2020
2021         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2022                 return;
2023
2024         cancel_delayed_work(&hdev->le_scan_restart);
2025
2026         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2027         if (status) {
2028                 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2029                            status);
2030                 return;
2031         }
2032
2033         hdev->discovery.scan_start = 0;
2034
2035         /* If we were running an LE-only scan, change the discovery
2036          * state. If we were running both LE and BR/EDR inquiry
2037          * simultaneously and the BR/EDR inquiry has already finished,
2038          * stop discovery; otherwise the BR/EDR inquiry will stop
2039          * discovery when it finishes. If we are about to resolve a
2040          * remote device name, do not change the discovery state.
2041          */
2042
2043         if (hdev->discovery.type == DISCOV_TYPE_LE)
2044                 goto discov_stopped;
2045
2046         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2047                 return;
2048
2049         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2050                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2051                     hdev->discovery.state != DISCOVERY_RESOLVING)
2052                         goto discov_stopped;
2053
2054                 return;
2055         }
2056
2057         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2058                      HCI_CMD_TIMEOUT, &status);
2059         if (status) {
2060                 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2061                 goto discov_stopped;
2062         }
2063
2064         return;
2065
2066 discov_stopped:
2067         hci_dev_lock(hdev);
2068         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2069         hci_dev_unlock(hdev);
2070 }
2071
2072 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2073 {
2074         struct hci_dev *hdev = req->hdev;
2075
2076         /* If the controller is not scanning, we are done. */
2077         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2078                 return 0;
2079
2080         if (hdev->scanning_paused) {
2081                 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2082                 return 0;
2083         }
2084
2085         hci_req_add_le_scan_disable(req, false);
2086
2087         if (use_ext_scan(hdev)) {
2088                 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2089
2090                 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2091                 ext_enable_cp.enable = LE_SCAN_ENABLE;
2092                 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2093
2094                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2095                             sizeof(ext_enable_cp), &ext_enable_cp);
2096         } else {
2097                 struct hci_cp_le_set_scan_enable cp;
2098
2099                 memset(&cp, 0, sizeof(cp));
2100                 cp.enable = LE_SCAN_ENABLE;
2101                 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2102                 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2103         }
2104
2105         return 0;
2106 }
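/* Note: "restarting" the scan is literally disable-then-enable. This
 * appears to exist to work around HCI_QUIRK_STRICT_DUPLICATE_FILTER
 * (checked in the worker below): cycling the scan resets the
 * controller's duplicate-filter cache so already-reported devices can
 * show up again during a long discovery.
 */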
2107
2108 static void le_scan_restart_work(struct work_struct *work)
2109 {
2110         struct hci_dev *hdev = container_of(work, struct hci_dev,
2111                                             le_scan_restart.work);
2112         unsigned long timeout, duration, scan_start, now;
2113         u8 status;
2114
2115         bt_dev_dbg(hdev, "");
2116
2117         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2118         if (status) {
2119                 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2120                            status);
2121                 return;
2122         }
2123
2124         hci_dev_lock(hdev);
2125
2126         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2127             !hdev->discovery.scan_start)
2128                 goto unlock;
2129
2130         /* When the scan was started, hdev->le_scan_disable was queued to
2131          * run 'duration' after scan_start. During the scan restart this
2132          * work item was canceled, so it has to be queued again with the
2133          * proper timeout to make sure the scan does not run indefinitely.
2134          */
2135         duration = hdev->discovery.scan_duration;
2136         scan_start = hdev->discovery.scan_start;
2137         now = jiffies;
2138         if (now - scan_start <= duration) {
2139                 int elapsed;
2140
2141                 if (now >= scan_start)
2142                         elapsed = now - scan_start;
2143                 else
2144                         elapsed = ULONG_MAX - scan_start + now;
2145
2146                 timeout = duration - elapsed;
2147         } else {
2148                 timeout = 0;
2149         }
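        /* For example (illustration only): if jiffies wrapped after the
         * scan started, scan_start is near ULONG_MAX while now is small;
         * elapsed then counts the ticks before and after the wrap, so the
         * remaining timeout is still roughly duration - elapsed.
         */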
2150
2151         queue_delayed_work(hdev->req_workqueue,
2152                            &hdev->le_scan_disable, timeout);
2153
2154 unlock:
2155         hci_dev_unlock(hdev);
2156 }
2157
2158 bool hci_req_stop_discovery(struct hci_request *req)
2159 {
2160         struct hci_dev *hdev = req->hdev;
2161         struct discovery_state *d = &hdev->discovery;
2162         struct hci_cp_remote_name_req_cancel cp;
2163         struct inquiry_entry *e;
2164         bool ret = false;
2165
2166         bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
2167
2168         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2169                 if (test_bit(HCI_INQUIRY, &hdev->flags))
2170                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2171
2172                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2173                         cancel_delayed_work(&hdev->le_scan_disable);
2174                         cancel_delayed_work(&hdev->le_scan_restart);
2175                         hci_req_add_le_scan_disable(req, false);
2176                 }
2177
2178                 ret = true;
2179         } else {
2180                 /* Passive scanning */
2181                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2182                         hci_req_add_le_scan_disable(req, false);
2183                         ret = true;
2184                 }
2185         }
2186
2187         /* No further actions needed for LE-only discovery */
2188         if (d->type == DISCOV_TYPE_LE)
2189                 return ret;
2190
2191         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2192                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2193                                                      NAME_PENDING);
2194                 if (!e)
2195                         return ret;
2196
2197                 bacpy(&cp.bdaddr, &e->data.bdaddr);
2198                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2199                             &cp);
2200                 ret = true;
2201         }
2202
2203         return ret;
2204 }
2205
2206 static void config_data_path_complete(struct hci_dev *hdev, u8 status,
2207                                       u16 opcode)
2208 {
2209         bt_dev_dbg(hdev, "status %u", status);
2210 }
2211
2212 int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec)
2213 {
2214         struct hci_request req;
2215         int err;
2216         __u8 vnd_len, *vnd_data = NULL;
2217         struct hci_op_configure_data_path *cmd = NULL;
2218
2219         hci_req_init(&req, hdev);
2220
2221         err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
2222                                           &vnd_data);
2223         if (err < 0)
2224                 goto error;
2225
2226         cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
2227         if (!cmd) {
2228                 err = -ENOMEM;
2229                 goto error;
2230         }
2231
2232         err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
2233         if (err < 0)
2234                 goto error;
2235
2236         cmd->vnd_len = vnd_len;
2237         memcpy(cmd->vnd_data, vnd_data, vnd_len);
2238
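        /* Note: the command is queued twice to configure both directions
         * of the data path; in the Configure Data Path command, 0x00 is
         * input (host to controller) and 0x01 is output (controller to
         * host).
         */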
2239         cmd->direction = 0x00;
2240         hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2241
2242         cmd->direction = 0x01;
2243         hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2244
2245         err = hci_req_run(&req, config_data_path_complete);
2246 error:
2247
2248         kfree(cmd);
2249         kfree(vnd_data);
2250         return err;
2251 }
2252
2253 void hci_request_setup(struct hci_dev *hdev)
2254 {
2255         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2256         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2257         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2258         INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
2259 }
2260
2261 void hci_request_cancel_all(struct hci_dev *hdev)
2262 {
2263         __hci_cmd_sync_cancel(hdev, ENODEV);
2264
2265         cancel_delayed_work_sync(&hdev->le_scan_disable);
2266         cancel_delayed_work_sync(&hdev->le_scan_restart);
2267
2268         if (hdev->adv_instance_timeout) {
2269                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2270                 hdev->adv_instance_timeout = 0;
2271         }
2272
2273         cancel_interleave_scan(hdev);
2274 }