net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <[email protected]>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}
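
/* Note: the *_req() helpers in this file do not talk to the controller
 * directly. Each hci_req_add() call only appends an HCI command to the
 * request's queue; the whole batch is then submitted and waited on via
 * __hci_req_sync() (see __hci_init() below). A minimal sketch of the
 * pattern, using an opcode that appears in this file:
 *
 *      static int my_req(struct hci_request *req, unsigned long opt)
 *      {
 *              hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *              return 0;
 *      }
 *
 *      err = __hci_req_sync(hdev, my_req, 0, HCI_INIT_TIMEOUT, NULL);
 */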

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
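
/* The event mask sent above is a little-endian 64-bit bitfield, so
 * "events[i] |= (1 << j)" enables mask bit i * 8 + j, and each in-line
 * comment names the HCI event that bit gates. As an illustration of the
 * indexing (not a spec citation):
 *
 *      events[4] |= 0x01;      - byte 4, bit 0, i.e. mask bit 32
 *      events[7] |= 0x20;      - byte 7, bit 5, i.e. mask bit 61
 */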

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However, some controllers list
                 * max_page as 0 as long as SSP has not been enabled.
                 * To achieve proper debugging output, force max_page to
                 * a minimum of 1.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If the Connectionless Peripheral Broadcast central role is
         * supported, enable all necessary events for it.
         */
        if (lmp_cpb_central_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Peripheral Page Response Timeout */
                events[2] |= 0x20;      /* CPB Channel Map Change */
                changed = true;
        }

        /* If the Connectionless Peripheral Broadcast peripheral role is
         * supported, enable all necessary events for it.
         */
        if (lmp_cpb_peripheral_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CPB Receive */
                events[2] |= 0x04;      /* CPB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable the Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for the Set Event
         * Mask Page 2 command, but then actually do not support it. Since
         * the default value is all bits set to zero, the command is only
         * required if the event mask has to be changed. In case no change
         * to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        if (hdev->commands[18] & 0x04 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If the controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports the LL Privacy feature, enable
                 * the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
                        events[1] |= 0x02;      /* LE Enhanced Connection
                                                 * Complete
                                                 */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the Channel Selection Algorithm #2
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
                        events[2] |= 0x08;      /* LE Channel Selection
                                                 * Algorithm
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                /* If the controller supports the LE Set Default PHY or
                 * LE Set PHY commands, enable the corresponding event.
                 */
                if (hdev->commands[35] & (0x20 | 0x40))
                        events[1] |= 0x08;      /* LE PHY Update Complete */

                /* If the controller supports the LE Set Extended Scan
                 * Parameters and LE Set Extended Scan Enable commands,
                 * enable the corresponding event.
                 */
                if (use_ext_scan(hdev))
                        events[1] |= 0x10;      /* LE Extended Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Extended Advertising
                 * commands, enable the corresponding event.
                 */
                if (ext_adv_capable(hdev))
                        events[2] |= 0x02;      /* LE Advertising Set
                                                 * Terminated
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                /* Read LE Advertising Channel TX Power */
                if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
                        /* The HCI spec forbids mixing legacy and extended
                         * advertising commands, and Read Advertising Channel
                         * TX Power is part of the legacy set. So do not call
                         * it if extended advertising is supported; otherwise
                         * the controller will return COMMAND_DISALLOWED for
                         * the extended commands.
                         */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[38] & 0x80) {
                        /* Read LE Min/Max Tx Power */
                        hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE Accept List Size */
                        hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE Accept List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
                }

                if (hdev->commands[34] & 0x40) {
                        /* Read LE Resolving List Size */
                        hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[34] & 0x20) {
                        /* Clear LE Resolving List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
                }

                if (hdev->commands[35] & 0x04) {
                        __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);

                        /* Set RPA timeout */
                        hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
                                    &rpa_timeout);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                if (ext_adv_capable(hdev)) {
                        /* Read LE Number of Supported Advertising Sets */
                        hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
                                    0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}
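
/* Note on the hdev->commands[] checks above: the array holds the
 * Supported Commands bitmask returned by the Read Local Supported
 * Commands command, where octet n, bit m flags one specific HCI command.
 * For example, "hdev->commands[26] & 0x08" tests octet 26, bit 3, which
 * the code above relies on to indicate LE Set Scan Enable support.
 */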

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support deleting stored
         * link keys, but they don't. The quirk lets a driver just
         * disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local pairing options if the HCI command is supported */
        if (hdev->commands[41] & 0x08)
                hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        /* Set erroneous data reporting, if supported, to match the
         * wideband speech setting value.
         */
        if (hdev->commands[18] & 0x08 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
                bool enabled = hci_dev_test_flag(hdev,
                                                 HCI_WIDEBAND_SPEECH_ENABLED);

                if (enabled !=
                    (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
                        struct hci_cp_write_def_err_data_reporting cp;

                        cp.err_data_reporting = enabled ?
                                                ERR_DATA_REPORTING_ENABLED :
                                                ERR_DATA_REPORTING_DISABLED;

                        hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
                                    sizeof(cp), &cp);
                }
        }

        /* Set Suggested Default Data Length to maximum if supported */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                struct hci_cp_le_write_def_data_len cp;

                cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
                cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
                hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
        }

        /* Set Default PHY parameters if the command is supported */
        if (hdev->commands[35] & 0x20) {
                struct hci_cp_le_set_default_phy cp;

                cp.all_phys = 0x00;
                cp.tx_phys = hdev->le_tx_def_phys;
                cp.rx_phys = hdev->le_rx_def_phys;

                hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
        }

        return 0;
}
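
/* __hci_init() below drives the four request stages above synchronously:
 * init1 resets the controller and reads its basic identity, init2 sets up
 * the BR/EDR and/or LE transport basics, and init3/init4 (HCI_PRIMARY
 * only) program event masks and optional features. Each stage is one
 * __hci_req_sync() round trip, so later stages can key off data (local
 * features, supported commands) returned by the earlier ones.
 */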

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* Read the local codec list if the HCI command is supported */
        if (hdev->commands[45] & 0x04)
                hci_read_supported_codecs_v2(hdev);
        else if (hdev->commands[29] & 0x20)
                hci_read_supported_codecs(hdev);

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}
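
/* The four small helpers above pass their single argument through the opt
 * parameter of hci_req_sync(). They back the legacy scan/auth/encrypt/
 * link-policy HCI ioctls; that mapping is an inference from the
 * surrounding code rather than something this excerpt shows directly.
 */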

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
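
/* Callers own the reference returned by hci_dev_get() and must drop it
 * with hci_dev_put() when done, as hci_inquiry() below does in its done:
 * path. A minimal sketch of the pattern:
 *
 *      struct hci_dev *hdev = hci_dev_get(0);
 *      if (hdev) {
 *              ... use hdev ...
 *              hci_dev_put(hdev);
 *      }
 */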

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}
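
/* Each inquiry_entry sits on cache->all and, depending on its name_state,
 * may additionally be linked on cache->unknown or cache->resolve through
 * its second list head. The flush above therefore only needs to free
 * entries via cache->all and reinitialize the other two list heads.
 */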

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
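
/* The walk above re-inserts ie so that cache->resolve stays ordered by
 * ascending |RSSI|, skipping over NAME_PENDING entries whose resolution
 * is already in flight. Since RSSI is a negative dBm value, a smaller
 * magnitude means a stronger signal, so the strongest devices get their
 * names resolved first.
 */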

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* Restrict the maximum inquiry length to 60 seconds */
        if (ir.length > 60) {
                err = -EINVAL;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until the Inquiry procedure finishes (HCI_INQUIRY flag
                 * is cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
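
/* hci_inquiry() above implements the HCIINQUIRY ioctl. From userspace the
 * usual entry point is the libbluetooth wrapper of the same name; a rough
 * sketch, assuming BlueZ's <bluetooth/hci.h> and <bluetooth/hci_lib.h>
 * (not part of this file):
 *
 *      inquiry_info *ii = NULL;
 *      int dev_id = hci_get_route(NULL);
 *      int n = hci_inquiry(dev_id, 8, 255, NULL, &ii, IREQ_CACHE_FLUSH);
 *      // n responses; inquiry length 8 (* 1.28s); cache flushed first
 */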

/**
 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
 *                                     (BD_ADDR) for a HCI device from
 *                                     a firmware node property.
 * @hdev:       The HCI device
 *
 * Search the firmware node for 'local-bd-address'.
 *
 * All-zero BD addresses are rejected, because those could be properties
 * that exist in the firmware tables, but were not updated by the firmware. For
 * example, the DTS could define 'local-bd-address', with zero BD addresses.
 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
        struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
        bdaddr_t ba;
        int ret;

        ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
                                            (u8 *)&ba, sizeof(ba));
        if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
                return;

        bacpy(&hdev->public_addr, &ba);
}
1317
1318 static int hci_dev_do_open(struct hci_dev *hdev)
1319 {
1320         int ret = 0;
1321
1322         BT_DBG("%s %p", hdev->name, hdev);
1323
1324         hci_req_sync_lock(hdev);
1325
1326         if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1327                 ret = -ENODEV;
1328                 goto done;
1329         }
1330
1331         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1332             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1333                 /* Check for rfkill but allow the HCI setup stage to
1334                  * proceed (which in itself doesn't cause any RF activity).
1335                  */
1336                 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1337                         ret = -ERFKILL;
1338                         goto done;
1339                 }
1340
1341                 /* Check for valid public address or a configured static
1342                  * random address, but let the HCI setup proceed to
1343                  * be able to determine if there is a public address
1344                  * or not.
1345                  *
1346                  * In case of user channel usage, it is not important
1347                  * if a public address or static random address is
1348                  * available.
1349                  *
1350                  * This check is only valid for BR/EDR controllers
1351                  * since AMP controllers do not have an address.
1352                  */
1353                 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1354                     hdev->dev_type == HCI_PRIMARY &&
1355                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1356                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1357                         ret = -EADDRNOTAVAIL;
1358                         goto done;
1359                 }
1360         }
1361
1362         if (test_bit(HCI_UP, &hdev->flags)) {
1363                 ret = -EALREADY;
1364                 goto done;
1365         }
1366
1367         if (hdev->open(hdev)) {
1368                 ret = -EIO;
1369                 goto done;
1370         }
1371
1372         set_bit(HCI_RUNNING, &hdev->flags);
1373         hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1374
1375         atomic_set(&hdev->cmd_cnt, 1);
1376         set_bit(HCI_INIT, &hdev->flags);
1377
1378         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1379             test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1380                 bool invalid_bdaddr;
1381
1382                 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1383
1384                 if (hdev->setup)
1385                         ret = hdev->setup(hdev);
1386
1387                 /* The transport driver can set the quirk to mark the
1388                  * BD_ADDR invalid before creating the HCI device or in
1389                  * its setup callback.
1390                  */
1391                 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1392                                           &hdev->quirks);
1393
1394                 if (ret)
1395                         goto setup_failed;
1396
1397                 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1398                         if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1399                                 hci_dev_get_bd_addr_from_property(hdev);
1400
1401                         if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1402                             hdev->set_bdaddr) {
1403                                 ret = hdev->set_bdaddr(hdev,
1404                                                        &hdev->public_addr);
1405
1406                                 /* If setting of the BD_ADDR from the device
1407                                  * property succeeds, then treat the address
1408                                  * as valid even if the invalid BD_ADDR
1409                                  * quirk indicates otherwise.
1410                                  */
1411                                 if (!ret)
1412                                         invalid_bdaddr = false;
1413                         }
1414                 }
1415
1416 setup_failed:
1417                 /* The transport driver can set these quirks before
1418                  * creating the HCI device or in its setup callback.
1419                  *
1420                  * For the invalid BD_ADDR quirk it is possible that
1421                  * it becomes a valid address if the bootloader does
1422                  * provide it (see above).
1423                  *
1424                  * In case any of them is set, the controller has to
1425                  * start up as unconfigured.
1426                  */
1427                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1428                     invalid_bdaddr)
1429                         hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1430
1431                 /* For an unconfigured controller it is required to
1432                  * read at least the version information provided by
1433                  * the Read Local Version Information command.
1434                  *
1435                  * If the set_bdaddr driver callback is provided, then
1436                  * also the original Bluetooth public device address
1437                  * will be read using the Read BD Address command.
1438                  */
1439                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1440                         ret = __hci_unconf_init(hdev);
1441         }
1442
1443         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1444                 /* If public address change is configured, ensure that
1445                  * the address gets programmed. If the driver does not
1446                  * support changing the public address, fail the power
1447                  * on procedure.
1448                  */
1449                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1450                     hdev->set_bdaddr)
1451                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1452                 else
1453                         ret = -EADDRNOTAVAIL;
1454         }
1455
1456         if (!ret) {
1457                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1458                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1459                         ret = __hci_init(hdev);
1460                         if (!ret && hdev->post_init)
1461                                 ret = hdev->post_init(hdev);
1462                 }
1463         }
1464
1465         /* If the HCI Reset command is clearing all diagnostic settings,
1466          * then they need to be reprogrammed after the init procedure
1467          * completed.
1468          */
1469         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1470             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1471             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1472                 ret = hdev->set_diag(hdev, true);
1473
1474         msft_do_open(hdev);
1475         aosp_do_open(hdev);
1476
1477         clear_bit(HCI_INIT, &hdev->flags);
1478
1479         if (!ret) {
1480                 hci_dev_hold(hdev);
1481                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1482                 hci_adv_instances_set_rpa_expired(hdev, true);
1483                 set_bit(HCI_UP, &hdev->flags);
1484                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1485                 hci_leds_update_powered(hdev, true);
1486                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1487                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1488                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1489                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1490                     hci_dev_test_flag(hdev, HCI_MGMT) &&
1491                     hdev->dev_type == HCI_PRIMARY) {
1492                         ret = __hci_req_hci_power_on(hdev);
1493                         mgmt_power_on(hdev, ret);
1494                 }
1495         } else {
1496                 /* Init failed, cleanup */
1497                 flush_work(&hdev->tx_work);
1498
1499                 /* Since hci_rx_work() can queue new cmd_work, it
1500                  * should be flushed first to avoid an unexpected
1501                  * call of hci_cmd_work().
1502                  */
1503                 flush_work(&hdev->rx_work);
1504                 flush_work(&hdev->cmd_work);
1505
1506                 skb_queue_purge(&hdev->cmd_q);
1507                 skb_queue_purge(&hdev->rx_q);
1508
1509                 if (hdev->flush)
1510                         hdev->flush(hdev);
1511
1512                 if (hdev->sent_cmd) {
1513                         kfree_skb(hdev->sent_cmd);
1514                         hdev->sent_cmd = NULL;
1515                 }
1516
1517                 clear_bit(HCI_RUNNING, &hdev->flags);
1518                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1519
1520                 hdev->close(hdev);
1521                 hdev->flags &= BIT(HCI_RAW);
1522         }
1523
1524 done:
1525         hci_req_sync_unlock(hdev);
1526         return ret;
1527 }
1528
1529 /* ---- HCI ioctl helpers ---- */
1530
1531 int hci_dev_open(__u16 dev)
1532 {
1533         struct hci_dev *hdev;
1534         int err;
1535
1536         hdev = hci_dev_get(dev);
1537         if (!hdev)
1538                 return -ENODEV;
1539
1540         /* Devices that are marked as unconfigured can only be powered
1541          * up as user channel. Trying to bring them up as normal devices
1542          * will result in a failure. Only user channel operation is
1543          * possible.
1544          *
1545          * When this function is called for a user channel, the flag
1546          * HCI_USER_CHANNEL will be set first before attempting to
1547          * open the device.
1548          */
1549         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1550             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1551                 err = -EOPNOTSUPP;
1552                 goto done;
1553         }
1554
1555         /* We need to ensure that no other power on/off work is pending
1556          * before proceeding to call hci_dev_do_open. This is
1557          * particularly important if the setup procedure has not yet
1558          * completed.
1559          */
1560         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1561                 cancel_delayed_work(&hdev->power_off);
1562
1563         /* After this call it is guaranteed that the setup procedure
1564          * has finished. This means that error conditions like RFKILL
1565          * or no valid public or static random address apply.
1566          */
1567         flush_workqueue(hdev->req_workqueue);
1568
1569         /* For controllers not using the management interface and that
1570          * are brought up using the legacy ioctl, set the HCI_BONDABLE bit
1571          * so that pairing works for them. Once the management interface
1572          * is in use this bit will be cleared again and userspace has
1573          * to explicitly enable it.
1574          */
1575         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1576             !hci_dev_test_flag(hdev, HCI_MGMT))
1577                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1578
1579         err = hci_dev_do_open(hdev);
1580
1581 done:
1582         hci_dev_put(hdev);
1583         return err;
1584 }
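
/* Example (userspace sketch, not kernel code): driving hci_dev_open() and
 * hci_dev_close() through the legacy ioctl interface. Assumes the BlueZ
 * <bluetooth/bluetooth.h> and <bluetooth/hci.h> userspace headers; error
 * handling is elided.
 *
 *   #include <unistd.h>
 *   #include <sys/ioctl.h>
 *   #include <sys/socket.h>
 *   #include <bluetooth/bluetooth.h>
 *   #include <bluetooth/hci.h>
 *
 *   int ctl = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *
 *   ioctl(ctl, HCIDEVUP, 0);        // hci_dev_open(0)
 *   ioctl(ctl, HCIDEVDOWN, 0);      // hci_dev_close(0)
 *   close(ctl);
 */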
1585
1586 /* This function requires the caller holds hdev->lock */
1587 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1588 {
1589         struct hci_conn_params *p;
1590
1591         list_for_each_entry(p, &hdev->le_conn_params, list) {
1592                 if (p->conn) {
1593                         hci_conn_drop(p->conn);
1594                         hci_conn_put(p->conn);
1595                         p->conn = NULL;
1596                 }
1597                 list_del_init(&p->action);
1598         }
1599
1600         BT_DBG("All LE pending actions cleared");
1601 }
1602
1603 int hci_dev_do_close(struct hci_dev *hdev)
1604 {
1605         bool auto_off;
1606         int err = 0;
1607
1608         BT_DBG("%s %p", hdev->name, hdev);
1609
1610         cancel_delayed_work(&hdev->power_off);
1611         cancel_delayed_work(&hdev->ncmd_timer);
1612
1613         hci_request_cancel_all(hdev);
1614         hci_req_sync_lock(hdev);
1615
1616         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1617             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1618             test_bit(HCI_UP, &hdev->flags)) {
1619                 /* Execute vendor specific shutdown routine */
1620                 if (hdev->shutdown)
1621                         err = hdev->shutdown(hdev);
1622         }
1623
1624         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1625                 cancel_delayed_work_sync(&hdev->cmd_timer);
1626                 hci_req_sync_unlock(hdev);
1627                 return err;
1628         }
1629
1630         hci_leds_update_powered(hdev, false);
1631
1632         /* Flush RX and TX works */
1633         flush_work(&hdev->tx_work);
1634         flush_work(&hdev->rx_work);
1635
1636         if (hdev->discov_timeout > 0) {
1637                 hdev->discov_timeout = 0;
1638                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1639                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1640         }
1641
1642         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1643                 cancel_delayed_work(&hdev->service_cache);
1644
1645         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1646                 struct adv_info *adv_instance;
1647
1648                 cancel_delayed_work_sync(&hdev->rpa_expired);
1649
1650                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1651                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1652         }
1653
1654         /* Avoid potential lockdep warnings from the *_flush() calls by
1655          * ensuring the workqueue is empty up front.
1656          */
1657         drain_workqueue(hdev->workqueue);
1658
1659         hci_dev_lock(hdev);
1660
1661         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1662
1663         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1664
1665         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1666             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1667             hci_dev_test_flag(hdev, HCI_MGMT))
1668                 __mgmt_power_off(hdev);
1669
1670         hci_inquiry_cache_flush(hdev);
1671         hci_pend_le_actions_clear(hdev);
1672         hci_conn_hash_flush(hdev);
1673         hci_dev_unlock(hdev);
1674
1675         smp_unregister(hdev);
1676
1677         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1678
1679         aosp_do_close(hdev);
1680         msft_do_close(hdev);
1681
1682         if (hdev->flush)
1683                 hdev->flush(hdev);
1684
1685         /* Reset device */
1686         skb_queue_purge(&hdev->cmd_q);
1687         atomic_set(&hdev->cmd_cnt, 1);
1688         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1689             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1690                 set_bit(HCI_INIT, &hdev->flags);
1691                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1692                 clear_bit(HCI_INIT, &hdev->flags);
1693         }
1694
1695         /* Flush cmd work */
1696         flush_work(&hdev->cmd_work);
1697
1698         /* Drop queues */
1699         skb_queue_purge(&hdev->rx_q);
1700         skb_queue_purge(&hdev->cmd_q);
1701         skb_queue_purge(&hdev->raw_q);
1702
1703         /* Drop last sent command */
1704         if (hdev->sent_cmd) {
1705                 cancel_delayed_work_sync(&hdev->cmd_timer);
1706                 kfree_skb(hdev->sent_cmd);
1707                 hdev->sent_cmd = NULL;
1708         }
1709
1710         clear_bit(HCI_RUNNING, &hdev->flags);
1711         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1712
1713         if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1714                 wake_up(&hdev->suspend_wait_q);
1715
1716         /* After this point our queues are empty
1717          * and no tasks are scheduled. */
1718         hdev->close(hdev);
1719
1720         /* Clear flags */
1721         hdev->flags &= BIT(HCI_RAW);
1722         hci_dev_clear_volatile_flags(hdev);
1723
1724         /* Controller radio is available but is currently powered down */
1725         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1726
1727         memset(hdev->eir, 0, sizeof(hdev->eir));
1728         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1729         bacpy(&hdev->random_addr, BDADDR_ANY);
1730         hci_codec_list_clear(&hdev->local_codecs);
1731
1732         hci_req_sync_unlock(hdev);
1733
1734         hci_dev_put(hdev);
1735         return err;
1736 }
1737
1738 int hci_dev_close(__u16 dev)
1739 {
1740         struct hci_dev *hdev;
1741         int err;
1742
1743         hdev = hci_dev_get(dev);
1744         if (!hdev)
1745                 return -ENODEV;
1746
1747         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1748                 err = -EBUSY;
1749                 goto done;
1750         }
1751
1752         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1753                 cancel_delayed_work(&hdev->power_off);
1754
1755         err = hci_dev_do_close(hdev);
1756
1757 done:
1758         hci_dev_put(hdev);
1759         return err;
1760 }
1761
1762 static int hci_dev_do_reset(struct hci_dev *hdev)
1763 {
1764         int ret;
1765
1766         BT_DBG("%s %p", hdev->name, hdev);
1767
1768         hci_req_sync_lock(hdev);
1769
1770         /* Drop queues */
1771         skb_queue_purge(&hdev->rx_q);
1772         skb_queue_purge(&hdev->cmd_q);
1773
1774         /* Avoid potential lockdep warnings from the *_flush() calls by
1775          * ensuring the workqueue is empty up front.
1776          */
1777         drain_workqueue(hdev->workqueue);
1778
1779         hci_dev_lock(hdev);
1780         hci_inquiry_cache_flush(hdev);
1781         hci_conn_hash_flush(hdev);
1782         hci_dev_unlock(hdev);
1783
1784         if (hdev->flush)
1785                 hdev->flush(hdev);
1786
1787         atomic_set(&hdev->cmd_cnt, 1);
1788         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1789
1790         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1791
1792         hci_req_sync_unlock(hdev);
1793         return ret;
1794 }
1795
1796 int hci_dev_reset(__u16 dev)
1797 {
1798         struct hci_dev *hdev;
1799         int err;
1800
1801         hdev = hci_dev_get(dev);
1802         if (!hdev)
1803                 return -ENODEV;
1804
1805         if (!test_bit(HCI_UP, &hdev->flags)) {
1806                 err = -ENETDOWN;
1807                 goto done;
1808         }
1809
1810         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1811                 err = -EBUSY;
1812                 goto done;
1813         }
1814
1815         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1816                 err = -EOPNOTSUPP;
1817                 goto done;
1818         }
1819
1820         err = hci_dev_do_reset(hdev);
1821
1822 done:
1823         hci_dev_put(hdev);
1824         return err;
1825 }
1826
1827 int hci_dev_reset_stat(__u16 dev)
1828 {
1829         struct hci_dev *hdev;
1830         int ret = 0;
1831
1832         hdev = hci_dev_get(dev);
1833         if (!hdev)
1834                 return -ENODEV;
1835
1836         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1837                 ret = -EBUSY;
1838                 goto done;
1839         }
1840
1841         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1842                 ret = -EOPNOTSUPP;
1843                 goto done;
1844         }
1845
1846         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1847
1848 done:
1849         hci_dev_put(hdev);
1850         return ret;
1851 }
1852
1853 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1854 {
1855         bool conn_changed, discov_changed;
1856
1857         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1858
1859         if ((scan & SCAN_PAGE))
1860                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1861                                                           HCI_CONNECTABLE);
1862         else
1863                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1864                                                            HCI_CONNECTABLE);
1865
1866         if ((scan & SCAN_INQUIRY)) {
1867                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1868                                                             HCI_DISCOVERABLE);
1869         } else {
1870                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1871                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1872                                                              HCI_DISCOVERABLE);
1873         }
1874
1875         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1876                 return;
1877
1878         if (conn_changed || discov_changed) {
1879                 /* In case this was disabled through mgmt */
1880                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1881
1882                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1883                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1884
1885                 mgmt_new_settings(hdev);
1886         }
1887 }
1888
1889 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1890 {
1891         struct hci_dev *hdev;
1892         struct hci_dev_req dr;
1893         int err = 0;
1894
1895         if (copy_from_user(&dr, arg, sizeof(dr)))
1896                 return -EFAULT;
1897
1898         hdev = hci_dev_get(dr.dev_id);
1899         if (!hdev)
1900                 return -ENODEV;
1901
1902         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1903                 err = -EBUSY;
1904                 goto done;
1905         }
1906
1907         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1908                 err = -EOPNOTSUPP;
1909                 goto done;
1910         }
1911
1912         if (hdev->dev_type != HCI_PRIMARY) {
1913                 err = -EOPNOTSUPP;
1914                 goto done;
1915         }
1916
1917         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1918                 err = -EOPNOTSUPP;
1919                 goto done;
1920         }
1921
1922         switch (cmd) {
1923         case HCISETAUTH:
1924                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1925                                    HCI_INIT_TIMEOUT, NULL);
1926                 break;
1927
1928         case HCISETENCRYPT:
1929                 if (!lmp_encrypt_capable(hdev)) {
1930                         err = -EOPNOTSUPP;
1931                         break;
1932                 }
1933
1934                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1935                         /* Auth must be enabled first */
1936                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1937                                            HCI_INIT_TIMEOUT, NULL);
1938                         if (err)
1939                                 break;
1940                 }
1941
1942                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1943                                    HCI_INIT_TIMEOUT, NULL);
1944                 break;
1945
1946         case HCISETSCAN:
1947                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1948                                    HCI_INIT_TIMEOUT, NULL);
1949
1950                 /* Ensure that the connectable and discoverable states
1951                  * get correctly modified as this was a non-mgmt change.
1952                  */
1953                 if (!err)
1954                         hci_update_scan_state(hdev, dr.dev_opt);
1955                 break;
1956
1957         case HCISETLINKPOL:
1958                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1959                                    HCI_INIT_TIMEOUT, NULL);
1960                 break;
1961
1962         case HCISETLINKMODE:
1963                 hdev->link_mode = ((__u16) dr.dev_opt) &
1964                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1965                 break;
1966
1967         case HCISETPTYPE:
1968                 if (hdev->pkt_type == (__u16) dr.dev_opt)
1969                         break;
1970
1971                 hdev->pkt_type = (__u16) dr.dev_opt;
1972                 mgmt_phy_configuration_changed(hdev, NULL);
1973                 break;
1974
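        /* For the two MTU ioctls below, dev_opt packs two __u16 values:
         * the first carries the packet count and the second the MTU.
         */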
1975         case HCISETACLMTU:
1976                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1977                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1978                 break;
1979
1980         case HCISETSCOMTU:
1981                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1982                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1983                 break;
1984
1985         default:
1986                 err = -EINVAL;
1987                 break;
1988         }
1989
1990 done:
1991         hci_dev_put(hdev);
1992         return err;
1993 }
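
/* Example (userspace sketch): enabling page and inquiry scan through the
 * HCISETSCAN ioctl handled above. SCAN_PAGE and SCAN_INQUIRY come from the
 * BlueZ <bluetooth/hci.h> header; ctl is a raw BTPROTO_HCI socket as in
 * the example after hci_dev_open().
 *
 *   struct hci_dev_req dr = {
 *           .dev_id  = 0,
 *           .dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *   };
 *
 *   if (ioctl(ctl, HCISETSCAN, (unsigned long) &dr) < 0)
 *           perror("HCISETSCAN");
 */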
1994
1995 int hci_get_dev_list(void __user *arg)
1996 {
1997         struct hci_dev *hdev;
1998         struct hci_dev_list_req *dl;
1999         struct hci_dev_req *dr;
2000         int n = 0, size, err;
2001         __u16 dev_num;
2002
2003         if (get_user(dev_num, (__u16 __user *) arg))
2004                 return -EFAULT;
2005
2006         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2007                 return -EINVAL;
2008
2009         size = sizeof(*dl) + dev_num * sizeof(*dr);
2010
2011         dl = kzalloc(size, GFP_KERNEL);
2012         if (!dl)
2013                 return -ENOMEM;
2014
2015         dr = dl->dev_req;
2016
2017         read_lock(&hci_dev_list_lock);
2018         list_for_each_entry(hdev, &hci_dev_list, list) {
2019                 unsigned long flags = hdev->flags;
2020
2021                 /* When auto-off is configured, the transport is
2022                  * running, but still indicate that the device is
2023                  * actually down.
2024                  */
2025                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2026                         flags &= ~BIT(HCI_UP);
2027
2028                 (dr + n)->dev_id  = hdev->id;
2029                 (dr + n)->dev_opt = flags;
2030
2031                 if (++n >= dev_num)
2032                         break;
2033         }
2034         read_unlock(&hci_dev_list_lock);
2035
2036         dl->dev_num = n;
2037         size = sizeof(*dl) + n * sizeof(*dr);
2038
2039         err = copy_to_user(arg, dl, size);
2040         kfree(dl);
2041
2042         return err ? -EFAULT : 0;
2043 }
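
/* Example (userspace sketch): enumerating controllers with HCIGETDEVLIST,
 * roughly what hciconfig does. HCI_MAX_DEV, struct hci_dev_list_req and
 * struct hci_dev_req come from the BlueZ <bluetooth/hci.h> header.
 *
 *   struct hci_dev_list_req *dl;
 *   int i;
 *
 *   dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *   dl->dev_num = HCI_MAX_DEV;
 *
 *   if (!ioctl(ctl, HCIGETDEVLIST, (void *) dl))
 *           for (i = 0; i < dl->dev_num; i++)
 *                   printf("hci%u flags 0x%x\n", dl->dev_req[i].dev_id,
 *                          dl->dev_req[i].dev_opt);
 *   free(dl);
 */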
2044
2045 int hci_get_dev_info(void __user *arg)
2046 {
2047         struct hci_dev *hdev;
2048         struct hci_dev_info di;
2049         unsigned long flags;
2050         int err = 0;
2051
2052         if (copy_from_user(&di, arg, sizeof(di)))
2053                 return -EFAULT;
2054
2055         hdev = hci_dev_get(di.dev_id);
2056         if (!hdev)
2057                 return -ENODEV;
2058
2059         /* When auto-off is configured, the transport is
2060          * running, but still indicate that the device is
2061          * actually down.
2062          */
2063         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2064                 flags = hdev->flags & ~BIT(HCI_UP);
2065         else
2066                 flags = hdev->flags;
2067
2068         strcpy(di.name, hdev->name);
2069         di.bdaddr   = hdev->bdaddr;
2070         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2071         di.flags    = flags;
2072         di.pkt_type = hdev->pkt_type;
2073         if (lmp_bredr_capable(hdev)) {
2074                 di.acl_mtu  = hdev->acl_mtu;
2075                 di.acl_pkts = hdev->acl_pkts;
2076                 di.sco_mtu  = hdev->sco_mtu;
2077                 di.sco_pkts = hdev->sco_pkts;
2078         } else {
2079                 di.acl_mtu  = hdev->le_mtu;
2080                 di.acl_pkts = hdev->le_pkts;
2081                 di.sco_mtu  = 0;
2082                 di.sco_pkts = 0;
2083         }
2084         di.link_policy = hdev->link_policy;
2085         di.link_mode   = hdev->link_mode;
2086
2087         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2088         memcpy(&di.features, &hdev->features, sizeof(di.features));
2089
2090         if (copy_to_user(arg, &di, sizeof(di)))
2091                 err = -EFAULT;
2092
2093         hci_dev_put(hdev);
2094
2095         return err;
2096 }
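
/* Example (userspace sketch): fetching one controller's details through
 * HCIGETDEVINFO. ba2str() is provided by libbluetooth.
 *
 *   struct hci_dev_info di = { .dev_id = 0 };
 *   char addr[18];
 *
 *   if (!ioctl(ctl, HCIGETDEVINFO, (void *) &di)) {
 *           ba2str(&di.bdaddr, addr);
 *           printf("%s %s acl_mtu %u\n", di.name, addr, di.acl_mtu);
 *   }
 */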
2097
2098 /* ---- Interface to HCI drivers ---- */
2099
2100 static int hci_rfkill_set_block(void *data, bool blocked)
2101 {
2102         struct hci_dev *hdev = data;
2103
2104         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2105
2106         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2107                 return -EBUSY;
2108
2109         if (blocked) {
2110                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2111                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2112                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2113                         hci_dev_do_close(hdev);
2114         } else {
2115                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2116         }
2117
2118         return 0;
2119 }
2120
2121 static const struct rfkill_ops hci_rfkill_ops = {
2122         .set_block = hci_rfkill_set_block,
2123 };
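
/* These ops are hooked up when the controller is registered, roughly:
 *
 *   hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *                               RFKILL_TYPE_BLUETOOTH,
 *                               &hci_rfkill_ops, hdev);
 *   if (hdev->rfkill && rfkill_register(hdev->rfkill) < 0) {
 *           rfkill_destroy(hdev->rfkill);
 *           hdev->rfkill = NULL;
 *   }
 */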
2124
2125 static void hci_power_on(struct work_struct *work)
2126 {
2127         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2128         int err;
2129
2130         BT_DBG("%s", hdev->name);
2131
2132         if (test_bit(HCI_UP, &hdev->flags) &&
2133             hci_dev_test_flag(hdev, HCI_MGMT) &&
2134             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2135                 cancel_delayed_work(&hdev->power_off);
2136                 hci_req_sync_lock(hdev);
2137                 err = __hci_req_hci_power_on(hdev);
2138                 hci_req_sync_unlock(hdev);
2139                 mgmt_power_on(hdev, err);
2140                 return;
2141         }
2142
2143         err = hci_dev_do_open(hdev);
2144         if (err < 0) {
2145                 hci_dev_lock(hdev);
2146                 mgmt_set_powered_failed(hdev, err);
2147                 hci_dev_unlock(hdev);
2148                 return;
2149         }
2150
2151         /* During the HCI setup phase, a few error conditions are
2152          * ignored and they need to be checked now. If they are still
2153          * valid, it is important to turn the device back off.
2154          */
2155         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2156             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2157             (hdev->dev_type == HCI_PRIMARY &&
2158              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2159              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2160                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2161                 hci_dev_do_close(hdev);
2162         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2163                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2164                                    HCI_AUTO_OFF_TIMEOUT);
2165         }
2166
2167         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2168                 /* For unconfigured devices, set the HCI_RAW flag
2169                  * so that userspace can easily identify them.
2170                  */
2171                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2172                         set_bit(HCI_RAW, &hdev->flags);
2173
2174                 /* For fully configured devices, this will send
2175                  * the Index Added event. For unconfigured devices,
2176                  * it will send the Unconfigured Index Added event.
2177                  *
2178                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2179                  * and no event will be sent.
2180                  */
2181                 mgmt_index_added(hdev);
2182         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2183                 /* When the controller is now configured, then it
2184                  * is important to clear the HCI_RAW flag.
2185                  */
2186                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2187                         clear_bit(HCI_RAW, &hdev->flags);
2188
2189                 /* Powering on the controller with HCI_CONFIG set only
2190                  * happens with the transition from unconfigured to
2191                  * configured. This will send the Index Added event.
2192                  */
2193                 mgmt_index_added(hdev);
2194         }
2195 }
2196
2197 static void hci_power_off(struct work_struct *work)
2198 {
2199         struct hci_dev *hdev = container_of(work, struct hci_dev,
2200                                             power_off.work);
2201
2202         BT_DBG("%s", hdev->name);
2203
2204         hci_dev_do_close(hdev);
2205 }
2206
2207 static void hci_error_reset(struct work_struct *work)
2208 {
2209         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2210
2211         BT_DBG("%s", hdev->name);
2212
2213         if (hdev->hw_error)
2214                 hdev->hw_error(hdev, hdev->hw_error_code);
2215         else
2216                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2217
2218         if (hci_dev_do_close(hdev))
2219                 return;
2220
2221         hci_dev_do_open(hdev);
2222 }
2223
2224 void hci_uuids_clear(struct hci_dev *hdev)
2225 {
2226         struct bt_uuid *uuid, *tmp;
2227
2228         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2229                 list_del(&uuid->list);
2230                 kfree(uuid);
2231         }
2232 }
2233
2234 void hci_link_keys_clear(struct hci_dev *hdev)
2235 {
2236         struct link_key *key;
2237
2238         list_for_each_entry(key, &hdev->link_keys, list) {
2239                 list_del_rcu(&key->list);
2240                 kfree_rcu(key, rcu);
2241         }
2242 }
2243
2244 void hci_smp_ltks_clear(struct hci_dev *hdev)
2245 {
2246         struct smp_ltk *k;
2247
2248         list_for_each_entry(k, &hdev->long_term_keys, list) {
2249                 list_del_rcu(&k->list);
2250                 kfree_rcu(k, rcu);
2251         }
2252 }
2253
2254 void hci_smp_irks_clear(struct hci_dev *hdev)
2255 {
2256         struct smp_irk *k;
2257
2258         list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
2259                 list_del_rcu(&k->list);
2260                 kfree_rcu(k, rcu);
2261         }
2262 }
2263
2264 void hci_blocked_keys_clear(struct hci_dev *hdev)
2265 {
2266         struct blocked_key *b;
2267
2268         list_for_each_entry(b, &hdev->blocked_keys, list) {
2269                 list_del_rcu(&b->list);
2270                 kfree_rcu(b, rcu);
2271         }
2272 }
2273
2274 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2275 {
2276         bool blocked = false;
2277         struct blocked_key *b;
2278
2279         rcu_read_lock();
2280         list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2281                 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2282                         blocked = true;
2283                         break;
2284                 }
2285         }
2286
2287         rcu_read_unlock();
2288         return blocked;
2289 }
2290
2291 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2292 {
2293         struct link_key *k;
2294
2295         rcu_read_lock();
2296         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2297                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2298                         rcu_read_unlock();
2299
2300                         if (hci_is_blocked_key(hdev,
2301                                                HCI_BLOCKED_KEY_TYPE_LINKKEY,
2302                                                k->val)) {
2303                                 bt_dev_warn_ratelimited(hdev,
2304                                                         "Link key blocked for %pMR",
2305                                                         &k->bdaddr);
2306                                 return NULL;
2307                         }
2308
2309                         return k;
2310                 }
2311         }
2312         rcu_read_unlock();
2313
2314         return NULL;
2315 }
2316
2317 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2318                                u8 key_type, u8 old_key_type)
2319 {
2320         /* Legacy key */
2321         if (key_type < 0x03)
2322                 return true;
2323
2324         /* Debug keys are insecure so don't store them persistently */
2325         if (key_type == HCI_LK_DEBUG_COMBINATION)
2326                 return false;
2327
2328         /* Changed combination key and there's no previous one */
2329         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2330                 return false;
2331
2332         /* Security mode 3 case */
2333         if (!conn)
2334                 return true;
2335
2336         /* BR/EDR key derived using SC from an LE link */
2337         if (conn->type == LE_LINK)
2338                 return true;
2339
2340         /* Neither local nor remote side had no-bonding as requirement */
2341         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2342                 return true;
2343
2344         /* Local side had dedicated bonding as requirement */
2345         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2346                 return true;
2347
2348         /* Remote side had dedicated bonding as requirement */
2349         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2350                 return true;
2351
2352         /* If none of the above criteria match, then don't store the key
2353          * persistently */
2354         return false;
2355 }
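
/* For example, under the rules above an unauthenticated combination key
 * created while both sides requested no bonding (auth_type and remote_auth
 * 0x00 or 0x01) is not stored, whereas the same key from a dedicated or
 * general bonding attempt is kept persistently.
 */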
2356
2357 static u8 ltk_role(u8 type)
2358 {
2359         if (type == SMP_LTK)
2360                 return HCI_ROLE_MASTER;
2361
2362         return HCI_ROLE_SLAVE;
2363 }
2364
2365 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2366                              u8 addr_type, u8 role)
2367 {
2368         struct smp_ltk *k;
2369
2370         rcu_read_lock();
2371         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2372                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2373                         continue;
2374
2375                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2376                         rcu_read_unlock();
2377
2378                         if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2379                                                k->val)) {
2380                                 bt_dev_warn_ratelimited(hdev,
2381                                                         "LTK blocked for %pMR",
2382                                                         &k->bdaddr);
2383                                 return NULL;
2384                         }
2385
2386                         return k;
2387                 }
2388         }
2389         rcu_read_unlock();
2390
2391         return NULL;
2392 }
2393
2394 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2395 {
2396         struct smp_irk *irk_to_return = NULL;
2397         struct smp_irk *irk;
2398
2399         rcu_read_lock();
2400         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2401                 if (!bacmp(&irk->rpa, rpa)) {
2402                         irk_to_return = irk;
2403                         goto done;
2404                 }
2405         }
2406
2407         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2408                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2409                         bacpy(&irk->rpa, rpa);
2410                         irk_to_return = irk;
2411                         goto done;
2412                 }
2413         }
2414
2415 done:
2416         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2417                                                 irk_to_return->val)) {
2418                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2419                                         &irk_to_return->bdaddr);
2420                 irk_to_return = NULL;
2421         }
2422
2423         rcu_read_unlock();
2424
2425         return irk_to_return;
2426 }
2427
2428 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2429                                      u8 addr_type)
2430 {
2431         struct smp_irk *irk_to_return = NULL;
2432         struct smp_irk *irk;
2433
2434         /* Identity Address must be public or static random */
2435         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2436                 return NULL;
2437
2438         rcu_read_lock();
2439         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2440                 if (addr_type == irk->addr_type &&
2441                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2442                         irk_to_return = irk;
2443                         goto done;
2444                 }
2445         }
2446
2447 done:
2448
2449         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2450                                                 irk_to_return->val)) {
2451                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2452                                         &irk_to_return->bdaddr);
2453                 irk_to_return = NULL;
2454         }
2455
2456         rcu_read_unlock();
2457
2458         return irk_to_return;
2459 }
2460
2461 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2462                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2463                                   u8 pin_len, bool *persistent)
2464 {
2465         struct link_key *key, *old_key;
2466         u8 old_key_type;
2467
2468         old_key = hci_find_link_key(hdev, bdaddr);
2469         if (old_key) {
2470                 old_key_type = old_key->type;
2471                 key = old_key;
2472         } else {
2473                 old_key_type = conn ? conn->key_type : 0xff;
2474                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2475                 if (!key)
2476                         return NULL;
2477                 list_add_rcu(&key->list, &hdev->link_keys);
2478         }
2479
2480         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2481
2482         /* Some buggy controller combinations generate a changed
2483          * combination key for legacy pairing even when there's no
2484          * previous key */
2485         if (type == HCI_LK_CHANGED_COMBINATION &&
2486             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2487                 type = HCI_LK_COMBINATION;
2488                 if (conn)
2489                         conn->key_type = type;
2490         }
2491
2492         bacpy(&key->bdaddr, bdaddr);
2493         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2494         key->pin_len = pin_len;
2495
2496         if (type == HCI_LK_CHANGED_COMBINATION)
2497                 key->type = old_key_type;
2498         else
2499                 key->type = type;
2500
2501         if (persistent)
2502                 *persistent = hci_persistent_key(hdev, conn, type,
2503                                                  old_key_type);
2504
2505         return key;
2506 }
2507
2508 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2509                             u8 addr_type, u8 type, u8 authenticated,
2510                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2511 {
2512         struct smp_ltk *key, *old_key;
2513         u8 role = ltk_role(type);
2514
2515         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2516         if (old_key)
2517                 key = old_key;
2518         else {
2519                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2520                 if (!key)
2521                         return NULL;
2522                 list_add_rcu(&key->list, &hdev->long_term_keys);
2523         }
2524
2525         bacpy(&key->bdaddr, bdaddr);
2526         key->bdaddr_type = addr_type;
2527         memcpy(key->val, tk, sizeof(key->val));
2528         key->authenticated = authenticated;
2529         key->ediv = ediv;
2530         key->rand = rand;
2531         key->enc_size = enc_size;
2532         key->type = type;
2533
2534         return key;
2535 }
2536
2537 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2538                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2539 {
2540         struct smp_irk *irk;
2541
2542         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2543         if (!irk) {
2544                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2545                 if (!irk)
2546                         return NULL;
2547
2548                 bacpy(&irk->bdaddr, bdaddr);
2549                 irk->addr_type = addr_type;
2550
2551                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2552         }
2553
2554         memcpy(irk->val, val, 16);
2555         bacpy(&irk->rpa, rpa);
2556
2557         return irk;
2558 }
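
/* Example (sketch): caching a distributed IRK, roughly what the SMP code
 * does once key distribution completes. The identifiers id_addr,
 * id_addr_type and irk_val are illustrative, not actual smp.c names.
 *
 *   struct smp_irk *irk;
 *
 *   irk = hci_add_irk(hdev, &id_addr, id_addr_type, irk_val, BDADDR_ANY);
 *   if (!irk)
 *           return -ENOMEM;
 */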
2559
2560 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2561 {
2562         struct link_key *key;
2563
2564         key = hci_find_link_key(hdev, bdaddr);
2565         if (!key)
2566                 return -ENOENT;
2567
2568         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2569
2570         list_del_rcu(&key->list);
2571         kfree_rcu(key, rcu);
2572
2573         return 0;
2574 }
2575
2576 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2577 {
2578         struct smp_ltk *k;
2579         int removed = 0;
2580
2581         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2582                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2583                         continue;
2584
2585                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2586
2587                 list_del_rcu(&k->list);
2588                 kfree_rcu(k, rcu);
2589                 removed++;
2590         }
2591
2592         return removed ? 0 : -ENOENT;
2593 }
2594
2595 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2596 {
2597         struct smp_irk *k;
2598
2599         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2600                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2601                         continue;
2602
2603                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2604
2605                 list_del_rcu(&k->list);
2606                 kfree_rcu(k, rcu);
2607         }
2608 }
2609
2610 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2611 {
2612         struct smp_ltk *k;
2613         struct smp_irk *irk;
2614         u8 addr_type;
2615
2616         if (type == BDADDR_BREDR) {
2617                 if (hci_find_link_key(hdev, bdaddr))
2618                         return true;
2619                 return false;
2620         }
2621
2622         /* Convert to HCI addr type which struct smp_ltk uses */
2623         if (type == BDADDR_LE_PUBLIC)
2624                 addr_type = ADDR_LE_DEV_PUBLIC;
2625         else
2626                 addr_type = ADDR_LE_DEV_RANDOM;
2627
2628         irk = hci_get_irk(hdev, bdaddr, addr_type);
2629         if (irk) {
2630                 bdaddr = &irk->bdaddr;
2631                 addr_type = irk->addr_type;
2632         }
2633
2634         rcu_read_lock();
2635         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2636                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2637                         rcu_read_unlock();
2638                         return true;
2639                 }
2640         }
2641         rcu_read_unlock();
2642
2643         return false;
2644 }
2645
2646 /* HCI command timer function */
2647 static void hci_cmd_timeout(struct work_struct *work)
2648 {
2649         struct hci_dev *hdev = container_of(work, struct hci_dev,
2650                                             cmd_timer.work);
2651
2652         if (hdev->sent_cmd) {
2653                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2654                 u16 opcode = __le16_to_cpu(sent->opcode);
2655
2656                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2657         } else {
2658                 bt_dev_err(hdev, "command tx timeout");
2659         }
2660
2661         if (hdev->cmd_timeout)
2662                 hdev->cmd_timeout(hdev);
2663
2664         atomic_set(&hdev->cmd_cnt, 1);
2665         queue_work(hdev->workqueue, &hdev->cmd_work);
2666 }
2667
2668 /* HCI ncmd timer function */
2669 static void hci_ncmd_timeout(struct work_struct *work)
2670 {
2671         struct hci_dev *hdev = container_of(work, struct hci_dev,
2672                                             ncmd_timer.work);
2673
2674         bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
2675
2676         /* During the HCI_INIT phase no events are injected if the ncmd
2677          * timer triggers, since the procedure has its own timeout handling.
2678          */
2679         if (test_bit(HCI_INIT, &hdev->flags))
2680                 return;
2681
2682         /* This is an irrecoverable state, inject hardware error event */
2683         hci_reset_dev(hdev);
2684 }
2685
2686 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2687                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2688 {
2689         struct oob_data *data;
2690
2691         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2692                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2693                         continue;
2694                 if (data->bdaddr_type != bdaddr_type)
2695                         continue;
2696                 return data;
2697         }
2698
2699         return NULL;
2700 }
2701
2702 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2703                                u8 bdaddr_type)
2704 {
2705         struct oob_data *data;
2706
2707         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2708         if (!data)
2709                 return -ENOENT;
2710
2711         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2712
2713         list_del(&data->list);
2714         kfree(data);
2715
2716         return 0;
2717 }
2718
2719 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2720 {
2721         struct oob_data *data, *n;
2722
2723         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2724                 list_del(&data->list);
2725                 kfree(data);
2726         }
2727 }
2728
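/* The present field written below encodes which OOB values are valid:
 * 0x01 means P-192 data only, 0x02 P-256 data only, 0x03 both and
 * 0x00 neither.
 */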
2729 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2730                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2731                             u8 *hash256, u8 *rand256)
2732 {
2733         struct oob_data *data;
2734
2735         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2736         if (!data) {
2737                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2738                 if (!data)
2739                         return -ENOMEM;
2740
2741                 bacpy(&data->bdaddr, bdaddr);
2742                 data->bdaddr_type = bdaddr_type;
2743                 list_add(&data->list, &hdev->remote_oob_data);
2744         }
2745
2746         if (hash192 && rand192) {
2747                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2748                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2749                 if (hash256 && rand256)
2750                         data->present = 0x03;
2751         } else {
2752                 memset(data->hash192, 0, sizeof(data->hash192));
2753                 memset(data->rand192, 0, sizeof(data->rand192));
2754                 if (hash256 && rand256)
2755                         data->present = 0x02;
2756                 else
2757                         data->present = 0x00;
2758         }
2759
2760         if (hash256 && rand256) {
2761                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2762                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2763         } else {
2764                 memset(data->hash256, 0, sizeof(data->hash256));
2765                 memset(data->rand256, 0, sizeof(data->rand256));
2766                 if (hash192 && rand192)
2767                         data->present = 0x01;
2768         }
2769
2770         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2771
2772         return 0;
2773 }
2774
2775 /* This function requires the caller holds hdev->lock */
2776 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2777 {
2778         struct adv_info *adv_instance;
2779
2780         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2781                 if (adv_instance->instance == instance)
2782                         return adv_instance;
2783         }
2784
2785         return NULL;
2786 }
2787
2788 /* This function requires the caller holds hdev->lock */
2789 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2790 {
2791         struct adv_info *cur_instance;
2792
2793         cur_instance = hci_find_adv_instance(hdev, instance);
2794         if (!cur_instance)
2795                 return NULL;
2796
2797         if (cur_instance == list_last_entry(&hdev->adv_instances,
2798                                             struct adv_info, list))
2799                 return list_first_entry(&hdev->adv_instances,
2800                                                  struct adv_info, list);
2801         else
2802                 return list_next_entry(cur_instance, list);
2803 }
2804
2805 /* This function requires the caller holds hdev->lock */
2806 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2807 {
2808         struct adv_info *adv_instance;
2809
2810         adv_instance = hci_find_adv_instance(hdev, instance);
2811         if (!adv_instance)
2812                 return -ENOENT;
2813
2814         BT_DBG("%s removing %d", hdev->name, instance);
2815
2816         if (hdev->cur_adv_instance == instance) {
2817                 if (hdev->adv_instance_timeout) {
2818                         cancel_delayed_work(&hdev->adv_instance_expire);
2819                         hdev->adv_instance_timeout = 0;
2820                 }
2821                 hdev->cur_adv_instance = 0x00;
2822         }
2823
2824         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2825
2826         list_del(&adv_instance->list);
2827         kfree(adv_instance);
2828
2829         hdev->adv_instance_cnt--;
2830
2831         return 0;
2832 }
2833
2834 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2835 {
2836         struct adv_info *adv_instance, *n;
2837
2838         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2839                 adv_instance->rpa_expired = rpa_expired;
2840 }
2841
2842 /* This function requires the caller holds hdev->lock */
2843 void hci_adv_instances_clear(struct hci_dev *hdev)
2844 {
2845         struct adv_info *adv_instance, *n;
2846
2847         if (hdev->adv_instance_timeout) {
2848                 cancel_delayed_work(&hdev->adv_instance_expire);
2849                 hdev->adv_instance_timeout = 0;
2850         }
2851
2852         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2853                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2854                 list_del(&adv_instance->list);
2855                 kfree(adv_instance);
2856         }
2857
2858         hdev->adv_instance_cnt = 0;
2859         hdev->cur_adv_instance = 0x00;
2860 }
2861
2862 static void adv_instance_rpa_expired(struct work_struct *work)
2863 {
2864         struct adv_info *adv_instance = container_of(work, struct adv_info,
2865                                                      rpa_expired_cb.work);
2866
2867         BT_DBG("");
2868
2869         adv_instance->rpa_expired = true;
2870 }
2871
2872 /* This function requires the caller holds hdev->lock */
2873 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2874                          u16 adv_data_len, u8 *adv_data,
2875                          u16 scan_rsp_len, u8 *scan_rsp_data,
2876                          u16 timeout, u16 duration, s8 tx_power,
2877                          u32 min_interval, u32 max_interval)
2878 {
2879         struct adv_info *adv_instance;
2880
2881         adv_instance = hci_find_adv_instance(hdev, instance);
2882         if (adv_instance) {
2883                 memset(adv_instance->adv_data, 0,
2884                        sizeof(adv_instance->adv_data));
2885                 memset(adv_instance->scan_rsp_data, 0,
2886                        sizeof(adv_instance->scan_rsp_data));
2887         } else {
2888                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2889                     instance < 1 || instance > hdev->le_num_of_adv_sets)
2890                         return -EOVERFLOW;
2891
2892                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2893                 if (!adv_instance)
2894                         return -ENOMEM;
2895
2896                 adv_instance->pending = true;
2897                 adv_instance->instance = instance;
2898                 list_add(&adv_instance->list, &hdev->adv_instances);
2899                 hdev->adv_instance_cnt++;
2900         }
2901
2902         adv_instance->flags = flags;
2903         adv_instance->adv_data_len = adv_data_len;
2904         adv_instance->scan_rsp_len = scan_rsp_len;
2905         adv_instance->min_interval = min_interval;
2906         adv_instance->max_interval = max_interval;
2907         adv_instance->tx_power = tx_power;
2908
2909         if (adv_data_len)
2910                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2911
2912         if (scan_rsp_len)
2913                 memcpy(adv_instance->scan_rsp_data,
2914                        scan_rsp_data, scan_rsp_len);
2915
2916         adv_instance->timeout = timeout;
2917         adv_instance->remaining_time = timeout;
2918
2919         if (duration == 0)
2920                 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
2921         else
2922                 adv_instance->duration = duration;
2923
2924         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
2925                           adv_instance_rpa_expired);
2926
2927         BT_DBG("%s for %d", hdev->name, instance);
2928
2929         return 0;
2930 }
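
/* Example (sketch): registering a connectable advertising instance from a
 * caller that already holds hdev->lock. The values are illustrative only.
 *
 *   u8 ad[] = { 0x02, 0x01, 0x06 };  // AD: Flags, LE General Discoverable
 *   int err;
 *
 *   err = hci_add_adv_instance(hdev, 0x01, MGMT_ADV_FLAG_CONNECTABLE,
 *                              sizeof(ad), ad, 0, NULL, 0, 0,
 *                              HCI_ADV_TX_POWER_NO_PREFERENCE,
 *                              hdev->le_adv_min_interval,
 *                              hdev->le_adv_max_interval);
 */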
2931
2932 /* This function requires the caller holds hdev->lock */
2933 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
2934                               u16 adv_data_len, u8 *adv_data,
2935                               u16 scan_rsp_len, u8 *scan_rsp_data)
2936 {
2937         struct adv_info *adv_instance;
2938
2939         adv_instance = hci_find_adv_instance(hdev, instance);
2940
2941         /* If advertisement doesn't exist, we can't modify its data */
2942         if (!adv_instance)
2943                 return -ENOENT;
2944
2945         if (adv_data_len) {
2946                 memset(adv_instance->adv_data, 0,
2947                        sizeof(adv_instance->adv_data));
2948                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2949                 adv_instance->adv_data_len = adv_data_len;
2950         }
2951
2952         if (scan_rsp_len) {
2953                 memset(adv_instance->scan_rsp_data, 0,
2954                        sizeof(adv_instance->scan_rsp_data));
2955                 memcpy(adv_instance->scan_rsp_data,
2956                        scan_rsp_data, scan_rsp_len);
2957                 adv_instance->scan_rsp_len = scan_rsp_len;
2958         }
2959
2960         return 0;
2961 }
2962
2963 /* This function requires the caller holds hdev->lock */
2964 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
2965 {
2966         u32 flags;
2967         struct adv_info *adv;
2968
2969         if (instance == 0x00) {
2970                 /* Instance 0 always manages the "Tx Power" and "Flags"
2971                  * fields
2972                  */
2973                 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
2974
2975                 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
2976                  * corresponds to the "connectable" instance flag.
2977                  */
2978                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
2979                         flags |= MGMT_ADV_FLAG_CONNECTABLE;
2980
2981                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2982                         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
2983                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2984                         flags |= MGMT_ADV_FLAG_DISCOV;
2985
2986                 return flags;
2987         }
2988
2989         adv = hci_find_adv_instance(hdev, instance);
2990
2991         /* Return 0 when given an invalid instance identifier. */
2992         if (!adv)
2993                 return 0;
2994
2995         return adv->flags;
2996 }
2997
2998 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
2999 {
3000         struct adv_info *adv;
3001
3002         /* Instance 0x00 always sets the local name */
3003         if (instance == 0x00)
3004                 return true;
3005
3006         adv = hci_find_adv_instance(hdev, instance);
3007         if (!adv)
3008                 return false;
3009
3010         if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
3011             adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
3012                 return true;
3013
3014         return adv->scan_rsp_len ? true : false;
3015 }
3016
3017 /* This function requires the caller holds hdev->lock */
3018 void hci_adv_monitors_clear(struct hci_dev *hdev)
3019 {
3020         struct adv_monitor *monitor;
3021         int handle;
3022
3023         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3024                 hci_free_adv_monitor(hdev, monitor);
3025
3026         idr_destroy(&hdev->adv_monitors_idr);
3027 }
3028
3029 /* Frees the monitor structure and does some bookkeeping.
3030  * This function requires the caller holds hdev->lock.
3031  */
3032 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3033 {
3034         struct adv_pattern *pattern;
3035         struct adv_pattern *tmp;
3036
3037         if (!monitor)
3038                 return;
3039
3040         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
3041                 list_del(&pattern->list);
3042                 kfree(pattern);
3043         }
3044
3045         if (monitor->handle)
3046                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3047
3048         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
3049                 hdev->adv_monitors_cnt--;
3050                 mgmt_adv_monitor_removed(hdev, monitor->handle);
3051         }
3052
3053         kfree(monitor);
3054 }
3055
3056 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
3057 {
3058         return mgmt_add_adv_patterns_monitor_complete(hdev, status);
3059 }
3060
3061 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
3062 {
3063         return mgmt_remove_adv_monitor_complete(hdev, status);
3064 }
3065
3066 /* Assigns a handle to the monitor and, if offloading is supported and
3067  * the device is powered, also forwards the request to the controller.
3068  * Returns true if the request was forwarded (result is pending), false otherwise.
3069  * This function requires the caller holds hdev->lock.
3070  */
3071 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
3072                          int *err)
3073 {
3074         int min, max, handle;
3075
3076         *err = 0;
3077
3078         if (!monitor) {
3079                 *err = -EINVAL;
3080                 return false;
3081         }
3082
3083         min = HCI_MIN_ADV_MONITOR_HANDLE;
3084         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3085         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3086                            GFP_KERNEL);
3087         if (handle < 0) {
3088                 *err = handle;
3089                 return false;
3090         }
3091
3092         monitor->handle = handle;
3093
3094         if (!hdev_is_powered(hdev))
3095                 return false;
3096
3097         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3098         case HCI_ADV_MONITOR_EXT_NONE:
3099                 hci_update_background_scan(hdev);
3100                 bt_dev_dbg(hdev, "add monitor status %d", *err);
3101                 /* Message was not forwarded to controller - not an error */
3102                 return false;
3103         case HCI_ADV_MONITOR_EXT_MSFT:
3104                 *err = msft_add_monitor_pattern(hdev, monitor);
3105                 bt_dev_dbg(hdev, "add monitor msft status %d",
3106                            *err);
3107                 break;
3108         }
3109
3110         return (*err == 0);
3111 }
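
/* Editor's note on the (return value, *err) contract above: a true
 * return means the request reached the controller and the result is
 * reported asynchronously via hci_add_adv_patterns_monitor_complete();
 * false with *err == 0 means the monitor was accepted locally with
 * nothing pending (no offload support, or the device is powered off);
 * false with a negative *err (-EINVAL, or an idr_alloc() error) means
 * the monitor was rejected outright.
 */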
3112
3113 /* Attempts to tell the controller to remove the monitor and then frees
3114  * it. If the controller has no corresponding handle, remove it anyway.
3115  * Returns true if the request was forwarded (result is pending), false otherwise.
3116  * This function requires the caller holds hdev->lock.
3117  */
3118 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
3119                                    struct adv_monitor *monitor,
3120                                    u16 handle, int *err)
3121 {
3122         *err = 0;
3123
3124         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3125         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
3126                 goto free_monitor;
3127         case HCI_ADV_MONITOR_EXT_MSFT:
3128                 *err = msft_remove_monitor(hdev, monitor, handle);
3129                 break;
3130         }
3131
3132         /* If no matching handle is registered, just free the monitor */
3133         if (*err == -ENOENT)
3134                 goto free_monitor;
3135
3136         return (*err == 0);
3137
3138 free_monitor:
3139         if (*err == -ENOENT)
3140                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
3141                             monitor->handle);
3142         hci_free_adv_monitor(hdev, monitor);
3143
3144         *err = 0;
3145         return false;
3146 }
3147
3148 /* Returns true if the request was forwarded (result is pending), false otherwise.
3149  * This function requires the caller holds hdev->lock.
3150  */
3151 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
3152 {
3153         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
3154         bool pending;
3155
3156         if (!monitor) {
3157                 *err = -EINVAL;
3158                 return false;
3159         }
3160
3161         pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
3162         if (!*err && !pending)
3163                 hci_update_background_scan(hdev);
3164
3165         bt_dev_dbg(hdev, "remove monitor handle %d, status %d, %spending",
3166                    handle, *err, pending ? "" : "not ");
3167
3168         return pending;
3169 }
3170
3171 /* Returns true if the request was forwarded (result is pending), false otherwise.
3172  * This function requires the caller holds hdev->lock.
3173  */
3174 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
3175 {
3176         struct adv_monitor *monitor;
3177         int idr_next_id = 0;
3178         bool pending = false;
3179         bool update = false;
3180
3181         *err = 0;
3182
3183         while (!*err && !pending) {
3184                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
3185                 if (!monitor)
3186                         break;
3187
3188                 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
3189
3190                 if (!*err && !pending)
3191                         update = true;
3192         }
3193
3194         if (update)
3195                 hci_update_background_scan(hdev);
3196
3197         bt_dev_dbg(hdev, "remove all monitors status %d, %spending",
3198                    *err, pending ? "" : "not ");
3199
3200         return pending;
3201 }
3202
3203 /* This function requires the caller holds hdev->lock */
3204 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3205 {
3206         return !idr_is_empty(&hdev->adv_monitors_idr);
3207 }
3208
3209 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3210 {
3211         if (msft_monitor_supported(hdev))
3212                 return HCI_ADV_MONITOR_EXT_MSFT;
3213
3214         return HCI_ADV_MONITOR_EXT_NONE;
3215 }
3216
3217 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3218                                          bdaddr_t *bdaddr, u8 type)
3219 {
3220         struct bdaddr_list *b;
3221
3222         list_for_each_entry(b, bdaddr_list, list) {
3223                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3224                         return b;
3225         }
3226
3227         return NULL;
3228 }
3229
3230 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3231                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3232                                 u8 type)
3233 {
3234         struct bdaddr_list_with_irk *b;
3235
3236         list_for_each_entry(b, bdaddr_list, list) {
3237                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3238                         return b;
3239         }
3240
3241         return NULL;
3242 }
3243
3244 struct bdaddr_list_with_flags *
3245 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3246                                   bdaddr_t *bdaddr, u8 type)
3247 {
3248         struct bdaddr_list_with_flags *b;
3249
3250         list_for_each_entry(b, bdaddr_list, list) {
3251                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3252                         return b;
3253         }
3254
3255         return NULL;
3256 }
3257
3258 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3259 {
3260         struct bdaddr_list *b, *n;
3261
3262         list_for_each_entry_safe(b, n, bdaddr_list, list) {
3263                 list_del(&b->list);
3264                 kfree(b);
3265         }
3266 }
3267
3268 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3269 {
3270         struct bdaddr_list *entry;
3271
3272         if (!bacmp(bdaddr, BDADDR_ANY))
3273                 return -EBADF;
3274
3275         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3276                 return -EEXIST;
3277
3278         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3279         if (!entry)
3280                 return -ENOMEM;
3281
3282         bacpy(&entry->bdaddr, bdaddr);
3283         entry->bdaddr_type = type;
3284
3285         list_add(&entry->list, list);
3286
3287         return 0;
3288 }
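
/* Illustrative example (editor's addition, hypothetical caller):
 * adding a peer to the LE accept list and treating an existing entry
 * as success:
 *
 *	int err;
 *
 *	err = hci_bdaddr_list_add(&hdev->le_accept_list, &bdaddr,
 *				  ADDR_LE_DEV_PUBLIC);
 *	if (err == -EEXIST)
 *		err = 0;
 *
 * BDADDR_ANY is rejected here with -EBADF, while the matching *_del
 * helpers below interpret BDADDR_ANY as "clear the whole list".
 */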
3289
3290 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3291                                         u8 type, u8 *peer_irk, u8 *local_irk)
3292 {
3293         struct bdaddr_list_with_irk *entry;
3294
3295         if (!bacmp(bdaddr, BDADDR_ANY))
3296                 return -EBADF;
3297
3298         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3299                 return -EEXIST;
3300
3301         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3302         if (!entry)
3303                 return -ENOMEM;
3304
3305         bacpy(&entry->bdaddr, bdaddr);
3306         entry->bdaddr_type = type;
3307
3308         if (peer_irk)
3309                 memcpy(entry->peer_irk, peer_irk, 16);
3310
3311         if (local_irk)
3312                 memcpy(entry->local_irk, local_irk, 16);
3313
3314         list_add(&entry->list, list);
3315
3316         return 0;
3317 }
3318
3319 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3320                                    u8 type, u32 flags)
3321 {
3322         struct bdaddr_list_with_flags *entry;
3323
3324         if (!bacmp(bdaddr, BDADDR_ANY))
3325                 return -EBADF;
3326
3327         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3328                 return -EEXIST;
3329
3330         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3331         if (!entry)
3332                 return -ENOMEM;
3333
3334         bacpy(&entry->bdaddr, bdaddr);
3335         entry->bdaddr_type = type;
3336         entry->current_flags = flags;
3337
3338         list_add(&entry->list, list);
3339
3340         return 0;
3341 }
3342
3343 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3344 {
3345         struct bdaddr_list *entry;
3346
3347         if (!bacmp(bdaddr, BDADDR_ANY)) {
3348                 hci_bdaddr_list_clear(list);
3349                 return 0;
3350         }
3351
3352         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3353         if (!entry)
3354                 return -ENOENT;
3355
3356         list_del(&entry->list);
3357         kfree(entry);
3358
3359         return 0;
3360 }
3361
3362 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3363                                                         u8 type)
3364 {
3365         struct bdaddr_list_with_irk *entry;
3366
3367         if (!bacmp(bdaddr, BDADDR_ANY)) {
3368                 hci_bdaddr_list_clear(list);
3369                 return 0;
3370         }
3371
3372         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3373         if (!entry)
3374                 return -ENOENT;
3375
3376         list_del(&entry->list);
3377         kfree(entry);
3378
3379         return 0;
3380 }
3381
3382 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3383                                    u8 type)
3384 {
3385         struct bdaddr_list_with_flags *entry;
3386
3387         if (!bacmp(bdaddr, BDADDR_ANY)) {
3388                 hci_bdaddr_list_clear(list);
3389                 return 0;
3390         }
3391
3392         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3393         if (!entry)
3394                 return -ENOENT;
3395
3396         list_del(&entry->list);
3397         kfree(entry);
3398
3399         return 0;
3400 }
3401
3402 /* This function requires the caller holds hdev->lock */
3403 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3404                                                bdaddr_t *addr, u8 addr_type)
3405 {
3406         struct hci_conn_params *params;
3407
3408         list_for_each_entry(params, &hdev->le_conn_params, list) {
3409                 if (bacmp(&params->addr, addr) == 0 &&
3410                     params->addr_type == addr_type) {
3411                         return params;
3412                 }
3413         }
3414
3415         return NULL;
3416 }
3417
3418 /* This function requires the caller holds hdev->lock */
3419 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3420                                                   bdaddr_t *addr, u8 addr_type)
3421 {
3422         struct hci_conn_params *param;
3423
3424         list_for_each_entry(param, list, action) {
3425                 if (bacmp(&param->addr, addr) == 0 &&
3426                     param->addr_type == addr_type)
3427                         return param;
3428         }
3429
3430         return NULL;
3431 }
3432
3433 /* This function requires the caller holds hdev->lock */
3434 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3435                                             bdaddr_t *addr, u8 addr_type)
3436 {
3437         struct hci_conn_params *params;
3438
3439         params = hci_conn_params_lookup(hdev, addr, addr_type);
3440         if (params)
3441                 return params;
3442
3443         params = kzalloc(sizeof(*params), GFP_KERNEL);
3444         if (!params) {
3445                 bt_dev_err(hdev, "out of memory");
3446                 return NULL;
3447         }
3448
3449         bacpy(&params->addr, addr);
3450         params->addr_type = addr_type;
3451
3452         list_add(&params->list, &hdev->le_conn_params);
3453         INIT_LIST_HEAD(&params->action);
3454
3455         params->conn_min_interval = hdev->le_conn_min_interval;
3456         params->conn_max_interval = hdev->le_conn_max_interval;
3457         params->conn_latency = hdev->le_conn_latency;
3458         params->supervision_timeout = hdev->le_supv_timeout;
3459         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3460
3461         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3462
3463         return params;
3464 }
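
/* Illustrative example (editor's addition, hypothetical caller holding
 * hdev->lock): creating connection parameters for a bonded peer and
 * arming background auto-connection:
 *
 *	struct hci_conn_params *p;
 *
 *	p = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (p) {
 *		p->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *		list_add(&p->action, &hdev->pend_le_conns);
 *	}
 *
 * The real MGMT paths additionally trigger a background scan update;
 * this fragment only shows the data-structure side.
 */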
3465
3466 static void hci_conn_params_free(struct hci_conn_params *params)
3467 {
3468         if (params->conn) {
3469                 hci_conn_drop(params->conn);
3470                 hci_conn_put(params->conn);
3471         }
3472
3473         list_del(&params->action);
3474         list_del(&params->list);
3475         kfree(params);
3476 }
3477
3478 /* This function requires the caller holds hdev->lock */
3479 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3480 {
3481         struct hci_conn_params *params;
3482
3483         params = hci_conn_params_lookup(hdev, addr, addr_type);
3484         if (!params)
3485                 return;
3486
3487         hci_conn_params_free(params);
3488
3489         hci_update_background_scan(hdev);
3490
3491         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3492 }
3493
3494 /* This function requires the caller holds hdev->lock */
3495 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3496 {
3497         struct hci_conn_params *params, *tmp;
3498
3499         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3500                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3501                         continue;
3502
3503                 /* If trying to establish a one-time connection to a disabled
3504                  * device, leave the params but mark them for one-time use.
3505                  */
3506                 if (params->explicit_connect) {
3507                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3508                         continue;
3509                 }
3510
3511                 list_del(&params->list);
3512                 kfree(params);
3513         }
3514
3515         BT_DBG("All LE disabled connection parameters were removed");
3516 }
3517
3518 /* This function requires the caller holds hdev->lock */
3519 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3520 {
3521         struct hci_conn_params *params, *tmp;
3522
3523         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3524                 hci_conn_params_free(params);
3525
3526         BT_DBG("All LE connection parameters were removed");
3527 }
3528
3529 /* Copy the Identity Address of the controller.
3530  *
3531  * If the controller has a public BD_ADDR, then by default use that one.
3532  * If this is an LE-only controller without a public address, default to
3533  * the static random address.
3534  *
3535  * For debugging purposes it is possible to force controllers with a
3536  * public address to use the static random address instead.
3537  *
3538  * In case BR/EDR has been disabled on a dual-mode controller and
3539  * userspace has configured a static address, then that address
3540  * becomes the identity address instead of the public BR/EDR address.
3541  */
3542 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3543                                u8 *bdaddr_type)
3544 {
3545         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3546             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3547             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3548              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3549                 bacpy(bdaddr, &hdev->static_addr);
3550                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3551         } else {
3552                 bacpy(bdaddr, &hdev->bdaddr);
3553                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3554         }
3555 }
3556
3557 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3558 {
3559         int i;
3560
3561         for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3562                 clear_bit(i, hdev->suspend_tasks);
3563
3564         wake_up(&hdev->suspend_wait_q);
3565 }
3566
3567 static int hci_suspend_wait_event(struct hci_dev *hdev)
3568 {
3569 #define WAKE_COND                                                              \
3570         (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) ==           \
3571          __SUSPEND_NUM_TASKS)
3572
3573         int i;
3574         int ret = wait_event_timeout(hdev->suspend_wait_q,
3575                                      WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3576
3577         if (ret == 0) {
3578                 bt_dev_err(hdev, "Timed out waiting for suspend events");
3579                 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3580                         if (test_bit(i, hdev->suspend_tasks))
3581                                 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3582                         clear_bit(i, hdev->suspend_tasks);
3583                 }
3584
3585                 ret = -ETIMEDOUT;
3586         } else {
3587                 ret = 0;
3588         }
3589
3590         return ret;
3591 }
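
/* Editor's note: WAKE_COND above is satisfied once the suspend_tasks
 * bitmap is empty: find_first_bit() returns __SUSPEND_NUM_TASKS only
 * when no bit is set, i.e. every outstanding suspend task has called
 * clear_bit() and woken suspend_wait_q.
 */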
3592
3593 static void hci_prepare_suspend(struct work_struct *work)
3594 {
3595         struct hci_dev *hdev =
3596                 container_of(work, struct hci_dev, suspend_prepare);
3597
3598         hci_dev_lock(hdev);
3599         hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3600         hci_dev_unlock(hdev);
3601 }
3602
3603 static int hci_change_suspend_state(struct hci_dev *hdev,
3604                                     enum suspended_state next)
3605 {
3606         hdev->suspend_state_next = next;
3607         set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3608         queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3609         return hci_suspend_wait_event(hdev);
3610 }
3611
3612 static void hci_clear_wake_reason(struct hci_dev *hdev)
3613 {
3614         hci_dev_lock(hdev);
3615
3616         hdev->wake_reason = 0;
3617         bacpy(&hdev->wake_addr, BDADDR_ANY);
3618         hdev->wake_addr_type = 0;
3619
3620         hci_dev_unlock(hdev);
3621 }
3622
3623 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3624                                 void *data)
3625 {
3626         struct hci_dev *hdev =
3627                 container_of(nb, struct hci_dev, suspend_notifier);
3628         int ret = 0;
3629
3630         if (action == PM_SUSPEND_PREPARE)
3631                 ret = hci_suspend_dev(hdev);
3632         else if (action == PM_POST_SUSPEND)
3633                 ret = hci_resume_dev(hdev);
3634
3635         if (ret)
3636                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3637                            action, ret);
3638
3639         return NOTIFY_DONE;
3640 }
3641
3642 /* Alloc HCI device */
3643 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
3644 {
3645         struct hci_dev *hdev;
3646         unsigned int alloc_size;
3647
3648         alloc_size = sizeof(*hdev);
3649         if (sizeof_priv) {
3650                 /* FIXME: may need alignment? */
3651                 alloc_size += sizeof_priv;
3652         }
3653
3654         hdev = kzalloc(alloc_size, GFP_KERNEL);
3655         if (!hdev)
3656                 return NULL;
3657
3658         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3659         hdev->esco_type = (ESCO_HV1);
3660         hdev->link_mode = (HCI_LM_ACCEPT);
3661         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3662         hdev->io_capability = 0x03;     /* No Input No Output */
3663         hdev->manufacturer = 0xffff;    /* Default to internal use */
3664         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3665         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3666         hdev->adv_instance_cnt = 0;
3667         hdev->cur_adv_instance = 0x00;
3668         hdev->adv_instance_timeout = 0;
3669
3670         hdev->advmon_allowlist_duration = 300;
3671         hdev->advmon_no_filter_duration = 500;
3672         hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disable */
3673
3674         hdev->sniff_max_interval = 800;
3675         hdev->sniff_min_interval = 80;
3676
3677         hdev->le_adv_channel_map = 0x07;
3678         hdev->le_adv_min_interval = 0x0800;
3679         hdev->le_adv_max_interval = 0x0800;
3680         hdev->le_scan_interval = 0x0060;
3681         hdev->le_scan_window = 0x0030;
3682         hdev->le_scan_int_suspend = 0x0400;
3683         hdev->le_scan_window_suspend = 0x0012;
3684         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3685         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3686         hdev->le_scan_int_adv_monitor = 0x0060;
3687         hdev->le_scan_window_adv_monitor = 0x0030;
3688         hdev->le_scan_int_connect = 0x0060;
3689         hdev->le_scan_window_connect = 0x0060;
3690         hdev->le_conn_min_interval = 0x0018;
3691         hdev->le_conn_max_interval = 0x0028;
3692         hdev->le_conn_latency = 0x0000;
3693         hdev->le_supv_timeout = 0x002a;
3694         hdev->le_def_tx_len = 0x001b;
3695         hdev->le_def_tx_time = 0x0148;
3696         hdev->le_max_tx_len = 0x001b;
3697         hdev->le_max_tx_time = 0x0148;
3698         hdev->le_max_rx_len = 0x001b;
3699         hdev->le_max_rx_time = 0x0148;
3700         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3701         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3702         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3703         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3704         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3705         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3706         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3707         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3708         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
3709
3710         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3711         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3712         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3713         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3714         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3715         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3716
3717         /* default 1.28 sec page scan */
3718         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3719         hdev->def_page_scan_int = 0x0800;
3720         hdev->def_page_scan_window = 0x0012;
3721
3722         mutex_init(&hdev->lock);
3723         mutex_init(&hdev->req_lock);
3724
3725         INIT_LIST_HEAD(&hdev->mgmt_pending);
3726         INIT_LIST_HEAD(&hdev->reject_list);
3727         INIT_LIST_HEAD(&hdev->accept_list);
3728         INIT_LIST_HEAD(&hdev->uuids);
3729         INIT_LIST_HEAD(&hdev->link_keys);
3730         INIT_LIST_HEAD(&hdev->long_term_keys);
3731         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3732         INIT_LIST_HEAD(&hdev->remote_oob_data);
3733         INIT_LIST_HEAD(&hdev->le_accept_list);
3734         INIT_LIST_HEAD(&hdev->le_resolv_list);
3735         INIT_LIST_HEAD(&hdev->le_conn_params);
3736         INIT_LIST_HEAD(&hdev->pend_le_conns);
3737         INIT_LIST_HEAD(&hdev->pend_le_reports);
3738         INIT_LIST_HEAD(&hdev->conn_hash.list);
3739         INIT_LIST_HEAD(&hdev->adv_instances);
3740         INIT_LIST_HEAD(&hdev->blocked_keys);
3741
3742         INIT_LIST_HEAD(&hdev->local_codecs);
3743         INIT_WORK(&hdev->rx_work, hci_rx_work);
3744         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3745         INIT_WORK(&hdev->tx_work, hci_tx_work);
3746         INIT_WORK(&hdev->power_on, hci_power_on);
3747         INIT_WORK(&hdev->error_reset, hci_error_reset);
3748         INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3749
3750         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3751
3752         skb_queue_head_init(&hdev->rx_q);
3753         skb_queue_head_init(&hdev->cmd_q);
3754         skb_queue_head_init(&hdev->raw_q);
3755
3756         init_waitqueue_head(&hdev->req_wait_q);
3757         init_waitqueue_head(&hdev->suspend_wait_q);
3758
3759         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3760         INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
3761
3762         hci_request_setup(hdev);
3763
3764         hci_init_sysfs(hdev);
3765         discovery_init(hdev);
3766
3767         return hdev;
3768 }
3769 EXPORT_SYMBOL(hci_alloc_dev_priv);
3770
3771 /* Free HCI device */
3772 void hci_free_dev(struct hci_dev *hdev)
3773 {
3774         /* will free via device release */
3775         put_device(&hdev->dev);
3776 }
3777 EXPORT_SYMBOL(hci_free_dev);
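
/* Illustrative driver-side lifecycle (editor's addition, cf. drivers
 * such as btusb); my_open/my_close/my_send are placeholders, not
 * functions from this file:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	int err;
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * hci_register_dev() below rejects a device that lacks any of the three
 * mandatory callbacks with -EINVAL.
 */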
3778
3779 /* Register HCI device */
3780 int hci_register_dev(struct hci_dev *hdev)
3781 {
3782         int id, error;
3783
3784         if (!hdev->open || !hdev->close || !hdev->send)
3785                 return -EINVAL;
3786
3787         /* Do not allow HCI_AMP devices to register at index 0,
3788          * so the index can be used as the AMP controller ID.
3789          */
3790         switch (hdev->dev_type) {
3791         case HCI_PRIMARY:
3792                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3793                 break;
3794         case HCI_AMP:
3795                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3796                 break;
3797         default:
3798                 return -EINVAL;
3799         }
3800
3801         if (id < 0)
3802                 return id;
3803
3804         sprintf(hdev->name, "hci%d", id);
3805         hdev->id = id;
3806
3807         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3808
3809         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3810         if (!hdev->workqueue) {
3811                 error = -ENOMEM;
3812                 goto err;
3813         }
3814
3815         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3816                                                       hdev->name);
3817         if (!hdev->req_workqueue) {
3818                 destroy_workqueue(hdev->workqueue);
3819                 error = -ENOMEM;
3820                 goto err;
3821         }
3822
3823         if (!IS_ERR_OR_NULL(bt_debugfs))
3824                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3825
3826         dev_set_name(&hdev->dev, "%s", hdev->name);
3827
3828         error = device_add(&hdev->dev);
3829         if (error < 0)
3830                 goto err_wqueue;
3831
3832         hci_leds_init(hdev);
3833
3834         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3835                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3836                                     hdev);
3837         if (hdev->rfkill) {
3838                 if (rfkill_register(hdev->rfkill) < 0) {
3839                         rfkill_destroy(hdev->rfkill);
3840                         hdev->rfkill = NULL;
3841                 }
3842         }
3843
3844         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3845                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3846
3847         hci_dev_set_flag(hdev, HCI_SETUP);
3848         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3849
3850         if (hdev->dev_type == HCI_PRIMARY) {
3851                 /* Assume BR/EDR support until proven otherwise (such as
3852                  * through reading supported features during init).
3853                  */
3854                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3855         }
3856
3857         write_lock(&hci_dev_list_lock);
3858         list_add(&hdev->list, &hci_dev_list);
3859         write_unlock(&hci_dev_list_lock);
3860
3861         /* Devices that are marked for raw-only usage are unconfigured
3862          * and should not be included in normal operation.
3863          */
3864         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3865                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3866
3867         hci_sock_dev_event(hdev, HCI_DEV_REG);
3868         hci_dev_hold(hdev);
3869
3870         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3871                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3872                 error = register_pm_notifier(&hdev->suspend_notifier);
3873                 if (error)
3874                         goto err_wqueue;
3875         }
3876
3877         queue_work(hdev->req_workqueue, &hdev->power_on);
3878
3879         idr_init(&hdev->adv_monitors_idr);
3880         msft_register(hdev);
3881
3882         return id;
3883
3884 err_wqueue:
3885         destroy_workqueue(hdev->workqueue);
3886         destroy_workqueue(hdev->req_workqueue);
3887 err:
3888         ida_simple_remove(&hci_index_ida, hdev->id);
3889
3890         return error;
3891 }
3892 EXPORT_SYMBOL(hci_register_dev);
3893
3894 /* Unregister HCI device */
3895 void hci_unregister_dev(struct hci_dev *hdev)
3896 {
3897         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3898
3899         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3900
3901         write_lock(&hci_dev_list_lock);
3902         list_del(&hdev->list);
3903         write_unlock(&hci_dev_list_lock);
3904
3905         cancel_work_sync(&hdev->power_on);
3906
3907         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3908                 hci_suspend_clear_tasks(hdev);
3909                 unregister_pm_notifier(&hdev->suspend_notifier);
3910                 cancel_work_sync(&hdev->suspend_prepare);
3911         }
3912
3913         msft_unregister(hdev);
3914
3915         hci_dev_do_close(hdev);
3916
3917         if (!test_bit(HCI_INIT, &hdev->flags) &&
3918             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3919             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3920                 hci_dev_lock(hdev);
3921                 mgmt_index_removed(hdev);
3922                 hci_dev_unlock(hdev);
3923         }
3924
3925         /* mgmt_index_removed should take care of emptying the
3926          * pending list */
3927         BUG_ON(!list_empty(&hdev->mgmt_pending));
3928
3929         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3930
3931         if (hdev->rfkill) {
3932                 rfkill_unregister(hdev->rfkill);
3933                 rfkill_destroy(hdev->rfkill);
3934         }
3935
3936         device_del(&hdev->dev);
3937         /* Actual cleanup is deferred until hci_release_dev(). */
3938         hci_dev_put(hdev);
3939 }
3940 EXPORT_SYMBOL(hci_unregister_dev);
3941
3942 /* Release HCI device */
3943 void hci_release_dev(struct hci_dev *hdev)
3944 {
3945         debugfs_remove_recursive(hdev->debugfs);
3946         kfree_const(hdev->hw_info);
3947         kfree_const(hdev->fw_info);
3948
3949         destroy_workqueue(hdev->workqueue);
3950         destroy_workqueue(hdev->req_workqueue);
3951
3952         hci_dev_lock(hdev);
3953         hci_bdaddr_list_clear(&hdev->reject_list);
3954         hci_bdaddr_list_clear(&hdev->accept_list);
3955         hci_uuids_clear(hdev);
3956         hci_link_keys_clear(hdev);
3957         hci_smp_ltks_clear(hdev);
3958         hci_smp_irks_clear(hdev);
3959         hci_remote_oob_data_clear(hdev);
3960         hci_adv_instances_clear(hdev);
3961         hci_adv_monitors_clear(hdev);
3962         hci_bdaddr_list_clear(&hdev->le_accept_list);
3963         hci_bdaddr_list_clear(&hdev->le_resolv_list);
3964         hci_conn_params_clear_all(hdev);
3965         hci_discovery_filter_clear(hdev);
3966         hci_blocked_keys_clear(hdev);
3967         hci_dev_unlock(hdev);
3968
3969         ida_simple_remove(&hci_index_ida, hdev->id);
3970         kfree(hdev);
3971 }
3972 EXPORT_SYMBOL(hci_release_dev);
3973
3974 /* Suspend HCI device */
3975 int hci_suspend_dev(struct hci_dev *hdev)
3976 {
3977         int ret;
3978         u8 state = BT_RUNNING;
3979
3980         bt_dev_dbg(hdev, "");
3981
3982         /* Suspend should only act on a powered device. */
3983         if (!hdev_is_powered(hdev) ||
3984             hci_dev_test_flag(hdev, HCI_UNREGISTER))
3985                 return 0;
3986
3987         /* If powering down, wait for completion. */
3988         if (mgmt_powering_down(hdev)) {
3989                 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3990                 ret = hci_suspend_wait_event(hdev);
3991                 if (ret)
3992                         goto done;
3993         }
3994
3995         /* Suspend consists of two actions:
3996          *  - First, disconnect everything and make the controller not
3997          *    connectable (disabling scanning)
3998          *  - Second, program event filter/accept list and enable scan
3999          */
4000         ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
4001         if (ret)
4002                 goto clear;
4003
4004         state = BT_SUSPEND_DISCONNECT;
4005
4006         /* Only configure the accept list if the device may wake up. */
4007         if (hdev->wakeup && hdev->wakeup(hdev)) {
4008                 ret = hci_change_suspend_state(hdev, BT_SUSPEND_CONFIGURE_WAKE);
4009                 if (!ret)
4010                         state = BT_SUSPEND_CONFIGURE_WAKE;
4011         }
4012
4013 clear:
4014         hci_clear_wake_reason(hdev);
4015         mgmt_suspending(hdev, state);
4016
4017 done:
4018         /* We always allow suspend even if suspend preparation failed, and
4019          * attempt to recover in resume.
4020          */
4021         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
4022         return ret;
4023 }
4024 EXPORT_SYMBOL(hci_suspend_dev);
4025
4026 /* Resume HCI device */
4027 int hci_resume_dev(struct hci_dev *hdev)
4028 {
4029         int ret;
4030
4031         bt_dev_dbg(hdev, "");
4032
4033         /* Resume should only act on a powered device. */
4034         if (!hdev_is_powered(hdev) ||
4035             hci_dev_test_flag(hdev, HCI_UNREGISTER))
4036                 return 0;
4037
4038         /* If powering down, don't attempt to resume */
4039         if (mgmt_powering_down(hdev))
4040                 return 0;
4041
4042         ret = hci_change_suspend_state(hdev, BT_RUNNING);
4043
4044         mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
4045                               hdev->wake_addr_type);
4046
4047         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
4048         return ret;
4049 }
4050 EXPORT_SYMBOL(hci_resume_dev);
4051
4052 /* Reset HCI device */
4053 int hci_reset_dev(struct hci_dev *hdev)
4054 {
4055         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4056         struct sk_buff *skb;
4057
4058         skb = bt_skb_alloc(sizeof(hw_err), GFP_ATOMIC);
4059         if (!skb)
4060                 return -ENOMEM;
4061
4062         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
4063         skb_put_data(skb, hw_err, sizeof(hw_err));
4064
4065         bt_dev_err(hdev, "Injecting HCI hardware error event");
4066
4067         /* Send Hardware Error to upper stack */
4068         return hci_recv_frame(hdev, skb);
4069 }
4070 EXPORT_SYMBOL(hci_reset_dev);
4071
4072 /* Receive frame from HCI drivers */
4073 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4074 {
4075         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4076                       !test_bit(HCI_INIT, &hdev->flags))) {
4077                 kfree_skb(skb);
4078                 return -ENXIO;
4079         }
4080
4081         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4082             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
4083             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4084             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
4085                 kfree_skb(skb);
4086                 return -EINVAL;
4087         }
4088
4089         /* Incoming skb */
4090         bt_cb(skb)->incoming = 1;
4091
4092         /* Time stamp */
4093         __net_timestamp(skb);
4094
4095         skb_queue_tail(&hdev->rx_q, skb);
4096         queue_work(hdev->workqueue, &hdev->rx_work);
4097
4098         return 0;
4099 }
4100 EXPORT_SYMBOL(hci_recv_frame);
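
/* Illustrative driver RX path (editor's addition); buf and len are
 * placeholders for data received from the transport:
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, buf, len);
 *	return hci_recv_frame(hdev, skb);
 *
 * On error hci_recv_frame() consumes the skb itself, so the caller must
 * not free or reuse it afterwards.
 */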
4101
4102 /* Receive diagnostic message from HCI drivers */
4103 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
4104 {
4105         /* Mark as diagnostic packet */
4106         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
4107
4108         /* Time stamp */
4109         __net_timestamp(skb);
4110
4111         skb_queue_tail(&hdev->rx_q, skb);
4112         queue_work(hdev->workqueue, &hdev->rx_work);
4113
4114         return 0;
4115 }
4116 EXPORT_SYMBOL(hci_recv_diag);
4117
4118 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
4119 {
4120         va_list vargs;
4121
4122         va_start(vargs, fmt);
4123         kfree_const(hdev->hw_info);
4124         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4125         va_end(vargs);
4126 }
4127 EXPORT_SYMBOL(hci_set_hw_info);
4128
4129 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
4130 {
4131         va_list vargs;
4132
4133         va_start(vargs, fmt);
4134         kfree_const(hdev->fw_info);
4135         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4136         va_end(vargs);
4137 }
4138 EXPORT_SYMBOL(hci_set_fw_info);
4139
4140 /* ---- Interface to upper protocols ---- */
4141
4142 int hci_register_cb(struct hci_cb *cb)
4143 {
4144         BT_DBG("%p name %s", cb, cb->name);
4145
4146         mutex_lock(&hci_cb_list_lock);
4147         list_add_tail(&cb->list, &hci_cb_list);
4148         mutex_unlock(&hci_cb_list_lock);
4149
4150         return 0;
4151 }
4152 EXPORT_SYMBOL(hci_register_cb);
4153
4154 int hci_unregister_cb(struct hci_cb *cb)
4155 {
4156         BT_DBG("%p name %s", cb, cb->name);
4157
4158         mutex_lock(&hci_cb_list_lock);
4159         list_del(&cb->list);
4160         mutex_unlock(&hci_cb_list_lock);
4161
4162         return 0;
4163 }
4164 EXPORT_SYMBOL(hci_unregister_cb);
4165
4166 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4167 {
4168         int err;
4169
4170         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4171                skb->len);
4172
4173         /* Time stamp */
4174         __net_timestamp(skb);
4175
4176         /* Send copy to monitor */
4177         hci_send_to_monitor(hdev, skb);
4178
4179         if (atomic_read(&hdev->promisc)) {
4180                 /* Send copy to the sockets */
4181                 hci_send_to_sock(hdev, skb);
4182         }
4183
4184         /* Get rid of skb owner prior to sending to the driver. */
4185         skb_orphan(skb);
4186
4187         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4188                 kfree_skb(skb);
4189                 return;
4190         }
4191
4192         err = hdev->send(hdev, skb);
4193         if (err < 0) {
4194                 bt_dev_err(hdev, "sending frame failed (%d)", err);
4195                 kfree_skb(skb);
4196         }
4197 }
4198
4199 /* Send HCI command */
4200 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4201                  const void *param)
4202 {
4203         struct sk_buff *skb;
4204
4205         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4206
4207         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4208         if (!skb) {
4209                 bt_dev_err(hdev, "no memory for command");
4210                 return -ENOMEM;
4211         }
4212
4213         /* Stand-alone HCI commands must be flagged as
4214          * single-command requests.
4215          */
4216         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4217
4218         skb_queue_tail(&hdev->cmd_q, skb);
4219         queue_work(hdev->workqueue, &hdev->cmd_work);
4220
4221         return 0;
4222 }
4223
4224 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4225                    const void *param)
4226 {
4227         struct sk_buff *skb;
4228
4229         if (hci_opcode_ogf(opcode) != 0x3f) {
4230                 /* A controller receiving a command shall respond with either
4231                  * a Command Status Event or a Command Complete Event.
4232                  * Therefore, all standard HCI commands must be sent via the
4233                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4234                  * Some vendors do not comply with this rule for vendor-specific
4235                  * commands and do not return any event. We want to support
4236                  * unresponded commands for such cases only.
4237                  */
4238                 bt_dev_err(hdev, "unresponded command not supported");
4239                 return -EINVAL;
4240         }
4241
4242         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4243         if (!skb) {
4244                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4245                            opcode);
4246                 return -ENOMEM;
4247         }
4248
4249         hci_send_frame(hdev, skb);
4250
4251         return 0;
4252 }
4253 EXPORT_SYMBOL(__hci_cmd_send);
4254
4255 /* Get data from the previously sent command */
4256 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4257 {
4258         struct hci_command_hdr *hdr;
4259
4260         if (!hdev->sent_cmd)
4261                 return NULL;
4262
4263         hdr = (void *) hdev->sent_cmd->data;
4264
4265         if (hdr->opcode != cpu_to_le16(opcode))
4266                 return NULL;
4267
4268         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4269
4270         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4271 }
4272
4273 /* Send HCI command and wait for command complete event */
4274 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4275                              const void *param, u32 timeout)
4276 {
4277         struct sk_buff *skb;
4278
4279         if (!test_bit(HCI_UP, &hdev->flags))
4280                 return ERR_PTR(-ENETDOWN);
4281
4282         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4283
4284         hci_req_sync_lock(hdev);
4285         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4286         hci_req_sync_unlock(hdev);
4287
4288         return skb;
4289 }
4290 EXPORT_SYMBOL(hci_cmd_sync);
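
/* Illustrative example (editor's addition): issuing a command
 * synchronously and consuming the Command Complete parameters; the
 * opcode and timeout are just examples:
 *
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data ...
 *	kfree_skb(skb);
 *
 * Note the ERR_PTR() convention: failure is encoded in the returned
 * pointer rather than signalled via NULL.
 */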
4291
4292 /* Send ACL data */
4293 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4294 {
4295         struct hci_acl_hdr *hdr;
4296         int len = skb->len;
4297
4298         skb_push(skb, HCI_ACL_HDR_SIZE);
4299         skb_reset_transport_header(skb);
4300         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4301         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4302         hdr->dlen   = cpu_to_le16(len);
4303 }
4304
4305 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4306                           struct sk_buff *skb, __u16 flags)
4307 {
4308         struct hci_conn *conn = chan->conn;
4309         struct hci_dev *hdev = conn->hdev;
4310         struct sk_buff *list;
4311
4312         skb->len = skb_headlen(skb);
4313         skb->data_len = 0;
4314
4315         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4316
4317         switch (hdev->dev_type) {
4318         case HCI_PRIMARY:
4319                 hci_add_acl_hdr(skb, conn->handle, flags);
4320                 break;
4321         case HCI_AMP:
4322                 hci_add_acl_hdr(skb, chan->handle, flags);
4323                 break;
4324         default:
4325                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4326                 return;
4327         }
4328
4329         list = skb_shinfo(skb)->frag_list;
4330         if (!list) {
4331                 /* Non-fragmented */
4332                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4333
4334                 skb_queue_tail(queue, skb);
4335         } else {
4336                 /* Fragmented */
4337                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4338
4339                 skb_shinfo(skb)->frag_list = NULL;
4340
4341                 /* Queue all fragments atomically. We need to use spin_lock_bh
4342                  * here because of 6LoWPAN links, as there this function is
4343                  * called from softirq and using normal spin lock could cause
4344                  * deadlocks.
4345                  */
4346                 spin_lock_bh(&queue->lock);
4347
4348                 __skb_queue_tail(queue, skb);
4349
4350                 flags &= ~ACL_START;
4351                 flags |= ACL_CONT;
4352                 do {
4353                         skb = list; list = list->next;
4354
4355                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4356                         hci_add_acl_hdr(skb, conn->handle, flags);
4357
4358                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4359
4360                         __skb_queue_tail(queue, skb);
4361                 } while (list);
4362
4363                 spin_unlock_bh(&queue->lock);
4364         }
4365 }
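
/* Editor's note on the fragmented path above: continuation fragments
 * arrive chained on skb_shinfo(skb)->frag_list.  The head skb keeps the
 * caller's ACL_START flags, every chained fragment is re-headered with
 * ACL_CONT, and the whole chain is queued atomically under the queue
 * lock so the TX scheduler never sees a partial packet.
 */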
4366
4367 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4368 {
4369         struct hci_dev *hdev = chan->conn->hdev;
4370
4371         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4372
4373         hci_queue_acl(chan, &chan->data_q, skb, flags);
4374
4375         queue_work(hdev->workqueue, &hdev->tx_work);
4376 }
4377
4378 /* Send SCO data */
4379 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4380 {
4381         struct hci_dev *hdev = conn->hdev;
4382         struct hci_sco_hdr hdr;
4383
4384         BT_DBG("%s len %d", hdev->name, skb->len);
4385
4386         hdr.handle = cpu_to_le16(conn->handle);
4387         hdr.dlen   = skb->len;
4388
4389         skb_push(skb, HCI_SCO_HDR_SIZE);
4390         skb_reset_transport_header(skb);
4391         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4392
4393         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4394
4395         skb_queue_tail(&conn->data_q, skb);
4396         queue_work(hdev->workqueue, &hdev->tx_work);
4397 }
4398
4399 /* ---- HCI TX task (outgoing data) ---- */
4400
4401 /* HCI Connection scheduler */
4402 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4403                                      int *quote)
4404 {
4405         struct hci_conn_hash *h = &hdev->conn_hash;
4406         struct hci_conn *conn = NULL, *c;
4407         unsigned int num = 0, min = ~0;
4408
4409         /* We don't have to lock the device here. Connections are always
4410          * added and removed with the TX task disabled. */
4411
4412         rcu_read_lock();
4413
4414         list_for_each_entry_rcu(c, &h->list, list) {
4415                 if (c->type != type || skb_queue_empty(&c->data_q))
4416                         continue;
4417
4418                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4419                         continue;
4420
4421                 num++;
4422
4423                 if (c->sent < min) {
4424                         min  = c->sent;
4425                         conn = c;
4426                 }
4427
4428                 if (hci_conn_num(hdev, type) == num)
4429                         break;
4430         }
4431
4432         rcu_read_unlock();
4433
4434         if (conn) {
4435                 int cnt, q;
4436
4437                 switch (conn->type) {
4438                 case ACL_LINK:
4439                         cnt = hdev->acl_cnt;
4440                         break;
4441                 case SCO_LINK:
4442                 case ESCO_LINK:
4443                         cnt = hdev->sco_cnt;
4444                         break;
4445                 case LE_LINK:
4446                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4447                         break;
4448                 default:
4449                         cnt = 0;
4450                         bt_dev_err(hdev, "unknown link type %d", conn->type);
4451                 }
4452
4453                 q = cnt / num;
4454                 *quote = q ? q : 1;
4455         } else
4456                 *quote = 0;
4457
4458         BT_DBG("conn %p quote %d", conn, *quote);
4459         return conn;
4460 }
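
/* Worked example (editor's addition) of the fair scheduling above:
 * with three ACL links that all have queued data and hdev->acl_cnt == 8,
 * the connection with the smallest ->sent count wins and receives a
 * quote of 8 / 3 = 2 packets; a zero quotient is rounded up to 1 so a
 * busy link is never starved outright.
 */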
4461
4462 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4463 {
4464         struct hci_conn_hash *h = &hdev->conn_hash;
4465         struct hci_conn *c;
4466
4467         bt_dev_err(hdev, "link tx timeout");
4468
4469         rcu_read_lock();
4470
4471         /* Kill stalled connections */
4472         list_for_each_entry_rcu(c, &h->list, list) {
4473                 if (c->type == type && c->sent) {
4474                         bt_dev_err(hdev, "killing stalled connection %pMR",
4475                                    &c->dst);
4476                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4477                 }
4478         }
4479
4480         rcu_read_unlock();
4481 }
4482
4483 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4484                                       int *quote)
4485 {
4486         struct hci_conn_hash *h = &hdev->conn_hash;
4487         struct hci_chan *chan = NULL;
4488         unsigned int num = 0, min = ~0, cur_prio = 0;
4489         struct hci_conn *conn;
4490         int cnt, q, conn_num = 0;
4491
4492         BT_DBG("%s", hdev->name);
4493
4494         rcu_read_lock();
4495
4496         list_for_each_entry_rcu(conn, &h->list, list) {
4497                 struct hci_chan *tmp;
4498
4499                 if (conn->type != type)
4500                         continue;
4501
4502                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4503                         continue;
4504
4505                 conn_num++;
4506
4507                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4508                         struct sk_buff *skb;
4509
4510                         if (skb_queue_empty(&tmp->data_q))
4511                                 continue;
4512
4513                         skb = skb_peek(&tmp->data_q);
4514                         if (skb->priority < cur_prio)
4515                                 continue;
4516
4517                         if (skb->priority > cur_prio) {
4518                                 num = 0;
4519                                 min = ~0;
4520                                 cur_prio = skb->priority;
4521                         }
4522
4523                         num++;
4524
4525                         if (conn->sent < min) {
4526                                 min  = conn->sent;
4527                                 chan = tmp;
4528                         }
4529                 }
4530
4531                 if (hci_conn_num(hdev, type) == conn_num)
4532                         break;
4533         }
4534
4535         rcu_read_unlock();
4536
4537         if (!chan)
4538                 return NULL;
4539
4540         switch (chan->conn->type) {
4541         case ACL_LINK:
4542                 cnt = hdev->acl_cnt;
4543                 break;
4544         case AMP_LINK:
4545                 cnt = hdev->block_cnt;
4546                 break;
4547         case SCO_LINK:
4548         case ESCO_LINK:
4549                 cnt = hdev->sco_cnt;
4550                 break;
4551         case LE_LINK:
4552                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4553                 break;
4554         default:
4555                 cnt = 0;
4556                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4557         }
4558
4559         q = cnt / num;
4560         *quote = q ? q : 1;
4561         BT_DBG("chan %p quote %d", chan, *quote);
4562         return chan;
4563 }
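
/* Editor's note: hci_chan_sent() refines the same idea per channel.
 * Whenever a higher-priority head skb is seen, num and min are reset,
 * so only channels whose queued data carries the current highest
 * priority compete; among those, the channel on the least-sent
 * connection is chosen.
 */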
4564
4565 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4566 {
4567         struct hci_conn_hash *h = &hdev->conn_hash;
4568         struct hci_conn *conn;
4569         int num = 0;
4570
4571         BT_DBG("%s", hdev->name);
4572
4573         rcu_read_lock();
4574
4575         list_for_each_entry_rcu(conn, &h->list, list) {
4576                 struct hci_chan *chan;
4577
4578                 if (conn->type != type)
4579                         continue;
4580
4581                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4582                         continue;
4583
4584                 num++;
4585
4586                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4587                         struct sk_buff *skb;
4588
4589                         if (chan->sent) {
4590                                 chan->sent = 0;
4591                                 continue;
4592                         }
4593
4594                         if (skb_queue_empty(&chan->data_q))
4595                                 continue;
4596
4597                         skb = skb_peek(&chan->data_q);
4598                         if (skb->priority >= HCI_PRIO_MAX - 1)
4599                                 continue;
4600
4601                         skb->priority = HCI_PRIO_MAX - 1;
4602
4603                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4604                                skb->priority);
4605                 }
4606
4607                 if (hci_conn_num(hdev, type) == num)
4608                         break;
4609         }
4610
4611         rcu_read_unlock();
4612
4613 }
4614
4615 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4616 {
4617         /* Calculate count of blocks used by this packet */
4618         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4619 }
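
/* Worked example (editor's addition): with hdev->block_len == 16, a
 * 100-byte ACL packet (96 bytes of payload after the 4-byte ACL header)
 * occupies DIV_ROUND_UP(96, 16) == 6 controller buffer blocks.
 */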
4620
4621 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4622 {
4623         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4624                 /* ACL tx timeout must be longer than the maximum
4625                  * link supervision timeout (40.9 seconds) */
4626                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4627                                        HCI_ACL_TX_TIMEOUT))
4628                         hci_link_tx_to(hdev, ACL_LINK);
4629         }
4630 }
4631
4632 /* Schedule SCO */
4633 static void hci_sched_sco(struct hci_dev *hdev)
4634 {
4635         struct hci_conn *conn;
4636         struct sk_buff *skb;
4637         int quote;
4638
4639         BT_DBG("%s", hdev->name);
4640
4641         if (!hci_conn_num(hdev, SCO_LINK))
4642                 return;
4643
4644         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4645                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4646                         BT_DBG("skb %p len %d", skb, skb->len);
4647                         hci_send_frame(hdev, skb);
4648
4649                         conn->sent++;
4650                         if (conn->sent == ~0)
4651                                 conn->sent = 0;
4652                 }
4653         }
4654 }
4655
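/* Schedule eSCO */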
4656 static void hci_sched_esco(struct hci_dev *hdev)
4657 {
4658         struct hci_conn *conn;
4659         struct sk_buff *skb;
4660         int quote;
4661
4662         BT_DBG("%s", hdev->name);
4663
4664         if (!hci_conn_num(hdev, ESCO_LINK))
4665                 return;
4666
4667         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4668                                                      &quote))) {
4669                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4670                         BT_DBG("skb %p len %d", skb, skb->len);
4671                         hci_send_frame(hdev, skb);
4672
4673                         conn->sent++;
4674                         if (conn->sent == ~0)
4675                                 conn->sent = 0;
4676                 }
4677         }
4678 }
4679
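/* Schedule ACL traffic under packet-based flow control: send frames
 * in priority order while ACL credits (hdev->acl_cnt) last, letting
 * pending SCO/eSCO frames through after every ACL frame sent.
 */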
4680 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4681 {
4682         unsigned int cnt = hdev->acl_cnt;
4683         struct hci_chan *chan;
4684         struct sk_buff *skb;
4685         int quote;
4686
4687         __check_timeout(hdev, cnt);
4688
4689         while (hdev->acl_cnt &&
4690                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4691                 u32 priority = (skb_peek(&chan->data_q))->priority;
4692                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4693                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4694                                skb->len, skb->priority);
4695
4696                         /* Stop if priority has changed */
4697                         if (skb->priority < priority)
4698                                 break;
4699
4700                         skb = skb_dequeue(&chan->data_q);
4701
4702                         hci_conn_enter_active_mode(chan->conn,
4703                                                    bt_cb(skb)->force_active);
4704
4705                         hci_send_frame(hdev, skb);
4706                         hdev->acl_last_tx = jiffies;
4707
4708                         hdev->acl_cnt--;
4709                         chan->sent++;
4710                         chan->conn->sent++;
4711
4712                         /* Send pending SCO packets right away */
4713                         hci_sched_sco(hdev);
4714                         hci_sched_esco(hdev);
4715                 }
4716         }
4717
4718         if (cnt != hdev->acl_cnt)
4719                 hci_prio_recalculate(hdev, ACL_LINK);
4720 }
4721
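/* Schedule ACL traffic under block-based flow control: credits count
 * controller buffer blocks rather than packets, so each frame costs
 * __get_blocks(hdev, skb) credits. AMP controllers schedule AMP_LINK
 * instead of ACL_LINK.
 */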
4722 static void hci_sched_acl_blk(struct hci_dev *hdev)
4723 {
4724         unsigned int cnt = hdev->block_cnt;
4725         struct hci_chan *chan;
4726         struct sk_buff *skb;
4727         int quote;
4728         u8 type;
4729
4730         __check_timeout(hdev, cnt);
4731
4732         BT_DBG("%s", hdev->name);
4733
4734         if (hdev->dev_type == HCI_AMP)
4735                 type = AMP_LINK;
4736         else
4737                 type = ACL_LINK;
4738
4739         while (hdev->block_cnt > 0 &&
4740                (chan = hci_chan_sent(hdev, type, &quote))) {
4741                 u32 priority = (skb_peek(&chan->data_q))->priority;
4742                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4743                         int blocks;
4744
4745                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4746                                skb->len, skb->priority);
4747
4748                         /* Stop if priority has changed */
4749                         if (skb->priority < priority)
4750                                 break;
4751
4752                         skb = skb_dequeue(&chan->data_q);
4753
4754                         blocks = __get_blocks(hdev, skb);
4755                         if (blocks > hdev->block_cnt)
4756                                 return;
4757
4758                         hci_conn_enter_active_mode(chan->conn,
4759                                                    bt_cb(skb)->force_active);
4760
4761                         hci_send_frame(hdev, skb);
4762                         hdev->acl_last_tx = jiffies;
4763
4764                         hdev->block_cnt -= blocks;
4765                         quote -= blocks;
4766
4767                         chan->sent += blocks;
4768                         chan->conn->sent += blocks;
4769                 }
4770         }
4771
4772         if (cnt != hdev->block_cnt)
4773                 hci_prio_recalculate(hdev, type);
4774 }
4775
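/* Dispatch ACL scheduling according to the controller's flow control
 * mode.
 */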
4776 static void hci_sched_acl(struct hci_dev *hdev)
4777 {
4778         BT_DBG("%s", hdev->name);
4779
4780         /* No ACL link over BR/EDR controller */
4781         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4782                 return;
4783
4784         /* No AMP link over AMP controller */
4785         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4786                 return;
4787
4788         switch (hdev->flow_ctl_mode) {
4789         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4790                 hci_sched_acl_pkt(hdev);
4791                 break;
4792
4793         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4794                 hci_sched_acl_blk(hdev);
4795                 break;
4796         }
4797 }
4798
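/* Schedule LE traffic. Controllers without a separate LE buffer pool
 * (le_pkts == 0) borrow ACL credits, which is why the remaining count
 * is written back to either le_cnt or acl_cnt below.
 */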
4799 static void hci_sched_le(struct hci_dev *hdev)
4800 {
4801         struct hci_chan *chan;
4802         struct sk_buff *skb;
4803         int quote, cnt, tmp;
4804
4805         BT_DBG("%s", hdev->name);
4806
4807         if (!hci_conn_num(hdev, LE_LINK))
4808                 return;
4809
4810         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4811
4812         __check_timeout(hdev, cnt);
4813
4814         tmp = cnt;
4815         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4816                 u32 priority = (skb_peek(&chan->data_q))->priority;
4817                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4818                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4819                                skb->len, skb->priority);
4820
4821                         /* Stop if priority has changed */
4822                         if (skb->priority < priority)
4823                                 break;
4824
4825                         skb = skb_dequeue(&chan->data_q);
4826
4827                         hci_send_frame(hdev, skb);
4828                         hdev->le_last_tx = jiffies;
4829
4830                         cnt--;
4831                         chan->sent++;
4832                         chan->conn->sent++;
4833
4834                         /* Send pending SCO packets right away */
4835                         hci_sched_sco(hdev);
4836                         hci_sched_esco(hdev);
4837                 }
4838         }
4839
4840         if (hdev->le_pkts)
4841                 hdev->le_cnt = cnt;
4842         else
4843                 hdev->acl_cnt = cnt;
4844
4845         if (cnt != tmp)
4846                 hci_prio_recalculate(hdev, LE_LINK);
4847 }
4848
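/* TX work handler: run the per-type schedulers unless userspace holds
 * the device exclusively (HCI_USER_CHANNEL), then flush raw packets
 * queued on hdev->raw_q.
 */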
4849 static void hci_tx_work(struct work_struct *work)
4850 {
4851         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4852         struct sk_buff *skb;
4853
4854         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4855                hdev->sco_cnt, hdev->le_cnt);
4856
4857         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4858                 /* Schedule queues and send stuff to HCI driver */
4859                 hci_sched_sco(hdev);
4860                 hci_sched_esco(hdev);
4861                 hci_sched_acl(hdev);
4862                 hci_sched_le(hdev);
4863         }
4864
4865         /* Send next queued raw (unknown type) packet */
4866         while ((skb = skb_dequeue(&hdev->raw_q)))
4867                 hci_send_frame(hdev, skb);
4868 }
4869
4870 /* ----- HCI RX task (incoming data processing) ----- */
4871
4872 /* ACL data packet */
4873 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4874 {
4875         struct hci_acl_hdr *hdr = (void *) skb->data;
4876         struct hci_conn *conn;
4877         __u16 handle, flags;
4878
4879         skb_pull(skb, HCI_ACL_HDR_SIZE);
4880
4881         handle = __le16_to_cpu(hdr->handle);
4882         flags  = hci_flags(handle);
4883         handle = hci_handle(handle);
4884
4885         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4886                handle, flags);
4887
4888         hdev->stat.acl_rx++;
4889
4890         hci_dev_lock(hdev);
4891         conn = hci_conn_hash_lookup_handle(hdev, handle);
4892         hci_dev_unlock(hdev);
4893
4894         if (conn) {
4895                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4896
4897                 /* Send to upper protocol */
4898                 l2cap_recv_acldata(conn, skb, flags);
4899                 return;
4900         }
4901
4902         bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4903                    handle);
4904
4905         kfree_skb(skb);
4906 }
4907
4908 /* SCO data packet */
4909 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4910 {
4911         struct hci_sco_hdr *hdr = (void *) skb->data;
4912         struct hci_conn *conn;
4913         __u16 handle, flags;
4914
4915         skb_pull(skb, HCI_SCO_HDR_SIZE);
4916
4917         handle = __le16_to_cpu(hdr->handle);
4918         flags  = hci_flags(handle);
4919         handle = hci_handle(handle);
4920
4921         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4922                handle, flags);
4923
4924         hdev->stat.sco_rx++;
4925
4926         hci_dev_lock(hdev);
4927         conn = hci_conn_hash_lookup_handle(hdev, handle);
4928         hci_dev_unlock(hdev);
4929
4930         if (conn) {
4931                 /* Send to upper protocol */
4932                 bt_cb(skb)->sco.pkt_status = flags & 0x03;
4933                 sco_recv_scodata(conn, skb);
4934                 return;
4935         }
4936
4937         bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4938                    handle);
4939
4940         kfree_skb(skb);
4941 }
4942
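/* The current request is complete when the command queue is empty or
 * when the next queued command starts a new request.
 */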
4943 static bool hci_req_is_complete(struct hci_dev *hdev)
4944 {
4945         struct sk_buff *skb;
4946
4947         skb = skb_peek(&hdev->cmd_q);
4948         if (!skb)
4949                 return true;
4950
4951         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4952 }
4953
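/* Re-queue a clone of the last sent command for retransmission;
 * HCI_OP_RESET is intentionally never resent.
 */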
4954 static void hci_resend_last(struct hci_dev *hdev)
4955 {
4956         struct hci_command_hdr *sent;
4957         struct sk_buff *skb;
4958         u16 opcode;
4959
4960         if (!hdev->sent_cmd)
4961                 return;
4962
4963         sent = (void *) hdev->sent_cmd->data;
4964         opcode = __le16_to_cpu(sent->opcode);
4965         if (opcode == HCI_OP_RESET)
4966                 return;
4967
4968         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4969         if (!skb)
4970                 return;
4971
4972         skb_queue_head(&hdev->cmd_q, skb);
4973         queue_work(hdev->workqueue, &hdev->cmd_work);
4974 }
4975
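/* Match a command complete/status event against the current request
 * and, once the request has finished, hand back its completion
 * callback through req_complete or req_complete_skb. If a command
 * failed, the remaining queued commands of the same request are
 * dropped.
 */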
4976 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4977                           hci_req_complete_t *req_complete,
4978                           hci_req_complete_skb_t *req_complete_skb)
4979 {
4980         struct sk_buff *skb;
4981         unsigned long flags;
4982
4983         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4984
4985         /* If the completed command doesn't match the last one that was
4986          * sent, we need to do special handling of it.
4987          */
4988         if (!hci_sent_cmd_data(hdev, opcode)) {
4989                 /* Some CSR based controllers generate a spontaneous
4990                  * reset complete event during init and any pending
4991                  * command will never be completed. In such a case we
4992                  * need to resend whatever was the last sent
4993                  * command.
4994                  */
4995                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4996                         hci_resend_last(hdev);
4997
4998                 return;
4999         }
5000
5001         /* If we reach this point, this event matches the last command sent */
5002         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
5003
5004         /* If the command succeeded and there are still more commands in
5005          * this request, the request is not yet complete.
5006          */
5007         if (!status && !hci_req_is_complete(hdev))
5008                 return;
5009
5010         /* If this was the last command in a request, the complete
5011          * callback would be found in hdev->sent_cmd instead of the
5012          * command queue (hdev->cmd_q).
5013          */
5014         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
5015                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
5016                 return;
5017         }
5018
5019         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
5020                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
5021                 return;
5022         }
5023
5024         /* Remove all pending commands belonging to this request */
5025         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5026         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5027                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
5028                         __skb_queue_head(&hdev->cmd_q, skb);
5029                         break;
5030                 }
5031
5032                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
5033                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
5034                 else
5035                         *req_complete = bt_cb(skb)->hci.req_complete;
5036                 kfree_skb(skb);
5037         }
5038         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5039 }
5040
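/* RX work handler: drain hdev->rx_q, mirroring each frame to the
 * monitor (and to raw sockets in promiscuous mode) before dispatching
 * it by packet type.
 */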
5041 static void hci_rx_work(struct work_struct *work)
5042 {
5043         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5044         struct sk_buff *skb;
5045
5046         BT_DBG("%s", hdev->name);
5047
5048         while ((skb = skb_dequeue(&hdev->rx_q))) {
5049                 /* Send copy to monitor */
5050                 hci_send_to_monitor(hdev, skb);
5051
5052                 if (atomic_read(&hdev->promisc)) {
5053                         /* Send copy to the sockets */
5054                         hci_send_to_sock(hdev, skb);
5055                 }
5056
5057                 /* If the device has been opened in HCI_USER_CHANNEL,
5058                  * userspace has exclusive access to the device.
5059                  * While the device is in HCI_INIT, we still need to
5060                  * pass the data packets to the driver in order to
5061                  * complete its setup().
5062                  */
5063                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5064                     !test_bit(HCI_INIT, &hdev->flags)) {
5065                         kfree_skb(skb);
5066                         continue;
5067                 }
5068
5069                 if (test_bit(HCI_INIT, &hdev->flags)) {
5070                         /* Don't process data packets in this state. */
5071                         switch (hci_skb_pkt_type(skb)) {
5072                         case HCI_ACLDATA_PKT:
5073                         case HCI_SCODATA_PKT:
5074                         case HCI_ISODATA_PKT:
5075                                 kfree_skb(skb);
5076                                 continue;
5077                         }
5078                 }
5079
5080                 /* Process frame */
5081                 switch (hci_skb_pkt_type(skb)) {
5082                 case HCI_EVENT_PKT:
5083                         BT_DBG("%s Event packet", hdev->name);
5084                         hci_event_packet(hdev, skb);
5085                         break;
5086
5087                 case HCI_ACLDATA_PKT:
5088                         BT_DBG("%s ACL data packet", hdev->name);
5089                         hci_acldata_packet(hdev, skb);
5090                         break;
5091
5092                 case HCI_SCODATA_PKT:
5093                         BT_DBG("%s SCO data packet", hdev->name);
5094                         hci_scodata_packet(hdev, skb);
5095                         break;
5096
5097                 default:
5098                         kfree_skb(skb);
5099                         break;
5100                 }
5101         }
5102 }
5103
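/* CMD work handler: if the controller has command credits, transmit
 * the next queued command, keep a clone in hdev->sent_cmd for
 * completion matching, and arm the command timeout unless a reset is
 * in flight.
 */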
5104 static void hci_cmd_work(struct work_struct *work)
5105 {
5106         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5107         struct sk_buff *skb;
5108
5109         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5110                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5111
5112         /* Send queued commands */
5113         if (atomic_read(&hdev->cmd_cnt)) {
5114                 skb = skb_dequeue(&hdev->cmd_q);
5115                 if (!skb)
5116                         return;
5117
5118                 kfree_skb(hdev->sent_cmd);
5119
5120                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5121                 if (hdev->sent_cmd) {
5122                         if (hci_req_status_pend(hdev))
5123                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
5124                         atomic_dec(&hdev->cmd_cnt);
5125                         hci_send_frame(hdev, skb);
5126                         if (test_bit(HCI_RESET, &hdev->flags))
5127                                 cancel_delayed_work(&hdev->cmd_timer);
5128                         else
5129                                 schedule_delayed_work(&hdev->cmd_timer,
5130                                                       HCI_CMD_TIMEOUT);
5131                 } else {
5132                         skb_queue_head(&hdev->cmd_q, skb);
5133                         queue_work(hdev->workqueue, &hdev->cmd_work);
5134                 }
5135         }
5136 }