/*
 * Bluetooth Software UART Qualcomm protocol
 *
 * HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
 * protocol extension to H4.
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved.
 *
 * Acknowledgements:
 * This file is based on hci_ll.c, which was...
 * Written by Ohad Ben-Cohen <[email protected]>
 * which was in turn based on hci_h4.c, which was written
 * by Maxim Krasnyansky and Marcel Holtmann.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/kernel.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_uart.h"
#include "btqca.h"

/* HCI_IBS protocol messages */
#define HCI_IBS_SLEEP_IND	0xFE
#define HCI_IBS_WAKE_IND	0xFD
#define HCI_IBS_WAKE_ACK	0xFC
#define HCI_MAX_IBS_SIZE	10
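
/* A note on the IBS handshake as used below: the host sends
 * HCI_IBS_WAKE_IND to wake the controller and expects HCI_IBS_WAKE_ACK
 * in return; either side sends HCI_IBS_SLEEP_IND when it is about to
 * stop transmitting. All three are single-byte messages.
 */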

/* Controller states */
#define STATE_IN_BAND_SLEEP_ENABLED	1

#define IBS_WAKE_RETRANS_TIMEOUT_MS	100
#define IBS_TX_IDLE_TIMEOUT_MS		2000
#define BAUDRATE_SETTLE_TIMEOUT_MS	300

/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
	HCI_IBS_TX_ASLEEP,
	HCI_IBS_TX_WAKING,
	HCI_IBS_TX_AWAKE,
};

/* HCI_IBS receive side sleep protocol states */
enum rx_states {
	HCI_IBS_RX_ASLEEP,
	HCI_IBS_RX_AWAKE,
};

/* HCI_IBS transmit and receive side clock state vote */
enum hci_ibs_clock_state_vote {
	HCI_IBS_VOTE_STATS_UPDATE,
	HCI_IBS_TX_VOTE_CLOCK_ON,
	HCI_IBS_TX_VOTE_CLOCK_OFF,
	HCI_IBS_RX_VOTE_CLOCK_ON,
	HCI_IBS_RX_VOTE_CLOCK_OFF,
};

struct qca_data {
	struct hci_uart *hu;
	struct sk_buff *rx_skb;
	struct sk_buff_head txq;
	struct sk_buff_head tx_wait_q;	/* HCI_IBS wait queue */
	spinlock_t hci_ibs_lock;	/* HCI_IBS state lock */
	u8 tx_ibs_state;		/* HCI_IBS transmit side power state */
	u8 rx_ibs_state;		/* HCI_IBS receive side power state */
	bool tx_vote;			/* Clock must be on for TX */
	bool rx_vote;			/* Clock must be on for RX */
	struct timer_list tx_idle_timer;
	u32 tx_idle_delay;
	struct timer_list wake_retrans_timer;
	u32 wake_retrans;
	struct workqueue_struct *workqueue;
	struct work_struct ws_awake_rx;
	struct work_struct ws_awake_device;
	struct work_struct ws_rx_vote_off;
	struct work_struct ws_tx_vote_off;
	unsigned long flags;

	/* For debugging purposes */
	u64 ibs_sent_wacks;
	u64 ibs_sent_slps;
	u64 ibs_sent_wakes;
	u64 ibs_recv_wacks;
	u64 ibs_recv_slps;
	u64 ibs_recv_wakes;
	u64 vote_last_jif;
	u32 vote_on_ms;
	u32 vote_off_ms;
	u64 tx_votes_on;
	u64 rx_votes_on;
	u64 tx_votes_off;
	u64 rx_votes_off;
	u64 votes_on;
	u64 votes_off;
};

static void __serial_clock_on(struct tty_struct *tty)
{
	/* TODO: Some chipsets require the UART clock to be enabled manually
	 * on the host side to save power. Add chipset-specific clock
	 * control here if needed.
	 */
}

static void __serial_clock_off(struct tty_struct *tty)
{
	/* TODO: Some chipsets require the UART clock to be disabled manually
	 * on the host side to save power. Add chipset-specific clock
	 * control here if needed.
	 */
}

/* serial_clock_vote needs to be called with the ibs lock held */
static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;
	unsigned int diff;

	bool old_vote = (qca->tx_vote | qca->rx_vote);
	bool new_vote;

	switch (vote) {
	case HCI_IBS_VOTE_STATS_UPDATE:
		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (old_vote)
			qca->vote_off_ms += diff;
		else
			qca->vote_on_ms += diff;
		return;

	case HCI_IBS_TX_VOTE_CLOCK_ON:
		qca->tx_vote = true;
		qca->tx_votes_on++;
		new_vote = true;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_ON:
		qca->rx_vote = true;
		qca->rx_votes_on++;
		new_vote = true;
		break;

	case HCI_IBS_TX_VOTE_CLOCK_OFF:
		qca->tx_vote = false;
		qca->tx_votes_off++;
		new_vote = qca->rx_vote | qca->tx_vote;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_OFF:
		qca->rx_vote = false;
		qca->rx_votes_off++;
		new_vote = qca->rx_vote | qca->tx_vote;
		break;

	default:
		BT_ERR("Voting irregularity");
		return;
	}

	if (new_vote != old_vote) {
		if (new_vote)
			__serial_clock_on(hu->tty);
		else
			__serial_clock_off(hu->tty);

		BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
		       vote ? "true" : "false");

		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (new_vote) {
			qca->votes_on++;
			qca->vote_off_ms += diff;
		} else {
			qca->votes_off++;
			qca->vote_on_ms += diff;
		}
		qca->vote_last_jif = jiffies;
	}
}

/* Builds and sends an HCI_IBS command packet.
 * These are very simple packets with only 1 cmd byte.
 */
static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
{
	int err = 0;
	struct sk_buff *skb = NULL;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);

	skb = bt_skb_alloc(1, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("Failed to allocate memory for HCI_IBS packet");
		return -ENOMEM;
	}

	/* Assign HCI_IBS type */
	*skb_put(skb, 1) = cmd;

	skb_queue_tail(&qca->txq, skb);

	return err;
}

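/* Workqueue handler: wake the controller. Votes the TX serial clock on,
 * sends HCI_IBS_WAKE_IND, arms the wake retransmit timer and then kicks
 * the UART TX path so the indication actually goes out.
 */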
static void qca_wq_awake_device(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_device);
	struct hci_uart *hu = qca->hu;
	unsigned long retrans_delay;

	BT_DBG("hu %p wq awake device", hu);

	/* Vote for serial clock */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);

	spin_lock(&qca->hci_ibs_lock);

	/* Send wake indication to device */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
		BT_ERR("Failed to send WAKE to device");

	qca->ibs_sent_wakes++;

	/* Start retransmit timer */
	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);

	spin_unlock(&qca->hci_ibs_lock);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

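/* Workqueue handler: the controller asked us to wake up. Votes the RX
 * serial clock on, marks the RX side awake and acknowledges the request
 * with HCI_IBS_WAKE_ACK.
 */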
static void qca_wq_awake_rx(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_rx);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p wq awake rx", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);

	spin_lock(&qca->hci_ibs_lock);
	qca->rx_ibs_state = HCI_IBS_RX_AWAKE;

	/* Always acknowledge device wake up,
	 * sending IBS message doesn't count as TX ON.
	 */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
		BT_ERR("Failed to acknowledge device wake up");

	qca->ibs_sent_wacks++;

	spin_unlock(&qca->hci_ibs_lock);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_rx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p rx clock vote off", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
}

static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_tx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p tx clock vote off", hu);

	/* Run HCI tx handling unlocked */
	hci_uart_tx_wakeup(hu);

	/* Now that message queued to tty driver, vote for tty clocks off.
	 * It is up to the tty driver to pend the clocks off until tx done.
	 */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
}

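/* TX idle timer: fires when nothing has been queued for tx_idle_delay ms.
 * If the TX side is still awake, announce HCI_IBS_SLEEP_IND, move to
 * HCI_IBS_TX_ASLEEP and drop the TX clock vote from the workqueue.
 */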
static void hci_ibs_tx_idle_timeout(unsigned long arg)
{
	struct hci_uart *hu = (struct hci_uart *)arg;
	struct qca_data *qca = hu->priv;
	unsigned long flags;

	BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* TX_IDLE, go to SLEEP */
		if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
			BT_ERR("Failed to send SLEEP to device");
			break;
		}
		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
		qca->ibs_sent_slps++;
		queue_work(qca->workqueue, &qca->ws_tx_vote_off);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_WAKING:
		/* Fall through */

	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}

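/* Wake retransmit timer: no HCI_IBS_WAKE_ACK arrived in time. While the
 * TX side is still in HCI_IBS_TX_WAKING, resend HCI_IBS_WAKE_IND and
 * re-arm the timer.
 */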
static void hci_ibs_wake_retrans_timeout(unsigned long arg)
{
	struct hci_uart *hu = (struct hci_uart *)arg;
	struct qca_data *qca = hu->priv;
	unsigned long flags, retrans_delay;
	bool retransmit = false;

	BT_DBG("hu %p wake retransmit timeout in %d state",
	       hu, qca->tx_ibs_state);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_WAKING:
		/* No WAKE_ACK, retransmit WAKE */
		retransmit = true;
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
			BT_ERR("Failed to acknowledge device wake up");
			break;
		}
		qca->ibs_sent_wakes++;
		retrans_delay = msecs_to_jiffies(qca->wake_retrans);
		mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_AWAKE:
		/* Fall through */

	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	if (retransmit)
		hci_uart_tx_wakeup(hu);
}

/* Initialize protocol */
static int qca_open(struct hci_uart *hu)
{
	struct qca_data *qca;

	BT_DBG("hu %p qca_open", hu);

	qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC);
	if (!qca)
		return -ENOMEM;

	skb_queue_head_init(&qca->txq);
	skb_queue_head_init(&qca->tx_wait_q);
	spin_lock_init(&qca->hci_ibs_lock);
	qca->workqueue = create_singlethread_workqueue("qca_wq");
	if (!qca->workqueue) {
		BT_ERR("QCA Workqueue not initialized properly");
		kfree(qca);
		return -ENOMEM;
	}

	INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
	INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
	INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
	INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);

	qca->hu = hu;

	/* Assume we start with both sides asleep -- extra wakes OK */
	qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
	qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;

	/* clocks actually on, but we start votes off */
	qca->tx_vote = false;
	qca->rx_vote = false;
	qca->flags = 0;

	qca->ibs_sent_wacks = 0;
	qca->ibs_sent_slps = 0;
	qca->ibs_sent_wakes = 0;
	qca->ibs_recv_wacks = 0;
	qca->ibs_recv_slps = 0;
	qca->ibs_recv_wakes = 0;
	qca->vote_last_jif = jiffies;
	qca->vote_on_ms = 0;
	qca->vote_off_ms = 0;
	qca->votes_on = 0;
	qca->votes_off = 0;
	qca->tx_votes_on = 0;
	qca->tx_votes_off = 0;
	qca->rx_votes_on = 0;
	qca->rx_votes_off = 0;

	hu->priv = qca;

	init_timer(&qca->wake_retrans_timer);
	qca->wake_retrans_timer.function = hci_ibs_wake_retrans_timeout;
	qca->wake_retrans_timer.data = (u_long)hu;
	qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;

	init_timer(&qca->tx_idle_timer);
	qca->tx_idle_timer.function = hci_ibs_tx_idle_timeout;
	qca->tx_idle_timer.data = (u_long)hu;
	qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;

	BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
	       qca->tx_idle_delay, qca->wake_retrans);

	return 0;
}

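/* Expose the IBS state machines and clock voting statistics in an "ibs"
 * subdirectory of the controller's debugfs directory.
 */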
static void qca_debugfs_init(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct dentry *ibs_dir;
	umode_t mode;

	if (!hdev->debugfs)
		return;

	ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);

	/* read only */
	mode = S_IRUGO;
	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
	debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
	debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
			   &qca->ibs_sent_slps);
	debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
			   &qca->ibs_sent_wakes);
	debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
			   &qca->ibs_sent_wacks);
	debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
			   &qca->ibs_recv_slps);
	debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
			   &qca->ibs_recv_wakes);
	debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
			   &qca->ibs_recv_wacks);
	debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
	debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
	debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
	debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
	debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
	debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
	debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
	debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
	debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
	debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);

	/* read/write */
	mode = S_IRUGO | S_IWUSR;
	debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
	debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
			   &qca->tx_idle_delay);
}

/* Flush protocol data */
static int qca_flush(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca flush", hu);

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);

	return 0;
}

/* Close protocol */
static int qca_close(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca close", hu);

	serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);
	del_timer(&qca->tx_idle_timer);
	del_timer(&qca->wake_retrans_timer);
	destroy_workqueue(qca->workqueue);
	qca->hu = NULL;

	kfree_skb(qca->rx_skb);

	hu->priv = NULL;

	kfree(qca);

	return 0;
}

/* Called upon a wake-up-indication from the device.
 */
static void device_want_to_wakeup(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to wake up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wakes++;

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_ASLEEP:
		/* Make sure the clock is on - we may have turned it off since
		 * receiving the last wake-up indication. The awake rx work
		 * re-votes the rx clock and sends the acknowledgement.
		 */
		queue_work(qca->workqueue, &qca->ws_awake_rx);
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;

	case HCI_IBS_RX_AWAKE:
		/* Always acknowledge device wake up,
		 * sending IBS message doesn't count as TX ON.
		 */
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
			BT_ERR("Failed to acknowledge device wake up");
			break;
		}
		qca->ibs_sent_wacks++;
		break;

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

/* Called upon a sleep-indication from the device.
 */
static void device_want_to_sleep(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to sleep", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_slps++;

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_AWAKE:
		/* Update state */
		qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
		/* Vote off rx clock under workqueue */
		queue_work(qca->workqueue, &qca->ws_rx_vote_off);
		break;

	case HCI_IBS_RX_ASLEEP:
		/* Fall through */

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}

/* Called upon wake-up-acknowledgement from the device
 */
static void device_woke_up(struct hci_uart *hu)
{
	unsigned long flags, idle_delay;
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb = NULL;

	BT_DBG("hu %p woke up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wacks++;

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* Expect one if we send 2 WAKEs */
		BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;

	case HCI_IBS_TX_WAKING:
		/* Send pending packets */
		while ((skb = skb_dequeue(&qca->tx_wait_q)))
			skb_queue_tail(&qca->txq, skb);

		/* Switch timers and change state to HCI_IBS_TX_AWAKE */
		del_timer(&qca->wake_retrans_timer);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
		break;

	case HCI_IBS_TX_ASLEEP:
		/* Fall through */

	default:
		BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

/* Enqueue frame for transmission (padding, crc, etc); may be called from
 * two simultaneous tasklets.
 */
static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	unsigned long flags = 0, idle_delay;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
	       qca->tx_ibs_state);

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

	/* Don't go to sleep in the middle of a patch download, or when
	 * Out-Of-Band (GPIO-controlled) sleep is selected.
	 */
	if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
		skb_queue_tail(&qca->txq, skb);
		return 0;
	}

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	/* Act according to current state */
	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		BT_DBG("Device awake, sending normally");
		skb_queue_tail(&qca->txq, skb);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
		BT_DBG("Device asleep, waking up and queueing packet");
		/* Save packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);

		qca->tx_ibs_state = HCI_IBS_TX_WAKING;
		/* Schedule a work queue to wake up device */
		queue_work(qca->workqueue, &qca->ws_awake_device);
		break;

	case HCI_IBS_TX_WAKING:
		BT_DBG("Device waking up, queueing packet");
		/* Transient state; just keep packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);
		break;

	default:
		BT_ERR("Illegal tx state: %d (losing packet)",
		       qca->tx_ibs_state);
		kfree_skb(skb);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	return 0;
}

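/* The three handlers below are invoked from the H4 receive path (see
 * qca_recv_pkts) whenever a single-byte IBS message arrives from the
 * controller; each one dispatches to the matching state handler and
 * frees the skb.
 */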
static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);

	device_want_to_sleep(hu);

	kfree_skb(skb);
	return 0;
}

static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);

	device_want_to_wakeup(hu);

	kfree_skb(skb);
	return 0;
}

static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);

	device_woke_up(hu);

	kfree_skb(skb);
	return 0;
}

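/* Packet descriptors for h4_recv_buf(): the three IBS messages carry no
 * payload beyond the type byte, so hlen, loff and lsize are all zero.
 */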
#define QCA_IBS_SLEEP_IND_EVENT \
	.type = HCI_IBS_SLEEP_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_IND_EVENT \
	.type = HCI_IBS_WAKE_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_ACK_EVENT \
	.type = HCI_IBS_WAKE_ACK, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

static const struct h4_recv_pkt qca_recv_pkts[] = {
	{ H4_RECV_ACL,             .recv = hci_recv_frame    },
	{ H4_RECV_SCO,             .recv = hci_recv_frame    },
	{ H4_RECV_EVENT,           .recv = hci_recv_frame    },
	{ QCA_IBS_WAKE_IND_EVENT,  .recv = qca_ibs_wake_ind  },
	{ QCA_IBS_WAKE_ACK_EVENT,  .recv = qca_ibs_wake_ack  },
	{ QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
};

static int qca_recv(struct hci_uart *hu, const void *data, int count)
{
	struct qca_data *qca = hu->priv;

	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
		return -EUNATCH;

	qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
				  qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
	if (IS_ERR(qca->rx_skb)) {
		int err = PTR_ERR(qca->rx_skb);
		BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
		qca->rx_skb = NULL;
		return err;
	}

	return count;
}

static struct sk_buff *qca_dequeue(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	return skb_dequeue(&qca->txq);
}

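/* Map a UART line speed to the QCA vendor baudrate code used by the
 * set-baudrate command; unsupported speeds fall back to 115200.
 */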
static uint8_t qca_get_baudrate_value(int speed)
{
	switch (speed) {
	case 9600:
		return QCA_BAUDRATE_9600;
	case 19200:
		return QCA_BAUDRATE_19200;
	case 38400:
		return QCA_BAUDRATE_38400;
	case 57600:
		return QCA_BAUDRATE_57600;
	case 115200:
		return QCA_BAUDRATE_115200;
	case 230400:
		return QCA_BAUDRATE_230400;
	case 460800:
		return QCA_BAUDRATE_460800;
	case 500000:
		return QCA_BAUDRATE_500000;
	case 921600:
		return QCA_BAUDRATE_921600;
	case 1000000:
		return QCA_BAUDRATE_1000000;
	case 2000000:
		return QCA_BAUDRATE_2000000;
	case 3000000:
		return QCA_BAUDRATE_3000000;
	case 3500000:
		return QCA_BAUDRATE_3500000;
	default:
		return QCA_BAUDRATE_115200;
	}
}

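/* Queue the QCA vendor-specific HCI command that switches the controller
 * to a new baudrate. cmd[] is a raw H4 frame: packet type 0x01 (command),
 * little-endian opcode 0xfc48, one parameter byte carrying the baudrate
 * code. The host UART speed is changed separately by the caller.
 */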
static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb;
	u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };

	if (baudrate > QCA_BAUDRATE_3000000)
		return -EINVAL;

	cmd[4] = baudrate;

	skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC);
	if (!skb) {
		BT_ERR("Failed to allocate memory for baudrate packet");
		return -ENOMEM;
	}

	/* Assign commands to change baudrate and packet type. */
	memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;

	skb_queue_tail(&qca->txq, skb);
	hci_uart_tx_wakeup(hu);

	/* Wait 300 ms for the controller to change to the new baudrate.
	 * The controller comes back after it has processed this HCI command,
	 * and only then can the host communicate with it at the new baudrate.
	 */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
	set_current_state(TASK_INTERRUPTIBLE);

	return 0;
}

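/* Controller bring-up for QCA ROME: IBS is kept disabled while the
 * firmware patch and NVM are downloaded via qca_uart_setup_rome(), the
 * UART is moved from the init to the operational speed, and IBS plus
 * the debugfs statistics are enabled once setup succeeds.
 */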
static int qca_setup(struct hci_uart *hu)
{
	struct hci_dev *hdev = hu->hdev;
	struct qca_data *qca = hu->priv;
	unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
	int ret;

	BT_INFO("%s: ROME setup", hdev->name);

	/* Patch downloading has to be done without IBS mode */
	clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);

	/* Setup initial baudrate */
	speed = 0;
	if (hu->init_speed)
		speed = hu->init_speed;
	else if (hu->proto->init_speed)
		speed = hu->proto->init_speed;

	if (speed)
		hci_uart_set_baudrate(hu, speed);

	/* Setup user speed if needed */
	speed = 0;
	if (hu->oper_speed)
		speed = hu->oper_speed;
	else if (hu->proto->oper_speed)
		speed = hu->proto->oper_speed;

	if (speed) {
		qca_baudrate = qca_get_baudrate_value(speed);

		BT_INFO("%s: Set UART speed to %d", hdev->name, speed);
		ret = qca_set_baudrate(hdev, qca_baudrate);
		if (ret) {
			BT_ERR("%s: Failed to change the baud rate (%d)",
			       hdev->name, ret);
			return ret;
		}
		hci_uart_set_baudrate(hu, speed);
	}

	/* Setup patch / NVM configurations */
	ret = qca_uart_setup_rome(hdev, qca_baudrate);
	if (!ret) {
		set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
		qca_debugfs_init(hdev);
	}

	/* Setup bdaddr */
	hu->hdev->set_bdaddr = qca_set_bdaddr_rome;

	return ret;
}

static struct hci_uart_proto qca_proto = {
	.id		= HCI_UART_QCA,
	.name		= "QCA",
	.manufacturer	= 29,
	.init_speed	= 115200,
	.oper_speed	= 3000000,
	.open		= qca_open,
	.close		= qca_close,
	.flush		= qca_flush,
	.setup		= qca_setup,
	.recv		= qca_recv,
	.enqueue	= qca_enqueue,
	.dequeue	= qca_dequeue,
};

int __init qca_init(void)
{
	return hci_uart_register_proto(&qca_proto);
}

int __exit qca_deinit(void)
{
	return hci_uart_unregister_proto(&qca_proto);
}