/*
 * Bluetooth Software UART Qualcomm protocol
 *
 * HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
 * protocol extension to H4.
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved.
 *
 * This file is based on hci_ll.c, which was in turn based on hci_h4.c,
 * which was written by Maxim Krasnyansky and Marcel Holtmann.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */
#include <linux/kernel.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_uart.h"
#include "btqca.h"
/* HCI_IBS protocol messages */
#define HCI_IBS_SLEEP_IND	0xFE
#define HCI_IBS_WAKE_IND	0xFD
#define HCI_IBS_WAKE_ACK	0xFC
#define HCI_MAX_IBS_SIZE	10

/* Controller states */
#define STATE_IN_BAND_SLEEP_ENABLED	1

#define IBS_WAKE_RETRANS_TIMEOUT_MS	100
#define IBS_TX_IDLE_TIMEOUT_MS		2000
#define BAUDRATE_SETTLE_TIMEOUT_MS	300
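
/* Overview of the IBS handshake as implemented below: before transmitting,
 * the host sends HCI_IBS_WAKE_IND and waits for HCI_IBS_WAKE_ACK from the
 * controller; wake_retrans_timer re-sends the WAKE_IND every
 * IBS_WAKE_RETRANS_TIMEOUT_MS until the ACK arrives. Once awake, packets
 * flow normally, and after IBS_TX_IDLE_TIMEOUT_MS without TX activity
 * tx_idle_timer sends HCI_IBS_SLEEP_IND and the TX side goes back to sleep.
 * The controller drives the RX side with the same three messages.
 */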
/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
        HCI_IBS_TX_ASLEEP,
        HCI_IBS_TX_WAKING,
        HCI_IBS_TX_AWAKE,
};

/* HCI_IBS receive side sleep protocol states */
enum rx_states {
        HCI_IBS_RX_ASLEEP,
        HCI_IBS_RX_AWAKE,
};

/* HCI_IBS transmit and receive side clock state vote */
enum hci_ibs_clock_state_vote {
        HCI_IBS_VOTE_STATS_UPDATE,
        HCI_IBS_TX_VOTE_CLOCK_ON,
        HCI_IBS_TX_VOTE_CLOCK_OFF,
        HCI_IBS_RX_VOTE_CLOCK_ON,
        HCI_IBS_RX_VOTE_CLOCK_OFF,
};
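
/* TX and RX cast independent clock votes: serial_clock_vote() switches the
 * UART clock on as soon as either side votes on, and back off only once both
 * sides have voted off. HCI_IBS_VOTE_STATS_UPDATE only folds the time since
 * the last vote change into the on/off accounting.
 */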
struct qca_data {
        struct hci_uart *hu;
        struct sk_buff *rx_skb;
        struct sk_buff_head txq;
        struct sk_buff_head tx_wait_q;	/* HCI_IBS wait queue */
        spinlock_t hci_ibs_lock;	/* HCI_IBS state lock */
        u8 tx_ibs_state;		/* HCI_IBS transmit side power state */
        u8 rx_ibs_state;		/* HCI_IBS receive side power state */
        bool tx_vote;			/* Clock must be on for TX */
        bool rx_vote;			/* Clock must be on for RX */
        struct timer_list tx_idle_timer;
        u32 tx_idle_delay;
        struct timer_list wake_retrans_timer;
        u32 wake_retrans;
        struct workqueue_struct *workqueue;
        struct work_struct ws_awake_rx;
        struct work_struct ws_awake_device;
        struct work_struct ws_rx_vote_off;
        struct work_struct ws_tx_vote_off;
        unsigned long flags;

        /* For debugging purposes */
        u64 ibs_sent_wacks, ibs_sent_slps, ibs_sent_wakes;
        u64 ibs_recv_wacks, ibs_recv_slps, ibs_recv_wakes;
        u64 votes_on, votes_off;
        u64 tx_votes_on, tx_votes_off;
        u64 rx_votes_on, rx_votes_off;
        unsigned long vote_last_jif;	/* jiffies at the last vote change */
        u32 vote_on_ms, vote_off_ms;	/* accumulated clock-on/off time */
};
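
/* Note on the two TX queues: txq holds frames that may go out to the tty
 * immediately, while tx_wait_q parks frames queued while the controller is
 * still being woken up; device_woke_up() drains tx_wait_q into txq once the
 * HCI_IBS_WAKE_ACK arrives.
 */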
static void __serial_clock_on(struct tty_struct *tty)
{
        /* TODO: Some chipsets require the host-side UART clock to be
         * switched on explicitly (or other manual work) to save power.
         * Add the code that enables the UART clock here if needed.
         */
}

static void __serial_clock_off(struct tty_struct *tty)
{
        /* TODO: Some chipsets require the host-side UART clock to be
         * switched off explicitly (or other manual work) to save power.
         * Add the code that disables the UART clock here if needed.
         */
}
/* serial_clock_vote needs to be called with the ibs lock held */
static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
{
        struct qca_data *qca = hu->priv;
        unsigned int diff;
        bool old_vote = (qca->tx_vote | qca->rx_vote);
        bool new_vote;

        switch (vote) {
        case HCI_IBS_VOTE_STATS_UPDATE:
                diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
                if (old_vote)
                        qca->vote_off_ms += diff;
                else
                        qca->vote_on_ms += diff;
                return;

        case HCI_IBS_TX_VOTE_CLOCK_ON:
                qca->tx_vote = true;
                qca->tx_votes_on++;
                new_vote = true;
                break;

        case HCI_IBS_RX_VOTE_CLOCK_ON:
                qca->rx_vote = true;
                qca->rx_votes_on++;
                new_vote = true;
                break;

        case HCI_IBS_TX_VOTE_CLOCK_OFF:
                qca->tx_vote = false;
                qca->tx_votes_off++;
                new_vote = qca->rx_vote | qca->tx_vote;
                break;

        case HCI_IBS_RX_VOTE_CLOCK_OFF:
                qca->rx_vote = false;
                qca->rx_votes_off++;
                new_vote = qca->rx_vote | qca->tx_vote;
                break;

        default:
                BT_ERR("Voting irregularity");
                return;
        }

        if (new_vote != old_vote) {
                if (new_vote)
                        __serial_clock_on(hu->tty);
                else
                        __serial_clock_off(hu->tty);

                BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
                       vote ? "true" : "false");

                diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
                if (new_vote) {
                        qca->votes_on++;
                        qca->vote_off_ms += diff;
                } else {
                        qca->votes_off++;
                        qca->vote_on_ms += diff;
                }
                qca->vote_last_jif = jiffies;
        }
}
/* Builds and sends an HCI_IBS command packet.
 * These are very simple packets with only 1 cmd byte.
 */
static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
{
        struct sk_buff *skb = NULL;
        struct qca_data *qca = hu->priv;

        BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);

        skb = bt_skb_alloc(1, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("Failed to allocate memory for HCI_IBS packet");
                return -ENOMEM;
        }

        /* Assign HCI_IBS type */
        skb_put_u8(skb, cmd);

        skb_queue_tail(&qca->txq, skb);

        return 0;
}
static void qca_wq_awake_device(struct work_struct *work)
{
        struct qca_data *qca = container_of(work, struct qca_data,
                                            ws_awake_device);
        struct hci_uart *hu = qca->hu;
        unsigned long retrans_delay;

        BT_DBG("hu %p wq awake device", hu);

        /* Vote for serial clock */
        serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);

        spin_lock(&qca->hci_ibs_lock);

        /* Send wake indication to device */
        if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
                BT_ERR("Failed to send WAKE to device");

        qca->ibs_sent_wakes++;

        /* Start retransmit timer */
        retrans_delay = msecs_to_jiffies(qca->wake_retrans);
        mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);

        spin_unlock(&qca->hci_ibs_lock);

        /* Actually send the packets */
        hci_uart_tx_wakeup(hu);
}
static void qca_wq_awake_rx(struct work_struct *work)
{
        struct qca_data *qca = container_of(work, struct qca_data,
                                            ws_awake_rx);
        struct hci_uart *hu = qca->hu;

        BT_DBG("hu %p wq awake rx", hu);

        serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);

        spin_lock(&qca->hci_ibs_lock);
        qca->rx_ibs_state = HCI_IBS_RX_AWAKE;

        /* Always acknowledge the device wake-up; sending an IBS message
         * doesn't count as TX ON.
         */
        if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
                BT_ERR("Failed to acknowledge device wake up");

        qca->ibs_sent_wacks++;

        spin_unlock(&qca->hci_ibs_lock);

        /* Actually send the packets */
        hci_uart_tx_wakeup(hu);
}
static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
{
        struct qca_data *qca = container_of(work, struct qca_data,
                                            ws_rx_vote_off);
        struct hci_uart *hu = qca->hu;

        BT_DBG("hu %p rx clock vote off", hu);

        serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
}

static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
{
        struct qca_data *qca = container_of(work, struct qca_data,
                                            ws_tx_vote_off);
        struct hci_uart *hu = qca->hu;

        BT_DBG("hu %p tx clock vote off", hu);

        /* Run HCI tx handling unlocked */
        hci_uart_tx_wakeup(hu);

        /* Now that the message is queued to the tty driver, vote for the
         * tty clocks off. It is up to the tty driver to pend the clocks
         * off until tx is done.
         */
        serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
}
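
/* Clock votes are changed from the ordered workqueue (ws_awake_rx,
 * ws_awake_device, ws_rx_vote_off, ws_tx_vote_off) rather than directly from
 * timer or tasklet context, which keeps a platform __serial_clock_on()/
 * __serial_clock_off() implementation out of atomic context should it need
 * to sleep.
 */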
static void hci_ibs_tx_idle_timeout(struct timer_list *t)
{
        struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
        struct hci_uart *hu = qca->hu;
        unsigned long flags;

        BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);

        spin_lock_irqsave_nested(&qca->hci_ibs_lock,
                                 flags, SINGLE_DEPTH_NESTING);

        switch (qca->tx_ibs_state) {
        case HCI_IBS_TX_AWAKE:
                /* TX_IDLE, go to SLEEP */
                if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
                        BT_ERR("Failed to send SLEEP to device");
                        break;
                }
                qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
                qca->ibs_sent_slps++;
                queue_work(qca->workqueue, &qca->ws_tx_vote_off);
                break;

        case HCI_IBS_TX_ASLEEP:
        case HCI_IBS_TX_WAKING:
        default:
                BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
                break;
        }

        spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}
static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
{
        struct qca_data *qca = from_timer(qca, t, wake_retrans_timer);
        struct hci_uart *hu = qca->hu;
        unsigned long flags, retrans_delay;
        bool retransmit = false;

        BT_DBG("hu %p wake retransmit timeout in %d state",
               hu, qca->tx_ibs_state);

        spin_lock_irqsave_nested(&qca->hci_ibs_lock,
                                 flags, SINGLE_DEPTH_NESTING);

        switch (qca->tx_ibs_state) {
        case HCI_IBS_TX_WAKING:
                /* No WAKE_ACK yet, retransmit WAKE */
                retransmit = true;
                if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
                        BT_ERR("Failed to send WAKE to device");
                        break;
                }
                qca->ibs_sent_wakes++;
                retrans_delay = msecs_to_jiffies(qca->wake_retrans);
                mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
                break;

        case HCI_IBS_TX_ASLEEP:
        case HCI_IBS_TX_AWAKE:
        default:
                BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
                break;
        }

        spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

        if (retransmit)
                hci_uart_tx_wakeup(hu);
}
/* Initialize protocol */
static int qca_open(struct hci_uart *hu)
{
        struct qca_data *qca;

        BT_DBG("hu %p qca_open", hu);

        qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC);
        if (!qca)
                return -ENOMEM;

        skb_queue_head_init(&qca->txq);
        skb_queue_head_init(&qca->tx_wait_q);
        spin_lock_init(&qca->hci_ibs_lock);
        qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
        if (!qca->workqueue) {
                BT_ERR("QCA Workqueue not initialized properly");
                kfree(qca);
                return -ENOMEM;
        }

        INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
        INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
        INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
        INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);

        qca->hu = hu;

        /* Assume we start with both sides asleep -- extra wakes OK */
        qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
        qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;

        /* Clocks actually on, but we start votes off */
        qca->tx_vote = false;
        qca->rx_vote = false;

        qca->ibs_sent_wacks = 0;
        qca->ibs_sent_slps = 0;
        qca->ibs_sent_wakes = 0;
        qca->ibs_recv_wacks = 0;
        qca->ibs_recv_slps = 0;
        qca->ibs_recv_wakes = 0;
        qca->vote_last_jif = jiffies;
        qca->vote_on_ms = 0;
        qca->vote_off_ms = 0;
        qca->votes_on = 0;
        qca->votes_off = 0;
        qca->tx_votes_on = 0;
        qca->tx_votes_off = 0;
        qca->rx_votes_on = 0;
        qca->rx_votes_off = 0;

        hu->priv = qca;

        timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
        qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;

        timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
        qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;

        BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
               qca->tx_idle_delay, qca->wake_retrans);

        return 0;
}
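
/* Both IBS sides start out asleep, so the first frame enqueued after setup
 * takes the HCI_IBS_TX_ASLEEP path in qca_enqueue() and triggers a full
 * WAKE_IND/WAKE_ACK handshake before the frame itself is transmitted.
 */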
static void qca_debugfs_init(struct hci_dev *hdev)
{
        struct hci_uart *hu = hci_get_drvdata(hdev);
        struct qca_data *qca = hu->priv;
        struct dentry *ibs_dir;
        umode_t mode;

        if (!hdev->debugfs)
                return;

        ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);

        /* Read only */
        mode = S_IRUGO;
        debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
        debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
        debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
                           &qca->ibs_sent_slps);
        debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
                           &qca->ibs_sent_wakes);
        debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
                           &qca->ibs_sent_wacks);
        debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
                           &qca->ibs_recv_slps);
        debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
                           &qca->ibs_recv_wakes);
        debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
                           &qca->ibs_recv_wacks);
        debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
        debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
        debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
        debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
        debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
        debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
        debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
        debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
        debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
        debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);

        /* Read/write */
        mode = S_IRUGO | S_IWUSR;
        debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
        debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
                           &qca->tx_idle_delay);
}
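
/* The counters above are exposed read-only, while wake_retrans and
 * tx_idle_delay are writable so the IBS timing can be tuned at run time.
 * With debugfs mounted in the usual place these files typically show up
 * under /sys/kernel/debug/bluetooth/hciN/ibs/.
 */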
/* Flush protocol data */
static int qca_flush(struct hci_uart *hu)
{
        struct qca_data *qca = hu->priv;

        BT_DBG("hu %p qca flush", hu);

        skb_queue_purge(&qca->tx_wait_q);
        skb_queue_purge(&qca->txq);

        return 0;
}
/* Close protocol */
static int qca_close(struct hci_uart *hu)
{
        struct qca_data *qca = hu->priv;

        BT_DBG("hu %p qca close", hu);

        serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);

        skb_queue_purge(&qca->tx_wait_q);
        skb_queue_purge(&qca->txq);
        del_timer(&qca->tx_idle_timer);
        del_timer(&qca->wake_retrans_timer);
        destroy_workqueue(qca->workqueue);
        qca->hu = NULL;

        kfree_skb(qca->rx_skb);

        hu->priv = NULL;

        kfree(qca);

        return 0;
}
/* Called upon a wake-up-indication from the device. */
static void device_want_to_wakeup(struct hci_uart *hu)
{
        unsigned long flags;
        struct qca_data *qca = hu->priv;

        BT_DBG("hu %p want to wake up", hu);

        spin_lock_irqsave(&qca->hci_ibs_lock, flags);

        qca->ibs_recv_wakes++;

        switch (qca->rx_ibs_state) {
        case HCI_IBS_RX_ASLEEP:
                /* Make sure the clock is on - it may have been voted off
                 * since the last wake-up indication. The workqueue turns
                 * the rx clock vote on and sends the WAKE_ACK.
                 */
                queue_work(qca->workqueue, &qca->ws_awake_rx);
                spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
                return;

        case HCI_IBS_RX_AWAKE:
                /* Always acknowledge the device wake-up; sending an IBS
                 * message doesn't count as TX ON.
                 */
                if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
                        BT_ERR("Failed to acknowledge device wake up");
                        break;
                }
                qca->ibs_sent_wacks++;
                break;

        default:
                /* Any other state is illegal */
                BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
                       qca->rx_ibs_state);
                break;
        }

        spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

        /* Actually send the packets */
        hci_uart_tx_wakeup(hu);
}
/* Called upon a sleep-indication from the device. */
static void device_want_to_sleep(struct hci_uart *hu)
{
        unsigned long flags;
        struct qca_data *qca = hu->priv;

        BT_DBG("hu %p want to sleep", hu);

        spin_lock_irqsave(&qca->hci_ibs_lock, flags);

        qca->ibs_recv_slps++;

        switch (qca->rx_ibs_state) {
        case HCI_IBS_RX_AWAKE:
                /* Update state */
                qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
                /* Vote off rx clock under workqueue */
                queue_work(qca->workqueue, &qca->ws_rx_vote_off);
                break;

        case HCI_IBS_RX_ASLEEP:
        default:
                /* Any other state is illegal */
                BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
                       qca->rx_ibs_state);
                break;
        }

        spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}
/* Called upon wake-up-acknowledgement from the device. */
static void device_woke_up(struct hci_uart *hu)
{
        unsigned long flags, idle_delay;
        struct qca_data *qca = hu->priv;
        struct sk_buff *skb = NULL;

        BT_DBG("hu %p woke up", hu);

        spin_lock_irqsave(&qca->hci_ibs_lock, flags);

        qca->ibs_recv_wacks++;

        switch (qca->tx_ibs_state) {
        case HCI_IBS_TX_AWAKE:
                /* Expected if we sent 2 WAKEs (retransmission); ignore */
                BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
                       qca->tx_ibs_state);
                break;

        case HCI_IBS_TX_WAKING:
                /* Send pending packets */
                while ((skb = skb_dequeue(&qca->tx_wait_q)))
                        skb_queue_tail(&qca->txq, skb);

                /* Switch timers and change state to HCI_IBS_TX_AWAKE */
                del_timer(&qca->wake_retrans_timer);
                idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
                mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
                qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
                break;

        case HCI_IBS_TX_ASLEEP:
        default:
                BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
                       qca->tx_ibs_state);
                break;
        }

        spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

        /* Actually send the packets */
        hci_uart_tx_wakeup(hu);
}
/* Enqueue frame for transmission (padding, crc, etc); may be called from
 * two simultaneous tasklets.
 */
static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
        unsigned long flags = 0, idle_delay;
        struct qca_data *qca = hu->priv;

        BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
               qca->tx_ibs_state);

        /* Prepend skb with frame type */
        memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

        /* Don't go to sleep in the middle of a patch download, or when
         * Out-Of-Band (GPIO-controlled) sleep is selected.
         */
        if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
                skb_queue_tail(&qca->txq, skb);
                return 0;
        }

        spin_lock_irqsave(&qca->hci_ibs_lock, flags);

        /* Act according to current state */
        switch (qca->tx_ibs_state) {
        case HCI_IBS_TX_AWAKE:
                BT_DBG("Device awake, sending normally");
                skb_queue_tail(&qca->txq, skb);
                idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
                mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
                break;

        case HCI_IBS_TX_ASLEEP:
                BT_DBG("Device asleep, waking up and queueing packet");
                /* Save packet for later */
                skb_queue_tail(&qca->tx_wait_q, skb);

                qca->tx_ibs_state = HCI_IBS_TX_WAKING;
                /* Schedule a work queue to wake up device */
                queue_work(qca->workqueue, &qca->ws_awake_device);
                break;

        case HCI_IBS_TX_WAKING:
                BT_DBG("Device waking up, queueing packet");
                /* Transient state; just keep packet for later */
                skb_queue_tail(&qca->tx_wait_q, skb);
                break;

        default:
                BT_ERR("Illegal tx state: %d (losing packet)",
                       qca->tx_ibs_state);
                kfree_skb(skb);
                break;
        }

        spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

        return 0;
}
static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_uart *hu = hci_get_drvdata(hdev);

        BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);

        device_want_to_sleep(hu);

        kfree_skb(skb);
        return 0;
}

static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_uart *hu = hci_get_drvdata(hdev);

        BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);

        device_want_to_wakeup(hu);

        kfree_skb(skb);
        return 0;
}

static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_uart *hu = hci_get_drvdata(hdev);

        BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);

        device_woke_up(hu);

        kfree_skb(skb);
        return 0;
}
#define QCA_IBS_SLEEP_IND_EVENT \
        .type = HCI_IBS_SLEEP_IND, \
        .hlen = 0, \
        .loff = 0, \
        .lsize = 0, \
        .maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_IND_EVENT \
        .type = HCI_IBS_WAKE_IND, \
        .hlen = 0, \
        .loff = 0, \
        .lsize = 0, \
        .maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_ACK_EVENT \
        .type = HCI_IBS_WAKE_ACK, \
        .hlen = 0, \
        .loff = 0, \
        .lsize = 0, \
        .maxlen = HCI_MAX_IBS_SIZE
static const struct h4_recv_pkt qca_recv_pkts[] = {
        { H4_RECV_ACL,             .recv = hci_recv_frame    },
        { H4_RECV_SCO,             .recv = hci_recv_frame    },
        { H4_RECV_EVENT,           .recv = hci_recv_frame    },
        { QCA_IBS_WAKE_IND_EVENT,  .recv = qca_ibs_wake_ind  },
        { QCA_IBS_WAKE_ACK_EVENT,  .recv = qca_ibs_wake_ack  },
        { QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
};
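
/* The three IBS entries use hlen/lsize of 0, so h4_recv_buf() treats them as
 * complete single-byte frames and hands them to the qca_ibs_* handlers above
 * instead of passing them to the Bluetooth core like ACL, SCO and event
 * packets.
 */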
static int qca_recv(struct hci_uart *hu, const void *data, int count)
{
        struct qca_data *qca = hu->priv;

        if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
                return -EUNATCH;

        qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
                                  qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
        if (IS_ERR(qca->rx_skb)) {
                int err = PTR_ERR(qca->rx_skb);

                bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
                qca->rx_skb = NULL;
                return err;
        }

        return count;
}
static struct sk_buff *qca_dequeue(struct hci_uart *hu)
{
        struct qca_data *qca = hu->priv;

        return skb_dequeue(&qca->txq);
}
static uint8_t qca_get_baudrate_value(int speed)
{
        switch (speed) {
        case 9600:
                return QCA_BAUDRATE_9600;
        case 19200:
                return QCA_BAUDRATE_19200;
        case 38400:
                return QCA_BAUDRATE_38400;
        case 57600:
                return QCA_BAUDRATE_57600;
        case 115200:
                return QCA_BAUDRATE_115200;
        case 230400:
                return QCA_BAUDRATE_230400;
        case 460800:
                return QCA_BAUDRATE_460800;
        case 500000:
                return QCA_BAUDRATE_500000;
        case 921600:
                return QCA_BAUDRATE_921600;
        case 1000000:
                return QCA_BAUDRATE_1000000;
        case 2000000:
                return QCA_BAUDRATE_2000000;
        case 3000000:
                return QCA_BAUDRATE_3000000;
        case 3500000:
                return QCA_BAUDRATE_3500000;
        default:
                return QCA_BAUDRATE_115200;
        }
}
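
/* The QCA_BAUDRATE_* value returned above is not a rate in bps but the
 * controller's index for it; qca_set_baudrate() below places it in the last
 * byte (cmd[4]) of the 5-byte vendor change-baudrate sequence it queues to
 * the controller.
 */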
static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
{
        struct hci_uart *hu = hci_get_drvdata(hdev);
        struct qca_data *qca = hu->priv;
        struct sk_buff *skb;
        u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };

        if (baudrate > QCA_BAUDRATE_3000000)
                return -EINVAL;

        cmd[4] = baudrate;

        skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC);
        if (!skb) {
                bt_dev_err(hdev, "Failed to allocate baudrate packet");
                return -ENOMEM;
        }

        /* Assign commands to change baudrate and packet type. */
        skb_put_data(skb, cmd, sizeof(cmd));
        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;

        skb_queue_tail(&qca->txq, skb);
        hci_uart_tx_wakeup(hu);

        /* Wait ~300 ms for the controller to switch to the new baud rate;
         * it only answers this command after the change, so the host must
         * not talk to it at the old rate in the meantime.
         */
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
        set_current_state(TASK_RUNNING);

        return 0;
}
static int qca_setup(struct hci_uart *hu)
{
        struct hci_dev *hdev = hu->hdev;
        struct qca_data *qca = hu->priv;
        unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
        int ret;

        bt_dev_info(hdev, "ROME setup");

        /* Patch downloading has to be done without IBS mode */
        clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);

        /* Setup initial baudrate */
        speed = 0;
        if (hu->init_speed)
                speed = hu->init_speed;
        else if (hu->proto->init_speed)
                speed = hu->proto->init_speed;

        if (speed)
                hci_uart_set_baudrate(hu, speed);

        /* Setup user speed if needed */
        speed = 0;
        if (hu->oper_speed)
                speed = hu->oper_speed;
        else if (hu->proto->oper_speed)
                speed = hu->proto->oper_speed;

        if (speed) {
                qca_baudrate = qca_get_baudrate_value(speed);

                bt_dev_info(hdev, "Set UART speed to %d", speed);
                ret = qca_set_baudrate(hdev, qca_baudrate);
                if (ret) {
                        bt_dev_err(hdev, "Failed to change the baud rate (%d)",
                                   ret);
                        return ret;
                }
                hci_uart_set_baudrate(hu, speed);
        }

        /* Setup patch / NVM configurations */
        ret = qca_uart_setup_rome(hdev, qca_baudrate);
        if (!ret) {
                set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
                qca_debugfs_init(hdev);
        } else if (ret == -ENOENT) {
                /* No patch/nvm-config found, run with original fw/config */
                ret = 0;
        }

        /* Setup bdaddr */
        hu->hdev->set_bdaddr = qca_set_bdaddr_rome;

        return ret;
}
static struct hci_uart_proto qca_proto = {
        .id		= HCI_UART_QCA,
        .name		= "QCA",
        .manufacturer	= 29,
        .init_speed	= 115200,
        .oper_speed	= 3000000,
        .open		= qca_open,
        .close		= qca_close,
        .flush		= qca_flush,
        .setup		= qca_setup,
        .recv		= qca_recv,
        .enqueue	= qca_enqueue,
        .dequeue	= qca_dequeue,
};
int __init qca_init(void)
{
        return hci_uart_register_proto(&qca_proto);
}

int __exit qca_deinit(void)
{
        return hci_uart_unregister_proto(&qca_proto);
}