1 // SPDX-License-Identifier: ISC
3 * Copyright (c) 2005-2011 Atheros Communications Inc.
4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
/* Tx-completion callback for the HTC control endpoint (EP0).
 * NOTE(review): signature continuation and body are on elided lines —
 * presumably it just frees the completed skb; confirm against full file.
 */
15 static void ath10k_htc_control_tx_complete(struct ath10k *ar,
/* Allocate and prepare an skb for an HTC control (EP0) message.
 * Reserves headroom and zeroes the driver's per-skb control block.
 * NOTE(review): the NULL check after dev_alloc_skb() and the return
 * statement are on elided lines.
 */
21 static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
24 struct ath10k_skb_cb *skb_cb;
26 skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
/* Headroom for headers prepended later; the 20-byte size is unexplained
 * even upstream (see FIXME).
 */
30 skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
/* Firmware requires 4-byte aligned buffers; warn once if violated. */
31 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
33 skb_cb = ATH10K_SKB_CB(skb);
34 memset(skb_cb, 0, sizeof(*skb_cb));
36 ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
/* Undo the tx preparation on a completed skb: unmap its DMA mapping
 * (unless the bus is high-latency, which never mapped it) and strip
 * the HTC header so the owner sees the original payload again.
 */
40 static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
43 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
45 if (htc->ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
46 dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
47 skb_pull(skb, sizeof(struct ath10k_htc_hdr));
/* Deliver a tx-completed skb to its endpoint's ep_tx_complete handler.
 * Restores the skb (unmap + header strip) first; frees the skb itself
 * when no handler is registered or when it was part of a tx bundle.
 */
50 void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
53 struct ath10k *ar = ep->htc->ar;
54 struct ath10k_htc_hdr *hdr;
56 ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
59 /* A corner case where the copy completion is reaching to host but still
60 * copy engine is processing it due to which host unmaps corresponding
61 * memory and causes SMMU fault, hence as workaround adding delay
62 * the unmapping memory to avoid SMMU faults.
/* NOTE(review): the second operand of this && and the delay call are on
 * elided lines.
 */
64 if (ar->hw_params.delay_unmap_buffer &&
/* Read the header before restore_tx_skb() pulls it off the skb. */
68 hdr = (struct ath10k_htc_hdr *)skb->data;
69 ath10k_htc_restore_tx_skb(ep->htc, skb);
71 if (!ep->ep_ops.ep_tx_complete) {
72 ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
73 dev_kfree_skb_any(skb);
/* Bundled frames have no individual owner to notify — just free. */
77 if (hdr->flags & ATH10K_HTC_FLAG_SEND_BUNDLE) {
78 dev_kfree_skb_any(skb);
/* Ownership of the skb passes to the endpoint's completion handler. */
82 ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
84 EXPORT_SYMBOL(ath10k_htc_notify_tx_completion);
/* Fill in the HTC header at the front of an outgoing skb: payload
 * length, credit-update request flag (when flow control is on and the
 * frame is not being bundled), and a sequence number taken under the
 * tx lock.
 */
86 static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
89 struct ath10k_htc_hdr *hdr;
91 hdr = (struct ath10k_htc_hdr *)skb->data;
92 memset(hdr, 0, sizeof(struct ath10k_htc_hdr));
/* hdr->len is the payload length, excluding the HTC header itself. */
95 hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
97 if (ep->tx_credit_flow_enabled && !ep->bundle_tx)
98 hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
/* Lock protects ep->seq_no against concurrent senders. */
100 spin_lock_bh(&ep->htc->tx_lock);
101 hdr->seq_no = ep->seq_no++;
102 spin_unlock_bh(&ep->htc->tx_lock);
/* Reserve tx credits for a frame of 'len' bytes on this endpoint.
 * Credits are counted in units of ep->tx_credit_size, rounded up.
 * Returns 0 on success; the insufficient-credits error path (and the
 * early return when flow control is disabled) are on elided lines —
 * presumably returning -EAGAIN/0 respectively; confirm in full file.
 */
105 static int ath10k_htc_consume_credit(struct ath10k_htc_ep *ep,
109 struct ath10k_htc *htc = ep->htc;
110 struct ath10k *ar = htc->ar;
111 enum ath10k_htc_ep_id eid = ep->eid;
112 int credits, ret = 0;
114 if (!ep->tx_credit_flow_enabled)
117 credits = DIV_ROUND_UP(len, ep->tx_credit_size);
118 spin_lock_bh(&htc->tx_lock);
120 if (ep->tx_credits < credits) {
121 ath10k_dbg(ar, ATH10K_DBG_HTC,
122 "htc insufficient credits ep %d required %d available %d consume %d\n",
123 eid, credits, ep->tx_credits, consume);
129 ep->tx_credits -= credits;
130 ath10k_dbg(ar, ATH10K_DBG_HTC,
131 "htc ep %d consumed %d credits total %d\n",
132 eid, credits, ep->tx_credits);
136 spin_unlock_bh(&htc->tx_lock);
/* Return previously consumed credits for a frame of 'len' bytes (e.g.
 * after a failed send), then notify the endpoint via ep_tx_credits so
 * queued work can resume.
 */
140 static void ath10k_htc_release_credit(struct ath10k_htc_ep *ep, unsigned int len)
142 struct ath10k_htc *htc = ep->htc;
143 struct ath10k *ar = htc->ar;
144 enum ath10k_htc_ep_id eid = ep->eid;
147 if (!ep->tx_credit_flow_enabled)
150 credits = DIV_ROUND_UP(len, ep->tx_credit_size);
151 spin_lock_bh(&htc->tx_lock);
152 ep->tx_credits += credits;
153 ath10k_dbg(ar, ATH10K_DBG_HTC,
154 "htc ep %d reverted %d credits back total %d\n",
155 eid, credits, ep->tx_credits);
156 spin_unlock_bh(&htc->tx_lock);
/* Callback runs outside the tx lock. */
158 if (ep->ep_ops.ep_tx_credits)
159 ep->ep_ops.ep_tx_credits(htc->ar);
/* Send one skb on the given endpoint: push the HTC header, consume
 * credits, DMA-map (low-latency buses only) and hand to the HIF layer
 * as a single-element scatter-gather item. On failure the unwind path
 * (visible at the bottom) unmaps, releases credits and strips the
 * header so the caller gets the skb back unchanged.
 */
162 int ath10k_htc_send(struct ath10k_htc *htc,
163 enum ath10k_htc_ep_id eid,
166 struct ath10k *ar = htc->ar;
167 struct ath10k_htc_ep *ep = &htc->endpoint[eid];
168 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
169 struct ath10k_hif_sg_item sg_item;
170 struct device *dev = htc->ar->dev;
172 unsigned int skb_len;
/* Refuse to touch hardware once the device is wedged. */
174 if (htc->ar->state == ATH10K_STATE_WEDGED)
177 if (eid >= ATH10K_HTC_EP_COUNT) {
178 ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
182 skb_push(skb, sizeof(struct ath10k_htc_hdr));
185 ret = ath10k_htc_consume_credit(ep, skb_len, true);
189 ath10k_htc_prepare_tx_skb(ep, skb);
/* High-latency (SDIO/USB) buses do not DMA-map here. */
192 if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
193 skb_cb->paddr = dma_map_single(dev, skb->data, skb->len,
195 ret = dma_mapping_error(dev, skb_cb->paddr);
202 sg_item.transfer_id = ep->eid;
203 sg_item.transfer_context = skb;
204 sg_item.vaddr = skb->data;
205 sg_item.paddr = skb_cb->paddr;
206 sg_item.len = skb->len;
208 ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
/* Error unwind: undo mapping, credits and header push. */
215 if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
216 dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
218 ath10k_htc_release_credit(ep, skb_len);
220 skb_pull(skb, sizeof(struct ath10k_htc_hdr));
/* HIF-facing tx-completion entry point: look up the endpoint recorded
 * in the skb control block and forward to the notify helper, which
 * takes ownership of the skb.
 */
224 void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
226 struct ath10k_htc *htc = &ar->htc;
227 struct ath10k_skb_cb *skb_cb;
228 struct ath10k_htc_ep *ep;
230 if (WARN_ON_ONCE(!skb))
233 skb_cb = ATH10K_SKB_CB(skb);
234 ep = &htc->endpoint[skb_cb->eid];
236 ath10k_htc_notify_tx_completion(ep, skb);
237 /* the skb now belongs to the completion handler */
239 EXPORT_SYMBOL(ath10k_htc_tx_completion_handler);
/* Apply a firmware credit report: each entry grants credits back to an
 * endpoint. The tx lock is dropped around the ep_tx_credits callback
 * so the callback may itself send (and re-take the lock).
 */
246 ath10k_htc_process_credit_report(struct ath10k_htc *htc,
247 const struct ath10k_htc_credit_report *report,
249 enum ath10k_htc_ep_id eid)
251 struct ath10k *ar = htc->ar;
252 struct ath10k_htc_ep *ep;
/* A ragged trailing fragment is tolerated, only warned about. */
255 if (len % sizeof(*report))
256 ath10k_warn(ar, "Uneven credit report len %d", len);
258 n_reports = len / sizeof(*report);
260 spin_lock_bh(&htc->tx_lock);
261 for (i = 0; i < n_reports; i++, report++) {
/* Skip entries with out-of-range endpoint ids from firmware. */
262 if (report->eid >= ATH10K_HTC_EP_COUNT)
265 ep = &htc->endpoint[report->eid];
266 ep->tx_credits += report->credits;
268 ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
269 report->eid, report->credits, ep->tx_credits);
271 if (ep->ep_ops.ep_tx_credits) {
272 spin_unlock_bh(&htc->tx_lock);
273 ep->ep_ops.ep_tx_credits(htc->ar);
274 spin_lock_bh(&htc->tx_lock);
277 spin_unlock_bh(&htc->tx_lock);
/* Process a single lookahead report from an rx trailer. A report is
 * valid only when pre_valid is the bitwise complement of post_valid;
 * invalid reports are silently ignored (they occur at every boot).
 * On success the 4 lookahead bytes are copied to the caller's buffer
 * and *next_lookaheads_len is set to 1.
 */
281 ath10k_htc_process_lookahead(struct ath10k_htc *htc,
282 const struct ath10k_htc_lookahead_report *report,
284 enum ath10k_htc_ep_id eid,
285 void *next_lookaheads,
286 int *next_lookaheads_len)
288 struct ath10k *ar = htc->ar;
290 /* Invalid lookahead flags are actually transmitted by
291 * the target in the HTC control message.
292 * Since this will happen at every boot we silently ignore
293 * the lookahead in this case
295 if (report->pre_valid != ((~report->post_valid) & 0xFF))
298 if (next_lookaheads && next_lookaheads_len) {
299 ath10k_dbg(ar, ATH10K_DBG_HTC,
300 "htc rx lookahead found pre_valid 0x%x post_valid 0x%x\n",
301 report->pre_valid, report->post_valid);
303 /* look ahead bytes are valid, copy them over */
304 memcpy((u8 *)next_lookaheads, report->lookahead, 4);
306 *next_lookaheads_len = 1;
/* Process a bundled lookahead report: 'len' bytes holding bundle_cnt
 * 4-byte lookaheads, copied one after another into next_lookaheads.
 * Rejects a count of zero or one exceeding max_msgs_per_htc_bundle.
 */
313 ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
314 const struct ath10k_htc_lookahead_bundle *report,
316 enum ath10k_htc_ep_id eid,
317 void *next_lookaheads,
318 int *next_lookaheads_len)
320 struct ath10k *ar = htc->ar;
321 int bundle_cnt = len / sizeof(*report);
323 if (!bundle_cnt || (bundle_cnt > htc->max_msgs_per_htc_bundle)) {
324 ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
329 if (next_lookaheads && next_lookaheads_len) {
332 for (i = 0; i < bundle_cnt; i++) {
333 memcpy(((u8 *)next_lookaheads) + 4 * i,
334 report->lookahead, 4);
338 *next_lookaheads_len = bundle_cnt;
/* Walk the trailer records appended to an rx frame. Each record is a
 * header (id + len) followed by payload; supported ids are credit
 * reports, lookahead reports and lookahead bundles. Malformed records
 * abort the walk and the whole trailer is hex-dumped for debugging.
 * NOTE(review): loop construct, 'report++'-advance details and some
 * error assignments are on elided lines.
 */
344 int ath10k_htc_process_trailer(struct ath10k_htc *htc,
347 enum ath10k_htc_ep_id src_eid,
348 void *next_lookaheads,
349 int *next_lookaheads_len)
351 struct ath10k_htc_lookahead_bundle *bundle;
352 struct ath10k *ar = htc->ar;
354 struct ath10k_htc_record *record;
/* Keep originals for the bad-trailer hex dump at the end. */
359 orig_buffer = buffer;
360 orig_length = length;
363 record = (struct ath10k_htc_record *)buffer;
/* Not even room for a record header left. */
365 if (length < sizeof(record->hdr)) {
/* Record claims more payload than remains in the trailer. */
370 if (record->hdr.len > length) {
371 /* no room left in buffer for record */
372 ath10k_warn(ar, "Invalid record length: %d\n",
378 switch (record->hdr.id) {
379 case ATH10K_HTC_RECORD_CREDITS:
380 len = sizeof(struct ath10k_htc_credit_report);
381 if (record->hdr.len < len) {
382 ath10k_warn(ar, "Credit report too long\n");
386 ath10k_htc_process_credit_report(htc,
387 record->credit_report,
391 case ATH10K_HTC_RECORD_LOOKAHEAD:
392 len = sizeof(struct ath10k_htc_lookahead_report);
393 if (record->hdr.len < len) {
394 ath10k_warn(ar, "Lookahead report too long\n");
398 status = ath10k_htc_process_lookahead(htc,
399 record->lookahead_report,
403 next_lookaheads_len);
405 case ATH10K_HTC_RECORD_LOOKAHEAD_BUNDLE:
406 bundle = record->lookahead_bundle;
407 status = ath10k_htc_process_lookahead_bundle(htc,
412 next_lookaheads_len);
/* Unknown record ids are logged and skipped, not fatal. */
415 ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
416 record->hdr.id, record->hdr.len);
423 /* multiple records may be present in a trailer */
424 buffer += sizeof(record->hdr) + record->hdr.len;
425 length -= sizeof(record->hdr) + record->hdr.len;
429 ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
430 orig_buffer, orig_length);
434 EXPORT_SYMBOL(ath10k_htc_process_trailer);
/* HIF-facing rx entry point: validate the HTC header (endpoint id,
 * connected service, payload length), process and trim any trailer,
 * then hand the payload to the endpoint's ep_rx_complete handler,
 * which takes ownership of the skb. Zero-payload frames that carried
 * only trailer data are dropped.
 */
436 void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
439 struct ath10k_htc *htc = &ar->htc;
440 struct ath10k_htc_hdr *hdr;
441 struct ath10k_htc_ep *ep;
446 bool trailer_present;
448 hdr = (struct ath10k_htc_hdr *)skb->data;
449 skb_pull(skb, sizeof(*hdr));
/* NOTE(review): 'eid = hdr->eid' assignment is on an elided line. */
453 if (eid >= ATH10K_HTC_EP_COUNT) {
454 ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
455 ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
460 ep = &htc->endpoint[eid];
461 if (ep->service_id == ATH10K_HTC_SVC_ID_UNUSED) {
462 ath10k_warn(ar, "htc rx endpoint %d is not connected\n", eid);
466 payload_len = __le16_to_cpu(hdr->len);
468 if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
469 ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
470 payload_len + sizeof(*hdr));
471 ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
476 if (skb->len < payload_len) {
477 ath10k_dbg(ar, ATH10K_DBG_HTC,
478 "HTC Rx: insufficient length, got %d, expected %d\n",
479 skb->len, payload_len);
480 ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
481 "", hdr, sizeof(*hdr));
485 /* get flags to check for trailer */
486 trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
487 if (trailer_present) {
490 trailer_len = hdr->trailer_len;
491 min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);
493 if ((trailer_len < min_len) ||
494 (trailer_len > payload_len)) {
495 ath10k_warn(ar, "Invalid trailer length: %d\n",
/* Trailer sits at the very end of the payload. */
501 trailer += sizeof(*hdr);
502 trailer += payload_len;
503 trailer -= trailer_len;
504 status = ath10k_htc_process_trailer(htc, trailer,
505 trailer_len, hdr->eid,
/* Drop the trailer bytes before delivering the payload. */
510 skb_trim(skb, skb->len - trailer_len);
513 if (((int)payload_len - (int)trailer_len) <= 0)
514 /* zero length packet with trailer data, just drop these */
517 ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
519 ep->ep_ops.ep_rx_complete(ar, skb);
521 /* skb is now owned by the rx completion handler */
526 EXPORT_SYMBOL(ath10k_htc_rx_completion_handler);
/* Rx handler for the HTC control endpoint (EP0). READY and
 * CONNECT_SERVICE_RESP messages are copied into control_resp_buffer
 * and signalled via ctl_resp; suspend-complete is forwarded to the
 * registered htc_ops callback; anything else is ignored with a
 * warning. NOTE(review): the skb free at the end is on elided lines.
 */
528 static void ath10k_htc_control_rx_complete(struct ath10k *ar,
531 struct ath10k_htc *htc = &ar->htc;
532 struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
534 switch (__le16_to_cpu(msg->hdr.message_id)) {
535 case ATH10K_HTC_MSG_READY_ID:
536 case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
537 /* handle HTC control message */
538 if (completion_done(&htc->ctl_resp)) {
539 /* this is a fatal error, target should not be
540 * sending unsolicited messages on the ep 0
542 ath10k_warn(ar, "HTC rx ctrl still processing\n");
543 complete(&htc->ctl_resp);
/* Copy at most ATH10K_HTC_MAX_CTRL_MSG_LEN bytes of the response. */
547 htc->control_resp_len =
549 ATH10K_HTC_MAX_CTRL_MSG_LEN);
551 memcpy(htc->control_resp_buffer, skb->data,
552 htc->control_resp_len);
554 complete(&htc->ctl_resp);
556 case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
557 htc->htc_ops.target_send_suspend_complete(ar);
560 ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
/* Map an HTC service id to a human-readable name for log messages.
 * NOTE(review): most return-string lines and the default case are on
 * elided lines.
 */
572 static const char *htc_service_name(enum ath10k_htc_svc_id id)
575 case ATH10K_HTC_SVC_ID_RESERVED:
577 case ATH10K_HTC_SVC_ID_RSVD_CTRL:
579 case ATH10K_HTC_SVC_ID_WMI_CONTROL:
581 case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
583 case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
585 case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
587 case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
589 case ATH10K_HTC_SVC_ID_NMI_CONTROL:
590 return "NMI Control";
591 case ATH10K_HTC_SVC_ID_NMI_DATA:
593 case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
595 case ATH10K_HTC_SVC_ID_HTT_DATA2_MSG:
597 case ATH10K_HTC_SVC_ID_HTT_DATA3_MSG:
599 case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
601 case ATH10K_HTC_SVC_ID_HTT_LOG_MSG:
/* Reset every endpoint to its disconnected default state: unused
 * service id, zeroed limits, and credit flow control enabled (the
 * connect path may disable it per-service later).
 */
608 static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
610 struct ath10k_htc_ep *ep;
613 for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
614 ep = &htc->endpoint[i];
615 ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
616 ep->max_ep_message_len = 0;
617 ep->max_tx_queue_depth = 0;
620 ep->tx_credit_flow_enabled = true;
/* Decide how many target tx credits a service gets at connect time.
 * Only the WMI control service uses credit flow control, so it is
 * allocated the whole pool; every other service gets zero.
 */
624 static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
629 /* The WMI control service is the only service with flow control.
630 * Let it have all transmit credits.
632 if (service_id == ATH10K_HTC_SVC_ID_WMI_CONTROL)
633 allocation = htc->total_transmit_credits;
/* Transmit an assembled bundle skb via the HIF layer. Credits are
 * consumed for the whole bundle and released again on send failure.
 * The per-frame skbs saved in tx_save_head are then either requeued
 * for retry (on failure — header stripped first) or moved to the
 * endpoint's tx_complete queue and the completion work scheduled.
 * NOTE(review): the failure/success branch structure inside the
 * dequeue loop spans elided lines.
 */
638 static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
639 struct sk_buff *bundle_skb,
640 struct sk_buff_head *tx_save_head)
642 struct ath10k_hif_sg_item sg_item;
643 struct ath10k_htc *htc = ep->htc;
644 struct ath10k *ar = htc->ar;
647 unsigned int skb_len;
649 ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle skb len %d\n", bundle_skb->len);
650 skb_len = bundle_skb->len;
651 ret = ath10k_htc_consume_credit(ep, skb_len, true);
654 sg_item.transfer_id = ep->eid;
655 sg_item.transfer_context = bundle_skb;
656 sg_item.vaddr = bundle_skb->data;
657 sg_item.len = bundle_skb->len;
659 ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
661 ath10k_htc_release_credit(ep, skb_len);
/* The bundle buffer itself is no longer needed either way. */
665 dev_kfree_skb_any(bundle_skb);
667 for (cn = 0; (skb = skb_dequeue_tail(tx_save_head)); cn++) {
669 skb_pull(skb, sizeof(struct ath10k_htc_hdr));
670 skb_queue_head(&ep->tx_req_head, skb);
672 skb_queue_tail(&ep->tx_complete_head, skb);
677 queue_work(ar->workqueue_tx_complete, &ar->tx_complete_work);
679 ath10k_dbg(ar, ATH10K_DBG_HTC,
680 "bundle tx status %d eid %d req count %d count %d len %d\n",
681 ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, skb_len);
/* Send a single (non-bundled) skb; on failure push it back to the
 * front of the endpoint's request queue for a later retry.
 */
685 static void ath10k_htc_send_one_skb(struct ath10k_htc_ep *ep, struct sk_buff *skb)
687 struct ath10k_htc *htc = ep->htc;
688 struct ath10k *ar = htc->ar;
691 ret = ath10k_htc_send(htc, ep->eid, skb);
694 skb_queue_head(&ep->tx_req_head, skb);
696 ath10k_dbg(ar, ATH10K_DBG_HTC, "tx one status %d eid %d len %d pending count %d\n",
697 ret, ep->eid, skb->len, skb_queue_len(&ep->tx_req_head));
/* Drain the endpoint's tx_req_head queue into one or more bundle
 * buffers. Each frame is padded to a credit-size multiple, given an
 * HTC header with the SEND_BUNDLE flag, and appended to the bundle
 * buffer; when the buffer fills it is flushed via
 * ath10k_htc_send_bundle() and a fresh one allocated. The last frame
 * in the queue is sent unbundled. On credit shortage frames are
 * requeued and the partial bundle is flushed. NOTE(review): several
 * loop/branch boundaries and early returns sit on elided lines.
 */
700 static int ath10k_htc_send_bundle_skbs(struct ath10k_htc_ep *ep)
702 struct ath10k_htc *htc = ep->htc;
703 struct sk_buff *bundle_skb, *skb;
704 struct sk_buff_head tx_save_head;
705 struct ath10k_htc_hdr *hdr;
707 int ret = 0, credit_pad, credit_remainder, trans_len, bundles_left = 0;
709 if (htc->ar->state == ATH10K_STATE_WEDGED)
/* Not enough credits to make bundling worthwhile yet. */
712 if (ep->tx_credit_flow_enabled &&
713 ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE)
716 bundles_left = ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
717 bundle_skb = dev_alloc_skb(bundles_left);
722 bundle_buf = bundle_skb->data;
723 skb_queue_head_init(&tx_save_head);
726 skb = skb_dequeue(&ep->tx_req_head);
/* Pad each frame up to a whole number of credits. */
731 trans_len = skb->len + sizeof(*hdr);
732 credit_remainder = trans_len % ep->tx_credit_size;
734 if (credit_remainder != 0) {
735 credit_pad = ep->tx_credit_size - credit_remainder;
736 trans_len += credit_pad;
/* Consume credits for the whole bundle assembled so far. */
739 ret = ath10k_htc_consume_credit(ep,
740 bundle_buf + trans_len - bundle_skb->data,
743 skb_queue_head(&ep->tx_req_head, skb);
/* Bundle buffer full: flush it before continuing. */
747 if (bundles_left < trans_len) {
748 bundle_skb->len = bundle_buf - bundle_skb->data;
749 ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
752 skb_queue_head(&ep->tx_req_head, skb);
/* Last queued frame goes out unbundled. */
756 if (skb_queue_len(&ep->tx_req_head) == 0) {
757 ath10k_htc_send_one_skb(ep, skb);
761 if (ep->tx_credit_flow_enabled &&
762 ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE) {
763 skb_queue_head(&ep->tx_req_head, skb);
768 ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
769 bundle_skb = dev_alloc_skb(bundles_left);
772 skb_queue_head(&ep->tx_req_head, skb);
775 bundle_buf = bundle_skb->data;
776 skb_queue_head_init(&tx_save_head);
/* Append the prepared frame to the bundle buffer. */
779 skb_push(skb, sizeof(struct ath10k_htc_hdr));
780 ath10k_htc_prepare_tx_skb(ep, skb);
782 memcpy(bundle_buf, skb->data, skb->len);
783 hdr = (struct ath10k_htc_hdr *)bundle_buf;
784 hdr->flags |= ATH10K_HTC_FLAG_SEND_BUNDLE;
785 hdr->pad_len = __cpu_to_le16(credit_pad);
786 bundle_buf += trans_len;
787 bundles_left -= trans_len;
788 skb_queue_tail(&tx_save_head, skb);
/* Flush any remaining partial bundle. */
791 if (bundle_buf != bundle_skb->data) {
792 bundle_skb->len = bundle_buf - bundle_skb->data;
793 ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
795 dev_kfree_skb_any(bundle_skb);
/* Workqueue handler for bundled tx: for each bundling endpoint, send
 * queued frames as bundles when enough are pending, otherwise drain
 * them one at a time.
 */
801 static void ath10k_htc_bundle_tx_work(struct work_struct *work)
803 struct ath10k *ar = container_of(work, struct ath10k, bundle_tx_work);
804 struct ath10k_htc_ep *ep;
808 for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
809 ep = &ar->htc.endpoint[i];
814 ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx work eid %d count %d\n",
815 ep->eid, skb_queue_len(&ep->tx_req_head));
817 if (skb_queue_len(&ep->tx_req_head) >=
818 ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE) {
819 ath10k_htc_send_bundle_skbs(ep);
821 skb = skb_dequeue(&ep->tx_req_head);
825 ath10k_htc_send_one_skb(ep, skb);
/* Workqueue handler that drains each bundling endpoint's
 * tx_complete_head queue and notifies per-skb tx completion.
 * NOTE(review): 'eid' initialization and the inner dequeue loop
 * bounds are on elided lines.
 */
830 static void ath10k_htc_tx_complete_work(struct work_struct *work)
832 struct ath10k *ar = container_of(work, struct ath10k, tx_complete_work);
833 struct ath10k_htc_ep *ep;
834 enum ath10k_htc_ep_id eid;
838 for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
839 ep = &ar->htc.endpoint[i];
841 if (ep->bundle_tx && eid == ar->htt.eid) {
842 ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx complete eid %d pending complete count%d\n",
843 ep->eid, skb_queue_len(&ep->tx_complete_head));
846 skb = skb_dequeue(&ep->tx_complete_head);
849 ath10k_htc_notify_tx_completion(ep, skb);
/* High-latency (SDIO/USB) send entry point. Rejects frames that do
 * not fit into one credit; bundling endpoints queue the skb and kick
 * the bundle worker, others fall through to the plain send path.
 */
855 int ath10k_htc_send_hl(struct ath10k_htc *htc,
856 enum ath10k_htc_ep_id eid,
859 struct ath10k_htc_ep *ep = &htc->endpoint[eid];
860 struct ath10k *ar = htc->ar;
862 if (sizeof(struct ath10k_htc_hdr) + skb->len > ep->tx_credit_size) {
863 ath10k_dbg(ar, ATH10K_DBG_HTC, "tx exceed max len %d\n", skb->len);
867 ath10k_dbg(ar, ATH10K_DBG_HTC, "htc send hl eid %d bundle %d tx count %d len %d\n",
868 eid, ep->bundle_tx, skb_queue_len(&ep->tx_req_head), skb->len);
871 skb_queue_tail(&ep->tx_req_head, skb);
872 queue_work(ar->workqueue, &ar->bundle_tx_work);
875 return ath10k_htc_send(htc, eid, skb);
/* Enable bundled tx on an endpoint when the negotiated bundle size is
 * large enough, and initialize its request/completion queues.
 * NOTE(review): the second operand of the && condition is elided.
 */
879 void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep)
881 if (ep->htc->max_msgs_per_htc_bundle >= ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE &&
883 ep->bundle_tx = true;
884 skb_queue_head_init(&ep->tx_req_head);
885 skb_queue_head_init(&ep->tx_complete_head);
/* Stop high-latency tx: cancel both tx work items, then drop any
 * frames still queued on each endpoint's request queue.
 */
889 void ath10k_htc_stop_hl(struct ath10k *ar)
891 struct ath10k_htc_ep *ep;
894 cancel_work_sync(&ar->bundle_tx_work);
895 cancel_work_sync(&ar->tx_complete_work);
897 for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
898 ep = &ar->htc.endpoint[i];
903 ath10k_dbg(ar, ATH10K_DBG_HTC, "stop tx work eid %d count %d\n",
904 ep->eid, skb_queue_len(&ep->tx_req_head));
906 skb_queue_purge(&ep->tx_req_head);
/* Wait for the target's HTC READY message and record the negotiated
 * credit pool (count and size). Falls back to polling the copy
 * engines once if the completion interrupt is lost (a known KVM
 * PCI-passthrough quirk). If an extended READY message was received,
 * also record the alternate data credit size and the rx bundle limit,
 * then set up the two HL tx work items.
 */
910 int ath10k_htc_wait_target(struct ath10k_htc *htc)
912 struct ath10k *ar = htc->ar;
914 unsigned long time_left;
915 struct ath10k_htc_msg *msg;
918 time_left = wait_for_completion_timeout(&htc->ctl_resp,
919 ATH10K_HTC_WAIT_TIMEOUT_HZ);
921 /* Workaround: In some cases the PCI HIF doesn't
922 * receive interrupt for the control response message
923 * even if the buffer was completed. It is suspected
924 * iomap writes unmasking PCI CE irqs aren't propagated
925 * properly in KVM PCI-passthrough sometimes.
927 ath10k_warn(ar, "failed to receive control response completion, polling..\n");
929 for (i = 0; i < CE_COUNT; i++)
930 ath10k_hif_send_complete_check(htc->ar, i, 1);
933 wait_for_completion_timeout(&htc->ctl_resp,
934 ATH10K_HTC_WAIT_TIMEOUT_HZ);
941 ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
/* Response must be at least a full READY message. */
945 if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
946 ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
947 htc->control_resp_len);
951 msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
952 message_id = __le16_to_cpu(msg->hdr.message_id);
954 if (message_id != ATH10K_HTC_MSG_READY_ID) {
955 ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
/* Some firmware lies about credits; optionally force a single one. */
959 if (ar->hw_params.use_fw_tx_credits)
960 htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count);
962 htc->total_transmit_credits = 1;
964 htc->target_credit_size = __le16_to_cpu(msg->ready.credit_size);
966 ath10k_dbg(ar, ATH10K_DBG_HTC,
967 "Target ready! transmit resources: %d size:%d actual credits:%d\n",
968 htc->total_transmit_credits,
969 htc->target_credit_size,
970 msg->ready.credit_count);
972 if ((htc->total_transmit_credits == 0) ||
973 (htc->target_credit_size == 0)) {
974 ath10k_err(ar, "Invalid credit size received\n");
978 /* The only way to determine if the ready message is an extended
979 * message is from the size.
981 if (htc->control_resp_len >=
982 sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
983 htc->alt_data_credit_size =
984 __le16_to_cpu(msg->ready_ext.reserved) &
985 ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK;
986 htc->max_msgs_per_htc_bundle =
987 min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
988 HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
989 ath10k_dbg(ar, ATH10K_DBG_HTC,
990 "Extended ready message RX bundle size %d alt size %d\n",
991 htc->max_msgs_per_htc_bundle,
992 htc->alt_data_credit_size);
995 INIT_WORK(&ar->bundle_tx_work, ath10k_htc_bundle_tx_work);
996 INIT_WORK(&ar->tx_complete_work, ath10k_htc_tx_complete_work);
/* Enable or disable tx credit flow control on a single endpoint. */
1001 void ath10k_htc_change_tx_credit_flow(struct ath10k_htc *htc,
1002 enum ath10k_htc_ep_id eid,
1005 struct ath10k *ar = htc->ar;
1006 struct ath10k_htc_ep *ep = &ar->htc.endpoint[eid];
1008 ep->tx_credit_flow_enabled = enable;
/* Connect an HTC service: send a CONNECT_SERVICE message on EP0, wait
 * for the target's response, then initialize the assigned endpoint
 * (service id, queue depth, message/credit sizes, callbacks, HIF
 * pipes) and report the assignment back through conn_resp. The
 * pseudo control service (RSVD_CTRL) skips the exchange and maps
 * straight to EP0. Credit flow control is left enabled only for the
 * WMI control service. NOTE(review): several error returns and goto
 * labels sit on elided lines.
 */
1011 int ath10k_htc_connect_service(struct ath10k_htc *htc,
1012 struct ath10k_htc_svc_conn_req *conn_req,
1013 struct ath10k_htc_svc_conn_resp *conn_resp)
1015 struct ath10k *ar = htc->ar;
1016 struct ath10k_htc_msg *msg;
1017 struct ath10k_htc_conn_svc *req_msg;
1018 struct ath10k_htc_conn_svc_response resp_msg_dummy;
1019 struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
1020 enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
1021 struct ath10k_htc_ep *ep;
1022 struct sk_buff *skb;
1023 unsigned int max_msg_size = 0;
1025 unsigned long time_left;
1026 bool disable_credit_flow_ctrl = false;
1027 u16 message_id, service_id, flags = 0;
1030 /* special case for HTC pseudo control service */
1031 if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
1032 disable_credit_flow_ctrl = true;
1033 assigned_eid = ATH10K_HTC_EP_0;
1034 max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
1035 memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
1039 tx_alloc = ath10k_htc_get_credit_allocation(htc,
1040 conn_req->service_id);
1042 ath10k_dbg(ar, ATH10K_DBG_BOOT,
1043 "boot htc service %s does not allocate target credits\n",
1044 htc_service_name(conn_req->service_id));
1046 skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
1048 ath10k_err(ar, "Failed to allocate HTC packet\n");
1052 length = sizeof(msg->hdr) + sizeof(msg->connect_service);
1053 skb_put(skb, length);
1054 memset(skb->data, 0, length);
1056 msg = (struct ath10k_htc_msg *)skb->data;
1057 msg->hdr.message_id =
1058 __cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);
1060 flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
1062 /* Only enable credit flow control for WMI ctrl service */
1063 if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
1064 flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
1065 disable_credit_flow_ctrl = true;
1068 req_msg = &msg->connect_service;
1069 req_msg->flags = __cpu_to_le16(flags);
1070 req_msg->service_id = __cpu_to_le16(conn_req->service_id);
/* Re-arm before sending so the rx path can signal the response. */
1072 reinit_completion(&htc->ctl_resp);
1074 status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
1080 /* wait for response */
1081 time_left = wait_for_completion_timeout(&htc->ctl_resp,
1082 ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
1084 ath10k_err(ar, "Service connect timeout\n");
1088 /* we controlled the buffer creation, it's aligned */
1089 msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
1090 resp_msg = &msg->connect_service_response;
1091 message_id = __le16_to_cpu(msg->hdr.message_id);
1092 service_id = __le16_to_cpu(resp_msg->service_id);
1094 if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
1095 (htc->control_resp_len < sizeof(msg->hdr) +
1096 sizeof(msg->connect_service_response))) {
1097 ath10k_err(ar, "Invalid resp message ID 0x%x", message_id);
1101 ath10k_dbg(ar, ATH10K_DBG_HTC,
1102 "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
1103 htc_service_name(service_id),
1104 resp_msg->status, resp_msg->eid);
1106 conn_resp->connect_resp_code = resp_msg->status;
1108 /* check response status */
1109 if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
1110 ath10k_err(ar, "HTC Service %s connect request failed: 0x%x)\n",
1111 htc_service_name(service_id),
1116 assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
1117 max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);
/* Sanity-check the target's assignment before touching state. */
1121 if (assigned_eid >= ATH10K_HTC_EP_COUNT)
1124 if (max_msg_size == 0)
1127 ep = &htc->endpoint[assigned_eid];
1128 ep->eid = assigned_eid;
/* Refuse to reconnect an endpoint already bound to a service. */
1130 if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
1133 /* return assigned endpoint to caller */
1134 conn_resp->eid = assigned_eid;
1135 conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);
1137 /* setup the endpoint */
1138 ep->service_id = conn_req->service_id;
1139 ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
1140 ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
1141 ep->tx_credits = tx_alloc;
1142 ep->tx_credit_size = htc->target_credit_size;
/* HTT data may use a smaller, alternate credit size. */
1144 if (conn_req->service_id == ATH10K_HTC_SVC_ID_HTT_DATA_MSG &&
1145 htc->alt_data_credit_size != 0)
1146 ep->tx_credit_size = htc->alt_data_credit_size;
1148 /* copy all the callbacks */
1149 ep->ep_ops = conn_req->ep_ops;
1151 status = ath10k_hif_map_service_to_pipe(htc->ar,
1156 ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC service id: %d\n",
1161 ath10k_dbg(ar, ATH10K_DBG_BOOT,
1162 "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
1163 htc_service_name(ep->service_id), ep->ul_pipe_id,
1164 ep->dl_pipe_id, ep->eid);
1166 if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
1167 ep->tx_credit_flow_enabled = false;
1168 ath10k_dbg(ar, ATH10K_DBG_BOOT,
1169 "boot htc service '%s' eid %d TX flow control disabled\n",
1170 htc_service_name(ep->service_id), assigned_eid);
/* Allocate a tx skb with headroom reserved for the HTC header so
 * callers can fill 'size' bytes of payload; warns if the resulting
 * data pointer is not 4-byte aligned as firmware requires.
 * NOTE(review): the NULL check and return are on elided lines.
 */
1176 struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
1178 struct sk_buff *skb;
1180 skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
1184 skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
1186 /* FW/HTC requires 4-byte aligned streams */
1187 if (!IS_ALIGNED((unsigned long)skb->data, 4))
1188 ath10k_warn(ar, "Unaligned HTC tx skb\n");
/* Rx handler for the pktlog service: emit the frame to the
 * ath10k_htt_pktlog tracepoint and free it.
 */
1193 static void ath10k_htc_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb)
1195 trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
1196 dev_kfree_skb_any(skb);
/* Connect the HTT_LOG (pktlog) service with an rx-only endpoint whose
 * handler just traces and frees incoming frames.
 */
1199 static int ath10k_htc_pktlog_connect(struct ath10k *ar)
1201 struct ath10k_htc_svc_conn_resp conn_resp;
1202 struct ath10k_htc_svc_conn_req conn_req;
1205 memset(&conn_req, 0, sizeof(conn_req));
1206 memset(&conn_resp, 0, sizeof(conn_resp));
1208 conn_req.ep_ops.ep_tx_complete = NULL;
1209 conn_req.ep_ops.ep_rx_complete = ath10k_htc_pktlog_process_rx;
1210 conn_req.ep_ops.ep_tx_credits = NULL;
1212 /* connect to control service */
1213 conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_LOG_MSG;
1214 status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
1216 ath10k_warn(ar, "failed to connect to PKTLOG service: %d\n",
/* Probe whether the HIF layer can map the pktlog service to a pipe;
 * used to decide if the pktlog connect should be attempted at all.
 * NOTE(review): the return statements are on elided lines.
 */
1224 static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar)
1230 status = ath10k_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_HTT_LOG_MSG,
1234 ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC pktlog service id: %d\n",
1235 ATH10K_HTC_SVC_ID_HTT_LOG_MSG);
/* Finish HTC bringup: send SETUP_COMPLETE(_EX) to the target (with
 * extra rx-bundling parameters on SDIO), then connect the pktlog
 * service if the platform supports it.
 */
1243 int ath10k_htc_start(struct ath10k_htc *htc)
1245 struct ath10k *ar = htc->ar;
1246 struct sk_buff *skb;
1248 struct ath10k_htc_msg *msg;
1250 skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
1254 skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
1255 memset(skb->data, 0, skb->len);
1257 msg = (struct ath10k_htc_msg *)skb->data;
1258 msg->hdr.message_id =
1259 __cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);
1261 if (ar->hif.bus == ATH10K_BUS_SDIO) {
1262 /* Extra setup params used by SDIO */
1263 msg->setup_complete_ext.flags =
1264 __cpu_to_le32(ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN);
1265 msg->setup_complete_ext.max_msgs_per_bundled_recv =
1266 htc->max_msgs_per_htc_bundle;
1268 ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
1270 status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
/* Pktlog is optional; a failed connect is logged, not fatal here. */
1276 if (ath10k_htc_pktlog_svc_supported(ar)) {
1277 status = ath10k_htc_pktlog_connect(ar);
1279 ath10k_err(ar, "failed to connect to pktlog: %d\n", status);
1287 /* registered target arrival callback from the HIF layer */
1288 int ath10k_htc_init(struct ath10k *ar)
1291 struct ath10k_htc *htc = &ar->htc;
1292 struct ath10k_htc_svc_conn_req conn_req;
1293 struct ath10k_htc_svc_conn_resp conn_resp;
1295 spin_lock_init(&htc->tx_lock);
1297 ath10k_htc_reset_endpoint_states(htc);
1301 /* setup our pseudo HTC control endpoint connection */
1302 memset(&conn_req, 0, sizeof(conn_req));
1303 memset(&conn_resp, 0, sizeof(conn_resp));
1304 conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
1305 conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
1306 conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
1307 conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;
1309 /* connect fake service */
1310 status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
1312 ath10k_err(ar, "could not connect to htc service (%d)\n",
1317 init_completion(&htc->ctl_resp);