/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 */
#if !defined(__HFI1_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_TX_H

#include <linux/tracepoint.h>
#include <linux/trace_seq.h>

#include "hfi.h"
#include "mad.h"
#include "sdma.h"
#include "ipoib.h"
#include "user_sdma.h"

const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);

#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)

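/*
 * Editor's sketch (not the in-tree body, which lives in trace.c): a
 * parse_sdma_flags() implementation is expected to render the desc0/desc1
 * flag bits into the per-CPU trace_seq and return a pointer to the
 * rendered string, roughly:
 *
 *	const char *parse_sdma_flags(struct trace_seq *p,
 *				     u64 desc0, u64 desc1)
 *	{
 *		const char *ret = trace_seq_buffer_ptr(p);
 *
 *		trace_seq_printf(p, "%c%c%c%c",
 *			(desc1 & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-',
 *			(desc1 & SDMA_DESC1_HEAD_TO_HOST_FLAG) ? 'H' : '-',
 *			(desc0 & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-',
 *			(desc0 & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-');
 *		trace_seq_putc(p, 0);
 *		return ret;
 *	}
 *
 * Deferring the formatting to a trace_seq helper keeps the tracepoint
 * fast path to a raw copy of the two descriptor words.
 */
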
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_tx

TRACE_EVENT(hfi1_piofree,
	    TP_PROTO(struct send_context *sc, int extra),
	    TP_ARGS(sc, extra),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
			     __field(u32, sw_index)
			     __field(u32, hw_context)
			     __field(int, extra)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
			   __entry->sw_index = sc->sw_index;
			   __entry->hw_context = sc->hw_context;
			   __entry->extra = extra;
			   ),
	    TP_printk("[%s] ctxt %u(%u) extra %d",
		      __get_str(dev),
		      __entry->sw_index,
		      __entry->hw_context,
		      __entry->extra
		      )
);

TRACE_EVENT(hfi1_wantpiointr,
	    TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
	    TP_ARGS(sc, needint, credit_ctrl),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
			     __field(u32, sw_index)
			     __field(u32, hw_context)
			     __field(u32, needint)
			     __field(u64, credit_ctrl)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
			   __entry->sw_index = sc->sw_index;
			   __entry->hw_context = sc->hw_context;
			   __entry->needint = needint;
			   __entry->credit_ctrl = credit_ctrl;
			   ),
	    TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
		      __get_str(dev),
		      __entry->sw_index,
		      __entry->hw_context,
		      __entry->needint,
		      (unsigned long long)__entry->credit_ctrl
		      )
);

DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
		    TP_PROTO(struct rvt_qp *qp, u32 flags),
		    TP_ARGS(qp, flags),
		    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		    __field(u32, qpn)
		    __field(u32, flags)
		    __field(u32, s_flags)
		    __field(u32, ps_flags)
		    __field(unsigned long, iow_flags)
		    ),
		    TP_fast_assign(
		    DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		    __entry->flags = flags;
		    __entry->qpn = qp->ibqp.qp_num;
		    __entry->s_flags = qp->s_flags;
		    __entry->ps_flags =
			((struct hfi1_qp_priv *)qp->priv)->s_flags;
		    __entry->iow_flags =
			((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
		    ),
		    TP_printk(
		    "[%s] qpn 0x%x flags 0x%x s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx",
		    __get_str(dev),
		    __entry->qpn,
		    __entry->flags,
		    __entry->s_flags,
		    __entry->ps_flags,
		    __entry->iow_flags
		    )
);

DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
	     TP_PROTO(struct rvt_qp *qp, u32 flags),
	     TP_ARGS(qp, flags));

DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
	     TP_PROTO(struct rvt_qp *qp, u32 flags),
	     TP_ARGS(qp, flags));

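/*
 * Editor's note: DEFINE_EVENT() stamps out instances that share the
 * class's field layout and format string, so hfi1_qpwakeup and
 * hfi1_qpsleep cost one copy of the template code.  A call site looks
 * like trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO) (flag value illustrative).
 */
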
TRACE_EVENT(hfi1_sdma_descriptor,
	    TP_PROTO(struct sdma_engine *sde,
		     u64 desc0,
		     u64 desc1,
		     u16 e,
		     void *descp),
	    TP_ARGS(sde, desc0, desc1, e, descp),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
	    __field(void *, descp)
	    __field(u64, desc0)
	    __field(u64, desc1)
	    __field(u16, e)
	    __field(u8, idx)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
	    __entry->desc0 = desc0;
	    __entry->desc1 = desc1;
	    __entry->idx = sde->this_idx;
	    __entry->descp = descp;
	    __entry->e = e;
	    ),
	    TP_printk(
	    "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
	    __get_str(dev),
	    __entry->idx,
	    __parse_sdma_flags(__entry->desc0, __entry->desc1),
	    (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
	    SDMA_DESC0_PHY_ADDR_MASK,
	    (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
	    SDMA_DESC1_GENERATION_MASK),
	    (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
	    SDMA_DESC0_BYTE_COUNT_MASK),
	    __entry->desc0,
	    __entry->desc1,
	    __entry->descp,
	    __entry->e
	    )
);

TRACE_EVENT(hfi1_sdma_engine_select,
	    TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
	    TP_ARGS(dd, sel, vl, idx),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u32, sel)
			     __field(u8, vl)
			     __field(u8, idx)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->sel = sel;
			   __entry->vl = vl;
			   __entry->idx = idx;
			   ),
	    TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
		      __get_str(dev),
		      __entry->idx,
		      __entry->sel,
		      __entry->vl
		      )
);

TRACE_EVENT(hfi1_sdma_user_free_queues,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
	    TP_ARGS(dd, ctxt, subctxt),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   ),
	    TP_printk("[%s] SDMA [%u:%u] Freeing user SDMA queues",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt
		      )
);

TRACE_EVENT(hfi1_sdma_user_process_request,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		     u16 comp_idx),
	    TP_ARGS(dd, ctxt, subctxt, comp_idx),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     __field(u16, comp_idx)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   __entry->comp_idx = comp_idx;
			   ),
	    TP_printk("[%s] SDMA [%u:%u] Using req/comp entry: %u",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->comp_idx
		      )
);

DECLARE_EVENT_CLASS(/* value */
	hfi1_sdma_value_template,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, u16 comp_idx,
		 u32 value),
	TP_ARGS(dd, ctxt, subctxt, comp_idx, value),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			 __field(u16, ctxt)
			 __field(u16, subctxt)
			 __field(u16, comp_idx)
			 __field(u32, value)
		),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		       __entry->ctxt = ctxt;
		       __entry->subctxt = subctxt;
		       __entry->comp_idx = comp_idx;
		       __entry->value = value;
		),
	TP_printk("[%s] SDMA [%u:%u:%u] value: %u",
		  __get_str(dev),
		  __entry->ctxt,
		  __entry->subctxt,
		  __entry->comp_idx,
		  __entry->value
		)
);

DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_initial_tidoffset,
	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		      u16 comp_idx, u32 tidoffset),
	     TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset));

DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_data_length,
	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		      u16 comp_idx, u32 data_len),
	     TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));

DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_compute_length,
	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		      u16 comp_idx, u32 data_len),
	     TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));

TRACE_EVENT(hfi1_sdma_user_tid_info,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		     u16 comp_idx, u32 tidoffset, u32 units, u8 shift),
	    TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset, units, shift),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     __field(u16, comp_idx)
			     __field(u32, tidoffset)
			     __field(u32, units)
			     __field(u8, shift)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   __entry->comp_idx = comp_idx;
			   __entry->tidoffset = tidoffset;
			   __entry->units = units;
			   __entry->shift = shift;
			   ),
	    TP_printk("[%s] SDMA [%u:%u:%u] TID offset %ubytes %uunits om %u",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->comp_idx,
		      __entry->tidoffset,
		      __entry->units,
		      __entry->shift
		      )
);

TRACE_EVENT(hfi1_sdma_request,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		     unsigned long dim),
	    TP_ARGS(dd, ctxt, subctxt, dim),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     __field(unsigned long, dim)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   __entry->dim = dim;
			   ),
	    TP_printk("[%s] SDMA from %u:%u (%lu)",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->dim
		      )
);

DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
		    TP_PROTO(struct sdma_engine *sde, u64 status),
		    TP_ARGS(sde, status),
		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
				     __field(u64, status)
				     __field(u8, idx)
				     ),
		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
				   __entry->status = status;
				   __entry->idx = sde->this_idx;
				   ),
		    TP_printk("[%s] SDE(%u) status %llx",
			      __get_str(dev),
			      __entry->idx,
			      (unsigned long long)__entry->status
			      )
);

DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
	     TP_PROTO(struct sdma_engine *sde, u64 status),
	     TP_ARGS(sde, status)
);

DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
	     TP_PROTO(struct sdma_engine *sde, u64 status),
	     TP_ARGS(sde, status)
);

DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
		    TP_PROTO(struct sdma_engine *sde, int aidx),
		    TP_ARGS(sde, aidx),
		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
				     __field(int, aidx)
				     __field(u8, idx)
				     ),
		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
				   __entry->idx = sde->this_idx;
				   __entry->aidx = aidx;
				   ),
		    TP_printk("[%s] SDE(%u) aidx %d",
			      __get_str(dev),
			      __entry->idx,
			      __entry->aidx
			      )
);

DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
	     TP_PROTO(struct sdma_engine *sde, int aidx),
	     TP_ARGS(sde, aidx));

DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
	     TP_PROTO(struct sdma_engine *sde, int aidx),
	     TP_ARGS(sde, aidx));

#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
TRACE_EVENT(hfi1_sdma_progress,
	    TP_PROTO(struct sdma_engine *sde,
		     u16 hwhead,
		     u16 swhead,
		     struct sdma_txreq *txp
		     ),
	    TP_ARGS(sde, hwhead, swhead, txp),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
			     __field(u64, sn)
			     __field(u16, hwhead)
			     __field(u16, swhead)
			     __field(u16, txnext)
			     __field(u16, tx_tail)
			     __field(u16, tx_head)
			     __field(u8, idx)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
			   __entry->hwhead = hwhead;
			   __entry->swhead = swhead;
			   __entry->tx_tail = sde->tx_tail;
			   __entry->tx_head = sde->tx_head;
			   __entry->txnext = txp ? txp->next_descq_idx : ~0;
			   __entry->idx = sde->this_idx;
			   __entry->sn = txp ? txp->sn : ~0;
			   ),
	    TP_printk(
	    "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
	    __get_str(dev),
	    __entry->idx,
	    __entry->sn,
	    __entry->hwhead,
	    __entry->swhead,
	    __entry->txnext,
	    __entry->tx_head,
	    __entry->tx_tail
	    )
);
#else
TRACE_EVENT(hfi1_sdma_progress,
	    TP_PROTO(struct sdma_engine *sde,
		     u16 hwhead, u16 swhead,
		     struct sdma_txreq *txp
		     ),
	    TP_ARGS(sde, hwhead, swhead, txp),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
			     __field(u16, hwhead)
			     __field(u16, swhead)
			     __field(u16, txnext)
			     __field(u16, tx_tail)
			     __field(u16, tx_head)
			     __field(u8, idx)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
			   __entry->hwhead = hwhead;
			   __entry->swhead = swhead;
			   __entry->tx_tail = sde->tx_tail;
			   __entry->tx_head = sde->tx_head;
			   __entry->txnext = txp ? txp->next_descq_idx : ~0;
			   __entry->idx = sde->this_idx;
			   ),
	    TP_printk(
	    "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
	    __get_str(dev),
	    __entry->idx,
	    __entry->hwhead,
	    __entry->swhead,
	    __entry->txnext,
	    __entry->tx_head,
	    __entry->tx_tail
	    )
);
#endif

DECLARE_EVENT_CLASS(hfi1_sdma_sn,
		    TP_PROTO(struct sdma_engine *sde, u64 sn),
		    TP_ARGS(sde, sn),
		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
				     __field(u64, sn)
				     __field(u8, idx)
				     ),
		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
				   __entry->sn = sn;
				   __entry->idx = sde->this_idx;
				   ),
		    TP_printk("[%s] SDE(%u) sn %llu",
			      __get_str(dev),
			      __entry->idx,
			      __entry->sn
			      )
);

DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
	     TP_PROTO(
	     struct sdma_engine *sde,
	     u64 sn
	     ),
	     TP_ARGS(sde, sn)
);

DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
	     TP_PROTO(struct sdma_engine *sde, u64 sn),
	     TP_ARGS(sde, sn)
);

#define USDMA_HDR_FORMAT \
	"[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"

TRACE_EVENT(hfi1_sdma_user_header,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
		     struct hfi1_pkt_header *hdr, u32 tidval),
	    TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
	    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd)
		    __field(u16, ctxt)
		    __field(u8, subctxt)
		    __field(u16, req)
		    __field(u32, pbc0) __field(u32, pbc1)
		    __field(u32, lrh0) __field(u32, lrh1)
		    __field(u32, bth0) __field(u32, bth1) __field(u32, bth2)
		    __field(u32, kdeth0) __field(u32, kdeth1)
		    __field(u32, kdeth2) __field(u32, kdeth3)
		    __field(u32, kdeth4) __field(u32, kdeth5)
		    __field(u32, kdeth6) __field(u32, kdeth7)
		    __field(u32, kdeth8)
		    __field(u32, tidval)
		    ),
	    TP_fast_assign(
		    __le32 *pbc = (__le32 *)hdr->pbc;
		    __be32 *lrh = (__be32 *)hdr->lrh;
		    __be32 *bth = (__be32 *)hdr->bth;
		    __le32 *kdeth = (__le32 *)&hdr->kdeth;

		    DD_DEV_ASSIGN(dd);
		    __entry->ctxt = ctxt;
		    __entry->subctxt = subctxt;
		    __entry->req = req;
		    __entry->pbc0 = le32_to_cpu(pbc[0]);
		    __entry->pbc1 = le32_to_cpu(pbc[1]);
		    __entry->lrh0 = be32_to_cpu(lrh[0]);
		    __entry->lrh1 = be32_to_cpu(lrh[1]);
		    __entry->bth0 = be32_to_cpu(bth[0]);
		    __entry->bth1 = be32_to_cpu(bth[1]);
		    __entry->bth2 = be32_to_cpu(bth[2]);
		    __entry->kdeth0 = le32_to_cpu(kdeth[0]);
		    __entry->kdeth1 = le32_to_cpu(kdeth[1]);
		    __entry->kdeth2 = le32_to_cpu(kdeth[2]);
		    __entry->kdeth3 = le32_to_cpu(kdeth[3]);
		    __entry->kdeth4 = le32_to_cpu(kdeth[4]);
		    __entry->kdeth5 = le32_to_cpu(kdeth[5]);
		    __entry->kdeth6 = le32_to_cpu(kdeth[6]);
		    __entry->kdeth7 = le32_to_cpu(kdeth[7]);
		    __entry->kdeth8 = le32_to_cpu(kdeth[8]);
		    __entry->tidval = tidval;
		    ),
	    TP_printk(USDMA_HDR_FORMAT,
		      __get_str(dev),
		      __entry->ctxt, __entry->subctxt, __entry->req,
		      __entry->pbc1, __entry->pbc0,
		      __entry->lrh0, __entry->lrh1,
		      __entry->bth0, __entry->bth1, __entry->bth2,
		      __entry->kdeth0, __entry->kdeth1, __entry->kdeth2,
		      __entry->kdeth3, __entry->kdeth4, __entry->kdeth5,
		      __entry->kdeth6, __entry->kdeth7, __entry->kdeth8,
		      __entry->tidval
		      )
);

#define SDMA_UREQ_FMT \
	"[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
TRACE_EVENT(hfi1_sdma_user_reqinfo,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
	    TP_ARGS(dd, ctxt, subctxt, i),
	    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd)
		    __field(u16, ctxt)
		    __field(u8, subctxt)
		    __field(u8, ver_opcode)
		    __field(u8, iovcnt)
		    __field(u16, npkts)
		    __field(u16, fragsize)
		    __field(u16, comp_idx)
		    ),
	    TP_fast_assign(
		    DD_DEV_ASSIGN(dd);
		    __entry->ctxt = ctxt;
		    __entry->subctxt = subctxt;
		    __entry->ver_opcode = i[0] & 0xff;
		    __entry->iovcnt = (i[0] >> 8) & 0xff;
		    __entry->npkts = i[1];
		    __entry->fragsize = i[2];
		    __entry->comp_idx = i[3];
		    ),
	    TP_printk(SDMA_UREQ_FMT,
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->ver_opcode,
		      __entry->iovcnt,
		      __entry->npkts,
		      __entry->fragsize,
		      __entry->comp_idx
		      )
);

#define usdma_complete_name(st) { st, #st }
#define show_usdma_complete_state(st) \
	__print_symbolic(st, \
			 usdma_complete_name(FREE), \
			 usdma_complete_name(QUEUED), \
			 usdma_complete_name(COMPLETE), \
			 usdma_complete_name(ERROR))

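/*
 * Editor's note: __print_symbolic() resolves the numeric state to its
 * name only when the ring buffer is read, so the tracepoint fast path
 * records just the integer.  FREE/QUEUED/COMPLETE/ERROR are the
 * hfi1_sdma_comp_state values from user_sdma.h; a QUEUED entry renders
 * as e.g. "... SDMA completion state QUEUED (1)".
 */
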
TRACE_EVENT(hfi1_sdma_user_completion,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
		     u8 state, int code),
	    TP_ARGS(dd, ctxt, subctxt, idx, state, code),
	    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd)
		    __field(u16, ctxt)
		    __field(u8, subctxt)
		    __field(u16, idx)
		    __field(u8, state)
		    __field(int, code)
		    ),
	    TP_fast_assign(
		    DD_DEV_ASSIGN(dd);
		    __entry->ctxt = ctxt;
		    __entry->subctxt = subctxt;
		    __entry->idx = idx;
		    __entry->state = state;
		    __entry->code = code;
		    ),
	    TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
		      __get_str(dev), __entry->ctxt, __entry->subctxt,
		      __entry->idx, show_usdma_complete_state(__entry->state),
		      __entry->code
		      )
);

TRACE_EVENT(hfi1_usdma_defer,
	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
		     struct sdma_engine *sde,
		     struct iowait *wait),
	    TP_ARGS(pq, sde, wait),
	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
			     __field(struct hfi1_user_sdma_pkt_q *, pq)
			     __field(struct sdma_engine *, sde)
			     __field(struct iowait *, wait)
			     __field(int, engine)
			     __field(int, empty)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
			   __entry->pq = pq;
			   __entry->sde = sde;
			   __entry->wait = wait;
			   __entry->engine = sde->this_idx;
			   __entry->empty = list_empty(&__entry->wait->list);
			   ),
	    TP_printk("[%s] pq %llx sde %llx wait %llx engine %d empty %d",
		      __get_str(dev),
		      (unsigned long long)__entry->pq,
		      (unsigned long long)__entry->sde,
		      (unsigned long long)__entry->wait,
		      __entry->engine,
		      __entry->empty
		      )
);

TRACE_EVENT(hfi1_usdma_activate,
	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
		     struct iowait *wait,
		     int reason),
	    TP_ARGS(pq, wait, reason),
	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
			     __field(struct hfi1_user_sdma_pkt_q *, pq)
			     __field(struct iowait *, wait)
			     __field(int, reason)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
			   __entry->pq = pq;
			   __entry->wait = wait;
			   __entry->reason = reason;
			   ),
	    TP_printk("[%s] pq %llx wait %llx reason %d",
		      __get_str(dev),
		      (unsigned long long)__entry->pq,
		      (unsigned long long)__entry->wait,
		      __entry->reason
		      )
);

TRACE_EVENT(hfi1_usdma_we,
	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
		     int we_ret),
	    TP_ARGS(pq, we_ret),
	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
			     __field(struct hfi1_user_sdma_pkt_q *, pq)
			     __field(int, state)
			     __field(int, we_ret)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
			   __entry->pq = pq;
			   __entry->state = pq->state;
			   __entry->we_ret = we_ret;
			   ),
	    TP_printk("[%s] pq %llx state %d we_ret %d",
		      __get_str(dev),
		      (unsigned long long)__entry->pq,
		      __entry->state,
		      __entry->we_ret
		      )
);

const char *print_u32_array(struct trace_seq *, u32 *, int);
#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)

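/*
 * Editor's sketch (the in-tree definition lives in trace.c): the helper
 * is expected to print "len" u32s as space-separated hex into the
 * trace_seq and return a pointer to the rendered string, roughly:
 *
 *	const char *print_u32_array(struct trace_seq *p, u32 *arr, int len)
 *	{
 *		int i;
 *		const char *ret = trace_seq_buffer_ptr(p);
 *
 *		for (i = 0; i < len; i++)
 *			trace_seq_printf(p, "%s%#x",
 *					 i == 0 ? "" : " ", arr[i]);
 *		trace_seq_putc(p, 0);
 *		return ret;
 *	}
 */
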
TRACE_EVENT(hfi1_sdma_user_header_ahg,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
		     u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
	    TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
	    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd)
		    __field(u16, ctxt)
		    __field(u8, subctxt)
		    __field(u16, req)
		    __field(u8, sde)
		    __field(u8, idx)
		    __field(int, len)
		    __field(u32, tidval)
		    __array(u32, ahg, 10)
		    ),
	    TP_fast_assign(
		    DD_DEV_ASSIGN(dd);
		    __entry->ctxt = ctxt;
		    __entry->subctxt = subctxt;
		    __entry->req = req;
		    __entry->sde = sde;
		    __entry->idx = ahgidx;
		    __entry->len = len;
		    __entry->tidval = tidval;
		    memcpy(__entry->ahg, ahg, len * sizeof(u32));
		    ),
	    TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->req,
		      __entry->sde,
		      __entry->idx,
		      __entry->len - 1,
		      __print_u32_hex(__entry->ahg, __entry->len),
		      __entry->tidval
		      )
);

TRACE_EVENT(hfi1_sdma_state,
	    TP_PROTO(struct sdma_engine *sde,
		     const char *cstate,
		     const char *nstate
		     ),
	    TP_ARGS(sde, cstate, nstate),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
			     __string(curstate, cstate)
			     __string(newstate, nstate)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
			   __assign_str(curstate);
			   __assign_str(newstate);
			   ),
	    TP_printk("[%s] current state %s new state %s",
		      __get_str(dev),
		      __get_str(curstate),
		      __get_str(newstate)
		      )
);

754 "shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"
758 ((struct buffer_control *)__get_dynamic_array(bct))->field \
DECLARE_EVENT_CLASS(hfi1_bct_template,
		    TP_PROTO(struct hfi1_devdata *dd,
			     struct buffer_control *bc),
		    TP_ARGS(dd, bc),
		    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
				     __dynamic_array(u8, bct, sizeof(*bc))
				     ),
		    TP_fast_assign(DD_DEV_ASSIGN(dd);
				   memcpy(__get_dynamic_array(bct), bc,
					  sizeof(*bc));
				   ),
		    TP_printk(BCT_FORMAT,
			      BCT(overall_shared_limit),
			      BCT(vl[0].dedicated), BCT(vl[0].shared),
			      BCT(vl[1].dedicated), BCT(vl[1].shared),
			      BCT(vl[2].dedicated), BCT(vl[2].shared),
			      BCT(vl[3].dedicated), BCT(vl[3].shared),
			      BCT(vl[4].dedicated), BCT(vl[4].shared),
			      BCT(vl[5].dedicated), BCT(vl[5].shared),
			      BCT(vl[6].dedicated), BCT(vl[6].shared),
			      BCT(vl[7].dedicated), BCT(vl[7].shared),
			      BCT(vl[15].dedicated), BCT(vl[15].shared)
			      )
);

DEFINE_EVENT(hfi1_bct_template, bct_set,
	     TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
	     TP_ARGS(dd, bc));

DEFINE_EVENT(hfi1_bct_template, bct_get,
	     TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
	     TP_ARGS(dd, bc));

TRACE_EVENT(/* qp_send_completion */
	hfi1_qp_send_completion,
	TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx),
	TP_ARGS(qp, wqe, idx),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(struct rvt_swqe *, wqe)
		__field(u64, wr_id)
		__field(u32, qpn)
		__field(u32, qpt)
		__field(u32, length)
		__field(u32, idx)
		__field(u32, ssn)
		__field(enum ib_wr_opcode, opcode)
		__field(int, send_flags)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->wqe = wqe;
		__entry->wr_id = wqe->wr.wr_id;
		__entry->qpn = qp->ibqp.qp_num;
		__entry->qpt = qp->ibqp.qp_type;
		__entry->length = wqe->length;
		__entry->idx = idx;
		__entry->ssn = wqe->ssn;
		__entry->opcode = wqe->wr.opcode;
		__entry->send_flags = wqe->wr.send_flags;
	),
	TP_printk(/* print */
		"[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
		__get_str(dev),
		__entry->qpn,
		__entry->qpt,
		__entry->wqe,
		__entry->idx,
		__entry->wr_id,
		__entry->length,
		__entry->ssn,
		__entry->opcode,
		__entry->send_flags
	)
);

DECLARE_EVENT_CLASS(/* do_send */
	hfi1_do_send_template,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(bool, flag)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->flag = flag;
	),
	TP_printk(/* print */
		"[%s] qpn %x flag %d",
		__get_str(dev),
		__entry->qpn,
		__entry->flag
	)
);

DEFINE_EVENT(/* event */
	hfi1_do_send_template, hfi1_rc_do_send,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);

DEFINE_EVENT(/* event */
	hfi1_do_send_template, hfi1_rc_do_tid_send,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);

DEFINE_EVENT(/* event */
	hfi1_do_send_template, hfi1_rc_expired_time_slice,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);

DECLARE_EVENT_CLASS(/* AIP */
	hfi1_ipoib_txq_template,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(txq->priv->dd)
		__field(struct hfi1_ipoib_txq *, txq)
		__field(struct sdma_engine *, sde)
		__field(ulong, head)
		__field(ulong, tail)
		__field(uint, used)
		__field(uint, flow)
		__field(int, stops)
		__field(int, no_desc)
		__field(u8, idx)
		__field(u8, stopped)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(txq->priv->dd);
		__entry->txq = txq;
		__entry->sde = txq->sde;
		__entry->head = txq->tx_ring.head;
		__entry->tail = txq->tx_ring.tail;
		__entry->idx = txq->q_idx;
		__entry->used =
			txq->tx_ring.sent_txreqs -
			txq->tx_ring.complete_txreqs;
		__entry->flow = txq->flow.as_int;
		__entry->stops = atomic_read(&txq->tx_ring.stops);
		__entry->no_desc = atomic_read(&txq->tx_ring.no_desc);
		__entry->stopped =
		 __netif_subqueue_stopped(txq->priv->netdev, txq->q_idx);
	),
	TP_printk(/* print */
		"[%s] txq %llx idx %u sde %llx:%u cpu %d head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u",
		__get_str(dev),
		(unsigned long long)__entry->txq,
		__entry->idx,
		(unsigned long long)__entry->sde,
		__entry->sde ? __entry->sde->this_idx : 0,
		__entry->sde ? __entry->sde->cpu : 0,
		__entry->head,
		__entry->tail,
		__entry->flow,
		__entry->used,
		__entry->stops,
		__entry->no_desc,
		__entry->stopped
	)
);

DEFINE_EVENT(/* queue stop */
	hfi1_ipoib_txq_template, hfi1_txq_stop,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* queue wake */
	hfi1_ipoib_txq_template, hfi1_txq_wake,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* flow flush */
	hfi1_ipoib_txq_template, hfi1_flow_flush,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* flow switch */
	hfi1_ipoib_txq_template, hfi1_flow_switch,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* wakeup */
	hfi1_ipoib_txq_template, hfi1_txq_wakeup,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* full */
	hfi1_ipoib_txq_template, hfi1_txq_full,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* queued */
	hfi1_ipoib_txq_template, hfi1_txq_queued,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* xmit_stopped */
	hfi1_ipoib_txq_template, hfi1_txq_xmit_stopped,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* xmit_unstopped */
	hfi1_ipoib_txq_template, hfi1_txq_xmit_unstopped,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DECLARE_EVENT_CLASS(/* AIP */
	hfi1_ipoib_tx_template,
	TP_PROTO(struct ipoib_txreq *tx, u32 idx),
	TP_ARGS(tx, idx),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(tx->txq->priv->dd)
		__field(struct ipoib_txreq *, tx)
		__field(struct hfi1_ipoib_txq *, txq)
		__field(struct sk_buff *, skb)
		__field(ulong, idx)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(tx->txq->priv->dd);
		__entry->tx = tx;
		__entry->skb = tx->skb;
		__entry->txq = tx->txq;
		__entry->idx = idx;
	),
	TP_printk(/* print */
		"[%s] tx %llx txq %llx,%u skb %llx idx %lu",
		__get_str(dev),
		(unsigned long long)__entry->tx,
		(unsigned long long)__entry->txq,
		__entry->txq ? __entry->txq->q_idx : 0,
		(unsigned long long)__entry->skb,
		__entry->idx
	)
);

DEFINE_EVENT(/* produce */
	hfi1_ipoib_tx_template, hfi1_tx_produce,
	TP_PROTO(struct ipoib_txreq *tx, u32 idx),
	TP_ARGS(tx, idx)
);

DEFINE_EVENT(/* consume */
	hfi1_ipoib_tx_template, hfi1_tx_consume,
	TP_PROTO(struct ipoib_txreq *tx, u32 idx),
	TP_ARGS(tx, idx)
);

DEFINE_EVENT(/* alloc_tx */
	hfi1_ipoib_txq_template, hfi1_txq_alloc_tx,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* poll */
	hfi1_ipoib_txq_template, hfi1_txq_poll,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* complete */
	hfi1_ipoib_txq_template, hfi1_txq_complete,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

#endif /* __HFI1_TRACE_TX_H */

#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_tx
#include <trace/define_trace.h>