1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Copyright © 2022 Intel Corporation
7 #define TRACE_SYSTEM xe
9 #if !defined(_XE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
12 #include <linux/tracepoint.h>
13 #include <linux/types.h>
15 #include "xe_exec_queue_types.h"
16 #include "xe_gpu_scheduler_types.h"
17 #include "xe_gt_tlb_invalidation_types.h"
18 #include "xe_gt_types.h"
19 #include "xe_guc_exec_queue_types.h"
20 #include "xe_sched_job.h"
/*
 * Helpers that resolve a human-readable device name for the "dev=%s"
 * column of every trace event below.  Each variant walks from its object
 * back to the owning struct xe_device and calls dev_name() on it.
 */
23 #define __dev_name_xe(xe) dev_name((xe)->drm.dev)
/* Tile -> device name, via the tile's owning xe_device. */
24 #define __dev_name_tile(tile) __dev_name_xe(tile_to_xe((tile)))
/* GT -> device name, via the GT's owning xe_device. */
25 #define __dev_name_gt(gt) __dev_name_xe(gt_to_xe((gt)))
/* Exec queue -> device name, via the queue's GT. */
26 #define __dev_name_eq(q) __dev_name_gt((q)->gt)
/*
 * Event class: GT TLB invalidation fence lifecycle.  Records the device
 * name, the raw fence pointer and the fence's seqno at the trace hit.
 *
 * NOTE(review): this extraction is missing interior lines of the macro
 * (no TP_ARGS, no TP_STRUCT__entry/TP_fast_assign wrappers, no seqno
 * __field, no closing parens) -- verify against upstream
 * drivers/gpu/drm/xe/xe_trace.h before relying on this text.
 */
28 DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
29 TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
/* Payload: device name string plus the fence pointer itself. */
33 __string(dev, __dev_name_xe(xe))
34 __field(struct xe_gt_tlb_invalidation_fence *, fence)
/* Snapshot the fence pointer and its current seqno into the ring entry. */
40 __entry->fence = fence;
41 __entry->seqno = fence->seqno;
/* Rendered as: dev=<name>, fence=<ptr>, seqno=<n> */
44 TP_printk("dev=%s, fence=%p, seqno=%d",
45 __get_str(dev), __entry->fence, __entry->seqno)
/*
 * Concrete tracepoints of the xe_gt_tlb_invalidation_fence class, one per
 * stage of a TLB invalidation: create -> send -> recv -> signal, plus the
 * worker (work_func), the fence callback (cb) and the timeout path.
 *
 * NOTE(review): the TP_ARGS(xe, fence) line and closing ");" of each
 * DEFINE_EVENT are missing from this extraction -- verify upstream.
 */
48 DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_create,
49 TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
53 DEFINE_EVENT(xe_gt_tlb_invalidation_fence,
54 xe_gt_tlb_invalidation_fence_work_func,
55 TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
59 DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_cb,
60 TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
64 DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_send,
65 TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
69 DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_recv,
70 TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
74 DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_signal,
75 TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
79 DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_timeout,
80 TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
/*
 * Event class: exec queue state.  Snapshots the queue's engine class,
 * logical engine mask, GT id, width, GuC context id, GuC submission state
 * (read atomically) and flags.
 *
 * NOTE(review): the TP_STRUCT__entry declarations for gt_id, width,
 * guc_id and flags are missing from this extraction (they are assigned
 * below but never declared here), as are the TP_ARGS line and the
 * structural wrappers/closers -- verify against upstream xe_trace.h.
 */
84 DECLARE_EVENT_CLASS(xe_exec_queue,
85 TP_PROTO(struct xe_exec_queue *q),
89 __string(dev, __dev_name_eq(q))
90 __field(enum xe_engine_class, class)
91 __field(u32, logical_mask)
95 __field(u32, guc_state)
/* Copy queue identity/state fields into the ring entry. */
101 __entry->class = q->class;
102 __entry->logical_mask = q->logical_mask;
103 __entry->gt_id = q->gt->info.id;
104 __entry->width = q->width;
105 __entry->guc_id = q->guc->id;
/* atomic_read: guc->state is updated concurrently by the GuC backend. */
106 __entry->guc_state = atomic_read(&q->guc->state);
107 __entry->flags = q->flags;
110 TP_printk("dev=%s, %d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
111 __get_str(dev), __entry->class, __entry->logical_mask,
112 __entry->gt_id, __entry->width, __entry->guc_id,
113 __entry->guc_state, __entry->flags)
/*
 * Concrete tracepoints of the xe_exec_queue class, covering the queue's
 * full lifecycle: create/submit/register through scheduling transitions,
 * reset/error paths, and close/kill/cleanup/destroy.
 *
 * NOTE(review): the TP_ARGS(q) line and closing ");" of each DEFINE_EVENT
 * are missing from this extraction -- verify against upstream.
 */
116 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_create,
117 TP_PROTO(struct xe_exec_queue *q),
/*
 * "supress" is a historic misspelling; the event name is userspace-visible
 * trace ABI, so presumably it is kept for compatibility -- do not rename
 * without checking tooling that consumes it.
 */
121 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_supress_resume,
122 TP_PROTO(struct xe_exec_queue *q),
126 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_submit,
127 TP_PROTO(struct xe_exec_queue *q),
131 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_enable,
132 TP_PROTO(struct xe_exec_queue *q),
136 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_disable,
137 TP_PROTO(struct xe_exec_queue *q),
141 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_done,
142 TP_PROTO(struct xe_exec_queue *q),
146 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_register,
147 TP_PROTO(struct xe_exec_queue *q),
151 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister,
152 TP_PROTO(struct xe_exec_queue *q),
156 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister_done,
157 TP_PROTO(struct xe_exec_queue *q),
161 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_close,
162 TP_PROTO(struct xe_exec_queue *q),
166 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_kill,
167 TP_PROTO(struct xe_exec_queue *q),
171 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_cleanup_entity,
172 TP_PROTO(struct xe_exec_queue *q),
176 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_destroy,
177 TP_PROTO(struct xe_exec_queue *q),
181 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_reset,
182 TP_PROTO(struct xe_exec_queue *q),
186 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_memory_cat_error,
187 TP_PROTO(struct xe_exec_queue *q),
191 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_stop,
192 TP_PROTO(struct xe_exec_queue *q),
196 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_resubmit,
197 TP_PROTO(struct xe_exec_queue *q),
201 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_lr_cleanup,
202 TP_PROTO(struct xe_exec_queue *q),
/*
 * Event class: scheduler job state.  Snapshots the job's scheduler and
 * LRC seqnos, its queue's GuC id/state/flags, the job fence pointer, its
 * error (0 when no fence yet) and the first batch-buffer GPU address.
 *
 * NOTE(review): several TP_STRUCT__entry declarations (seqno, guc_id,
 * flags, error) and structural wrapper/closer lines are missing from this
 * extraction -- verify against upstream xe_trace.h.
 */
206 DECLARE_EVENT_CLASS(xe_sched_job,
207 TP_PROTO(struct xe_sched_job *job),
/* Device name resolved through the job's owning exec queue. */
211 __string(dev, __dev_name_eq(job->q))
213 __field(u32, lrc_seqno)
215 __field(u32, guc_state)
218 __field(struct dma_fence *, fence)
219 __field(u64, batch_addr)
224 __entry->seqno = xe_sched_job_seqno(job);
225 __entry->lrc_seqno = xe_sched_job_lrc_seqno(job);
226 __entry->guc_id = job->q->guc->id;
228 atomic_read(&job->q->guc->state);
229 __entry->flags = job->q->flags;
/* job->fence may still be NULL at trace time; report error 0 then. */
230 __entry->error = job->fence ? job->fence->error : 0;
231 __entry->fence = job->fence;
232 __entry->batch_addr = (u64)job->ptrs[0].batch_addr;
235 TP_printk("dev=%s, fence=%p, seqno=%u, lrc_seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
236 __get_str(dev), __entry->fence, __entry->seqno,
237 __entry->lrc_seqno, __entry->guc_id,
238 __entry->batch_addr, __entry->guc_state,
239 __entry->flags, __entry->error)
/*
 * Concrete tracepoints of the xe_sched_job class: job lifecycle
 * (create/exec/run/free) plus error paths (timedout/set_error/ban).
 *
 * NOTE(review): the TP_ARGS(job) line and closing ");" of each
 * DEFINE_EVENT are missing from this extraction -- verify upstream.
 */
242 DEFINE_EVENT(xe_sched_job, xe_sched_job_create,
243 TP_PROTO(struct xe_sched_job *job),
247 DEFINE_EVENT(xe_sched_job, xe_sched_job_exec,
248 TP_PROTO(struct xe_sched_job *job),
252 DEFINE_EVENT(xe_sched_job, xe_sched_job_run,
253 TP_PROTO(struct xe_sched_job *job),
257 DEFINE_EVENT(xe_sched_job, xe_sched_job_free,
258 TP_PROTO(struct xe_sched_job *job),
262 DEFINE_EVENT(xe_sched_job, xe_sched_job_timedout,
263 TP_PROTO(struct xe_sched_job *job),
267 DEFINE_EVENT(xe_sched_job, xe_sched_job_set_error,
268 TP_PROTO(struct xe_sched_job *job),
272 DEFINE_EVENT(xe_sched_job, xe_sched_job_ban,
273 TP_PROTO(struct xe_sched_job *job),
/*
 * Event class: scheduler message.  msg->private_data carries the target
 * exec queue (hence the casts); records the message opcode and the
 * queue's GuC id.
 *
 * NOTE(review): the opcode/guc_id __field declarations, the TP_ARGS line,
 * part of the guc_id assignment and the printk argument for opcode are
 * missing from this extraction -- verify against upstream xe_trace.h.
 */
277 DECLARE_EVENT_CLASS(xe_sched_msg,
278 TP_PROTO(struct xe_sched_msg *msg),
282 __string(dev, __dev_name_eq(((struct xe_exec_queue *)msg->private_data)))
289 __entry->opcode = msg->opcode;
291 ((struct xe_exec_queue *)msg->private_data)->guc->id;
294 TP_printk("dev=%s, guc_id=%d, opcode=%u", __get_str(dev), __entry->guc_id,
/*
 * Concrete tracepoints of the xe_sched_msg class: message queued (add)
 * and message processed (recv).
 *
 * NOTE(review): TP_ARGS and closing ");" lines are missing from this
 * extraction -- verify upstream.
 */
298 DEFINE_EVENT(xe_sched_msg, xe_sched_msg_add,
299 TP_PROTO(struct xe_sched_msg *msg),
303 DEFINE_EVENT(xe_sched_msg, xe_sched_msg_recv,
304 TP_PROTO(struct xe_sched_msg *msg),
/*
 * Event class: hardware fence.  Records the dma-fence context and seqno
 * (taken from the embedded struct dma_fence) plus the fence pointer.
 *
 * NOTE(review): the ctx/seqno __field declarations, TP_ARGS and the
 * structural wrapper/closer lines are missing from this extraction --
 * verify against upstream xe_trace.h.
 */
308 DECLARE_EVENT_CLASS(xe_hw_fence,
309 TP_PROTO(struct xe_hw_fence *fence),
313 __string(dev, __dev_name_xe(fence->xe))
316 __field(struct xe_hw_fence *, fence)
/* Snapshot the embedded dma_fence identity fields. */
321 __entry->ctx = fence->dma.context;
322 __entry->seqno = fence->dma.seqno;
323 __entry->fence = fence;
326 TP_printk("dev=%s, ctx=0x%016llx, fence=%p, seqno=%u",
327 __get_str(dev), __entry->ctx, __entry->fence, __entry->seqno)
/*
 * Concrete tracepoints of the xe_hw_fence class: creation, signaling and
 * the opportunistic try_signal path.
 *
 * NOTE(review): TP_ARGS and closing ");" lines are missing from this
 * extraction -- verify upstream.
 */
330 DEFINE_EVENT(xe_hw_fence, xe_hw_fence_create,
331 TP_PROTO(struct xe_hw_fence *fence),
335 DEFINE_EVENT(xe_hw_fence, xe_hw_fence_signal,
336 TP_PROTO(struct xe_hw_fence *fence),
340 DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
341 TP_PROTO(struct xe_hw_fence *fence),
/*
 * Tracepoint: MMIO register read/write.  Logs direction, register offset,
 * access length and the 64-bit value split into low/high 32-bit halves
 * (low word first in the "val=(0x%x, 0x%x)" pair).
 *
 * NOTE(review): the reg/val/len __field declarations and most of the
 * TP_fast_assign body are missing from this extraction -- verify against
 * upstream xe_trace.h.
 */
345 TRACE_EVENT(xe_reg_rw,
346 TP_PROTO(struct xe_mmio *mmio, bool write, u32 reg, u64 val, int len),
348 TP_ARGS(mmio, write, reg, val, len),
/* Device name resolved through the MMIO region's owning tile. */
351 __string(dev, __dev_name_tile(mmio->tile))
362 __entry->write = write;
366 TP_printk("dev=%s, %s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
367 __get_str(dev), __entry->write ? "write" : "read",
368 __entry->reg, __entry->len,
369 (u32)(__entry->val & 0xffffffff),
370 (u32)(__entry->val >> 32))
/*
 * Event class: runtime-PM transitions.  Records the device name and the
 * caller's address, printed as a resolved symbol via %pS so traces show
 * who triggered the PM operation.
 *
 * NOTE(review): the TP_ARGS line and structural wrapper/closer lines are
 * missing from this extraction -- verify against upstream xe_trace.h.
 */
373 DECLARE_EVENT_CLASS(xe_pm_runtime,
374 TP_PROTO(struct xe_device *xe, void *caller),
378 __string(dev, __dev_name_xe(xe))
379 __field(void *, caller)
384 __entry->caller = caller;
387 TP_printk("dev=%s caller_function=%pS", __get_str(dev), __entry->caller)
/*
 * Concrete tracepoints of the xe_pm_runtime class: runtime get/put,
 * system and runtime suspend/resume, and the ioctl-path runtime get.
 *
 * NOTE(review): TP_ARGS(xe, caller) and closing ");" lines are missing
 * from this extraction -- verify upstream.
 */
390 DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get,
391 TP_PROTO(struct xe_device *xe, void *caller),
395 DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_put,
396 TP_PROTO(struct xe_device *xe, void *caller),
400 DEFINE_EVENT(xe_pm_runtime, xe_pm_resume,
401 TP_PROTO(struct xe_device *xe, void *caller),
405 DEFINE_EVENT(xe_pm_runtime, xe_pm_suspend,
406 TP_PROTO(struct xe_device *xe, void *caller),
410 DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_resume,
411 TP_PROTO(struct xe_device *xe, void *caller),
415 DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_suspend,
416 TP_PROTO(struct xe_device *xe, void *caller),
420 DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get_ioctl,
421 TP_PROTO(struct xe_device *xe, void *caller),
427 /* This part must be outside protection */
/*
 * Standard TRACE_EVENT footer: point define_trace.h at this header so it
 * can re-include it to generate the tracepoint definitions.
 */
428 #undef TRACE_INCLUDE_PATH
429 #undef TRACE_INCLUDE_FILE
430 #define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
431 #define TRACE_INCLUDE_FILE xe_trace
432 #include <trace/define_trace.h>