1 /* SPDX-License-Identifier: GPL-2.0-only */
3 /* Copyright © 2022 Intel Corporation */
7 #define TRACE_SYSTEM xe
9 #if !defined(_XE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
12 #include <linux/tracepoint.h>
13 #include <linux/types.h>
15 #include "xe_bo_types.h"
16 #include "xe_exec_queue_types.h"
17 #include "xe_gpu_scheduler_types.h"
18 #include "xe_gt_tlb_invalidation_types.h"
19 #include "xe_gt_types.h"
20 #include "xe_guc_exec_queue_types.h"
21 #include "xe_sched_job.h"
/*
 * Event class for GT TLB invalidation fence lifecycle tracepoints.
 * Logs the fence by address (cast to u64) plus its invalidation seqno.
 * NOTE(review): interior macro lines (TP_ARGS/TP_STRUCT__entry/__field
 * wrappers) appear elided from this extraction — confirm against the
 * full file before editing.
 */
24 DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
25 TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
34 __entry->fence = (u64)fence; /* fence identified by its address only */
35 __entry->seqno = fence->seqno;
38 TP_printk("fence=0x%016llx, seqno=%d",
39 __entry->fence, __entry->seqno)
/*
 * Instantiations of the xe_gt_tlb_invalidation_fence class, one per
 * stage of the invalidation flow: create, deferred work, callback,
 * send to GuC, receive ack, signal, and timeout.
 */
42 DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_create,
43 TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
47 DEFINE_EVENT(xe_gt_tlb_invalidation_fence,
48 xe_gt_tlb_invalidation_fence_work_func,
49 TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
53 DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_cb,
54 TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
58 DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_send,
59 TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
63 DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_recv,
64 TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
68 DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_signal,
69 TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
73 DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_timeout,
74 TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
/*
 * Event class for buffer-object tracepoints: records the BO's size,
 * creation flags, and owning VM (logged by address).
 */
78 DECLARE_EVENT_CLASS(xe_bo,
79 TP_PROTO(struct xe_bo *bo),
89 __entry->size = bo->size;
90 __entry->flags = bo->flags;
91 __entry->vm = (unsigned long)bo->vm; /* VM identified by address only */
94 TP_printk("size=%zu, flags=0x%02x, vm=0x%016llx",
95 __entry->size, __entry->flags, __entry->vm)
/* Instantiations of the xe_bo class: CPU fault and BO move. */
98 DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
99 TP_PROTO(struct xe_bo *bo),
103 DEFINE_EVENT(xe_bo, xe_bo_move,
104 TP_PROTO(struct xe_bo *bo),
/*
 * Event class for exec-queue state transitions. Captures the engine
 * class, logical engine mask, GT id, queue width, GuC context id,
 * GuC submission state bits, and queue flags.
 * NOTE(review): several __field() declarations (gt_id, width, guc_id,
 * flags) appear elided from this extraction — confirm against the
 * full file.
 */
108 DECLARE_EVENT_CLASS(xe_exec_queue,
109 TP_PROTO(struct xe_exec_queue *q),
113 __field(enum xe_engine_class, class)
114 __field(u32, logical_mask)
118 __field(u32, guc_state)
123 __entry->class = q->class;
124 __entry->logical_mask = q->logical_mask;
125 __entry->gt_id = q->gt->info.id;
126 __entry->width = q->width;
127 __entry->guc_id = q->guc->id;
128 __entry->guc_state = atomic_read(&q->guc->state); /* snapshot; may race with updates */
129 __entry->flags = q->flags;
132 TP_printk("%d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
133 __entry->class, __entry->logical_mask,
134 __entry->gt_id, __entry->width, __entry->guc_id,
135 __entry->guc_state, __entry->flags)
/*
 * Instantiations of the xe_exec_queue class covering the queue's
 * lifecycle and GuC interactions (create, submit, scheduling
 * enable/disable/done, register/deregister, close, kill, cleanup,
 * destroy, reset, CAT error, stop, resubmit, long-running cleanup).
 * Note: "supress" in xe_exec_queue_supress_resume is a historical
 * misspelling; the string is the tracepoint's userspace-visible name,
 * so it must not be "fixed" without an ABI-aware rename.
 */
138 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_create,
139 TP_PROTO(struct xe_exec_queue *q),
143 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_supress_resume,
144 TP_PROTO(struct xe_exec_queue *q),
148 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_submit,
149 TP_PROTO(struct xe_exec_queue *q),
153 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_enable,
154 TP_PROTO(struct xe_exec_queue *q),
158 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_disable,
159 TP_PROTO(struct xe_exec_queue *q),
163 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_done,
164 TP_PROTO(struct xe_exec_queue *q),
168 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_register,
169 TP_PROTO(struct xe_exec_queue *q),
173 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister,
174 TP_PROTO(struct xe_exec_queue *q),
178 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister_done,
179 TP_PROTO(struct xe_exec_queue *q),
183 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_close,
184 TP_PROTO(struct xe_exec_queue *q),
188 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_kill,
189 TP_PROTO(struct xe_exec_queue *q),
193 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_cleanup_entity,
194 TP_PROTO(struct xe_exec_queue *q),
198 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_destroy,
199 TP_PROTO(struct xe_exec_queue *q),
203 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_reset,
204 TP_PROTO(struct xe_exec_queue *q),
208 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_memory_cat_error,
209 TP_PROTO(struct xe_exec_queue *q),
213 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_stop,
214 TP_PROTO(struct xe_exec_queue *q),
218 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_resubmit,
219 TP_PROTO(struct xe_exec_queue *q),
223 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_lr_cleanup,
224 TP_PROTO(struct xe_exec_queue *q),
/*
 * Event class for scheduler-job tracepoints. Captures the job's fence
 * (by address), seqno, owning queue's GuC id and state, queue flags,
 * fence error, and the first batch buffer address.
 * NOTE(review): the assignment on the "246" line is split from its
 * left-hand side (presumably "__entry->guc_state =") by the elided
 * extraction — confirm against the full file.
 */
228 DECLARE_EVENT_CLASS(xe_sched_job,
229 TP_PROTO(struct xe_sched_job *job),
235 __field(u32, guc_state)
239 __field(u64, batch_addr)
243 __entry->seqno = xe_sched_job_seqno(job);
244 __entry->guc_id = job->q->guc->id;
246 atomic_read(&job->q->guc->state); /* snapshot of GuC submission state */
247 __entry->flags = job->q->flags;
248 __entry->error = job->fence->error;
249 __entry->fence = (unsigned long)job->fence; /* fence identified by address only */
250 __entry->batch_addr = (u64)job->batch_addr[0]; /* first batch only */
253 TP_printk("fence=0x%016llx, seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
254 __entry->fence, __entry->seqno, __entry->guc_id,
255 __entry->batch_addr, __entry->guc_state,
256 __entry->flags, __entry->error)
/*
 * Instantiations of the xe_sched_job class: create, exec, run, free,
 * timeout, error set, and ban.
 */
259 DEFINE_EVENT(xe_sched_job, xe_sched_job_create,
260 TP_PROTO(struct xe_sched_job *job),
264 DEFINE_EVENT(xe_sched_job, xe_sched_job_exec,
265 TP_PROTO(struct xe_sched_job *job),
269 DEFINE_EVENT(xe_sched_job, xe_sched_job_run,
270 TP_PROTO(struct xe_sched_job *job),
274 DEFINE_EVENT(xe_sched_job, xe_sched_job_free,
275 TP_PROTO(struct xe_sched_job *job),
279 DEFINE_EVENT(xe_sched_job, xe_sched_job_timedout,
280 TP_PROTO(struct xe_sched_job *job),
284 DEFINE_EVENT(xe_sched_job, xe_sched_job_set_error,
285 TP_PROTO(struct xe_sched_job *job),
289 DEFINE_EVENT(xe_sched_job, xe_sched_job_ban,
290 TP_PROTO(struct xe_sched_job *job),
/*
 * Event class for scheduler messages: records the message opcode and
 * the GuC id of the exec queue carried in msg->private_data.
 * NOTE(review): the "306" line is the right-hand side of an assignment
 * (presumably "__entry->guc_id =") split by the elided extraction.
 */
294 DECLARE_EVENT_CLASS(xe_sched_msg,
295 TP_PROTO(struct xe_sched_msg *msg),
304 __entry->opcode = msg->opcode;
306 ((struct xe_exec_queue *)msg->private_data)->guc->id;
309 TP_printk("guc_id=%d, opcode=%u", __entry->guc_id,
/* Instantiations of the xe_sched_msg class: message added / received. */
313 DEFINE_EVENT(xe_sched_msg, xe_sched_msg_add,
314 TP_PROTO(struct xe_sched_msg *msg),
318 DEFINE_EVENT(xe_sched_msg, xe_sched_msg_recv,
319 TP_PROTO(struct xe_sched_msg *msg),
/*
 * Event class for hardware fences: records the embedded dma_fence's
 * context and seqno plus the fence object's address.
 */
323 DECLARE_EVENT_CLASS(xe_hw_fence,
324 TP_PROTO(struct xe_hw_fence *fence),
334 __entry->ctx = fence->dma.context;
335 __entry->seqno = fence->dma.seqno;
336 __entry->fence = (unsigned long)fence; /* fence identified by address only */
339 TP_printk("ctx=0x%016llx, fence=0x%016llx, seqno=%u",
340 __entry->ctx, __entry->fence, __entry->seqno)
/*
 * Instantiations of the xe_hw_fence class: create, signal, try_signal,
 * and free.
 */
343 DEFINE_EVENT(xe_hw_fence, xe_hw_fence_create,
344 TP_PROTO(struct xe_hw_fence *fence),
348 DEFINE_EVENT(xe_hw_fence, xe_hw_fence_signal,
349 TP_PROTO(struct xe_hw_fence *fence),
353 DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
354 TP_PROTO(struct xe_hw_fence *fence),
358 DEFINE_EVENT(xe_hw_fence, xe_hw_fence_free,
359 TP_PROTO(struct xe_hw_fence *fence),
/*
 * Event class for VMA tracepoints: records the VMA address, owning
 * VM's unified-shared-memory ASID, the [start, end] GPU VA range
 * (end logged inclusive, hence the "- 1"), and the userptr.
 */
363 DECLARE_EVENT_CLASS(xe_vma,
364 TP_PROTO(struct xe_vma *vma),
376 __entry->vma = (unsigned long)vma; /* VMA identified by address only */
377 __entry->asid = xe_vma_vm(vma)->usm.asid;
378 __entry->start = xe_vma_start(vma);
379 __entry->end = xe_vma_end(vma) - 1; /* log inclusive end */
380 __entry->ptr = xe_vma_userptr(vma);
383 TP_printk("vma=0x%016llx, asid=0x%05x, start=0x%012llx, end=0x%012llx, ptr=0x%012llx,",
384 __entry->vma, __entry->asid, __entry->start,
385 __entry->end, __entry->ptr)
/*
 * Instantiations of the xe_vma class covering bind/unbind, page
 * faults, access-counter handling (acc), userptr invalidation, and
 * rebind-worker/exec paths.
 */
388 DEFINE_EVENT(xe_vma, xe_vma_flush,
389 TP_PROTO(struct xe_vma *vma),
393 DEFINE_EVENT(xe_vma, xe_vma_pagefault,
394 TP_PROTO(struct xe_vma *vma),
398 DEFINE_EVENT(xe_vma, xe_vma_acc,
399 TP_PROTO(struct xe_vma *vma),
403 DEFINE_EVENT(xe_vma, xe_vma_fail,
404 TP_PROTO(struct xe_vma *vma),
408 DEFINE_EVENT(xe_vma, xe_vma_bind,
409 TP_PROTO(struct xe_vma *vma),
413 DEFINE_EVENT(xe_vma, xe_vma_pf_bind,
414 TP_PROTO(struct xe_vma *vma),
418 DEFINE_EVENT(xe_vma, xe_vma_unbind,
419 TP_PROTO(struct xe_vma *vma),
423 DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_worker,
424 TP_PROTO(struct xe_vma *vma),
428 DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_exec,
429 TP_PROTO(struct xe_vma *vma),
433 DEFINE_EVENT(xe_vma, xe_vma_rebind_worker,
434 TP_PROTO(struct xe_vma *vma),
438 DEFINE_EVENT(xe_vma, xe_vma_rebind_exec,
439 TP_PROTO(struct xe_vma *vma),
443 DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
444 TP_PROTO(struct xe_vma *vma),
448 DEFINE_EVENT(xe_vma, xe_vma_usm_invalidate,
449 TP_PROTO(struct xe_vma *vma),
453 DEFINE_EVENT(xe_vma, xe_vma_evict,
454 TP_PROTO(struct xe_vma *vma),
458 DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate_complete,
459 TP_PROTO(struct xe_vma *vma),
/*
 * Event class for VM tracepoints: records the VM address and its
 * unified-shared-memory ASID.
 */
463 DECLARE_EVENT_CLASS(xe_vm,
464 TP_PROTO(struct xe_vm *vm),
473 __entry->vm = (unsigned long)vm; /* VM identified by address only */
474 __entry->asid = vm->usm.asid;
477 TP_printk("vm=0x%016llx, asid=0x%05x", __entry->vm,
/*
 * Instantiations of the xe_vm class: kill, create, free, CPU bind,
 * restart, and the rebind-worker enter/retry/exit checkpoints.
 */
481 DEFINE_EVENT(xe_vm, xe_vm_kill,
482 TP_PROTO(struct xe_vm *vm),
486 DEFINE_EVENT(xe_vm, xe_vm_create,
487 TP_PROTO(struct xe_vm *vm),
491 DEFINE_EVENT(xe_vm, xe_vm_free,
492 TP_PROTO(struct xe_vm *vm),
496 DEFINE_EVENT(xe_vm, xe_vm_cpu_bind,
497 TP_PROTO(struct xe_vm *vm),
501 DEFINE_EVENT(xe_vm, xe_vm_restart,
502 TP_PROTO(struct xe_vm *vm),
506 DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_enter,
507 TP_PROTO(struct xe_vm *vm),
511 DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_retry,
512 TP_PROTO(struct xe_vm *vm),
516 DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit,
517 TP_PROTO(struct xe_vm *vm),
/*
 * Event class for GuC CT(B) flow-control snapshots: ring head/tail,
 * total size, free space, and the pending message length.
 * NOTE(review): the "__entry->len = len;" assignment appears elided
 * from this extraction (len is printed below) — confirm against the
 * full file.
 */
522 DECLARE_EVENT_CLASS(xe_guc_ct_flow_control,
523 TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
524 TP_ARGS(_head, _tail, size, space, len),
535 __entry->_head = _head;
536 __entry->_tail = _tail;
537 __entry->size = size;
538 __entry->space = space;
542 TP_printk("h2g flow control: head=%u, tail=%u, size=%u, space=%u, len=%u",
543 __entry->_head, __entry->_tail, __entry->size,
544 __entry->space, __entry->len)
/*
 * Flow-control instantiations: h2g reuses the class printk; g2h uses
 * DEFINE_EVENT_PRINT to override only the direction prefix in the
 * output string.
 */
547 DEFINE_EVENT(xe_guc_ct_flow_control, xe_guc_ct_h2g_flow_control,
548 TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
549 TP_ARGS(_head, _tail, size, space, len)
552 DEFINE_EVENT_PRINT(xe_guc_ct_flow_control, xe_guc_ct_g2h_flow_control,
553 TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
554 TP_ARGS(_head, _tail, size, space, len),
556 TP_printk("g2h flow control: head=%u, tail=%u, size=%u, space=%u, len=%u",
557 __entry->_head, __entry->_tail, __entry->size,
558 __entry->space, __entry->len)
/*
 * Event class for GuC CTB message tracepoints: GT id, action opcode,
 * message length, and ring tail/head positions.
 * NOTE(review): the "__entry->len = len;" assignment appears elided
 * from this extraction (len is printed below) — confirm against the
 * full file.
 */
561 DECLARE_EVENT_CLASS(xe_guc_ctb,
562 TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
563 TP_ARGS(gt_id, action, len, _head, tail),
574 __entry->gt_id = gt_id;
575 __entry->action = action;
577 __entry->tail = tail;
578 __entry->_head = _head;
581 TP_printk("gt%d: H2G CTB: action=0x%x, len=%d, tail=%d, head=%d\n",
582 __entry->gt_id, __entry->action, __entry->len,
583 __entry->tail, __entry->_head)
/*
 * CTB instantiations: h2g reuses the class printk; g2h uses
 * DEFINE_EVENT_PRINT to override only the direction prefix in the
 * output string.
 */
586 DEFINE_EVENT(xe_guc_ctb, xe_guc_ctb_h2g,
587 TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
588 TP_ARGS(gt_id, action, len, _head, tail)
591 DEFINE_EVENT_PRINT(xe_guc_ctb, xe_guc_ctb_g2h,
592 TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
593 TP_ARGS(gt_id, action, len, _head, tail),
595 TP_printk("gt%d: G2H CTB: action=0x%x, len=%d, tail=%d, head=%d\n",
596 __entry->gt_id, __entry->action, __entry->len,
597 __entry->tail, __entry->_head)
603 /* This part must be outside protection */
604 #undef TRACE_INCLUDE_PATH
605 #undef TRACE_INCLUDE_FILE
606 #define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
607 #define TRACE_INCLUDE_FILE xe_trace
608 #include <trace/define_trace.h>