/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915

#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#include <drm/drm_drv.h>

#include "gt/intel_engine.h"

#include "i915_drv.h"
#include "i915_irq.h"

/* object tracking */

TRACE_EVENT(i915_gem_object_create,
	    TP_PROTO(struct drm_i915_gem_object *obj),
	    TP_ARGS(obj),
	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u64, size)
			     ),
	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->size = obj->base.size;
			   ),
	    TP_printk("obj=%p, size=0x%llx", __entry->obj, __entry->size)
);

TRACE_EVENT(i915_gem_shrink,
	    TP_PROTO(struct drm_i915_private *i915, unsigned long target, unsigned flags),
	    TP_ARGS(i915, target, flags),
	    TP_STRUCT__entry(
			     __field(int, dev)
			     __field(unsigned long, target)
			     __field(unsigned, flags)
			     ),
	    TP_fast_assign(
			   __entry->dev = i915->drm.primary->index;
			   __entry->target = target;
			   __entry->flags = flags;
			   ),
	    TP_printk("dev=%d, target=%lu, flags=%x",
		      __entry->dev, __entry->target, __entry->flags)
);

TRACE_EVENT(i915_vma_bind,
	    TP_PROTO(struct i915_vma *vma, unsigned flags),
	    TP_ARGS(vma, flags),
	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(struct i915_address_space *, vm)
			     __field(u64, offset)
			     __field(u64, size)
			     __field(unsigned, flags)
			     ),
	    TP_fast_assign(
			   __entry->obj = vma->obj;
			   __entry->vm = vma->vm;
			   __entry->offset = vma->node.start;
			   __entry->size = vma->node.size;
			   __entry->flags = flags;
			   ),
	    TP_printk("obj=%p, offset=0x%016llx size=0x%llx%s vm=%p",
		      __entry->obj, __entry->offset, __entry->size,
		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
		      __entry->vm)
);

TRACE_EVENT(i915_vma_unbind,
	    TP_PROTO(struct i915_vma *vma),
	    TP_ARGS(vma),
	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(struct i915_address_space *, vm)
			     __field(u64, offset)
			     __field(u64, size)
			     ),
	    TP_fast_assign(
			   __entry->obj = vma->obj;
			   __entry->vm = vma->vm;
			   __entry->offset = vma->node.start;
			   __entry->size = vma->node.size;
			   ),
	    TP_printk("obj=%p, offset=0x%016llx size=0x%llx vm=%p",
		      __entry->obj, __entry->offset, __entry->size, __entry->vm)
);

TRACE_EVENT(i915_gem_object_pwrite,
	    TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len),
	    TP_ARGS(obj, offset, len),
	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u64, offset)
			     __field(u64, len)
			     ),
	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->offset = offset;
			   __entry->len = len;
			   ),
	    TP_printk("obj=%p, offset=0x%llx, len=0x%llx",
		      __entry->obj, __entry->offset, __entry->len)
);

TRACE_EVENT(i915_gem_object_pread,
	    TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len),
	    TP_ARGS(obj, offset, len),
	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u64, offset)
			     __field(u64, len)
			     ),
	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->offset = offset;
			   __entry->len = len;
			   ),
	    TP_printk("obj=%p, offset=0x%llx, len=0x%llx",
		      __entry->obj, __entry->offset, __entry->len)
);

TRACE_EVENT(i915_gem_object_fault,
	    TP_PROTO(struct drm_i915_gem_object *obj, u64 index, bool gtt, bool write),
	    TP_ARGS(obj, index, gtt, write),
	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u64, index)
			     __field(bool, gtt)
			     __field(bool, write)
			     ),
	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->index = index;
			   __entry->gtt = gtt;
			   __entry->write = write;
			   ),
	    TP_printk("obj=%p, %s index=%llu %s",
		      __entry->obj,
		      __entry->gtt ? "GTT" : "CPU",
		      __entry->index,
		      __entry->write ? ", writable" : "")
);

DECLARE_EVENT_CLASS(i915_gem_object,
	    TP_PROTO(struct drm_i915_gem_object *obj),
	    TP_ARGS(obj),
	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     ),
	    TP_fast_assign(
			   __entry->obj = obj;
			   ),
	    TP_printk("obj=%p", __entry->obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
	     TP_PROTO(struct drm_i915_gem_object *obj),
	     TP_ARGS(obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
	     TP_PROTO(struct drm_i915_gem_object *obj),
	     TP_ARGS(obj)
);

TRACE_EVENT(i915_gem_evict,
	    TP_PROTO(struct i915_address_space *vm, u64 size, u64 align, unsigned int flags),
	    TP_ARGS(vm, size, align, flags),
	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(struct i915_address_space *, vm)
			     __field(u64, size)
			     __field(u64, align)
			     __field(unsigned int, flags)
			     ),
	    TP_fast_assign(
			   __entry->dev = vm->i915->drm.primary->index;
			   __entry->vm = vm;
			   __entry->size = size;
			   __entry->align = align;
			   __entry->flags = flags;
			   ),
	    TP_printk("dev=%d, vm=%p, size=0x%llx, align=0x%llx %s",
		      __entry->dev, __entry->vm, __entry->size, __entry->align,
		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
);

TRACE_EVENT(i915_gem_evict_node,
	    TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags),
	    TP_ARGS(vm, node, flags),
	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(struct i915_address_space *, vm)
			     __field(u64, start)
			     __field(u64, size)
			     __field(unsigned long, color)
			     __field(unsigned int, flags)
			     ),
	    TP_fast_assign(
			   __entry->dev = vm->i915->drm.primary->index;
			   __entry->vm = vm;
			   __entry->start = node->start;
			   __entry->size = node->size;
			   __entry->color = node->color;
			   __entry->flags = flags;
			   ),
	    TP_printk("dev=%d, vm=%p, start=0x%llx size=0x%llx, color=0x%lx, flags=%x",
		      __entry->dev, __entry->vm,
		      __entry->start, __entry->size,
		      __entry->color, __entry->flags)
);

TRACE_EVENT(i915_gem_evict_vm,
	    TP_PROTO(struct i915_address_space *vm),
	    TP_ARGS(vm),
	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(struct i915_address_space *, vm)
			     ),
	    TP_fast_assign(
			   __entry->dev = vm->i915->drm.primary->index;
			   __entry->vm = vm;
			   ),
	    TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);

TRACE_EVENT(i915_request_queue,
	    TP_PROTO(struct i915_request *rq, u32 flags),
	    TP_ARGS(rq, flags),
	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u64, ctx)
			     __field(u16, class)
			     __field(u16, instance)
			     __field(u32, seqno)
			     __field(u32, flags)
			     ),
	    TP_fast_assign(
			   __entry->dev = rq->engine->i915->drm.primary->index;
			   __entry->class = rq->engine->uabi_class;
			   __entry->instance = rq->engine->uabi_instance;
			   __entry->ctx = rq->fence.context;
			   __entry->seqno = rq->fence.seqno;
			   __entry->flags = flags;
			   ),
	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
		      __entry->dev, __entry->class, __entry->instance,
		      __entry->ctx, __entry->seqno, __entry->flags)
);

DECLARE_EVENT_CLASS(i915_request,
	    TP_PROTO(struct i915_request *rq),
	    TP_ARGS(rq),
	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u64, ctx)
			     __field(u16, class)
			     __field(u16, instance)
			     __field(u32, seqno)
			     __field(u32, tail)
			     ),
	    TP_fast_assign(
			   __entry->dev = rq->engine->i915->drm.primary->index;
			   __entry->class = rq->engine->uabi_class;
			   __entry->instance = rq->engine->uabi_instance;
			   __entry->ctx = rq->fence.context;
			   __entry->seqno = rq->fence.seqno;
			   __entry->tail = rq->tail;
			   ),
	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, tail=%u",
		      __entry->dev, __entry->class, __entry->instance,
		      __entry->ctx, __entry->seqno, __entry->tail)
);

DEFINE_EVENT(i915_request, i915_request_add,
	     TP_PROTO(struct i915_request *rq),
	     TP_ARGS(rq)
);

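/*
 * The low-level request/GuC-context tracepoints below are compiled in only
 * when CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is set; otherwise the empty
 * stubs in the #else branch further down keep the call sites building.
 */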
#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
DEFINE_EVENT(i915_request, i915_request_guc_submit,
	     TP_PROTO(struct i915_request *rq),
	     TP_ARGS(rq)
);

DEFINE_EVENT(i915_request, i915_request_submit,
	     TP_PROTO(struct i915_request *rq),
	     TP_ARGS(rq)
);

DEFINE_EVENT(i915_request, i915_request_execute,
	     TP_PROTO(struct i915_request *rq),
	     TP_ARGS(rq)
);

TRACE_EVENT(i915_request_in,
	    TP_PROTO(struct i915_request *rq, unsigned int port),
	    TP_ARGS(rq, port),
	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u64, ctx)
			     __field(u16, class)
			     __field(u16, instance)
			     __field(u32, seqno)
			     __field(u32, port)
			     __field(s32, prio)
			     ),
	    TP_fast_assign(
			   __entry->dev = rq->engine->i915->drm.primary->index;
			   __entry->class = rq->engine->uabi_class;
			   __entry->instance = rq->engine->uabi_instance;
			   __entry->ctx = rq->fence.context;
			   __entry->seqno = rq->fence.seqno;
			   __entry->prio = rq->sched.attr.priority;
			   __entry->port = port;
			   ),
	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%d, port=%u",
		      __entry->dev, __entry->class, __entry->instance,
		      __entry->ctx, __entry->seqno,
		      __entry->prio, __entry->port)
);

TRACE_EVENT(i915_request_out,
	    TP_PROTO(struct i915_request *rq),
	    TP_ARGS(rq),
	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u64, ctx)
			     __field(u16, class)
			     __field(u16, instance)
			     __field(u32, seqno)
			     __field(u32, completed)
			     ),
	    TP_fast_assign(
			   __entry->dev = rq->engine->i915->drm.primary->index;
			   __entry->class = rq->engine->uabi_class;
			   __entry->instance = rq->engine->uabi_instance;
			   __entry->ctx = rq->fence.context;
			   __entry->seqno = rq->fence.seqno;
			   __entry->completed = i915_request_completed(rq);
			   ),
	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, completed?=%u",
		      __entry->dev, __entry->class, __entry->instance,
		      __entry->ctx, __entry->seqno, __entry->completed)
);

DECLARE_EVENT_CLASS(intel_context,
		    TP_PROTO(struct intel_context *ce),
		    TP_ARGS(ce),
		    TP_STRUCT__entry(
				     __field(u32, guc_id)
				     __field(int, pin_count)
				     __field(u32, sched_state)
				     __field(u8, guc_prio)
				     ),
		    TP_fast_assign(
				   __entry->guc_id = ce->guc_id.id;
				   __entry->pin_count = atomic_read(&ce->pin_count);
				   __entry->sched_state = ce->guc_state.sched_state;
				   __entry->guc_prio = ce->guc_state.prio;
				   ),
		    TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x, guc_prio=%u",
			      __entry->guc_id, __entry->pin_count,
			      __entry->sched_state,
			      __entry->guc_prio)
);

DEFINE_EVENT(intel_context, intel_context_set_prio,
	     TP_PROTO(struct intel_context *ce),
	     TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_reset,
	     TP_PROTO(struct intel_context *ce),
	     TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_ban,
	     TP_PROTO(struct intel_context *ce),
	     TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_register,
	     TP_PROTO(struct intel_context *ce),
	     TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_deregister,
	     TP_PROTO(struct intel_context *ce),
	     TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_deregister_done,
	     TP_PROTO(struct intel_context *ce),
	     TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_sched_enable,
	     TP_PROTO(struct intel_context *ce),
	     TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_sched_disable,
	     TP_PROTO(struct intel_context *ce),
	     TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_sched_done,
	     TP_PROTO(struct intel_context *ce),
	     TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_create,
	     TP_PROTO(struct intel_context *ce),
	     TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_fence_release,
	     TP_PROTO(struct intel_context *ce),
	     TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_free,
	     TP_PROTO(struct intel_context *ce),
	     TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_steal_guc_id,
	     TP_PROTO(struct intel_context *ce),
	     TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_do_pin,
	     TP_PROTO(struct intel_context *ce),
	     TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_do_unpin,
	     TP_PROTO(struct intel_context *ce),
	     TP_ARGS(ce)
);

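/*
 * Without CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS, the stubs below stand in
 * for the trace_*() helpers so their call sites compile away to nothing.
 */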
#else
#if !defined(TRACE_HEADER_MULTI_READ)
static inline void trace_i915_request_guc_submit(struct i915_request *rq) {}
static inline void trace_i915_request_submit(struct i915_request *rq) {}
static inline void trace_i915_request_execute(struct i915_request *rq) {}
static inline void trace_i915_request_in(struct i915_request *rq, unsigned int port) {}
static inline void trace_i915_request_out(struct i915_request *rq) {}
static inline void trace_intel_context_set_prio(struct intel_context *ce) {}
static inline void trace_intel_context_reset(struct intel_context *ce) {}
static inline void trace_intel_context_ban(struct intel_context *ce) {}
static inline void trace_intel_context_register(struct intel_context *ce) {}
static inline void trace_intel_context_deregister(struct intel_context *ce) {}
static inline void trace_intel_context_deregister_done(struct intel_context *ce) {}
static inline void trace_intel_context_sched_enable(struct intel_context *ce) {}
static inline void trace_intel_context_sched_disable(struct intel_context *ce) {}
static inline void trace_intel_context_sched_done(struct intel_context *ce) {}
static inline void trace_intel_context_create(struct intel_context *ce) {}
static inline void trace_intel_context_fence_release(struct intel_context *ce) {}
static inline void trace_intel_context_free(struct intel_context *ce) {}
static inline void trace_intel_context_steal_guc_id(struct intel_context *ce) {}
static inline void trace_intel_context_do_pin(struct intel_context *ce) {}
static inline void trace_intel_context_do_unpin(struct intel_context *ce) {}
#endif
#endif

DEFINE_EVENT(i915_request, i915_request_retire,
	     TP_PROTO(struct i915_request *rq),
	     TP_ARGS(rq)
);

TRACE_EVENT(i915_request_wait_begin,
	    TP_PROTO(struct i915_request *rq, unsigned int flags),
	    TP_ARGS(rq, flags),
	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u64, ctx)
			     __field(u16, class)
			     __field(u16, instance)
			     __field(u32, seqno)
			     __field(unsigned int, flags)
			     ),
	    /* NB: the blocking information is racy since mutex_is_locked
	     * doesn't check that the current thread holds the lock. The only
	     * other option would be to pass the boolean information of whether
	     * or not the class was blocking down through the stack which is
	     * less desirable.
	     */
	    TP_fast_assign(
			   __entry->dev = rq->engine->i915->drm.primary->index;
			   __entry->class = rq->engine->uabi_class;
			   __entry->instance = rq->engine->uabi_instance;
			   __entry->ctx = rq->fence.context;
			   __entry->seqno = rq->fence.seqno;
			   __entry->flags = flags;
			   ),
	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
		      __entry->dev, __entry->class, __entry->instance,
		      __entry->ctx, __entry->seqno,
		      __entry->flags)
);

DEFINE_EVENT(i915_request, i915_request_wait_end,
	     TP_PROTO(struct i915_request *rq),
	     TP_ARGS(rq)
);

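/*
 * i915_reg_rw uses TRACE_EVENT_CONDITION(): the event fires only when its
 * 'trace' argument evaluates true, letting callers suppress uninteresting
 * mmio traffic without disabling the tracepoint as a whole.
 */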
TRACE_EVENT_CONDITION(i915_reg_rw,
	TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),
	TP_ARGS(write, reg, val, len, trace),
	TP_CONDITION(trace),
	TP_STRUCT__entry(
		__field(u64, val)
		__field(u32, reg)
		__field(u16, write)
		__field(u16, len)
		),
	TP_fast_assign(
		__entry->val = (u64)val;
		__entry->reg = i915_mmio_reg_offset(reg);
		__entry->write = write;
		__entry->len = len;
		),
	TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
		__entry->write ? "write" : "read",
		__entry->reg, __entry->len,
		(u32)(__entry->val & 0xffffffff),
		(u32)(__entry->val >> 32))
);

TRACE_EVENT(intel_gpu_freq_change,
	    TP_PROTO(u32 freq),
	    TP_ARGS(freq),
	    TP_STRUCT__entry(
			     __field(u32, freq)
			     ),
	    TP_fast_assign(
			   __entry->freq = freq;
			   ),
	    TP_printk("new_freq=%u", __entry->freq)
);

/**
 * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
 *
 * With full ppgtt enabled each process using drm will allocate at least one
 * translation table. With these traces it is possible to keep track of the
 * allocation and of the lifetime of the tables; this can be used during
 * testing/debug to verify that we are not leaking ppgtts.
 * These traces identify the ppgtt through the vm pointer, which is also printed
 * by the i915_vma_bind and i915_vma_unbind tracepoints.
 */
DECLARE_EVENT_CLASS(i915_ppgtt,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm),
	TP_STRUCT__entry(
			__field(struct i915_address_space *, vm)
			__field(u32, dev)
	),
	TP_fast_assign(
			__entry->vm = vm;
			__entry->dev = vm->i915->drm.primary->index;
	),
	TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
);

DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm)
);

DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm)
);

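/*
 * A minimal usage sketch for the leak check described above, assuming
 * tracefs is mounted at /sys/kernel/tracing:
 *
 *	# echo 1 > /sys/kernel/tracing/events/i915/i915_ppgtt_create/enable
 *	# echo 1 > /sys/kernel/tracing/events/i915/i915_ppgtt_release/enable
 *	# cat /sys/kernel/tracing/trace_pipe
 *
 * A create without a matching release (same vm pointer) is a candidate
 * leak.
 */
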
/**
 * DOC: i915_context_create and i915_context_free tracepoints
 *
 * These tracepoints are used to track creation and deletion of contexts.
 * If full ppgtt is enabled, they also print the address of the vm assigned to
 * the context.
 */
DECLARE_EVENT_CLASS(i915_context,
	TP_PROTO(struct i915_gem_context *ctx),
	TP_ARGS(ctx),
	TP_STRUCT__entry(
			__field(u32, dev)
			__field(struct i915_gem_context *, ctx)
			__field(struct i915_address_space *, vm)
	),
	TP_fast_assign(
			__entry->dev = ctx->i915->drm.primary->index;
			__entry->ctx = ctx;
			__entry->vm = ctx->vm;
	),
	TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
		  __entry->dev, __entry->ctx, __entry->vm)
);

DEFINE_EVENT(i915_context, i915_context_create,
	TP_PROTO(struct i915_gem_context *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(i915_context, i915_context_free,
	TP_PROTO(struct i915_gem_context *ctx),
	TP_ARGS(ctx)
);

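/*
 * As with the ppgtt pair above, pairing i915_context_create against
 * i915_context_free by ctx pointer during a test run gives a quick
 * context-leak check (a sketch of a debugging workflow, not a stable ABI).
 */
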
#endif /* _I915_TRACE_H_ */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
#define TRACE_INCLUDE_FILE i915_trace
#include <trace/define_trace.h>