/*
 * Include guard for the amdgpu tracepoint definitions.  Trace headers are
 * intentionally re-readable: when TRACE_HEADER_MULTI_READ is defined the
 * tracepoint machinery (trace/define_trace.h) includes this file several
 * times with different macro expansions, so the guard must allow that path.
 *
 * Fix: the guard tested _AMDGPU_TRACE_H but defined _AMDGPU_TRACE_H_
 * (trailing underscore), so a plain repeated #include was never actually
 * blocked and the TRACE_EVENT bodies would be re-processed each time.
 */
1 #if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
2 #define _AMDGPU_TRACE_H
4 #include <linux/stringify.h>
5 #include <linux/types.h>
6 #include <linux/tracepoint.h>
11 #define TRACE_SYSTEM amdgpu
12 #define TRACE_INCLUDE_FILE amdgpu_trace
/*
 * amdgpu_mm_rreg - fires on an MMIO register read.
 * did:   presumably the device id of the GPU being read — confirm at call site
 * reg:   register offset that was read
 * value: value returned by the read
 * Entry values are cast to unsigned long so the %lx specifiers match the
 * argument width on all architectures.
 */
14 TRACE_EVENT(amdgpu_mm_rreg,
15 TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
16 TP_ARGS(did, reg, value),
/* NOTE(review): the TP_STRUCT__entry()/TP_fast_assign() wrapper lines and
 * the did/reg assignments are missing from this excerpt. */
18 __field(unsigned, did)
19 __field(uint32_t, reg)
20 __field(uint32_t, value)
25 __entry->value = value;
27 TP_printk("0x%04lx, 0x%08lx, 0x%08lx",
28 (unsigned long)__entry->did,
29 (unsigned long)__entry->reg,
30 (unsigned long)__entry->value)
/*
 * amdgpu_mm_wreg - fires on an MMIO register write; mirror image of
 * amdgpu_mm_rreg (same arguments and output format).
 * did:   presumably the device id of the GPU being written — confirm
 * reg:   register offset written
 * value: value written
 */
33 TRACE_EVENT(amdgpu_mm_wreg,
34 TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
35 TP_ARGS(did, reg, value),
/* NOTE(review): wrapper lines and did/reg assignments missing from excerpt. */
37 __field(unsigned, did)
38 __field(uint32_t, reg)
39 __field(uint32_t, value)
44 __entry->value = value;
46 TP_printk("0x%04lx, 0x%08lx, 0x%08lx",
47 (unsigned long)__entry->did,
48 (unsigned long)__entry->reg,
49 (unsigned long)__entry->value)
/*
 * amdgpu_bo_create - fires when a buffer object is created; records the BO
 * pointer, its page count, current TTM memory type and domain preferences.
 * ("prefered_domains" is the driver's actual field spelling — sic.)
 */
52 TRACE_EVENT(amdgpu_bo_create,
53 TP_PROTO(struct amdgpu_bo *bo),
/* NOTE(review): TP_ARGS and several __field() declarations are missing
 * from this excerpt (pages/type/prefer/allow/visible fields). */
56 __field(struct amdgpu_bo *, bo)
66 __entry->pages = bo->tbo.num_pages;
67 __entry->type = bo->tbo.mem.mem_type;
68 __entry->prefer = bo->prefered_domains;
69 __entry->allow = bo->allowed_domains;
/* NOTE(review): 'visible' is assigned the raw bo->flags word, not a
 * boolean CPU-visibility bit — looks like it should mask a flag; confirm. */
70 __entry->visible = bo->flags;
73 TP_printk("bo=%p,pages=%u,type=%d,prefered=%d,allowed=%d,visible=%d",
74 __entry->bo, __entry->pages, __entry->type,
75 __entry->prefer, __entry->allow, __entry->visible)
/*
 * amdgpu_cs - fires per IB during command-submission parsing.
 * p: the CS parser; i: index into p->job->ibs[].
 * Records the BO list, the ring index, the IB size in dwords, and the
 * number of fences already emitted on that ring.
 */
78 TRACE_EVENT(amdgpu_cs,
79 TP_PROTO(struct amdgpu_cs_parser *p, int i),
/* NOTE(review): TP_ARGS, the ring/dw/fences __field() lines and the
 * wrapper lines are missing from this excerpt. */
82 __field(struct amdgpu_bo_list *, bo_list)
89 __entry->bo_list = p->bo_list;
90 __entry->ring = p->job->ring->idx;
91 __entry->dw = p->job->ibs[i].length_dw;
/* NOTE(review): this call is cut mid-expression; its argument list (and
 * the closing paren/semicolon) continued on lines not shown here. */
92 __entry->fences = amdgpu_fence_count_emitted(
95 TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
96 __entry->bo_list, __entry->ring, __entry->dw,
/*
 * amdgpu_cs_ioctl - fires when a submitted job enters the CS ioctl path.
 * Captures raw pointers (device, scheduler job, first IB, finished fence)
 * plus the ring name and IB count.  Pointers are recorded as-is; they are
 * only meaningful for correlating events, not for dereferencing later.
 */
100 TRACE_EVENT(amdgpu_cs_ioctl,
101 TP_PROTO(struct amdgpu_job *job),
/* NOTE(review): TP_ARGS and the TP_STRUCT__entry()/TP_fast_assign()
 * wrapper lines are missing from this excerpt. */
104 __field(struct amdgpu_device *, adev)
105 __field(struct amd_sched_job *, sched_job)
106 __field(struct amdgpu_ib *, ib)
107 __field(struct dma_fence *, fence)
108 __field(char *, ring_name)
109 __field(u32, num_ibs)
113 __entry->adev = job->adev;
114 __entry->sched_job = &job->base;
/* job->ibs is an array; recording the address of its first element. */
115 __entry->ib = job->ibs;
116 __entry->fence = &job->base.s_fence->finished;
/* NOTE(review): stores a pointer to the ring's name string rather than
 * copying it — assumes the ring name outlives the trace buffer; confirm. */
117 __entry->ring_name = job->ring->name;
118 __entry->num_ibs = job->num_ibs;
120 TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
121 __entry->adev, __entry->sched_job, __entry->ib,
122 __entry->fence, __entry->ring_name, __entry->num_ibs)
/*
 * amdgpu_sched_run_job - fires when the GPU scheduler actually runs a job.
 * Superset of amdgpu_cs_ioctl: additionally snapshots the fence timeline
 * name (copied into the ring buffer via __string/__assign_str) and the
 * finished fence's context/seqno pair for correlation with fence events.
 */
125 TRACE_EVENT(amdgpu_sched_run_job,
126 TP_PROTO(struct amdgpu_job *job),
/* NOTE(review): TP_ARGS and the wrapper lines are missing from excerpt. */
129 __field(struct amdgpu_device *, adev)
130 __field(struct amd_sched_job *, sched_job)
131 __field(struct amdgpu_ib *, ib)
132 __field(struct dma_fence *, fence)
133 __string(timeline, job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished))
134 __field(unsigned int, context)
135 __field(unsigned int, seqno)
136 __field(char *, ring_name)
137 __field(u32, num_ibs)
141 __entry->adev = job->adev;
142 __entry->sched_job = &job->base;
143 __entry->ib = job->ibs;
144 __entry->fence = &job->base.s_fence->finished;
/* NOTE(review): as shown this statement lacks a trailing ';' before the
 * next assignment — possibly lost in extraction; verify in the tree. */
145 __assign_str(timeline, job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished))
146 __entry->context = job->base.s_fence->finished.context;
147 __entry->seqno = job->base.s_fence->finished.seqno;
148 __entry->ring_name = job->ring->name;
149 __entry->num_ibs = job->num_ibs;
151 TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, timeline=%s, context=%u, seqno=%u, ring name=%s, num_ibs=%u",
152 __entry->adev, __entry->sched_job, __entry->ib,
153 __entry->fence, __get_str(timeline), __entry->context, __entry->seqno,
154 __entry->ring_name, __entry->num_ibs)
/*
 * amdgpu_vm_grab_id - fires when a VM id is grabbed for a job on a ring.
 * Records the VM pointer, ring index, the vmid assigned to the job, the
 * page-directory address, and whether a VM flush is required.
 */
158 TRACE_EVENT(amdgpu_vm_grab_id,
159 TP_PROTO(struct amdgpu_vm *vm, int ring, struct amdgpu_job *job),
160 TP_ARGS(vm, ring, job),
/* NOTE(review): the __field() lines for ring/vmid, the wrapper lines and
 * the __entry->vm assignment are missing from this excerpt. */
162 __field(struct amdgpu_vm *, vm)
165 __field(u64, pd_addr)
166 __field(u32, needs_flush)
171 __entry->ring = ring;
172 __entry->vmid = job->vm_id;
173 __entry->pd_addr = job->vm_pd_addr;
174 __entry->needs_flush = job->vm_needs_flush;
176 TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx needs_flush=%u",
177 __entry->vm, __entry->ring, __entry->vmid,
178 __entry->pd_addr, __entry->needs_flush)
/*
 * amdgpu_vm_bo_map - fires when a VA mapping is added for a buffer object.
 * Records the backing BO and the mapping's interval-tree range
 * ([start, last], inclusive), BO offset and mapping flags.
 */
181 TRACE_EVENT(amdgpu_vm_bo_map,
182 TP_PROTO(struct amdgpu_bo_va *bo_va,
183 struct amdgpu_bo_va_mapping *mapping),
184 TP_ARGS(bo_va, mapping),
/* NOTE(review): __field() lines for start/last/offset/flags and the
 * wrapper lines are missing from this excerpt, so their exact types
 * cannot be checked against the %lx/%llx specifiers below. */
186 __field(struct amdgpu_bo *, bo)
194 __entry->bo = bo_va->bo;
195 __entry->start = mapping->it.start;
196 __entry->last = mapping->it.last;
197 __entry->offset = mapping->offset;
198 __entry->flags = mapping->flags;
200 TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
201 __entry->bo, __entry->start, __entry->last,
202 __entry->offset, __entry->flags)
/*
 * amdgpu_vm_bo_unmap - fires when a VA mapping is removed; records the same
 * data as amdgpu_vm_bo_map (BO, inclusive [start, last] range, offset,
 * flags) so map/unmap pairs can be matched in a trace.
 */
205 TRACE_EVENT(amdgpu_vm_bo_unmap,
206 TP_PROTO(struct amdgpu_bo_va *bo_va,
207 struct amdgpu_bo_va_mapping *mapping),
208 TP_ARGS(bo_va, mapping),
/* NOTE(review): __field() lines for start/last/offset/flags and the
 * wrapper lines are missing from this excerpt. */
210 __field(struct amdgpu_bo *, bo)
218 __entry->bo = bo_va->bo;
219 __entry->start = mapping->it.start;
220 __entry->last = mapping->it.last;
221 __entry->offset = mapping->offset;
222 __entry->flags = mapping->flags;
224 TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
225 __entry->bo, __entry->start, __entry->last,
226 __entry->offset, __entry->flags)
/*
 * amdgpu_vm_mapping - shared event class for mapping update/usage events
 * (see the DEFINE_EVENT()s below).  Converts the interval tree's inclusive
 * [start, last] range into a half-open [soffset, eoffset) byte/page range
 * by adding 1 to the last element.
 */
229 DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
230 TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
/* NOTE(review): TP_ARGS, the flags __field() and the wrapper lines are
 * missing from this excerpt. */
233 __field(u64, soffset)
234 __field(u64, eoffset)
239 __entry->soffset = mapping->it.start;
/* +1: interval tree stores an inclusive end; report an exclusive one. */
240 __entry->eoffset = mapping->it.last + 1;
241 __entry->flags = mapping->flags;
243 TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
244 __entry->soffset, __entry->eoffset, __entry->flags)
/* amdgpu_vm_bo_update - instance of the amdgpu_vm_mapping class, fired when
 * a mapping is (re)written to the page tables.
 * NOTE(review): the TP_ARGS line and closing ');' are missing from excerpt. */
247 DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_update,
248 TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
/* amdgpu_vm_bo_mapping - instance of the amdgpu_vm_mapping class, fired per
 * mapping as it is processed.
 * NOTE(review): the TP_ARGS line and closing ');' are missing from excerpt. */
252 DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
253 TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
/*
 * amdgpu_vm_set_ptes - fires when a run of page-table entries is written.
 * pe:    GPU address of the first page-table entry
 * addr:  destination address programmed into the PTEs
 * count: number of PTEs written
 * incr:  per-entry address increment
 * flags: PTE flags applied to every entry
 */
257 TRACE_EVENT(amdgpu_vm_set_ptes,
258 TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
259 uint32_t incr, uint32_t flags),
260 TP_ARGS(pe, addr, count, incr, flags),
/* NOTE(review): the __field() lines, wrapper lines and the __entry->pe
 * assignment are missing from this excerpt. */
271 __entry->addr = addr;
272 __entry->count = count;
273 __entry->incr = incr;
274 __entry->flags = flags;
/* Argument order here (incr, flags, count) deliberately matches the label
 * order in the format string, not the TP_PROTO order. */
276 TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
277 __entry->pe, __entry->addr, __entry->incr,
278 __entry->flags, __entry->count)
/*
 * amdgpu_vm_copy_ptes - fires when 'count' page-table entries are copied
 * from a source buffer at 'src' to the page-table address 'pe'.
 */
281 TRACE_EVENT(amdgpu_vm_copy_ptes,
282 TP_PROTO(uint64_t pe, uint64_t src, unsigned count),
283 TP_ARGS(pe, src, count),
/* NOTE(review): the __field() lines, wrapper lines and the pe/src
 * assignments are missing from this excerpt. */
293 __entry->count = count;
295 TP_printk("pe=%010Lx, src=%010Lx, count=%u",
296 __entry->pe, __entry->src, __entry->count)
/*
 * amdgpu_vm_flush - fires when a VM flush is emitted on a ring; records the
 * page-directory address, the ring index and the VM id being flushed.
 */
299 TRACE_EVENT(amdgpu_vm_flush,
300 TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
301 TP_ARGS(pd_addr, ring, id),
/* NOTE(review): the ring/id __field() lines, wrapper lines and the
 * __entry->id assignment are missing from this excerpt. */
303 __field(u64, pd_addr)
309 __entry->pd_addr = pd_addr;
310 __entry->ring = ring;
313 TP_printk("ring=%u, id=%u, pd_addr=%010Lx",
314 __entry->ring, __entry->id, __entry->pd_addr)
/*
 * amdgpu_bo_list_set - fires for each buffer object placed into a BO list;
 * records the list, the BO pointer and the BO's size in bytes.
 */
317 TRACE_EVENT(amdgpu_bo_list_set,
318 TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo),
/* NOTE(review): TP_ARGS, the wrapper lines, the __entry->bo assignment
 * and the TP_printk argument lines are missing from this excerpt. */
321 __field(struct amdgpu_bo_list *, list)
322 __field(struct amdgpu_bo *, bo)
323 __field(u64, bo_size)
327 __entry->list = list;
329 __entry->bo_size = amdgpu_bo_size(bo);
331 TP_printk("list=%p, bo=%p, bo_size = %Ld",
/*
 * amdgpu_cs_bo_status - per-submission buffer-object statistics.
 * total_bo:   number of BOs referenced by the submission
 * total_size: sum of their sizes in bytes
 * NOTE(review): the TP_STRUCT__entry()/TP_fast_assign() wrapper lines are
 * missing from this excerpt.
 */
337 TRACE_EVENT(amdgpu_cs_bo_status,
338 TP_PROTO(uint64_t total_bo, uint64_t total_size),
339 TP_ARGS(total_bo, total_size),
341 __field(u64, total_bo)
342 __field(u64, total_size)
346 __entry->total_bo = total_bo;
347 __entry->total_size = total_size;
/* Fix: labels were swapped — total_bo is the count and total_size the byte
 * size, but the old format printed total_bo under "total bo size" and
 * total_size under "total bo count". */
349 TP_printk("total bo count = %Ld, total bo size = %Ld",
350 __entry->total_bo, __entry->total_size)
/*
 * amdgpu_ttm_bo_move - fires when TTM migrates a buffer object between
 * placements; records the BO, its size in bytes, and the old/new TTM
 * placement (memory-type) values.
 */
353 TRACE_EVENT(amdgpu_ttm_bo_move,
354 TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
355 TP_ARGS(bo, new_placement, old_placement),
/* NOTE(review): the wrapper lines and the __entry->bo assignment are
 * missing from this excerpt. */
357 __field(struct amdgpu_bo *, bo)
358 __field(u64, bo_size)
359 __field(u32, new_placement)
360 __field(u32, old_placement)
365 __entry->bo_size = amdgpu_bo_size(bo);
366 __entry->new_placement = new_placement;
367 __entry->old_placement = old_placement;
/* Printed as "from=old, to=new" — argument order matches the labels, not
 * the TP_PROTO order. */
369 TP_printk("bo=%p, from=%d, to=%d, size=%Ld",
370 __entry->bo, __entry->old_placement,
371 __entry->new_placement, __entry->bo_size)
376 /* This part must be outside protection */
377 #undef TRACE_INCLUDE_PATH
378 #define TRACE_INCLUDE_PATH .
379 #include <trace/define_trace.h>