/*
 * amdgpu ftrace tracepoint definitions.
 * This header is deliberately re-readable: trace/define_trace.h includes
 * it a second time with TRACE_HEADER_MULTI_READ set to expand the event
 * bodies, which is why the guard check allows re-entry.
 */
1 #if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
/* Fixed: the macro previously defined here was _AMDGPU_TRACE_H_ (trailing
 * underscore), which never matched the #if check above, leaving the
 * include guard ineffective. Define the exact name the check tests. */
2 #define _AMDGPU_TRACE_H
4 #include <linux/stringify.h>
5 #include <linux/types.h>
6 #include <linux/tracepoint.h>
11 #define TRACE_SYSTEM amdgpu
12 #define TRACE_INCLUDE_FILE amdgpu_trace
/*
 * amdgpu_mm_rreg - trace an MMIO register read.
 * Records the device index (did), the register offset and the value read,
 * all widened to unsigned long for printing.
 * NOTE(review): the TP_STRUCT__entry()/TP_fast_assign() wrappers and the
 * did/reg assignments are not visible in this dump -- lines appear to be
 * missing; confirm against the full file.
 */
14 TRACE_EVENT(amdgpu_mm_rreg,
15 TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
16 TP_ARGS(did, reg, value),
18 __field(unsigned, did)
19 __field(uint32_t, reg)
20 __field(uint32_t, value)
25 __entry->value = value;
27 TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
28 (unsigned long)__entry->did,
29 (unsigned long)__entry->reg,
30 (unsigned long)__entry->value)
/*
 * amdgpu_mm_wreg - trace an MMIO register write.
 * Mirrors amdgpu_mm_rreg: logs device index, register offset and the
 * value being written, using the same output format.
 * NOTE(review): TP_STRUCT__entry()/TP_fast_assign() wrappers and the
 * did/reg assignments are not visible in this dump -- likely truncated.
 */
33 TRACE_EVENT(amdgpu_mm_wreg,
34 TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
35 TP_ARGS(did, reg, value),
37 __field(unsigned, did)
38 __field(uint32_t, reg)
39 __field(uint32_t, value)
44 __entry->value = value;
46 TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
47 (unsigned long)__entry->did,
48 (unsigned long)__entry->reg,
49 (unsigned long)__entry->value)
/*
 * amdgpu_bo_create - trace creation of a buffer object.
 * Captures the bo pointer, its page count, current TTM memory type, and
 * the preferred/allowed placement domains.
 * NOTE(review): the field named "visible" is assigned the raw bo->flags
 * value, not a visibility bit -- the printed "visible=%d" label is
 * misleading; confirm intent against the full file.
 */
52 TRACE_EVENT(amdgpu_bo_create,
53 TP_PROTO(struct amdgpu_bo *bo),
56 __field(struct amdgpu_bo *, bo)
66 __entry->pages = bo->tbo.num_pages;
67 __entry->type = bo->tbo.mem.mem_type;
68 __entry->prefer = bo->prefered_domains;
69 __entry->allow = bo->allowed_domains;
70 __entry->visible = bo->flags;
73 TP_printk("bo=%p,pages=%u,type=%d,prefered=%d,allowed=%d,visible=%d",
74 __entry->bo, __entry->pages, __entry->type,
75 __entry->prefer, __entry->allow, __entry->visible)
/*
 * amdgpu_cs - trace one IB of a command submission.
 * Logs the bo_list of the parser, the ring index, the dword count of
 * IB number i, and the count of emitted-but-unsignaled fences.
 * NOTE(review): the amdgpu_fence_count_emitted() call and the fourth
 * TP_printk argument (fences) are cut off in this dump -- the format
 * string has four specifiers but only three arguments are visible.
 */
78 TRACE_EVENT(amdgpu_cs,
79 TP_PROTO(struct amdgpu_cs_parser *p, int i),
82 __field(struct amdgpu_bo_list *, bo_list)
89 __entry->bo_list = p->bo_list;
90 __entry->ring = p->job->ring->idx;
91 __entry->dw = p->job->ibs[i].length_dw;
92 __entry->fences = amdgpu_fence_count_emitted(
95 TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
96 __entry->bo_list, __entry->ring, __entry->dw,
/*
 * amdgpu_cs_ioctl - trace a job at CS-ioctl submission time.
 * Records pointers identifying the job: the device, the scheduler job,
 * the first IB, the scheduler's "finished" fence, plus the ring name and
 * the number of IBs in the job.
 * NOTE(review): TP_ARGS/TP_STRUCT__entry()/TP_fast_assign() wrapper
 * lines are missing from this dump.
 */
100 TRACE_EVENT(amdgpu_cs_ioctl,
101 TP_PROTO(struct amdgpu_job *job),
104 __field(struct amdgpu_device *, adev)
105 __field(struct amd_sched_job *, sched_job)
106 __field(struct amdgpu_ib *, ib)
107 __field(struct dma_fence *, fence)
108 __field(char *, ring_name)
109 __field(u32, num_ibs)
113 __entry->adev = job->adev;
114 __entry->sched_job = &job->base;
115 __entry->ib = job->ibs;
116 __entry->fence = &job->base.s_fence->finished;
117 __entry->ring_name = job->ring->name;
118 __entry->num_ibs = job->num_ibs;
120 TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
121 __entry->adev, __entry->sched_job, __entry->ib,
122 __entry->fence, __entry->ring_name, __entry->num_ibs)
/*
 * amdgpu_sched_run_job - trace a job when the GPU scheduler runs it.
 * Identical payload to amdgpu_cs_ioctl (device, sched job, first IB,
 * finished fence, ring name, IB count); the two events bracket a job's
 * path from submission to execution.
 * NOTE(review): TP_ARGS/TP_STRUCT__entry()/TP_fast_assign() wrapper
 * lines are missing from this dump.
 */
125 TRACE_EVENT(amdgpu_sched_run_job,
126 TP_PROTO(struct amdgpu_job *job),
129 __field(struct amdgpu_device *, adev)
130 __field(struct amd_sched_job *, sched_job)
131 __field(struct amdgpu_ib *, ib)
132 __field(struct dma_fence *, fence)
133 __field(char *, ring_name)
134 __field(u32, num_ibs)
138 __entry->adev = job->adev;
139 __entry->sched_job = &job->base;
140 __entry->ib = job->ibs;
141 __entry->fence = &job->base.s_fence->finished;
142 __entry->ring_name = job->ring->name;
143 __entry->num_ibs = job->num_ibs;
145 TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
146 __entry->adev, __entry->sched_job, __entry->ib,
147 __entry->fence, __entry->ring_name, __entry->num_ibs)
/*
 * amdgpu_vm_grab_id - trace allocation of a VMID for a job.
 * Records the VM pointer, the ring index, the VMID grabbed, the page
 * directory address, and whether a VM flush is required.
 * NOTE(review): the __field lines for vm/ring/vmid and the
 * __entry->vm assignment are not visible in this dump.
 */
151 TRACE_EVENT(amdgpu_vm_grab_id,
152 TP_PROTO(struct amdgpu_vm *vm, int ring, struct amdgpu_job *job),
153 TP_ARGS(vm, ring, job),
155 __field(struct amdgpu_vm *, vm)
158 __field(u64, pd_addr)
159 __field(u32, needs_flush)
164 __entry->ring = ring;
165 __entry->vmid = job->vm_id;
166 __entry->pd_addr = job->vm_pd_addr;
167 __entry->needs_flush = job->vm_needs_flush;
169 TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx needs_flush=%u",
170 __entry->vm, __entry->ring, __entry->vmid,
171 __entry->pd_addr, __entry->needs_flush)
/*
 * amdgpu_vm_bo_map - trace mapping of a buffer object into a VM.
 * Logs the bo, the mapped GPU VA range [start, last] from the interval
 * tree, the offset within the bo, and the mapping flags.
 */
174 TRACE_EVENT(amdgpu_vm_bo_map,
175 TP_PROTO(struct amdgpu_bo_va *bo_va,
176 struct amdgpu_bo_va_mapping *mapping),
177 TP_ARGS(bo_va, mapping),
179 __field(struct amdgpu_bo *, bo)
187 __entry->bo = bo_va->bo;
188 __entry->start = mapping->it.start;
189 __entry->last = mapping->it.last;
190 __entry->offset = mapping->offset;
191 __entry->flags = mapping->flags;
193 TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
194 __entry->bo, __entry->start, __entry->last,
195 __entry->offset, __entry->flags)
/*
 * amdgpu_vm_bo_unmap - trace unmapping of a buffer object from a VM.
 * Same payload as amdgpu_vm_bo_map: bo, VA range, offset and flags of
 * the mapping being removed.
 */
198 TRACE_EVENT(amdgpu_vm_bo_unmap,
199 TP_PROTO(struct amdgpu_bo_va *bo_va,
200 struct amdgpu_bo_va_mapping *mapping),
201 TP_ARGS(bo_va, mapping),
203 __field(struct amdgpu_bo *, bo)
211 __entry->bo = bo_va->bo;
212 __entry->start = mapping->it.start;
213 __entry->last = mapping->it.last;
214 __entry->offset = mapping->offset;
215 __entry->flags = mapping->flags;
217 TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
218 __entry->bo, __entry->start, __entry->last,
219 __entry->offset, __entry->flags)
/*
 * amdgpu_vm_mapping - shared event class for VM mapping updates.
 * Records the half-open VA range [soffset, eoffset) -- note eoffset is
 * it.last + 1, converting the inclusive interval-tree bound to an
 * exclusive end -- plus the mapping flags.
 */
222 DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
223 TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
226 __field(u64, soffset)
227 __field(u64, eoffset)
232 __entry->soffset = mapping->it.start;
233 __entry->eoffset = mapping->it.last + 1;
234 __entry->flags = mapping->flags;
236 TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
237 __entry->soffset, __entry->eoffset, __entry->flags)
/* Instance of the amdgpu_vm_mapping class fired when a bo's VM mapping
 * is updated. NOTE(review): the TP_ARGS(mapping) line is missing here. */
240 DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_update,
241 TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
/* Instance of the amdgpu_vm_mapping class fired per mapping processed.
 * NOTE(review): the TP_ARGS(mapping) line is missing here. */
245 DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
246 TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
/*
 * amdgpu_vm_set_ptes - trace a batched page-table-entry write.
 * Logs the PTE destination address (pe), the first mapped address, the
 * entry count, the per-entry address increment, and the PTE flags.
 * NOTE(review): the __field lines and the __entry->pe assignment are
 * missing from this dump.
 */
250 TRACE_EVENT(amdgpu_vm_set_ptes,
251 TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
252 uint32_t incr, uint32_t flags),
253 TP_ARGS(pe, addr, count, incr, flags),
264 __entry->addr = addr;
265 __entry->count = count;
266 __entry->incr = incr;
267 __entry->flags = flags;
269 TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
270 __entry->pe, __entry->addr, __entry->incr,
271 __entry->flags, __entry->count)
/*
 * amdgpu_vm_copy_ptes - trace copying of PTEs from a source buffer.
 * Logs the destination PTE address, the source address and the number
 * of entries copied.
 * NOTE(review): the __field lines and the pe/src assignments are
 * missing from this dump.
 */
274 TRACE_EVENT(amdgpu_vm_copy_ptes,
275 TP_PROTO(uint64_t pe, uint64_t src, unsigned count),
276 TP_ARGS(pe, src, count),
286 __entry->count = count;
288 TP_printk("pe=%010Lx, src=%010Lx, count=%u",
289 __entry->pe, __entry->src, __entry->count)
/*
 * amdgpu_vm_flush - trace a VM TLB/page-table flush on a ring.
 * Logs the page directory address, the ring index and the VMID.
 * NOTE(review): the ring/id __field lines and the __entry->id
 * assignment are missing from this dump.
 */
292 TRACE_EVENT(amdgpu_vm_flush,
293 TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
294 TP_ARGS(pd_addr, ring, id),
296 __field(u64, pd_addr)
302 __entry->pd_addr = pd_addr;
303 __entry->ring = ring;
306 TP_printk("ring=%u, id=%u, pd_addr=%010Lx",
307 __entry->ring, __entry->id, __entry->pd_addr)
/*
 * amdgpu_bo_list_set - trace addition of a bo to a bo list.
 * Logs the list pointer, the bo pointer and the bo's size in bytes
 * (via amdgpu_bo_size()).
 * NOTE(review): the __entry->bo assignment and the TP_printk argument
 * list are cut off in this dump.
 */
310 TRACE_EVENT(amdgpu_bo_list_set,
311 TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo),
314 __field(struct amdgpu_bo_list *, list)
315 __field(struct amdgpu_bo *, bo)
316 __field(u64, bo_size)
320 __entry->list = list;
322 __entry->bo_size = amdgpu_bo_size(bo);
324 TP_printk("list=%p, bo=%p, bo_size = %Ld",
/*
 * amdgpu_cs_bo_status - per-command-submission buffer statistics.
 * Logs the number of buffer objects in the CS and their combined size.
 */
330 TRACE_EVENT(amdgpu_cs_bo_status,
331 TP_PROTO(uint64_t total_bo, uint64_t total_size),
332 TP_ARGS(total_bo, total_size),
334 __field(u64, total_bo)
335 __field(u64, total_size)
339 __entry->total_bo = total_bo;
340 __entry->total_size = total_size;
/* Fixed: the labels were swapped relative to the arguments -- total_bo
 * (a count) was printed under "total bo size" and total_size under
 * "total bo count". Labels now match the values actually printed. */
342 TP_printk("total bo count = %Ld, total bo size = %Ld",
343 __entry->total_bo, __entry->total_size)
/*
 * amdgpu_ttm_bo_move - trace migration of a bo between TTM placements.
 * Logs the bo, its size, and the old/new placement identifiers.
 * NOTE(review): the __entry->bo assignment and the TP_fast_assign/
 * TP_STRUCT__entry wrappers are not visible in this dump.
 */
346 TRACE_EVENT(amdgpu_ttm_bo_move,
347 TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
348 TP_ARGS(bo, new_placement, old_placement),
350 __field(struct amdgpu_bo *, bo)
351 __field(u64, bo_size)
352 __field(u32, new_placement)
353 __field(u32, old_placement)
358 __entry->bo_size = amdgpu_bo_size(bo);
359 __entry->new_placement = new_placement;
360 __entry->old_placement = old_placement;
362 TP_printk("bo=%p from:%d to %d with size = %Ld",
363 __entry->bo, __entry->old_placement,
364 __entry->new_placement, __entry->bo_size)
369 /* This part must be outside protection */
370 #undef TRACE_INCLUDE_PATH
/* Tell define_trace.h to look for TRACE_INCLUDE_FILE (amdgpu_trace) in
 * this file's own directory when it re-includes the header to expand
 * the tracepoint bodies. */
371 #define TRACE_INCLUDE_PATH .
372 #include <trace/define_trace.h>