/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/kthread.h>
#include <linux/sched/numa_balancing.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

        TP_PROTO(struct task_struct *t),

        TP_ARGS(t),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
                __entry->pid    = t->pid;
        ),

        TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
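
/*
 * Illustrative sketch (not part of the upstream header; the probe name is
 * hypothetical): a module can hook the tracepoint above through the
 * generated register_trace_<name>() helpers. The probe's first argument is
 * the data pointer passed at registration time. Guarded out so it is never
 * compiled here.
 */
#if 0
static void my_kthread_stop_probe(void *data, struct task_struct *t)
{
        /* Runs in tracepoint context; keep it short and non-blocking. */
        pr_info("kthread_stop: comm=%s pid=%d\n", t->comm, t->pid);
}

/* In module init / exit respectively: */
/* register_trace_sched_kthread_stop(my_kthread_stop_probe, NULL);   */
/* unregister_trace_sched_kthread_stop(my_kthread_stop_probe, NULL); */
#endif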

/*
 * Tracepoint for the return value of kthread_stop():
 */
TRACE_EVENT(sched_kthread_stop_ret,

        TP_PROTO(int ret),

        TP_ARGS(ret),

        TP_STRUCT__entry(
                __field(        int,    ret     )
        ),

        TP_fast_assign(
                __entry->ret    = ret;
        ),

        TP_printk("ret=%d", __entry->ret)
);

/**
 * sched_kthread_work_queue_work - called when a work gets queued
 * @worker:     pointer to the kthread_worker
 * @work:       pointer to struct kthread_work
 *
 * This event occurs when a work is queued immediately, or once a
 * delayed work is actually queued (i.e., once its delay has elapsed).
 */
TRACE_EVENT(sched_kthread_work_queue_work,

        TP_PROTO(struct kthread_worker *worker,
                 struct kthread_work *work),

        TP_ARGS(worker, work),

        TP_STRUCT__entry(
                __field( void *,        work    )
                __field( void *,        function)
                __field( void *,        worker  )
        ),

        TP_fast_assign(
                __entry->work           = work;
                __entry->function       = work->func;
                __entry->worker         = worker;
        ),

        TP_printk("work struct=%p function=%ps worker=%p",
                  __entry->work, __entry->function, __entry->worker)
);

/**
 * sched_kthread_work_execute_start - called immediately before the work callback
 * @work:       pointer to struct kthread_work
 *
 * Allows tracking of kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_start,

        TP_PROTO(struct kthread_work *work),

        TP_ARGS(work),

        TP_STRUCT__entry(
                __field( void *,        work    )
                __field( void *,        function)
        ),

        TP_fast_assign(
                __entry->work           = work;
                __entry->function       = work->func;
        ),

        TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

/**
 * sched_kthread_work_execute_end - called immediately after the work callback
 * @work:       pointer to struct kthread_work
 * @function:   pointer to the worker function
 *
 * Allows tracking of kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_end,

        TP_PROTO(struct kthread_work *work, kthread_work_func_t function),

        TP_ARGS(work, function),

        TP_STRUCT__entry(
                __field( void *,        work    )
                __field( void *,        function)
        ),

        TP_fast_assign(
                __entry->work           = work;
                __entry->function       = function;
        ),

        TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

        TP_PROTO(struct task_struct *p),

        TP_ARGS(__perf_task(p)),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
                __field(        int,    target_cpu              )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid            = p->pid;
                __entry->prio           = p->prio; /* XXX SCHED_DEADLINE */
                __entry->target_cpu     = task_cpu(p);
        ),

        TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
                  __entry->comm, __entry->pid, __entry->prio,
                  __entry->target_cpu)
);

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt,
                                              unsigned int prev_state,
                                              struct task_struct *p)
{
        unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
        BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

        /*
         * Preemption ignores task state, therefore preempted tasks are always
         * RUNNING (we will not have dequeued if state != RUNNING).
         */
        if (preempt)
                return TASK_REPORT_MAX;

        /*
         * __task_state_index() uses fls() and returns a value in the 0-8
         * range. Decrement it by 1 (except for TASK_RUNNING, i.e. 0) before
         * using it in the left-shift operation to get the correct
         * task->state mapping.
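         *
         * For example, prev_state = TASK_UNINTERRUPTIBLE (0x0002):
         * __task_state_index() returns fls(2) = 2, and 1 << (2 - 1) == 0x0002
         * again, matching the "D" flag in sched_switch's TP_printk() below.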
         */
        state = __task_state_index(prev_state, p->exit_state);

        return state ? (1 << (state - 1)) : state;
}
#endif /* CREATE_TRACE_POINTS */

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

        TP_PROTO(bool preempt,
                 struct task_struct *prev,
                 struct task_struct *next,
                 unsigned int prev_state),

        TP_ARGS(preempt, prev, next, prev_state),

        TP_STRUCT__entry(
                __array(        char,   prev_comm,      TASK_COMM_LEN   )
                __field(        pid_t,  prev_pid                        )
                __field(        int,    prev_prio                       )
                __field(        long,   prev_state                      )
                __array(        char,   next_comm,      TASK_COMM_LEN   )
                __field(        pid_t,  next_pid                        )
                __field(        int,    next_prio                       )
        ),

        TP_fast_assign(
                memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
                __entry->prev_pid       = prev->pid;
                __entry->prev_prio      = prev->prio;
                __entry->prev_state     = __trace_sched_switch_state(preempt, prev_state, prev);
                memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
                __entry->next_pid       = next->pid;
                __entry->next_prio      = next->prio;
                /* XXX SCHED_DEADLINE */
        ),

        TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
                __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,

                (__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
                  __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
                                { TASK_INTERRUPTIBLE, "S" },
                                { TASK_UNINTERRUPTIBLE, "D" },
                                { __TASK_STOPPED, "T" },
                                { __TASK_TRACED, "t" },
                                { EXIT_DEAD, "X" },
                                { EXIT_ZOMBIE, "Z" },
                                { TASK_PARKED, "P" },
                                { TASK_DEAD, "I" }) :
                  "R",

                __entry->prev_state & TASK_REPORT_MAX ? "+" : "",
                __entry->next_comm, __entry->next_pid, __entry->next_prio)
);
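
/*
 * For illustration (hypothetical values), the event above renders as:
 *
 *   prev_comm=kworker/0:1 prev_pid=13 prev_prio=120 prev_state=D ==>
 *   next_comm=swapper/0 next_pid=0 next_prio=120
 *
 * A preempted task reports prev_state=R+: __trace_sched_switch_state()
 * returns TASK_REPORT_MAX, whose low bits are clear ("R") and whose top
 * bit selects the "+" suffix.
 */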

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

        TP_PROTO(struct task_struct *p, int dest_cpu),

        TP_ARGS(p, dest_cpu),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
                __field(        int,    orig_cpu                )
                __field(        int,    dest_cpu                )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid            = p->pid;
                __entry->prio           = p->prio; /* XXX SCHED_DEADLINE */
                __entry->orig_cpu       = task_cpu(p);
                __entry->dest_cpu       = dest_cpu;
        ),

        TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
                  __entry->comm, __entry->pid, __entry->prio,
                  __entry->orig_cpu, __entry->dest_cpu)
);

DECLARE_EVENT_CLASS(sched_process_template,

        TP_PROTO(struct task_struct *p),

        TP_ARGS(p),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid            = p->pid;
                __entry->prio           = p->prio; /* XXX SCHED_DEADLINE */
        ),

        TP_printk("comm=%s pid=%d prio=%d",
                  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

/*
 * Tracepoint for waiting on a task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
        TP_PROTO(struct task_struct *p),
        TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

        TP_PROTO(struct pid *pid),

        TP_ARGS(pid),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
                __entry->pid            = pid_nr(pid);
                __entry->prio           = current->prio; /* XXX SCHED_DEADLINE */
        ),

        TP_printk("comm=%s pid=%d prio=%d",
                  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for kernel_clone():
 */
TRACE_EVENT(sched_process_fork,

        TP_PROTO(struct task_struct *parent, struct task_struct *child),

        TP_ARGS(parent, child),

        TP_STRUCT__entry(
                __array(        char,   parent_comm,    TASK_COMM_LEN   )
                __field(        pid_t,  parent_pid                      )
                __array(        char,   child_comm,     TASK_COMM_LEN   )
                __field(        pid_t,  child_pid                       )
        ),

        TP_fast_assign(
                memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
                __entry->parent_pid     = parent->pid;
                memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
                __entry->child_pid      = child->pid;
        ),

        TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
                __entry->parent_comm, __entry->parent_pid,
                __entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

        TP_PROTO(struct task_struct *p, pid_t old_pid,
                 struct linux_binprm *bprm),

        TP_ARGS(p, old_pid, bprm),

        TP_STRUCT__entry(
                __string(       filename,       bprm->filename  )
                __field(        pid_t,          pid             )
                __field(        pid_t,          old_pid         )
        ),

        TP_fast_assign(
                __assign_str(filename);
                __entry->pid            = p->pid;
                __entry->old_pid        = old_pid;
        ),

        TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
                  __entry->pid, __entry->old_pid)
);

/**
 * sched_prepare_exec - called before setting up new exec
 * @task:       pointer to the current task
 * @bprm:       pointer to linux_binprm used for new exec
 *
 * Called before flushing the old exec, where @task is still unchanged, but at
 * the point of no return during switching to the new exec. By the time it is
 * called, the exec will either succeed or, on failure, terminate the task.
 * Also see the "sched_process_exec" tracepoint, which is called right after
 * @task has successfully switched to the new exec.
 */
TRACE_EVENT(sched_prepare_exec,

        TP_PROTO(struct task_struct *task, struct linux_binprm *bprm),

        TP_ARGS(task, bprm),

        TP_STRUCT__entry(
                __string(       interp,         bprm->interp    )
                __string(       filename,       bprm->filename  )
                __field(        pid_t,          pid             )
                __string(       comm,           task->comm      )
        ),

        TP_fast_assign(
                __assign_str(interp);
                __assign_str(filename);
                __entry->pid = task->pid;
                __assign_str(comm);
        ),

        TP_printk("interp=%s filename=%s pid=%d comm=%s",
                  __get_str(interp), __get_str(filename),
                  __entry->pid, __get_str(comm))
);

#ifdef CONFIG_SCHEDSTATS
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
#else
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
#endif

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
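
/*
 * Note on the annotations below: __perf_task()/__perf_count() leave the
 * traced arguments unchanged; they are hints telling perf which argument
 * names the task to attribute the event to and which supplies the sample
 * count (here, the delay in ns).
 */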
DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,

        TP_PROTO(struct task_struct *tsk, u64 delay),

        TP_ARGS(__perf_task(tsk), __perf_count(delay)),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN   )
                __field( pid_t, pid                     )
                __field( u64,   delay                   )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid    = tsk->pid;
                __entry->delay  = delay;
        ),

        TP_printk("comm=%s pid=%d delay=%Lu [ns]",
                        __entry->comm, __entry->pid,
                        (unsigned long long)__entry->delay)
);

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));
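
/*
 * Illustrative sketch (hypothetical names; assumes CONFIG_SCHEDSTATS so the
 * event is real rather than a NOP): a module could aggregate wait delays by
 * attaching a probe. Guarded out so it is never compiled here.
 */
#if 0
static u64 total_wait_ns;

static void my_stat_wait_probe(void *data, struct task_struct *tsk, u64 delay)
{
        /* delay: time spent runnable but not running, in ns. */
        total_wait_ns += delay;
}

/* register_trace_sched_stat_wait(my_stat_wait_probe, NULL); */
#endif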

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

        TP_PROTO(struct task_struct *tsk, u64 runtime),

        TP_ARGS(tsk, __perf_count(runtime)),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN   )
                __field( pid_t, pid                     )
                __field( u64,   runtime                 )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid            = tsk->pid;
                __entry->runtime        = runtime;
        ),

        TP_printk("comm=%s pid=%d runtime=%Lu [ns]",
                        __entry->comm, __entry->pid,
                        (unsigned long long)__entry->runtime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
             TP_PROTO(struct task_struct *tsk, u64 runtime),
             TP_ARGS(tsk, runtime));

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

        TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

        TP_ARGS(tsk, pi_task),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN   )
                __field( pid_t, pid                     )
                __field( int,   oldprio                 )
                __field( int,   newprio                 )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid            = tsk->pid;
                __entry->oldprio        = tsk->prio;
                __entry->newprio        = pi_task ?
                                min(tsk->normal_prio, pi_task->prio) :
                                tsk->normal_prio;
                /* XXX SCHED_DEADLINE bits missing */
        ),

        TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
                        __entry->comm, __entry->pid,
                        __entry->oldprio, __entry->newprio)
);

#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
        TP_PROTO(struct task_struct *tsk),
        TP_ARGS(tsk),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN   )
                __field( pid_t, pid                     )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid = tsk->pid;
        ),

        TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
TRACE_EVENT(sched_move_numa,

        TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

        TP_ARGS(tsk, src_cpu, dst_cpu),

        TP_STRUCT__entry(
                __field( pid_t, pid                     )
                __field( pid_t, tgid                    )
                __field( pid_t, ngid                    )
                __field( int,   src_cpu                 )
                __field( int,   src_nid                 )
                __field( int,   dst_cpu                 )
                __field( int,   dst_nid                 )
        ),

        TP_fast_assign(
                __entry->pid            = task_pid_nr(tsk);
                __entry->tgid           = task_tgid_nr(tsk);
                __entry->ngid           = task_numa_group_id(tsk);
                __entry->src_cpu        = src_cpu;
                __entry->src_nid        = cpu_to_node(src_cpu);
                __entry->dst_cpu        = dst_cpu;
                __entry->dst_nid        = cpu_to_node(dst_cpu);
        ),

        TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
                        __entry->pid, __entry->tgid, __entry->ngid,
                        __entry->src_cpu, __entry->src_nid,
                        __entry->dst_cpu, __entry->dst_nid)
);

DECLARE_EVENT_CLASS(sched_numa_pair_template,

        TP_PROTO(struct task_struct *src_tsk, int src_cpu,
                 struct task_struct *dst_tsk, int dst_cpu),

        TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

        TP_STRUCT__entry(
                __field( pid_t, src_pid                 )
                __field( pid_t, src_tgid                )
                __field( pid_t, src_ngid                )
                __field( int,   src_cpu                 )
                __field( int,   src_nid                 )
                __field( pid_t, dst_pid                 )
                __field( pid_t, dst_tgid                )
                __field( pid_t, dst_ngid                )
                __field( int,   dst_cpu                 )
                __field( int,   dst_nid                 )
        ),

        TP_fast_assign(
                __entry->src_pid        = task_pid_nr(src_tsk);
                __entry->src_tgid       = task_tgid_nr(src_tsk);
                __entry->src_ngid       = task_numa_group_id(src_tsk);
                __entry->src_cpu        = src_cpu;
                __entry->src_nid        = cpu_to_node(src_cpu);
                __entry->dst_pid        = dst_tsk ? task_pid_nr(dst_tsk) : 0;
                __entry->dst_tgid       = dst_tsk ? task_tgid_nr(dst_tsk) : 0;
                __entry->dst_ngid       = dst_tsk ? task_numa_group_id(dst_tsk) : 0;
                __entry->dst_cpu        = dst_cpu;
                __entry->dst_nid        = dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
        ),

        TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
                        __entry->src_pid, __entry->src_tgid, __entry->src_ngid,
                        __entry->src_cpu, __entry->src_nid,
                        __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
                        __entry->dst_cpu, __entry->dst_nid)
);

DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,

        TP_PROTO(struct task_struct *src_tsk, int src_cpu,
                 struct task_struct *dst_tsk, int dst_cpu),

        TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,

        TP_PROTO(struct task_struct *src_tsk, int src_cpu,
                 struct task_struct *dst_tsk, int dst_cpu),

        TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

#ifdef CONFIG_NUMA_BALANCING
#define NUMAB_SKIP_REASON                                               \
        EM( NUMAB_SKIP_UNSUITABLE,              "unsuitable" )          \
        EM( NUMAB_SKIP_SHARED_RO,               "shared_ro" )           \
        EM( NUMAB_SKIP_INACCESSIBLE,            "inaccessible" )        \
        EM( NUMAB_SKIP_SCAN_DELAY,              "scan_delay" )          \
        EM( NUMAB_SKIP_PID_INACTIVE,            "pid_inactive" )        \
        EM( NUMAB_SKIP_IGNORE_PID,              "ignore_pid_inactive" ) \
        EMe(NUMAB_SKIP_SEQ_COMPLETED,           "seq_completed" )

/* Redefine for export. */
#undef EM
#undef EMe
#define EM(a, b)        TRACE_DEFINE_ENUM(a);
#define EMe(a, b)       TRACE_DEFINE_ENUM(a);

NUMAB_SKIP_REASON

/* Redefine for symbolic printing. */
#undef EM
#undef EMe
#define EM(a, b)        { a, b },
#define EMe(a, b)       { a, b }
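
/*
 * With the definitions above, NUMAB_SKIP_REASON first expands to a list of
 * TRACE_DEFINE_ENUM() statements, making each enum value known to the
 * tracing infrastructure, and then, after the redefinition, to the
 * { value, "string" } pairs consumed by __print_symbolic() below.
 */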

TRACE_EVENT(sched_skip_vma_numa,

        TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma,
                 enum numa_vmaskip_reason reason),

        TP_ARGS(mm, vma, reason),

        TP_STRUCT__entry(
                __field(unsigned long, numa_scan_offset)
                __field(unsigned long, vm_start)
                __field(unsigned long, vm_end)
                __field(enum numa_vmaskip_reason, reason)
        ),

        TP_fast_assign(
                __entry->numa_scan_offset       = mm->numa_scan_offset;
                __entry->vm_start               = vma->vm_start;
                __entry->vm_end                 = vma->vm_end;
                __entry->reason                 = reason;
        ),

        TP_printk("numa_scan_offset=%lX vm_start=%lX vm_end=%lX reason=%s",
                  __entry->numa_scan_offset,
                  __entry->vm_start,
                  __entry->vm_end,
                  __print_symbolic(__entry->reason, NUMAB_SKIP_REASON))
);
#endif /* CONFIG_NUMA_BALANCING */

/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

        TP_PROTO(int cpu),

        TP_ARGS(cpu),

        TP_STRUCT__entry(
                __field(        int,    cpu     )
        ),

        TP_fast_assign(
                __entry->cpu    = cpu;
        ),

        TP_printk("cpu=%d", __entry->cpu)
);

/*
 * The following tracepoints are not exported in tracefs and provide hooking
 * mechanisms only for testing and debugging purposes.
 *
 * They are suffixed with _tp to make them easily identifiable in the code.
 */
DECLARE_TRACE(pelt_cfs_tp,
        TP_PROTO(struct cfs_rq *cfs_rq),
        TP_ARGS(cfs_rq));

DECLARE_TRACE(pelt_rt_tp,
        TP_PROTO(struct rq *rq),
        TP_ARGS(rq));

DECLARE_TRACE(pelt_dl_tp,
        TP_PROTO(struct rq *rq),
        TP_ARGS(rq));

DECLARE_TRACE(pelt_hw_tp,
        TP_PROTO(struct rq *rq),
        TP_ARGS(rq));

DECLARE_TRACE(pelt_irq_tp,
        TP_PROTO(struct rq *rq),
        TP_ARGS(rq));

DECLARE_TRACE(pelt_se_tp,
        TP_PROTO(struct sched_entity *se),
        TP_ARGS(se));
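
/*
 * Illustrative sketch (hypothetical probe name; assumes the tracepoint
 * symbol is exported to the module): since these tracepoints have no
 * tracefs event, the only way to consume them is to attach a probe, e.g.:
 */
#if 0
static void my_pelt_se_probe(void *data, struct sched_entity *se)
{
        /* Inspect per-entity PELT state here; runs in tracepoint context. */
}

/* register_trace_pelt_se_tp(my_pelt_se_probe, NULL);   */
/* unregister_trace_pelt_se_tp(my_pelt_se_probe, NULL); */
#endif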

DECLARE_TRACE(sched_cpu_capacity_tp,
        TP_PROTO(struct rq *rq),
        TP_ARGS(rq));

DECLARE_TRACE(sched_overutilized_tp,
        TP_PROTO(struct root_domain *rd, bool overutilized),
        TP_ARGS(rd, overutilized));

DECLARE_TRACE(sched_util_est_cfs_tp,
        TP_PROTO(struct cfs_rq *cfs_rq),
        TP_ARGS(cfs_rq));

DECLARE_TRACE(sched_util_est_se_tp,
        TP_PROTO(struct sched_entity *se),
        TP_ARGS(se));

DECLARE_TRACE(sched_update_nr_running_tp,
        TP_PROTO(struct rq *rq, int change),
        TP_ARGS(rq, change));

DECLARE_TRACE(sched_compute_energy_tp,
        TP_PROTO(struct task_struct *p, int dst_cpu, unsigned long energy,
                 unsigned long max_util, unsigned long busy_time),
        TP_ARGS(p, dst_cpu, energy, max_util, busy_time));

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>