ftrace: Use synchronize_rcu_tasks_rude() instead of ftrace_sync()
author Paul E. McKenney <[email protected]>
Fri, 3 Apr 2020 19:10:28 +0000 (12:10 -0700)
committer Paul E. McKenney <[email protected]>
Mon, 27 Apr 2020 18:03:53 +0000 (11:03 -0700)
This commit replaces the schedule_on_each_cpu(ftrace_sync) instances
with synchronize_rcu_tasks_rude().

Suggested-by: Steven Rostedt <[email protected]>
Cc: Ingo Molnar <[email protected]>
[ paulmck: Make Kconfig adjustments noted by kbuild test robot. ]
Signed-off-by: Paul E. McKenney <[email protected]>
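
[ Editor's note: a minimal sketch, not part of the commit, contrasting the
  two synchronization idioms it swaps. The helper names dummy_sync(),
  old_style_sync(), and new_style_sync() are invented for illustration. ]

    #include <linux/workqueue.h>
    #include <linux/rcupdate.h>

    /*
     * Stand-in for the removed ftrace_sync() stub: empty on purpose,
     * because the context switch forced on each CPU is the only
     * effect the caller wants.
     */
    static void dummy_sync(struct work_struct *work)
    {
    }

    static void old_style_sync(void)
    {
            /*
             * Old idiom: queue an empty work item on every CPU and
             * wait for all of them, forcing each CPU through the
             * scheduler as a side effect.
             */
            schedule_on_each_cpu(dummy_sync);
    }

    static void new_style_sync(void)
    {
            /*
             * New idiom: wait for a grace period that covers every
             * task, including idle and userspace tasks.
             */
            synchronize_rcu_tasks_rude();
    }

[ The direct call states the intended guarantee (a grace period spanning even
  idle and userspace tasks, per the removed ftrace_sync() comment) rather
  than obtaining it as a side effect of workqueue scheduling, which is why
  FUNCTION_TRACER now selects TASKS_RUDE_RCU in the Kconfig hunk below. ]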
kernel/trace/Kconfig
kernel/trace/ftrace.c

index 402eef84c859ac0b7356ca89f22446b00e0b757e..ae69010d521a8ed79a4c5115307fdc95b4d07cf7 100644 (file)
@@ -158,6 +158,7 @@ config FUNCTION_TRACER
        select CONTEXT_SWITCH_TRACER
        select GLOB
        select TASKS_RCU if PREEMPTION
+       select TASKS_RUDE_RCU
        help
          Enable the kernel to trace every kernel function. This is done
          by using a compiler feature to insert a small, 5-byte No-Operation
index 041694a1eb74d88d8b0fb44d26f13d06a7a29d82..771eace959f3a579a0b5d20cdac0a3d7d28708b0 100644 (file)
@@ -160,17 +160,6 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
        op->saved_func(ip, parent_ip, op, regs);
 }
 
-static void ftrace_sync(struct work_struct *work)
-{
-       /*
-        * This function is just a stub to implement a hard force
-        * of synchronize_rcu(). This requires synchronizing
-        * tasks even in userspace and idle.
-        *
-        * Yes, function tracing is rude.
-        */
-}
-
 static void ftrace_sync_ipi(void *data)
 {
        /* Probably not needed, but do it anyway */
@@ -256,7 +245,7 @@ static void update_ftrace_function(void)
         * Make sure all CPUs see this. Yes this is slow, but static
         * tracing is slow and nasty to have enabled.
         */
-       schedule_on_each_cpu(ftrace_sync);
+       synchronize_rcu_tasks_rude();
        /* Now all cpus are using the list ops. */
        function_trace_op = set_function_trace_op;
        /* Make sure the function_trace_op is visible on all CPUs */
@@ -2932,7 +2921,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
                 * infrastructure to do the synchronization, thus we must do it
                 * ourselves.
                 */
-               schedule_on_each_cpu(ftrace_sync);
+               synchronize_rcu_tasks_rude();
 
                /*
                 * When the kernel is preemptive, tasks can be preempted
@@ -5887,7 +5876,7 @@ ftrace_graph_release(struct inode *inode, struct file *file)
                 * infrastructure to do the synchronization, thus we must do it
                 * ourselves.
                 */
-               schedule_on_each_cpu(ftrace_sync);
+               synchronize_rcu_tasks_rude();
 
                free_ftrace_hash(old_hash);
        }