treewide: kmalloc() -> kmalloc_array()
[linux.git] / kernel / trace / ftrace.c
index 16bbf062018fa79af48db2339db8ad1c094b3e85..df4b6254f986fae4a016fb5922a3692d7affb4b9 100644 (file)
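Context for the hunks below (not part of the patch itself): the treewide conversion replaces open-coded kmalloc(n * size) calls with kmalloc_array(n, size), which performs the same allocation but fails cleanly if the multiplication would overflow. A minimal sketch of the pattern, using a made-up helper for illustration:

/*
 * Sketch only: illustrates the pattern this treewide change applies.
 * kmalloc(n * size, ...) can silently overflow the multiplication;
 * kmalloc_array(n, size, ...) returns NULL instead of allocating a
 * buffer smaller than intended.
 */
#include <linux/slab.h>

static int *alloc_counters(size_t n)
{
	/* Before: open-coded multiplication, no overflow check. */
	/* return kmalloc(n * sizeof(int), GFP_KERNEL); */

	/* After: overflow-checked equivalent. */
	return kmalloc_array(n, sizeof(int), GFP_KERNEL);
}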
@@ -5514,10 +5514,10 @@ static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
        ftrace_create_filter_files(&global_ops, d_tracer);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       trace_create_file("set_graph_function", 0444, d_tracer,
+       trace_create_file("set_graph_function", 0644, d_tracer,
                                    NULL,
                                    &ftrace_graph_fops);
-       trace_create_file("set_graph_notrace", 0444, d_tracer,
+       trace_create_file("set_graph_notrace", 0644, d_tracer,
                                    NULL,
                                    &ftrace_graph_notrace_fops);
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -6830,9 +6830,10 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
        struct task_struct *g, *t;
 
        for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
-               ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
-                                       * sizeof(struct ftrace_ret_stack),
-                                       GFP_KERNEL);
+               ret_stack_list[i] =
+                       kmalloc_array(FTRACE_RETFUNC_DEPTH,
+                                     sizeof(struct ftrace_ret_stack),
+                                     GFP_KERNEL);
                if (!ret_stack_list[i]) {
                        start = 0;
                        end = i;
@@ -6904,9 +6905,9 @@ static int start_graph_tracing(void)
        struct ftrace_ret_stack **ret_stack_list;
        int ret, cpu;
 
-       ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
-                               sizeof(struct ftrace_ret_stack *),
-                               GFP_KERNEL);
+       ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
+                                      sizeof(struct ftrace_ret_stack *),
+                                      GFP_KERNEL);
 
        if (!ret_stack_list)
                return -ENOMEM;
@@ -7088,9 +7089,10 @@ void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
 
                ret_stack = per_cpu(idle_ret_stack, cpu);
                if (!ret_stack) {
-                       ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
-                                           * sizeof(struct ftrace_ret_stack),
-                                           GFP_KERNEL);
+                       ret_stack =
+                               kmalloc_array(FTRACE_RETFUNC_DEPTH,
+                                             sizeof(struct ftrace_ret_stack),
+                                             GFP_KERNEL);
                        if (!ret_stack)
                                return;
                        per_cpu(idle_ret_stack, cpu) = ret_stack;
@@ -7109,9 +7111,9 @@ void ftrace_graph_init_task(struct task_struct *t)
        if (ftrace_graph_active) {
                struct ftrace_ret_stack *ret_stack;
 
-               ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
-                               * sizeof(struct ftrace_ret_stack),
-                               GFP_KERNEL);
+               ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
+                                         sizeof(struct ftrace_ret_stack),
+                                         GFP_KERNEL);
                if (!ret_stack)
                        return;
                graph_init_task(t, ret_stack);