/*
 *  linux/kernel/profile.c
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 *
 *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 *  Consolidation of architecture support code for profiling,
 *	William Irwin, Oracle, July 2004
 *  Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, William Irwin, Oracle, 2004
 */

#include <linux/module.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <asm/sections.h>
#include <asm/semaphore.h>

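/*
 * Pending hits are stored per cpu in page-sized open-addressed
 * hashtables of struct profile_hit entries, probed in groups of
 * PROFILE_GRPSZ entries (see profile_hit() below).
 */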
struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)

/* Oprofile timer tick hook */
int (*timer_hook)(struct pt_regs *) __read_mostly;

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;
static int prof_on __read_mostly;
static cpumask_t prof_cpu_mask = CPU_MASK_ALL;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */

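/*
 * Parse the "profile=" boot parameter: "profile=schedule[,N]" selects
 * schedule() call profiling, a bare "profile=N" selects cpu-time
 * profiling; N is the shift applied to text addresses and sets the
 * resolution of the profile buffer.
 */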
static int __init profile_setup(char * str)
{
	static char __initdata schedstr[] = "schedule";
	int par;

	if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel schedule profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = par;
		prof_on = CPU_PROFILING;
		printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);


void __init profile_init(void)
{
	if (!prof_on)
		return;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	prof_buffer = alloc_bootmem(prof_len*sizeof(atomic_t));
}

/* Profile event notifications */

#ifdef CONFIG_PROFILING

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);

void profile_task_exit(struct task_struct * task)
{
	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

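/*
 * Returns 1 when a registered handler replied NOTIFY_OK, i.e. it has
 * taken over responsibility for the task structure and the caller
 * should not free it; returns 0 otherwise.
 */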
int profile_handoff_task(struct task_struct * task)
{
	int ret;
	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block * n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}

int task_handoff_unregister(struct notifier_block * n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}

int profile_event_register(enum profile_type type, struct notifier_block * n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_register(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_register(
				&munmap_notifier, n);
		break;
	}

	return err;
}


int profile_event_unregister(enum profile_type type, struct notifier_block * n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_unregister(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_unregister(
				&munmap_notifier, n);
		break;
	}

	return err;
}

int register_timer_hook(int (*hook)(struct pt_regs *))
{
	if (timer_hook)
		return -EBUSY;
	timer_hook = hook;
	return 0;
}

void unregister_timer_hook(int (*hook)(struct pt_regs *))
{
	WARN_ON(hook != timer_hook);
	timer_hook = NULL;
	/* make sure all CPUs see the NULL hook */
	synchronize_sched();  /* Allow ongoing interrupts to complete. */
}

EXPORT_SYMBOL_GPL(register_timer_hook);
EXPORT_SYMBOL_GPL(unregister_timer_hook);
EXPORT_SYMBOL_GPL(task_handoff_register);
EXPORT_SYMBOL_GPL(task_handoff_unregister);

#endif /* CONFIG_PROFILING */

EXPORT_SYMBOL_GPL(profile_event_register);
EXPORT_SYMBOL_GPL(profile_event_unregister);

#ifdef CONFIG_SMP
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * use of having a second hashtable is for avoiding cacheline
 * contention that would otherwise happen during flushes of pending
 * profile hits required for the accuracy of reported profile hits
 * and so resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING profile_hit() may be called from process context).
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable full of finite
 * collision chains, not just pairs of them.
 *
 * -- wli
 */
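/* IPI handler: switch this cpu over to its other pending-hit buffer. */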
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

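/*
 * Flip every cpu to its other buffer, then fold the retired buffers'
 * pending hits into prof_buffer.
 */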
static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

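/*
 * Flip buffers and zero the retired set without accounting its
 * contents; used when the profile is being reset.
 */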
static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}

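/*
 * Record a hit at __pc: bump a matching hash entry or claim an empty
 * slot in its probe sequence; if the whole table is full, account the
 * hit directly and flush all pending hits to prof_buffer.
 */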
void profile_hit(int type, void *__pc)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	if (prof_on != type || !prof_buffer)
		return;
	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits++;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = 1;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);
	atomic_inc(&prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}

#ifdef CONFIG_HOTPLUG_CPU
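/*
 * Allocate the per-cpu hash pages when a cpu is being prepared, add
 * it to prof_cpu_mask as it comes online, and free the pages again if
 * the bringup is cancelled or the cpu dies.
 */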
static int __devinit profile_cpu_callback(struct notifier_block *info,
					unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
		node = cpu_to_node(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = alloc_pages_node(node,
					GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
					0);
			if (!page)
				return NOTIFY_BAD;
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = alloc_pages_node(node,
					GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
					0);
			if (!page)
				goto out_free;
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return NOTIFY_BAD;
	case CPU_ONLINE:
		cpu_set(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		cpu_clear(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)

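/* On UP there is no cross-cpu contention; account hits directly. */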
void profile_hit(int type, void *__pc)
{
	unsigned long pc;

	if (prof_on != type || !prof_buffer)
		return;
	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_inc(&prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */

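/*
 * Called from the timer interrupt: run the oprofile hook if present,
 * and record a hit when the sample comes from kernel mode on a cpu in
 * prof_cpu_mask.
 */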
void profile_tick(int type, struct pt_regs *regs)
{
	if (type == CPU_PROFILING && timer_hook)
		timer_hook(regs);
	if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>

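/* /proc/irq/prof_cpu_mask: read and update the mask of profiled cpus. */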
static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffer,
					unsigned long count, void *data)
{
	cpumask_t *mask = (cpumask_t *)data;
	unsigned long full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}

void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
{
	struct proc_dir_entry *entry;

	/* create /proc/irq/prof_cpu_mask */
	if (!(entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir)))
		return;
	entry->nlink = 1;
	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;
}

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char * pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	/* the first word of the file is the sampling step */
	while (p < sizeof(unsigned int) && count > 0) {
		put_user(*((char *)(&sample_step)+p),buf);
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf,(void *)pnt,count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}

/*
 * Writing to /proc/profile resets the counters
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer (unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}

static struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
};

#ifdef CONFIG_SMP
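/*
 * Empty IPI body: on_each_cpu(profile_nop, ...) serves as a barrier
 * so every cpu has observed prof_on == 0 before the hash pages are
 * freed below.
 */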
static void __init profile_nop(void *unused)
{
}

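/*
 * Allocate one zeroed page per cpu per buffer for the pending-hit
 * tables; on failure, disable profiling and free whatever was
 * allocated.
 */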
static int __init create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_node(cpu);
		struct page *page;

		page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[1]
				= (struct profile_hit *)page_address(page);
		page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[0]
				= (struct profile_hit *)page_address(page);
	}
	return 0;
out_cleanup:
	prof_on = 0;
	smp_mb();
	on_each_cpu(profile_nop, NULL, 0, 1);
	for_each_online_cpu(cpu) {
		struct page *page;

		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
	}
	return -1;
}
#else
#define create_hash_tables()			({ 0; })
#endif

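/*
 * Boot-time setup: create the per-cpu hashtables and register
 * /proc/profile, sized for the sample-step word plus the prof_len
 * counters.
 */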
static int __init create_proc_profile(void)
{
	struct proc_dir_entry *entry;

	if (!prof_on)
		return 0;
	if (create_hash_tables())
		return -1;
	if (!(entry = create_proc_entry("profile", S_IWUSR | S_IRUGO, NULL)))
		return 0;
	entry->proc_fops = &proc_profile_operations;
	entry->size = (1+prof_len) * sizeof(atomic_t);
	hotcpu_notifier(profile_cpu_callback, 0);
	return 0;
}
module_init(create_proc_profile);
#endif /* CONFIG_PROC_FS */