/*
 * Copyright (C) 2008 Mathieu Desnoyers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/static_key.h>

extern struct tracepoint * const __start___tracepoints_ptrs[];
extern struct tracepoint * const __stop___tracepoints_ptrs[];

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

/*
 * Tracepoints mutex protects the builtin and module tracepoints and the hash
 * table, as well as the local module list.
 */
static DEFINE_MUTEX(tracepoints_mutex);

#ifdef CONFIG_MODULES
/* Local list of struct module */
static LIST_HEAD(tracepoint_module_list);
#endif /* CONFIG_MODULES */

/*
 * Tracepoint hash table, containing the active tracepoints.
 * Protected by tracepoints_mutex.
 */
#define TRACEPOINT_HASH_BITS 6
#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];

/*
 * Note about RCU:
 * RCU is used to delay the freeing of old multiple-probe arrays until a
 * quiescent state is reached.
 * Tracepoint entry modifications are protected by the tracepoints_mutex.
 */
struct tracepoint_entry {
        struct hlist_node hlist;
        struct tracepoint_func *funcs;
        int refcount;   /* Number of times armed. 0 if disarmed. */
        char name[0];
};

struct tp_probes {
        union {
                struct rcu_head rcu;
                struct list_head list;
        } u;
        struct tracepoint_func probes[0];
};

static inline void *allocate_probes(int count)
{
        struct tp_probes *p = kmalloc(count * sizeof(struct tracepoint_func)
                        + sizeof(struct tp_probes), GFP_KERNEL);
        return p == NULL ? NULL : p->probes;
}

static void rcu_free_old_probes(struct rcu_head *head)
{
        kfree(container_of(head, struct tp_probes, u.rcu));
}

static inline void release_probes(struct tracepoint_func *old)
{
        if (old) {
                struct tp_probes *tp_probes = container_of(old,
                        struct tp_probes, probes[0]);
                call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes);
        }
}
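
/*
 * Illustrative sketch (not part of this file): the arrays handed out by
 * allocate_probes() are terminated by a NULL .func entry and are consumed
 * by the __DO_TRACE() macro in include/linux/tracepoint.h from a
 * preempt-disabled (sched-RCU) read side, roughly along these lines:
 *
 *      it_func_ptr = rcu_dereference_sched(tp->funcs);
 *      if (it_func_ptr) {
 *              do {
 *                      it_func = (it_func_ptr)->func;
 *                      __data = (it_func_ptr)->data;
 *                      ((void (*)(proto))(it_func))(args);
 *              } while ((++it_func_ptr)->func);
 *      }
 *
 * Since readers run with preemption disabled and never block, the
 * call_rcu_sched() in release_probes() is sufficient to guarantee that no
 * reader still references an old array by the time it is kfree()d.
 */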

static void debug_print_probes(struct tracepoint_entry *entry)
{
        int i;

        if (!tracepoint_debug || !entry->funcs)
                return;

        for (i = 0; entry->funcs[i].func; i++)
                printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i].func);
}

static struct tracepoint_func *
tracepoint_entry_add_probe(struct tracepoint_entry *entry,
                           void *probe, void *data)
{
        int nr_probes = 0;
        struct tracepoint_func *old, *new;

        WARN_ON(!probe);

        debug_print_probes(entry);
        old = entry->funcs;
        if (old) {
                /* (N -> N+1), (N != 0, 1) probes */
                for (nr_probes = 0; old[nr_probes].func; nr_probes++)
                        if (old[nr_probes].func == probe &&
                            old[nr_probes].data == data)
                                return ERR_PTR(-EEXIST);
        }
        /* + 2 : one for new probe, one for NULL func */
        new = allocate_probes(nr_probes + 2);
        if (new == NULL)
                return ERR_PTR(-ENOMEM);
        if (old)
                memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
        new[nr_probes].func = probe;
        new[nr_probes].data = data;
        new[nr_probes + 1].func = NULL;
        entry->refcount = nr_probes + 1;
        entry->funcs = new;
        debug_print_probes(entry);
        return old;
}

static void *
tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
                              void *probe, void *data)
{
        int nr_probes = 0, nr_del = 0, i;
        struct tracepoint_func *old, *new;

        old = entry->funcs;

        if (!old)
                return ERR_PTR(-ENOENT);

        debug_print_probes(entry);
        /* (N -> M), (N > 1, M >= 0) probes */
        for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
                if (!probe ||
                    (old[nr_probes].func == probe &&
                     old[nr_probes].data == data))
                        nr_del++;
        }

        if (nr_probes - nr_del == 0) {
                /* N -> 0, (N > 1) */
                entry->funcs = NULL;
                entry->refcount = 0;
                debug_print_probes(entry);
                return old;
        } else {
                int j = 0;
                /* N -> M, (N > 1, M > 0) */
                /* + 1 for NULL */
                new = allocate_probes(nr_probes - nr_del + 1);
                if (new == NULL)
                        return ERR_PTR(-ENOMEM);
                for (i = 0; old[i].func; i++)
                        if (probe &&
                            (old[i].func != probe || old[i].data != data))
                                new[j++] = old[i];
                new[nr_probes - nr_del].func = NULL;
                entry->refcount = nr_probes - nr_del;
                entry->funcs = new;
        }
        debug_print_probes(entry);
        return old;
}

/*
 * Get tracepoint if the tracepoint is present in the tracepoint hash table.
 * Must be called with tracepoints_mutex held.
 * Returns NULL if not present.
 */
static struct tracepoint_entry *get_tracepoint(const char *name)
{
        struct hlist_head *head;
        struct tracepoint_entry *e;
        u32 hash = jhash(name, strlen(name), 0);

        head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
        hlist_for_each_entry(e, head, hlist) {
                if (!strcmp(name, e->name))
                        return e;
        }
        return NULL;
}

/*
 * Add the tracepoint to the tracepoint hash table. Must be called with
 * tracepoints_mutex held.
 */
static struct tracepoint_entry *add_tracepoint(const char *name)
{
        struct hlist_head *head;
        struct tracepoint_entry *e;
        size_t name_len = strlen(name) + 1;
        u32 hash = jhash(name, name_len - 1, 0);

        head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
        hlist_for_each_entry(e, head, hlist) {
                if (!strcmp(name, e->name)) {
                        printk(KERN_NOTICE
                                "tracepoint %s busy\n", name);
                        return ERR_PTR(-EEXIST);        /* Already there */
                }
        }
        /*
         * Using kmalloc here to allocate a variable length element. Could
         * cause some memory fragmentation if overused.
         */
        e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
        if (!e)
                return ERR_PTR(-ENOMEM);
        memcpy(&e->name[0], name, name_len);
        e->funcs = NULL;
        e->refcount = 0;
        hlist_add_head(&e->hlist, head);
        return e;
}

/*
 * Remove the tracepoint from the tracepoint hash table. Must be called with
 * tracepoints_mutex held.
 */
static inline void remove_tracepoint(struct tracepoint_entry *e)
{
        hlist_del(&e->hlist);
        kfree(e);
}

/*
 * Sets the probe callback corresponding to one tracepoint.
 */
static void set_tracepoint(struct tracepoint_entry **entry,
        struct tracepoint *elem, int active)
{
        WARN_ON(strcmp((*entry)->name, elem->name) != 0);

        if (elem->regfunc && !static_key_enabled(&elem->key) && active)
                elem->regfunc();
        else if (elem->unregfunc && static_key_enabled(&elem->key) && !active)
                elem->unregfunc();

        /*
         * rcu_assign_pointer has a smp_wmb() which makes sure that the new
         * probe callbacks array is consistent before setting a pointer to it.
         * This array is referenced by __DO_TRACE from
         * include/linux/tracepoint.h. A matching smp_read_barrier_depends()
         * is used.
         */
        rcu_assign_pointer(elem->funcs, (*entry)->funcs);
        if (active && !static_key_enabled(&elem->key))
                static_key_slow_inc(&elem->key);
        else if (!active && static_key_enabled(&elem->key))
                static_key_slow_dec(&elem->key);
}
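
/*
 * Illustrative sketch (assumption based on include/linux/tracepoint.h, not
 * part of this file): the static key flipped above is what keeps a disarmed
 * tracepoint cheap at its call site.  For a hypothetical tracepoint named
 * "my_event", the generated wrapper looks roughly like:
 *
 *      static inline void trace_my_event(proto)
 *      {
 *              if (static_key_false(&__tracepoint_my_event.key))
 *                      __DO_TRACE(&__tracepoint_my_event, ...);
 *      }
 *
 * static_key_slow_inc()/static_key_slow_dec() patch that branch at runtime,
 * so an unused tracepoint costs little more than a no-op.
 */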

/*
 * Disable a tracepoint and its probe callback.
 * Note: only waiting for an RCU grace period after clearing elem->funcs
 * ensures that the original callbacks are no longer used. This is
 * guaranteed by the preempt_disable around the call site.
 */
static void disable_tracepoint(struct tracepoint *elem)
{
        if (elem->unregfunc && static_key_enabled(&elem->key))
                elem->unregfunc();

        if (static_key_enabled(&elem->key))
                static_key_slow_dec(&elem->key);
        rcu_assign_pointer(elem->funcs, NULL);
}

/**
 * tracepoint_update_probe_range - Update a probe range
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Updates the probe callback corresponding to a range of tracepoints.
 * Called with tracepoints_mutex held.
 */
static void tracepoint_update_probe_range(struct tracepoint * const *begin,
                                          struct tracepoint * const *end)
{
        struct tracepoint * const *iter;
        struct tracepoint_entry *mark_entry;

        if (!begin)
                return;

        for (iter = begin; iter < end; iter++) {
                mark_entry = get_tracepoint((*iter)->name);
                if (mark_entry) {
                        set_tracepoint(&mark_entry, *iter,
                                        !!mark_entry->refcount);
                } else {
                        disable_tracepoint(*iter);
                }
        }
}

#ifdef CONFIG_MODULES
void module_update_tracepoints(void)
{
        struct tp_module *tp_mod;

        list_for_each_entry(tp_mod, &tracepoint_module_list, list)
                tracepoint_update_probe_range(tp_mod->tracepoints_ptrs,
                        tp_mod->tracepoints_ptrs + tp_mod->num_tracepoints);
}
#else /* CONFIG_MODULES */
void module_update_tracepoints(void)
{
}
#endif /* CONFIG_MODULES */

/*
 * Update probes, removing the faulty probes.
 * Called with tracepoints_mutex held.
 */
static void tracepoint_update_probes(void)
{
        /* Core kernel tracepoints */
        tracepoint_update_probe_range(__start___tracepoints_ptrs,
                __stop___tracepoints_ptrs);
        /* tracepoints in modules. */
        module_update_tracepoints();
}

static struct tracepoint_func *
tracepoint_add_probe(const char *name, void *probe, void *data)
{
        struct tracepoint_entry *entry;
        struct tracepoint_func *old;

        entry = get_tracepoint(name);
        if (!entry) {
                entry = add_tracepoint(name);
                if (IS_ERR(entry))
                        return (struct tracepoint_func *)entry;
        }
        old = tracepoint_entry_add_probe(entry, probe, data);
        if (IS_ERR(old) && !entry->refcount)
                remove_tracepoint(entry);
        return old;
}

/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @name: tracepoint name
 * @probe: probe handler
 * @data: probe private data
 *
 * Returns 0 if ok, error value on error.
 * The probe address must at least be aligned on the architecture pointer size.
 */
int tracepoint_probe_register(const char *name, void *probe, void *data)
{
        struct tracepoint_func *old;

        mutex_lock(&tracepoints_mutex);
        old = tracepoint_add_probe(name, probe, data);
        if (IS_ERR(old)) {
                mutex_unlock(&tracepoints_mutex);
                return PTR_ERR(old);
        }
        tracepoint_update_probes();             /* may update entry */
        mutex_unlock(&tracepoints_mutex);
        release_probes(old);
        return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);
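
/*
 * Illustrative usage sketch (hypothetical tracepoint and probe names, not
 * part of this file): with the per-probe data API, the callback receives its
 * private data pointer first, followed by the tracepoint arguments.  For a
 * tracepoint declared with TP_PROTO(int foo):
 *
 *      static void my_probe(void *data, int foo)
 *      {
 *              ...
 *      }
 *
 *      ret = tracepoint_probe_register("my_event", my_probe, NULL);
 *      ...
 *      ret = tracepoint_probe_unregister("my_event", my_probe, NULL);
 *
 * The DECLARE_TRACE()/TRACE_EVENT() macros also emit typed
 * register_trace_my_event()/unregister_trace_my_event() wrappers that call
 * these functions with the tracepoint's name string.
 */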

static struct tracepoint_func *
tracepoint_remove_probe(const char *name, void *probe, void *data)
{
        struct tracepoint_entry *entry;
        struct tracepoint_func *old;

        entry = get_tracepoint(name);
        if (!entry)
                return ERR_PTR(-ENOENT);
        old = tracepoint_entry_remove_probe(entry, probe, data);
        if (IS_ERR(old))
                return old;
        if (!entry->refcount)
                remove_tracepoint(entry);
        return old;
}

/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @name: tracepoint name
 * @probe: probe function pointer
 * @data: probe private data
 *
 * We do not need to call synchronize_sched() to make sure the probes have
 * finished running before doing a module unload, because the module unload
 * itself uses stop_machine(), which ensures that every preempt-disabled
 * section has finished.
 */
int tracepoint_probe_unregister(const char *name, void *probe, void *data)
{
        struct tracepoint_func *old;

        mutex_lock(&tracepoints_mutex);
        old = tracepoint_remove_probe(name, probe, data);
        if (IS_ERR(old)) {
                mutex_unlock(&tracepoints_mutex);
                return PTR_ERR(old);
        }
        tracepoint_update_probes();             /* may update entry */
        mutex_unlock(&tracepoints_mutex);
        release_probes(old);
        return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);

static LIST_HEAD(old_probes);
static int need_update;

static void tracepoint_add_old_probes(void *old)
{
        need_update = 1;
        if (old) {
                struct tp_probes *tp_probes = container_of(old,
                        struct tp_probes, probes[0]);
                list_add(&tp_probes->u.list, &old_probes);
        }
}

/**
 * tracepoint_probe_register_noupdate - register a probe but do not connect it
 * @name: tracepoint name
 * @probe: probe handler
 * @data: probe private data
 *
 * The caller must call tracepoint_probe_update_all() afterwards.
 */
int tracepoint_probe_register_noupdate(const char *name, void *probe,
                                       void *data)
{
        struct tracepoint_func *old;

        mutex_lock(&tracepoints_mutex);
        old = tracepoint_add_probe(name, probe, data);
        if (IS_ERR(old)) {
                mutex_unlock(&tracepoints_mutex);
                return PTR_ERR(old);
        }
        tracepoint_add_old_probes(old);
        mutex_unlock(&tracepoints_mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);

/**
 * tracepoint_probe_unregister_noupdate - remove a probe but do not disconnect it
 * @name: tracepoint name
 * @probe: probe function pointer
 * @data: probe private data
 *
 * The caller must call tracepoint_probe_update_all() afterwards.
 */
int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
                                         void *data)
{
        struct tracepoint_func *old;

        mutex_lock(&tracepoints_mutex);
        old = tracepoint_remove_probe(name, probe, data);
        if (IS_ERR(old)) {
                mutex_unlock(&tracepoints_mutex);
                return PTR_ERR(old);
        }
        tracepoint_add_old_probes(old);
        mutex_unlock(&tracepoints_mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);

/**
 * tracepoint_probe_update_all - update all tracepoints
 */
void tracepoint_probe_update_all(void)
{
        LIST_HEAD(release_probes);
        struct tp_probes *pos, *next;

        mutex_lock(&tracepoints_mutex);
        if (!need_update) {
                mutex_unlock(&tracepoints_mutex);
                return;
        }
        if (!list_empty(&old_probes))
                list_replace_init(&old_probes, &release_probes);
        need_update = 0;
        tracepoint_update_probes();
        mutex_unlock(&tracepoints_mutex);
        list_for_each_entry_safe(pos, next, &release_probes, u.list) {
                list_del(&pos->u.list);
                call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
        }
}
EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
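
/*
 * Illustrative usage sketch (hypothetical names, not part of this file):
 * the *_noupdate variants let a caller queue several probe changes and pay
 * for the tracepoint update and the deferred frees only once:
 *
 *      tracepoint_probe_register_noupdate("my_event_a", probe_a, NULL);
 *      tracepoint_probe_register_noupdate("my_event_b", probe_b, NULL);
 *      tracepoint_probe_update_all();
 *
 * The old probe arrays collected on old_probes are only handed to
 * call_rcu_sched() here, after the new arrays have been published.
 */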

/**
 * tracepoint_get_iter_range - Get the next tracepoint iterator given a range.
 * @tracepoint: current tracepoint (in), next tracepoint (out)
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Returns whether a next tracepoint has been found (1) or not (0).
 * Will return the first tracepoint in the range if the input tracepoint is
 * NULL.
 */
static int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
        struct tracepoint * const *begin, struct tracepoint * const *end)
{
        if (!*tracepoint && begin != end) {
                *tracepoint = begin;
                return 1;
        }
        if (*tracepoint >= begin && *tracepoint < end)
                return 1;
        return 0;
}
b75ef8b4 | 544 | #ifdef CONFIG_MODULES |
97e1c18e MD |
545 | static void tracepoint_get_iter(struct tracepoint_iter *iter) |
546 | { | |
547 | int found = 0; | |
b75ef8b4 | 548 | struct tp_module *iter_mod; |
97e1c18e MD |
549 | |
550 | /* Core kernel tracepoints */ | |
551 | if (!iter->module) { | |
552 | found = tracepoint_get_iter_range(&iter->tracepoint, | |
65498646 MD |
553 | __start___tracepoints_ptrs, |
554 | __stop___tracepoints_ptrs); | |
97e1c18e MD |
555 | if (found) |
556 | goto end; | |
557 | } | |
b75ef8b4 MD |
558 | /* Tracepoints in modules */ |
559 | mutex_lock(&tracepoints_mutex); | |
560 | list_for_each_entry(iter_mod, &tracepoint_module_list, list) { | |
561 | /* | |
562 | * Sorted module list | |
563 | */ | |
564 | if (iter_mod < iter->module) | |
565 | continue; | |
566 | else if (iter_mod > iter->module) | |
567 | iter->tracepoint = NULL; | |
568 | found = tracepoint_get_iter_range(&iter->tracepoint, | |
569 | iter_mod->tracepoints_ptrs, | |
570 | iter_mod->tracepoints_ptrs | |
571 | + iter_mod->num_tracepoints); | |
572 | if (found) { | |
573 | iter->module = iter_mod; | |
574 | break; | |
575 | } | |
576 | } | |
577 | mutex_unlock(&tracepoints_mutex); | |
97e1c18e MD |
578 | end: |
579 | if (!found) | |
580 | tracepoint_iter_reset(iter); | |
581 | } | |
b75ef8b4 MD |
582 | #else /* CONFIG_MODULES */ |
583 | static void tracepoint_get_iter(struct tracepoint_iter *iter) | |
584 | { | |
585 | int found = 0; | |
586 | ||
587 | /* Core kernel tracepoints */ | |
588 | found = tracepoint_get_iter_range(&iter->tracepoint, | |
589 | __start___tracepoints_ptrs, | |
590 | __stop___tracepoints_ptrs); | |
591 | if (!found) | |
592 | tracepoint_iter_reset(iter); | |
593 | } | |
594 | #endif /* CONFIG_MODULES */ | |
97e1c18e MD |
595 | |

void tracepoint_iter_start(struct tracepoint_iter *iter)
{
        tracepoint_get_iter(iter);
}
EXPORT_SYMBOL_GPL(tracepoint_iter_start);

void tracepoint_iter_next(struct tracepoint_iter *iter)
{
        iter->tracepoint++;
        /*
         * iter->tracepoint may be invalid because we blindly incremented it.
         * Make sure it is valid by walking the tracepoint sections, moving on
         * to the following modules if necessary.
         */
        tracepoint_get_iter(iter);
}
EXPORT_SYMBOL_GPL(tracepoint_iter_next);

void tracepoint_iter_stop(struct tracepoint_iter *iter)
{
}
EXPORT_SYMBOL_GPL(tracepoint_iter_stop);

void tracepoint_iter_reset(struct tracepoint_iter *iter)
{
#ifdef CONFIG_MODULES
        iter->module = NULL;
#endif /* CONFIG_MODULES */
        iter->tracepoint = NULL;
}
EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
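
/*
 * Illustrative usage sketch (not part of this file, assumes the
 * struct tracepoint_iter layout from include/linux/tracepoint.h): the
 * iterator is meant to be driven from a seq_file-style loop:
 *
 *      struct tracepoint_iter iter;
 *
 *      tracepoint_iter_reset(&iter);
 *      tracepoint_iter_start(&iter);
 *      while (iter.tracepoint) {
 *              printk("%s\n", (*iter.tracepoint)->name);
 *              tracepoint_iter_next(&iter);
 *      }
 *      tracepoint_iter_stop(&iter);
 *
 * tracepoint_iter_start() and tracepoint_iter_next() re-validate
 * iter.tracepoint against the core kernel section and the sorted module
 * list, so tracepoints_mutex does not have to be held across the walk.
 */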

#ifdef CONFIG_MODULES
static int tracepoint_module_coming(struct module *mod)
{
        struct tp_module *tp_mod, *iter;
        int ret = 0;

        /*
         * We skip modules that taint the kernel, especially those with different
         * module headers (for forced load), to make sure we don't cause a crash.
         * Staging and out-of-tree GPL modules are fine.
         */
        if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
                return 0;
        mutex_lock(&tracepoints_mutex);
        tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
        if (!tp_mod) {
                ret = -ENOMEM;
                goto end;
        }
        tp_mod->num_tracepoints = mod->num_tracepoints;
        tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs;

        /*
         * tracepoint_module_list is kept sorted by struct module pointer
         * address for iteration on tracepoints from a seq_file that can release
         * the mutex between calls.
         */
        list_for_each_entry_reverse(iter, &tracepoint_module_list, list) {
                BUG_ON(iter == tp_mod); /* Should never be in the list twice */
                if (iter < tp_mod) {
                        /* We belong to the location right after iter. */
                        list_add(&tp_mod->list, &iter->list);
                        goto module_added;
                }
        }
        /* We belong to the beginning of the list */
        list_add(&tp_mod->list, &tracepoint_module_list);
module_added:
        tracepoint_update_probe_range(mod->tracepoints_ptrs,
                mod->tracepoints_ptrs + mod->num_tracepoints);
end:
        mutex_unlock(&tracepoints_mutex);
        return ret;
}

static int tracepoint_module_going(struct module *mod)
{
        struct tp_module *pos;

        mutex_lock(&tracepoints_mutex);
        tracepoint_update_probe_range(mod->tracepoints_ptrs,
                mod->tracepoints_ptrs + mod->num_tracepoints);
        list_for_each_entry(pos, &tracepoint_module_list, list) {
                if (pos->tracepoints_ptrs == mod->tracepoints_ptrs) {
                        list_del(&pos->list);
                        kfree(pos);
                        break;
                }
        }
        /*
         * In the case of modules that were tainted at "coming", we'll simply
         * walk through the list without finding it. We cannot use the "tainted"
         * flag on "going", in case a module taints the kernel only after being
         * loaded.
         */
        mutex_unlock(&tracepoints_mutex);
        return 0;
}

int tracepoint_module_notify(struct notifier_block *self,
                             unsigned long val, void *data)
{
        struct module *mod = data;
        int ret = 0;

        switch (val) {
        case MODULE_STATE_COMING:
                ret = tracepoint_module_coming(mod);
                break;
        case MODULE_STATE_LIVE:
                break;
        case MODULE_STATE_GOING:
                ret = tracepoint_module_going(mod);
                break;
        }
        return ret;
}

struct notifier_block tracepoint_module_nb = {
        .notifier_call = tracepoint_module_notify,
        .priority = 0,
};

static int init_tracepoints(void)
{
        return register_module_notifier(&tracepoint_module_nb);
}
__initcall(init_tracepoints);
#endif /* CONFIG_MODULES */
3d27d8cb | 728 | #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS |
60d970c2 | 729 | |
97419875 | 730 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ |
a871bd33 JB |
731 | static int sys_tracepoint_refcount; |
732 | ||
733 | void syscall_regfunc(void) | |
734 | { | |
735 | unsigned long flags; | |
736 | struct task_struct *g, *t; | |
737 | ||
a871bd33 JB |
738 | if (!sys_tracepoint_refcount) { |
739 | read_lock_irqsave(&tasklist_lock, flags); | |
740 | do_each_thread(g, t) { | |
cc3b13c1 HB |
741 | /* Skip kernel threads. */ |
742 | if (t->mm) | |
743 | set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT); | |
a871bd33 JB |
744 | } while_each_thread(g, t); |
745 | read_unlock_irqrestore(&tasklist_lock, flags); | |
746 | } | |
747 | sys_tracepoint_refcount++; | |
a871bd33 JB |
748 | } |
749 | ||
750 | void syscall_unregfunc(void) | |
751 | { | |
752 | unsigned long flags; | |
753 | struct task_struct *g, *t; | |
754 | ||
a871bd33 JB |
755 | sys_tracepoint_refcount--; |
756 | if (!sys_tracepoint_refcount) { | |
757 | read_lock_irqsave(&tasklist_lock, flags); | |
758 | do_each_thread(g, t) { | |
66700001 | 759 | clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT); |
a871bd33 JB |
760 | } while_each_thread(g, t); |
761 | read_unlock_irqrestore(&tasklist_lock, flags); | |
762 | } | |
a871bd33 | 763 | } |
60d970c2 | 764 | #endif |