/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <[email protected]>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <[email protected]>
 *          K.Prasad <[email protected]>
 *          Frederic Weisbecker <[email protected]>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);

static int nr_slots[TYPE_MAX];

static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

__weak int hw_breakpoint_weight(struct perf_event *bp)
{
	return 1;
}

static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
	if (bp->attr.bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;

	return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task
 * has in this cpu
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);

	for (i = nr_slots[type] - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}

static int task_bp_pinned(struct task_struct *tsk, enum bp_type_idx type)
{
	struct perf_event_context *ctx = tsk->perf_event_ctxp;
	struct list_head *list;
	struct perf_event *bp;
	unsigned long flags;
	int count = 0;

	if (WARN_ONCE(!ctx, "No perf context for this task"))
		return 0;

	list = &ctx->event_list;

	raw_spin_lock_irqsave(&ctx->lock, flags);

	/*
	 * The current breakpoint counter is not included in the list
	 * at the open() callback time
	 */
	list_for_each_entry(bp, list, event_entry) {
		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
			if (find_slot_idx(bp) == type)
				count += hw_breakpoint_weight(bp);
	}

	raw_spin_unlock_irqrestore(&ctx->lock, flags);

	return count;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
			slots->pinned += max_task_bp_pinned(cpu, type);
		else
			slots->pinned += task_bp_pinned(tsk, type);
		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);

		return;
	}

	for_each_online_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(tsk, type);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible[type], cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * on the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
	slots->pinned += weight;
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
				enum bp_type_idx type, int weight)
{
	unsigned int *tsk_pinned;
	int old_count = 0;
	int old_idx = 0;
	int idx = 0;

	old_count = task_bp_pinned(tsk, type);
	old_idx = old_count - 1;
	idx = old_idx + weight;

	tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
	if (enable) {
		tsk_pinned[idx]++;
		if (old_count > 0)
			tsk_pinned[old_idx]--;
	} else {
		tsk_pinned[idx]--;
		if (old_count > 0)
			tsk_pinned[old_idx]++;
	}
}

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	       int weight)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	/* Pinned counter task profiling */
	if (tsk) {
		if (cpu >= 0) {
			toggle_bp_task_slot(tsk, cpu, enable, type, weight);
			return;
		}

		for_each_online_cpu(cpu)
			toggle_bp_task_slot(tsk, cpu, enable, type, weight);
		return;
	}

	/* Pinned counter cpu profiling */
	if (enable)
		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
	else
		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must keep
 *          at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
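/*
 * Worked example (illustrative only; assumes an x86-style setup where
 * nr_slots[TYPE_DATA] == HBP_NUM == 4 debug registers):
 *
 *   Suppose cpu 0 already has one pinned per-cpu data breakpoint and the
 *   busiest task scheduled there has two pinned data breakpoints, so the
 *   base pinned count is 1 + 2 = 3.  A new pinned request of weight 1
 *   brings slots.pinned to 4, which still fits because 4 plus
 *   (!!slots.flexible == 0) is not greater than 4.  If a flexible counter
 *   were also present on that cpu, the check would become 4 + 1 > 4 and
 *   __reserve_bp_slot() below would return -ENOSPC, preserving the one
 *   slot a flexible counter needs.
 */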
static int __reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;
	int weight;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	/* Basic checks */
	if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
	    bp->attr.bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);

	fetch_bp_busy_slots(&slots, bp, type);
	fetch_this_slot(&slots, weight);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
		return -ENOSPC;

	toggle_bp_slot(bp, true, type, weight);

	return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}

static void __release_bp_slot(struct perf_event *bp)
{
	enum bp_type_idx type;
	int weight;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);
	toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	__release_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	__release_bp_slot(bp);

	return 0;
}

static int validate_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = arch_validate_hwbkpt_settings(bp);
	if (ret)
		return ret;

	if (arch_check_bp_in_kernelspace(bp)) {
		if (bp->attr.exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	ret = validate_hw_breakpoint(bp);

	/* if validate_hw_breakpoint() fails then release bp slot */
	if (ret)
		release_bp_slot(bp);

	return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
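
/*
 * Minimal usage sketch (illustrative only, not part of this file's code):
 * an in-kernel caller such as ptrace would typically initialize the attrs
 * with hw_breakpoint_init() and register against the traced task.  The
 * "my_triggered" callback and "child" task below are hypothetical
 * placeholders supplied by that caller.
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = addr;			// user address to watch
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;	// watch a 4-byte access
 *	attr.bp_type = HW_BREAKPOINT_W;		// break on write
 *
 *	bp = register_user_hw_breakpoint(&attr, my_triggered, child);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */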

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;
	int err = 0;

	perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	if (attr->disabled)
		goto end;

	err = validate_hw_breakpoint(bp);
	if (!err)
		perf_event_enable(bp);

	if (err) {
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}

end:
	bp->attr.disabled = attr->disabled;

	return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered)
{
	struct perf_event * __percpu *cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);

		*pevent = bp;

		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}
	put_online_cpus();

	return cpu_events;

fail:
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent))
			break;
		unregister_hw_breakpoint(*pevent);
	}
	put_online_cpus();

	free_percpu(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
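
/*
 * Illustrative sketch (hypothetical caller, in the spirit of
 * samples/hw_breakpoint/data_breakpoint.c): watch accesses to a kernel
 * symbol on every online cpu.  "my_triggered" is a placeholder
 * perf_overflow_handler_t supplied by the caller.
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("pid_max");
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *	wp = register_wide_hw_breakpoint(&attr, my_triggered);
 *	if (IS_ERR((void __force *)wp))
 *		return PTR_ERR((void __force *)wp);
 */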

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;
	struct perf_event **pevent;

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
	unsigned int **task_bp_pinned;
	int cpu, err_cpu;
	int i;

	for (i = 0; i < TYPE_MAX; i++)
		nr_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
			*task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
						  GFP_KERNEL);
			if (!*task_bp_pinned)
				goto err_alloc;
		}
	}

	constraints_initialized = 1;

	return register_die_notifier(&hw_breakpoint_exceptions_nb);

err_alloc:
	for_each_possible_cpu(err_cpu) {
		for (i = 0; i < TYPE_MAX; i++)
			kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
		if (err_cpu == cpu)
			break;
	}

	return -ENOMEM;
}
core_initcall(init_hw_breakpoint);


struct pmu perf_ops_bp = {
	.enable		= arch_install_hw_breakpoint,
	.disable	= arch_uninstall_hw_breakpoint,
	.read		= hw_breakpoint_pmu_read,
};