/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <[email protected]>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <[email protected]>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <[email protected]>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "internal.h"

static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
			      unsigned long offset, unsigned long head)
{
	unsigned long mask;

	if (!rb->writable)
		return true;

	mask = perf_data_size(rb) - 1;

	offset = (offset - tail) & mask;
	head   = (head   - tail) & mask;

	if ((int)(head - offset) < 0)
		return false;

	return true;
}
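
/*
 * Worked example of the space check above (editor's illustration): with
 * perf_data_size(rb) == 16 (mask == 15), tail == 14, offset == 2 and
 * head == 6, the writer has wrapped past the end of the buffer.
 * Normalizing against tail gives offset == (2 - 14) & 15 == 4 and
 * head == (6 - 14) & 15 == 8, so the proposed head is still ahead of
 * the old offset and the record fits without overwriting data the
 * consumer has not read yet.
 */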

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLL_IN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}
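
/*
 * Note that the wakeup is routed through irq_work rather than performed
 * directly: output can happen from NMI context, where waking a task is
 * not safe, so the actual wakeup is deferred until the irq_work runs.
 */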

/*
 * We need to ensure a later event_id doesn't publish a head when a
 * former event isn't done writing. However since we need to deal with
 * NMIs we cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * An IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Publish the known good head. Rely on the full barrier implied
	 * by local_dec_and_test() to order the rb->head read and this
	 * write.
	 */
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update -- rely on the (compiler)
	 * barrier in local_dec_and_test() to re-read rb->head.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
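
/*
 * Example of the race the 'again' loop closes: the outer writer reads
 * head == A, an NMI nests, appends a record and advances rb->head to B,
 * then completes (its local_dec_and_test() leaves nest non-zero, so it
 * publishes nothing). Back in the outer writer, nest drops to zero and
 * head A is published, but the re-read sees B != A, so we loop once
 * more and publish B as well.
 */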

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost;
	struct perf_sample_data sample_data;
	struct {
		struct perf_event_header header;
		u64 id;
		u64 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (!rb)
		goto out;

	handle->rb = rb;
	handle->event = event;

	if (!rb->nr_pages)
		goto out;

	have_lost = local_read(&rb->lost);
	if (have_lost) {
		lost_event.header.size = sizeof(lost_event);
		perf_event_header__init_id(&lost_event.header, &sample_data,
					   event);
		size += lost_event.header.size;
	}

	perf_output_get_handle(handle);

	do {
		/*
		 * Userspace could choose to issue an mb() before updating
		 * the tail pointer, so that all reads are completed before
		 * the write is issued.
		 */
		tail = ACCESS_ONCE(rb->user_page->data_tail);
		smp_rmb();
		offset = head = local_read(&rb->head);
		head += size;
		if (unlikely(!perf_output_space(rb, tail, offset, head)))
			goto fail;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	if (head - local_read(&rb->wakeup) > rb->watermark)
		local_add(rb->watermark, &rb->wakeup);

	handle->page = offset >> (PAGE_SHIFT + page_order(rb));
	handle->page &= rb->nr_pages - 1;
	handle->size = offset & ((PAGE_SIZE << page_order(rb)) - 1);
	handle->addr = rb->data_pages[handle->page];
	handle->addr += handle->size;
	handle->size = (PAGE_SIZE << page_order(rb)) - handle->size;

	if (have_lost) {
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id = event->id;
		lost_event.lost = local_xchg(&rb->lost, 0);

		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}
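
/*
 * The consumer side of the data_head/data_tail protocol lives in
 * userspace. A minimal reader sketch (illustrative, not part of this
 * file; 'up' stands for the mmap'ed user page and the barrier names
 * are the userspace analogues):
 *
 *	u64 head = ACCESS_ONCE(up->data_head);
 *	rmb();			// order the head read before data reads
 *	// consume records in [data_tail, head)
 *	mb();			// finish all reads before publishing tail
 *	up->data_tail = head;
 *
 * The mb() before the data_tail store is the barrier the comment in
 * the do-while loop above alludes to; it is what keeps the kernel from
 * overwriting records the consumer is still reading.
 */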

void perf_output_copy(struct perf_output_handle *handle,
		      const void *buf, unsigned int len)
{
	__output_copy(handle, buf, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->writable = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);
}
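
/*
 * Example of the watermark defaults: for a 512 KiB data area and
 * watermark == 0, rb->watermark becomes 256 KiB, i.e. a wakeup is
 * generated roughly every time half of the buffer has been filled.
 */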

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular order-0 GFP_KERNEL pages.
 */

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}
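
/*
 * Layout, as seen by mmap(): pgoff 0 is the struct perf_event_mmap_page
 * control page, and pgoff 1..nr_pages map to data_pages[0..nr_pages-1],
 * hence the 'pgoff - 1' above.
 */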

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	/* cpu == -1 means no CPU binding; a node of -1 lets the allocator fall back to the local node */
	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}
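
/*
 * Typical call site (sketch; the real caller is perf_mmap() in
 * kernel/events/core.c, and the exact arguments shown are
 * illustrative):
 *
 *	rb = rb_alloc(nr_pages, watermark, event->cpu, flags);
 *	if (!rb)
 *		return -ENOMEM;
 *	rcu_assign_pointer(event->rb, rb);
 */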

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > (1UL << page_order(rb)))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}
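
/*
 * In the vmalloc case the user page and all data pages come from one
 * virtually contiguous allocation, so pgoff translates into a plain
 * byte offset from rb->user_page instead of a data_pages[] lookup.
 */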

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = 1 << page_order(rb);

	base = rb->user_page;
	for (i = 0; i < nr + 1; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}
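
/*
 * Freeing is deferred to a workqueue because rb_free() may be reached
 * from atomic context, while vfree() must only be called from process
 * context; rb_free_work() does the actual vfree()/kfree().
 */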

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	rb->page_order = ilog2(nr_pages);
	rb->nr_pages = 1;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}
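
/*
 * Design note: the whole data area is treated as a single "page" of
 * order ilog2(nr_pages), which is why nr_pages is forced to 1 above and
 * why perf_data_size() and the handle->page arithmetic in
 * perf_output_begin() are expressed in terms of page_order(rb).
 * Example: for nr_pages == 8, page_order(rb) == 3 and data_pages[0]
 * covers one contiguous 32 KiB region (with 4 KiB pages).
 */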

#endif