/* kernel/trace/ring_buffer.c - Linux 6.14-rc3 */
// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <[email protected]>
 */
#include <linux/trace_recursion.h>
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/sched/clock.h>
#include <linux/cacheflush.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/mm.h>

#include <asm/local64.h>
#include <asm/local.h>

#include "trace.h"

/*
 * The "absolute" timestamp in the buffer is only 59 bits.
 * If a clock value has any of the 5 MSBs set, those bits need to be
 * saved and reinserted.
 */
#define TS_MSB		(0xf8ULL << 56)
#define ABS_TS_MASK	(~TS_MSB)
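
/*
 * Illustrative sketch (not used by the buffer code itself) of how a full
 * 64-bit clock value decomposes under these masks: the 5 MSBs are saved
 * out of band and the remaining 59 bits are what fits in the buffer.
 * The rb_example_*() name is hypothetical; rb_fix_abs_ts() further down
 * performs the reverse operation for real.
 */
static inline __maybe_unused u64 rb_example_split_ts(u64 clock, u64 *save_msb)
{
	*save_msb = clock & TS_MSB;	/* 5 MSBs, kept on the side */
	return clock & ABS_TS_MASK;	/* 59 bits, stored in the event */
}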

static void update_pages_handler(struct work_struct *work);

#define RING_BUFFER_META_MAGIC	0xBADFEED

struct ring_buffer_meta {
	int		magic;
	int		struct_size;
	unsigned long	text_addr;
	unsigned long	data_addr;
	unsigned long	first_buffer;
	unsigned long	head_buffer;
	unsigned long	commit_buffer;
	__u32		subbuf_size;
	__u32		nr_subbufs;
	int		buffers[];
};

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	trace_seq_puts(s, "# compressed entry header\n");
	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
	trace_seq_puts(s, "\tarray       :   32 bits\n");
	trace_seq_putc(s, '\n');
	trace_seq_printf(s, "\tpadding     : type == %d\n",
			 RINGBUF_TYPE_PADDING);
	trace_seq_printf(s, "\ttime_extend : type == %d\n",
			 RINGBUF_TYPE_TIME_EXTEND);
	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
			 RINGBUF_TYPE_TIME_STAMP);
	trace_seq_printf(s, "\tdata max type_len  == %d\n",
			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return !trace_seq_has_overflowed(s);
}
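
/*
 * Illustrative sketch of consuming the compressed header printed above,
 * assuming the struct ring_buffer_event layout from <linux/ring_buffer.h>
 * (type_len:5, time_delta:27, then array[]). Hypothetical helper, for
 * exposition only.
 */
static inline __maybe_unused const char *
rb_example_type_name(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:	return "padding";
	case RINGBUF_TYPE_TIME_EXTEND:	return "time_extend";
	case RINGBUF_TYPE_TIME_STAMP:	return "time_stamp";
	default:			return "data";	/* 0..type_len max */
	}
}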

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on.  A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that
 * page again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP =  8,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

#define extended_time(event) \
	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)

static inline bool rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return  event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		WARN_ON_ONCE(1);
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 *   or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (extended_time(event)) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (extended_time(event))
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static __always_inline void *
rb_event_data(struct ring_buffer_event *event)
{
	if (extended_time(event))
		event = skip_time_extend(event);
	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);
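
/*
 * Illustrative consumer-side sketch: given an event (e.g. one returned
 * from a reserve or peek), the two exported helpers above yield the
 * payload and its length. Hypothetical helper; the caller must ensure
 * @dst is large enough.
 */
static inline __maybe_unused void
rb_example_copy_payload(struct ring_buffer_event *event, void *dst)
{
	memcpy(dst, ring_buffer_event_data(event),
	       ring_buffer_event_length(event));
}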

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define for_each_online_buffer_cpu(buffer, cpu)	\
	for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

static u64 rb_event_time_stamp(struct ring_buffer_event *event)
{
	u64 ts;

	ts = event->array[0];
	ts <<= TS_SHIFT;
	ts += event->time_delta;

	return ts;
}
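
/*
 * Illustrative sketch of the inverse of rb_event_time_stamp(): packing a
 * large time value into an extended event, low TS_SHIFT bits in
 * time_delta and the rest in array[0]. This is assumed to mirror what the
 * write path does when it emits TIME_EXTEND/TIME_STAMP events;
 * hypothetical helper, for exposition only.
 */
static inline __maybe_unused void
rb_example_pack_time(struct ring_buffer_event *event, u64 ts)
{
	event->time_delta = ts & TS_MASK;	/* low 27 bits */
	event->array[0] = ts >> TS_SHIFT;	/* remaining high bits */
}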

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

#define RB_MISSED_MASK		(3 << 30)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
};

struct buffer_data_read_page {
	unsigned		order;	/* order of the page */
	struct buffer_data_page	*data;	/* actual data, stored in this page */
};
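
/*
 * Illustrative sketch: on a page handed out by the read-page interface,
 * the RB_MISSED_* flags above are assumed to be folded into the high
 * bits of the commit field. A reader could then test for lost events
 * like this (hypothetical helper, for exposition only):
 */
static inline __maybe_unused bool
rb_example_events_missed(struct buffer_data_page *dpage)
{
	return local_read(&dpage->commit) & RB_MISSED_EVENTS;
}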

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	unsigned	 order;		/* order of the page */
	u32		 id:30;		/* ID for external mapping */
	u32		 range:1;	/* Mapped via a range */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are packed into each field. One is the
 * actual counter for the write position or entry count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the updater partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)
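
/*
 * Illustrative sketch of unpacking the split described above: the low
 * 20 bits of the write field are the write index on the page, while the
 * bits at and above RB_WRITE_INTCNT count in-flight updaters.
 * Hypothetical helpers, for exposition only.
 */
static inline __maybe_unused unsigned long rb_example_write_index(unsigned long write)
{
	return write & RB_WRITE_MASK;		/* position on the page */
}

static inline __maybe_unused unsigned long rb_example_updaters(unsigned long write)
{
	return write / RB_WRITE_INTCNT;		/* nested updater count */
}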

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

static void free_buffer_page(struct buffer_page *bpage)
{
	/* Range pages are not to be freed */
	if (!bpage->range)
		free_pages((unsigned long)bpage->page, bpage->order);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline bool test_time_stamp(u64 delta)
{
	return !!(delta & TS_DELTA_TEST);
}

struct rb_irq_work {
	struct irq_work			work;
	wait_queue_head_t		waiters;
	wait_queue_head_t		full_waiters;
	atomic_t			seq;
	bool				waiters_pending;
	bool				full_waiters_pending;
	bool				wakeup_full;
};

/*
 * Structure to hold event state and handle nested events.
 */
struct rb_event_info {
	u64			ts;
	u64			delta;
	u64			before;
	u64			after;
	unsigned long		length;
	struct buffer_page	*tail_page;
	int			add_timestamp;
};

/*
 * Used for the add_timestamp
 *  NONE
 *  EXTEND - wants a time extend
 *  ABSOLUTE - the buffer requests all events to have absolute time stamps
 *  FORCE - force a full time stamp.
 */
enum {
	RB_ADD_STAMP_NONE		= 0,
	RB_ADD_STAMP_EXTEND		= BIT(1),
	RB_ADD_STAMP_ABSOLUTE		= BIT(2),
	RB_ADD_STAMP_FORCE		= BIT(3)
};
/*
 * Used for which event context the event is in.
 *  TRANSITION = 0
 *  NMI     = 1
 *  IRQ     = 2
 *  SOFTIRQ = 3
 *  NORMAL  = 4
 *
 * See trace_recursive_lock() comment below for more details.
 */
enum {
	RB_CTX_TRANSITION,
	RB_CTX_NMI,
	RB_CTX_IRQ,
	RB_CTX_SOFTIRQ,
	RB_CTX_NORMAL,
	RB_CTX_MAX
};

struct rb_time_struct {
	local64_t	time;
};
typedef struct rb_time_struct rb_time_t;

#define MAX_NEST	5

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	struct trace_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct buffer_data_page		*free_page;
	unsigned long			nr_pages;
	unsigned int			current_context;
	struct list_head		*pages;
	/* pages generation counter, incremented when the list changes */
	unsigned long			cnt;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	unsigned long			nest;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	local_t				pages_touched;
	local_t				pages_lost;
	local_t				pages_read;
	long				last_pages_touch;
	size_t				shortest_full;
	unsigned long			read;
	unsigned long			read_bytes;
	rb_time_t			write_stamp;
	rb_time_t			before_stamp;
	u64				event_stamp[MAX_NEST];
	u64				read_stamp;
	/* pages removed since last reset */
	unsigned long			pages_removed;

	unsigned int			mapped;
	unsigned int			user_mapped;	/* user space mapping */
	struct mutex			mapping_lock;
	unsigned long			*subbuf_ids;	/* ID to subbuf VA */
	struct trace_buffer_meta	*meta_page;
	struct ring_buffer_meta		*ring_meta;

	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	long				nr_pages_to_update;
	struct list_head		new_pages; /* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;

	struct rb_irq_work		irq_work;
};

struct trace_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	atomic_t			resizing;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

	struct hlist_node		node;
	u64				(*clock)(void);

	struct rb_irq_work		irq_work;
	bool				time_stamp_abs;

	unsigned long			range_addr_start;
	unsigned long			range_addr_end;

	long				last_text_delta;
	long				last_data_delta;

	unsigned int			subbuf_size;
	unsigned int			subbuf_order;
	unsigned int			max_data_size;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	unsigned long			next_event;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	unsigned long			cache_pages_removed;
	u64				read_stamp;
	u64				page_stamp;
	struct ring_buffer_event	*event;
	size_t				event_size;
	int				missed_events;
};

int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s)
{
	struct buffer_data_page field;

	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			 "offset:0;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)sizeof(field.time_stamp),
			 (unsigned int)is_signed_type(u64));

	trace_seq_printf(s, "\tfield: local_t commit;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 (unsigned int)sizeof(field.commit),
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: int overwrite;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 1,
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: char data;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), data),
			 (unsigned int)buffer->subbuf_size,
			 (unsigned int)is_signed_type(char));

	return !trace_seq_has_overflowed(s);
}

static inline void rb_time_read(rb_time_t *t, u64 *ret)
{
	*ret = local64_read(&t->time);
}
static void rb_time_set(rb_time_t *t, u64 val)
{
	local64_set(&t->time, val);
}

/*
 * Enable this to make sure that the event passed to
 * ring_buffer_event_time_stamp() is not committed and also
 * is on the buffer that it passed in.
 */
//#define RB_VERIFY_EVENT
#ifdef RB_VERIFY_EVENT
static struct list_head *rb_list_head(struct list_head *list);
static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
			 void *event)
{
	struct buffer_page *page = cpu_buffer->commit_page;
	struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
	struct list_head *next;
	long commit, write;
	unsigned long addr = (unsigned long)event;
	bool done = false;
	int stop = 0;

	/* Make sure the event exists and is not committed yet */
	do {
		if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
			done = true;
		commit = local_read(&page->page->commit);
		write = local_read(&page->write);
		if (addr >= (unsigned long)&page->page->data[commit] &&
		    addr < (unsigned long)&page->page->data[write])
			return;

		next = rb_list_head(page->list.next);
		page = list_entry(next, struct buffer_page, list);
	} while (!done);
	WARN_ON_ONCE(1);
}
#else
static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
			 void *event)
{
}
#endif

/*
 * The absolute time stamp drops the 5 MSBs and some clocks may
 * require them. rb_fix_abs_ts() takes a previous full time stamp
 * and adds its 5 MSBs on to the saved absolute time stamp. The two
 * are then compared to handle the unlikely event that the latest
 * time stamp incremented into the 5 MSBs.
 */
static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
{
	if (save_ts & TS_MSB) {
		abs |= save_ts & TS_MSB;
		/* Check for overflow */
		if (unlikely(abs < save_ts))
			abs += 1ULL << 59;
	}
	return abs;
}
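
/*
 * Worked example for the overflow check above: if the 59-bit portion
 * wrapped past zero after @save_ts was taken, ORing the saved MSBs back
 * in yields a value smaller than @save_ts; adding 1ULL << 59 re-creates
 * the lost carry into the 5 MSBs.
 */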

static inline u64 rb_time_stamp(struct trace_buffer *buffer);

/**
 * ring_buffer_event_time_stamp - return the event's current time stamp
 * @buffer: The buffer that the event is on
 * @event: the event to get the time stamp of
 *
 * Note, this must be called after @event is reserved, and before it is
 * committed to the ring buffer. And must be called from the same
 * context where the event was reserved (normal, softirq, irq, etc).
 *
 * Returns the time stamp associated with the current event.
 * If the event has an extended time stamp, then that is used as
 * the time stamp to return.
 * In the highly unlikely case that the event was nested more than
 * the max nesting, then the write_stamp of the buffer is returned;
 * otherwise the current time is returned. Neither of the last two
 * cases should really ever happen.
 */
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
	unsigned int nest;
	u64 ts;

	/* If the event includes an absolute time, then just use that */
	if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
		ts = rb_event_time_stamp(event);
		return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
	}

	nest = local_read(&cpu_buffer->committing);
	verify_event(cpu_buffer, event);
	if (WARN_ON_ONCE(!nest))
		goto fail;

	/* Read the current saved nesting level time stamp */
	if (likely(--nest < MAX_NEST))
		return cpu_buffer->event_stamp[nest];

	/* Shouldn't happen, warn if it does */
	WARN_ONCE(1, "nest (%d) greater than max", nest);

 fail:
	rb_time_read(&cpu_buffer->write_stamp, &ts);

	return ts;
}

/**
 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages that have content in the ring buffer.
 */
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
{
	size_t read;
	size_t lost;
	size_t cnt;

	read = local_read(&buffer->buffers[cpu]->pages_read);
	lost = local_read(&buffer->buffers[cpu]->pages_lost);
	cnt = local_read(&buffer->buffers[cpu]->pages_touched);

	if (WARN_ON_ONCE(cnt < lost))
		return 0;

	cnt -= lost;

	/* The reader can read an empty page, but not more than that */
	if (cnt < read) {
		WARN_ON_ONCE(read > cnt + 1);
		return 0;
	}

	return cnt - read;
}

static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	size_t nr_pages;
	size_t dirty;

	nr_pages = cpu_buffer->nr_pages;
	if (!nr_pages || !full)
		return true;

	/*
	 * Add one as dirty will never equal nr_pages, as the sub-buffer
	 * that the writer is on is not counted as dirty.
	 * This is needed if "buffer_percent" is set to 100.
	 */
	dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1;

	return (dirty * 100) >= (full * nr_pages);
}
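
/*
 * Worked example: with nr_pages = 10 and full = 70 (a 70% watermark),
 * the condition is (dirty * 100) >= 700, so the watermark is hit once
 * dirty reaches 7, counting the writer's own sub-buffer added above.
 */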

/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	/* For waiters waiting for the first wake up */
	(void)atomic_fetch_inc_release(&rbwork->seq);

	wake_up_all(&rbwork->waiters);
	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
		/* Only cpu_buffer sets the above flags */
		struct ring_buffer_per_cpu *cpu_buffer =
			container_of(rbwork, struct ring_buffer_per_cpu, irq_work);

		/* Called from interrupt context */
		raw_spin_lock(&cpu_buffer->reader_lock);
		rbwork->wakeup_full = false;
		rbwork->full_waiters_pending = false;

		/* Waking up all waiters, they will reset the shortest full */
		cpu_buffer->shortest_full = 0;
		raw_spin_unlock(&cpu_buffer->reader_lock);

		wake_up_all(&rbwork->full_waiters);
	}
}

/**
 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
 * @buffer: The ring buffer to wake waiters on
 * @cpu: The CPU buffer to wake waiters on
 *
 * When a file that represents a ring buffer is closing,
 * it is prudent to wake up any waiters that are on it.
 */
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *rbwork;

	if (!buffer)
		return;

	if (cpu == RING_BUFFER_ALL_CPUS) {

		/* Wake up individual ones too. One level recursion */
		for_each_buffer_cpu(buffer, cpu)
			ring_buffer_wake_waiters(buffer, cpu);

		rbwork = &buffer->irq_work;
	} else {
		if (WARN_ON_ONCE(!buffer->buffers))
			return;
		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
			return;

		cpu_buffer = buffer->buffers[cpu];
		/* The CPU buffer may not have been initialized yet */
		if (!cpu_buffer)
			return;
		rbwork = &cpu_buffer->irq_work;
	}

	/* This can be called in any context */
	irq_work_queue(&rbwork->work);
}

static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	bool ret = false;

	/* Reads of all CPUs always wait for any data */
	if (cpu == RING_BUFFER_ALL_CPUS)
		return !ring_buffer_empty(buffer);

	cpu_buffer = buffer->buffers[cpu];

	if (!ring_buffer_empty_cpu(buffer, cpu)) {
		unsigned long flags;
		bool pagebusy;

		if (!full)
			return true;

		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
		pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
		ret = !pagebusy && full_hit(buffer, cpu, full);

		if (!ret && (!cpu_buffer->shortest_full ||
			     cpu_buffer->shortest_full > full)) {
			cpu_buffer->shortest_full = full;
		}
		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
	}
	return ret;
}

static inline bool
rb_wait_cond(struct rb_irq_work *rbwork, struct trace_buffer *buffer,
	     int cpu, int full, ring_buffer_cond_fn cond, void *data)
{
	if (rb_watermark_hit(buffer, cpu, full))
		return true;

	if (cond(data))
		return true;

	/*
	 * The events can happen in critical sections where
	 * checking a work queue can cause deadlocks.
	 * After adding a task to the queue, this flag is set
	 * only to notify events to try to wake up the queue
	 * using irq_work.
	 *
	 * We don't clear it even if the buffer is no longer
	 * empty. The flag only causes the next event to run
	 * irq_work to do the work queue wake up. The worst
	 * that can happen if we race with !trace_empty() is that
	 * an event will cause an irq_work to try to wake up
	 * an empty queue.
	 *
	 * There's no reason to protect this flag either, as
	 * the work queue and irq_work logic will do the necessary
	 * synchronization for the wake ups. The only thing
	 * that is necessary is that the wake up happens after
	 * a task has been queued. It's OK for spurious wake ups.
	 */
	if (full)
		rbwork->full_waiters_pending = true;
	else
		rbwork->waiters_pending = true;

	return false;
}

struct rb_wait_data {
	struct rb_irq_work		*irq_work;
	int				seq;
};

/*
 * The default wait condition for ring_buffer_wait() is simply to exit the
 * wait loop the first time it is woken up.
 */
static bool rb_wait_once(void *data)
{
	struct rb_wait_data *rdata = data;
	struct rb_irq_work *rbwork = rdata->irq_work;

	return atomic_read_acquire(&rbwork->seq) != rdata->seq;
}

/**
 * ring_buffer_wait - wait for input to the ring buffer
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 * @cond: condition function to break out of wait (NULL to run once)
 * @data: the data to pass to @cond.
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
		     ring_buffer_cond_fn cond, void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct wait_queue_head *waitq;
	struct rb_irq_work *rbwork;
	struct rb_wait_data rdata;
	int ret = 0;

	/*
	 * Depending on what the caller is waiting for, either any
	 * data in any cpu buffer, or a specific buffer, put the
	 * caller on the appropriate wait queue.
	 */
	if (cpu == RING_BUFFER_ALL_CPUS) {
		rbwork = &buffer->irq_work;
		/* Full only makes sense on per cpu reads */
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -ENODEV;
		cpu_buffer = buffer->buffers[cpu];
		rbwork = &cpu_buffer->irq_work;
	}

	if (full)
		waitq = &rbwork->full_waiters;
	else
		waitq = &rbwork->waiters;

	/* Set up to exit loop as soon as it is woken */
	if (!cond) {
		cond = rb_wait_once;
		rdata.irq_work = rbwork;
		rdata.seq = atomic_read_acquire(&rbwork->seq);
		data = &rdata;
	}

	ret = wait_event_interruptible((*waitq),
				rb_wait_cond(rbwork, buffer, cpu, full, cond, data));

	return ret;
}
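
/*
 * Illustrative caller-side sketch: block until cpu 0 of @buffer is at
 * least half full, using the default wake-once behaviour selected by
 * passing a NULL @cond. Hypothetical helper, for exposition only.
 */
static inline __maybe_unused int rb_example_wait_half_full(struct trace_buffer *buffer)
{
	/* NULL cond/data makes ring_buffer_wait() use rb_wait_once() */
	return ring_buffer_wait(buffer, 0, 50, NULL, NULL);
}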

/**
 * ring_buffer_poll_wait - poll on buffer input
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @filp: the file descriptor
 * @poll_table: The poll descriptor
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 *
 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
 * zero otherwise.
 */
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			  struct file *filp, poll_table *poll_table, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *rbwork;

	if (cpu == RING_BUFFER_ALL_CPUS) {
		rbwork = &buffer->irq_work;
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return EPOLLERR;

		cpu_buffer = buffer->buffers[cpu];
		rbwork = &cpu_buffer->irq_work;
	}

	if (full) {
		poll_wait(filp, &rbwork->full_waiters, poll_table);

		if (rb_watermark_hit(buffer, cpu, full))
			return EPOLLIN | EPOLLRDNORM;
		/*
		 * Only allow the full_waiters_pending update to be seen after
		 * the shortest_full is set (in rb_watermark_hit). If the
		 * writer sees the full_waiters_pending flag set, it will
		 * compare the amount in the ring buffer to shortest_full.
		 * If the amount in the ring buffer is greater than the
		 * shortest_full percent, it will call the irq_work handler
		 * to wake up this list. The irq_work handler will reset
		 * shortest_full back to zero. That's done under the
		 * reader_lock, but the below smp_mb() makes sure that the
		 * update to full_waiters_pending doesn't leak up into the
		 * above.
		 */
		smp_mb();
		rbwork->full_waiters_pending = true;
		return 0;
	}

	poll_wait(filp, &rbwork->waiters, poll_table);
	rbwork->waiters_pending = true;

	/*
	 * There's a tight race between setting the waiters_pending and
	 * checking if the ring buffer is empty.  Once the waiters_pending bit
	 * is set, the next event will wake the task up, but we can get stuck
	 * if there's only a single event in.
	 *
	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
	 * but adding a memory barrier to all events will cause too much of a
	 * performance hit in the fast path.  We only need a memory barrier when
	 * the buffer goes from empty to having content.  But as this race is
	 * extremely small, and it's not a problem if another event comes in, we
	 * will fix it later.
	 */
	smp_mb();

	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct trace_buffer *buffer)
{
	u64 ts;

	/* Skip retpolines :-( */
	if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && likely(buffer->clock == trace_clock_local))
		ts = trace_clock_local();
	else
		ts = buffer->clock();

	/* shift to debug/test normalization and TIME_EXTENTS */
	return ts << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU that owns the buffer, and they
 * only need to worry about interrupts. Reads can happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 *                              -------        -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 *  What the above shows is that the reader just swapped out
 *  the reader page with a page in the buffer, but before it
 *  could make the new header point back to the new page added
 *  it was preempted by a writer. The writer moved forward onto
 *  the new page added by the reader and is about to move forward
 *  again.
 *
 *  You can see, it is legitimate for the previous pointer of
 *  the head (or any page) not to point back to itself. But only
 *  temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL


#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL
1199
1200 /*
1201  * rb_list_head - remove any bit
1202  */
1203 static struct list_head *rb_list_head(struct list_head *list)
1204 {
1205         unsigned long val = (unsigned long)list;
1206
1207         return (struct list_head *)(val & ~RB_FLAG_MASK);
1208 }
1209
1210 /*
1211  * rb_is_head_page - test if the given page is the head page
1212  *
1213  * Because the reader may move the head_page pointer, we can
1214  * not trust what the head page is (it may be pointing to
1215  * the reader page). But if the next page is a header page,
1216  * its flags will be non zero.
1217  */
1218 static inline int
1219 rb_is_head_page(struct buffer_page *page, struct list_head *list)
1220 {
1221         unsigned long val;
1222
1223         val = (unsigned long)list->next;
1224
1225         if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
1226                 return RB_PAGE_MOVED;
1227
1228         return val & RB_FLAG_MASK;
1229 }
1230
1231 /*
1232  * rb_is_reader_page
1233  *
1234  * The unique thing about the reader page, is that, if the
1235  * writer is ever on it, the previous pointer never points
1236  * back to the reader page.
1237  */
1238 static bool rb_is_reader_page(struct buffer_page *page)
1239 {
1240         struct list_head *list = page->list.prev;
1241
1242         return rb_list_head(list->next) != &page->list;
1243 }
1244
1245 /*
1246  * rb_set_list_to_head - set a list_head to be pointing to head.
1247  */
1248 static void rb_set_list_to_head(struct list_head *list)
1249 {
1250         unsigned long *ptr;
1251
1252         ptr = (unsigned long *)&list->next;
1253         *ptr |= RB_PAGE_HEAD;
1254         *ptr &= ~RB_PAGE_UPDATE;
1255 }
1256
1257 /*
1258  * rb_head_page_activate - sets up head page
1259  */
1260 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
1261 {
1262         struct buffer_page *head;
1263
1264         head = cpu_buffer->head_page;
1265         if (!head)
1266                 return;
1267
1268         /*
1269          * Set the previous list pointer to have the HEAD flag.
1270          */
1271         rb_set_list_to_head(head->list.prev);
1272
1273         if (cpu_buffer->ring_meta) {
1274                 struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
1275                 meta->head_buffer = (unsigned long)head->page;
1276         }
1277 }
1278
1279 static void rb_list_head_clear(struct list_head *list)
1280 {
1281         unsigned long *ptr = (unsigned long *)&list->next;
1282
1283         *ptr &= ~RB_FLAG_MASK;
1284 }
1285
1286 /*
1287  * rb_head_page_deactivate - clears head page ptr (for free list)
1288  */
1289 static void
1290 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
1291 {
1292         struct list_head *hd;
1293
1294         /* Go through the whole list and clear any pointers found. */
1295         rb_list_head_clear(cpu_buffer->pages);
1296
1297         list_for_each(hd, cpu_buffer->pages)
1298                 rb_list_head_clear(hd);
1299 }
1300
1301 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
1302                             struct buffer_page *head,
1303                             struct buffer_page *prev,
1304                             int old_flag, int new_flag)
1305 {
1306         struct list_head *list;
1307         unsigned long val = (unsigned long)&head->list;
1308         unsigned long ret;
1309
1310         list = &prev->list;
1311
1312         val &= ~RB_FLAG_MASK;
1313
1314         ret = cmpxchg((unsigned long *)&list->next,
1315                       val | old_flag, val | new_flag);
1316
1317         /* check if the reader took the page */
1318         if ((ret & ~RB_FLAG_MASK) != val)
1319                 return RB_PAGE_MOVED;
1320
1321         return ret & RB_FLAG_MASK;
1322 }
1323
1324 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
1325                                    struct buffer_page *head,
1326                                    struct buffer_page *prev,
1327                                    int old_flag)
1328 {
1329         return rb_head_page_set(cpu_buffer, head, prev,
1330                                 old_flag, RB_PAGE_UPDATE);
1331 }
1332
1333 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
1334                                  struct buffer_page *head,
1335                                  struct buffer_page *prev,
1336                                  int old_flag)
1337 {
1338         return rb_head_page_set(cpu_buffer, head, prev,
1339                                 old_flag, RB_PAGE_HEAD);
1340 }
1341
1342 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
1343                                    struct buffer_page *head,
1344                                    struct buffer_page *prev,
1345                                    int old_flag)
1346 {
1347         return rb_head_page_set(cpu_buffer, head, prev,
1348                                 old_flag, RB_PAGE_NORMAL);
1349 }
1350
1351 static inline void rb_inc_page(struct buffer_page **bpage)
1352 {
1353         struct list_head *p = rb_list_head((*bpage)->list.next);
1354
1355         *bpage = list_entry(p, struct buffer_page, list);
1356 }
1357
1358 static struct buffer_page *
1359 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
1360 {
1361         struct buffer_page *head;
1362         struct buffer_page *page;
1363         struct list_head *list;
1364         int i;
1365
1366         if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
1367                 return NULL;
1368
1369         /* sanity check */
1370         list = cpu_buffer->pages;
1371         if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
1372                 return NULL;
1373
1374         page = head = cpu_buffer->head_page;
1375         /*
1376          * It is possible that the writer moves the header behind
1377          * where we started, and we miss in one loop.
1378          * A second loop should grab the header, but we'll do
1379          * three loops just because I'm paranoid.
1380          */
1381         for (i = 0; i < 3; i++) {
1382                 do {
1383                         if (rb_is_head_page(page, page->list.prev)) {
1384                                 cpu_buffer->head_page = page;
1385                                 return page;
1386                         }
1387                         rb_inc_page(&page);
1388                 } while (page != head);
1389         }
1390
1391         RB_WARN_ON(cpu_buffer, 1);
1392
1393         return NULL;
1394 }
1395
1396 static bool rb_head_page_replace(struct buffer_page *old,
1397                                 struct buffer_page *new)
1398 {
1399         unsigned long *ptr = (unsigned long *)&old->list.prev->next;
1400         unsigned long val;
1401
1402         val = *ptr & ~RB_FLAG_MASK;
1403         val |= RB_PAGE_HEAD;
1404
1405         return try_cmpxchg(ptr, &val, (unsigned long)&new->list);
1406 }
1407
1408 /*
1409  * rb_tail_page_update - move the tail page forward
1410  */
1411 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
1412                                struct buffer_page *tail_page,
1413                                struct buffer_page *next_page)
1414 {
1415         unsigned long old_entries;
1416         unsigned long old_write;
1417
1418         /*
1419          * The tail page now needs to be moved forward.
1420          *
1421          * We need to reset the tail page, but without messing
1422          * with possible erasing of data brought in by interrupts
1423          * that have moved the tail page and are currently on it.
1424          *
1425          * We add a counter to the write field to denote this.
1426          */
1427         old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
1428         old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
1429
1430         /*
1431          * Just make sure we have seen our old_write and synchronize
1432          * with any interrupts that come in.
1433          */
1434         barrier();
1435
1436         /*
1437          * If the tail page is still the same as what we think
1438          * it is, then it is up to us to update the tail
1439          * pointer.
1440          */
1441         if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
1442                 /* Zero the write counter */
1443                 unsigned long val = old_write & ~RB_WRITE_MASK;
1444                 unsigned long eval = old_entries & ~RB_WRITE_MASK;
1445
1446                 /*
1447                  * This will only succeed if an interrupt did
1448                  * not come in and change it. In which case, we
1449                  * do not want to modify it.
1450                  *
1451                  * We add (void) to let the compiler know that we do not care
1452                  * about the return value of these functions. We use the
1453                  * cmpxchg to only update if an interrupt did not already
1454                  * do it for us. If the cmpxchg fails, we don't care.
1455                  */
1456                 (void)local_cmpxchg(&next_page->write, old_write, val);
1457                 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
1458
1459                 /*
1460                  * No need to worry about races with clearing out the commit.
1461                  * it only can increment when a commit takes place. But that
1462                  * only happens in the outer most nested commit.
1463                  */
1464                 local_set(&next_page->page->commit, 0);
1465
1466                 /* Either we update tail_page or an interrupt does */
1467                 if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page))
1468                         local_inc(&cpu_buffer->pages_touched);
1469         }
1470 }
1471
1472 static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1473                           struct buffer_page *bpage)
1474 {
1475         unsigned long val = (unsigned long)bpage;
1476
1477         RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
1478 }
1479
1480 static bool rb_check_links(struct ring_buffer_per_cpu *cpu_buffer,
1481                            struct list_head *list)
1482 {
1483         if (RB_WARN_ON(cpu_buffer,
1484                        rb_list_head(rb_list_head(list->next)->prev) != list))
1485                 return false;
1486
1487         if (RB_WARN_ON(cpu_buffer,
1488                        rb_list_head(rb_list_head(list->prev)->next) != list))
1489                 return false;
1490
1491         return true;
1492 }
1493
1494 /**
1495  * rb_check_pages - integrity check of buffer pages
1496  * @cpu_buffer: CPU buffer with pages to test
1497  *
1498  * As a safety measure we check to make sure the data pages have not
1499  * been corrupted.
1500  */
1501 static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1502 {
1503         struct list_head *head, *tmp;
1504         unsigned long buffer_cnt;
1505         unsigned long flags;
1506         int nr_loops = 0;
1507
1508         /*
1509          * Walk the linked list underpinning the ring buffer and validate all
1510          * its next and prev links.
1511          *
1512          * The check acquires the reader_lock to avoid concurrent processing
1513          * with code that could be modifying the list. However, the lock cannot
1514          * be held for the entire duration of the walk, as this would make the
1515          * time when interrupts are disabled non-deterministic, dependent on the
1516          * ring buffer size. Therefore, the code releases and re-acquires the
1517          * lock after checking each page. The ring_buffer_per_cpu.cnt variable
1518          * is then used to detect if the list was modified while the lock was
1519          * not held, in which case the check needs to be restarted.
1520          *
1521          * The code attempts to perform the check at most three times before
1522          * giving up. This is acceptable because this is only a self-validation
1523          * to detect problems early on. In practice, the list modification
1524          * operations are fairly spaced out, and so this check typically
1525          * succeeds by the second try at the latest.
1526          */
1527 again:
1528         if (++nr_loops > 3)
1529                 return;
1530
1531         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1532         head = rb_list_head(cpu_buffer->pages);
1533         if (!rb_check_links(cpu_buffer, head))
1534                 goto out_locked;
1535         buffer_cnt = cpu_buffer->cnt;
1536         tmp = head;
1537         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1538
1539         while (true) {
1540                 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1541
1542                 if (buffer_cnt != cpu_buffer->cnt) {
1543                         /* The list was updated, try again. */
1544                         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1545                         goto again;
1546                 }
1547
1548                 tmp = rb_list_head(tmp->next);
1549                 if (tmp == head)
1550                         /* The iteration circled back, all is done. */
1551                         goto out_locked;
1552
1553                 if (!rb_check_links(cpu_buffer, tmp))
1554                         goto out_locked;
1555
1556                 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1557         }
1558
1559 out_locked:
1560         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1561 }
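
/*
 * The lock-drop pattern in rb_check_pages() generalizes to any long
 * walk that must not keep interrupts disabled: snapshot a generation
 * counter under the lock, drop the lock between items, and restart
 * whenever the counter has moved. A minimal sketch (names here are
 * hypothetical):
 *
 *	again:
 *		spin_lock(&lock);
 *		gen = obj->generation;
 *		...check one item...
 *		spin_unlock(&lock);
 *
 *		spin_lock(&lock);
 *		if (gen != obj->generation) {
 *			spin_unlock(&lock);
 *			goto again;
 *		}
 *		...check the next item...
 */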
1562
1563 /*
1564  * Take an address, add the meta data size as well as the array of
1565  * subbuffer indexes, then align it to a subbuffer size.
1566  *
1567  * This is used to help find the next per cpu subbuffer within a mapped range.
1568  */
1569 static unsigned long
1570 rb_range_align_subbuf(unsigned long addr, int subbuf_size, int nr_subbufs)
1571 {
1572         addr += sizeof(struct ring_buffer_meta) +
1573                 sizeof(int) * nr_subbufs;
1574         return ALIGN(addr, subbuf_size);
1575 }
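
/*
 * Worked example (illustrative values, assuming a 56-byte
 * struct ring_buffer_meta on a 64-bit build): with a 4K subbuf_size
 * and 8 subbufs, an addr of 0x1000 becomes
 *
 *	addr = 0x1000 + 56 + 8 * sizeof(int);	// 0x1058
 *	addr = ALIGN(0x1058, 0x1000);		// 0x2000
 *
 * so the sub-buffers always start on a subbuf_size boundary just
 * past the meta data and its index array.
 */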
1576
1577 /*
1578  * Return the ring_buffer_meta for a given @cpu.
1579  */
1580 static void *rb_range_meta(struct trace_buffer *buffer, int nr_pages, int cpu)
1581 {
1582         int subbuf_size = buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
1583         unsigned long ptr = buffer->range_addr_start;
1584         struct ring_buffer_meta *meta;
1585         int nr_subbufs;
1586
1587         if (!ptr)
1588                 return NULL;
1589
1590         /* When nr_pages passed in is zero, the first meta has already been initialized */
1591         if (!nr_pages) {
1592                 meta = (struct ring_buffer_meta *)ptr;
1593                 nr_subbufs = meta->nr_subbufs;
1594         } else {
1595                 meta = NULL;
1596                 /* Include the reader page */
1597                 nr_subbufs = nr_pages + 1;
1598         }
1599
1600         /*
1601          * The first chunk may not be subbuffer aligned, whereas
1602          * the rest of the chunks are.
1603          */
1604         if (cpu) {
1605                 ptr = rb_range_align_subbuf(ptr, subbuf_size, nr_subbufs);
1606                 ptr += subbuf_size * nr_subbufs;
1607
1608                 /* We can use multiplication to find chunks for cpu > 1 */
1609                 if (cpu > 1) {
1610                         unsigned long size;
1611                         unsigned long p;
1612
1613                         /* Save the beginning of this CPU chunk */
1614                         p = ptr;
1615                         ptr = rb_range_align_subbuf(ptr, subbuf_size, nr_subbufs);
1616                         ptr += subbuf_size * nr_subbufs;
1617
1618                         /* Now all chunks after this are the same size */
1619                         size = ptr - p;
1620                         ptr += size * (cpu - 2);
1621                 }
1622         }
1623         return (void *)ptr;
1624 }
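
/*
 * Layout of the mapped range that rb_range_meta() walks (illustrative
 * ASCII art):
 *
 *	+-----------+-----------+-----------+-----------+--
 *	| meta[0]   | subbufs 0 | meta[1]   | subbufs 1 | ...
 *	| buffers[] | (aligned) | buffers[] | (aligned) |
 *	+-----------+-----------+-----------+-----------+--
 *
 * Only chunk 0 may start unaligned; every later chunk begins where
 * the previous one ended, so chunks 1..N all have the same size and
 * the chunk for cpu > 1 can be found by multiplication.
 */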
1625
1626 /* Return the start of subbufs given the meta pointer */
1627 static void *rb_subbufs_from_meta(struct ring_buffer_meta *meta)
1628 {
1629         int subbuf_size = meta->subbuf_size;
1630         unsigned long ptr;
1631
1632         ptr = (unsigned long)meta;
1633         ptr = rb_range_align_subbuf(ptr, subbuf_size, meta->nr_subbufs);
1634
1635         return (void *)ptr;
1636 }
1637
1638 /*
1639  * Return a specific sub-buffer for a given @cpu defined by @idx.
1640  */
1641 static void *rb_range_buffer(struct ring_buffer_per_cpu *cpu_buffer, int idx)
1642 {
1643         struct ring_buffer_meta *meta;
1644         unsigned long ptr;
1645         int subbuf_size;
1646
1647         meta = rb_range_meta(cpu_buffer->buffer, 0, cpu_buffer->cpu);
1648         if (!meta)
1649                 return NULL;
1650
1651         if (WARN_ON_ONCE(idx >= meta->nr_subbufs))
1652                 return NULL;
1653
1654         subbuf_size = meta->subbuf_size;
1655
1656         /* Map this buffer to the order that's in meta->buffers[] */
1657         idx = meta->buffers[idx];
1658
1659         ptr = (unsigned long)rb_subbufs_from_meta(meta);
1660
1661         ptr += subbuf_size * idx;
1662         if (ptr + subbuf_size > cpu_buffer->buffer->range_addr_end)
1663                 return NULL;
1664
1665         return (void *)ptr;
1666 }
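
/*
 * Example of the buffers[] indirection (illustrative values): with
 * meta->buffers = { 3, 0, 1, 2 }, logical sub-buffer 0 lives in
 * physical slot 3:
 *
 *	idx = meta->buffers[0];				// 3
 *	ptr = rb_subbufs_from_meta(meta) + subbuf_size * idx;
 *
 * Swapping pages only rewrites buffers[]; the physical addresses
 * stay fixed, which is what lets them survive across a reboot.
 */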
1667
1668 /*
1669  * See if the existing memory contains valid ring buffer data.
1670  * As the previous kernel must be the same as this kernel, all
1671  * the calculations (size of buffers and number of buffers)
1672  * must be the same.
1673  */
1674 static bool rb_meta_valid(struct ring_buffer_meta *meta, int cpu,
1675                           struct trace_buffer *buffer, int nr_pages,
1676                           unsigned long *subbuf_mask)
1677 {
1678         int subbuf_size = PAGE_SIZE;
1679         struct buffer_data_page *subbuf;
1680         unsigned long buffers_start;
1681         unsigned long buffers_end;
1682         int i;
1683
1684         if (!subbuf_mask)
1685                 return false;
1686
1687         /* Check the meta magic and meta struct size */
1688         if (meta->magic != RING_BUFFER_META_MAGIC ||
1689             meta->struct_size != sizeof(*meta)) {
1690                 pr_info("Ring buffer boot meta[%d] mismatch of magic or struct size\n", cpu);
1691                 return false;
1692         }
1693
1694         /* The subbuffer's size and number of subbuffers must match */
1695         if (meta->subbuf_size != subbuf_size ||
1696             meta->nr_subbufs != nr_pages + 1) {
1697                 pr_info("Ring buffer boot meta [%d] mismatch of subbuf_size/nr_pages\n", cpu);
1698                 return false;
1699         }
1700
1701         buffers_start = meta->first_buffer;
1702         buffers_end = meta->first_buffer + (subbuf_size * meta->nr_subbufs);
1703
1704         /* Are the head and commit buffers within the range of buffers? */
1705         if (meta->head_buffer < buffers_start ||
1706             meta->head_buffer >= buffers_end) {
1707                 pr_info("Ring buffer boot meta [%d] head buffer out of range\n", cpu);
1708                 return false;
1709         }
1710
1711         if (meta->commit_buffer < buffers_start ||
1712             meta->commit_buffer >= buffers_end) {
1713                 pr_info("Ring buffer boot meta [%d] commit buffer out of range\n", cpu);
1714                 return false;
1715         }
1716
1717         subbuf = rb_subbufs_from_meta(meta);
1718
1719         bitmap_clear(subbuf_mask, 0, meta->nr_subbufs);
1720
1721         /* Do the meta buffers and the subbufs themselves have correct data? */
1722         for (i = 0; i < meta->nr_subbufs; i++) {
1723                 if (meta->buffers[i] < 0 ||
1724                     meta->buffers[i] >= meta->nr_subbufs) {
1725                         pr_info("Ring buffer boot meta [%d] array out of range\n", cpu);
1726                         return false;
1727                 }
1728
1729                 if ((unsigned)local_read(&subbuf->commit) > subbuf_size) {
1730                         pr_info("Ring buffer boot meta [%d] buffer invalid commit\n", cpu);
1731                         return false;
1732                 }
1733
1734                 if (test_bit(meta->buffers[i], subbuf_mask)) {
1735                         pr_info("Ring buffer boot meta [%d] array has duplicates\n", cpu);
1736                         return false;
1737                 }
1738
1739                 set_bit(meta->buffers[i], subbuf_mask);
1740                 subbuf = (void *)subbuf + subbuf_size;
1741         }
1742
1743         return true;
1744 }
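
/*
 * The duplicate check in rb_meta_valid() is the standard bitmap
 * permutation test (illustrative sketch): an array of n entries is a
 * valid permutation of 0..n-1 iff every value is in range and no
 * value is seen twice.
 *
 *	bitmap_clear(mask, 0, n);
 *	for (i = 0; i < n; i++) {
 *		if (buf[i] >= n || test_bit(buf[i], mask))
 *			return false;
 *		set_bit(buf[i], mask);
 *	}
 *	return true;
 */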
1745
1746 static int rb_meta_subbuf_idx(struct ring_buffer_meta *meta, void *subbuf);
1747
1748 static int rb_read_data_buffer(struct buffer_data_page *dpage, int tail, int cpu,
1749                                unsigned long long *timestamp, u64 *delta_ptr)
1750 {
1751         struct ring_buffer_event *event;
1752         u64 ts, delta;
1753         int events = 0;
1754         int e;
1755
1756         *delta_ptr = 0;
1757         *timestamp = 0;
1758
1759         ts = dpage->time_stamp;
1760
1761         for (e = 0; e < tail; e += rb_event_length(event)) {
1762
1763                 event = (struct ring_buffer_event *)(dpage->data + e);
1764
1765                 switch (event->type_len) {
1766
1767                 case RINGBUF_TYPE_TIME_EXTEND:
1768                         delta = rb_event_time_stamp(event);
1769                         ts += delta;
1770                         break;
1771
1772                 case RINGBUF_TYPE_TIME_STAMP:
1773                         delta = rb_event_time_stamp(event);
1774                         delta = rb_fix_abs_ts(delta, ts);
1775                         if (delta < ts) {
1776                                 *delta_ptr = delta;
1777                                 *timestamp = ts;
1778                                 return -1;
1779                         }
1780                         ts = delta;
1781                         break;
1782
1783                 case RINGBUF_TYPE_PADDING:
1784                         if (event->time_delta == 1)
1785                                 break;
1786                         fallthrough;
1787                 case RINGBUF_TYPE_DATA:
1788                         events++;
1789                         ts += event->time_delta;
1790                         break;
1791
1792                 default:
1793                         return -1;
1794                 }
1795         }
1796         *timestamp = ts;
1797         return events;
1798 }
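
/*
 * Timestamp reconstruction in short (illustrative): most events carry
 * a small delta, so the absolute time of event k on a page is
 *
 *	ts = dpage->time_stamp + delta_1 + delta_2 + ... + delta_k
 *
 * TIME_EXTEND events contribute an oversized delta, and TIME_STAMP
 * events replace ts outright, after rb_fix_abs_ts() restores the
 * MSBs that ABS_TS_MASK strips from the 59-bit absolute stamp.
 */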
1799
1800 static int rb_validate_buffer(struct buffer_data_page *dpage, int cpu)
1801 {
1802         unsigned long long ts;
1803         u64 delta;
1804         int tail;
1805
1806         tail = local_read(&dpage->commit);
1807         return rb_read_data_buffer(dpage, tail, cpu, &ts, &delta);
1808 }
1809
1810 /* If the meta data has been validated, now validate the events */
1811 static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer)
1812 {
1813         struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
1814         struct buffer_page *head_page;
1815         unsigned long entry_bytes = 0;
1816         unsigned long entries = 0;
1817         int ret;
1818         int i;
1819
1820         if (!meta || !meta->head_buffer)
1821                 return;
1822
1823         /* Do the reader page first */
1824         ret = rb_validate_buffer(cpu_buffer->reader_page->page, cpu_buffer->cpu);
1825         if (ret < 0) {
1826                 pr_info("Ring buffer reader page is invalid\n");
1827                 goto invalid;
1828         }
1829         entries += ret;
1830         entry_bytes += local_read(&cpu_buffer->reader_page->page->commit);
1831         local_set(&cpu_buffer->reader_page->entries, ret);
1832
1833         head_page = cpu_buffer->head_page;
1834
1835         /* If both the head and commit are on the reader_page then we are done. */
1836         if (head_page == cpu_buffer->reader_page &&
1837             head_page == cpu_buffer->commit_page)
1838                 goto done;
1839
1840         /* Iterate until finding the commit page */
1841         for (i = 0; i < meta->nr_subbufs + 1; i++, rb_inc_page(&head_page)) {
1842
1843                 /* Reader page has already been done */
1844                 if (head_page == cpu_buffer->reader_page)
1845                         continue;
1846
1847                 ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu);
1848                 if (ret < 0) {
1849                         pr_info("Ring buffer meta [%d] invalid buffer page\n",
1850                                 cpu_buffer->cpu);
1851                         goto invalid;
1852                 }
1853
1854                 /* If the buffer has content, update pages_touched */
1855                 if (ret)
1856                         local_inc(&cpu_buffer->pages_touched);
1857
1858                 entries += ret;
1859                 entry_bytes += local_read(&head_page->page->commit);
1860                 local_set(&head_page->entries, ret);
1861
1862                 if (head_page == cpu_buffer->commit_page)
1863                         break;
1864         }
1865
1866         if (head_page != cpu_buffer->commit_page) {
1867                 pr_info("Ring buffer meta [%d] commit page not found\n",
1868                         cpu_buffer->cpu);
1869                 goto invalid;
1870         }
1871  done:
1872         local_set(&cpu_buffer->entries, entries);
1873         local_set(&cpu_buffer->entries_bytes, entry_bytes);
1874
1875         pr_info("Ring buffer meta [%d] is from previous boot!\n", cpu_buffer->cpu);
1876         return;
1877
1878  invalid:
1879         /* The content of the buffers is invalid, reset the meta data */
1880         meta->head_buffer = 0;
1881         meta->commit_buffer = 0;
1882
1883         /* Reset the reader page */
1884         local_set(&cpu_buffer->reader_page->entries, 0);
1885         local_set(&cpu_buffer->reader_page->page->commit, 0);
1886
1887         /* Reset all the subbuffers */
1888         for (i = 0; i < meta->nr_subbufs - 1; i++, rb_inc_page(&head_page)) {
1889                 local_set(&head_page->entries, 0);
1890                 local_set(&head_page->page->commit, 0);
1891         }
1892 }
1893
1894 /* Used to calculate data delta */
1895 static char rb_data_ptr[] = "";
1896
1897 #define THIS_TEXT_PTR           ((unsigned long)rb_meta_init_text_addr)
1898 #define THIS_DATA_PTR           ((unsigned long)rb_data_ptr)
1899
1900 static void rb_meta_init_text_addr(struct ring_buffer_meta *meta)
1901 {
1902         meta->text_addr = THIS_TEXT_PTR;
1903         meta->data_addr = THIS_DATA_PTR;
1904 }
1905
1906 static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages)
1907 {
1908         struct ring_buffer_meta *meta;
1909         unsigned long *subbuf_mask;
1910         unsigned long delta;
1911         void *subbuf;
1912         int cpu;
1913         int i;
1914
1915         /* Create a mask to test the subbuf array */
1916         subbuf_mask = bitmap_alloc(nr_pages + 1, GFP_KERNEL);
1917         /* If subbuf_mask fails to allocate, then rb_meta_valid() will return false */
1918
1919         for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
1920                 void *next_meta;
1921
1922                 meta = rb_range_meta(buffer, nr_pages, cpu);
1923
1924                 if (rb_meta_valid(meta, cpu, buffer, nr_pages, subbuf_mask)) {
1925                         /* Make the mappings match the current address */
1926                         subbuf = rb_subbufs_from_meta(meta);
1927                         delta = (unsigned long)subbuf - meta->first_buffer;
1928                         meta->first_buffer += delta;
1929                         meta->head_buffer += delta;
1930                         meta->commit_buffer += delta;
1931                         buffer->last_text_delta = THIS_TEXT_PTR - meta->text_addr;
1932                         buffer->last_data_delta = THIS_DATA_PTR - meta->data_addr;
1933                         continue;
1934                 }
1935
1936                 if (cpu < nr_cpu_ids - 1)
1937                         next_meta = rb_range_meta(buffer, nr_pages, cpu + 1);
1938                 else
1939                         next_meta = (void *)buffer->range_addr_end;
1940
1941                 memset(meta, 0, next_meta - (void *)meta);
1942
1943                 meta->magic = RING_BUFFER_META_MAGIC;
1944                 meta->struct_size = sizeof(*meta);
1945
1946                 meta->nr_subbufs = nr_pages + 1;
1947                 meta->subbuf_size = PAGE_SIZE;
1948
1949                 subbuf = rb_subbufs_from_meta(meta);
1950
1951                 meta->first_buffer = (unsigned long)subbuf;
1952                 rb_meta_init_text_addr(meta);
1953
1954                 /*
1955                  * The buffers[] array holds the order of the sub-buffers
1956                  * that are after the meta data. The sub-buffers may
1957                  * be swapped out when read and inserted into a different
1958                  * location of the ring buffer. Although their addresses
1959                  * remain the same, the buffers[] array records the
1960                  * indexes of the sub-buffers in their actual order.
1961                  */
1962                 for (i = 0; i < meta->nr_subbufs; i++) {
1963                         meta->buffers[i] = i;
1964                         rb_init_page(subbuf);
1965                         subbuf += meta->subbuf_size;
1966                 }
1967         }
1968         bitmap_free(subbuf_mask);
1969 }
1970
1971 static void *rbm_start(struct seq_file *m, loff_t *pos)
1972 {
1973         struct ring_buffer_per_cpu *cpu_buffer = m->private;
1974         struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
1975         unsigned long val;
1976
1977         if (!meta)
1978                 return NULL;
1979
1980         if (*pos > meta->nr_subbufs)
1981                 return NULL;
1982
1983         val = *pos;
1984         val++;
1985
1986         return (void *)val;
1987 }
1988
1989 static void *rbm_next(struct seq_file *m, void *v, loff_t *pos)
1990 {
1991         (*pos)++;
1992
1993         return rbm_start(m, pos);
1994 }
1995
1996 static int rbm_show(struct seq_file *m, void *v)
1997 {
1998         struct ring_buffer_per_cpu *cpu_buffer = m->private;
1999         struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
2000         unsigned long val = (unsigned long)v;
2001
2002         if (val == 1) {
2003                 seq_printf(m, "head_buffer:   %d\n",
2004                            rb_meta_subbuf_idx(meta, (void *)meta->head_buffer));
2005                 seq_printf(m, "commit_buffer: %d\n",
2006                            rb_meta_subbuf_idx(meta, (void *)meta->commit_buffer));
2007                 seq_printf(m, "subbuf_size:   %d\n", meta->subbuf_size);
2008                 seq_printf(m, "nr_subbufs:    %d\n", meta->nr_subbufs);
2009                 return 0;
2010         }
2011
2012         val -= 2;
2013         seq_printf(m, "buffer[%ld]:    %d\n", val, meta->buffers[val]);
2014
2015         return 0;
2016 }
2017
2018 static void rbm_stop(struct seq_file *m, void *p)
2019 {
2020 }
2021
2022 static const struct seq_operations rb_meta_seq_ops = {
2023         .start          = rbm_start,
2024         .next           = rbm_next,
2025         .show           = rbm_show,
2026         .stop           = rbm_stop,
2027 };
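
/*
 * The seq_file position is offset by one so that a NULL return (stop
 * iterating) can be told apart from record 0. The mapping, matching
 * rbm_show() above:
 *
 *	*pos == 0  ->  v == 1    ->  header (head/commit/size/count)
 *	*pos == n  ->  v == n+1  ->  "buffer[n-1]: ..." line
 */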
2028
2029 int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu)
2030 {
2031         struct seq_file *m;
2032         int ret;
2033
2034         ret = seq_open(file, &rb_meta_seq_ops);
2035         if (ret)
2036                 return ret;
2037
2038         m = file->private_data;
2039         m->private = buffer->buffers[cpu];
2040
2041         return 0;
2042 }
2043
2044 /* Map the buffer_pages to the previous head and commit pages */
2045 static void rb_meta_buffer_update(struct ring_buffer_per_cpu *cpu_buffer,
2046                                   struct buffer_page *bpage)
2047 {
2048         struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
2049
2050         if (meta->head_buffer == (unsigned long)bpage->page)
2051                 cpu_buffer->head_page = bpage;
2052
2053         if (meta->commit_buffer == (unsigned long)bpage->page) {
2054                 cpu_buffer->commit_page = bpage;
2055                 cpu_buffer->tail_page = bpage;
2056         }
2057 }
2058
2059 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
2060                 long nr_pages, struct list_head *pages)
2061 {
2062         struct trace_buffer *buffer = cpu_buffer->buffer;
2063         struct ring_buffer_meta *meta = NULL;
2064         struct buffer_page *bpage, *tmp;
2065         bool user_thread = current->mm != NULL;
2066         gfp_t mflags;
2067         long i;
2068
2069         /*
2070          * Check if the available memory is there first.
2071          * Note, si_mem_available() only gives us a rough estimate of available
2072          * memory. It may not be accurate. But we don't care, we just want
2073          * to prevent doing any allocation when it is obvious that it is
2074          * not going to succeed.
2075          */
2076         i = si_mem_available();
2077         if (i < nr_pages)
2078                 return -ENOMEM;
2079
2080         /*
2081          * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
2082          * gracefully without invoking oom-killer and the system is not
2083          * destabilized.
2084          */
2085         mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
2086
2087         /*
2088          * If a user thread allocates too much while si_mem_available()
2089          * reports there's enough memory (even though there is not),
2090          * make sure the OOM killer kills this thread. This can happen
2091          * even with RETRY_MAYFAIL because another task may be doing
2092          * an allocation after this task has taken all memory.
2093          * This is the task the OOM killer needs to take out during this
2094          * loop, even if it was triggered by an allocation somewhere else.
2095          */
2096         if (user_thread)
2097                 set_current_oom_origin();
2098
2099         if (buffer->range_addr_start)
2100                 meta = rb_range_meta(buffer, nr_pages, cpu_buffer->cpu);
2101
2102         for (i = 0; i < nr_pages; i++) {
2103                 struct page *page;
2104
2105                 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
2106                                     mflags, cpu_to_node(cpu_buffer->cpu));
2107                 if (!bpage)
2108                         goto free_pages;
2109
2110                 rb_check_bpage(cpu_buffer, bpage);
2111
2112                 /*
2113                  * Append the pages, as for mapped buffers we want to
2114                  * keep their order.
2115                  */
2116                 list_add_tail(&bpage->list, pages);
2117
2118                 if (meta) {
2119                         /* A range was given. Use that for the buffer page */
2120                         bpage->page = rb_range_buffer(cpu_buffer, i + 1);
2121                         if (!bpage->page)
2122                                 goto free_pages;
2123                         /* If this is valid from a previous boot */
2124                         if (meta->head_buffer)
2125                                 rb_meta_buffer_update(cpu_buffer, bpage);
2126                         bpage->range = 1;
2127                         bpage->id = i + 1;
2128                 } else {
2129                         page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
2130                                                 mflags | __GFP_COMP | __GFP_ZERO,
2131                                                 cpu_buffer->buffer->subbuf_order);
2132                         if (!page)
2133                                 goto free_pages;
2134                         bpage->page = page_address(page);
2135                         rb_init_page(bpage->page);
2136                 }
2137                 bpage->order = cpu_buffer->buffer->subbuf_order;
2138
2139                 if (user_thread && fatal_signal_pending(current))
2140                         goto free_pages;
2141         }
2142         if (user_thread)
2143                 clear_current_oom_origin();
2144
2145         return 0;
2146
2147 free_pages:
2148         list_for_each_entry_safe(bpage, tmp, pages, list) {
2149                 list_del_init(&bpage->list);
2150                 free_buffer_page(bpage);
2151         }
2152         if (user_thread)
2153                 clear_current_oom_origin();
2154
2155         return -ENOMEM;
2156 }
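
/*
 * The OOM handling in __rb_allocate_pages() is a pattern worth
 * noting for any large user-triggered allocation (illustrative
 * sketch, error handling trimmed):
 *
 *	if (si_mem_available() < nr_pages)	// cheap early out
 *		return -ENOMEM;
 *	set_current_oom_origin();		// prefer killing us
 *	p = kzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
 *	if (fatal_signal_pending(current))	// we were chosen
 *		goto free;
 *	clear_current_oom_origin();
 */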
2157
2158 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
2159                              unsigned long nr_pages)
2160 {
2161         LIST_HEAD(pages);
2162
2163         WARN_ON(!nr_pages);
2164
2165         if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
2166                 return -ENOMEM;
2167
2168         /*
2169          * The ring buffer page list is a circular list that does not
2170          * start and end with a list head. All page list items point to
2171          * other pages.
2172          */
2173         cpu_buffer->pages = pages.next;
2174         list_del(&pages);
2175
2176         cpu_buffer->nr_pages = nr_pages;
2177
2178         rb_check_pages(cpu_buffer);
2179
2180         return 0;
2181 }
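
/*
 * After the list_del() above, the per-cpu page list is a pure ring
 * of buffer pages with no list_head sentinel (illustrative):
 *
 *	+--> page0 --> page1 --> ... --> pageN --+
 *	|                                        |
 *	+----------------------------------------+
 *
 * cpu_buffer->pages is merely a cursor into the ring, not a head.
 */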
2182
2183 static struct ring_buffer_per_cpu *
2184 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
2185 {
2186         struct ring_buffer_per_cpu *cpu_buffer;
2187         struct ring_buffer_meta *meta;
2188         struct buffer_page *bpage;
2189         struct page *page;
2190         int ret;
2191
2192         cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
2193                                   GFP_KERNEL, cpu_to_node(cpu));
2194         if (!cpu_buffer)
2195                 return NULL;
2196
2197         cpu_buffer->cpu = cpu;
2198         cpu_buffer->buffer = buffer;
2199         raw_spin_lock_init(&cpu_buffer->reader_lock);
2200         lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
2201         cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
2202         INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
2203         init_completion(&cpu_buffer->update_done);
2204         init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
2205         init_waitqueue_head(&cpu_buffer->irq_work.waiters);
2206         init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
2207         mutex_init(&cpu_buffer->mapping_lock);
2208
2209         bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
2210                             GFP_KERNEL, cpu_to_node(cpu));
2211         if (!bpage)
2212                 goto fail_free_buffer;
2213
2214         rb_check_bpage(cpu_buffer, bpage);
2215
2216         cpu_buffer->reader_page = bpage;
2217
2218         if (buffer->range_addr_start) {
2219                 /*
2220                  * Range mapped buffers have the same restrictions as memory
2221                  * mapped ones do.
2222                  */
2223                 cpu_buffer->mapped = 1;
2224                 cpu_buffer->ring_meta = rb_range_meta(buffer, nr_pages, cpu);
2225                 bpage->page = rb_range_buffer(cpu_buffer, 0);
2226                 if (!bpage->page)
2227                         goto fail_free_reader;
2228                 if (cpu_buffer->ring_meta->head_buffer)
2229                         rb_meta_buffer_update(cpu_buffer, bpage);
2230                 bpage->range = 1;
2231         } else {
2232                 page = alloc_pages_node(cpu_to_node(cpu),
2233                                         GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
2234                                         cpu_buffer->buffer->subbuf_order);
2235                 if (!page)
2236                         goto fail_free_reader;
2237                 bpage->page = page_address(page);
2238                 rb_init_page(bpage->page);
2239         }
2240
2241         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2242         INIT_LIST_HEAD(&cpu_buffer->new_pages);
2243
2244         ret = rb_allocate_pages(cpu_buffer, nr_pages);
2245         if (ret < 0)
2246                 goto fail_free_reader;
2247
2248         rb_meta_validate_events(cpu_buffer);
2249
2250         /* If the boot meta was valid then this has already been updated */
2251         meta = cpu_buffer->ring_meta;
2252         if (!meta || !meta->head_buffer ||
2253             !cpu_buffer->head_page || !cpu_buffer->commit_page || !cpu_buffer->tail_page) {
2254                 if (meta && meta->head_buffer &&
2255                     (cpu_buffer->head_page || cpu_buffer->commit_page || cpu_buffer->tail_page)) {
2256                         pr_warn("Ring buffer meta buffers not all mapped\n");
2257                         if (!cpu_buffer->head_page)
2258                                 pr_warn("   Missing head_page\n");
2259                         if (!cpu_buffer->commit_page)
2260                                 pr_warn("   Missing commit_page\n");
2261                         if (!cpu_buffer->tail_page)
2262                                 pr_warn("   Missing tail_page\n");
2263                 }
2264
2265                 cpu_buffer->head_page
2266                         = list_entry(cpu_buffer->pages, struct buffer_page, list);
2267                 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
2268
2269                 rb_head_page_activate(cpu_buffer);
2270
2271                 if (cpu_buffer->ring_meta)
2272                         meta->commit_buffer = meta->head_buffer;
2273         } else {
2274                 /* The valid meta buffer still needs to activate the head page */
2275                 rb_head_page_activate(cpu_buffer);
2276         }
2277
2278         return cpu_buffer;
2279
2280  fail_free_reader:
2281         free_buffer_page(cpu_buffer->reader_page);
2282
2283  fail_free_buffer:
2284         kfree(cpu_buffer);
2285         return NULL;
2286 }
2287
2288 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
2289 {
2290         struct list_head *head = cpu_buffer->pages;
2291         struct buffer_page *bpage, *tmp;
2292
2293         irq_work_sync(&cpu_buffer->irq_work.work);
2294
2295         free_buffer_page(cpu_buffer->reader_page);
2296
2297         if (head) {
2298                 rb_head_page_deactivate(cpu_buffer);
2299
2300                 list_for_each_entry_safe(bpage, tmp, head, list) {
2301                         list_del_init(&bpage->list);
2302                         free_buffer_page(bpage);
2303                 }
2304                 bpage = list_entry(head, struct buffer_page, list);
2305                 free_buffer_page(bpage);
2306         }
2307
2308         free_page((unsigned long)cpu_buffer->free_page);
2309
2310         kfree(cpu_buffer);
2311 }
2312
2313 static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
2314                                          int order, unsigned long start,
2315                                          unsigned long end,
2316                                          struct lock_class_key *key)
2317 {
2318         struct trace_buffer *buffer;
2319         long nr_pages;
2320         int subbuf_size;
2321         int bsize;
2322         int cpu;
2323         int ret;
2324
2325         /* keep it in its own cache line */
2326         buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
2327                          GFP_KERNEL);
2328         if (!buffer)
2329                 return NULL;
2330
2331         if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
2332                 goto fail_free_buffer;
2333
2334         buffer->subbuf_order = order;
2335         subbuf_size = (PAGE_SIZE << order);
2336         buffer->subbuf_size = subbuf_size - BUF_PAGE_HDR_SIZE;
2337
2338         /* Max payload is buffer page size - header (8 bytes) */
2339         buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2);
2340
2341         buffer->flags = flags;
2342         buffer->clock = trace_clock_local;
2343         buffer->reader_lock_key = key;
2344
2345         init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
2346         init_waitqueue_head(&buffer->irq_work.waiters);
2347
2348         buffer->cpus = nr_cpu_ids;
2349
2350         bsize = sizeof(void *) * nr_cpu_ids;
2351         buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
2352                                   GFP_KERNEL);
2353         if (!buffer->buffers)
2354                 goto fail_free_cpumask;
2355
2356         /* If start/end are specified, then that overrides size */
2357         if (start && end) {
2358                 unsigned long ptr;
2359                 int n;
2360
2361                 size = end - start;
2362                 size = size / nr_cpu_ids;
2363
2364                 /*
2365                  * The number of sub-buffers (nr_pages) is determined by
2366                  * taking the size allotted to each CPU, subtracting the
2367                  * meta data size, and dividing by the sub-buffer size
2368                  * plus the size of the integer index that each sub-buffer
2369                  * adds to the meta data array.
2370                  */
2371                 nr_pages = (size - sizeof(struct ring_buffer_meta)) /
2372                         (subbuf_size + sizeof(int));
2373                 /* Need at least two pages plus the reader page */
2374                 if (nr_pages < 3)
2375                         goto fail_free_buffers;
2376
2377  again:
2378                 /* Make sure that the size fits aligned */
2379                 for (n = 0, ptr = start; n < nr_cpu_ids; n++) {
2380                         ptr += sizeof(struct ring_buffer_meta) +
2381                                 sizeof(int) * nr_pages;
2382                         ptr = ALIGN(ptr, subbuf_size);
2383                         ptr += subbuf_size * nr_pages;
2384                 }
2385                 if (ptr > end) {
2386                         if (nr_pages <= 3)
2387                                 goto fail_free_buffers;
2388                         nr_pages--;
2389                         goto again;
2390                 }
2391
2392                 /* nr_pages should not count the reader page */
2393                 nr_pages--;
2394                 buffer->range_addr_start = start;
2395                 buffer->range_addr_end = end;
2396
2397                 rb_range_meta_init(buffer, nr_pages);
2398         } else {
2399
2400                 /* need at least two pages */
2401                 nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
2402                 if (nr_pages < 2)
2403                         nr_pages = 2;
2404         }
2405
2406         cpu = raw_smp_processor_id();
2407         cpumask_set_cpu(cpu, buffer->cpumask);
2408         buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
2409         if (!buffer->buffers[cpu])
2410                 goto fail_free_buffers;
2411
2412         ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
2413         if (ret < 0)
2414                 goto fail_free_buffers;
2415
2416         mutex_init(&buffer->mutex);
2417
2418         return buffer;
2419
2420  fail_free_buffers:
2421         for_each_buffer_cpu(buffer, cpu) {
2422                 if (buffer->buffers[cpu])
2423                         rb_free_cpu_buffer(buffer->buffers[cpu]);
2424         }
2425         kfree(buffer->buffers);
2426
2427  fail_free_cpumask:
2428         free_cpumask_var(buffer->cpumask);
2429
2430  fail_free_buffer:
2431         kfree(buffer);
2432         return NULL;
2433 }
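
/*
 * Worked example of the range sizing above (illustrative values):
 * an 8M range across 4 CPUs gives size = 2M per CPU; with 4K
 * sub-buffers and a 56-byte meta on 64-bit that is roughly
 *
 *	nr_pages = (2M - 56) / (4096 + sizeof(int));	// ~511
 *
 * nr_pages is then walked down until the meta data, index array and
 * aligned sub-buffers of all CPUs fit inside [start, end), and
 * finally decremented once more so it no longer counts the reader
 * page.
 */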
2434
2435 /**
2436  * __ring_buffer_alloc - allocate a new ring_buffer
2437  * @size: the size in bytes per cpu that is needed.
2438  * @flags: attributes to set for the ring buffer.
2439  * @key: ring buffer reader_lock_key.
2440  *
2441  * Currently the only flag that is available is the RB_FL_OVERWRITE
2442  * flag. This flag means that the buffer will overwrite old data
2443  * when the buffer wraps. If this flag is not set, the buffer will
2444  * drop data when the tail hits the head.
2445  */
2446 struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
2447                                         struct lock_class_key *key)
2448 {
2449         /* Default buffer page size - one system page */
2450         return alloc_buffer(size, flags, 0, 0, 0, key);
2451 }
2453 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
2454
2455 /**
2456  * __ring_buffer_alloc_range - allocate a new ring_buffer from existing memory
2457  * @size: the size in bytes per cpu that is needed.
2458  * @flags: attributes to set for the ring buffer.
2459  * @order: sub-buffer order
2460  * @start: start of allocated range
2461  * @range_size: size of allocated range
2462  * @key: ring buffer reader_lock_key.
2463  *
2464  * Currently the only flag that is available is the RB_FL_OVERWRITE
2465  * flag. This flag means that the buffer will overwrite old data
2466  * when the buffer wraps. If this flag is not set, the buffer will
2467  * drop data when the tail hits the head.
2468  */
2469 struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
2470                                                int order, unsigned long start,
2471                                                unsigned long range_size,
2472                                                struct lock_class_key *key)
2473 {
2474         return alloc_buffer(size, flags, order, start, start + range_size, key);
2475 }
2476
2477 /**
2478  * ring_buffer_last_boot_delta - return the delta offset from last boot
2479  * @buffer: The buffer to return the delta from
2480  * @text: Return text delta
2481  * @data: Return data delta
2482  *
2483  * Returns: true if the delta is non-zero
2484  */
2485 bool ring_buffer_last_boot_delta(struct trace_buffer *buffer, long *text,
2486                                  long *data)
2487 {
2488         if (!buffer)
2489                 return false;
2490
2491         if (!buffer->last_text_delta)
2492                 return false;
2493
2494         *text = buffer->last_text_delta;
2495         *data = buffer->last_data_delta;
2496
2497         return true;
2498 }
2499
2500 /**
2501  * ring_buffer_free - free a ring buffer.
2502  * @buffer: the buffer to free.
2503  */
2504 void
2505 ring_buffer_free(struct trace_buffer *buffer)
2506 {
2507         int cpu;
2508
2509         cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
2510
2511         irq_work_sync(&buffer->irq_work.work);
2512
2513         for_each_buffer_cpu(buffer, cpu)
2514                 rb_free_cpu_buffer(buffer->buffers[cpu]);
2515
2516         kfree(buffer->buffers);
2517         free_cpumask_var(buffer->cpumask);
2518
2519         kfree(buffer);
2520 }
2521 EXPORT_SYMBOL_GPL(ring_buffer_free);
2522
2523 void ring_buffer_set_clock(struct trace_buffer *buffer,
2524                            u64 (*clock)(void))
2525 {
2526         buffer->clock = clock;
2527 }
2528
2529 void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
2530 {
2531         buffer->time_stamp_abs = abs;
2532 }
2533
2534 bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
2535 {
2536         return buffer->time_stamp_abs;
2537 }
2538
2539 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
2540 {
2541         return local_read(&bpage->entries) & RB_WRITE_MASK;
2542 }
2543
2544 static inline unsigned long rb_page_write(struct buffer_page *bpage)
2545 {
2546         return local_read(&bpage->write) & RB_WRITE_MASK;
2547 }
2548
2549 static bool
2550 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
2551 {
2552         struct list_head *tail_page, *to_remove, *next_page;
2553         struct buffer_page *to_remove_page, *tmp_iter_page;
2554         struct buffer_page *last_page, *first_page;
2555         unsigned long nr_removed;
2556         unsigned long head_bit;
2557         int page_entries;
2558
2559         head_bit = 0;
2560
2561         raw_spin_lock_irq(&cpu_buffer->reader_lock);
2562         atomic_inc(&cpu_buffer->record_disabled);
2563         /*
2564          * We don't race with the readers since we have acquired the reader
2565          * lock. We also don't race with writers after disabling recording.
2566          * This makes it easy to figure out the first and the last page to be
2567          * removed from the list. We unlink all the pages in between including
2568          * the first and last pages. This is done in a busy loop so that we
2569          * lose the least number of traces.
2570          * The pages are freed after we restart recording and unlock readers.
2571          */
2572         tail_page = &cpu_buffer->tail_page->list;
2573
2574         /*
2575          * tail page might be on reader page, we remove the next page
2576          * from the ring buffer
2577          */
2578         if (cpu_buffer->tail_page == cpu_buffer->reader_page)
2579                 tail_page = rb_list_head(tail_page->next);
2580         to_remove = tail_page;
2581
2582         /* start of pages to remove */
2583         first_page = list_entry(rb_list_head(to_remove->next),
2584                                 struct buffer_page, list);
2585
2586         for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
2587                 to_remove = rb_list_head(to_remove)->next;
2588                 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
2589         }
2590         /* Read iterators need to reset themselves when some pages removed */
2591         cpu_buffer->pages_removed += nr_removed;
2592
2593         next_page = rb_list_head(to_remove)->next;
2594
2595         /*
2596          * Now we remove all pages between tail_page and next_page.
2597          * Make sure that we have head_bit value preserved for the
2598          * next page
2599          */
2600         tail_page->next = (struct list_head *)((unsigned long)next_page |
2601                                                 head_bit);
2602         next_page = rb_list_head(next_page);
2603         next_page->prev = tail_page;
2604
2605         /* make sure pages points to a valid page in the ring buffer */
2606         cpu_buffer->pages = next_page;
2607         cpu_buffer->cnt++;
2608
2609         /* update head page */
2610         if (head_bit)
2611                 cpu_buffer->head_page = list_entry(next_page,
2612                                                 struct buffer_page, list);
2613
2614         /* pages are removed, resume tracing and then free the pages */
2615         atomic_dec(&cpu_buffer->record_disabled);
2616         raw_spin_unlock_irq(&cpu_buffer->reader_lock);
2617
2618         RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
2619
2620         /* last buffer page to remove */
2621         last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
2622                                 list);
2623         tmp_iter_page = first_page;
2624
2625         do {
2626                 cond_resched();
2627
2628                 to_remove_page = tmp_iter_page;
2629                 rb_inc_page(&tmp_iter_page);
2630
2631                 /* update the counters */
2632                 page_entries = rb_page_entries(to_remove_page);
2633                 if (page_entries) {
2634                         /*
2635                          * If something was added to this page, it was full
2636                          * since it is not the tail page. So we deduct the
2637                          * bytes consumed in ring buffer from here.
2638                          * Increment overrun to account for the lost events.
2639                          */
2640                         local_add(page_entries, &cpu_buffer->overrun);
2641                         local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
2642                         local_inc(&cpu_buffer->pages_lost);
2643                 }
2644
2645                 /*
2646                  * We have already removed references to this list item, just
2647                  * free up the buffer_page and its page
2648                  */
2649                 free_buffer_page(to_remove_page);
2650                 nr_removed--;
2651
2652         } while (to_remove_page != last_page);
2653
2654         RB_WARN_ON(cpu_buffer, nr_removed);
2655
2656         return nr_removed == 0;
2657 }
2658
2659 static bool
2660 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
2661 {
2662         struct list_head *pages = &cpu_buffer->new_pages;
2663         unsigned long flags;
2664         bool success;
2665         int retries;
2666
2667         /* Can be called at early boot up, where interrupts must not be enabled */
2668         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2669         /*
2670          * We are holding the reader lock, so the reader page won't be swapped
2671          * in the ring buffer. Now we are racing with the writer trying to
2672          * move head page and the tail page.
2673          * We are going to adapt the reader page update process where:
2674          * 1. We first splice the start and end of list of new pages between
2675          *    the head page and its previous page.
2676          * 2. We cmpxchg the prev_page->next to point from head page to the
2677          *    start of new pages list.
2678          * 3. Finally, we update the head->prev to the end of new list.
2679          *
2680          * We will try this process 10 times, to make sure that we don't keep
2681          * spinning.
2682          */
2683         retries = 10;
2684         success = false;
2685         while (retries--) {
2686                 struct list_head *head_page, *prev_page;
2687                 struct list_head *last_page, *first_page;
2688                 struct list_head *head_page_with_bit;
2689                 struct buffer_page *hpage = rb_set_head_page(cpu_buffer);
2690
2691                 if (!hpage)
2692                         break;
2693                 head_page = &hpage->list;
2694                 prev_page = head_page->prev;
2695
2696                 first_page = pages->next;
2697                 last_page  = pages->prev;
2698
2699                 head_page_with_bit = (struct list_head *)
2700                                      ((unsigned long)head_page | RB_PAGE_HEAD);
2701
2702                 last_page->next = head_page_with_bit;
2703                 first_page->prev = prev_page;
2704
2705                 /* caution: head_page_with_bit gets updated on cmpxchg failure */
2706                 if (try_cmpxchg(&prev_page->next,
2707                                 &head_page_with_bit, first_page)) {
2708                         /*
2709                          * Yay, we replaced the page pointer to our new list;
2710                          * now we just have to update the head page's prev
2711                          * pointer to point to the end of the list.
2712                          */
2713                         head_page->prev = last_page;
2714                         cpu_buffer->cnt++;
2715                         success = true;
2716                         break;
2717                 }
2718         }
2719
2720         if (success)
2721                 INIT_LIST_HEAD(pages);
2722         /*
2723          * If we weren't successful in adding in new pages, warn and stop
2724          * tracing
2725          */
2726         RB_WARN_ON(cpu_buffer, !success);
2727         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2728
2729         /* free pages if they weren't inserted */
2730         if (!success) {
2731                 struct buffer_page *bpage, *tmp;
2732                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2733                                          list) {
2734                         list_del_init(&bpage->list);
2735                         free_buffer_page(bpage);
2736                 }
2737         }
2738         return success;
2739 }
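
/*
 * The insertion above in three steps (illustrative diagram), with H
 * the head page, P its previous page, and F..L the first..last new
 * pages:
 *
 *	before:  P -> H(HEAD)
 *	step 1:  F->prev = P,  L->next = H|HEAD		(private, safe)
 *	step 2:  cmpxchg(&P->next, H|HEAD, F)		(publish point)
 *	step 3:  H->prev = L
 *
 * If a writer moves the head between steps 1 and 2, P->next no longer
 * carries H|HEAD, the cmpxchg fails, and the loop retries against the
 * new head page.
 */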
2740
2741 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
2742 {
2743         bool success;
2744
2745         if (cpu_buffer->nr_pages_to_update > 0)
2746                 success = rb_insert_pages(cpu_buffer);
2747         else
2748                 success = rb_remove_pages(cpu_buffer,
2749                                         -cpu_buffer->nr_pages_to_update);
2750
2751         if (success)
2752                 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
2753 }
2754
2755 static void update_pages_handler(struct work_struct *work)
2756 {
2757         struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
2758                         struct ring_buffer_per_cpu, update_pages_work);
2759         rb_update_pages(cpu_buffer);
2760         complete(&cpu_buffer->update_done);
2761 }
2762
2763 /**
2764  * ring_buffer_resize - resize the ring buffer
2765  * @buffer: the buffer to resize.
2766  * @size: the new size.
2767  * @cpu_id: the cpu buffer to resize
2768  *
2769  * Minimum size is 2 * buffer->subbuf_size.
2770  *
2771  * Returns 0 on success and < 0 on failure.
2772  */
2773 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
2774                         int cpu_id)
2775 {
2776         struct ring_buffer_per_cpu *cpu_buffer;
2777         unsigned long nr_pages;
2778         int cpu, err;
2779
2780         /*
2781          * Always succeed at resizing a non-existent buffer:
2782          */
2783         if (!buffer)
2784                 return 0;
2785
2786         /* Make sure the requested buffer exists */
2787         if (cpu_id != RING_BUFFER_ALL_CPUS &&
2788             !cpumask_test_cpu(cpu_id, buffer->cpumask))
2789                 return 0;
2790
2791         nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
2792
2793         /* we need a minimum of two pages */
2794         if (nr_pages < 2)
2795                 nr_pages = 2;
2796
2797         /* prevent another thread from changing buffer sizes */
2798         mutex_lock(&buffer->mutex);
2799         atomic_inc(&buffer->resizing);
2800
2801         if (cpu_id == RING_BUFFER_ALL_CPUS) {
2802                 /*
2803                  * Don't succeed if resizing is disabled, as a reader might be
2804                  * manipulating the ring buffer and is expecting a sane state while
2805                  * this is true.
2806                  */
2807                 for_each_buffer_cpu(buffer, cpu) {
2808                         cpu_buffer = buffer->buffers[cpu];
2809                         if (atomic_read(&cpu_buffer->resize_disabled)) {
2810                                 err = -EBUSY;
2811                                 goto out_err_unlock;
2812                         }
2813                 }
2814
2815                 /* calculate the pages to update */
2816                 for_each_buffer_cpu(buffer, cpu) {
2817                         cpu_buffer = buffer->buffers[cpu];
2818
2819                         cpu_buffer->nr_pages_to_update = nr_pages -
2820                                                         cpu_buffer->nr_pages;
2821                         /*
2822                          * Nothing more to do when removing pages or when there is no update
2823                          */
2824                         if (cpu_buffer->nr_pages_to_update <= 0)
2825                                 continue;
2826                         /*
2827                          * To add pages, make sure all new pages can be
2828                          * allocated without receiving ENOMEM
2829                          */
2830                         INIT_LIST_HEAD(&cpu_buffer->new_pages);
2831                         if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2832                                                 &cpu_buffer->new_pages)) {
2833                                 /* not enough memory for new pages */
2834                                 err = -ENOMEM;
2835                                 goto out_err;
2836                         }
2837
2838                         cond_resched();
2839                 }
2840
2841                 cpus_read_lock();
2842                 /*
2843                  * Fire off all the required work handlers
2844                  * We can't schedule on offline CPUs, but it's not necessary
2845                  * since we can change their buffer sizes without any race.
2846                  */
2847                 for_each_buffer_cpu(buffer, cpu) {
2848                         cpu_buffer = buffer->buffers[cpu];
2849                         if (!cpu_buffer->nr_pages_to_update)
2850                                 continue;
2851
2852                         /* Can't run something on an offline CPU. */
2853                         if (!cpu_online(cpu)) {
2854                                 rb_update_pages(cpu_buffer);
2855                                 cpu_buffer->nr_pages_to_update = 0;
2856                         } else {
2857                                 /* Run directly if possible. */
2858                                 migrate_disable();
2859                                 if (cpu != smp_processor_id()) {
2860                                         migrate_enable();
2861                                         schedule_work_on(cpu,
2862                                                          &cpu_buffer->update_pages_work);
2863                                 } else {
2864                                         update_pages_handler(&cpu_buffer->update_pages_work);
2865                                         migrate_enable();
2866                                 }
2867                         }
2868                 }
2869
2870                 /* wait for all the updates to complete */
2871                 for_each_buffer_cpu(buffer, cpu) {
2872                         cpu_buffer = buffer->buffers[cpu];
2873                         if (!cpu_buffer->nr_pages_to_update)
2874                                 continue;
2875
2876                         if (cpu_online(cpu))
2877                                 wait_for_completion(&cpu_buffer->update_done);
2878                         cpu_buffer->nr_pages_to_update = 0;
2879                 }
2880
2881                 cpus_read_unlock();
2882         } else {
2883                 cpu_buffer = buffer->buffers[cpu_id];
2884
2885                 if (nr_pages == cpu_buffer->nr_pages)
2886                         goto out;
2887
2888                 /*
2889                  * Don't succeed if resizing is disabled, as a reader might be
2890                  * manipulating the ring buffer and expecting a sane state while
2891                  * this is true.
2892                  */
2893                 if (atomic_read(&cpu_buffer->resize_disabled)) {
2894                         err = -EBUSY;
2895                         goto out_err_unlock;
2896                 }
2897
2898                 cpu_buffer->nr_pages_to_update = nr_pages -
2899                                                 cpu_buffer->nr_pages;
2900
2901                 INIT_LIST_HEAD(&cpu_buffer->new_pages);
2902                 if (cpu_buffer->nr_pages_to_update > 0 &&
2903                         __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2904                                             &cpu_buffer->new_pages)) {
2905                         err = -ENOMEM;
2906                         goto out_err;
2907                 }
2908
2909                 cpus_read_lock();
2910
2911                 /* Can't run something on an offline CPU. */
2912                 if (!cpu_online(cpu_id))
2913                         rb_update_pages(cpu_buffer);
2914                 else {
2915                         /* Run directly if possible. */
2916                         migrate_disable();
2917                         if (cpu_id == smp_processor_id()) {
2918                                 rb_update_pages(cpu_buffer);
2919                                 migrate_enable();
2920                         } else {
2921                                 migrate_enable();
2922                                 schedule_work_on(cpu_id,
2923                                                  &cpu_buffer->update_pages_work);
2924                                 wait_for_completion(&cpu_buffer->update_done);
2925                         }
2926                 }
2927
2928                 cpu_buffer->nr_pages_to_update = 0;
2929                 cpus_read_unlock();
2930         }
2931
2932  out:
2933         /*
2934          * The ring buffer resize can happen with the ring buffer
2935          * enabled, so that the update disturbs the tracing as little
2936          * as possible. But if the buffer is disabled, we do not need
2937          * to worry about that, and we can take the time to verify
2938          * that the buffer is not corrupt.
2939          */
2940         if (atomic_read(&buffer->record_disabled)) {
2941                 atomic_inc(&buffer->record_disabled);
2942                 /*
2943                  * Even though the buffer was disabled, we must make sure
2944                  * that it is truly disabled before calling rb_check_pages.
2945                  * There could have been a race between checking
2946                  * record_disable and incrementing it.
2947                  */
2948                 synchronize_rcu();
2949                 for_each_buffer_cpu(buffer, cpu) {
2950                         cpu_buffer = buffer->buffers[cpu];
2951                         rb_check_pages(cpu_buffer);
2952                 }
2953                 atomic_dec(&buffer->record_disabled);
2954         }
2955
2956         atomic_dec(&buffer->resizing);
2957         mutex_unlock(&buffer->mutex);
2958         return 0;
2959
2960  out_err:
2961         for_each_buffer_cpu(buffer, cpu) {
2962                 struct buffer_page *bpage, *tmp;
2963
2964                 cpu_buffer = buffer->buffers[cpu];
2965                 cpu_buffer->nr_pages_to_update = 0;
2966
2967                 if (list_empty(&cpu_buffer->new_pages))
2968                         continue;
2969
2970                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2971                                         list) {
2972                         list_del_init(&bpage->list);
2973                         free_buffer_page(bpage);
2974                 }
2975         }
2976  out_err_unlock:
2977         atomic_dec(&buffer->resizing);
2978         mutex_unlock(&buffer->mutex);
2979         return err;
2980 }
2981 EXPORT_SYMBOL_GPL(ring_buffer_resize);
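
/*
 * Editorial usage sketch (not part of the original file): resizing the
 * whole buffer. The helper name and the one megabyte figure are
 * hypothetical; the size is given in bytes and is converted to whole
 * sub-buffers internally. Passing a CPU number instead of
 * RING_BUFFER_ALL_CPUS resizes only that CPU's buffer.
 */
static inline int example_grow_buffer(struct trace_buffer *buffer)
{
        return ring_buffer_resize(buffer, 1024 * 1024, RING_BUFFER_ALL_CPUS);
}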
2982
2983 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
2984 {
2985         mutex_lock(&buffer->mutex);
2986         if (val)
2987                 buffer->flags |= RB_FL_OVERWRITE;
2988         else
2989                 buffer->flags &= ~RB_FL_OVERWRITE;
2990         mutex_unlock(&buffer->mutex);
2991 }
2992 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
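
/*
 * Editorial sketch (hypothetical helper): clearing RB_FL_OVERWRITE
 * switches the buffer to producer/consumer mode, where new events are
 * dropped once the buffer fills (see the !RB_FL_OVERWRITE check in
 * rb_move_tail() below, which bumps dropped_events).
 */
static inline void example_use_producer_consumer(struct trace_buffer *buffer)
{
        ring_buffer_change_overwrite(buffer, 0);
}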
2993
2994 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
2995 {
2996         return bpage->page->data + index;
2997 }
2998
2999 static __always_inline struct ring_buffer_event *
3000 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
3001 {
3002         return __rb_page_index(cpu_buffer->reader_page,
3003                                cpu_buffer->reader_page->read);
3004 }
3005
3006 static struct ring_buffer_event *
3007 rb_iter_head_event(struct ring_buffer_iter *iter)
3008 {
3009         struct ring_buffer_event *event;
3010         struct buffer_page *iter_head_page = iter->head_page;
3011         unsigned long commit;
3012         unsigned length;
3013
3014         if (iter->head != iter->next_event)
3015                 return iter->event;
3016
3017         /*
3018          * When the writer goes across pages, it issues a cmpxchg which
3019          * is a mb(), which will synchronize with the rmb here.
3020          * (see rb_tail_page_update() and __rb_reserve_next())
3021          */
3022         commit = rb_page_commit(iter_head_page);
3023         smp_rmb();
3024
3025         /* An event needs to be at least 8 bytes in size */
3026         if (iter->head > commit - 8)
3027                 goto reset;
3028
3029         event = __rb_page_index(iter_head_page, iter->head);
3030         length = rb_event_length(event);
3031
3032         /*
3033          * READ_ONCE() doesn't work on functions and we don't want the
3034          * compiler doing any crazy optimizations with length.
3035          */
3036         barrier();
3037
3038         if ((iter->head + length) > commit || length > iter->event_size)
3039                 /* Writer corrupted the read? */
3040                 goto reset;
3041
3042         memcpy(iter->event, event, length);
3043         /*
3044          * If the page stamp is still the same after this rmb() then the
3045          * event was safely copied without the writer entering the page.
3046          */
3047         smp_rmb();
3048
3049         /* Make sure the page didn't change since we read this */
3050         if (iter->page_stamp != iter_head_page->page->time_stamp ||
3051             commit > rb_page_commit(iter_head_page))
3052                 goto reset;
3053
3054         iter->next_event = iter->head + length;
3055         return iter->event;
3056  reset:
3057         /* Reset to the beginning */
3058         iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
3059         iter->head = 0;
3060         iter->next_event = 0;
3061         iter->missed_events = 1;
3062         return NULL;
3063 }
3064
3065 /* Size is determined by what has been committed */
3066 static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
3067 {
3068         return rb_page_commit(bpage) & ~RB_MISSED_MASK;
3069 }
3070
3071 static __always_inline unsigned
3072 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
3073 {
3074         return rb_page_commit(cpu_buffer->commit_page);
3075 }
3076
3077 static __always_inline unsigned
3078 rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event)
3079 {
3080         unsigned long addr = (unsigned long)event;
3081
3082         addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1;
3083
3084         return addr - BUF_PAGE_HDR_SIZE;
3085 }
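
/*
 * Editorial worked example (assumed numbers): with a 4K sub-buffer
 * (subbuf_order == 0, PAGE_SIZE == 4096) and a 16-byte sub-buffer
 * header, an event at address ...e234 masks down to offset 0x234
 * within its sub-buffer, giving an index of 0x234 - 0x10 = 0x224.
 */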
3086
3087 static void rb_inc_iter(struct ring_buffer_iter *iter)
3088 {
3089         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3090
3091         /*
3092          * The iterator could be on the reader page (it starts there).
3093          * But the head could have moved, since the reader was
3094          * found. Check for this case and assign the iterator
3095          * to the head page instead of next.
3096          */
3097         if (iter->head_page == cpu_buffer->reader_page)
3098                 iter->head_page = rb_set_head_page(cpu_buffer);
3099         else
3100                 rb_inc_page(&iter->head_page);
3101
3102         iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
3103         iter->head = 0;
3104         iter->next_event = 0;
3105 }
3106
3107 /* Return the index into the sub-buffers for a given sub-buffer */
3108 static int rb_meta_subbuf_idx(struct ring_buffer_meta *meta, void *subbuf)
3109 {
3110         void *subbuf_array;
3111
3112         subbuf_array = (void *)meta + sizeof(int) * meta->nr_subbufs;
3113         subbuf_array = (void *)ALIGN((unsigned long)subbuf_array, meta->subbuf_size);
3114         return (subbuf - subbuf_array) / meta->subbuf_size;
3115 }
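
/*
 * Editorial sketch (hypothetical helper, not in the original): the
 * inverse mapping of rb_meta_subbuf_idx(), mirroring the same layout
 * math. The sub-buffers sit after the buffers[] index array, aligned
 * up to the sub-buffer size.
 */
static inline void *example_meta_subbuf_addr(struct ring_buffer_meta *meta,
                                             int idx)
{
        void *subbuf_array;

        subbuf_array = (void *)meta + sizeof(int) * meta->nr_subbufs;
        subbuf_array = (void *)ALIGN((unsigned long)subbuf_array,
                                     meta->subbuf_size);
        return subbuf_array + (unsigned long)idx * meta->subbuf_size;
}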
3116
3117 static void rb_update_meta_head(struct ring_buffer_per_cpu *cpu_buffer,
3118                                 struct buffer_page *next_page)
3119 {
3120         struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
3121         unsigned long old_head = (unsigned long)next_page->page;
3122         unsigned long new_head;
3123
3124         rb_inc_page(&next_page);
3125         new_head = (unsigned long)next_page->page;
3126
3127         /*
3128          * Only move it forward once. If something else came in and
3129          * moved it forward, then we don't want to touch it.
3130          */
3131         (void)cmpxchg(&meta->head_buffer, old_head, new_head);
3132 }
3133
3134 static void rb_update_meta_reader(struct ring_buffer_per_cpu *cpu_buffer,
3135                                   struct buffer_page *reader)
3136 {
3137         struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
3138         void *old_reader = cpu_buffer->reader_page->page;
3139         void *new_reader = reader->page;
3140         int id;
3141
3142         id = reader->id;
3143         cpu_buffer->reader_page->id = id;
3144         reader->id = 0;
3145
3146         meta->buffers[0] = rb_meta_subbuf_idx(meta, new_reader);
3147         meta->buffers[id] = rb_meta_subbuf_idx(meta, old_reader);
3148
3149         /* The head pointer is the one after the reader */
3150         rb_update_meta_head(cpu_buffer, reader);
3151 }
3152
3153 /*
3154  * rb_handle_head_page - writer hit the head page
3155  *
3156  * Returns: +1 to retry page
3157  *           0 to continue
3158  *          -1 on error
3159  */
3160 static int
3161 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
3162                     struct buffer_page *tail_page,
3163                     struct buffer_page *next_page)
3164 {
3165         struct buffer_page *new_head;
3166         int entries;
3167         int type;
3168         int ret;
3169
3170         entries = rb_page_entries(next_page);
3171
3172         /*
3173          * The hard part is here. We need to move the head
3174          * forward, and protect against both readers on
3175          * other CPUs and writers coming in via interrupts.
3176          */
3177         type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
3178                                        RB_PAGE_HEAD);
3179
3180         /*
3181          * type can be one of four:
3182          *  NORMAL - an interrupt already moved it for us
3183          *  HEAD   - we are the first to get here.
3184          *  UPDATE - we are the interrupt interrupting
3185          *           a current move.
3186          *  MOVED  - a reader on another CPU moved the next
3187          *           pointer to its reader page. Give up
3188          *           and try again.
3189          */
3190
3191         switch (type) {
3192         case RB_PAGE_HEAD:
3193                 /*
3194                  * We changed the head to UPDATE, thus
3195                  * it is our responsibility to update
3196                  * the counters.
3197                  */
3198                 local_add(entries, &cpu_buffer->overrun);
3199                 local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
3200                 local_inc(&cpu_buffer->pages_lost);
3201
3202                 if (cpu_buffer->ring_meta)
3203                         rb_update_meta_head(cpu_buffer, next_page);
3204                 /*
3205                  * The entries will be zeroed out when we move the
3206                  * tail page.
3207                  */
3208
3209                 /* still more to do */
3210                 break;
3211
3212         case RB_PAGE_UPDATE:
3213                 /*
3214                  * This is an interrupt that interrupted the
3215                  * previous update. Still more to do.
3216                  */
3217                 break;
3218         case RB_PAGE_NORMAL:
3219                 /*
3220                  * An interrupt came in before the update
3221                  * and processed this for us.
3222                  * Nothing left to do.
3223                  */
3224                 return 1;
3225         case RB_PAGE_MOVED:
3226                 /*
3227                  * The reader is on another CPU and just did
3228                  * a swap with our next_page.
3229                  * Try again.
3230                  */
3231                 return 1;
3232         default:
3233                 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
3234                 return -1;
3235         }
3236
3237         /*
3238          * Now that we are here, the old head pointer is
3239          * set to UPDATE. This will keep the reader from
3240          * swapping the head page with the reader page.
3241          * The reader (on another CPU) will spin till
3242          * we are finished.
3243          *
3244          * We just need to protect against interrupts
3245          * doing the job. We will set the next pointer
3246          * to HEAD. After that, we set the old pointer
3247          * to NORMAL, but only if it was HEAD before.
3248          * Otherwise we are an interrupt, and only
3249          * want the outermost commit to reset it.
3250          */
3251         new_head = next_page;
3252         rb_inc_page(&new_head);
3253
3254         ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
3255                                     RB_PAGE_NORMAL);
3256
3257         /*
3258          * Valid returns are:
3259          *  HEAD   - an interrupt came in and already set it.
3260          *  NORMAL - One of two things:
3261          *            1) We really set it.
3262          *            2) A bunch of interrupts came in and moved
3263          *               the page forward again.
3264          */
3265         switch (ret) {
3266         case RB_PAGE_HEAD:
3267         case RB_PAGE_NORMAL:
3268                 /* OK */
3269                 break;
3270         default:
3271                 RB_WARN_ON(cpu_buffer, 1);
3272                 return -1;
3273         }
3274
3275         /*
3276          * It is possible that an interrupt came in,
3277          * set the head up, then more interrupts came in
3278          * and moved it again. When we get back here,
3279          * the page would have been set to NORMAL but we
3280          * just set it back to HEAD.
3281          *
3282          * How do you detect this? Well, if that happened
3283          * the tail page would have moved.
3284          */
3285         if (ret == RB_PAGE_NORMAL) {
3286                 struct buffer_page *buffer_tail_page;
3287
3288                 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
3289                 /*
3290          * If the tail had moved past next, then we need
3291                  * to reset the pointer.
3292                  */
3293                 if (buffer_tail_page != tail_page &&
3294                     buffer_tail_page != next_page)
3295                         rb_head_page_set_normal(cpu_buffer, new_head,
3296                                                 next_page,
3297                                                 RB_PAGE_HEAD);
3298         }
3299
3300         /*
3301          * If this was the outermost commit (the one that
3302          * changed the original pointer from HEAD to UPDATE),
3303          * then it is up to us to reset it to NORMAL.
3304          */
3305         if (type == RB_PAGE_HEAD) {
3306                 ret = rb_head_page_set_normal(cpu_buffer, next_page,
3307                                               tail_page,
3308                                               RB_PAGE_UPDATE);
3309                 if (RB_WARN_ON(cpu_buffer,
3310                                ret != RB_PAGE_UPDATE))
3311                         return -1;
3312         }
3313
3314         return 0;
3315 }
3316
3317 static inline void
3318 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
3319               unsigned long tail, struct rb_event_info *info)
3320 {
3321         unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
3322         struct buffer_page *tail_page = info->tail_page;
3323         struct ring_buffer_event *event;
3324         unsigned long length = info->length;
3325
3326         /*
3327          * Only the event that crossed the page boundary
3328          * must fill the old tail_page with padding.
3329          */
3330         if (tail >= bsize) {
3331                 /*
3332                  * If the page was filled, then we still need
3333                  * to update the real_end. Reset it to zero
3334                  * and the reader will ignore it.
3335                  */
3336                 if (tail == bsize)
3337                         tail_page->real_end = 0;
3338
3339                 local_sub(length, &tail_page->write);
3340                 return;
3341         }
3342
3343         event = __rb_page_index(tail_page, tail);
3344
3345         /*
3346          * Save the original length to the meta data.
3347          * This will be used by the reader to add to the lost event
3348          * counter.
3349          */
3350         tail_page->real_end = tail;
3351
3352         /*
3353          * If this event is bigger than the minimum size, then
3354          * we need to be careful that we don't subtract the
3355          * write counter enough to allow another writer to slip
3356          * in on this page.
3357          * We put in a discarded commit instead, to make sure
3358          * that this space is not used again, and this space will
3359          * not be accounted into 'entries_bytes'.
3360          *
3361          * If we are less than the minimum size, we don't need to
3362          * worry about it.
3363          */
3364         if (tail > (bsize - RB_EVNT_MIN_SIZE)) {
3365                 /* No room for any events */
3366
3367                 /* Mark the rest of the page with padding */
3368                 rb_event_set_padding(event);
3369
3370                 /* Make sure the padding is visible before the write update */
3371                 smp_wmb();
3372
3373                 /* Set the write back to the previous setting */
3374                 local_sub(length, &tail_page->write);
3375                 return;
3376         }
3377
3378         /* Put in a discarded event */
3379         event->array[0] = (bsize - tail) - RB_EVNT_HDR_SIZE;
3380         event->type_len = RINGBUF_TYPE_PADDING;
3381         /* time delta must be non zero */
3382         event->time_delta = 1;
3383
3384         /* account for padding bytes */
3385         local_add(bsize - tail, &cpu_buffer->entries_bytes);
3386
3387         /* Make sure the padding is visible before the tail_page->write update */
3388         smp_wmb();
3389
3390         /* Set write to end of buffer */
3391         length = (tail + length) - bsize;
3392         local_sub(length, &tail_page->write);
3393 }
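
/*
 * Editorial worked example (hypothetical numbers): with bsize = 4096,
 * tail = 4080 and an event of length 64, the reserve pushed write to
 * 4144. The 16 bytes at 4080..4095 become a padding event with
 * array[0] = 16 - RB_EVNT_HDR_SIZE = 12, and write is pulled back by
 * (4080 + 64) - 4096 = 48, leaving it exactly at the end of the
 * sub-buffer.
 */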
3394
3395 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
3396
3397 /*
3398  * This is the slow path, force gcc not to inline it.
3399  */
3400 static noinline struct ring_buffer_event *
3401 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
3402              unsigned long tail, struct rb_event_info *info)
3403 {
3404         struct buffer_page *tail_page = info->tail_page;
3405         struct buffer_page *commit_page = cpu_buffer->commit_page;
3406         struct trace_buffer *buffer = cpu_buffer->buffer;
3407         struct buffer_page *next_page;
3408         int ret;
3409
3410         next_page = tail_page;
3411
3412         rb_inc_page(&next_page);
3413
3414         /*
3415          * If for some reason, we had an interrupt storm that made
3416          * it all the way around the buffer, bail, and warn
3417          * about it.
3418          */
3419         if (unlikely(next_page == commit_page)) {
3420                 local_inc(&cpu_buffer->commit_overrun);
3421                 goto out_reset;
3422         }
3423
3424         /*
3425          * This is where the fun begins!
3426          *
3427          * We are fighting against races between a reader that
3428          * could be on another CPU trying to swap its reader
3429          * page with the buffer head.
3430          *
3431          * We are also fighting against interrupts coming in and
3432          * moving the head or tail on us as well.
3433          *
3434          * If the next page is the head page then we have filled
3435          * the buffer, unless the commit page is still on the
3436          * reader page.
3437          */
3438         if (rb_is_head_page(next_page, &tail_page->list)) {
3439
3440                 /*
3441                  * If the commit is not on the reader page, then
3442                  * move the head page.
3443                  */
3444                 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
3445                         /*
3446                          * If we are not in overwrite mode,
3447                          * this is easy, just stop here.
3448                          */
3449                         if (!(buffer->flags & RB_FL_OVERWRITE)) {
3450                                 local_inc(&cpu_buffer->dropped_events);
3451                                 goto out_reset;
3452                         }
3453
3454                         ret = rb_handle_head_page(cpu_buffer,
3455                                                   tail_page,
3456                                                   next_page);
3457                         if (ret < 0)
3458                                 goto out_reset;
3459                         if (ret)
3460                                 goto out_again;
3461                 } else {
3462                         /*
3463                          * We need to be careful here too. The
3464                          * commit page could still be on the reader
3465                          * page. We could have a small buffer, and
3466                          * have filled up the buffer with events
3467                          * from interrupts and such, and wrapped.
3468                          *
3469                          * Note, if the tail page is also on the
3470                          * reader_page, we let it move out.
3471                          */
3472                         if (unlikely((cpu_buffer->commit_page !=
3473                                       cpu_buffer->tail_page) &&
3474                                      (cpu_buffer->commit_page ==
3475                                       cpu_buffer->reader_page))) {
3476                                 local_inc(&cpu_buffer->commit_overrun);
3477                                 goto out_reset;
3478                         }
3479                 }
3480         }
3481
3482         rb_tail_page_update(cpu_buffer, tail_page, next_page);
3483
3484  out_again:
3485
3486         rb_reset_tail(cpu_buffer, tail, info);
3487
3488         /* Commit what we have for now. */
3489         rb_end_commit(cpu_buffer);
3490         /* rb_end_commit() decs committing */
3491         local_inc(&cpu_buffer->committing);
3492
3493         /* fail and let the caller try again */
3494         return ERR_PTR(-EAGAIN);
3495
3496  out_reset:
3497         /* reset write */
3498         rb_reset_tail(cpu_buffer, tail, info);
3499
3500         return NULL;
3501 }
3502
3503 /* Slow path */
3504 static struct ring_buffer_event *
3505 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3506                   struct ring_buffer_event *event, u64 delta, bool abs)
3507 {
3508         if (abs)
3509                 event->type_len = RINGBUF_TYPE_TIME_STAMP;
3510         else
3511                 event->type_len = RINGBUF_TYPE_TIME_EXTEND;
3512
3513         /* Not the first event on the page, or not delta? */
3514         if (abs || rb_event_index(cpu_buffer, event)) {
3515                 event->time_delta = delta & TS_MASK;
3516                 event->array[0] = delta >> TS_SHIFT;
3517         } else {
3518                 /* nope, just zero it */
3519                 event->time_delta = 0;
3520                 event->array[0] = 0;
3521         }
3522
3523         return skip_time_extend(event);
3524 }
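
/*
 * Editorial worked example: with TS_SHIFT = 27, a delta of
 * (1ULL << 30) + 5 is stored as time_delta = 5 (the low 27 bits) and
 * array[0] = 8 (the remaining high bits); the reader reconstructs it
 * as (8ULL << 27) + 5.
 */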
3525
3526 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
3527 static inline bool sched_clock_stable(void)
3528 {
3529         return true;
3530 }
3531 #endif
3532
3533 static void
3534 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
3535                    struct rb_event_info *info)
3536 {
3537         u64 write_stamp;
3538
3539         WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s",
3540                   (unsigned long long)info->delta,
3541                   (unsigned long long)info->ts,
3542                   (unsigned long long)info->before,
3543                   (unsigned long long)info->after,
3544                   (unsigned long long)({rb_time_read(&cpu_buffer->write_stamp, &write_stamp); write_stamp;}),
3545                   sched_clock_stable() ? "" :
3546                   "If you just came from a suspend/resume,\n"
3547                   "please switch to the trace global clock:\n"
3548                   "  echo global > /sys/kernel/tracing/trace_clock\n"
3549                   "or add trace_clock=global to the kernel command line\n");
3550 }
3551
3552 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
3553                                       struct ring_buffer_event **event,
3554                                       struct rb_event_info *info,
3555                                       u64 *delta,
3556                                       unsigned int *length)
3557 {
3558         bool abs = info->add_timestamp &
3559                 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE);
3560
3561         if (unlikely(info->delta > (1ULL << 59))) {
3562                 /*
3563                  * Some timers can use more than 59 bits, and when a timestamp
3564                  * is added to the buffer, it will lose those bits.
3565                  */
3566                 if (abs && (info->ts & TS_MSB)) {
3567                         info->delta &= ABS_TS_MASK;
3568
3569                 /* did the clock go backwards */
3570                 } else if (info->before == info->after && info->before > info->ts) {
3571                         /* not interrupted */
3572                         static int once;
3573
3574                         /*
3575                          * This is possible with a recalibration of the TSC.
3576                          * Do not produce a call stack, but just report it.
3577                          */
3578                         if (!once) {
3579                                 once++;
3580                                 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n",
3581                                         info->before, info->ts);
3582                         }
3583                 } else
3584                         rb_check_timestamp(cpu_buffer, info);
3585                 if (!abs)
3586                         info->delta = 0;
3587         }
3588         *event = rb_add_time_stamp(cpu_buffer, *event, info->delta, abs);
3589         *length -= RB_LEN_TIME_EXTEND;
3590         *delta = 0;
3591 }
3592
3593 /**
3594  * rb_update_event - update event type and data
3595  * @cpu_buffer: The per cpu buffer of the @event
3596  * @event: the event to update
3597  * @info: The info to update the @event with (contains length and delta)
3598  *
3599  * Update the type and data fields of the @event. The length
3600  * is the actual size that is written to the ring buffer,
3601  * and with this, we can determine what to place into the
3602  * data field.
3603  */
3604 static void
3605 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
3606                 struct ring_buffer_event *event,
3607                 struct rb_event_info *info)
3608 {
3609         unsigned length = info->length;
3610         u64 delta = info->delta;
3611         unsigned int nest = local_read(&cpu_buffer->committing) - 1;
3612
3613         if (!WARN_ON_ONCE(nest >= MAX_NEST))
3614                 cpu_buffer->event_stamp[nest] = info->ts;
3615
3616         /*
3617          * If we need to add a timestamp, then we
3618          * add it to the start of the reserved space.
3619          */
3620         if (unlikely(info->add_timestamp))
3621                 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length);
3622
3623         event->time_delta = delta;
3624         length -= RB_EVNT_HDR_SIZE;
3625         if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
3626                 event->type_len = 0;
3627                 event->array[0] = length;
3628         } else
3629                 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
3630 }
3631
3632 static unsigned rb_calculate_event_length(unsigned length)
3633 {
3634         struct ring_buffer_event event; /* Used only for sizeof array */
3635
3636         /* zero length can cause confusion */
3637         if (!length)
3638                 length++;
3639
3640         if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
3641                 length += sizeof(event.array[0]);
3642
3643         length += RB_EVNT_HDR_SIZE;
3644         length = ALIGN(length, RB_ARCH_ALIGNMENT);
3645
3646         /*
3647          * In case the time delta is larger than the 27 bits for it
3648          * in the header, we need to add a timestamp. If another
3649          * event comes in when trying to discard this one to increase
3650          * the length, then the timestamp will be added in the allocated
3651          * space of this event. If length is bigger than the size needed
3652          * for the TIME_EXTEND, then padding has to be used. The events
3653          * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
3654          * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
3655          * As length is a multiple of 4, we only need to worry if it
3656          * is 12 (RB_LEN_TIME_EXTEND + 4).
3657          */
3658         if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
3659                 length += RB_ALIGNMENT;
3660
3661         return length;
3662 }
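
/*
 * Editorial worked example (assuming 4-byte RB_ARCH_ALIGNMENT and no
 * forced 8-byte alignment): a requested length of 5 becomes
 * 5 + RB_EVNT_HDR_SIZE = 9, which aligns up to 12. That is exactly
 * RB_LEN_TIME_EXTEND + 4, so it is bumped to 16 to leave room for
 * valid padding.
 */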
3663
3664 static inline bool
3665 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
3666                   struct ring_buffer_event *event)
3667 {
3668         unsigned long new_index, old_index;
3669         struct buffer_page *bpage;
3670         unsigned long addr;
3671
3672         new_index = rb_event_index(cpu_buffer, event);
3673         old_index = new_index + rb_event_ts_length(event);
3674         addr = (unsigned long)event;
3675         addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
3676
3677         bpage = READ_ONCE(cpu_buffer->tail_page);
3678
3679         /*
3680          * Make sure the tail_page is still the same and
3681          * the next write location is the end of this event
3682          */
3683         if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
3684                 unsigned long write_mask =
3685                         local_read(&bpage->write) & ~RB_WRITE_MASK;
3686                 unsigned long event_length = rb_event_length(event);
3687
3688                 /*
3689                  * The before_stamp must be made different from the write_stamp
3690                  * to make sure that the next event adds an absolute
3691                  * value and does not rely on the saved write stamp, which
3692                  * is now going to be bogus.
3693                  *
3694                  * By setting the before_stamp to zero, the next event
3695                  * is not going to use the write_stamp and will instead
3696                  * create an absolute timestamp. This means there's no
3697                  * reason to update the write_stamp!
3698                  */
3699                 rb_time_set(&cpu_buffer->before_stamp, 0);
3700
3701                 /*
3702                  * If an event were to come in now, it would see that the
3703                  * write_stamp and the before_stamp are different, and assume
3704                  * that this event just added itself before updating
3705                  * the write stamp. The interrupting event will fix the
3706                  * write stamp for us, and use an absolute timestamp.
3707                  */
3708
3709                 /*
3710                  * This is on the tail page. It is possible that
3711                  * a write could come in and move the tail page
3712                  * and write to the next page. That is fine
3713                  * because we just shorten what is on this page.
3714                  */
3715                 old_index += write_mask;
3716                 new_index += write_mask;
3717
3718                 /* caution: old_index gets updated on cmpxchg failure */
3719                 if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) {
3720                         /* update counters */
3721                         local_sub(event_length, &cpu_buffer->entries_bytes);
3722                         return true;
3723                 }
3724         }
3725
3726         /* could not discard */
3727         return false;
3728 }
3729
3730 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
3731 {
3732         local_inc(&cpu_buffer->committing);
3733         local_inc(&cpu_buffer->commits);
3734 }
3735
3736 static __always_inline void
3737 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
3738 {
3739         unsigned long max_count;
3740
3741         /*
3742          * We only race with interrupts and NMIs on this CPU.
3743          * If we own the commit event, then we can commit
3744          * all others that interrupted us, since the interruptions
3745          * are in stack format (they finish before they come
3746          * back to us). This allows us to do a simple loop to
3747          * assign the commit to the tail.
3748          */
3749  again:
3750         max_count = cpu_buffer->nr_pages * 100;
3751
3752         while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
3753                 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
3754                         return;
3755                 if (RB_WARN_ON(cpu_buffer,
3756                                rb_is_reader_page(cpu_buffer->tail_page)))
3757                         return;
3758                 /*
3759                  * No need for a memory barrier here, as the update
3760                  * of the tail_page did it for this page.
3761                  */
3762                 local_set(&cpu_buffer->commit_page->page->commit,
3763                           rb_page_write(cpu_buffer->commit_page));
3764                 rb_inc_page(&cpu_buffer->commit_page);
3765                 if (cpu_buffer->ring_meta) {
3766                         struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
3767                         meta->commit_buffer = (unsigned long)cpu_buffer->commit_page->page;
3768                 }
3769                 /* add barrier to keep gcc from optimizing too much */
3770                 barrier();
3771         }
3772         while (rb_commit_index(cpu_buffer) !=
3773                rb_page_write(cpu_buffer->commit_page)) {
3774
3775                 /* Make sure the readers see the content of what is committed. */
3776                 smp_wmb();
3777                 local_set(&cpu_buffer->commit_page->page->commit,
3778                           rb_page_write(cpu_buffer->commit_page));
3779                 RB_WARN_ON(cpu_buffer,
3780                            local_read(&cpu_buffer->commit_page->page->commit) &
3781                            ~RB_WRITE_MASK);
3782                 barrier();
3783         }
3784
3785         /* again, keep gcc from optimizing */
3786         barrier();
3787
3788         /*
3789          * If an interrupt came in just after the first while loop
3790          * and pushed the tail page forward, we will be left with
3791          * a dangling commit that will never go forward.
3792          */
3793         if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
3794                 goto again;
3795 }
3796
3797 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
3798 {
3799         unsigned long commits;
3800
3801         if (RB_WARN_ON(cpu_buffer,
3802                        !local_read(&cpu_buffer->committing)))
3803                 return;
3804
3805  again:
3806         commits = local_read(&cpu_buffer->commits);
3807         /* synchronize with interrupts */
3808         barrier();
3809         if (local_read(&cpu_buffer->committing) == 1)
3810                 rb_set_commit_to_write(cpu_buffer);
3811
3812         local_dec(&cpu_buffer->committing);
3813
3814         /* synchronize with interrupts */
3815         barrier();
3816
3817         /*
3818          * Need to account for interrupts coming in between the
3819          * updating of the commit page and the clearing of the
3820          * committing counter.
3821          */
3822         if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
3823             !local_read(&cpu_buffer->committing)) {
3824                 local_inc(&cpu_buffer->committing);
3825                 goto again;
3826         }
3827 }
3828
3829 static inline void rb_event_discard(struct ring_buffer_event *event)
3830 {
3831         if (extended_time(event))
3832                 event = skip_time_extend(event);
3833
3834         /* array[0] holds the actual length for the discarded event */
3835         event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
3836         event->type_len = RINGBUF_TYPE_PADDING;
3837         /* time delta must be non zero */
3838         if (!event->time_delta)
3839                 event->time_delta = 1;
3840 }
3841
3842 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer)
3843 {
3844         local_inc(&cpu_buffer->entries);
3845         rb_end_commit(cpu_buffer);
3846 }
3847
3848 static __always_inline void
3849 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
3850 {
3851         if (buffer->irq_work.waiters_pending) {
3852                 buffer->irq_work.waiters_pending = false;
3853                 /* irq_work_queue() supplies its own memory barriers */
3854                 irq_work_queue(&buffer->irq_work.work);
3855         }
3856
3857         if (cpu_buffer->irq_work.waiters_pending) {
3858                 cpu_buffer->irq_work.waiters_pending = false;
3859                 /* irq_work_queue() supplies its own memory barriers */
3860                 irq_work_queue(&cpu_buffer->irq_work.work);
3861         }
3862
3863         if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
3864                 return;
3865
3866         if (cpu_buffer->reader_page == cpu_buffer->commit_page)
3867                 return;
3868
3869         if (!cpu_buffer->irq_work.full_waiters_pending)
3870                 return;
3871
3872         cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
3873
3874         if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
3875                 return;
3876
3877         cpu_buffer->irq_work.wakeup_full = true;
3878         cpu_buffer->irq_work.full_waiters_pending = false;
3879         /* irq_work_queue() supplies its own memory barriers */
3880         irq_work_queue(&cpu_buffer->irq_work.work);
3881 }
3882
3883 #ifdef CONFIG_RING_BUFFER_RECORD_RECURSION
3884 # define do_ring_buffer_record_recursion()      \
3885         do_ftrace_record_recursion(_THIS_IP_, _RET_IP_)
3886 #else
3887 # define do_ring_buffer_record_recursion() do { } while (0)
3888 #endif
3889
3890 /*
3891  * The lock and unlock are done within a preempt disable section.
3892  * The current_context per_cpu variable can only be modified
3893  * by the current task between lock and unlock. But it can
3894  * be modified more than once via an interrupt. To pass this
3895  * information from the lock to the unlock without having to
3896  * access the 'in_interrupt()' functions again (which do show
3897  * a bit of overhead in something as critical as function tracing),
3898  * we use a bitmask trick.
3899  *
3900  *  bit 1 =  NMI context
3901  *  bit 2 =  IRQ context
3902  *  bit 3 =  SoftIRQ context
3903  *  bit 4 =  normal context.
3904  *
3905  * This works because this is the order of contexts that can
3906  * preempt other contexts. A SoftIRQ never preempts an IRQ
3907  * context.
3908  *
3909  * When the context is determined, the corresponding bit is
3910  * checked and set (if it was set, then a recursion of that context
3911  * happened).
3912  *
3913  * On unlock, we need to clear this bit. To do so, just subtract
3914  * 1 from the current_context and AND the result with it.
3915  *
3916  * (binary)
3917  *  101 - 1 = 100
3918  *  101 & 100 = 100 (clearing bit zero)
3919  *
3920  *  1010 - 1 = 1001
3921  *  1010 & 1001 = 1000 (clearing bit 1)
3922  *
3923  * The least significant bit can be cleared this way, and it
3924  * just so happens that it is the same bit corresponding to
3925  * the current context.
3926  *
3927  * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
3928  * is set when a recursion is detected at the current context, and if
3929  * the TRANSITION bit is already set, it will fail the recursion.
3930  * This is needed because there's a lag between the changing of
3931  * interrupt context and updating the preempt count. In this case,
3932  * a false positive will be found. To handle this, one extra recursion
3933  * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
3934  * bit is already set, then it is considered a recursion and the function
3935  * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
3936  *
3937  * On the trace_recursive_unlock(), the TRANSITION bit will be the first
3938  * to be cleared. Even if it wasn't the context that set it. That is,
3939  * if an interrupt comes in while NORMAL bit is set and the ring buffer
3940  * is called before preempt_count() is updated, since the check will
3941  * be on the NORMAL bit, the TRANSITION bit will then be set. If an
3942  * NMI then comes in, it will set the NMI bit, but when the NMI code
3943  * does the trace_recursive_unlock() it will clear the TRANSITION bit
3944  * and leave the NMI bit set. But this is fine, because the interrupt
3945  * code that set the TRANSITION bit will then clear the NMI bit when it
3946  * calls trace_recursive_unlock(). If another NMI comes in, it will
3947  * set the TRANSITION bit and continue.
3948  *
3949  * Note: The TRANSITION bit only handles a single transition between contexts.
3950  */
3951
3952 static __always_inline bool
3953 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
3954 {
3955         unsigned int val = cpu_buffer->current_context;
3956         int bit = interrupt_context_level();
3957
3958         bit = RB_CTX_NORMAL - bit;
3959
3960         if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
3961                 /*
3962                  * It is possible that this was called by transitioning
3963                  * between interrupt contexts, and preempt_count() has not
3964                  * been updated yet. In this case, use the TRANSITION bit.
3965                  */
3966                 bit = RB_CTX_TRANSITION;
3967                 if (val & (1 << (bit + cpu_buffer->nest))) {
3968                         do_ring_buffer_record_recursion();
3969                         return true;
3970                 }
3971         }
3972
3973         val |= (1 << (bit + cpu_buffer->nest));
3974         cpu_buffer->current_context = val;
3975
3976         return false;
3977 }
3978
3979 static __always_inline void
3980 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
3981 {
3982         cpu_buffer->current_context &=
3983                 cpu_buffer->current_context - (1 << cpu_buffer->nest);
3984 }
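
/*
 * Editorial worked example of the unlock above, with nest == 0: if
 * current_context is 0b0110 (the NMI and IRQ bits set), then
 * 0b0110 & (0b0110 - 1) = 0b0110 & 0b0101 = 0b0100, clearing only the
 * least significant set bit (the NMI context being exited) and
 * leaving the IRQ bit in place.
 */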
3985
3986 /* The recursive locking above uses 5 bits */
3987 #define NESTED_BITS 5
3988
3989 /**
3990  * ring_buffer_nest_start - Allow tracing while nested
3991  * @buffer: The ring buffer to modify
3992  *
3993  * The ring buffer has a safety mechanism to prevent recursion.
3994  * But there may be a case where a trace needs to be done while
3995  * tracing something else. In this case, calling this function
3996  * will allow a new ring_buffer_lock_reserve() to nest within the
3997  * currently active ring_buffer_lock_reserve().
3998  *
3999  * Call this function before calling another ring_buffer_lock_reserve() and
4000  * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
4001  */
4002 void ring_buffer_nest_start(struct trace_buffer *buffer)
4003 {
4004         struct ring_buffer_per_cpu *cpu_buffer;
4005         int cpu;
4006
4007         /* Enabled by ring_buffer_nest_end() */
4008         preempt_disable_notrace();
4009         cpu = raw_smp_processor_id();
4010         cpu_buffer = buffer->buffers[cpu];
4011         /* This is the shift value for the above recursive locking */
4012         cpu_buffer->nest += NESTED_BITS;
4013 }
4014
4015 /**
4016  * ring_buffer_nest_end - Allow tracing while nested
4017  * @buffer: The ring buffer to modify
4018  *
4019  * Must be called after ring_buffer_nest_start() and after the
4020  * ring_buffer_unlock_commit().
4021  */
4022 void ring_buffer_nest_end(struct trace_buffer *buffer)
4023 {
4024         struct ring_buffer_per_cpu *cpu_buffer;
4025         int cpu;
4026
4027         /* disabled by ring_buffer_nest_start() */
4028         cpu = raw_smp_processor_id();
4029         cpu_buffer = buffer->buffers[cpu];
4030         /* This is the shift value for the above recursive locking */
4031         cpu_buffer->nest -= NESTED_BITS;
4032         preempt_enable_notrace();
4033 }
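
/*
 * Editorial usage sketch (hypothetical helper): code already inside a
 * reserve/commit pair brackets its nested pair with
 * ring_buffer_nest_start()/ring_buffer_nest_end() so the recursion
 * protection does not reject the inner reserve.
 */
static inline void example_nested_trace(struct trace_buffer *buffer)
{
        ring_buffer_nest_start(buffer);
        /*
         * A nested ring_buffer_lock_reserve() /
         * ring_buffer_unlock_commit() pair may run here.
         */
        ring_buffer_nest_end(buffer);
}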
4034
4035 /**
4036  * ring_buffer_unlock_commit - commit a reserved event
4037  * @buffer: The buffer to commit to
4038  *
4039  * This commits the data to the ring buffer, and releases any locks held.
4040  *
4041  * Must be paired with ring_buffer_lock_reserve.
4042  */
4043 int ring_buffer_unlock_commit(struct trace_buffer *buffer)
4044 {
4045         struct ring_buffer_per_cpu *cpu_buffer;
4046         int cpu = raw_smp_processor_id();
4047
4048         cpu_buffer = buffer->buffers[cpu];
4049
4050         rb_commit(cpu_buffer);
4051
4052         rb_wakeups(buffer, cpu_buffer);
4053
4054         trace_recursive_unlock(cpu_buffer);
4055
4056         preempt_enable_notrace();
4057
4058         return 0;
4059 }
4060 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
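
/*
 * Editorial usage sketch (hypothetical payload): the canonical producer
 * sequence. ring_buffer_lock_reserve() disables preemption and takes
 * the recursion lock; a NULL return means nothing was reserved and
 * nothing needs to be undone.
 */
static inline void example_write_event(struct trace_buffer *buffer, u64 val)
{
        struct ring_buffer_event *event;
        u64 *entry;

        event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        *entry = val;
        ring_buffer_unlock_commit(buffer);
}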
4061
4062 /* Special value to validate all deltas on a page. */
4063 #define CHECK_FULL_PAGE         1L
4064
4065 #ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
4066
4067 static const char *show_irq_str(int bits)
4068 {
4069         const char *type[] = {
4070                 ".",    // 0
4071                 "s",    // 1
4072                 "h",    // 2
4073                 "Hs",   // 3
4074                 "n",    // 4
4075                 "Ns",   // 5
4076                 "Nh",   // 6
4077                 "NHs",  // 7
4078         };
4079
4080         return type[bits];
4081 }
4082
4083 /* Assume this is a trace event */
4084 static const char *show_flags(struct ring_buffer_event *event)
4085 {
4086         struct trace_entry *entry;
4087         int bits = 0;
4088
4089         if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
4090                 return "X";
4091
4092         entry = ring_buffer_event_data(event);
4093
4094         if (entry->flags & TRACE_FLAG_SOFTIRQ)
4095                 bits |= 1;
4096
4097         if (entry->flags & TRACE_FLAG_HARDIRQ)
4098                 bits |= 2;
4099
4100         if (entry->flags & TRACE_FLAG_NMI)
4101                 bits |= 4;
4102
4103         return show_irq_str(bits);
4104 }
4105
4106 static const char *show_irq(struct ring_buffer_event *event)
4107 {
4108         struct trace_entry *entry;
4109
4110         if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
4111                 return "";
4112
4113         entry = ring_buffer_event_data(event);
4114         if (entry->flags & TRACE_FLAG_IRQS_OFF)
4115                 return "d";
4116         return "";
4117 }
4118
4119 static const char *show_interrupt_level(void)
4120 {
4121         unsigned long pc = preempt_count();
4122         unsigned char level = 0;
4123
4124         if (pc & SOFTIRQ_OFFSET)
4125                 level |= 1;
4126
4127         if (pc & HARDIRQ_MASK)
4128                 level |= 2;
4129
4130         if (pc & NMI_MASK)
4131                 level |= 4;
4132
4133         return show_irq_str(level);
4134 }
4135
4136 static void dump_buffer_page(struct buffer_data_page *bpage,
4137                              struct rb_event_info *info,
4138                              unsigned long tail)
4139 {
4140         struct ring_buffer_event *event;
4141         u64 ts, delta;
4142         int e;
4143
4144         ts = bpage->time_stamp;
4145         pr_warn("  [%lld] PAGE TIME STAMP\n", ts);
4146
4147         for (e = 0; e < tail; e += rb_event_length(event)) {
4148
4149                 event = (struct ring_buffer_event *)(bpage->data + e);
4150
4151                 switch (event->type_len) {
4152
4153                 case RINGBUF_TYPE_TIME_EXTEND:
4154                         delta = rb_event_time_stamp(event);
4155                         ts += delta;
4156                         pr_warn(" 0x%x: [%lld] delta:%lld TIME EXTEND\n",
4157                                 e, ts, delta);
4158                         break;
4159
4160                 case RINGBUF_TYPE_TIME_STAMP:
4161                         delta = rb_event_time_stamp(event);
4162                         ts = rb_fix_abs_ts(delta, ts);
4163                         pr_warn(" 0x%x:  [%lld] absolute:%lld TIME STAMP\n",
4164                                 e, ts, delta);
4165                         break;
4166
4167                 case RINGBUF_TYPE_PADDING:
4168                         ts += event->time_delta;
4169                         pr_warn(" 0x%x:  [%lld] delta:%d PADDING\n",
4170                                 e, ts, event->time_delta);
4171                         break;
4172
4173                 case RINGBUF_TYPE_DATA:
4174                         ts += event->time_delta;
4175                         pr_warn(" 0x%x:  [%lld] delta:%d %s%s\n",
4176                                 e, ts, event->time_delta,
4177                                 show_flags(event), show_irq(event));
4178                         break;
4179
4180                 default:
4181                         break;
4182                 }
4183         }
4184         pr_warn("expected end:0x%lx last event actually ended at:0x%x\n", tail, e);
4185 }
4186
4187 static DEFINE_PER_CPU(atomic_t, checking);
4188 static atomic_t ts_dump;
4189
4190 #define buffer_warn_return(fmt, ...)                                    \
4191         do {                                                            \
4192                 /* If another report is happening, ignore this one */   \
4193                 if (atomic_inc_return(&ts_dump) != 1) {                 \
4194                         atomic_dec(&ts_dump);                           \
4195                         goto out;                                       \
4196                 }                                                       \
4197                 atomic_inc(&cpu_buffer->record_disabled);               \
4198                 pr_warn(fmt, ##__VA_ARGS__);                            \
4199                 dump_buffer_page(bpage, info, tail);                    \
4200                 atomic_dec(&ts_dump);                                   \
4201                 /* There are some cases in boot up where this can happen */ \
4202                 if (WARN_ON_ONCE(system_state != SYSTEM_BOOTING))       \
4203                         /* Do not re-enable checking */                 \
4204                         return;                                         \
4205         } while (0)
4206
4207 /*
4208  * Check if the current event time stamp matches the deltas on
4209  * the buffer page.
4210  */
4211 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
4212                          struct rb_event_info *info,
4213                          unsigned long tail)
4214 {
4215         struct buffer_data_page *bpage;
4216         u64 ts, delta;
4217         bool full = false;
4218         int ret;
4219
4220         bpage = info->tail_page->page;
4221
4222         if (tail == CHECK_FULL_PAGE) {
4223                 full = true;
4224                 tail = local_read(&bpage->commit);
4225         } else if (info->add_timestamp &
4226                    (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
4227                 /* Ignore events with absolute time stamps */
4228                 return;
4229         }
4230
4231         /*
4232          * Do not check the first event (skip possible extends too).
4233          * Also do not check if previous events have not been committed.
4234          */
4235         if (tail <= 8 || tail > local_read(&bpage->commit))
4236                 return;
4237
4238         /*
4239          * If this interrupted another event, skip the check.
4240          */
4241         if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
4242                 goto out;
4243
4244         ret = rb_read_data_buffer(bpage, tail, cpu_buffer->cpu, &ts, &delta);
4245         if (ret < 0) {
4246                 if (delta < ts) {
4247                         buffer_warn_return("[CPU: %d]ABSOLUTE TIME WENT BACKWARDS: last ts: %lld absolute ts: %lld\n",
4248                                            cpu_buffer->cpu, ts, delta);
4249                         goto out;
4250                 }
4251         }
4252         if ((full && ts > info->ts) ||
4253             (!full && ts + info->delta != info->ts)) {
4254                 buffer_warn_return("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s context:%s\n",
4255                                    cpu_buffer->cpu,
4256                                    ts + info->delta, info->ts, info->delta,
4257                                    info->before, info->after,
4258                                    full ? " (full)" : "", show_interrupt_level());
4259         }
4260 out:
4261         atomic_dec(this_cpu_ptr(&checking));
4262 }
4263 #else
4264 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
4265                          struct rb_event_info *info,
4266                          unsigned long tail)
4267 {
4268 }
4269 #endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */
4270
4271 static struct ring_buffer_event *
4272 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
4273                   struct rb_event_info *info)
4274 {
4275         struct ring_buffer_event *event;
4276         struct buffer_page *tail_page;
4277         unsigned long tail, write, w;
4278
4279         /* Don't let the compiler play games with cpu_buffer->tail_page */
4280         tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
4281
4282  /*A*/  w = local_read(&tail_page->write) & RB_WRITE_MASK;
4283         barrier();
4284         rb_time_read(&cpu_buffer->before_stamp, &info->before);
4285         rb_time_read(&cpu_buffer->write_stamp, &info->after);
4286         barrier();
4287         info->ts = rb_time_stamp(cpu_buffer->buffer);
4288
4289         if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) {
4290                 info->delta = info->ts;
4291         } else {
4292                 /*
4293                  * If interrupting an event time update, we may need an
4294                  * absolute timestamp.
4295                  * Don't bother if this is the start of a new page (w == 0).
4296                  */
4297                 if (!w) {
4298                         /* Use the sub-buffer timestamp */
4299                         info->delta = 0;
4300                 } else if (unlikely(info->before != info->after)) {
4301                         info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
4302                         info->length += RB_LEN_TIME_EXTEND;
4303                 } else {
4304                         info->delta = info->ts - info->after;
4305                         if (unlikely(test_time_stamp(info->delta))) {
4306                                 info->add_timestamp |= RB_ADD_STAMP_EXTEND;
4307                                 info->length += RB_LEN_TIME_EXTEND;
4308                         }
4309                 }
4310         }
4311
4312  /*B*/  rb_time_set(&cpu_buffer->before_stamp, info->ts);
4313
4314  /*C*/  write = local_add_return(info->length, &tail_page->write);
4315
4316         /* set write to only the index of the write */
4317         write &= RB_WRITE_MASK;
4318
4319         tail = write - info->length;
4320
4321         /* See if we shot past the end of this buffer page */
4322         if (unlikely(write > cpu_buffer->buffer->subbuf_size)) {
4323                 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
4324                 return rb_move_tail(cpu_buffer, tail, info);
4325         }
4326
4327         if (likely(tail == w)) {
4328                 /* Nothing interrupted us between A and C */
4329  /*D*/          rb_time_set(&cpu_buffer->write_stamp, info->ts);
4330                 /*
4331                  * If something came in between C and D, the write stamp
4332                  * may now not be in sync. But that's fine as the before_stamp
4333                  * will be different and the next event will just be forced
4334                  * to use an absolute timestamp.
4335                  */
4336                 if (likely(!(info->add_timestamp &
4337                              (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
4338                         /* This did not interrupt any time update */
4339                         info->delta = info->ts - info->after;
4340                 else
4341                         /* Just use full timestamp for interrupting event */
4342                         info->delta = info->ts;
4343                 check_buffer(cpu_buffer, info, tail);
4344         } else {
4345                 u64 ts;
4346                 /* SLOW PATH - Interrupted between A and C */
4347
4348                 /* Save the old before_stamp */
4349                 rb_time_read(&cpu_buffer->before_stamp, &info->before);
4350
4351                 /*
4352                  * Read a new timestamp and update the before_stamp so that
4353                  * the next event after this one is forced to use an absolute
4354                  * timestamp. This covers the case of an interrupt coming in
4355                  * between E and F.
4356                  */
4357                 ts = rb_time_stamp(cpu_buffer->buffer);
4358                 rb_time_set(&cpu_buffer->before_stamp, ts);
4359
4360                 barrier();
4361  /*E*/          rb_time_read(&cpu_buffer->write_stamp, &info->after);
4362                 barrier();
4363  /*F*/          if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
4364                     info->after == info->before && info->after < ts) {
4365                         /*
4366                          * Nothing came after this event between C and F, so it is
4367                          * safe to use info->after for the delta as it
4368                          * matched info->before and is still valid.
4369                          */
4370                         info->delta = ts - info->after;
4371                 } else {
4372                         /*
4373                          * Interrupted between C and F:
4374                          * We lost the previous event's time stamp. Just set the
4375                          * delta to zero; this event will then carry the same time
4376                          * as the event it interrupted. The events that come after
4377                          * this one will still be correct (as they will have built
4378                          * their deltas on the previous event).
4379                          */
4380                         info->delta = 0;
4381                 }
4382                 info->ts = ts;
4383                 info->add_timestamp &= ~RB_ADD_STAMP_FORCE;
4384         }
4385
4386         /*
4387          * If this is the first commit on the page, then it has the same
4388          * timestamp as the page itself.
4389          */
4390         if (unlikely(!tail && !(info->add_timestamp &
4391                                 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
4392                 info->delta = 0;
4393
4394         /* We reserved something on the buffer */
4395
4396         event = __rb_page_index(tail_page, tail);
4397         rb_update_event(cpu_buffer, event, info);
4398
4399         local_inc(&tail_page->entries);
4400
4401         /*
4402          * If this is the first commit on the page, then update
4403          * its timestamp.
4404          */
4405         if (unlikely(!tail))
4406                 tail_page->page->time_stamp = info->ts;
4407
4408         /* account for these added bytes */
4409         local_add(info->length, &cpu_buffer->entries_bytes);
4410
4411         return event;
4412 }
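
/*
 * A rough sketch of the slow path above, with made-up numbers:
 *
 *      writer                            interrupt
 *      ------                            ---------
 *      A: w = 64, before = after = 100
 *      reads ts = 110
 *      B: before_stamp = 110
 *                                        reserves and commits its own
 *                                        event; before_stamp and
 *                                        write_stamp now both read 112
 *      C: write was advanced by the
 *         interrupt too, so tail != w
 *      reads a new ts = 115
 *      E: after = 112
 *      F: after == before == 112 and
 *         after < ts, so delta = 115 - 112
 *
 * Had yet another event slipped in between C and F, the stamps would
 * no longer match and the delta would simply be forced to zero.
 */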
4413
4414 static __always_inline struct ring_buffer_event *
4415 rb_reserve_next_event(struct trace_buffer *buffer,
4416                       struct ring_buffer_per_cpu *cpu_buffer,
4417                       unsigned long length)
4418 {
4419         struct ring_buffer_event *event;
4420         struct rb_event_info info;
4421         int nr_loops = 0;
4422         int add_ts_default;
4423
4424         /*
4425          * ring buffer does cmpxchg as well as atomic64 operations
4426          * (which some archs use locking for atomic64), make sure this
4427          * is safe in NMI context
4428          */
4429         if ((!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) ||
4430              IS_ENABLED(CONFIG_GENERIC_ATOMIC64)) &&
4431             (unlikely(in_nmi()))) {
4432                 return NULL;
4433         }
4434
4435         rb_start_commit(cpu_buffer);
4436         /* The commit page can not change after this */
4437
4438 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4439         /*
4440          * Because a cpu buffer can be swapped out of its ring buffer,
4441          * it is possible it was swapped before we started committing
4442          * (committing stops a swap). Check for that here, and if it
4443          * happened, fail the write.
4444          */
4445         barrier();
4446         if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
4447                 local_dec(&cpu_buffer->committing);
4448                 local_dec(&cpu_buffer->commits);
4449                 return NULL;
4450         }
4451 #endif
4452
4453         info.length = rb_calculate_event_length(length);
4454
4455         if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
4456                 add_ts_default = RB_ADD_STAMP_ABSOLUTE;
4457                 info.length += RB_LEN_TIME_EXTEND;
4458                 if (info.length > cpu_buffer->buffer->max_data_size)
4459                         goto out_fail;
4460         } else {
4461                 add_ts_default = RB_ADD_STAMP_NONE;
4462         }
4463
4464  again:
4465         info.add_timestamp = add_ts_default;
4466         info.delta = 0;
4467
4468         /*
4469          * We allow for interrupts to reenter here and do a trace.
4470          * If one does, it will cause this original code to loop
4471          * back here. Even with heavy interrupts happening, this
4472          * should only happen a few times in a row. If it happens
4473          * 1000 times in a row, either there is an interrupt storm
4474          * or something is buggy.
4475          * Bail!
4476          */
4477         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
4478                 goto out_fail;
4479
4480         event = __rb_reserve_next(cpu_buffer, &info);
4481
4482         if (unlikely(PTR_ERR(event) == -EAGAIN)) {
4483                 if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND))
4484                         info.length -= RB_LEN_TIME_EXTEND;
4485                 goto again;
4486         }
4487
4488         if (likely(event))
4489                 return event;
4490  out_fail:
4491         rb_end_commit(cpu_buffer);
4492         return NULL;
4493 }
4494
4495 /**
4496  * ring_buffer_lock_reserve - reserve a part of the buffer
4497  * @buffer: the ring buffer to reserve from
4498  * @length: the length of the data to reserve (excluding event header)
4499  *
4500  * Returns a reserved event on the ring buffer to copy directly to.
4501  * The user of this interface will need to get the body to write into
4502  * and can use the ring_buffer_event_data() interface.
4503  *
4504  * The length is the length of the data needed, not the event length
4505  * which also includes the event header.
4506  *
4507  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
4508  * If NULL is returned, then nothing has been allocated or locked.
4509  */
4510 struct ring_buffer_event *
4511 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
4512 {
4513         struct ring_buffer_per_cpu *cpu_buffer;
4514         struct ring_buffer_event *event;
4515         int cpu;
4516
4517         /* If we are tracing schedule, we don't want to recurse */
4518         preempt_disable_notrace();
4519
4520         if (unlikely(atomic_read(&buffer->record_disabled)))
4521                 goto out;
4522
4523         cpu = raw_smp_processor_id();
4524
4525         if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
4526                 goto out;
4527
4528         cpu_buffer = buffer->buffers[cpu];
4529
4530         if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
4531                 goto out;
4532
4533         if (unlikely(length > buffer->max_data_size))
4534                 goto out;
4535
4536         if (unlikely(trace_recursive_lock(cpu_buffer)))
4537                 goto out;
4538
4539         event = rb_reserve_next_event(buffer, cpu_buffer, length);
4540         if (!event)
4541                 goto out_unlock;
4542
4543         return event;
4544
4545  out_unlock:
4546         trace_recursive_unlock(cpu_buffer);
4547  out:
4548         preempt_enable_notrace();
4549         return NULL;
4550 }
4551 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
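
/*
 * A minimal usage sketch (not taken from a real caller; "my_payload"
 * is made up and error handling is reduced to a bail-out):
 *
 *      struct ring_buffer_event *event;
 *      struct my_payload *data;
 *
 *      event = ring_buffer_lock_reserve(buffer, sizeof(*data));
 *      if (!event)
 *              return -EBUSY;
 *      data = ring_buffer_event_data(event);
 *      data->value = 42;
 *      ring_buffer_unlock_commit(buffer);
 */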
4552
4553 /*
4554  * Decrement the entry count of the page that an event is on.
4555  * The event does not even need to exist, only the pointer
4556  * to the page it is on. This may only be called before the commit
4557  * takes place.
4558  */
4559 static inline void
4560 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
4561                    struct ring_buffer_event *event)
4562 {
4563         unsigned long addr = (unsigned long)event;
4564         struct buffer_page *bpage = cpu_buffer->commit_page;
4565         struct buffer_page *start;
4566
4567         addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
4568
4569         /* Do the likely case first */
4570         if (likely(bpage->page == (void *)addr)) {
4571                 local_dec(&bpage->entries);
4572                 return;
4573         }
4574
4575         /*
4576          * Because the commit page may be on the reader page we
4577          * start with the next page and check the end loop there.
4578          */
4579         rb_inc_page(&bpage);
4580         start = bpage;
4581         do {
4582                 if (bpage->page == (void *)addr) {
4583                         local_dec(&bpage->entries);
4584                         return;
4585                 }
4586                 rb_inc_page(&bpage);
4587         } while (bpage != start);
4588
4589         /* commit not part of this buffer?? */
4590         RB_WARN_ON(cpu_buffer, 1);
4591 }
4592
4593 /**
4594  * ring_buffer_discard_commit - discard an event that has not been committed
4595  * @buffer: the ring buffer
4596  * @event: non committed event to discard
4597  *
4598  * Sometimes an event that is in the ring buffer needs to be ignored.
4599  * This function lets the user discard an event in the ring buffer
4600  * so that it will not be read later.
4601  *
4602  * This function only works if it is called before the item has been
4603  * committed. It will try to free the event from the ring buffer
4604  * if another event has not been added behind it.
4605  *
4606  * If another event has been added behind it, it will set the event
4607  * up as discarded, and perform the commit.
4608  *
4609  * If this function is called, do not call ring_buffer_unlock_commit on
4610  * the event.
4611  */
4612 void ring_buffer_discard_commit(struct trace_buffer *buffer,
4613                                 struct ring_buffer_event *event)
4614 {
4615         struct ring_buffer_per_cpu *cpu_buffer;
4616         int cpu;
4617
4618         /* The event is discarded regardless */
4619         rb_event_discard(event);
4620
4621         cpu = smp_processor_id();
4622         cpu_buffer = buffer->buffers[cpu];
4623
4624         /*
4625          * This must only be called if the event has not been
4626          * committed yet. Thus we can assume that preemption
4627          * is still disabled.
4628          */
4629         RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
4630
4631         rb_decrement_entry(cpu_buffer, event);
4632         if (rb_try_to_discard(cpu_buffer, event))
4633                 goto out;
4634
4635  out:
4636         rb_end_commit(cpu_buffer);
4637
4638         trace_recursive_unlock(cpu_buffer);
4639
4640         preempt_enable_notrace();
4641
4642 }
4643 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
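
/*
 * A usage sketch: reserve an event, then change your mind (the
 * "should_drop" predicate is made up for the example):
 *
 *      event = ring_buffer_lock_reserve(buffer, sizeof(*data));
 *      if (event) {
 *              if (should_drop(ring_buffer_event_data(event)))
 *                      ring_buffer_discard_commit(buffer, event);
 *              else
 *                      ring_buffer_unlock_commit(buffer);
 *      }
 */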
4644
4645 /**
4646  * ring_buffer_write - write data to the buffer without reserving
4647  * @buffer: The ring buffer to write to.
4648  * @length: The length of the data being written (excluding the event header)
4649  * @data: The data to write to the buffer.
4650  *
4651  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
4652  * one function. If you already have the data to write to the buffer, it
4653  * may be easier to simply call this function.
4654  *
4655  * Note, like ring_buffer_lock_reserve, the length is the length of the data
4656  * and not the length of the event which would hold the header.
4657  */
4658 int ring_buffer_write(struct trace_buffer *buffer,
4659                       unsigned long length,
4660                       void *data)
4661 {
4662         struct ring_buffer_per_cpu *cpu_buffer;
4663         struct ring_buffer_event *event;
4664         void *body;
4665         int ret = -EBUSY;
4666         int cpu;
4667
4668         preempt_disable_notrace();
4669
4670         if (atomic_read(&buffer->record_disabled))
4671                 goto out;
4672
4673         cpu = raw_smp_processor_id();
4674
4675         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4676                 goto out;
4677
4678         cpu_buffer = buffer->buffers[cpu];
4679
4680         if (atomic_read(&cpu_buffer->record_disabled))
4681                 goto out;
4682
4683         if (length > buffer->max_data_size)
4684                 goto out;
4685
4686         if (unlikely(trace_recursive_lock(cpu_buffer)))
4687                 goto out;
4688
4689         event = rb_reserve_next_event(buffer, cpu_buffer, length);
4690         if (!event)
4691                 goto out_unlock;
4692
4693         body = rb_event_data(event);
4694
4695         memcpy(body, data, length);
4696
4697         rb_commit(cpu_buffer);
4698
4699         rb_wakeups(buffer, cpu_buffer);
4700
4701         ret = 0;
4702
4703  out_unlock:
4704         trace_recursive_unlock(cpu_buffer);
4705
4706  out:
4707         preempt_enable_notrace();
4708
4709         return ret;
4710 }
4711 EXPORT_SYMBOL_GPL(ring_buffer_write);
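
/*
 * A usage sketch: when the payload already exists, one call replaces
 * the reserve + memcpy + commit sequence:
 *
 *      u64 stamp = 1234;
 *
 *      if (ring_buffer_write(buffer, sizeof(stamp), &stamp))
 *              pr_debug("ring buffer write failed\n");
 */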
4712
4713 /*
4714  * The total entries in the ring buffer is the running counter
4715  * of entries entered into the ring buffer, minus the sum of
4716  * the entries read from the ring buffer and the number of
4717  * entries that were overwritten.
4718  */
4719 static inline unsigned long
4720 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
4721 {
4722         return local_read(&cpu_buffer->entries) -
4723                 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
4724 }
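
/*
 * For example: if 1000 events were written, 200 of them were
 * overwritten by the writer wrapping, and 300 were consumed by a
 * reader, then 1000 - (200 + 300) = 500 entries remain to be read.
 */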
4725
4726 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
4727 {
4728         return !rb_num_of_entries(cpu_buffer);
4729 }
4730
4731 /**
4732  * ring_buffer_record_disable - stop all writes into the buffer
4733  * @buffer: The ring buffer to stop writes to.
4734  *
4735  * This prevents all writes to the buffer. Any attempt to write
4736  * to the buffer after this will fail and return NULL.
4737  *
4738  * The caller should call synchronize_rcu() after this.
4739  */
4740 void ring_buffer_record_disable(struct trace_buffer *buffer)
4741 {
4742         atomic_inc(&buffer->record_disabled);
4743 }
4744 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
4745
4746 /**
4747  * ring_buffer_record_enable - enable writes to the buffer
4748  * @buffer: The ring buffer to enable writes
4749  *
4750  * Note, multiple disables will need the same number of enables
4751  * to truly enable the writing (much like preempt_disable).
4752  */
4753 void ring_buffer_record_enable(struct trace_buffer *buffer)
4754 {
4755         atomic_dec(&buffer->record_disabled);
4756 }
4757 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
4758
4759 /**
4760  * ring_buffer_record_off - stop all writes into the buffer
4761  * @buffer: The ring buffer to stop writes to.
4762  *
4763  * This prevents all writes to the buffer. Any attempt to write
4764  * to the buffer after this will fail and return NULL.
4765  *
4766  * This is different from ring_buffer_record_disable() as
4767  * it works like an on/off switch, whereas the disable() version
4768  * must be paired with an enable().
4769  */
4770 void ring_buffer_record_off(struct trace_buffer *buffer)
4771 {
4772         unsigned int rd;
4773         unsigned int new_rd;
4774
4775         rd = atomic_read(&buffer->record_disabled);
4776         do {
4777                 new_rd = rd | RB_BUFFER_OFF;
4778         } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
4779 }
4780 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
4781
4782 /**
4783  * ring_buffer_record_on - restart writes into the buffer
4784  * @buffer: The ring buffer to start writes to.
4785  *
4786  * This enables all writes to the buffer that was disabled by
4787  * ring_buffer_record_off().
4788  *
4789  * This is different from ring_buffer_record_enable() as
4790  * it works like an on/off switch, whereas the enable() version
4791  * must be paired with a disable().
4792  */
4793 void ring_buffer_record_on(struct trace_buffer *buffer)
4794 {
4795         unsigned int rd;
4796         unsigned int new_rd;
4797
4798         rd = atomic_read(&buffer->record_disabled);
4799         do {
4800                 new_rd = rd & ~RB_BUFFER_OFF;
4801         } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
4802 }
4803 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
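
/*
 * A sketch contrasting the two interfaces: the on/off switch is
 * idempotent, while disable/enable nest like preempt_disable():
 *
 *      ring_buffer_record_off(buffer);
 *      ring_buffer_record_off(buffer);         <-- still just "off"
 *      ring_buffer_record_on(buffer);          <-- writes allowed again
 *
 *      ring_buffer_record_disable(buffer);
 *      ring_buffer_record_disable(buffer);
 *      ring_buffer_record_enable(buffer);      <-- still disabled
 *      ring_buffer_record_enable(buffer);      <-- now enabled
 */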
4804
4805 /**
4806  * ring_buffer_record_is_on - return true if the ring buffer can write
4807  * @buffer: The ring buffer to see if write is enabled
4808  *
4809  * Returns true if the ring buffer is in a state that it accepts writes.
4810  */
4811 bool ring_buffer_record_is_on(struct trace_buffer *buffer)
4812 {
4813         return !atomic_read(&buffer->record_disabled);
4814 }
4815
4816 /**
4817  * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4818  * @buffer: The ring buffer to see if write is set enabled
4819  *
4820  * Returns true if the ring buffer is set writable by ring_buffer_record_on().
4821  * Note that this does NOT mean it is in a writable state.
4822  *
4823  * It may return true when the ring buffer has been disabled by
4824  * ring_buffer_record_disable(), as that is a temporary disabling of
4825  * the ring buffer.
4826  */
4827 bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
4828 {
4829         return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
4830 }
4831
4832 /**
4833  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4834  * @buffer: The ring buffer to stop writes to.
4835  * @cpu: The CPU buffer to stop
4836  *
4837  * This prevents all writes to the buffer. Any attempt to write
4838  * to the buffer after this will fail and return NULL.
4839  *
4840  * The caller should call synchronize_rcu() after this.
4841  */
4842 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
4843 {
4844         struct ring_buffer_per_cpu *cpu_buffer;
4845
4846         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4847                 return;
4848
4849         cpu_buffer = buffer->buffers[cpu];
4850         atomic_inc(&cpu_buffer->record_disabled);
4851 }
4852 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
4853
4854 /**
4855  * ring_buffer_record_enable_cpu - enable writes to the buffer
4856  * @buffer: The ring buffer to enable writes
4857  * @cpu: The CPU to enable.
4858  *
4859  * Note, multiple disables will need the same number of enables
4860  * to truly enable the writing (much like preempt_disable).
4861  */
4862 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
4863 {
4864         struct ring_buffer_per_cpu *cpu_buffer;
4865
4866         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4867                 return;
4868
4869         cpu_buffer = buffer->buffers[cpu];
4870         atomic_dec(&cpu_buffer->record_disabled);
4871 }
4872 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
4873
4874 /**
4875  * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4876  * @buffer: The ring buffer
4877  * @cpu: The per CPU buffer to read from.
4878  */
4879 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
4880 {
4881         unsigned long flags;
4882         struct ring_buffer_per_cpu *cpu_buffer;
4883         struct buffer_page *bpage;
4884         u64 ret = 0;
4885
4886         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4887                 return 0;
4888
4889         cpu_buffer = buffer->buffers[cpu];
4890         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4891         /*
4892          * If the tail is on the reader_page, the oldest time stamp is
4893          * on the reader page.
4894          */
4895         if (cpu_buffer->tail_page == cpu_buffer->reader_page)
4896                 bpage = cpu_buffer->reader_page;
4897         else
4898                 bpage = rb_set_head_page(cpu_buffer);
4899         if (bpage)
4900                 ret = bpage->page->time_stamp;
4901         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4902
4903         return ret;
4904 }
4905 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
4906
4907 /**
4908  * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
4909  * @buffer: The ring buffer
4910  * @cpu: The per CPU buffer to read from.
4911  */
4912 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
4913 {
4914         struct ring_buffer_per_cpu *cpu_buffer;
4915         unsigned long ret;
4916
4917         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4918                 return 0;
4919
4920         cpu_buffer = buffer->buffers[cpu];
4921         ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
4922
4923         return ret;
4924 }
4925 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
4926
4927 /**
4928  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4929  * @buffer: The ring buffer
4930  * @cpu: The per CPU buffer to get the entries from.
4931  */
4932 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
4933 {
4934         struct ring_buffer_per_cpu *cpu_buffer;
4935
4936         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4937                 return 0;
4938
4939         cpu_buffer = buffer->buffers[cpu];
4940
4941         return rb_num_of_entries(cpu_buffer);
4942 }
4943 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
4944
4945 /**
4946  * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
4947  * buffer wrapping around (only if RB_FL_OVERWRITE is on).
4948  * @buffer: The ring buffer
4949  * @cpu: The per CPU buffer to get the number of overruns from
4950  */
4951 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
4952 {
4953         struct ring_buffer_per_cpu *cpu_buffer;
4954         unsigned long ret;
4955
4956         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4957                 return 0;
4958
4959         cpu_buffer = buffer->buffers[cpu];
4960         ret = local_read(&cpu_buffer->overrun);
4961
4962         return ret;
4963 }
4964 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
4965
4966 /**
4967  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
4968  * commits failing due to the buffer wrapping around while there are uncommitted
4969  * events, such as during an interrupt storm.
4970  * @buffer: The ring buffer
4971  * @cpu: The per CPU buffer to get the number of overruns from
4972  */
4973 unsigned long
4974 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
4975 {
4976         struct ring_buffer_per_cpu *cpu_buffer;
4977         unsigned long ret;
4978
4979         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4980                 return 0;
4981
4982         cpu_buffer = buffer->buffers[cpu];
4983         ret = local_read(&cpu_buffer->commit_overrun);
4984
4985         return ret;
4986 }
4987 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
4988
4989 /**
4990  * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
4991  * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
4992  * @buffer: The ring buffer
4993  * @cpu: The per CPU buffer to get the number of overruns from
4994  */
4995 unsigned long
4996 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
4997 {
4998         struct ring_buffer_per_cpu *cpu_buffer;
4999         unsigned long ret;
5000
5001         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5002                 return 0;
5003
5004         cpu_buffer = buffer->buffers[cpu];
5005         ret = local_read(&cpu_buffer->dropped_events);
5006
5007         return ret;
5008 }
5009 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
5010
5011 /**
5012  * ring_buffer_read_events_cpu - get the number of events successfully read
5013  * @buffer: The ring buffer
5014  * @cpu: The per CPU buffer to get the number of events read
5015  */
5016 unsigned long
5017 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
5018 {
5019         struct ring_buffer_per_cpu *cpu_buffer;
5020
5021         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5022                 return 0;
5023
5024         cpu_buffer = buffer->buffers[cpu];
5025         return cpu_buffer->read;
5026 }
5027 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
5028
5029 /**
5030  * ring_buffer_entries - get the number of entries in a buffer
5031  * @buffer: The ring buffer
5032  *
5033  * Returns the total number of entries in the ring buffer
5034  * (all CPU entries)
5035  */
5036 unsigned long ring_buffer_entries(struct trace_buffer *buffer)
5037 {
5038         struct ring_buffer_per_cpu *cpu_buffer;
5039         unsigned long entries = 0;
5040         int cpu;
5041
5042         /* if you care about this being correct, lock the buffer */
5043         for_each_buffer_cpu(buffer, cpu) {
5044                 cpu_buffer = buffer->buffers[cpu];
5045                 entries += rb_num_of_entries(cpu_buffer);
5046         }
5047
5048         return entries;
5049 }
5050 EXPORT_SYMBOL_GPL(ring_buffer_entries);
5051
5052 /**
5053  * ring_buffer_overruns - get the number of overruns in buffer
5054  * @buffer: The ring buffer
5055  *
5056  * Returns the total number of overruns in the ring buffer
5057  * (all CPU entries)
5058  */
5059 unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
5060 {
5061         struct ring_buffer_per_cpu *cpu_buffer;
5062         unsigned long overruns = 0;
5063         int cpu;
5064
5065         /* if you care about this being correct, lock the buffer */
5066         for_each_buffer_cpu(buffer, cpu) {
5067                 cpu_buffer = buffer->buffers[cpu];
5068                 overruns += local_read(&cpu_buffer->overrun);
5069         }
5070
5071         return overruns;
5072 }
5073 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
5074
5075 static void rb_iter_reset(struct ring_buffer_iter *iter)
5076 {
5077         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5078
5079         /* Iterator usage is expected to have recording disabled */
5080         iter->head_page = cpu_buffer->reader_page;
5081         iter->head = cpu_buffer->reader_page->read;
5082         iter->next_event = iter->head;
5083
5084         iter->cache_reader_page = iter->head_page;
5085         iter->cache_read = cpu_buffer->read;
5086         iter->cache_pages_removed = cpu_buffer->pages_removed;
5087
5088         if (iter->head) {
5089                 iter->read_stamp = cpu_buffer->read_stamp;
5090                 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp;
5091         } else {
5092                 iter->read_stamp = iter->head_page->page->time_stamp;
5093                 iter->page_stamp = iter->read_stamp;
5094         }
5095 }
5096
5097 /**
5098  * ring_buffer_iter_reset - reset an iterator
5099  * @iter: The iterator to reset
5100  *
5101  * Resets the iterator, so that it will start from the beginning
5102  * again.
5103  */
5104 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
5105 {
5106         struct ring_buffer_per_cpu *cpu_buffer;
5107         unsigned long flags;
5108
5109         if (!iter)
5110                 return;
5111
5112         cpu_buffer = iter->cpu_buffer;
5113
5114         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5115         rb_iter_reset(iter);
5116         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5117 }
5118 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
5119
5120 /**
5121  * ring_buffer_iter_empty - check if an iterator has no more to read
5122  * @iter: The iterator to check
5123  */
5124 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
5125 {
5126         struct ring_buffer_per_cpu *cpu_buffer;
5127         struct buffer_page *reader;
5128         struct buffer_page *head_page;
5129         struct buffer_page *commit_page;
5130         struct buffer_page *curr_commit_page;
5131         unsigned commit;
5132         u64 curr_commit_ts;
5133         u64 commit_ts;
5134
5135         cpu_buffer = iter->cpu_buffer;
5136         reader = cpu_buffer->reader_page;
5137         head_page = cpu_buffer->head_page;
5138         commit_page = READ_ONCE(cpu_buffer->commit_page);
5139         commit_ts = commit_page->page->time_stamp;
5140
5141         /*
5142          * When the writer goes across pages, it issues a cmpxchg which
5143          * is a mb(), which will synchronize with the rmb here.
5144          * (see rb_tail_page_update())
5145          */
5146         smp_rmb();
5147         commit = rb_page_commit(commit_page);
5148         /* We want to make sure that the commit page doesn't change */
5149         smp_rmb();
5150
5151         /* Make sure commit page didn't change */
5152         curr_commit_page = READ_ONCE(cpu_buffer->commit_page);
5153         curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp);
5154
5155         /* If the commit page changed, then there's more data */
5156         if (curr_commit_page != commit_page ||
5157             curr_commit_ts != commit_ts)
5158                 return 0;
5159
5160         /* Still racy, as it may return a false positive, but that's OK */
5161         return ((iter->head_page == commit_page && iter->head >= commit) ||
5162                 (iter->head_page == reader && commit_page == head_page &&
5163                  head_page->read == commit &&
5164                  iter->head == rb_page_size(cpu_buffer->reader_page)));
5165 }
5166 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
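
/*
 * The double read above follows a seqlock-like pattern; in sketch
 * form (pseudo-code, not a real API):
 *
 *      snap = READ_ONCE(state);
 *      smp_rmb();
 *      ... use snap ...
 *      smp_rmb();
 *      if (READ_ONCE(state) != snap)
 *              bail;   <-- the writer moved, report "not empty"
 */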
5167
5168 static void
5169 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
5170                      struct ring_buffer_event *event)
5171 {
5172         u64 delta;
5173
5174         switch (event->type_len) {
5175         case RINGBUF_TYPE_PADDING:
5176                 return;
5177
5178         case RINGBUF_TYPE_TIME_EXTEND:
5179                 delta = rb_event_time_stamp(event);
5180                 cpu_buffer->read_stamp += delta;
5181                 return;
5182
5183         case RINGBUF_TYPE_TIME_STAMP:
5184                 delta = rb_event_time_stamp(event);
5185                 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp);
5186                 cpu_buffer->read_stamp = delta;
5187                 return;
5188
5189         case RINGBUF_TYPE_DATA:
5190                 cpu_buffer->read_stamp += event->time_delta;
5191                 return;
5192
5193         default:
5194                 RB_WARN_ON(cpu_buffer, 1);
5195         }
5196 }
5197
5198 static void
5199 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
5200                           struct ring_buffer_event *event)
5201 {
5202         u64 delta;
5203
5204         switch (event->type_len) {
5205         case RINGBUF_TYPE_PADDING:
5206                 return;
5207
5208         case RINGBUF_TYPE_TIME_EXTEND:
5209                 delta = rb_event_time_stamp(event);
5210                 iter->read_stamp += delta;
5211                 return;
5212
5213         case RINGBUF_TYPE_TIME_STAMP:
5214                 delta = rb_event_time_stamp(event);
5215                 delta = rb_fix_abs_ts(delta, iter->read_stamp);
5216                 iter->read_stamp = delta;
5217                 return;
5218
5219         case RINGBUF_TYPE_DATA:
5220                 iter->read_stamp += event->time_delta;
5221                 return;
5222
5223         default:
5224                 RB_WARN_ON(iter->cpu_buffer, 1);
5225         }
5226 }
5227
5228 static struct buffer_page *
5229 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
5230 {
5231         struct buffer_page *reader = NULL;
5232         unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
5233         unsigned long overwrite;
5234         unsigned long flags;
5235         int nr_loops = 0;
5236         bool ret;
5237
5238         local_irq_save(flags);
5239         arch_spin_lock(&cpu_buffer->lock);
5240
5241  again:
5242         /*
5243          * This should normally only loop twice. But because the
5244          * start of the reader inserts an empty page, it causes
5245          * a case where we will loop three times. There should be no
5246          * reason to loop four times (that I know of).
5247          */
5248         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
5249                 reader = NULL;
5250                 goto out;
5251         }
5252
5253         reader = cpu_buffer->reader_page;
5254
5255         /* If there's more to read, return this page */
5256         if (cpu_buffer->reader_page->read < rb_page_size(reader))
5257                 goto out;
5258
5259         /* Never should we have an index greater than the size */
5260         if (RB_WARN_ON(cpu_buffer,
5261                        cpu_buffer->reader_page->read > rb_page_size(reader)))
5262                 goto out;
5263
5264         /* check if we caught up to the tail */
5265         reader = NULL;
5266         if (cpu_buffer->commit_page == cpu_buffer->reader_page)
5267                 goto out;
5268
5269         /* Don't bother swapping if the ring buffer is empty */
5270         if (rb_num_of_entries(cpu_buffer) == 0)
5271                 goto out;
5272
5273         /*
5274          * Reset the reader page to size zero.
5275          */
5276         local_set(&cpu_buffer->reader_page->write, 0);
5277         local_set(&cpu_buffer->reader_page->entries, 0);
5278         local_set(&cpu_buffer->reader_page->page->commit, 0);
5279         cpu_buffer->reader_page->real_end = 0;
5280
5281  spin:
5282         /*
5283          * Splice the empty reader page into the list around the head.
5284          */
5285         reader = rb_set_head_page(cpu_buffer);
5286         if (!reader)
5287                 goto out;
5288         cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
5289         cpu_buffer->reader_page->list.prev = reader->list.prev;
5290
5291         /*
5292          * cpu_buffer->pages just needs to point to the buffer; it
5293          * has no specific buffer page to point to. Let's move it out
5294          * of our way so we don't accidentally swap it.
5295          */
5296         cpu_buffer->pages = reader->list.prev;
5297
5298         /* The reader page will be pointing to the new head */
5299         rb_set_list_to_head(&cpu_buffer->reader_page->list);
5300
5301         /*
5302          * We want to make sure we read the overruns after we set up our
5303          * pointers to the next object. The writer side does a
5304          * cmpxchg to cross pages which acts as the mb on the writer
5305          * side. Note, the reader will constantly fail the swap
5306          * while the writer is updating the pointers, so this
5307          * guarantees that the overwrite recorded here is the one we
5308          * want to compare with the last_overrun.
5309          */
5310         smp_mb();
5311         overwrite = local_read(&(cpu_buffer->overrun));
5312
5313         /*
5314          * Here's the tricky part.
5315          *
5316          * We need to move the pointer past the header page.
5317          * But we can only do that if a writer is not currently
5318          * moving it. The page before the header page has the
5319          * flag bit '1' set if it is pointing to the page we want.
5320          * But if the writer is in the process of moving it,
5321          * then it will be '2', or '0' if it has already moved on.
5322          */
5323
5324         ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
5325
5326         /*
5327          * If we did not convert it, then we must try again.
5328          */
5329         if (!ret)
5330                 goto spin;
5331
5332         if (cpu_buffer->ring_meta)
5333                 rb_update_meta_reader(cpu_buffer, reader);
5334
5335         /*
5336          * Yay! We succeeded in replacing the page.
5337          *
5338          * Now make the new head point back to the reader page.
5339          */
5340         rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
5341         rb_inc_page(&cpu_buffer->head_page);
5342
5343         cpu_buffer->cnt++;
5344         local_inc(&cpu_buffer->pages_read);
5345
5346         /* Finally update the reader page to the new head */
5347         cpu_buffer->reader_page = reader;
5348         cpu_buffer->reader_page->read = 0;
5349
5350         if (overwrite != cpu_buffer->last_overrun) {
5351                 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
5352                 cpu_buffer->last_overrun = overwrite;
5353         }
5354
5355         goto again;
5356
5357  out:
5358         /* Update the read_stamp on the first event */
5359         if (reader && reader->read == 0)
5360                 cpu_buffer->read_stamp = reader->page->time_stamp;
5361
5362         arch_spin_unlock(&cpu_buffer->lock);
5363         local_irq_restore(flags);
5364
5365         /*
5366          * The writer has preemption disabled, so wait for it. But not
5367          * forever; although, 1 second is pretty much "forever".
5368          */
5369 #define USECS_WAIT      1000000
5370         for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
5371                 /* If the write is past the end of the page, a writer is still updating it */
5372                 if (likely(!reader || rb_page_write(reader) <= bsize))
5373                         break;
5374
5375                 udelay(1);
5376
5377                 /* Get the latest version of the reader write value */
5378                 smp_rmb();
5379         }
5380
5381         /* The writer is not moving forward? Something is wrong */
5382         if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT))
5383                 reader = NULL;
5384
5385         /*
5386          * Make sure we see any padding after the write update
5387          * (see rb_reset_tail()).
5388          *
5389          * In addition, a writer may be writing on the reader page
5390          * if the page has not been fully filled, so the read barrier
5391          * is also needed to make sure we see the content of what is
5392          * committed by the writer (see rb_set_commit_to_write()).
5393          */
5394         smp_rmb();
5395
5396
5397         return reader;
5398 }
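
/*
 * In short, the swap above proceeds as follows (a simplified sketch):
 *
 *   1. Zero out the spare reader page.
 *   2. Point its list.next/.prev around the current head page.
 *   3. cmpxchg the 'next' pointer of the page before the head (the
 *      one carrying the HEAD flag '1') to point at the spare page;
 *      if the writer is mid-move ('2') or has moved on ('0'), the
 *      cmpxchg fails and we spin and retry.
 *   4. On success, the old head page becomes the new reader page.
 */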
5399
5400 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
5401 {
5402         struct ring_buffer_event *event;
5403         struct buffer_page *reader;
5404         unsigned length;
5405
5406         reader = rb_get_reader_page(cpu_buffer);
5407
5408         /* This function should not be called when buffer is empty */
5409         if (RB_WARN_ON(cpu_buffer, !reader))
5410                 return;
5411
5412         event = rb_reader_event(cpu_buffer);
5413
5414         if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
5415                 cpu_buffer->read++;
5416
5417         rb_update_read_stamp(cpu_buffer, event);
5418
5419         length = rb_event_length(event);
5420         cpu_buffer->reader_page->read += length;
5421         cpu_buffer->read_bytes += length;
5422 }
5423
5424 static void rb_advance_iter(struct ring_buffer_iter *iter)
5425 {
5426         struct ring_buffer_per_cpu *cpu_buffer;
5427
5428         cpu_buffer = iter->cpu_buffer;
5429
5430         /* If head == next_event then we need to jump to the next event */
5431         if (iter->head == iter->next_event) {
5432                 /* If the event gets overwritten again, there's nothing to do */
5433                 if (rb_iter_head_event(iter) == NULL)
5434                         return;
5435         }
5436
5437         iter->head = iter->next_event;
5438
5439         /*
5440          * Check if we are at the end of the buffer.
5441          */
5442         if (iter->next_event >= rb_page_size(iter->head_page)) {
5443                 /* discarded commits can make the page empty */
5444                 if (iter->head_page == cpu_buffer->commit_page)
5445                         return;
5446                 rb_inc_iter(iter);
5447                 return;
5448         }
5449
5450         rb_update_iter_read_stamp(iter, iter->event);
5451 }
5452
5453 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
5454 {
5455         return cpu_buffer->lost_events;
5456 }
5457
5458 static struct ring_buffer_event *
5459 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
5460                unsigned long *lost_events)
5461 {
5462         struct ring_buffer_event *event;
5463         struct buffer_page *reader;
5464         int nr_loops = 0;
5465
5466         if (ts)
5467                 *ts = 0;
5468  again:
5469         /*
5470          * We repeat when a time extend is encountered.
5471          * Since the time extend is always attached to a data event,
5472          * we should never loop more than once.
5473          * (We never hit the following condition more than twice).
5474          */
5475         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
5476                 return NULL;
5477
5478         reader = rb_get_reader_page(cpu_buffer);
5479         if (!reader)
5480                 return NULL;
5481
5482         event = rb_reader_event(cpu_buffer);
5483
5484         switch (event->type_len) {
5485         case RINGBUF_TYPE_PADDING:
5486                 if (rb_null_event(event))
5487                         RB_WARN_ON(cpu_buffer, 1);
5488                 /*
5489                  * Because the writer could be discarding every
5490                  * event it creates (which would probably be bad),
5491                  * going back to "again" could mean we never catch
5492                  * up and trigger the warn-on, or lock up the box.
5493                  * Return the padding instead; we will release the
5494                  * current locks and try again.
5495                  */
5496                 return event;
5497
5498         case RINGBUF_TYPE_TIME_EXTEND:
5499                 /* Internal data, OK to advance */
5500                 rb_advance_reader(cpu_buffer);
5501                 goto again;
5502
5503         case RINGBUF_TYPE_TIME_STAMP:
5504                 if (ts) {
5505                         *ts = rb_event_time_stamp(event);
5506                         *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp);
5507                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
5508                                                          cpu_buffer->cpu, ts);
5509                 }
5510                 /* Internal data, OK to advance */
5511                 rb_advance_reader(cpu_buffer);
5512                 goto again;
5513
5514         case RINGBUF_TYPE_DATA:
5515                 if (ts && !(*ts)) {
5516                         *ts = cpu_buffer->read_stamp + event->time_delta;
5517                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
5518                                                          cpu_buffer->cpu, ts);
5519                 }
5520                 if (lost_events)
5521                         *lost_events = rb_lost_events(cpu_buffer);
5522                 return event;
5523
5524         default:
5525                 RB_WARN_ON(cpu_buffer, 1);
5526         }
5527
5528         return NULL;
5529 }
5530 EXPORT_SYMBOL_GPL(ring_buffer_peek);
5531
5532 static struct ring_buffer_event *
5533 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
5534 {
5535         struct trace_buffer *buffer;
5536         struct ring_buffer_per_cpu *cpu_buffer;
5537         struct ring_buffer_event *event;
5538         int nr_loops = 0;
5539
5540         if (ts)
5541                 *ts = 0;
5542
5543         cpu_buffer = iter->cpu_buffer;
5544         buffer = cpu_buffer->buffer;
5545
5546         /*
5547          * Check if someone performed a consuming read on the buffer
5548          * or removed some pages from it. In either case, the
5549          * iterator was invalidated and we need to reset it.
5550          */
5551         if (unlikely(iter->cache_read != cpu_buffer->read ||
5552                      iter->cache_reader_page != cpu_buffer->reader_page ||
5553                      iter->cache_pages_removed != cpu_buffer->pages_removed))
5554                 rb_iter_reset(iter);
5555
5556  again:
5557         if (ring_buffer_iter_empty(iter))
5558                 return NULL;
5559
5560         /*
5561          * As the writer can mess with what the iterator is trying
5562          * to read, just give up if we fail to get an event after
5563          * three tries. The iterator is not as reliable when reading
5564          * the ring buffer with an active writer as the consumer is.
5565          * Do not warn when the three failures are reached.
5566          */
5567         if (++nr_loops > 3)
5568                 return NULL;
5569
5570         if (rb_per_cpu_empty(cpu_buffer))
5571                 return NULL;
5572
5573         if (iter->head >= rb_page_size(iter->head_page)) {
5574                 rb_inc_iter(iter);
5575                 goto again;
5576         }
5577
5578         event = rb_iter_head_event(iter);
5579         if (!event)
5580                 goto again;
5581
5582         switch (event->type_len) {
5583         case RINGBUF_TYPE_PADDING:
5584                 if (rb_null_event(event)) {
5585                         rb_inc_iter(iter);
5586                         goto again;
5587                 }
5588                 rb_advance_iter(iter);
5589                 return event;
5590
5591         case RINGBUF_TYPE_TIME_EXTEND:
5592                 /* Internal data, OK to advance */
5593                 rb_advance_iter(iter);
5594                 goto again;
5595
5596         case RINGBUF_TYPE_TIME_STAMP:
5597                 if (ts) {
5598                         *ts = rb_event_time_stamp(event);
5599                         *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
5600                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
5601                                                          cpu_buffer->cpu, ts);
5602                 }
5603                 /* Internal data, OK to advance */
5604                 rb_advance_iter(iter);
5605                 goto again;
5606
5607         case RINGBUF_TYPE_DATA:
5608                 if (ts && !(*ts)) {
5609                         *ts = iter->read_stamp + event->time_delta;
5610                         ring_buffer_normalize_time_stamp(buffer,
5611                                                          cpu_buffer->cpu, ts);
5612                 }
5613                 return event;
5614
5615         default:
5616                 RB_WARN_ON(cpu_buffer, 1);
5617         }
5618
5619         return NULL;
5620 }
5621 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
5622
5623 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
5624 {
5625         if (likely(!in_nmi())) {
5626                 raw_spin_lock(&cpu_buffer->reader_lock);
5627                 return true;
5628         }
5629
5630         /*
5631          * If an NMI die dump reads out the content of the ring buffer,
5632          * trylock must be used to prevent a deadlock if the NMI
5633          * preempted a task that holds the ring buffer locks. If
5634          * we get the lock then all is fine; if not, continue with
5635          * the read, but this can corrupt the ring buffer, so the
5636          * buffer must be permanently disabled from future writes.
5637          * Reading from NMI is a one-shot deal.
5638          */
5639         if (raw_spin_trylock(&cpu_buffer->reader_lock))
5640                 return true;
5641
5642         /* Continue without locking, but disable the ring buffer */
5643         atomic_inc(&cpu_buffer->record_disabled);
5644         return false;
5645 }
5646
5647 static inline void
5648 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
5649 {
5650         if (likely(locked))
5651                 raw_spin_unlock(&cpu_buffer->reader_lock);
5652 }
5653
5654 /**
5655  * ring_buffer_peek - peek at the next event to be read
5656  * @buffer: The ring buffer to read
5657  * @cpu: The cpu to peek at
5658  * @ts: The timestamp counter of this event.
5659  * @lost_events: a variable to store if events were lost (may be NULL)
5660  *
5661  * This will return the event that will be read next, but does
5662  * not consume the data.
5663  */
5664 struct ring_buffer_event *
5665 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
5666                  unsigned long *lost_events)
5667 {
5668         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5669         struct ring_buffer_event *event;
5670         unsigned long flags;
5671         bool dolock;
5672
5673         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5674                 return NULL;
5675
5676  again:
5677         local_irq_save(flags);
5678         dolock = rb_reader_lock(cpu_buffer);
5679         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
5680         if (event && event->type_len == RINGBUF_TYPE_PADDING)
5681                 rb_advance_reader(cpu_buffer);
5682         rb_reader_unlock(cpu_buffer, dolock);
5683         local_irq_restore(flags);
5684
5685         if (event && event->type_len == RINGBUF_TYPE_PADDING)
5686                 goto again;
5687
5688         return event;
5689 }
5690
5691 /**
5692  * ring_buffer_iter_dropped - report if there are dropped events
5693  * @iter: The ring buffer iterator
5694  * Returns true if there were dropped events since the last peek.
5695  */
5696 bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
5697 {
5698         bool ret = iter->missed_events != 0;
5699
5700         iter->missed_events = 0;
5701         return ret;
5702 }
5703 EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
5704
5705 /**
5706  * ring_buffer_iter_peek - peek at the next event to be read
5707  * @iter: The ring buffer iterator
5708  * @ts: The timestamp counter of this event.
5709  *
5710  * This will return the event that will be read next, but does
5711  * not increment the iterator.
5712  */
5713 struct ring_buffer_event *
5714 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
5715 {
5716         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5717         struct ring_buffer_event *event;
5718         unsigned long flags;
5719
5720  again:
5721         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5722         event = rb_iter_peek(iter, ts);
5723         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5724
5725         if (event && event->type_len == RINGBUF_TYPE_PADDING)
5726                 goto again;
5727
5728         return event;
5729 }
5730
5731 /**
5732  * ring_buffer_consume - return an event and consume it
5733  * @buffer: The ring buffer to get the next event from
5734  * @cpu: the cpu to read the buffer from
5735  * @ts: a variable to store the timestamp (may be NULL)
5736  * @lost_events: a variable to store if events were lost (may be NULL)
5737  *
5738  * Returns the next event in the ring buffer, and that event is consumed.
5739  * Meaning that sequential reads will keep returning different events,
5740  * eventually emptying the ring buffer if the producer is slower.
5741  */
5742 struct ring_buffer_event *
5743 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
5744                     unsigned long *lost_events)
5745 {
5746         struct ring_buffer_per_cpu *cpu_buffer;
5747         struct ring_buffer_event *event = NULL;
5748         unsigned long flags;
5749         bool dolock;
5750
5751  again:
5752         /* might be called in atomic context */
5753         preempt_disable();
5754
5755         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5756                 goto out;
5757
5758         cpu_buffer = buffer->buffers[cpu];
5759         local_irq_save(flags);
5760         dolock = rb_reader_lock(cpu_buffer);
5761
5762         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
5763         if (event) {
5764                 cpu_buffer->lost_events = 0;
5765                 rb_advance_reader(cpu_buffer);
5766         }
5767
5768         rb_reader_unlock(cpu_buffer, dolock);
5769         local_irq_restore(flags);
5770
5771  out:
5772         preempt_enable();
5773
5774         if (event && event->type_len == RINGBUF_TYPE_PADDING)
5775                 goto again;
5776
5777         return event;
5778 }
5779 EXPORT_SYMBOL_GPL(ring_buffer_consume);
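
/*
 * A usage sketch: drain everything currently readable on one CPU
 * (the "process" callback is made up for the example):
 *
 *      struct ring_buffer_event *event;
 *      unsigned long lost;
 *      u64 ts;
 *
 *      while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
 *              if (lost)
 *                      pr_debug("lost %lu events\n", lost);
 *              process(ring_buffer_event_data(event), ts);
 *      }
 */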
5780
5781 /**
5782  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
5783  * @buffer: The ring buffer to read from
5784  * @cpu: The cpu buffer to iterate over
5785  * @flags: gfp flags to use for memory allocation
5786  *
5787  * This performs the initial preparations necessary to iterate
5788  * through the buffer.  Memory is allocated, buffer resizing
5789  * is disabled, and the iterator pointer is returned to the caller.
5790  *
5791  * After a sequence of ring_buffer_read_prepare calls, the user is
5792  * expected to make at least one call to ring_buffer_read_prepare_sync.
5793  * Afterwards, ring_buffer_read_start is invoked to get things going
5794  * for real.
5795  *
5796  * This overall must be paired with ring_buffer_read_finish.
5797  */
5798 struct ring_buffer_iter *
5799 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
5800 {
5801         struct ring_buffer_per_cpu *cpu_buffer;
5802         struct ring_buffer_iter *iter;
5803
5804         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5805                 return NULL;
5806
5807         iter = kzalloc(sizeof(*iter), flags);
5808         if (!iter)
5809                 return NULL;
5810
5811         /* Holds the entire event: data and meta data */
5812         iter->event_size = buffer->subbuf_size;
5813         iter->event = kmalloc(iter->event_size, flags);
5814         if (!iter->event) {
5815                 kfree(iter);
5816                 return NULL;
5817         }
5818
5819         cpu_buffer = buffer->buffers[cpu];
5820
5821         iter->cpu_buffer = cpu_buffer;
5822
5823         atomic_inc(&cpu_buffer->resize_disabled);
5824
5825         return iter;
5826 }
5827 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
5828
5829 /**
5830  * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
5831  *
5832  * All previously invoked ring_buffer_read_prepare calls to prepare
5833  * iterators will be synchronized.  Afterwards, ring_buffer_read_start
5834  * calls on those iterators are allowed.
5835  */
5836 void
5837 ring_buffer_read_prepare_sync(void)
5838 {
5839         synchronize_rcu();
5840 }
5841 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
5842
5843 /**
5844  * ring_buffer_read_start - start a non consuming read of the buffer
5845  * @iter: The iterator returned by ring_buffer_read_prepare
5846  *
5847  * This finalizes the startup of an iteration through the buffer.
5848  * The iterator comes from a call to ring_buffer_read_prepare and
5849  * an intervening ring_buffer_read_prepare_sync must have been
5850  * performed.
5851  *
5852  * Must be paired with ring_buffer_read_finish.
5853  */
5854 void
5855 ring_buffer_read_start(struct ring_buffer_iter *iter)
5856 {
5857         struct ring_buffer_per_cpu *cpu_buffer;
5858         unsigned long flags;
5859
5860         if (!iter)
5861                 return;
5862
5863         cpu_buffer = iter->cpu_buffer;
5864
5865         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5866         arch_spin_lock(&cpu_buffer->lock);
5867         rb_iter_reset(iter);
5868         arch_spin_unlock(&cpu_buffer->lock);
5869         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5870 }
5871 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
5872
5873 /**
5874  * ring_buffer_read_finish - finish reading the iterator of the buffer
5875  * @iter: The iterator retrieved by ring_buffer_read_prepare
5876  *
5877  * This re-enables resizing of the buffer, and frees the iterator.
5878  */
5879 void
5880 ring_buffer_read_finish(struct ring_buffer_iter *iter)
5881 {
5882         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5883
5884         /* Use this opportunity to check the integrity of the ring buffer. */
5885         rb_check_pages(cpu_buffer);
5886
5887         atomic_dec(&cpu_buffer->resize_disabled);
5888         kfree(iter->event);
5889         kfree(iter);
5890 }
5891 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
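
/*
 * Example (illustrative sketch only): the full non-consuming read
 * sequence for one CPU, pairing ring_buffer_read_prepare(),
 * ring_buffer_read_prepare_sync() and ring_buffer_read_start() with
 * ring_buffer_read_finish().  "my_buffer" is an assumption and
 * process_event() is a hypothetical consumer.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(my_buffer, cpu, GFP_KERNEL);
 *	if (!iter)
 *		return -ENOMEM;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *
 *	while ((event = ring_buffer_iter_peek(iter, &ts))) {
 *		process_event(event, ts);
 *		ring_buffer_iter_advance(iter);
 *	}
 *
 *	ring_buffer_read_finish(iter);
 */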
5892
5893 /**
5894  * ring_buffer_iter_advance - advance the iterator to the next location
5895  * @iter: The ring buffer iterator
5896  *
5897  * Move the iterator forward so that the next read will return the
5898  * event at the iterator's new location.
5899  */
5900 void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
5901 {
5902         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5903         unsigned long flags;
5904
5905         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5906
5907         rb_advance_iter(iter);
5908
5909         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5910 }
5911 EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
5912
5913 /**
5914  * ring_buffer_size - return the size of the ring buffer (in bytes)
5915  * @buffer: The ring buffer.
5916  * @cpu: The CPU to get ring buffer size from.
5917  */
5918 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
5919 {
5920         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5921                 return 0;
5922
5923         return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages;
5924 }
5925 EXPORT_SYMBOL_GPL(ring_buffer_size);
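
/*
 * Example (illustrative sketch only): summing the per-CPU sizes into a
 * total for the whole buffer.  "my_buffer" is an assumption.
 *
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		total += ring_buffer_size(my_buffer, cpu);
 */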
5926
5927 /**
5928  * ring_buffer_max_event_size - return the max data size of an event
5929  * @buffer: The ring buffer.
5930  *
5931  * Returns the maximum size an event can be.
5932  */
5933 unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer)
5934 {
5935         /* If abs timestamp is requested, events have a timestamp too */
5936         if (ring_buffer_time_stamp_abs(buffer))
5937                 return buffer->max_data_size - RB_LEN_TIME_EXTEND;
5938         return buffer->max_data_size;
5939 }
5940 EXPORT_SYMBOL_GPL(ring_buffer_max_event_size);
5941
5942 static void rb_clear_buffer_page(struct buffer_page *page)
5943 {
5944         local_set(&page->write, 0);
5945         local_set(&page->entries, 0);
5946         rb_init_page(page->page);
5947         page->read = 0;
5948 }
5949
5950 static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
5951 {
5952         struct trace_buffer_meta *meta = cpu_buffer->meta_page;
5953
5954         if (!meta)
5955                 return;
5956
5957         meta->reader.read = cpu_buffer->reader_page->read;
5958         meta->reader.id = cpu_buffer->reader_page->id;
5959         meta->reader.lost_events = cpu_buffer->lost_events;
5960
5961         meta->entries = local_read(&cpu_buffer->entries);
5962         meta->overrun = local_read(&cpu_buffer->overrun);
5963         meta->read = cpu_buffer->read;
5964
5965         /* Some archs do not have data cache coherency between kernel and user-space */
5966         flush_dcache_folio(virt_to_folio(cpu_buffer->meta_page));
5967 }
5968
5969 static void
5970 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
5971 {
5972         struct buffer_page *page;
5973
5974         rb_head_page_deactivate(cpu_buffer);
5975
5976         cpu_buffer->head_page
5977                 = list_entry(cpu_buffer->pages, struct buffer_page, list);
5978         rb_clear_buffer_page(cpu_buffer->head_page);
5979         list_for_each_entry(page, cpu_buffer->pages, list) {
5980                 rb_clear_buffer_page(page);
5981         }
5982
5983         cpu_buffer->tail_page = cpu_buffer->head_page;
5984         cpu_buffer->commit_page = cpu_buffer->head_page;
5985
5986         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
5987         INIT_LIST_HEAD(&cpu_buffer->new_pages);
5988         rb_clear_buffer_page(cpu_buffer->reader_page);
5989
5990         local_set(&cpu_buffer->entries_bytes, 0);
5991         local_set(&cpu_buffer->overrun, 0);
5992         local_set(&cpu_buffer->commit_overrun, 0);
5993         local_set(&cpu_buffer->dropped_events, 0);
5994         local_set(&cpu_buffer->entries, 0);
5995         local_set(&cpu_buffer->committing, 0);
5996         local_set(&cpu_buffer->commits, 0);
5997         local_set(&cpu_buffer->pages_touched, 0);
5998         local_set(&cpu_buffer->pages_lost, 0);
5999         local_set(&cpu_buffer->pages_read, 0);
6000         cpu_buffer->last_pages_touch = 0;
6001         cpu_buffer->shortest_full = 0;
6002         cpu_buffer->read = 0;
6003         cpu_buffer->read_bytes = 0;
6004
6005         rb_time_set(&cpu_buffer->write_stamp, 0);
6006         rb_time_set(&cpu_buffer->before_stamp, 0);
6007
6008         memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp));
6009
6010         cpu_buffer->lost_events = 0;
6011         cpu_buffer->last_overrun = 0;
6012
6013         rb_head_page_activate(cpu_buffer);
6014         cpu_buffer->pages_removed = 0;
6015
6016         if (cpu_buffer->mapped) {
6017                 rb_update_meta_page(cpu_buffer);
6018                 if (cpu_buffer->ring_meta) {
6019                         struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
6020                         meta->commit_buffer = meta->head_buffer;
6021                 }
6022         }
6023 }
6024
6025 /* Must have disabled the cpu buffer then done a synchronize_rcu */
6026 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
6027 {
6028         unsigned long flags;
6029
6030         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
6031
6032         if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
6033                 goto out;
6034
6035         arch_spin_lock(&cpu_buffer->lock);
6036
6037         rb_reset_cpu(cpu_buffer);
6038
6039         arch_spin_unlock(&cpu_buffer->lock);
6040
6041  out:
6042         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
6043 }
6044
6045 /**
6046  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
6047  * @buffer: The ring buffer to reset a per cpu buffer of
6048  * @cpu: The CPU buffer to be reset
6049  */
6050 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
6051 {
6052         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
6053         struct ring_buffer_meta *meta;
6054
6055         if (!cpumask_test_cpu(cpu, buffer->cpumask))
6056                 return;
6057
6058         /* prevent another thread from changing buffer sizes */
6059         mutex_lock(&buffer->mutex);
6060
6061         atomic_inc(&cpu_buffer->resize_disabled);
6062         atomic_inc(&cpu_buffer->record_disabled);
6063
6064         /* Make sure all commits have finished */
6065         synchronize_rcu();
6066
6067         reset_disabled_cpu_buffer(cpu_buffer);
6068
6069         atomic_dec(&cpu_buffer->record_disabled);
6070         atomic_dec(&cpu_buffer->resize_disabled);
6071
6072         /* Make sure persistent meta now uses this buffer's addresses */
6073         meta = rb_range_meta(buffer, 0, cpu_buffer->cpu);
6074         if (meta)
6075                 rb_meta_init_text_addr(meta);
6076
6077         mutex_unlock(&buffer->mutex);
6078 }
6079 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
6080
6081 /* Flag to ensure proper resetting of atomic variables */
6082 #define RESET_BIT       (1 << 30)
6083
6084 /**
6085  * ring_buffer_reset_online_cpus - reset all online per-CPU buffers
6086  * @buffer: The ring buffer whose online per-CPU buffers to reset
6087  */
6088 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
6089 {
6090         struct ring_buffer_per_cpu *cpu_buffer;
6091         struct ring_buffer_meta *meta;
6092         int cpu;
6093
6094         /* prevent another thread from changing buffer sizes */
6095         mutex_lock(&buffer->mutex);
6096
6097         for_each_online_buffer_cpu(buffer, cpu) {
6098                 cpu_buffer = buffer->buffers[cpu];
6099
6100                 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled);
6101                 atomic_inc(&cpu_buffer->record_disabled);
6102         }
6103
6104         /* Make sure all commits have finished */
6105         synchronize_rcu();
6106
6107         for_each_buffer_cpu(buffer, cpu) {
6108                 cpu_buffer = buffer->buffers[cpu];
6109
6110                 /*
6111                  * If a CPU came online during the synchronize_rcu(), then
6112                  * ignore it.
6113                  */
6114                 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT))
6115                         continue;
6116
6117                 reset_disabled_cpu_buffer(cpu_buffer);
6118
6119                 /* Make sure persistent meta now uses this buffer's addresses */
6120                 meta = rb_range_meta(buffer, 0, cpu_buffer->cpu);
6121                 if (meta)
6122                         rb_meta_init_text_addr(meta);
6123
6124                 atomic_dec(&cpu_buffer->record_disabled);
6125                 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
6126         }
6127
6128         mutex_unlock(&buffer->mutex);
6129 }
6130
6131 /**
6132  * ring_buffer_reset - reset a ring buffer
6133  * @buffer: The ring buffer whose per-CPU buffers to reset
6134  */
6135 void ring_buffer_reset(struct trace_buffer *buffer)
6136 {
6137         struct ring_buffer_per_cpu *cpu_buffer;
6138         int cpu;
6139
6140         /* prevent another thread from changing buffer sizes */
6141         mutex_lock(&buffer->mutex);
6142
6143         for_each_buffer_cpu(buffer, cpu) {
6144                 cpu_buffer = buffer->buffers[cpu];
6145
6146                 atomic_inc(&cpu_buffer->resize_disabled);
6147                 atomic_inc(&cpu_buffer->record_disabled);
6148         }
6149
6150         /* Make sure all commits have finished */
6151         synchronize_rcu();
6152
6153         for_each_buffer_cpu(buffer, cpu) {
6154                 cpu_buffer = buffer->buffers[cpu];
6155
6156                 reset_disabled_cpu_buffer(cpu_buffer);
6157
6158                 atomic_dec(&cpu_buffer->record_disabled);
6159                 atomic_dec(&cpu_buffer->resize_disabled);
6160         }
6161
6162         mutex_unlock(&buffer->mutex);
6163 }
6164 EXPORT_SYMBOL_GPL(ring_buffer_reset);
6165
6166 /**
6167  * ring_buffer_empty - is the ring buffer empty?
6168  * @buffer: The ring buffer to test
6169  */
6170 bool ring_buffer_empty(struct trace_buffer *buffer)
6171 {
6172         struct ring_buffer_per_cpu *cpu_buffer;
6173         unsigned long flags;
6174         bool dolock;
6175         bool ret;
6176         int cpu;
6177
6178         /* yes this is racy, but if you don't like the race, lock the buffer */
6179         for_each_buffer_cpu(buffer, cpu) {
6180                 cpu_buffer = buffer->buffers[cpu];
6181                 local_irq_save(flags);
6182                 dolock = rb_reader_lock(cpu_buffer);
6183                 ret = rb_per_cpu_empty(cpu_buffer);
6184                 rb_reader_unlock(cpu_buffer, dolock);
6185                 local_irq_restore(flags);
6186
6187                 if (!ret)
6188                         return false;
6189         }
6190
6191         return true;
6192 }
6193 EXPORT_SYMBOL_GPL(ring_buffer_empty);
6194
6195 /**
6196  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
6197  * @buffer: The ring buffer
6198  * @cpu: The CPU buffer to test
6199  */
6200 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
6201 {
6202         struct ring_buffer_per_cpu *cpu_buffer;
6203         unsigned long flags;
6204         bool dolock;
6205         bool ret;
6206
6207         if (!cpumask_test_cpu(cpu, buffer->cpumask))
6208                 return true;
6209
6210         cpu_buffer = buffer->buffers[cpu];
6211         local_irq_save(flags);
6212         dolock = rb_reader_lock(cpu_buffer);
6213         ret = rb_per_cpu_empty(cpu_buffer);
6214         rb_reader_unlock(cpu_buffer, dolock);
6215         local_irq_restore(flags);
6216
6217         return ret;
6218 }
6219 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
6220
6221 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
6222 /**
6223  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
6224  * @buffer_a: One buffer to swap with
6225  * @buffer_b: The other buffer to swap with
6226  * @cpu: the CPU of the buffers to swap
6227  *
6228  * This function is useful for tracers that want to take a "snapshot"
6229  * of a CPU buffer and have another backup buffer lying around.
6230  * It is expected that the tracer handles the cpu buffer not being
6231  * used at the moment.
6232  */
6233 int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
6234                          struct trace_buffer *buffer_b, int cpu)
6235 {
6236         struct ring_buffer_per_cpu *cpu_buffer_a;
6237         struct ring_buffer_per_cpu *cpu_buffer_b;
6238         int ret = -EINVAL;
6239
6240         if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
6241             !cpumask_test_cpu(cpu, buffer_b->cpumask))
6242                 goto out;
6243
6244         cpu_buffer_a = buffer_a->buffers[cpu];
6245         cpu_buffer_b = buffer_b->buffers[cpu];
6246
6247         /* It's up to the callers to not try to swap mapped buffers */
6248         if (WARN_ON_ONCE(cpu_buffer_a->mapped || cpu_buffer_b->mapped)) {
6249                 ret = -EBUSY;
6250                 goto out;
6251         }
6252
6253         /* At least make sure the two buffers are somewhat the same */
6254         if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
6255                 goto out;
6256
6257         if (buffer_a->subbuf_order != buffer_b->subbuf_order)
6258                 goto out;
6259
6260         ret = -EAGAIN;
6261
6262         if (atomic_read(&buffer_a->record_disabled))
6263                 goto out;
6264
6265         if (atomic_read(&buffer_b->record_disabled))
6266                 goto out;
6267
6268         if (atomic_read(&cpu_buffer_a->record_disabled))
6269                 goto out;
6270
6271         if (atomic_read(&cpu_buffer_b->record_disabled))
6272                 goto out;
6273
6274         /*
6275          * We can't do a synchronize_rcu here because this
6276          * function can be called in atomic context.
6277          * Normally this will be called from the same CPU as cpu.
6278          * If not it's up to the caller to protect this.
6279          */
6280         atomic_inc(&cpu_buffer_a->record_disabled);
6281         atomic_inc(&cpu_buffer_b->record_disabled);
6282
6283         ret = -EBUSY;
6284         if (local_read(&cpu_buffer_a->committing))
6285                 goto out_dec;
6286         if (local_read(&cpu_buffer_b->committing))
6287                 goto out_dec;
6288
6289         /*
6290          * When resize is in progress, we cannot swap it because
6291          * it will mess the state of the cpu buffer.
6292          */
6293         if (atomic_read(&buffer_a->resizing))
6294                 goto out_dec;
6295         if (atomic_read(&buffer_b->resizing))
6296                 goto out_dec;
6297
6298         buffer_a->buffers[cpu] = cpu_buffer_b;
6299         buffer_b->buffers[cpu] = cpu_buffer_a;
6300
6301         cpu_buffer_b->buffer = buffer_a;
6302         cpu_buffer_a->buffer = buffer_b;
6303
6304         ret = 0;
6305
6306 out_dec:
6307         atomic_dec(&cpu_buffer_a->record_disabled);
6308         atomic_dec(&cpu_buffer_b->record_disabled);
6309 out:
6310         return ret;
6311 }
6312 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
6313 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
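
/*
 * Example (illustrative sketch only, CONFIG_RING_BUFFER_ALLOW_SWAP):
 * taking a per-CPU "snapshot" by swapping the live buffer with a spare
 * one, the way a tracer might.  "main_buffer" and "snapshot_buffer" are
 * assumptions; per the checks above, both need matching nr_pages and
 * subbuf_order, and neither may be user-mapped or resizing.
 *
 *	int ret = ring_buffer_swap_cpu(main_buffer, snapshot_buffer, cpu);
 *
 *	if (!ret)
 *		drain_snapshot(snapshot_buffer, cpu);
 *
 * where drain_snapshot() is a hypothetical helper that empties the
 * now-idle buffer, e.g. with ring_buffer_consume().
 */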
6314
6315 /**
6316  * ring_buffer_alloc_read_page - allocate a page to read from buffer
6317  * @buffer: the buffer to allocate for.
6318  * @cpu: the cpu buffer to allocate.
6319  *
6320  * This function is used in conjunction with ring_buffer_read_page.
6321  * When reading a full page from the ring buffer, these functions
6322  * can be used to speed up the process. The calling function should
6323  * allocate a few pages first with this function. Then when it
6324  * needs to get pages from the ring buffer, it passes the result
6325  * of this function into ring_buffer_read_page, which will swap
6326  * the page that was allocated with the read page of the buffer.
6327  *
6328  * Returns:
6329  *  The page allocated, or ERR_PTR
6330  */
6331 struct buffer_data_read_page *
6332 ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
6333 {
6334         struct ring_buffer_per_cpu *cpu_buffer;
6335         struct buffer_data_read_page *bpage = NULL;
6336         unsigned long flags;
6337         struct page *page;
6338
6339         if (!cpumask_test_cpu(cpu, buffer->cpumask))
6340                 return ERR_PTR(-ENODEV);
6341
6342         bpage = kzalloc(sizeof(*bpage), GFP_KERNEL);
6343         if (!bpage)
6344                 return ERR_PTR(-ENOMEM);
6345
6346         bpage->order = buffer->subbuf_order;
6347         cpu_buffer = buffer->buffers[cpu];
6348         local_irq_save(flags);
6349         arch_spin_lock(&cpu_buffer->lock);
6350
6351         if (cpu_buffer->free_page) {
6352                 bpage->data = cpu_buffer->free_page;
6353                 cpu_buffer->free_page = NULL;
6354         }
6355
6356         arch_spin_unlock(&cpu_buffer->lock);
6357         local_irq_restore(flags);
6358
6359         if (bpage->data)
6360                 goto out;
6361
6362         page = alloc_pages_node(cpu_to_node(cpu),
6363                                 GFP_KERNEL | __GFP_NORETRY | __GFP_COMP | __GFP_ZERO,
6364                                 cpu_buffer->buffer->subbuf_order);
6365         if (!page) {
6366                 kfree(bpage);
6367                 return ERR_PTR(-ENOMEM);
6368         }
6369
6370         bpage->data = page_address(page);
6371
6372  out:
6373         rb_init_page(bpage->data);
6374
6375         return bpage;
6376 }
6377 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
6378
6379 /**
6380  * ring_buffer_free_read_page - free an allocated read page
6381  * @buffer: the buffer the page was allocated for
6382  * @cpu: the cpu buffer the page came from
6383  * @data_page: the page to free
6384  *
6385  * Free a page allocated from ring_buffer_alloc_read_page.
6386  */
6387 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
6388                                 struct buffer_data_read_page *data_page)
6389 {
6390         struct ring_buffer_per_cpu *cpu_buffer;
6391         struct buffer_data_page *bpage = data_page->data;
6392         struct page *page = virt_to_page(bpage);
6393         unsigned long flags;
6394
6395         if (!buffer || !buffer->buffers || !buffer->buffers[cpu])
6396                 return;
6397
6398         cpu_buffer = buffer->buffers[cpu];
6399
6400         /*
6401          * If the page is still in use someplace else, or the order of
6402          * the page is different from the subbuffer order of the buffer,
6403          * we can't reuse it.
6404          */
6405         if (page_ref_count(page) > 1 || data_page->order != buffer->subbuf_order)
6406                 goto out;
6407
6408         local_irq_save(flags);
6409         arch_spin_lock(&cpu_buffer->lock);
6410
6411         if (!cpu_buffer->free_page) {
6412                 cpu_buffer->free_page = bpage;
6413                 bpage = NULL;
6414         }
6415
6416         arch_spin_unlock(&cpu_buffer->lock);
6417         local_irq_restore(flags);
6418
6419  out:
6420         free_pages((unsigned long)bpage, data_page->order);
6421         kfree(data_page);
6422 }
6423 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
6424
6425 /**
6426  * ring_buffer_read_page - extract a page from the ring buffer
6427  * @buffer: buffer to extract from
6428  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
6429  * @len: amount to extract
6430  * @cpu: the cpu of the buffer to extract
6431  * @full: should the extraction only happen when the page is full.
6432  *
6433  * This function will pull out a page from the ring buffer and consume it.
6434  * @data_page must be the address of the variable that was returned
6435  * from ring_buffer_alloc_read_page. This is because the page might be used
6436  * to swap with a page in the ring buffer.
6437  *
6438  * for example:
6439  *      rpage = ring_buffer_alloc_read_page(buffer, cpu);
6440  *      if (IS_ERR(rpage))
6441  *              return PTR_ERR(rpage);
6442  *      ret = ring_buffer_read_page(buffer, rpage, len, cpu, 0);
6443  *      if (ret >= 0)
6444  *              process_page(ring_buffer_read_page_data(rpage), ret);
6445  *      ring_buffer_free_read_page(buffer, cpu, rpage);
6446  *
6447  * When @full is set, the function will not succeed unless
6448  * the writer is off the reader page.
6449  *
6450  * Note: it is up to the calling functions to handle sleeps and wakeups.
6451  *  The ring buffer can be used anywhere in the kernel and cannot
6452  *  blindly call wake_up. The layer that uses the ring buffer must be
6453  *  responsible for that.
6454  *
6455  * Returns:
6456  *  >=0 if data has been transferred, returns the offset of consumed data.
6457  *  <0 if no data has been transferred.
6458  */
6459 int ring_buffer_read_page(struct trace_buffer *buffer,
6460                           struct buffer_data_read_page *data_page,
6461                           size_t len, int cpu, int full)
6462 {
6463         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
6464         struct ring_buffer_event *event;
6465         struct buffer_data_page *bpage;
6466         struct buffer_page *reader;
6467         unsigned long missed_events;
6468         unsigned long flags;
6469         unsigned int commit;
6470         unsigned int read;
6471         u64 save_timestamp;
6472         int ret = -1;
6473
6474         if (!cpumask_test_cpu(cpu, buffer->cpumask))
6475                 goto out;
6476
6477         /*
6478          * If len is not big enough to hold the page header, then
6479          * we cannot copy anything.
6480          */
6481         if (len <= BUF_PAGE_HDR_SIZE)
6482                 goto out;
6483
6484         len -= BUF_PAGE_HDR_SIZE;
6485
6486         if (!data_page || !data_page->data)
6487                 goto out;
6488         if (data_page->order != buffer->subbuf_order)
6489                 goto out;
6490
6491         bpage = data_page->data;
6492         if (!bpage)
6493                 goto out;
6494
6495         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
6496
6497         reader = rb_get_reader_page(cpu_buffer);
6498         if (!reader)
6499                 goto out_unlock;
6500
6501         event = rb_reader_event(cpu_buffer);
6502
6503         read = reader->read;
6504         commit = rb_page_size(reader);
6505
6506         /* Check if any events were dropped */
6507         missed_events = cpu_buffer->lost_events;
6508
6509         /*
6510          * If this page has been partially read or
6511          * if len is not big enough to read the rest of the page or
6512          * a writer is still on the page, then
6513          * we must copy the data from the page to the buffer.
6514          * Otherwise, we can simply swap the page with the one passed in.
6515          */
6516         if (read || (len < (commit - read)) ||
6517             cpu_buffer->reader_page == cpu_buffer->commit_page ||
6518             cpu_buffer->mapped) {
6519                 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
6520                 unsigned int rpos = read;
6521                 unsigned int pos = 0;
6522                 unsigned int size;
6523
6524                 /*
6525                  * If a full page is expected, the copy can still succeed
6526                  * when there has been a previous partial read, the rest of
6527                  * the page fits in @len, and the commit page is off the
6528                  * reader page.
6529                  */
6530                 if (full &&
6531                     (!read || (len < (commit - read)) ||
6532                      cpu_buffer->reader_page == cpu_buffer->commit_page))
6533                         goto out_unlock;
6534
6535                 if (len > (commit - read))
6536                         len = (commit - read);
6537
6538                 /* Always keep the time extend and data together */
6539                 size = rb_event_ts_length(event);
6540
6541                 if (len < size)
6542                         goto out_unlock;
6543
6544                 /* save the current timestamp, since the user will need it */
6545                 save_timestamp = cpu_buffer->read_stamp;
6546
6547                 /* Need to copy one event at a time */
6548                 do {
6549                         /* We need the size of one event, because
6550                          * rb_advance_reader only advances by one event,
6551                          * whereas rb_event_ts_length may include the size of
6552                          * one or two events.
6553                          * We have already ensured there's enough space if this
6554                          * is a time extend. */
6555                         size = rb_event_length(event);
6556                         memcpy(bpage->data + pos, rpage->data + rpos, size);
6557
6558                         len -= size;
6559
6560                         rb_advance_reader(cpu_buffer);
6561                         rpos = reader->read;
6562                         pos += size;
6563
6564                         if (rpos >= commit)
6565                                 break;
6566
6567                         event = rb_reader_event(cpu_buffer);
6568                         /* Always keep the time extend and data together */
6569                         size = rb_event_ts_length(event);
6570                 } while (len >= size);
6571
6572                 /* update bpage */
6573                 local_set(&bpage->commit, pos);
6574                 bpage->time_stamp = save_timestamp;
6575
6576                 /* we copied everything to the beginning */
6577                 read = 0;
6578         } else {
6579                 /* update the entry counter */
6580                 cpu_buffer->read += rb_page_entries(reader);
6581                 cpu_buffer->read_bytes += rb_page_size(reader);
6582
6583                 /* swap the pages */
6584                 rb_init_page(bpage);
6585                 bpage = reader->page;
6586                 reader->page = data_page->data;
6587                 local_set(&reader->write, 0);
6588                 local_set(&reader->entries, 0);
6589                 reader->read = 0;
6590                 data_page->data = bpage;
6591
6592                 /*
6593                  * Use the real_end for the data size.
6594                  * This gives us a chance to store the lost events
6595                  * on the page.
6596                  */
6597                 if (reader->real_end)
6598                         local_set(&bpage->commit, reader->real_end);
6599         }
6600         ret = read;
6601
6602         cpu_buffer->lost_events = 0;
6603
6604         commit = local_read(&bpage->commit);
6605         /*
6606          * Set a flag in the commit field if we lost events
6607          */
6608         if (missed_events) {
6609                 /* If there is room at the end of the page to save the
6610                  * missed events, then record it there.
6611                  */
6612                 if (buffer->subbuf_size - commit >= sizeof(missed_events)) {
6613                         memcpy(&bpage->data[commit], &missed_events,
6614                                sizeof(missed_events));
6615                         local_add(RB_MISSED_STORED, &bpage->commit);
6616                         commit += sizeof(missed_events);
6617                 }
6618                 local_add(RB_MISSED_EVENTS, &bpage->commit);
6619         }
6620
6621         /*
6622          * This page may be off to user land. Zero it out here.
6623          */
6624         if (commit < buffer->subbuf_size)
6625                 memset(&bpage->data[commit], 0, buffer->subbuf_size - commit);
6626
6627  out_unlock:
6628         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
6629
6630  out:
6631         return ret;
6632 }
6633 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
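
/*
 * Example (illustrative sketch only): decoding the commit field of a
 * page filled by ring_buffer_read_page() above.  The low bits hold the
 * data size; RB_MISSED_EVENTS and RB_MISSED_STORED are the flags set
 * above when events were dropped.  "rpage" is assumed to come from
 * ring_buffer_alloc_read_page() and to have been filled successfully.
 *
 *	struct buffer_data_page *bpage = ring_buffer_read_page_data(rpage);
 *	unsigned long commit = local_read(&bpage->commit);
 *	unsigned long size = commit & ~(RB_MISSED_EVENTS | RB_MISSED_STORED);
 *
 *	if (commit & RB_MISSED_EVENTS) {
 *		unsigned long missed = 0;
 *
 *		if (commit & RB_MISSED_STORED)
 *			memcpy(&missed, &bpage->data[size], sizeof(missed));
 *		pr_warn("missed %lu events\n", missed);
 *	}
 */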
6634
6635 /**
6636  * ring_buffer_read_page_data - get pointer to the data in the page.
6637  * @page:  the page to get the data from
6638  *
6639  * Returns pointer to the actual data in this page.
6640  */
6641 void *ring_buffer_read_page_data(struct buffer_data_read_page *page)
6642 {
6643         return page->data;
6644 }
6645 EXPORT_SYMBOL_GPL(ring_buffer_read_page_data);
6646
6647 /**
6648  * ring_buffer_subbuf_size_get - get size of the sub buffer.
6649  * @buffer: the buffer to get the sub buffer size from
6650  *
6651  * Returns size of the sub buffer, in bytes.
6652  */
6653 int ring_buffer_subbuf_size_get(struct trace_buffer *buffer)
6654 {
6655         return buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
6656 }
6657 EXPORT_SYMBOL_GPL(ring_buffer_subbuf_size_get);
6658
6659 /**
6660  * ring_buffer_subbuf_order_get - get the order of system pages in one sub buffer.
6661  * @buffer: The ring_buffer to get the system sub page order from
6662  *
6663  * By default, one ring buffer sub page equals one system page. This parameter
6664  * is configurable per ring buffer. The size of the ring buffer sub page can be
6665  * extended, but must be an order (power of two) of the system page size.
6666  *
6667  * Returns the order of buffer sub page size, in system pages:
6668  * 0 means the sub buffer size is 1 system page and so forth.
6669  * In case of an error < 0 is returned.
6670  */
6671 int ring_buffer_subbuf_order_get(struct trace_buffer *buffer)
6672 {
6673         if (!buffer)
6674                 return -EINVAL;
6675
6676         return buffer->subbuf_order;
6677 }
6678 EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_get);
6679
6680 /**
6681  * ring_buffer_subbuf_order_set - set the size of ring buffer sub page.
6682  * @buffer: The ring_buffer to set the new page size.
6683  * @order: Order of the system pages in one sub buffer page
6684  *
6685  * By default, one ring buffer page equals one system page. This API can be
6686  * used to set a new size of the ring buffer page. The size must be an order
6687  * of the system page size, which is why the input parameter @order is the
6688  * order of system pages that are allocated for one ring buffer page:
6689  *  0 - 1 system page
6690  *  1 - 2 system pages
6691  *  2 - 4 system pages
6692  *  ...
6693  *
6694  * Returns 0 on success or < 0 in case of an error.
6695  */
6696 int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
6697 {
6698         struct ring_buffer_per_cpu *cpu_buffer;
6699         struct buffer_page *bpage, *tmp;
6700         int old_order, old_size;
6701         int nr_pages;
6702         int psize;
6703         int err;
6704         int cpu;
6705
6706         if (!buffer || order < 0)
6707                 return -EINVAL;
6708
6709         if (buffer->subbuf_order == order)
6710                 return 0;
6711
6712         psize = (1 << order) * PAGE_SIZE;
6713         if (psize <= BUF_PAGE_HDR_SIZE)
6714                 return -EINVAL;
6715
6716         /* Size of a subbuf cannot be greater than the write counter */
6717         if (psize > RB_WRITE_MASK + 1)
6718                 return -EINVAL;
6719
6720         old_order = buffer->subbuf_order;
6721         old_size = buffer->subbuf_size;
6722
6723         /* prevent another thread from changing buffer sizes */
6724         mutex_lock(&buffer->mutex);
6725         atomic_inc(&buffer->record_disabled);
6726
6727         /* Make sure all commits have finished */
6728         synchronize_rcu();
6729
6730         buffer->subbuf_order = order;
6731         buffer->subbuf_size = psize - BUF_PAGE_HDR_SIZE;
6732
6733         /* Make sure all new buffers are allocated, before deleting the old ones */
6734         for_each_buffer_cpu(buffer, cpu) {
6735
6736                 if (!cpumask_test_cpu(cpu, buffer->cpumask))
6737                         continue;
6738
6739                 cpu_buffer = buffer->buffers[cpu];
6740
6741                 if (cpu_buffer->mapped) {
6742                         err = -EBUSY;
6743                         goto error;
6744                 }
6745
6746                 /* Update the number of pages to match the new size */
6747                 nr_pages = old_size * buffer->buffers[cpu]->nr_pages;
6748                 nr_pages = DIV_ROUND_UP(nr_pages, buffer->subbuf_size);
6749
6750                 /* we need a minimum of two pages */
6751                 if (nr_pages < 2)
6752                         nr_pages = 2;
6753
6754                 cpu_buffer->nr_pages_to_update = nr_pages;
6755
6756                 /* Include the reader page */
6757                 nr_pages++;
6758
6759                 /* Allocate the new size buffer */
6760                 INIT_LIST_HEAD(&cpu_buffer->new_pages);
6761                 if (__rb_allocate_pages(cpu_buffer, nr_pages,
6762                                         &cpu_buffer->new_pages)) {
6763                         /* not enough memory for new pages */
6764                         err = -ENOMEM;
6765                         goto error;
6766                 }
6767         }
6768
6769         for_each_buffer_cpu(buffer, cpu) {
6770                 struct buffer_data_page *old_free_data_page;
6771                 struct list_head old_pages;
6772                 unsigned long flags;
6773
6774                 if (!cpumask_test_cpu(cpu, buffer->cpumask))
6775                         continue;
6776
6777                 cpu_buffer = buffer->buffers[cpu];
6778
6779                 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
6780
6781                 /* Clear the head bit so the linked list can be read normally */
6782                 rb_head_page_deactivate(cpu_buffer);
6783
6784                 /*
6785                  * Collect buffers from the cpu_buffer pages list and the
6786                  * reader_page on old_pages, so they can be freed later when not
6787                  * under a spinlock. The pages list is a linked list with no
6788                  * head; adding old_pages turns it into a regular list with
6789                  * old_pages being the head.
6790                  */
6791                 list_add(&old_pages, cpu_buffer->pages);
6792                 list_add(&cpu_buffer->reader_page->list, &old_pages);
6793
6794                 /* One page was allocated for the reader page */
6795                 cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next,
6796                                                      struct buffer_page, list);
6797                 list_del_init(&cpu_buffer->reader_page->list);
6798
6799                 /* Install the new pages, remove the head from the list */
6800                 cpu_buffer->pages = cpu_buffer->new_pages.next;
6801                 list_del_init(&cpu_buffer->new_pages);
6802                 cpu_buffer->cnt++;
6803
6804                 cpu_buffer->head_page
6805                         = list_entry(cpu_buffer->pages, struct buffer_page, list);
6806                 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
6807
6808                 cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update;
6809                 cpu_buffer->nr_pages_to_update = 0;
6810
6811                 old_free_data_page = cpu_buffer->free_page;
6812                 cpu_buffer->free_page = NULL;
6813
6814                 rb_head_page_activate(cpu_buffer);
6815
6816                 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
6817
6818                 /* Free old sub buffers */
6819                 list_for_each_entry_safe(bpage, tmp, &old_pages, list) {
6820                         list_del_init(&bpage->list);
6821                         free_buffer_page(bpage);
6822                 }
6823                 free_pages((unsigned long)old_free_data_page, old_order);
6824
6825                 rb_check_pages(cpu_buffer);
6826         }
6827
6828         atomic_dec(&buffer->record_disabled);
6829         mutex_unlock(&buffer->mutex);
6830
6831         return 0;
6832
6833 error:
6834         buffer->subbuf_order = old_order;
6835         buffer->subbuf_size = old_size;
6836
6837         atomic_dec(&buffer->record_disabled);
6838         mutex_unlock(&buffer->mutex);
6839
6840         for_each_buffer_cpu(buffer, cpu) {
6841                 cpu_buffer = buffer->buffers[cpu];
6842
6843                 if (!cpu_buffer->nr_pages_to_update)
6844                         continue;
6845
6846                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) {
6847                         list_del_init(&bpage->list);
6848                         free_buffer_page(bpage);
6849                 }
6850         }
6851
6852         return err;
6853 }
6854 EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_set);
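
/*
 * Example (illustrative sketch only): growing the sub buffers to four
 * system pages (order 2) and reading the result back.  With 4K system
 * pages this gives 16K sub buffers, BUF_PAGE_HDR_SIZE of which is the
 * page header.  "my_buffer" is an assumption of the example.
 *
 *	int err = ring_buffer_subbuf_order_set(my_buffer, 2);
 *
 *	if (!err)
 *		pr_info("subbuf order=%d size=%d\n",
 *			ring_buffer_subbuf_order_get(my_buffer),
 *			ring_buffer_subbuf_size_get(my_buffer));
 */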
6855
6856 static int rb_alloc_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
6857 {
6858         struct page *page;
6859
6860         if (cpu_buffer->meta_page)
6861                 return 0;
6862
6863         page = alloc_page(GFP_USER | __GFP_ZERO);
6864         if (!page)
6865                 return -ENOMEM;
6866
6867         cpu_buffer->meta_page = page_to_virt(page);
6868
6869         return 0;
6870 }
6871
6872 static void rb_free_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
6873 {
6874         unsigned long addr = (unsigned long)cpu_buffer->meta_page;
6875
6876         free_page(addr);
6877         cpu_buffer->meta_page = NULL;
6878 }
6879
6880 static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
6881                                    unsigned long *subbuf_ids)
6882 {
6883         struct trace_buffer_meta *meta = cpu_buffer->meta_page;
6884         unsigned int nr_subbufs = cpu_buffer->nr_pages + 1;
6885         struct buffer_page *first_subbuf, *subbuf;
6886         int id = 0;
6887
6888         subbuf_ids[id] = (unsigned long)cpu_buffer->reader_page->page;
6889         cpu_buffer->reader_page->id = id++;
6890
6891         first_subbuf = subbuf = rb_set_head_page(cpu_buffer);
6892         do {
6893                 if (WARN_ON(id >= nr_subbufs))
6894                         break;
6895
6896                 subbuf_ids[id] = (unsigned long)subbuf->page;
6897                 subbuf->id = id;
6898
6899                 rb_inc_page(&subbuf);
6900                 id++;
6901         } while (subbuf != first_subbuf);
6902
6903         /* install subbuf ID to kern VA translation */
6904         cpu_buffer->subbuf_ids = subbuf_ids;
6905
6906         meta->meta_struct_len = sizeof(*meta);
6907         meta->nr_subbufs = nr_subbufs;
6908         meta->subbuf_size = cpu_buffer->buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
6909         meta->meta_page_size = meta->subbuf_size;
6910
6911         rb_update_meta_page(cpu_buffer);
6912 }
6913
6914 static struct ring_buffer_per_cpu *
6915 rb_get_mapped_buffer(struct trace_buffer *buffer, int cpu)
6916 {
6917         struct ring_buffer_per_cpu *cpu_buffer;
6918
6919         if (!cpumask_test_cpu(cpu, buffer->cpumask))
6920                 return ERR_PTR(-EINVAL);
6921
6922         cpu_buffer = buffer->buffers[cpu];
6923
6924         mutex_lock(&cpu_buffer->mapping_lock);
6925
6926         if (!cpu_buffer->user_mapped) {
6927                 mutex_unlock(&cpu_buffer->mapping_lock);
6928                 return ERR_PTR(-ENODEV);
6929         }
6930
6931         return cpu_buffer;
6932 }
6933
6934 static void rb_put_mapped_buffer(struct ring_buffer_per_cpu *cpu_buffer)
6935 {
6936         mutex_unlock(&cpu_buffer->mapping_lock);
6937 }
6938
6939 /*
6940  * Fast path for ring_buffer_(un)map(). Called whenever the meta-page doesn't
6941  * need to be set up or torn down.
6942  */
6943 static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer,
6944                                bool inc)
6945 {
6946         unsigned long flags;
6947
6948         lockdep_assert_held(&cpu_buffer->mapping_lock);
6949
6950         /* mapped is always greater or equal to user_mapped */
6951         if (WARN_ON(cpu_buffer->mapped < cpu_buffer->user_mapped))
6952                 return -EINVAL;
6953
6954         if (inc && cpu_buffer->mapped == UINT_MAX)
6955                 return -EBUSY;
6956
6957         if (WARN_ON(!inc && cpu_buffer->user_mapped == 0))
6958                 return -EINVAL;
6959
6960         mutex_lock(&cpu_buffer->buffer->mutex);
6961         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
6962
6963         if (inc) {
6964                 cpu_buffer->user_mapped++;
6965                 cpu_buffer->mapped++;
6966         } else {
6967                 cpu_buffer->user_mapped--;
6968                 cpu_buffer->mapped--;
6969         }
6970
6971         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
6972         mutex_unlock(&cpu_buffer->buffer->mutex);
6973
6974         return 0;
6975 }
6976
6977 /*
6978  *   +--------------+  pgoff == 0
6979  *   |   meta page  |
6980  *   +--------------+  pgoff == 1
6981  *   | subbuffer 0  |
6982  *   |              |
6983  *   +--------------+  pgoff == (1 + (1 << subbuf_order))
6984  *   | subbuffer 1  |
6985  *   |              |
6986  *         ...
6987  */
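
/*
 * A hypothetical helper (illustrative only, not used by this file)
 * making the geometry above concrete.  When subbuf_order > 0 the
 * meta-page region is padded with zero-pages out to a full sub-buffer
 * (see __rb_map_vma() below), so sub-buffer @s starts (@s + 1)
 * sub-buffers' worth of system pages into the mapping:
 *
 *	static inline unsigned long rb_subbuf_pgoff(unsigned int order,
 *						    unsigned long s)
 *	{
 *		return (s + 1) << order;
 *	}
 */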
6988 #ifdef CONFIG_MMU
6989 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
6990                         struct vm_area_struct *vma)
6991 {
6992         unsigned long nr_subbufs, nr_pages, nr_vma_pages, pgoff = vma->vm_pgoff;
6993         unsigned int subbuf_pages, subbuf_order;
6994         struct page **pages;
6995         int p = 0, s = 0;
6996         int err;
6997
6998         /* Refuse MAP_PRIVATE or writable mappings */
6999         if (vma->vm_flags & VM_WRITE || vma->vm_flags & VM_EXEC ||
7000             !(vma->vm_flags & VM_MAYSHARE))
7001                 return -EPERM;
7002
7003         subbuf_order = cpu_buffer->buffer->subbuf_order;
7004         subbuf_pages = 1 << subbuf_order;
7005
7006         if (subbuf_order && pgoff % subbuf_pages)
7007                 return -EINVAL;
7008
7009         /*
7010          * Make sure the mapping cannot become writable later. Also tell the VM
7011          * to not touch these pages (VM_DONTCOPY | VM_DONTEXPAND).
7012          */
7013         vm_flags_mod(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP,
7014                      VM_MAYWRITE);
7015
7016         lockdep_assert_held(&cpu_buffer->mapping_lock);
7017
7018         nr_subbufs = cpu_buffer->nr_pages + 1; /* + reader-subbuf */
7019         nr_pages = ((nr_subbufs + 1) << subbuf_order); /* + meta-page */
7020         if (nr_pages <= pgoff)
7021                 return -EINVAL;
7022
7023         nr_pages -= pgoff;
7024
7025         nr_vma_pages = vma_pages(vma);
7026         if (!nr_vma_pages || nr_vma_pages > nr_pages)
7027                 return -EINVAL;
7028
7029         nr_pages = nr_vma_pages;
7030
7031         pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
7032         if (!pages)
7033                 return -ENOMEM;
7034
7035         if (!pgoff) {
7036                 unsigned long meta_page_padding;
7037
7038                 pages[p++] = virt_to_page(cpu_buffer->meta_page);
7039
7040                 /*
7041                  * Pad with the zero-page to align the meta-page with the
7042                  * sub-buffers.
7043                  */
7044                 meta_page_padding = subbuf_pages - 1;
7045                 while (meta_page_padding-- && p < nr_pages) {
7046                         unsigned long __maybe_unused zero_addr =
7047                                 vma->vm_start + (PAGE_SIZE * p);
7048
7049                         pages[p++] = ZERO_PAGE(zero_addr);
7050                 }
7051         } else {
7052                 /* Skip the meta-page */
7053                 pgoff -= subbuf_pages;
7054
7055                 s += pgoff / subbuf_pages;
7056         }
7057
7058         while (p < nr_pages) {
7059                 struct page *page;
7060                 int off = 0;
7061
7062                 if (WARN_ON_ONCE(s >= nr_subbufs)) {
7063                         err = -EINVAL;
7064                         goto out;
7065                 }
7066
7067                 page = virt_to_page((void *)cpu_buffer->subbuf_ids[s]);
7068
7069                 for (; off < (1 << (subbuf_order)); off++, page++) {
7070                         if (p >= nr_pages)
7071                                 break;
7072
7073                         pages[p++] = page;
7074                 }
7075                 s++;
7076         }
7077
7078         err = vm_insert_pages(vma, vma->vm_start, pages, &nr_pages);
7079
7080 out:
7081         kfree(pages);
7082
7083         return err;
7084 }
7085 #else
7086 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
7087                         struct vm_area_struct *vma)
7088 {
7089         return -EOPNOTSUPP;
7090 }
7091 #endif
7092
7093 int ring_buffer_map(struct trace_buffer *buffer, int cpu,
7094                     struct vm_area_struct *vma)
7095 {
7096         struct ring_buffer_per_cpu *cpu_buffer;
7097         unsigned long flags, *subbuf_ids;
7098         int err = 0;
7099
7100         if (!cpumask_test_cpu(cpu, buffer->cpumask))
7101                 return -EINVAL;
7102
7103         cpu_buffer = buffer->buffers[cpu];
7104
7105         mutex_lock(&cpu_buffer->mapping_lock);
7106
7107         if (cpu_buffer->user_mapped) {
7108                 err = __rb_map_vma(cpu_buffer, vma);
7109                 if (!err)
7110                         err = __rb_inc_dec_mapped(cpu_buffer, true);
7111                 mutex_unlock(&cpu_buffer->mapping_lock);
7112                 return err;
7113         }
7114
7115         /* prevent another thread from changing buffer/sub-buffer sizes */
7116         mutex_lock(&buffer->mutex);
7117
7118         err = rb_alloc_meta_page(cpu_buffer);
7119         if (err)
7120                 goto unlock;
7121
7122         /* subbuf_ids include the reader while nr_pages does not */
7123         subbuf_ids = kcalloc(cpu_buffer->nr_pages + 1, sizeof(*subbuf_ids), GFP_KERNEL);
7124         if (!subbuf_ids) {
7125                 rb_free_meta_page(cpu_buffer);
7126                 err = -ENOMEM;
7127                 goto unlock;
7128         }
7129
7130         atomic_inc(&cpu_buffer->resize_disabled);
7131
7132         /*
7133          * Lock all readers to block any subbuf swap until the subbuf IDs are
7134          * assigned.
7135          */
7136         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7137         rb_setup_ids_meta_page(cpu_buffer, subbuf_ids);
7138
7139         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7140
7141         err = __rb_map_vma(cpu_buffer, vma);
7142         if (!err) {
7143                 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7144                 /* This is the first time it is mapped by user */
7145                 cpu_buffer->mapped++;
7146                 cpu_buffer->user_mapped = 1;
7147                 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7148         } else {
7149                 kfree(cpu_buffer->subbuf_ids);
7150                 cpu_buffer->subbuf_ids = NULL;
7151                 rb_free_meta_page(cpu_buffer);
7152                 atomic_dec(&cpu_buffer->resize_disabled);
7153         }
7154
7155 unlock:
7156         mutex_unlock(&buffer->mutex);
7157         mutex_unlock(&cpu_buffer->mapping_lock);
7158
7159         return err;
7160 }
7161
7162 int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
7163 {
7164         struct ring_buffer_per_cpu *cpu_buffer;
7165         unsigned long flags;
7166         int err = 0;
7167
7168         if (!cpumask_test_cpu(cpu, buffer->cpumask))
7169                 return -EINVAL;
7170
7171         cpu_buffer = buffer->buffers[cpu];
7172
7173         mutex_lock(&cpu_buffer->mapping_lock);
7174
7175         if (!cpu_buffer->user_mapped) {
7176                 err = -ENODEV;
7177                 goto out;
7178         } else if (cpu_buffer->user_mapped > 1) {
7179                 __rb_inc_dec_mapped(cpu_buffer, false);
7180                 goto out;
7181         }
7182
7183         mutex_lock(&buffer->mutex);
7184         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7185
7186         /* This is the last user space mapping */
7187         if (!WARN_ON_ONCE(cpu_buffer->mapped < cpu_buffer->user_mapped))
7188                 cpu_buffer->mapped--;
7189         cpu_buffer->user_mapped = 0;
7190
7191         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7192
7193         kfree(cpu_buffer->subbuf_ids);
7194         cpu_buffer->subbuf_ids = NULL;
7195         rb_free_meta_page(cpu_buffer);
7196         atomic_dec(&cpu_buffer->resize_disabled);
7197
7198         mutex_unlock(&buffer->mutex);
7199
7200 out:
7201         mutex_unlock(&cpu_buffer->mapping_lock);
7202
7203         return err;
7204 }
7205
7206 int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu)
7207 {
7208         struct ring_buffer_per_cpu *cpu_buffer;
7209         struct buffer_page *reader;
7210         unsigned long missed_events;
7211         unsigned long reader_size;
7212         unsigned long flags;
7213
7214         cpu_buffer = rb_get_mapped_buffer(buffer, cpu);
7215         if (IS_ERR(cpu_buffer))
7216                 return (int)PTR_ERR(cpu_buffer);
7217
7218         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7219
7220 consume:
7221         if (rb_per_cpu_empty(cpu_buffer))
7222                 goto out;
7223
7224         reader_size = rb_page_size(cpu_buffer->reader_page);
7225
7226         /*
7227          * There is data to be read on the current reader page: we can
7228          * return to the caller. But before that, we assume the caller will
7229          * read everything on it. Let's update the kernel reader accordingly.
7230          */
7231         if (cpu_buffer->reader_page->read < reader_size) {
7232                 while (cpu_buffer->reader_page->read < reader_size)
7233                         rb_advance_reader(cpu_buffer);
7234                 goto out;
7235         }
7236
7237         reader = rb_get_reader_page(cpu_buffer);
7238         if (WARN_ON(!reader))
7239                 goto out;
7240
7241         /* Check if any events were dropped */
7242         missed_events = cpu_buffer->lost_events;
7243
7244         if (cpu_buffer->reader_page != cpu_buffer->commit_page) {
7245                 if (missed_events) {
7246                         struct buffer_data_page *bpage = reader->page;
7247                         unsigned int commit;
7248                         /*
7249                          * Use the real_end for the data size.
7250                          * This gives us a chance to store the lost events
7251                          * on the page.
7252                          */
7253                         if (reader->real_end)
7254                                 local_set(&bpage->commit, reader->real_end);
7255                         /*
7256                          * If there is room at the end of the page to save the
7257                          * missed events, then record it there.
7258                          */
7259                         commit = rb_page_size(reader);
7260                         if (buffer->subbuf_size - commit >= sizeof(missed_events)) {
7261                                 memcpy(&bpage->data[commit], &missed_events,
7262                                        sizeof(missed_events));
7263                                 local_add(RB_MISSED_STORED, &bpage->commit);
7264                         }
7265                         local_add(RB_MISSED_EVENTS, &bpage->commit);
7266                 }
7267         } else {
7268                 /*
7269                  * There really shouldn't be any missed events if the commit
7270                  * is on the reader page.
7271                  */
7272                 WARN_ON_ONCE(missed_events);
7273         }
7274
7275         cpu_buffer->lost_events = 0;
7276
7277         goto consume;
7278
7279 out:
7280         /* Some archs do not have data cache coherency between kernel and user-space */
7281         flush_dcache_folio(virt_to_folio(cpu_buffer->reader_page->page));
7282
7283         rb_update_meta_page(cpu_buffer);
7284
7285         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7286         rb_put_mapped_buffer(cpu_buffer);
7287
7288         return 0;
7289 }
7290
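/*
 * Example (illustrative sketch only) of the intended user-space side of
 * the mapping and of ring_buffer_map_get_reader() above, assuming
 * tracefs' per-CPU trace_pipe_raw file and the uapi definitions from
 * <linux/trace_mmap.h> (struct trace_buffer_meta and
 * TRACE_MMAP_IOCTL_GET_READER); "page_size" is the system page size and
 * error handling is elided:
 *
 *	int fd = open("per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *	struct trace_buffer_meta *meta = mmap(NULL, page_size, PROT_READ,
 *					      MAP_SHARED, fd, 0);
 *
 *	ioctl(fd, TRACE_MMAP_IOCTL_GET_READER);
 *
 * After the ioctl (serviced by the function above), meta->reader.id
 * names the sub-buffer that is safe to read and meta->reader.lost_events
 * holds the dropped-event count.
 */
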
7291 /*
7292  * We only allocate new buffers, never free them if the CPU goes down.
7293  * If we were to free the buffer, then the user would lose any trace that was in
7294  * the buffer.
7295  */
7296 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
7297 {
7298         struct trace_buffer *buffer;
7299         long nr_pages_same;
7300         int cpu_i;
7301         unsigned long nr_pages;
7302
7303         buffer = container_of(node, struct trace_buffer, node);
7304         if (cpumask_test_cpu(cpu, buffer->cpumask))
7305                 return 0;
7306
7307         nr_pages = 0;
7308         nr_pages_same = 1;
7309         /* check if all cpu sizes are the same */
7310         for_each_buffer_cpu(buffer, cpu_i) {
7311                 /* fill in the size from first enabled cpu */
7312                 if (nr_pages == 0)
7313                         nr_pages = buffer->buffers[cpu_i]->nr_pages;
7314                 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
7315                         nr_pages_same = 0;
7316                         break;
7317                 }
7318         }
7319         /* allocate minimum pages, user can later expand it */
7320         if (!nr_pages_same)
7321                 nr_pages = 2;
7322         buffer->buffers[cpu] =
7323                 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
7324         if (!buffer->buffers[cpu]) {
7325                 WARN(1, "failed to allocate ring buffer on CPU %u\n",
7326                      cpu);
7327                 return -ENOMEM;
7328         }
7329         smp_wmb();
7330         cpumask_set_cpu(cpu, buffer->cpumask);
7331         return 0;
7332 }
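/*
 * Illustrative sketch (not part of this file): trace_rb_cpu_prepare()
 * is hooked into the multi-instance CPU hotplug state machine, and each
 * trace_buffer then registers its own node, roughly as below (error
 * handling omitted; "ret" is a local in the actual callers).
 */
#if 0   /* illustrative only */
        /* once, at tracing init: */
        ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
                                      "trace/RB:prepare",
                                      trace_rb_cpu_prepare, NULL);

        /* per ring buffer, at allocation time: */
        ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
#endif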
7333
7334 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
7335 /*
7336  * This is a basic integrity check of the ring buffer.
7337  * Late in the boot cycle this test will run when configured in.
7338  * It will kick off a thread per CPU that will go into a loop
7339  * writing to the per cpu ring buffer various sizes of data.
7340  * Some of the data will be large items, some small.
7341  *
7342  * Another thread is created that goes into a spin, sending out
7343  * IPIs to the other CPUs to also write into the ring buffer.
7344  * This is to test the nesting ability of the buffer.
7345  *
7346  * Basic stats are recorded and reported. If something unexpected
7347  * happens in the ring buffer, a big warning is displayed and all
7348  * ring buffers are disabled.
7349  */
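/*
 * The test below is built in with CONFIG_RING_BUFFER_STARTUP_TEST=y and
 * runs as a late_initcall(); results are reported via the kernel log.
 */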
7350 static struct task_struct *rb_threads[NR_CPUS] __initdata;
7351
7352 struct rb_test_data {
7353         struct trace_buffer *buffer;
7354         unsigned long           events;
7355         unsigned long           bytes_written;
7356         unsigned long           bytes_alloc;
7357         unsigned long           bytes_dropped;
7358         unsigned long           events_nested;
7359         unsigned long           bytes_written_nested;
7360         unsigned long           bytes_alloc_nested;
7361         unsigned long           bytes_dropped_nested;
7362         int                     min_size_nested;
7363         int                     max_size_nested;
7364         int                     max_size;
7365         int                     min_size;
7366         int                     cpu;
7367         int                     cnt;
7368 };
7369
7370 static struct rb_test_data rb_data[NR_CPUS] __initdata;
7371
7372 /* 1 meg per cpu */
7373 #define RB_TEST_BUFFER_SIZE     1048576
7374
7375 static char rb_string[] __initdata =
7376         "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
7377         "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
7378         "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
7379
7380 static bool rb_test_started __initdata;
7381
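/* One test record: "size" bytes copied from rb_string follow in str[] */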
7382 struct rb_item {
7383         int size;
7384         char str[];
7385 };
7386
7387 static __init int rb_write_something(struct rb_test_data *data, bool nested)
7388 {
7389         struct ring_buffer_event *event;
7390         struct rb_item *item;
7391         bool started;
7392         int event_len;
7393         int size;
7394         int len;
7395         int cnt;
7396
7397         /* Have nested writes different than what is written */
7398         cnt = data->cnt + (nested ? 27 : 0);
7399
7400         /* Multiply cnt by ~e (68/25 = 2.72) to make some unique increment */
7401         size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
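        /* e.g., cnt == 25 gives size == 68, assuming sizeof(rb_string) - 1 > 68 */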
7402
7403         len = size + sizeof(struct rb_item);
7404
7405         started = rb_test_started;
7406         /* read rb_test_started before checking buffer enabled */
7407         smp_rmb();
7408
7409         event = ring_buffer_lock_reserve(data->buffer, len);
7410         if (!event) {
7411                 /* Ignore dropped events before test starts. */
7412                 if (started) {
7413                         if (nested)
7414                                 data->bytes_dropped_nested += len;
7415                         else
7416                                 data->bytes_dropped += len;
7417                 }
7418                 return len;
7419         }
7420
7421         event_len = ring_buffer_event_length(event);
7422
7423         if (RB_WARN_ON(data->buffer, event_len < len))
7424                 goto out;
7425
7426         item = ring_buffer_event_data(event);
7427         item->size = size;
7428         memcpy(item->str, rb_string, size);
7429
7430         if (nested) {
7431                 data->bytes_alloc_nested += event_len;
7432                 data->bytes_written_nested += len;
7433                 data->events_nested++;
7434                 if (!data->min_size_nested || len < data->min_size_nested)
7435                         data->min_size_nested = len;
7436                 if (len > data->max_size_nested)
7437                         data->max_size_nested = len;
7438         } else {
7439                 data->bytes_alloc += event_len;
7440                 data->bytes_written += len;
7441                 data->events++;
7442                 if (!data->min_size || len < data->min_size)
7443                         data->min_size = len;
7444                 if (len > data->max_size)
7445                         data->max_size = len;
7446         }
7447
7448  out:
7449         ring_buffer_unlock_commit(data->buffer);
7450
7451         return 0;
7452 }
7453
7454 static __init int rb_test(void *arg)
7455 {
7456         struct rb_test_data *data = arg;
7457
7458         while (!kthread_should_stop()) {
7459                 rb_write_something(data, false);
7460                 data->cnt++;
7461
7462                 set_current_state(TASK_INTERRUPTIBLE);
7463                 /* Sleep for a minimum of 100-300us (varies with cnt), max of 1ms */
7464                 usleep_range(((data->cnt % 3) + 1) * 100, 1000);
7465         }
7466
7467         return 0;
7468 }
7469
7470 static __init void rb_ipi(void *ignore)
7471 {
7472         struct rb_test_data *data;
7473         int cpu = smp_processor_id();
7474
7475         data = &rb_data[cpu];
7476         rb_write_something(data, true);
7477 }
7478
7479 static __init int rb_hammer_test(void *arg)
7480 {
7481         while (!kthread_should_stop()) {
7482
7483                 /* Send an IPI to all other CPUs to write data! */
7484                 smp_call_function(rb_ipi, NULL, 1);
7485                 /* No sleep, but on non-preempt kernels, let others run */
7486                 schedule();
7487         }
7488
7489         return 0;
7490 }
7491
7492 static __init int test_ringbuffer(void)
7493 {
7494         struct task_struct *rb_hammer;
7495         struct trace_buffer *buffer;
7496         int cpu;
7497         int ret = 0;
7498
7499         if (security_locked_down(LOCKDOWN_TRACEFS)) {
7500                 pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
7501                 return 0;
7502         }
7503
7504         pr_info("Running ring buffer tests...\n");
7505
7506         buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
7507         if (WARN_ON(!buffer))
7508                 return 0;
7509
7510         /* Disable buffer so that threads can't write to it yet */
7511         ring_buffer_record_off(buffer);
7512
7513         for_each_online_cpu(cpu) {
7514                 rb_data[cpu].buffer = buffer;
7515                 rb_data[cpu].cpu = cpu;
7516                 rb_data[cpu].cnt = cpu;
7517                 rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
7518                                                      cpu, "rbtester/%u");
7519                 if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
7520                         pr_cont("FAILED\n");
7521                         ret = PTR_ERR(rb_threads[cpu]);
7522                         goto out_free;
7523                 }
7524         }
7525
7526         /* Now create the rb hammer! */
7527         rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
7528         if (WARN_ON(IS_ERR(rb_hammer))) {
7529                 pr_cont("FAILED\n");
7530                 ret = PTR_ERR(rb_hammer);
7531                 goto out_free;
7532         }
7533
7534         ring_buffer_record_on(buffer);
7535         /*
7536          * Show buffer is enabled before setting rb_test_started.
7537          * Yes there's a small race window where events could be
7538          * dropped and the thread won't catch it. But when a ring
7539          * buffer gets enabled, there will always be some kind of
7540          * delay before other CPUs see it. Thus, we don't care about
7541          * those dropped events. We care about events dropped after
7542          * the threads see that the buffer is active.
7543          */
7544         smp_wmb();
7545         rb_test_started = true;
7546
7547         set_current_state(TASK_INTERRUPTIBLE);
7548         /* Just run for 10 seconds */
7549         schedule_timeout(10 * HZ);
7550
7551         kthread_stop(rb_hammer);
7552
7553  out_free:
7554         for_each_online_cpu(cpu) {
7555                 if (!rb_threads[cpu])
7556                         break;
7557                 kthread_stop(rb_threads[cpu]);
7558         }
7559         if (ret) {
7560                 ring_buffer_free(buffer);
7561                 return ret;
7562         }
7563
7564         /* Report! */
7565         pr_info("finished\n");
7566         for_each_online_cpu(cpu) {
7567                 struct ring_buffer_event *event;
7568                 struct rb_test_data *data = &rb_data[cpu];
7569                 struct rb_item *item;
7570                 unsigned long total_events;
7571                 unsigned long total_dropped;
7572                 unsigned long total_written;
7573                 unsigned long total_alloc;
7574                 unsigned long total_read = 0;
7575                 unsigned long total_size = 0;
7576                 unsigned long total_len = 0;
7577                 unsigned long total_lost = 0;
7578                 unsigned long lost;
7579                 int big_event_size;
7580                 int small_event_size;
7581
7582                 ret = -1;
7583
7584                 total_events = data->events + data->events_nested;
7585                 total_written = data->bytes_written + data->bytes_written_nested;
7586                 total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
7587                 total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
7588
7589                 big_event_size = data->max_size + data->max_size_nested;
7590                 small_event_size = data->min_size + data->min_size_nested;
7591
7592                 pr_info("CPU %d:\n", cpu);
7593                 pr_info("              events:    %ld\n", total_events);
7594                 pr_info("       dropped bytes:    %ld\n", total_dropped);
7595                 pr_info("       alloced bytes:    %ld\n", total_alloc);
7596                 pr_info("       written bytes:    %ld\n", total_written);
7597                 pr_info("       biggest event:    %d\n", big_event_size);
7598                 pr_info("      smallest event:    %d\n", small_event_size);
7599
7600                 if (RB_WARN_ON(buffer, total_dropped))
7601                         break;
7602
7603                 ret = 0;
7604
7605                 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
7606                         total_lost += lost;
7607                         item = ring_buffer_event_data(event);
7608                         total_len += ring_buffer_event_length(event);
7609                         total_size += item->size + sizeof(struct rb_item);
7610                         if (memcmp(&item->str[0], rb_string, item->size) != 0) {
7611                                 pr_info("FAILED!\n");
7612                                 pr_info("buffer had: %.*s\n", item->size, item->str);
7613                                 pr_info("expected:   %.*s\n", item->size, rb_string);
7614                                 RB_WARN_ON(buffer, 1);
7615                                 ret = -1;
7616                                 break;
7617                         }
7618                         total_read++;
7619                 }
7620                 if (ret)
7621                         break;
7622
7623                 ret = -1;
7624
7625                 pr_info("         read events:   %ld\n", total_read);
7626                 pr_info("         lost events:   %ld\n", total_lost);
7627                 pr_info("        total events:   %ld\n", total_lost + total_read);
7628                 pr_info("  recorded len bytes:   %ld\n", total_len);
7629                 pr_info(" recorded size bytes:   %ld\n", total_size);
7630                 if (total_lost) {
7631                         pr_info(" With dropped events, record len and size may not match\n"
7632                                 " alloced and written from above\n");
7633                 } else {
7634                         if (RB_WARN_ON(buffer, total_len != total_alloc ||
7635                                        total_size != total_written))
7636                                 break;
7637                 }
7638                 if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
7639                         break;
7640
7641                 ret = 0;
7642         }
7643         if (!ret)
7644                 pr_info("Ring buffer PASSED!\n");
7645
7646         ring_buffer_free(buffer);
7647         return 0;
7648 }
7649
7650 late_initcall(test_ringbuffer);
7651 #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */