// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/kmsan-checks.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <linux/refcount.h>
#include <linux/log2.h>
#include <asm/setup.h>

#define kcov_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)

/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable coverage collection for the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time is
 * allowed); see the usage sketch after this struct definition.
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 *  - each code section for remote coverage collection
	 */
	refcount_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in long's). */
	unsigned int		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
	/* Collecting coverage from remote (background) threads. */
	bool			remote;
	/* Size of remote area (in long's). */
	unsigned int		remote_size;
	/*
	 * Sequence is incremented each time kcov is reenabled, used by
	 * kcov_remote_stop(), see the comment there.
	 */
	int			sequence;
};

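/*
 * Usage sketch for the state transitions above (adapted from
 * Documentation/dev-tools/kcov.rst; COVER_SIZE is an arbitrary user-chosen
 * buffer size in words, not a name defined in this file):
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *	... issue the syscall under test ...
 *	unsigned long n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	... cover[1..n] now hold the collected PCs ...
 *	ioctl(fd, KCOV_DISABLE, 0);
 */
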
struct kcov_remote_area {
	struct list_head	list;
	unsigned int		size;
};

struct kcov_remote {
	struct kcov		*kcov;
	u64			handle;
	struct hlist_node	hnode;
};

static DEFINE_SPINLOCK(kcov_remote_lock);
static DEFINE_HASHTABLE(kcov_remote_map, 4);
static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);

struct kcov_percpu_data {
	void			*irq_area;
	local_lock_t		lock;

	unsigned int		saved_mode;
	unsigned int		saved_size;
	void			*saved_area;
	struct kcov		*saved_kcov;
	int			saved_sequence;
};

static DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_find(u64 handle)
{
	struct kcov_remote *remote;

	hash_for_each_possible(kcov_remote_map, remote, hnode, handle) {
		if (remote->handle == handle)
			return remote;
	}
	return NULL;
}

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle)
{
	struct kcov_remote *remote;

	if (kcov_remote_find(handle))
		return ERR_PTR(-EEXIST);
	remote = kmalloc(sizeof(*remote), GFP_ATOMIC);
	if (!remote)
		return ERR_PTR(-ENOMEM);
	remote->handle = handle;
	remote->kcov = kcov;
	hash_add(kcov_remote_map, &remote->hnode, handle);
	return remote;
}

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)
{
	struct kcov_remote_area *area;
	struct list_head *pos;

	list_for_each(pos, &kcov_remote_areas) {
		area = list_entry(pos, struct kcov_remote_area, list);
		if (area->size == size) {
			list_del(&area->list);
			return area;
		}
	}
	return NULL;
}

/* Must be called with kcov_remote_lock locked. */
static void kcov_remote_area_put(struct kcov_remote_area *area,
				 unsigned int size)
{
	INIT_LIST_HEAD(&area->list);
	area->size = size;
	list_add(&area->list, &kcov_remote_areas);
	/*
	 * KMSAN doesn't instrument this file, so it may not know area->list
	 * is initialized. Unpoison it explicitly to avoid reports in
	 * kcov_remote_area_get().
	 */
	kmsan_unpoison_memory(&area->list, sizeof(area->list));
}

/*
 * Unlike in_serving_softirq(), this function returns false when called during
 * a hardirq or an NMI that happened in the softirq context.
 */
static inline bool in_softirq_really(void)
{
	return in_serving_softirq() && !in_hardirq() && !in_nmi();
}

static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
	unsigned int mode;

	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts, unless we are in a remote
	 * coverage collection section in a softirq.
	 */
	if (!in_task() && !(in_softirq_really() && t->kcov_softirq))
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts, there are paired barrier()/WRITE_ONCE() in
	 * kcov_start().
	 */
	barrier();
	return mode == needed_mode;
}

static notrace unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		/* Previously we wrote the pc before updating pos. However,
		 * some early interrupt code could bypass the check_kcov_mode()
		 * check and invoke __sanitizer_cov_trace_pc(). If such an
		 * interrupt is raised between writing the pc and updating pos,
		 * the pc could be overwritten by the recursive
		 * __sanitizer_cov_trace_pc(). Update pos before writing the pc
		 * to avoid such interleaving.
		 */
		WRITE_ONCE(area[0], pos);
		barrier();
		area[pos] = ip;
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

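/*
 * Illustration (not part of this file): with CONFIG_KCOV the compiler builds
 * instrumented files with -fsanitize-coverage=trace-pc, so a fragment like
 *
 *	if (cond)
 *		foo();
 *	bar();
 *
 * is emitted roughly as if it were written as:
 *
 *	__sanitizer_cov_trace_pc();
 *	if (cond) {
 *		__sanitizer_cov_trace_pc();
 *		foo();
 *	}
 *	__sanitizer_cov_trace_pc();
 *	bar();
 */
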
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);

	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		/* See comment in __sanitizer_cov_trace_pc(). */
		WRITE_ONCE(area[0], count + 1);
		barrier();
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
	}
}

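/*
 * Resulting KCOV_TRACE_CMP buffer layout (a sketch matching the stores
 * above): area[0] holds the record count n, followed by n records of
 * KCOV_WORDS_PER_CMP u64 words each, so record i occupies:
 *
 *	area[1 + 4 * i + 0]	type (KCOV_CMP_SIZE(...), possibly or'ed
 *				with KCOV_CMP_CONST)
 *	area[1 + 4 * i + 1]	arg1
 *	area[1 + 4 * i + 2]	arg2
 *	area[1 + 4 * i + 3]	caller ip
 */
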
void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(kcov_u64 arg1, kcov_u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(kcov_u64 arg1, kcov_u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);

void notrace __sanitizer_cov_trace_switch(kcov_u64 val, void *arg)
{
	u64 i;
	u64 *cases = (u64 *)arg;
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);

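/*
 * Illustration (not part of this file): for a switch on a u32 value such as
 *
 *	switch (x) {
 *	case 1: ...
 *	case 7: ...
 *	}
 *
 * the compiler passes arg = {2, 32, 1, 7}: cases[0] is the number of case
 * constants, cases[1] is the bit width of the switched value, and the case
 * constants follow, so the loop in __sanitizer_cov_trace_switch() above
 * emits one KCOV_CMP_CONST record per case constant.
 */
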
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */

static void kcov_start(struct task_struct *t, struct kcov *kcov,
		       unsigned int size, void *area, enum kcov_mode mode,
		       int sequence)
{
	kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);
	t->kcov = kcov;
	/* Cache in task struct for performance. */
	t->kcov_size = size;
	t->kcov_area = area;
	t->kcov_sequence = sequence;
	/* See comment in check_kcov_mode(). */
	barrier();
	WRITE_ONCE(t->kcov_mode, mode);
}

static void kcov_stop(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov = NULL;
	t->kcov_size = 0;
	t->kcov_area = NULL;
}

static void kcov_task_reset(struct task_struct *t)
{
	kcov_stop(t);
	t->kcov_sequence = 0;
	t->kcov_handle = 0;
}

void kcov_task_init(struct task_struct *t)
{
	kcov_task_reset(t);
	t->kcov_handle = current->kcov_handle;
}

static void kcov_reset(struct kcov *kcov)
{
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	kcov->remote = false;
	kcov->remote_size = 0;
	kcov->sequence++;
}

static void kcov_remote_reset(struct kcov *kcov)
{
	int bkt;
	struct kcov_remote *remote;
	struct hlist_node *tmp;
	unsigned long flags;

	spin_lock_irqsave(&kcov_remote_lock, flags);
	hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
		if (remote->kcov != kcov)
			continue;
		hash_del(&remote->hnode);
		kfree(remote);
	}
	/* Do reset before unlock to prevent races with kcov_remote_start(). */
	kcov_reset(kcov);
	spin_unlock_irqrestore(&kcov_remote_lock, flags);
}

static void kcov_disable(struct task_struct *t, struct kcov *kcov)
{
	kcov_task_reset(t);
	if (kcov->remote)
		kcov_remote_reset(kcov);
	else
		kcov_reset(kcov);
}

static void kcov_get(struct kcov *kcov)
{
	refcount_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (refcount_dec_and_test(&kcov->refcount)) {
		kcov_remote_reset(kcov);
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;
	unsigned long flags;

	kcov = t->kcov;
	if (kcov == NULL)
		return;

	spin_lock_irqsave(&kcov->lock, flags);
	kcov_debug("t = %px, kcov->t = %px\n", t, kcov->t);
	/*
	 * For KCOV_ENABLE devices we want to make sure that t->kcov->t == t,
	 * which comes down to:
	 *	WARN_ON(!kcov->remote && kcov->t != t);
	 *
	 * For KCOV_REMOTE_ENABLE devices, the exiting task is either:
	 *
	 * 1. A remote task between kcov_remote_start() and kcov_remote_stop().
	 *    In this case we should print a warning right away, since a task
	 *    shouldn't be exiting when it's in a kcov coverage collection
	 *    section. Here t points to the task that is collecting remote
	 *    coverage, and t->kcov->t points to the thread that created the
	 *    kcov device. Which means that to detect this case we need to
	 *    check that t != t->kcov->t, and this gives us the following:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * 2. The task that created the kcov device, exiting without calling
	 *    KCOV_DISABLE; then again we make sure that t->kcov->t == t:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * By combining all three checks into one we get:
	 *	WARN_ON(kcov->t != t);
	 */
	if (WARN_ON(kcov->t != t)) {
		spin_unlock_irqrestore(&kcov->lock, flags);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_disable(t, kcov);
	spin_unlock_irqrestore(&kcov->lock, flags);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&kcov->lock, flags);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->area == NULL || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	spin_unlock_irqrestore(&kcov->lock, flags);
	vm_flags_set(vma, VM_DONTEXPAND);
	for (off = 0; off < size; off += PAGE_SIZE) {
		page = vmalloc_to_page(kcov->area + off);
		res = vm_insert_page(vma, vma->vm_start + off, page);
		if (res) {
			pr_warn_once("kcov: vm_insert_page() failed\n");
			return res;
		}
	}
	return 0;
exit:
	spin_unlock_irqrestore(&kcov->lock, flags);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	kcov->sequence = 1;
	refcount_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_get_mode(unsigned long arg)
{
	if (arg == KCOV_TRACE_PC)
		return KCOV_MODE_TRACE_PC;
	else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
		return KCOV_MODE_TRACE_CMP;
#else
		return -ENOTSUPP;
#endif
	else
		return -EINVAL;
}

/*
 * Fault in a lazily-faulted vmalloc area before it can be used by
 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 * vmalloc fault handling path is instrumented.
 */
static void kcov_fault_in_area(struct kcov *kcov)
{
	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
	unsigned long *area = kcov->area;
	unsigned long offset;

	for (offset = 0; offset < kcov->size; offset += stride)
		READ_ONCE(area[offset]);
}

static inline bool kcov_check_handle(u64 handle, bool common_valid,
				     bool uncommon_valid, bool zero_valid)
{
	if (handle & ~(KCOV_SUBSYSTEM_MASK | KCOV_INSTANCE_MASK))
		return false;
	switch (handle & KCOV_SUBSYSTEM_MASK) {
	case KCOV_SUBSYSTEM_COMMON:
		return (handle & KCOV_INSTANCE_MASK) ?
			common_valid : zero_valid;
	case KCOV_SUBSYSTEM_USB:
		return uncommon_valid;
	default:
		return false;
	}
	return false;
}

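/*
 * Illustration (a sketch based on the checks above): a handle packs a
 * subsystem id into the top byte and an instance id into the low four bytes,
 * typically built with the kcov_remote_handle() helper from <linux/kcov.h>:
 *
 *	kcov_remote_handle(KCOV_SUBSYSTEM_USB, bus_number)
 *	kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, nonzero_user_chosen_id)
 *
 * bus_number and nonzero_user_chosen_id are placeholder names here.
 */
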
static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long flags, unused;
	int mode, i;
	struct kcov_remote_arg *remote_arg;
	struct kcov_remote *remote;

	switch (cmd) {
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only at
		 * task exit or voluntarily by KCOV_DISABLE. After that it can
		 * be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		mode = kcov_get_mode(arg);
		if (mode < 0)
			return mode;
		kcov_fault_in_area(kcov);
		kcov->mode = mode;
		kcov_start(t, kcov, kcov->size, kcov->area, kcov->mode,
			   kcov->sequence);
		kcov->t = t;
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_disable(t, kcov);
		kcov_put(kcov);
		return 0;
	case KCOV_REMOTE_ENABLE:
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		remote_arg = (struct kcov_remote_arg *)arg;
		mode = kcov_get_mode(remote_arg->trace_mode);
		if (mode < 0)
			return mode;
		if ((unsigned long)remote_arg->area_size >
		    LONG_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->mode = mode;
		t->kcov = kcov;
		t->kcov_mode = KCOV_MODE_REMOTE;
		kcov->t = t;
		kcov->remote = true;
		kcov->remote_size = remote_arg->area_size;
		spin_lock_irqsave(&kcov_remote_lock, flags);
		for (i = 0; i < remote_arg->num_handles; i++) {
			if (!kcov_check_handle(remote_arg->handles[i],
						false, true, false)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov, remote_arg->handles[i]);
			if (IS_ERR(remote)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
		}
		if (remote_arg->common_handle) {
			if (!kcov_check_handle(remote_arg->common_handle,
						true, false, false)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov,
					remote_arg->common_handle);
			if (IS_ERR(remote)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
			t->kcov_handle = remote_arg->common_handle;
		}
		spin_unlock_irqrestore(&kcov_remote_lock, flags);
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;
	struct kcov_remote_arg *remote_arg = NULL;
	unsigned int remote_num_handles;
	unsigned long remote_arg_size;
	unsigned long size, flags;
	void *area;

	kcov = filep->private_data;
	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and setup buffer size.
		 * Must happen before anything else.
		 *
		 * First check the size argument - it must be at least 2
		 * to hold the current position and one PC.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		area = vmalloc_user(size * sizeof(unsigned long));
		if (area == NULL)
			return -ENOMEM;
		spin_lock_irqsave(&kcov->lock, flags);
		if (kcov->mode != KCOV_MODE_DISABLED) {
			spin_unlock_irqrestore(&kcov->lock, flags);
			vfree(area);
			return -EBUSY;
		}
		kcov->area = area;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		spin_unlock_irqrestore(&kcov->lock, flags);
		return 0;
	case KCOV_REMOTE_ENABLE:
		if (get_user(remote_num_handles, (unsigned __user *)(arg +
				offsetof(struct kcov_remote_arg, num_handles))))
			return -EFAULT;
		if (remote_num_handles > KCOV_REMOTE_MAX_HANDLES)
			return -EINVAL;
		remote_arg_size = struct_size(remote_arg, handles,
					      remote_num_handles);
		remote_arg = memdup_user((void __user *)arg, remote_arg_size);
		if (IS_ERR(remote_arg))
			return PTR_ERR(remote_arg);
		if (remote_arg->num_handles != remote_num_handles) {
			kfree(remote_arg);
			return -EINVAL;
		}
		arg = (unsigned long)remote_arg;
		fallthrough;
	default:
		/*
		 * All other commands can be normally executed under a spin
		 * lock, so we obtain and release it here in order to simplify
		 * kcov_ioctl_locked().
		 */
		spin_lock_irqsave(&kcov->lock, flags);
		res = kcov_ioctl_locked(kcov, cmd, arg);
		spin_unlock_irqrestore(&kcov->lock, flags);
		kfree(remote_arg);
		return res;
	}
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

/*
 * kcov_remote_start() and kcov_remote_stop() can be used to annotate a section
 * of code in a kernel background thread or in a softirq to allow kcov to be
 * used to collect coverage from that part of code.
 *
 * The handle argument of kcov_remote_start() identifies a code section that is
 * used for coverage collection. A userspace process passes this handle to the
 * KCOV_REMOTE_ENABLE ioctl to make the used kcov device start collecting
 * coverage for the code section identified by this handle.
 *
 * The usage of these annotations in the kernel code differs depending on the
 * type of the kernel thread whose code is being annotated.
 *
 * For global kernel threads that are spawned in a limited number of instances
 * (e.g. one USB hub_event() worker thread is spawned per USB HCD) and for
 * softirqs, each instance must be assigned a unique 4-byte instance id. The
 * instance id is then combined with a 1-byte subsystem id to get a handle via
 * kcov_remote_handle(subsystem_id, instance_id).
 *
 * For local kernel threads that are spawned from system call handlers when a
 * user interacts with some kernel interface (e.g. vhost workers), a handle is
 * passed from a userspace process as the common_handle field of the
 * kcov_remote_arg struct (note that the user must generate a handle by using
 * kcov_remote_handle() with KCOV_SUBSYSTEM_COMMON as the subsystem id and an
 * arbitrary 4-byte non-zero number as the instance id). This common handle
 * then gets saved into the task_struct of the process that issued the
 * KCOV_REMOTE_ENABLE ioctl. When this process issues system calls that spawn
 * kernel threads, the common handle must be retrieved via kcov_common_handle()
 * and passed to the spawned threads via custom annotations. Those kernel
 * threads must in turn be annotated with kcov_remote_start(common_handle) and
 * kcov_remote_stop(). All of the threads that are spawned by the same process
 * obtain the same handle, hence the name "common".
 *
 * See Documentation/dev-tools/kcov.rst for more details.
 *
 * Internally, kcov_remote_start() looks up the kcov device associated with the
 * provided handle, allocates an area for coverage collection, and saves the
 * pointers to kcov and area into the current task_struct to allow coverage to
 * be collected via __sanitizer_cov_trace_pc().
 * In turn, kcov_remote_stop() clears those pointers from task_struct to stop
 * collecting coverage and copies all collected coverage into the kcov area.
 */

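/*
 * Example (a sketch, not lifted verbatim from any subsystem): a background
 * thread annotates one iteration of its work loop with a global handle in
 * the style described above:
 *
 *	kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_USB, busnum));
 *	... code whose coverage is collected remotely ...
 *	kcov_remote_stop();
 *
 * Userspace then enables collection via ioctl(fd, KCOV_REMOTE_ENABLE, arg),
 * where arg is a struct kcov_remote_arg whose handles[] lists that handle.
 */
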
static inline bool kcov_mode_enabled(unsigned int mode)
{
	return (mode & ~KCOV_IN_CTXSW) != KCOV_MODE_DISABLED;
}

static void kcov_remote_softirq_start(struct task_struct *t)
{
	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
	unsigned int mode;

	mode = READ_ONCE(t->kcov_mode);
	barrier();
	if (kcov_mode_enabled(mode)) {
		data->saved_mode = mode;
		data->saved_size = t->kcov_size;
		data->saved_area = t->kcov_area;
		data->saved_sequence = t->kcov_sequence;
		data->saved_kcov = t->kcov;
		kcov_stop(t);
	}
}

static void kcov_remote_softirq_stop(struct task_struct *t)
{
	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);

	if (data->saved_kcov) {
		kcov_start(t, data->saved_kcov, data->saved_size,
			   data->saved_area, data->saved_mode,
			   data->saved_sequence);
		data->saved_mode = 0;
		data->saved_size = 0;
		data->saved_area = NULL;
		data->saved_sequence = 0;
		data->saved_kcov = NULL;
	}
}

void kcov_remote_start(u64 handle)
{
	struct task_struct *t = current;
	struct kcov_remote *remote;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
		return;
	if (!in_task() && !in_softirq_really())
		return;

	local_lock_irqsave(&kcov_percpu_data.lock, flags);

	/*
	 * Check that kcov_remote_start() is not called twice in background
	 * threads nor called by user tasks (with enabled kcov).
	 */
	mode = READ_ONCE(t->kcov_mode);
	if (WARN_ON(in_task() && kcov_mode_enabled(mode))) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/*
	 * Check that kcov_remote_start() is not called twice in softirqs.
	 * Note that kcov_remote_start() can be called from a softirq that
	 * happened while collecting coverage from a background thread.
	 */
	if (WARN_ON(in_serving_softirq() && t->kcov_softirq)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}

	spin_lock(&kcov_remote_lock);
	remote = kcov_remote_find(handle);
	if (!remote) {
		spin_unlock(&kcov_remote_lock);
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	kcov_debug("handle = %llx, context: %s\n", handle,
		   in_task() ? "task" : "softirq");
	kcov = remote->kcov;
	/* The matching kcov_put() is in kcov_remote_stop(). */
	kcov_get(kcov);
	/*
	 * Read kcov fields before unlock to prevent races with
	 * KCOV_DISABLE / kcov_remote_reset().
	 */
	mode = kcov->mode;
	sequence = kcov->sequence;
	if (in_task()) {
		size = kcov->remote_size;
		area = kcov_remote_area_get(size);
	} else {
		size = CONFIG_KCOV_IRQ_AREA_SIZE;
		area = this_cpu_ptr(&kcov_percpu_data)->irq_area;
	}
	spin_unlock(&kcov_remote_lock);

	/* Can only happen when in_task(). */
	if (!area) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		area = vmalloc(size * sizeof(unsigned long));
		if (!area) {
			kcov_put(kcov);
			return;
		}
		local_lock_irqsave(&kcov_percpu_data.lock, flags);
	}

	/* Reset coverage size. */
	*(u64 *)area = 0;

	if (in_serving_softirq()) {
		kcov_remote_softirq_start(t);
		t->kcov_softirq = 1;
	}
	kcov_start(t, kcov, size, area, mode, sequence);

	local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
}
EXPORT_SYMBOL(kcov_remote_start);

static void kcov_move_area(enum kcov_mode mode, void *dst_area,
			   unsigned int dst_area_size, void *src_area)
{
	u64 word_size = sizeof(unsigned long);
	u64 count_size, entry_size_log;
	u64 dst_len, src_len;
	void *dst_entries, *src_entries;
	u64 dst_occupied, dst_free, bytes_to_move, entries_moved;

	kcov_debug("%px %u <= %px %lu\n",
		   dst_area, dst_area_size, src_area,
		   *(unsigned long *)src_area);

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		dst_len = READ_ONCE(*(unsigned long *)dst_area);
		src_len = *(unsigned long *)src_area;
		count_size = sizeof(unsigned long);
		entry_size_log = __ilog2_u64(sizeof(unsigned long));
		break;
	case KCOV_MODE_TRACE_CMP:
		dst_len = READ_ONCE(*(u64 *)dst_area);
		src_len = *(u64 *)src_area;
		count_size = sizeof(u64);
		BUILD_BUG_ON(!is_power_of_2(KCOV_WORDS_PER_CMP));
		entry_size_log = __ilog2_u64(sizeof(u64) * KCOV_WORDS_PER_CMP);
		break;
	default:
		WARN_ON(1);
		return;
	}

	/* As arm can't divide u64 integers, use the log of the entry size. */
	if (dst_len > ((dst_area_size * word_size - count_size) >>
				entry_size_log))
		return;
	dst_occupied = count_size + (dst_len << entry_size_log);
	dst_free = dst_area_size * word_size - dst_occupied;
	bytes_to_move = min(dst_free, src_len << entry_size_log);
	dst_entries = dst_area + dst_occupied;
	src_entries = src_area + count_size;
	memcpy(dst_entries, src_entries, bytes_to_move);
	entries_moved = bytes_to_move >> entry_size_log;

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		WRITE_ONCE(*(unsigned long *)dst_area, dst_len + entries_moved);
		break;
	case KCOV_MODE_TRACE_CMP:
		WRITE_ONCE(*(u64 *)dst_area, dst_len + entries_moved);
		break;
	default:
		break;
	}
}

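/*
 * Worked example of the shift arithmetic above (an illustration, not extra
 * functionality): in KCOV_MODE_TRACE_CMP, entry_size_log = ilog2(8 * 4) = 5,
 * so a record occupies 1 << 5 = 32 bytes. A dst area of dst_area_size words
 * therefore fits (dst_area_size * 8 - 8) >> 5 records after the u64 count,
 * and dst_len << 5 converts a record count back to bytes.
 */
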
/* See the comment before kcov_remote_start() for usage details. */
void kcov_remote_stop(void)
{
	struct task_struct *t = current;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (!in_task() && !in_softirq_really())
		return;

	local_lock_irqsave(&kcov_percpu_data.lock, flags);

	mode = READ_ONCE(t->kcov_mode);
	barrier();
	if (!kcov_mode_enabled(mode)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/*
	 * When in softirq, check if the corresponding kcov_remote_start()
	 * actually found the remote handle and started collecting coverage.
	 */
	if (in_serving_softirq() && !t->kcov_softirq) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/* Make sure that kcov_softirq is only set when in softirq. */
	if (WARN_ON(!in_serving_softirq() && t->kcov_softirq)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}

	kcov = t->kcov;
	area = t->kcov_area;
	size = t->kcov_size;
	sequence = t->kcov_sequence;

	kcov_stop(t);
	if (in_serving_softirq()) {
		t->kcov_softirq = 0;
		kcov_remote_softirq_stop(t);
	}

	spin_lock(&kcov->lock);
	/*
	 * KCOV_DISABLE could have been called between kcov_remote_start()
	 * and kcov_remote_stop(), hence the sequence check.
	 */
	if (sequence == kcov->sequence && kcov->remote)
		kcov_move_area(kcov->mode, kcov->area, kcov->size, area);
	spin_unlock(&kcov->lock);

	if (in_task()) {
		spin_lock(&kcov_remote_lock);
		kcov_remote_area_put(area, size);
		spin_unlock(&kcov_remote_lock);
	}

	local_unlock_irqrestore(&kcov_percpu_data.lock, flags);

	/* The matching kcov_get() is in kcov_remote_start(). */
	kcov_put(kcov);
}
EXPORT_SYMBOL(kcov_remote_stop);

/* See the comment before kcov_remote_start() for usage details. */
u64 kcov_common_handle(void)
{
	if (!in_task())
		return 0;
	return current->kcov_handle;
}
EXPORT_SYMBOL(kcov_common_handle);

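/*
 * Usage sketch (modeled on the vhost-style flow described in
 * Documentation/dev-tools/kcov.rst; names here are illustrative):
 *
 *	// In the system call handler, while still in task context:
 *	u64 handle = kcov_common_handle();
 *	// ... store handle in the structure handed to the worker ...
 *
 *	// In the spawned kernel thread:
 *	kcov_remote_start(handle);
 *	// ... work whose coverage is attributed to the spawning process ...
 *	kcov_remote_stop();
 */
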
static int __init kcov_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		void *area = vmalloc_node(CONFIG_KCOV_IRQ_AREA_SIZE *
				sizeof(unsigned long), cpu_to_node(cpu));
		if (!area)
			return -ENOMEM;
		per_cpu_ptr(&kcov_percpu_data, cpu)->irq_area = area;
	}

	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);

	return 0;
}

device_initcall(kcov_init);