#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, repeated enable/disable for a task (only one task at a time is
 *    allowed); see the user-space usage sketch below the struct
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in long's for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};
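
/*
 * Rough user-space usage sketch (not part of this file; added only to
 * illustrate the state machine described above). The debugfs path matches
 * the file created in kcov_init() below; the KCOV_* ioctl numbers come from
 * include/uapi/linux/kcov.h, and COVER_SIZE is an arbitrary example value:
 *
 *	#define COVER_SIZE (64 << 10)
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, 0);
 *	cover[0] = 0;
 *	... issue the syscall(s) under test ...
 *	n = cover[0];		// number of PCs recorded for this task
 *	// cover[1..n] now hold the recorded PCs
 *	ioctl(fd, KCOV_DISABLE, 0);
 *	close(fd);
 */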

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	enum kcov_mode mode;

	t = current;
	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 */
	if (!t || in_interrupt())
		return;
	mode = READ_ONCE(t->kcov_mode);
	if (mode == KCOV_MODE_TRACE) {
		unsigned long *area;
		unsigned long pos;

		/*
		 * There is some code that runs in interrupts but for which
		 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
		 * READ_ONCE()/barrier() effectively provides load-acquire wrt
		 * interrupts; it is paired with barrier()/WRITE_ONCE() in
		 * kcov_ioctl_locked().
		 */
		barrier();
		area = t->kcov_area;
		/* The first word is the number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			area[pos] = _RET_IP_;
			WRITE_ONCE(area[0], pos);
		}
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
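
/*
 * For orientation only (this describes the build setup, which is outside
 * this file): with CONFIG_KCOV the compiler instruments kernel code using
 * -fsanitize-coverage=trace-pc, which amounts to inserting a call at the
 * start of every basic block, roughly:
 *
 *	void foo(int x)
 *	{
 *		__sanitizer_cov_trace_pc();	// records _RET_IP_ inside foo()
 *		if (x) {
 *			__sanitizer_cov_trace_pc();
 *			bar();
 *		}
 *	}
 */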

static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	t->kcov_mode = KCOV_MODE_DISABLED;
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold the current position and
		 * one PC. Later we allocate size * sizeof(unsigned long)
		 * bytes of memory, and that must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_TRACE;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only at
		 * task exit or voluntarily via KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		unused = arg;
		if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
		    kcov->area == NULL)
			return -EINVAL;
		if (kcov->t != NULL)
			return -EBUSY;
		t = current;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in __sanitizer_cov_trace_pc(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);