/*
 * Copyright (C) 2006 Jens Axboe <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <asm/uaccess.h>

static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
static unsigned int blktrace_seq __read_mostly = 1;

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len)
{
	struct blk_io_trace *t;

	t = relay_reserve(bt->rchan, sizeof(*t) + len);
	if (t) {
		const int cpu = smp_processor_id();

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = cpu_clock(cpu) - per_cpu(blk_trace_cpu_offset, cpu);
		t->device = bt->dev;
		t->action = action;
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len;
		memcpy((void *) t + sizeof(*t), data, len);
	}
}
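
/*
 * Note that if relay_reserve() fails in trace_note() (current sub-buffer
 * full, no switch possible), the notify message is simply not emitted;
 * drops are only counted for regular trace events, via the subbuf_start
 * callback further below.
 */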

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
	tsk->btrace_seq = blktrace_seq;
	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec now;
	unsigned long flags;
	u32 words[2];

	getnstimeofday(&now);
	words[0] = now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
	local_irq_restore(flags);
}

static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector < bt->start_lba || sector > bt->end_lba)
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}
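
/*
 * Despite the name, act_log_check() returns 1 when the event should be
 * *filtered out* (action not in the mask, sector outside the LBA window,
 * or pid filter mismatch) and 0 when it should be logged.
 */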

/*
 * Data direction bit lookup
 */
static u32 ddir_act[2] __read_mostly = {
	BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE)
};

/*
 * Bio action bits of interest
 */
static u32 bio_act[9] __read_mostly = {
	0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_ACT(BLK_TC_SYNC), 0,
	BLK_TC_ACT(BLK_TC_AHEAD), 0, 0, 0, BLK_TC_ACT(BLK_TC_META)
};

/*
 * More could be added as needed; take care to adjust the shift constants
 * below so that each new bit maps to its own power-of-two index in
 * bio_act[]
 */
#define trace_barrier_bit(rw)	\
	(((rw) & (1 << BIO_RW_BARRIER)) >> (BIO_RW_BARRIER - 0))
#define trace_sync_bit(rw)	\
	(((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
#define trace_ahead_bit(rw)	\
	(((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))
#define trace_meta_bit(rw)	\
	(((rw) & (1 << BIO_RW_META)) >> (BIO_RW_META - 3))
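
/*
 * How the macros above map rw flags to bio_act[] indices (a worked
 * example; the BIO_RW_* values quoted are the ones these shifts assume,
 * namely AHEAD=1, BARRIER=2, SYNC=4, META=5):
 *
 *	trace_barrier_bit:  bit 2 >> 2  ->  index 1 (BLK_TC_BARRIER)
 *	trace_sync_bit:     bit 4 >> 3  ->  index 2 (BLK_TC_SYNC)
 *	trace_ahead_bit:    bit 1 << 1  ->  index 4 (BLK_TC_AHEAD)
 *	trace_meta_bit:     bit 5 >> 2  ->  index 8 (BLK_TC_META)
 *
 * Each macro yields either 0 or a distinct power of two, which is why
 * bio_act[] has its non-zero entries at indices 1, 2, 4 and 8.
 */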

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
	struct task_struct *tsk = current;
	struct blk_io_trace *t;
	unsigned long flags;
	unsigned long *sequence;
	pid_t pid;
	int cpu;

	if (unlikely(bt->trace_state != Blktrace_running))
		return;

	what |= ddir_act[rw & WRITE];
	what |= bio_act[trace_barrier_bit(rw)];
	what |= bio_act[trace_sync_bit(rw)];
	what |= bio_act[trace_ahead_bit(rw)];
	what |= bio_act[trace_meta_bit(rw)];

	pid = tsk->pid;
	if (unlikely(act_log_check(bt, what, sector, pid)))
		return;

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes. Once reserved, it's
	 * enough to have preemption disabled to prevent a read of this
	 * data before we are through filling it; disabling interrupts,
	 * as we do here, implies that as well.
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		cpu = smp_processor_id();
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = cpu_clock(cpu) - per_cpu(blk_trace_cpu_offset, cpu);
		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->pid = pid;
		t->device = bt->dev;
		t->cpu = cpu;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
	}

	local_irq_restore(flags);
}

EXPORT_SYMBOL_GPL(__blk_add_trace);
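
/*
 * A minimal sketch (not the authoritative definition) of how the inline
 * helpers in <linux/blktrace_api.h> are expected to drive this worker,
 * along the lines of blk_add_trace_bio():
 *
 *	struct blk_trace *bt = q->blk_trace;
 *
 *	if (likely(!bt))
 *		return;
 *	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
 *			what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
 */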

static struct dentry *blk_tree_root;
static struct mutex blk_tree_mutex;
static unsigned int root_users;

static inline void blk_remove_root(void)
{
	if (blk_tree_root) {
		debugfs_remove(blk_tree_root);
		blk_tree_root = NULL;
	}
}

static void blk_remove_tree(struct dentry *dir)
{
	mutex_lock(&blk_tree_mutex);
	debugfs_remove(dir);
	if (--root_users == 0)
		blk_remove_root();
	mutex_unlock(&blk_tree_mutex);
}

static struct dentry *blk_create_tree(const char *blk_name)
{
	struct dentry *dir = NULL;

	mutex_lock(&blk_tree_mutex);

	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root)
			goto err;
	}

	dir = debugfs_create_dir(blk_name, blk_tree_root);
	if (dir)
		root_users++;
	else
		blk_remove_root();

err:
	mutex_unlock(&blk_tree_mutex);
	return dir;
}
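
/*
 * root_users counts the per-device directories living under the shared
 * debugfs "block" root: blk_create_tree() bumps it for each successful
 * device directory, and blk_remove_tree() tears the root down again
 * when the last user goes away.
 */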

static void blk_trace_cleanup(struct blk_trace *bt)
{
	relay_close(bt->rchan);
	debugfs_remove(bt->dropped_file);
	blk_remove_tree(bt->dir);
	free_percpu(bt->sequence);
	kfree(bt);
}

static int blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state == Blktrace_setup ||
	    bt->trace_state == Blktrace_stopped)
		blk_trace_cleanup(bt);

	return 0;
}
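
/*
 * The xchg() in blk_trace_remove() atomically detaches the blk_trace
 * from the queue, so two concurrent removals cannot both see the same
 * pointer and double-free it.
 */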

static int blk_dropped_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_dropped_open,
	.read =		blk_dropped_read,
};

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   int mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};
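
/*
 * Returning 0 from the subbuf_start callback tells relay to reject the
 * sub-buffer switch, so the event that triggered it is lost - which is
 * exactly what bumps the "dropped" count read back through debugfs.
 */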

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
			   char __user *arg)
{
	struct blk_user_trace_setup buts;
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	char b[BDEVNAME_SIZE];
	int ret, i;

	if (copy_from_user(&buts, arg, sizeof(buts)))
		return -EFAULT;

	if (!buts.buf_size || !buts.buf_nr)
		return -EINVAL;

	strcpy(buts.name, bdevname(bdev, b));

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	for (i = 0; i < strlen(buts.name); i++)
		if (buts.name[i] == '/')
			buts.name[i] = '_';

	if (copy_to_user(arg, &buts, sizeof(buts)))
		return -EFAULT;

	ret = -ENOMEM;
	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		goto err;

	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	ret = -ENOENT;
	dir = blk_create_tree(buts.name);
	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = bdev->bd_dev;
	atomic_set(&bt->dropped, 0);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr,
			       &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts.act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	bt->start_lba = buts.start_lba;
	bt->end_lba = buts.end_lba;
	if (!bt->end_lba)
		bt->end_lba = -1ULL;

	bt->pid = buts.pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	return 0;
err:
	if (dir)
		blk_remove_tree(dir);
	if (bt) {
		if (bt->dropped_file)
			debugfs_remove(bt->dropped_file);
		free_percpu(bt->sequence);
		if (bt->rchan)
			relay_close(bt->rchan);
		kfree(bt);
	}
	return ret;
}
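
/*
 * A hedged sketch of the userspace side: the blktrace utility fills a
 * struct blk_user_trace_setup and hands it to the ioctl handled below,
 * roughly (the field values are illustrative, not prescriptive):
 *
 *	struct blk_user_trace_setup buts = {
 *		.act_mask = 0,            // 0 means "trace everything"
 *		.buf_size = 512 * 1024,   // bytes per relay sub-buffer
 *		.buf_nr   = 4,            // sub-buffers per cpu
 *	};
 *
 *	if (ioctl(fd, BLKTRACESETUP, &buts) < 0)
 *		perror("BLKTRACESETUP");
 */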

static int blk_trace_startstop(struct request_queue *q, int start)
{
	struct blk_trace *bt;
	int ret;

	if ((bt = q->blk_trace) == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			/*
			 * make sure the bumped sequence is visible before
			 * tracing goes live, so each task gets a fresh
			 * process notify via trace_note_tsk()
			 */
			smp_mb();
			bt->trace_state = Blktrace_running;

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}

/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&bdev->bd_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		ret = blk_trace_setup(q, bdev, arg);
		break;
	case BLKTRACESTART:
		start = 1;
		/* fall through */
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&bdev->bd_mutex);
	return ret;
}

/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (q->blk_trace) {
		blk_trace_startstop(q, 0);
		blk_trace_remove(q);
	}
}

/*
 * Average offset over two calls to cpu_clock() with a gettimeofday()
 * in the middle
 */
static void blk_check_time(unsigned long long *t, int this_cpu)
{
	unsigned long long a, b;
	struct timeval tv;

	a = cpu_clock(this_cpu);
	do_gettimeofday(&tv);
	b = cpu_clock(this_cpu);

	/* force 64-bit arithmetic: tv_sec * 10^9 overflows a 32-bit long */
	*t = tv.tv_sec * 1000000000ULL + tv.tv_usec * 1000;
	*t -= (a + b) / 2;
}
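
/*
 * What the calibration above stores, per cpu:
 *
 *	blk_trace_cpu_offset = wall_clock_ns - (a + b) / 2
 *
 * where (a + b) / 2 approximates cpu_clock() at the instant of the
 * gettimeofday() call. Trace timestamps are then taken as
 * cpu_clock(cpu) - blk_trace_cpu_offset; per the "calibrate our
 * inter-CPU timings" comment below, the point is comparable timestamps
 * across cpus rather than absolute wall-clock values.
 */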

/*
 * calibrate our inter-CPU timings
 */
static void blk_trace_check_cpu_time(void *data)
{
	unsigned long long *t;
	int this_cpu = get_cpu();

	t = &per_cpu(blk_trace_cpu_offset, this_cpu);

	/*
	 * Just call it twice, hopefully the second call will be cache hot
	 * and a little more precise
	 */
	blk_check_time(t, this_cpu);
	blk_check_time(t, this_cpu);

	put_cpu();
}

static void blk_trace_set_ht_offsets(void)
{
#if defined(CONFIG_SCHED_SMT)
	int cpu, i;

	/*
	 * now make sure HT siblings have the same time offset
	 */
	preempt_disable();
	for_each_online_cpu(cpu) {
		unsigned long long *cpu_off, *sibling_off;

		for_each_cpu_mask(i, cpu_sibling_map[cpu]) {
			if (i == cpu)
				continue;

			cpu_off = &per_cpu(blk_trace_cpu_offset, cpu);
			sibling_off = &per_cpu(blk_trace_cpu_offset, i);
			*sibling_off = *cpu_off;
		}
	}
	preempt_enable();
#endif
}
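
/*
 * Giving HT siblings identical offsets is presumably safe because they
 * run off the same physical core clock; it avoids the two siblings
 * calibrating to slightly different values and mis-ordering events that
 * interleave between them.
 */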

static __init int blk_trace_init(void)
{
	mutex_init(&blk_tree_mutex);
	on_each_cpu(blk_trace_check_cpu_time, NULL, 1, 1);
	blk_trace_set_ht_offsets();

	return 0;
}

module_init(blk_trace_init);