Commit | Line | Data |
---|---|---|
2056a782 | 1 | /* |
0fe23479 | 2 | * Copyright (C) 2006 Jens Axboe <[email protected]> |
2056a782 JA |
3 | * |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License version 2 as | |
6 | * published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, | |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
11 | * GNU General Public License for more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public License | |
14 | * along with this program; if not, write to the Free Software | |
15 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | |
16 | * | |
17 | */ | |
2056a782 JA |
18 | #include <linux/kernel.h> |
19 | #include <linux/blkdev.h> | |
20 | #include <linux/blktrace_api.h> | |
21 | #include <linux/percpu.h> | |
22 | #include <linux/init.h> | |
23 | #include <linux/mutex.h> | |
5a0e3ad6 | 24 | #include <linux/slab.h> |
2056a782 | 25 | #include <linux/debugfs.h> |
6e5fdeed | 26 | #include <linux/export.h> |
be1c6341 | 27 | #include <linux/time.h> |
939b3669 | 28 | #include <linux/uaccess.h> |
a404d557 | 29 | #include <linux/list.h> |
55782138 LZ |
30 | |
31 | #include <trace/events/block.h> | |
32 | ||
2db270a8 | 33 | #include "trace_output.h" |
2056a782 | 34 | |
55782138 LZ |
35 | #ifdef CONFIG_BLK_DEV_IO_TRACE |
36 | ||
2056a782 JA |
37 | static unsigned int blktrace_seq __read_mostly = 1; |
38 | ||
c71a8961 | 39 | static struct trace_array *blk_tr; |
5006ea73 | 40 | static bool blk_tracer_enabled __read_mostly; |
c71a8961 | 41 | |
a404d557 JK |
42 | static LIST_HEAD(running_trace_list); |
43 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock); | |
44 | ||
c71a8961 | 45 | /* Select an alternative, minimalistic output instead of the original one */
ef18012b | 46 | #define TRACE_BLK_OPT_CLASSIC 0x1 |
c71a8961 ACM |
47 | |
48 | static struct tracer_opt blk_tracer_opts[] = { | |
49 | /* Disable the minimalistic output by default */ | |
157f9c00 | 50 | { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) }, |
c71a8961 ACM |
51 | { } |
52 | }; | |
53 | ||
54 | static struct tracer_flags blk_tracer_flags = { | |
55 | .val = 0, | |
56 | .opts = blk_tracer_opts, | |
57 | }; | |
58 | ||
5f3ea37c | 59 | /* Global reference count of probes */ |
5f3ea37c ACM |
60 | static atomic_t blk_probes_ref = ATOMIC_INIT(0); |
61 | ||
3c289ba7 | 62 | static void blk_register_tracepoints(void); |
5f3ea37c ACM |
63 | static void blk_unregister_tracepoints(void); |
64 | ||
be1c6341 OK |
65 | /* |
66 | * Send out a notify message. | |
67 | */ | |
a863055b JA |
68 | static void trace_note(struct blk_trace *bt, pid_t pid, int action, |
69 | const void *data, size_t len) | |
be1c6341 OK |
70 | { |
71 | struct blk_io_trace *t; | |
18cea459 | 72 | struct ring_buffer_event *event = NULL; |
e77405ad | 73 | struct ring_buffer *buffer = NULL; |
18cea459 LZ |
74 | int pc = 0; |
75 | int cpu = smp_processor_id(); | |
76 | bool blk_tracer = blk_tracer_enabled; | |
77 | ||
78 | if (blk_tracer) { | |
12883efb | 79 | buffer = blk_tr->trace_buffer.buffer; |
18cea459 | 80 | pc = preempt_count(); |
e77405ad | 81 | event = trace_buffer_lock_reserve(buffer, TRACE_BLK, |
18cea459 LZ |
82 | sizeof(*t) + len, |
83 | 0, pc); | |
84 | if (!event) | |
85 | return; | |
86 | t = ring_buffer_event_data(event); | |
87 | goto record_it; | |
88 | } | |
be1c6341 | 89 | |
c71a8961 ACM |
90 | if (!bt->rchan) |
91 | return; | |
92 | ||
be1c6341 | 93 | t = relay_reserve(bt->rchan, sizeof(*t) + len); |
d3d9d2a5 | 94 | if (t) { |
d3d9d2a5 | 95 | t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; |
2997c8c4 | 96 | t->time = ktime_to_ns(ktime_get()); |
18cea459 | 97 | record_it: |
d3d9d2a5 JA |
98 | t->device = bt->dev; |
99 | t->action = action; | |
100 | t->pid = pid; | |
101 | t->cpu = cpu; | |
102 | t->pdu_len = len; | |
103 | memcpy((void *) t + sizeof(*t), data, len); | |
18cea459 LZ |
104 | |
105 | if (blk_tracer) | |
e77405ad | 106 | trace_buffer_unlock_commit(buffer, event, 0, pc); |
d3d9d2a5 | 107 | } |
be1c6341 OK |
108 | } |
109 | ||
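
A note for readers, not part of the file: trace_note() above, like __blk_add_trace() further down, has two output paths, selected by whether the ftrace "blk" plugin or the classic relay channel set up through the ioctl interface is active. A sketch of the split, per the code above:

```c
/*  blk_tracer_enabled == true    (ftrace "blk" tracer)
 *      trace_buffer_lock_reserve() on blk_tr->trace_buffer.buffer,
 *      then trace_buffer_unlock_commit()
 *
 *  otherwise                     (blktrace ioctl / relay setup)
 *      relay_reserve() on bt->rchan; the record is consumed from
 *      debugfs by the user-space blktrace utility
 */
```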
2056a782 JA |
110 | /* |
111 | * Send out a notify for this process, if we haven't done so since a trace | |
112 | * started | |
113 | */ | |
a404d557 | 114 | static void trace_note_tsk(struct task_struct *tsk) |
2056a782 | 115 | { |
a404d557 JK |
116 | unsigned long flags; |
117 | struct blk_trace *bt; | |
118 | ||
a863055b | 119 | tsk->btrace_seq = blktrace_seq; |
a404d557 JK |
120 | spin_lock_irqsave(&running_trace_lock, flags); |
121 | list_for_each_entry(bt, &running_trace_list, running_list) { | |
122 | trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, | |
123 | sizeof(tsk->comm)); | |
124 | } | |
125 | spin_unlock_irqrestore(&running_trace_lock, flags); | |
be1c6341 | 126 | } |
2056a782 | 127 | |
be1c6341 OK |
128 | static void trace_note_time(struct blk_trace *bt) |
129 | { | |
130 | struct timespec now; | |
131 | unsigned long flags; | |
132 | u32 words[2]; | |
133 | ||
134 | getnstimeofday(&now); | |
135 | words[0] = now.tv_sec; | |
136 | words[1] = now.tv_nsec; | |
137 | ||
138 | local_irq_save(flags); | |
139 | trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words)); | |
140 | local_irq_restore(flags); | |
2056a782 JA |
141 | } |
142 | ||
9d5f09a4 AB |
143 | void __trace_note_message(struct blk_trace *bt, const char *fmt, ...) |
144 | { | |
145 | int n; | |
146 | va_list args; | |
14a73f54 | 147 | unsigned long flags; |
64565911 | 148 | char *buf; |
9d5f09a4 | 149 | |
18cea459 LZ |
150 | if (unlikely(bt->trace_state != Blktrace_running && |
151 | !blk_tracer_enabled)) | |
c71a8961 ACM |
152 | return; |
153 | ||
490da40d TM |
154 | /* |
155 | * If the BLK_TC_NOTIFY action mask isn't set, don't send any note | |
156 | * message to the trace. | |
157 | */ | |
158 | if (!(bt->act_mask & BLK_TC_NOTIFY)) | |
159 | return; | |
160 | ||
14a73f54 | 161 | local_irq_save(flags); |
d8a0349c | 162 | buf = this_cpu_ptr(bt->msg_data); |
9d5f09a4 | 163 | va_start(args, fmt); |
64565911 | 164 | n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args); |
9d5f09a4 AB |
165 | va_end(args); |
166 | ||
64565911 | 167 | trace_note(bt, 0, BLK_TN_MESSAGE, buf, n); |
14a73f54 | 168 | local_irq_restore(flags); |
9d5f09a4 AB |
169 | } |
170 | EXPORT_SYMBOL_GPL(__trace_note_message); | |
171 | ||
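
For context, a minimal sketch (not from this file) of how a block driver could feed a free-form note into an active trace through the export above. In-tree callers normally go through the blk_add_trace_msg() convenience wrapper from the blktrace header; that wrapper name is recalled from memory, and the my_driver_* naming is hypothetical:

```c
/* Illustrative only: emit a custom message into the blktrace stream. */
static void my_driver_note_retry(struct request_queue *q, int tries)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt)
		__trace_note_message(bt, "my_driver: retrying request, attempt %d",
				     tries);
}
```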
2056a782 JA |
172 | static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector, |
173 | pid_t pid) | |
174 | { | |
175 | if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0) | |
176 | return 1; | |
d0deef5b | 177 | if (sector && (sector < bt->start_lba || sector > bt->end_lba)) |
2056a782 JA |
178 | return 1; |
179 | if (bt->pid && pid != bt->pid) | |
180 | return 1; | |
181 | ||
182 | return 0; | |
183 | } | |
184 | ||
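
As a reading aid (illustrative, not in the file): the category filter in act_log_check() works because an event carries its BLK_TC_* class bits shifted up by BLK_TC_SHIFT inside the 32-bit action word, while bt->act_mask holds them unshifted:

```c
/* Hypothetical values showing the test performed in act_log_check(). */
u32 what = BLK_TC_ACT(BLK_TC_WRITE) | __BLK_TA_QUEUE;  /* event: queued write   */
u16 mask = BLK_TC_WRITE | BLK_TC_QUEUE;                /* user-selected classes */

/* non-zero => the event passes the act_mask filter */
bool pass = ((u32)mask << BLK_TC_SHIFT) & what;
```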
185 | /* | |
186 | * Data direction bit lookup | |
187 | */ | |
e4955c99 LZ |
188 | static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ), |
189 | BLK_TC_ACT(BLK_TC_WRITE) }; | |
2056a782 | 190 | |
7b6d91da CH |
191 | #define BLK_TC_RAHEAD BLK_TC_AHEAD |
192 | ||
35ba8f70 | 193 | /* The ilog2() calls fall out because they're constant */ |
7b6d91da CH |
194 | #define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \ |
195 | (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name)) | |
2056a782 JA |
196 | |
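
A brief gloss on MASK_TC_BIT() above (not part of the file): it shifts a REQ_* request flag straight into the matching BLK_TC_* position of the action word, avoiding a branch per flag. Assuming BLK_TC_ACT() is the usual shift by BLK_TC_SHIFT:

```c
/* MASK_TC_BIT(rw, SYNC) expands, roughly, to:
 *
 *   (rw & REQ_SYNC) << (ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - __REQ_SYNC)
 *
 * i.e. bit __REQ_SYNC of 'rw' is relocated to bit
 * ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT of 'what', which is the bit
 * BLK_TC_ACT(BLK_TC_SYNC) would set.
 */
```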
197 | /* | |
198 | * The worker for the various blk_add_trace*() types. Fills out a | |
199 | * blk_io_trace structure and places it in a per-cpu subbuffer. | |
200 | */ | |
5f3ea37c | 201 | static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, |
2056a782 JA |
202 | int rw, u32 what, int error, int pdu_len, void *pdu_data) |
203 | { | |
204 | struct task_struct *tsk = current; | |
c71a8961 | 205 | struct ring_buffer_event *event = NULL; |
e77405ad | 206 | struct ring_buffer *buffer = NULL; |
2056a782 | 207 | struct blk_io_trace *t; |
0a987751 | 208 | unsigned long flags = 0; |
2056a782 JA |
209 | unsigned long *sequence; |
210 | pid_t pid; | |
c71a8961 | 211 | int cpu, pc = 0; |
18cea459 | 212 | bool blk_tracer = blk_tracer_enabled; |
2056a782 | 213 | |
18cea459 | 214 | if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer)) |
2056a782 JA |
215 | return; |
216 | ||
217 | what |= ddir_act[rw & WRITE]; | |
7b6d91da CH |
218 | what |= MASK_TC_BIT(rw, SYNC); |
219 | what |= MASK_TC_BIT(rw, RAHEAD); | |
35ba8f70 DW |
220 | what |= MASK_TC_BIT(rw, META); |
221 | what |= MASK_TC_BIT(rw, DISCARD); | |
c09c47ca NK |
222 | what |= MASK_TC_BIT(rw, FLUSH); |
223 | what |= MASK_TC_BIT(rw, FUA); | |
2056a782 JA |
224 | |
225 | pid = tsk->pid; | |
d0deef5b | 226 | if (act_log_check(bt, what, sector, pid)) |
2056a782 | 227 | return; |
c71a8961 ACM |
228 | cpu = raw_smp_processor_id(); |
229 | ||
18cea459 | 230 | if (blk_tracer) { |
c71a8961 ACM |
231 | tracing_record_cmdline(current); |
232 | ||
12883efb | 233 | buffer = blk_tr->trace_buffer.buffer; |
51a763dd | 234 | pc = preempt_count(); |
e77405ad | 235 | event = trace_buffer_lock_reserve(buffer, TRACE_BLK, |
51a763dd ACM |
236 | sizeof(*t) + pdu_len, |
237 | 0, pc); | |
c71a8961 ACM |
238 | if (!event) |
239 | return; | |
51a763dd | 240 | t = ring_buffer_event_data(event); |
c71a8961 ACM |
241 | goto record_it; |
242 | } | |
2056a782 | 243 | |
a404d557 JK |
244 | if (unlikely(tsk->btrace_seq != blktrace_seq)) |
245 | trace_note_tsk(tsk); | |
246 | ||
2056a782 JA |
247 | /* |
248 | * A word about the locking here - we disable interrupts to reserve | |
249 | * some space in the relay per-cpu buffer, to prevent an irq | |
14a73f54 | 250 | * from coming in and stepping on our toes. |
2056a782 JA |
251 | */ |
252 | local_irq_save(flags); | |
2056a782 JA |
253 | t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len); |
254 | if (t) { | |
2056a782 JA |
255 | sequence = per_cpu_ptr(bt->sequence, cpu); |
256 | ||
257 | t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; | |
258 | t->sequence = ++(*sequence); | |
2997c8c4 | 259 | t->time = ktime_to_ns(ktime_get()); |
c71a8961 | 260 | record_it: |
08a06b83 | 261 | /* |
939b3669 ACM |
262 | * These two are not needed in ftrace as they are in the |
263 | * generic trace_entry, filled by tracing_generic_entry_update, | |
264 | * but for the trace_event->bin() synthesizer benefit we do it | |
265 | * here too. | |
266 | */ | |
267 | t->cpu = cpu; | |
268 | t->pid = pid; | |
08a06b83 | 269 | |
2056a782 JA |
270 | t->sector = sector; |
271 | t->bytes = bytes; | |
272 | t->action = what; | |
2056a782 | 273 | t->device = bt->dev; |
2056a782 JA |
274 | t->error = error; |
275 | t->pdu_len = pdu_len; | |
276 | ||
277 | if (pdu_len) | |
278 | memcpy((void *) t + sizeof(*t), pdu_data, pdu_len); | |
c71a8961 | 279 | |
18cea459 | 280 | if (blk_tracer) { |
e77405ad | 281 | trace_buffer_unlock_commit(buffer, event, 0, pc); |
c71a8961 ACM |
282 | return; |
283 | } | |
2056a782 JA |
284 | } |
285 | ||
286 | local_irq_restore(flags); | |
287 | } | |
288 | ||
2056a782 | 289 | static struct dentry *blk_tree_root; |
11a57153 | 290 | static DEFINE_MUTEX(blk_tree_mutex); |
2056a782 | 291 | |
ad5dd549 | 292 | static void blk_trace_free(struct blk_trace *bt) |
2056a782 | 293 | { |
02c62304 | 294 | debugfs_remove(bt->msg_file); |
2056a782 | 295 | debugfs_remove(bt->dropped_file); |
f48fc4d3 | 296 | relay_close(bt->rchan); |
39cbb602 | 297 | debugfs_remove(bt->dir); |
2056a782 | 298 | free_percpu(bt->sequence); |
64565911 | 299 | free_percpu(bt->msg_data); |
2056a782 | 300 | kfree(bt); |
ad5dd549 LZ |
301 | } |
302 | ||
303 | static void blk_trace_cleanup(struct blk_trace *bt) | |
304 | { | |
305 | blk_trace_free(bt); | |
5f3ea37c ACM |
306 | if (atomic_dec_and_test(&blk_probes_ref)) |
307 | blk_unregister_tracepoints(); | |
2056a782 JA |
308 | } |
309 | ||
6da127ad | 310 | int blk_trace_remove(struct request_queue *q) |
2056a782 JA |
311 | { |
312 | struct blk_trace *bt; | |
313 | ||
314 | bt = xchg(&q->blk_trace, NULL); | |
315 | if (!bt) | |
316 | return -EINVAL; | |
317 | ||
55547204 | 318 | if (bt->trace_state != Blktrace_running) |
2056a782 JA |
319 | blk_trace_cleanup(bt); |
320 | ||
321 | return 0; | |
322 | } | |
6da127ad | 323 | EXPORT_SYMBOL_GPL(blk_trace_remove); |
2056a782 | 324 | |
2056a782 JA |
325 | static ssize_t blk_dropped_read(struct file *filp, char __user *buffer, |
326 | size_t count, loff_t *ppos) | |
327 | { | |
328 | struct blk_trace *bt = filp->private_data; | |
329 | char buf[16]; | |
330 | ||
331 | snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped)); | |
332 | ||
333 | return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); | |
334 | } | |
335 | ||
2b8693c0 | 336 | static const struct file_operations blk_dropped_fops = { |
2056a782 | 337 | .owner = THIS_MODULE, |
234e3405 | 338 | .open = simple_open, |
2056a782 | 339 | .read = blk_dropped_read, |
6038f373 | 340 | .llseek = default_llseek, |
2056a782 JA |
341 | }; |
342 | ||
02c62304 AB |
343 | static ssize_t blk_msg_write(struct file *filp, const char __user *buffer, |
344 | size_t count, loff_t *ppos) | |
345 | { | |
346 | char *msg; | |
347 | struct blk_trace *bt; | |
348 | ||
7635b03a | 349 | if (count >= BLK_TN_MAX_MSG) |
02c62304 AB |
350 | return -EINVAL; |
351 | ||
a4b3ada8 | 352 | msg = kmalloc(count + 1, GFP_KERNEL); |
02c62304 AB |
353 | if (msg == NULL) |
354 | return -ENOMEM; | |
355 | ||
356 | if (copy_from_user(msg, buffer, count)) { | |
357 | kfree(msg); | |
358 | return -EFAULT; | |
359 | } | |
360 | ||
a4b3ada8 | 361 | msg[count] = '\0'; |
02c62304 AB |
362 | bt = filp->private_data; |
363 | __trace_note_message(bt, "%s", msg); | |
364 | kfree(msg); | |
365 | ||
366 | return count; | |
367 | } | |
368 | ||
369 | static const struct file_operations blk_msg_fops = { | |
370 | .owner = THIS_MODULE, | |
234e3405 | 371 | .open = simple_open, |
02c62304 | 372 | .write = blk_msg_write, |
6038f373 | 373 | .llseek = noop_llseek, |
02c62304 AB |
374 | }; |
375 | ||
2056a782 JA |
376 | /* |
377 | * Keep track of how many times we encountered a full subbuffer, to aid | |
378 | * the user space app in telling how many lost events there were. | |
379 | */ | |
380 | static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf, | |
381 | void *prev_subbuf, size_t prev_padding) | |
382 | { | |
383 | struct blk_trace *bt; | |
384 | ||
385 | if (!relay_buf_full(buf)) | |
386 | return 1; | |
387 | ||
388 | bt = buf->chan->private_data; | |
389 | atomic_inc(&bt->dropped); | |
390 | return 0; | |
391 | } | |
392 | ||
393 | static int blk_remove_buf_file_callback(struct dentry *dentry) | |
394 | { | |
395 | debugfs_remove(dentry); | |
f48fc4d3 | 396 | |
2056a782 JA |
397 | return 0; |
398 | } | |
399 | ||
400 | static struct dentry *blk_create_buf_file_callback(const char *filename, | |
401 | struct dentry *parent, | |
f4ae40a6 | 402 | umode_t mode, |
2056a782 JA |
403 | struct rchan_buf *buf, |
404 | int *is_global) | |
405 | { | |
406 | return debugfs_create_file(filename, mode, parent, buf, | |
407 | &relay_file_operations); | |
408 | } | |
409 | ||
410 | static struct rchan_callbacks blk_relay_callbacks = { | |
411 | .subbuf_start = blk_subbuf_start_callback, | |
412 | .create_buf_file = blk_create_buf_file_callback, | |
413 | .remove_buf_file = blk_remove_buf_file_callback, | |
414 | }; | |
415 | ||
9908c309 LZ |
416 | static void blk_trace_setup_lba(struct blk_trace *bt, |
417 | struct block_device *bdev) | |
418 | { | |
419 | struct hd_struct *part = NULL; | |
420 | ||
421 | if (bdev) | |
422 | part = bdev->bd_part; | |
423 | ||
424 | if (part) { | |
425 | bt->start_lba = part->start_sect; | |
426 | bt->end_lba = part->start_sect + part->nr_sects; | |
427 | } else { | |
428 | bt->start_lba = 0; | |
429 | bt->end_lba = -1ULL; | |
430 | } | |
431 | } | |
432 | ||
2056a782 JA |
433 | /* |
434 | * Setup everything required to start tracing | |
435 | */ | |
6da127ad | 436 | int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, |
d0deef5b SD |
437 | struct block_device *bdev, |
438 | struct blk_user_trace_setup *buts) | |
2056a782 | 439 | { |
2056a782 JA |
440 | struct blk_trace *old_bt, *bt = NULL; |
441 | struct dentry *dir = NULL; | |
2056a782 JA |
442 | int ret, i; |
443 | ||
171044d4 | 444 | if (!buts->buf_size || !buts->buf_nr) |
2056a782 JA |
445 | return -EINVAL; |
446 | ||
0497b345 JA |
447 | strncpy(buts->name, name, BLKTRACE_BDEV_SIZE); |
448 | buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0'; | |
2056a782 JA |
449 | |
450 | /* | |
451 | * some device names have larger paths - convert the slashes | |
452 | * to underscores for this to work as expected | |
453 | */ | |
171044d4 AB |
454 | for (i = 0; i < strlen(buts->name); i++) |
455 | if (buts->name[i] == '/') | |
456 | buts->name[i] = '_'; | |
2056a782 | 457 | |
2056a782 JA |
458 | bt = kzalloc(sizeof(*bt), GFP_KERNEL); |
459 | if (!bt) | |
ad5dd549 | 460 | return -ENOMEM; |
2056a782 | 461 | |
ad5dd549 | 462 | ret = -ENOMEM; |
2056a782 JA |
463 | bt->sequence = alloc_percpu(unsigned long); |
464 | if (!bt->sequence) | |
465 | goto err; | |
466 | ||
313e458f | 467 | bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char)); |
64565911 JA |
468 | if (!bt->msg_data) |
469 | goto err; | |
470 | ||
2056a782 | 471 | ret = -ENOENT; |
f48fc4d3 | 472 | |
b5230b56 | 473 | mutex_lock(&blk_tree_mutex); |
f48fc4d3 JA |
474 | if (!blk_tree_root) { |
475 | blk_tree_root = debugfs_create_dir("block", NULL); | |
b5230b56 LZ |
476 | if (!blk_tree_root) { |
477 | mutex_unlock(&blk_tree_mutex); | |
1a17662e | 478 | goto err; |
b5230b56 | 479 | } |
f48fc4d3 | 480 | } |
b5230b56 | 481 | mutex_unlock(&blk_tree_mutex); |
f48fc4d3 JA |
482 | |
483 | dir = debugfs_create_dir(buts->name, blk_tree_root); | |
484 | ||
2056a782 JA |
485 | if (!dir) |
486 | goto err; | |
487 | ||
488 | bt->dir = dir; | |
6da127ad | 489 | bt->dev = dev; |
2056a782 | 490 | atomic_set(&bt->dropped, 0); |
a404d557 | 491 | INIT_LIST_HEAD(&bt->running_list); |
2056a782 JA |
492 | |
493 | ret = -EIO; | |
939b3669 ACM |
494 | bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, |
495 | &blk_dropped_fops); | |
2056a782 JA |
496 | if (!bt->dropped_file) |
497 | goto err; | |
498 | ||
02c62304 AB |
499 | bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops); |
500 | if (!bt->msg_file) | |
501 | goto err; | |
502 | ||
171044d4 AB |
503 | bt->rchan = relay_open("trace", dir, buts->buf_size, |
504 | buts->buf_nr, &blk_relay_callbacks, bt); | |
2056a782 JA |
505 | if (!bt->rchan) |
506 | goto err; | |
2056a782 | 507 | |
171044d4 | 508 | bt->act_mask = buts->act_mask; |
2056a782 JA |
509 | if (!bt->act_mask) |
510 | bt->act_mask = (u16) -1; | |
511 | ||
9908c309 | 512 | blk_trace_setup_lba(bt, bdev); |
2056a782 | 513 | |
d0deef5b SD |
514 | /* overwrite with user settings */ |
515 | if (buts->start_lba) | |
516 | bt->start_lba = buts->start_lba; | |
517 | if (buts->end_lba) | |
518 | bt->end_lba = buts->end_lba; | |
519 | ||
171044d4 | 520 | bt->pid = buts->pid; |
2056a782 JA |
521 | bt->trace_state = Blktrace_setup; |
522 | ||
523 | ret = -EBUSY; | |
524 | old_bt = xchg(&q->blk_trace, bt); | |
525 | if (old_bt) { | |
526 | (void) xchg(&q->blk_trace, old_bt); | |
527 | goto err; | |
528 | } | |
529 | ||
17ba97e3 | 530 | if (atomic_inc_return(&blk_probes_ref) == 1) |
cbe28296 LZ |
531 | blk_register_tracepoints(); |
532 | ||
2056a782 JA |
533 | return 0; |
534 | err: | |
ad5dd549 | 535 | blk_trace_free(bt); |
2056a782 JA |
536 | return ret; |
537 | } | |
171044d4 | 538 | |
6da127ad | 539 | int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, |
d0deef5b | 540 | struct block_device *bdev, |
6da127ad | 541 | char __user *arg) |
171044d4 AB |
542 | { |
543 | struct blk_user_trace_setup buts; | |
544 | int ret; | |
545 | ||
546 | ret = copy_from_user(&buts, arg, sizeof(buts)); | |
547 | if (ret) | |
548 | return -EFAULT; | |
549 | ||
d0deef5b | 550 | ret = do_blk_trace_setup(q, name, dev, bdev, &buts); |
171044d4 AB |
551 | if (ret) |
552 | return ret; | |
553 | ||
9a8c28c8 DM |
554 | if (copy_to_user(arg, &buts, sizeof(buts))) { |
555 | blk_trace_remove(q); | |
171044d4 | 556 | return -EFAULT; |
9a8c28c8 | 557 | } |
171044d4 AB |
558 | return 0; |
559 | } | |
6da127ad | 560 | EXPORT_SYMBOL_GPL(blk_trace_setup); |
2056a782 | 561 | |
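
For orientation, a minimal user-space sketch of the path that reaches blk_trace_setup() through the BLKTRACESETUP and BLKTRACESTART ioctls handled in blk_trace_ioctl() further down. Error handling is trimmed, the buffer sizes are arbitrary, and the userspace header names are assumed from memory:

```c
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>              /* BLKTRACESETUP, BLKTRACESTART */
#include <linux/blktrace_api.h>    /* struct blk_user_trace_setup  */

static int start_blktrace(const char *dev)
{
	struct blk_user_trace_setup buts;
	int fd = open(dev, O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return -1;

	memset(&buts, 0, sizeof(buts));
	buts.buf_size = 512 * 1024;   /* relay sub-buffer size            */
	buts.buf_nr   = 4;            /* number of sub-buffers            */
	buts.act_mask = 0;            /* 0 is widened to (u16)-1 in setup */

	if (ioctl(fd, BLKTRACESETUP, &buts) < 0)   /* kernel fills buts.name */
		return -1;

	return ioctl(fd, BLKTRACESTART);
}
```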
62c2a7d9 AB |
562 | #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) |
563 | static int compat_blk_trace_setup(struct request_queue *q, char *name, | |
564 | dev_t dev, struct block_device *bdev, | |
565 | char __user *arg) | |
566 | { | |
567 | struct blk_user_trace_setup buts; | |
568 | struct compat_blk_user_trace_setup cbuts; | |
569 | int ret; | |
570 | ||
571 | if (copy_from_user(&cbuts, arg, sizeof(cbuts))) | |
572 | return -EFAULT; | |
573 | ||
574 | buts = (struct blk_user_trace_setup) { | |
575 | .act_mask = cbuts.act_mask, | |
576 | .buf_size = cbuts.buf_size, | |
577 | .buf_nr = cbuts.buf_nr, | |
578 | .start_lba = cbuts.start_lba, | |
579 | .end_lba = cbuts.end_lba, | |
580 | .pid = cbuts.pid, | |
581 | }; | |
62c2a7d9 AB |
582 | |
583 | ret = do_blk_trace_setup(q, name, dev, bdev, &buts); | |
584 | if (ret) | |
585 | return ret; | |
586 | ||
f8c5e944 | 587 | if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) { |
62c2a7d9 AB |
588 | blk_trace_remove(q); |
589 | return -EFAULT; | |
590 | } | |
591 | ||
592 | return 0; | |
593 | } | |
594 | #endif | |
595 | ||
6da127ad | 596 | int blk_trace_startstop(struct request_queue *q, int start) |
2056a782 | 597 | { |
2056a782 | 598 | int ret; |
939b3669 | 599 | struct blk_trace *bt = q->blk_trace; |
2056a782 | 600 | |
939b3669 | 601 | if (bt == NULL) |
2056a782 JA |
602 | return -EINVAL; |
603 | ||
604 | /* | |
605 | * For starting a trace, we can transition from a setup or stopped | |
606 | * trace. For stopping a trace, the state must be running | |
607 | */ | |
608 | ret = -EINVAL; | |
609 | if (start) { | |
610 | if (bt->trace_state == Blktrace_setup || | |
611 | bt->trace_state == Blktrace_stopped) { | |
612 | blktrace_seq++; | |
613 | smp_mb(); | |
614 | bt->trace_state = Blktrace_running; | |
a404d557 JK |
615 | spin_lock_irq(&running_trace_lock); |
616 | list_add(&bt->running_list, &running_trace_list); | |
617 | spin_unlock_irq(&running_trace_lock); | |
be1c6341 OK |
618 | |
619 | trace_note_time(bt); | |
2056a782 JA |
620 | ret = 0; |
621 | } | |
622 | } else { | |
623 | if (bt->trace_state == Blktrace_running) { | |
624 | bt->trace_state = Blktrace_stopped; | |
a404d557 JK |
625 | spin_lock_irq(&running_trace_lock); |
626 | list_del_init(&bt->running_list); | |
627 | spin_unlock_irq(&running_trace_lock); | |
2056a782 JA |
628 | relay_flush(bt->rchan); |
629 | ret = 0; | |
630 | } | |
631 | } | |
632 | ||
633 | return ret; | |
634 | } | |
6da127ad | 635 | EXPORT_SYMBOL_GPL(blk_trace_startstop); |
2056a782 JA |
636 | |
637 | /** | |
638 | * blk_trace_ioctl: - handle the ioctls associated with tracing | |
639 | * @bdev: the block device | |
ef18012b | 640 | * @cmd: the ioctl cmd |
2056a782 JA |
641 | * @arg: the argument data, if any |
642 | * | |
643 | **/ | |
644 | int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) | |
645 | { | |
165125e1 | 646 | struct request_queue *q; |
2056a782 | 647 | int ret, start = 0; |
6da127ad | 648 | char b[BDEVNAME_SIZE]; |
2056a782 JA |
649 | |
650 | q = bdev_get_queue(bdev); | |
651 | if (!q) | |
652 | return -ENXIO; | |
653 | ||
654 | mutex_lock(&bdev->bd_mutex); | |
655 | ||
656 | switch (cmd) { | |
657 | case BLKTRACESETUP: | |
f36f21ec | 658 | bdevname(bdev, b); |
d0deef5b | 659 | ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); |
2056a782 | 660 | break; |
62c2a7d9 AB |
661 | #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) |
662 | case BLKTRACESETUP32: | |
663 | bdevname(bdev, b); | |
664 | ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); | |
665 | break; | |
666 | #endif | |
2056a782 JA |
667 | case BLKTRACESTART: |
668 | start = 1; /* fall through */ |
669 | case BLKTRACESTOP: | |
670 | ret = blk_trace_startstop(q, start); | |
671 | break; | |
672 | case BLKTRACETEARDOWN: | |
673 | ret = blk_trace_remove(q); | |
674 | break; | |
675 | default: | |
676 | ret = -ENOTTY; | |
677 | break; | |
678 | } | |
679 | ||
680 | mutex_unlock(&bdev->bd_mutex); | |
681 | return ret; | |
682 | } | |
683 | ||
684 | /** | |
685 | * blk_trace_shutdown: - stop and cleanup trace structures | |
686 | * @q: the request queue associated with the device | |
687 | * | |
688 | **/ | |
165125e1 | 689 | void blk_trace_shutdown(struct request_queue *q) |
2056a782 | 690 | { |
6c5c9341 AD |
691 | if (q->blk_trace) { |
692 | blk_trace_startstop(q, 0); | |
693 | blk_trace_remove(q); | |
694 | } | |
2056a782 | 695 | } |
5f3ea37c ACM |
696 | |
697 | /* | |
698 | * blktrace probes | |
699 | */ | |
700 | ||
701 | /** | |
702 | * blk_add_trace_rq - Add a trace for a request oriented action | |
703 | * @q: queue the io is for | |
704 | * @rq: the source request | |
af5040da | 705 | * @nr_bytes: number of completed bytes |
5f3ea37c ACM |
706 | * @what: the action |
707 | * | |
708 | * Description: | |
709 | * Records an action against a request. Will log the bio offset + size. | |
710 | * | |
711 | **/ | |
712 | static void blk_add_trace_rq(struct request_queue *q, struct request *rq, | |
af5040da | 713 | unsigned int nr_bytes, u32 what) |
5f3ea37c ACM |
714 | { |
715 | struct blk_trace *bt = q->blk_trace; | |
5f3ea37c ACM |
716 | |
717 | if (likely(!bt)) | |
718 | return; | |
719 | ||
33659ebb | 720 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { |
5f3ea37c | 721 | what |= BLK_TC_ACT(BLK_TC_PC); |
af5040da | 722 | __blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags, |
2e46e8b2 | 723 | what, rq->errors, rq->cmd_len, rq->cmd); |
5f3ea37c ACM |
724 | } else { |
725 | what |= BLK_TC_ACT(BLK_TC_FS); | |
af5040da | 726 | __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes, |
805f6b5e | 727 | rq->cmd_flags, what, rq->errors, 0, NULL); |
5f3ea37c ACM |
728 | } |
729 | } | |
730 | ||
38516ab5 SR |
731 | static void blk_add_trace_rq_abort(void *ignore, |
732 | struct request_queue *q, struct request *rq) | |
5f3ea37c | 733 | { |
af5040da | 734 | blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT); |
5f3ea37c ACM |
735 | } |
736 | ||
38516ab5 SR |
737 | static void blk_add_trace_rq_insert(void *ignore, |
738 | struct request_queue *q, struct request *rq) | |
5f3ea37c | 739 | { |
af5040da | 740 | blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT); |
5f3ea37c ACM |
741 | } |
742 | ||
38516ab5 SR |
743 | static void blk_add_trace_rq_issue(void *ignore, |
744 | struct request_queue *q, struct request *rq) | |
5f3ea37c | 745 | { |
af5040da | 746 | blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE); |
5f3ea37c ACM |
747 | } |
748 | ||
38516ab5 SR |
749 | static void blk_add_trace_rq_requeue(void *ignore, |
750 | struct request_queue *q, | |
939b3669 | 751 | struct request *rq) |
5f3ea37c | 752 | { |
af5040da | 753 | blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE); |
5f3ea37c ACM |
754 | } |
755 | ||
38516ab5 SR |
756 | static void blk_add_trace_rq_complete(void *ignore, |
757 | struct request_queue *q, | |
af5040da RP |
758 | struct request *rq, |
759 | unsigned int nr_bytes) | |
5f3ea37c | 760 | { |
af5040da | 761 | blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE); |
5f3ea37c ACM |
762 | } |
763 | ||
764 | /** | |
765 | * blk_add_trace_bio - Add a trace for a bio oriented action | |
766 | * @q: queue the io is for | |
767 | * @bio: the source bio | |
768 | * @what: the action | |
797a455d | 769 | * @error: error, if any |
5f3ea37c ACM |
770 | * |
771 | * Description: | |
772 | * Records an action against a bio. Will log the bio offset + size. | |
773 | * | |
774 | **/ | |
775 | static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, | |
797a455d | 776 | u32 what, int error) |
5f3ea37c ACM |
777 | { |
778 | struct blk_trace *bt = q->blk_trace; | |
779 | ||
780 | if (likely(!bt)) | |
781 | return; | |
782 | ||
797a455d JA |
783 | if (!error && !bio_flagged(bio, BIO_UPTODATE)) |
784 | error = EIO; | |
785 | ||
4f024f37 KO |
786 | __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, |
787 | bio->bi_rw, what, error, 0, NULL); | |
5f3ea37c ACM |
788 | } |
789 | ||
38516ab5 SR |
790 | static void blk_add_trace_bio_bounce(void *ignore, |
791 | struct request_queue *q, struct bio *bio) | |
5f3ea37c | 792 | { |
797a455d | 793 | blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0); |
5f3ea37c ACM |
794 | } |
795 | ||
0a82a8d1 LT |
796 | static void blk_add_trace_bio_complete(void *ignore, |
797 | struct request_queue *q, struct bio *bio, | |
798 | int error) | |
5f3ea37c | 799 | { |
797a455d | 800 | blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error); |
5f3ea37c ACM |
801 | } |
802 | ||
38516ab5 SR |
803 | static void blk_add_trace_bio_backmerge(void *ignore, |
804 | struct request_queue *q, | |
8c1cf6bb | 805 | struct request *rq, |
939b3669 | 806 | struct bio *bio) |
5f3ea37c | 807 | { |
797a455d | 808 | blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0); |
5f3ea37c ACM |
809 | } |
810 | ||
38516ab5 SR |
811 | static void blk_add_trace_bio_frontmerge(void *ignore, |
812 | struct request_queue *q, | |
8c1cf6bb | 813 | struct request *rq, |
939b3669 | 814 | struct bio *bio) |
5f3ea37c | 815 | { |
797a455d | 816 | blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0); |
5f3ea37c ACM |
817 | } |
818 | ||
38516ab5 SR |
819 | static void blk_add_trace_bio_queue(void *ignore, |
820 | struct request_queue *q, struct bio *bio) | |
5f3ea37c | 821 | { |
797a455d | 822 | blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0); |
5f3ea37c ACM |
823 | } |
824 | ||
38516ab5 SR |
825 | static void blk_add_trace_getrq(void *ignore, |
826 | struct request_queue *q, | |
939b3669 | 827 | struct bio *bio, int rw) |
5f3ea37c ACM |
828 | { |
829 | if (bio) | |
797a455d | 830 | blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0); |
5f3ea37c ACM |
831 | else { |
832 | struct blk_trace *bt = q->blk_trace; | |
833 | ||
834 | if (bt) | |
835 | __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL); | |
836 | } | |
837 | } | |
838 | ||
839 | ||
38516ab5 SR |
840 | static void blk_add_trace_sleeprq(void *ignore, |
841 | struct request_queue *q, | |
939b3669 | 842 | struct bio *bio, int rw) |
5f3ea37c ACM |
843 | { |
844 | if (bio) | |
797a455d | 845 | blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0); |
5f3ea37c ACM |
846 | else { |
847 | struct blk_trace *bt = q->blk_trace; | |
848 | ||
849 | if (bt) | |
939b3669 ACM |
850 | __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ, |
851 | 0, 0, NULL); | |
5f3ea37c ACM |
852 | } |
853 | } | |
854 | ||
38516ab5 | 855 | static void blk_add_trace_plug(void *ignore, struct request_queue *q) |
5f3ea37c ACM |
856 | { |
857 | struct blk_trace *bt = q->blk_trace; | |
858 | ||
859 | if (bt) | |
860 | __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL); | |
861 | } | |
862 | ||
49cac01e JA |
863 | static void blk_add_trace_unplug(void *ignore, struct request_queue *q, |
864 | unsigned int depth, bool explicit) | |
5f3ea37c ACM |
865 | { |
866 | struct blk_trace *bt = q->blk_trace; | |
867 | ||
868 | if (bt) { | |
94b5eb28 | 869 | __be64 rpdu = cpu_to_be64(depth); |
49cac01e | 870 | u32 what; |
5f3ea37c | 871 | |
49cac01e JA |
872 | if (explicit) |
873 | what = BLK_TA_UNPLUG_IO; | |
874 | else | |
875 | what = BLK_TA_UNPLUG_TIMER; | |
876 | ||
877 | __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu); | |
5f3ea37c ACM |
878 | } |
879 | } | |
880 | ||
38516ab5 SR |
881 | static void blk_add_trace_split(void *ignore, |
882 | struct request_queue *q, struct bio *bio, | |
5f3ea37c ACM |
883 | unsigned int pdu) |
884 | { | |
885 | struct blk_trace *bt = q->blk_trace; | |
886 | ||
887 | if (bt) { | |
888 | __be64 rpdu = cpu_to_be64(pdu); | |
889 | ||
4f024f37 KO |
890 | __blk_add_trace(bt, bio->bi_iter.bi_sector, |
891 | bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT, | |
892 | !bio_flagged(bio, BIO_UPTODATE), | |
5f3ea37c ACM |
893 | sizeof(rpdu), &rpdu); |
894 | } | |
895 | } | |
896 | ||
897 | /** | |
d07335e5 | 898 | * blk_add_trace_bio_remap - Add a trace for a bio-remap operation |
546cf44a | 899 | * @ignore: trace callback data parameter (not used) |
5f3ea37c ACM |
900 | * @q: queue the io is for |
901 | * @bio: the source bio | |
902 | * @dev: target device | |
a42aaa3b | 903 | * @from: source sector |
5f3ea37c ACM |
904 | * |
905 | * Description: | |
906 | * Device mapper or raid targets sometimes need to split a bio because |
907 | * it spans a stripe (or similar). Add a trace for that action. | |
908 | * | |
909 | **/ | |
d07335e5 MS |
910 | static void blk_add_trace_bio_remap(void *ignore, |
911 | struct request_queue *q, struct bio *bio, | |
912 | dev_t dev, sector_t from) | |
5f3ea37c ACM |
913 | { |
914 | struct blk_trace *bt = q->blk_trace; | |
915 | struct blk_io_trace_remap r; | |
916 | ||
917 | if (likely(!bt)) | |
918 | return; | |
919 | ||
a42aaa3b AB |
920 | r.device_from = cpu_to_be32(dev); |
921 | r.device_to = cpu_to_be32(bio->bi_bdev->bd_dev); | |
922 | r.sector_from = cpu_to_be64(from); | |
5f3ea37c | 923 | |
4f024f37 KO |
924 | __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, |
925 | bio->bi_rw, BLK_TA_REMAP, | |
926 | !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r); | |
5f3ea37c ACM |
927 | } |
928 | ||
b0da3f0d JN |
929 | /** |
930 | * blk_add_trace_rq_remap - Add a trace for a request-remap operation | |
546cf44a | 931 | * @ignore: trace callback data parameter (not used) |
b0da3f0d JN |
932 | * @q: queue the io is for |
933 | * @rq: the source request | |
934 | * @dev: target device | |
935 | * @from: source sector | |
936 | * | |
937 | * Description: | |
938 | * Device mapper remaps request to other devices. | |
939 | * Add a trace for that action. | |
940 | * | |
941 | **/ | |
38516ab5 SR |
942 | static void blk_add_trace_rq_remap(void *ignore, |
943 | struct request_queue *q, | |
b0da3f0d JN |
944 | struct request *rq, dev_t dev, |
945 | sector_t from) | |
946 | { | |
947 | struct blk_trace *bt = q->blk_trace; | |
948 | struct blk_io_trace_remap r; | |
949 | ||
950 | if (likely(!bt)) | |
951 | return; | |
952 | ||
953 | r.device_from = cpu_to_be32(dev); | |
954 | r.device_to = cpu_to_be32(disk_devt(rq->rq_disk)); | |
955 | r.sector_from = cpu_to_be64(from); | |
956 | ||
957 | __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), | |
958 | rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors, | |
959 | sizeof(r), &r); | |
960 | } | |
961 | ||
5f3ea37c ACM |
962 | /** |
963 | * blk_add_driver_data - Add binary message with driver-specific data | |
964 | * @q: queue the io is for | |
965 | * @rq: io request | |
966 | * @data: driver-specific data | |
967 | * @len: length of driver-specific data | |
968 | * | |
969 | * Description: | |
970 | * Some drivers might want to write driver-specific data per request. | |
971 | * | |
972 | **/ | |
973 | void blk_add_driver_data(struct request_queue *q, | |
974 | struct request *rq, | |
975 | void *data, size_t len) | |
976 | { | |
977 | struct blk_trace *bt = q->blk_trace; | |
978 | ||
979 | if (likely(!bt)) | |
980 | return; | |
981 | ||
33659ebb | 982 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) |
2e46e8b2 TH |
983 | __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0, |
984 | BLK_TA_DRV_DATA, rq->errors, len, data); | |
5f3ea37c | 985 | else |
2e46e8b2 TH |
986 | __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0, |
987 | BLK_TA_DRV_DATA, rq->errors, len, data); | |
5f3ea37c ACM |
988 | } |
989 | EXPORT_SYMBOL_GPL(blk_add_driver_data); | |
990 | ||
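
A hedged sketch of how a low-level driver might attach its own payload to a request's trace via the export above; struct my_hw_status and the function name are invented for illustration:

```c
/* Illustrative only: log a controller-specific completion record. */
struct my_hw_status {
	u32 hw_queue;
	u32 hw_latency_us;
};

static void my_driver_trace_done(struct request_queue *q, struct request *rq,
				 u32 hwq, u32 lat_us)
{
	struct my_hw_status st = { .hw_queue = hwq, .hw_latency_us = lat_us };

	blk_add_driver_data(q, rq, &st, sizeof(st));
}
```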
3c289ba7 | 991 | static void blk_register_tracepoints(void) |
5f3ea37c ACM |
992 | { |
993 | int ret; | |
994 | ||
38516ab5 | 995 | ret = register_trace_block_rq_abort(blk_add_trace_rq_abort, NULL); |
5f3ea37c | 996 | WARN_ON(ret); |
38516ab5 | 997 | ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL); |
5f3ea37c | 998 | WARN_ON(ret); |
38516ab5 | 999 | ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL); |
5f3ea37c | 1000 | WARN_ON(ret); |
38516ab5 | 1001 | ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL); |
5f3ea37c | 1002 | WARN_ON(ret); |
38516ab5 | 1003 | ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); |
5f3ea37c | 1004 | WARN_ON(ret); |
38516ab5 | 1005 | ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL); |
5f3ea37c | 1006 | WARN_ON(ret); |
38516ab5 | 1007 | ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL); |
5f3ea37c | 1008 | WARN_ON(ret); |
38516ab5 | 1009 | ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL); |
5f3ea37c | 1010 | WARN_ON(ret); |
38516ab5 | 1011 | ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL); |
5f3ea37c | 1012 | WARN_ON(ret); |
38516ab5 | 1013 | ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL); |
5f3ea37c | 1014 | WARN_ON(ret); |
38516ab5 | 1015 | ret = register_trace_block_getrq(blk_add_trace_getrq, NULL); |
5f3ea37c | 1016 | WARN_ON(ret); |
38516ab5 | 1017 | ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL); |
5f3ea37c | 1018 | WARN_ON(ret); |
38516ab5 | 1019 | ret = register_trace_block_plug(blk_add_trace_plug, NULL); |
5f3ea37c | 1020 | WARN_ON(ret); |
49cac01e | 1021 | ret = register_trace_block_unplug(blk_add_trace_unplug, NULL); |
5f3ea37c | 1022 | WARN_ON(ret); |
38516ab5 | 1023 | ret = register_trace_block_split(blk_add_trace_split, NULL); |
5f3ea37c | 1024 | WARN_ON(ret); |
d07335e5 | 1025 | ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); |
5f3ea37c | 1026 | WARN_ON(ret); |
38516ab5 | 1027 | ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); |
b0da3f0d | 1028 | WARN_ON(ret); |
5f3ea37c ACM |
1029 | } |
1030 | ||
1031 | static void blk_unregister_tracepoints(void) | |
1032 | { | |
38516ab5 | 1033 | unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); |
d07335e5 | 1034 | unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); |
38516ab5 | 1035 | unregister_trace_block_split(blk_add_trace_split, NULL); |
49cac01e | 1036 | unregister_trace_block_unplug(blk_add_trace_unplug, NULL); |
38516ab5 SR |
1037 | unregister_trace_block_plug(blk_add_trace_plug, NULL); |
1038 | unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL); | |
1039 | unregister_trace_block_getrq(blk_add_trace_getrq, NULL); | |
1040 | unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL); | |
1041 | unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL); | |
1042 | unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL); | |
1043 | unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL); | |
1044 | unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL); | |
1045 | unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); | |
1046 | unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL); | |
1047 | unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL); | |
1048 | unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL); | |
1049 | unregister_trace_block_rq_abort(blk_add_trace_rq_abort, NULL); | |
5f3ea37c ACM |
1050 | |
1051 | tracepoint_synchronize_unregister(); | |
1052 | } | |
c71a8961 ACM |
1053 | |
1054 | /* | |
1055 | * struct blk_io_tracer formatting routines | |
1056 | */ | |
1057 | ||
1058 | static void fill_rwbs(char *rwbs, const struct blk_io_trace *t) | |
1059 | { | |
157f9c00 | 1060 | int i = 0; |
65796348 | 1061 | int tc = t->action >> BLK_TC_SHIFT; |
157f9c00 | 1062 | |
18cea459 LZ |
1063 | if (t->action == BLK_TN_MESSAGE) { |
1064 | rwbs[i++] = 'N'; | |
1065 | goto out; | |
1066 | } | |
1067 | ||
c09c47ca NK |
1068 | if (tc & BLK_TC_FLUSH) |
1069 | rwbs[i++] = 'F'; | |
1070 | ||
65796348 | 1071 | if (tc & BLK_TC_DISCARD) |
157f9c00 | 1072 | rwbs[i++] = 'D'; |
65796348 | 1073 | else if (tc & BLK_TC_WRITE) |
157f9c00 ACM |
1074 | rwbs[i++] = 'W'; |
1075 | else if (t->bytes) | |
1076 | rwbs[i++] = 'R'; | |
1077 | else | |
1078 | rwbs[i++] = 'N'; | |
1079 | ||
c09c47ca NK |
1080 | if (tc & BLK_TC_FUA) |
1081 | rwbs[i++] = 'F'; | |
65796348 | 1082 | if (tc & BLK_TC_AHEAD) |
157f9c00 | 1083 | rwbs[i++] = 'A'; |
65796348 | 1084 | if (tc & BLK_TC_SYNC) |
157f9c00 | 1085 | rwbs[i++] = 'S'; |
65796348 | 1086 | if (tc & BLK_TC_META) |
157f9c00 | 1087 | rwbs[i++] = 'M'; |
18cea459 | 1088 | out: |
157f9c00 | 1089 | rwbs[i] = '\0'; |
c71a8961 ACM |
1090 | } |
1091 | ||
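
As a reading aid (not part of the file), a few sample translations performed by fill_rwbs() given the branches above:

```c
/*  event category bits                      -> rwbs string
 *  BLK_TC_WRITE | BLK_TC_SYNC               -> "WS"
 *  BLK_TC_FLUSH | BLK_TC_WRITE | BLK_TC_FUA -> "FWF"
 *  BLK_TC_DISCARD                           -> "D"
 *  read with t->bytes != 0                  -> "R"
 *  BLK_TN_MESSAGE action                    -> "N"
 */
```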
1092 | static inline | |
1093 | const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent) | |
1094 | { | |
1095 | return (const struct blk_io_trace *)ent; | |
1096 | } | |
1097 | ||
1098 | static inline const void *pdu_start(const struct trace_entry *ent) | |
1099 | { | |
1100 | return te_blk_io_trace(ent) + 1; | |
1101 | } | |
1102 | ||
66de7792 LZ |
1103 | static inline u32 t_action(const struct trace_entry *ent) |
1104 | { | |
1105 | return te_blk_io_trace(ent)->action; | |
1106 | } | |
1107 | ||
1108 | static inline u32 t_bytes(const struct trace_entry *ent) | |
1109 | { | |
1110 | return te_blk_io_trace(ent)->bytes; | |
1111 | } | |
1112 | ||
c71a8961 ACM |
1113 | static inline u32 t_sec(const struct trace_entry *ent) |
1114 | { | |
1115 | return te_blk_io_trace(ent)->bytes >> 9; | |
1116 | } | |
1117 | ||
1118 | static inline unsigned long long t_sector(const struct trace_entry *ent) | |
1119 | { | |
1120 | return te_blk_io_trace(ent)->sector; | |
1121 | } | |
1122 | ||
1123 | static inline __u16 t_error(const struct trace_entry *ent) | |
1124 | { | |
e0dc81be | 1125 | return te_blk_io_trace(ent)->error; |
c71a8961 ACM |
1126 | } |
1127 | ||
1128 | static __u64 get_pdu_int(const struct trace_entry *ent) | |
1129 | { | |
1130 | const __u64 *val = pdu_start(ent); | |
1131 | return be64_to_cpu(*val); | |
1132 | } | |
1133 | ||
1134 | static void get_pdu_remap(const struct trace_entry *ent, | |
1135 | struct blk_io_trace_remap *r) | |
1136 | { | |
1137 | const struct blk_io_trace_remap *__r = pdu_start(ent); | |
a42aaa3b | 1138 | __u64 sector_from = __r->sector_from; |
c71a8961 | 1139 | |
c71a8961 | 1140 | r->device_from = be32_to_cpu(__r->device_from); |
a42aaa3b AB |
1141 | r->device_to = be32_to_cpu(__r->device_to); |
1142 | r->sector_from = be64_to_cpu(sector_from); | |
c71a8961 ACM |
1143 | } |
1144 | ||
b6a4b0c3 LZ |
1145 | typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act); |
1146 | ||
1147 | static int blk_log_action_classic(struct trace_iterator *iter, const char *act) | |
c71a8961 | 1148 | { |
c09c47ca | 1149 | char rwbs[RWBS_LEN]; |
35ac51bf LZ |
1150 | unsigned long long ts = iter->ts; |
1151 | unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC); | |
c71a8961 | 1152 | unsigned secs = (unsigned long)ts; |
b6a4b0c3 | 1153 | const struct blk_io_trace *t = te_blk_io_trace(iter->ent); |
c71a8961 ACM |
1154 | |
1155 | fill_rwbs(rwbs, t); | |
1156 | ||
1157 | return trace_seq_printf(&iter->seq, | |
35ac51bf | 1158 | "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ", |
c71a8961 | 1159 | MAJOR(t->device), MINOR(t->device), iter->cpu, |
b6a4b0c3 | 1160 | secs, nsec_rem, iter->ent->pid, act, rwbs); |
c71a8961 ACM |
1161 | } |
1162 | ||
b6a4b0c3 | 1163 | static int blk_log_action(struct trace_iterator *iter, const char *act) |
c71a8961 | 1164 | { |
c09c47ca | 1165 | char rwbs[RWBS_LEN]; |
b6a4b0c3 LZ |
1166 | const struct blk_io_trace *t = te_blk_io_trace(iter->ent); |
1167 | ||
c71a8961 | 1168 | fill_rwbs(rwbs, t); |
b6a4b0c3 | 1169 | return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ", |
c71a8961 ACM |
1170 | MAJOR(t->device), MINOR(t->device), act, rwbs); |
1171 | } | |
1172 | ||
66de7792 LZ |
1173 | static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent) |
1174 | { | |
04986257 | 1175 | const unsigned char *pdu_buf; |
66de7792 LZ |
1176 | int pdu_len; |
1177 | int i, end, ret; | |
1178 | ||
1179 | pdu_buf = pdu_start(ent); | |
1180 | pdu_len = te_blk_io_trace(ent)->pdu_len; | |
1181 | ||
1182 | if (!pdu_len) | |
1183 | return 1; | |
1184 | ||
1185 | /* find the last zero that needs to be printed */ | |
1186 | for (end = pdu_len - 1; end >= 0; end--) | |
1187 | if (pdu_buf[end]) | |
1188 | break; | |
1189 | end++; | |
1190 | ||
1191 | if (!trace_seq_putc(s, '(')) | |
1192 | return 0; | |
1193 | ||
1194 | for (i = 0; i < pdu_len; i++) { | |
1195 | ||
1196 | ret = trace_seq_printf(s, "%s%02x", | |
1197 | i == 0 ? "" : " ", pdu_buf[i]); | |
1198 | if (!ret) | |
1199 | return ret; | |
1200 | ||
1201 | /* | |
1202 | * stop when the rest is just zeroes and indicate so | |
1203 | * with a ".." appended | |
1204 | */ | |
1205 | if (i == end && end != pdu_len - 1) | |
1206 | return trace_seq_puts(s, " ..) "); | |
1207 | } | |
1208 | ||
1209 | return trace_seq_puts(s, ") "); | |
1210 | } | |
1211 | ||
c71a8961 ACM |
1212 | static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent) |
1213 | { | |
4ca53085 SR |
1214 | char cmd[TASK_COMM_LEN]; |
1215 | ||
1216 | trace_find_cmdline(ent->pid, cmd); | |
c71a8961 | 1217 | |
66de7792 LZ |
1218 | if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { |
1219 | int ret; | |
1220 | ||
1221 | ret = trace_seq_printf(s, "%u ", t_bytes(ent)); | |
1222 | if (!ret) | |
1223 | return 0; | |
1224 | ret = blk_log_dump_pdu(s, ent); | |
1225 | if (!ret) | |
1226 | return 0; | |
1227 | return trace_seq_printf(s, "[%s]\n", cmd); | |
1228 | } else { | |
1229 | if (t_sec(ent)) | |
1230 | return trace_seq_printf(s, "%llu + %u [%s]\n", | |
1231 | t_sector(ent), t_sec(ent), cmd); | |
1232 | return trace_seq_printf(s, "[%s]\n", cmd); | |
1233 | } | |
c71a8961 ACM |
1234 | } |
1235 | ||
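
For reference, with blk_log_action_classic() providing the prefix and blk_log_generic() the body, a classic-mode line resembles traditional blkparse output; the values below are invented:

```c
/*    8,0    2     0.000123456   4712  Q  WS 1048576 + 8 [fio]
 *  maj,min  cpu   sec.nsec       pid act rwbs sector + nr_sectors [comm]
 */
```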
157f9c00 ACM |
1236 | static int blk_log_with_error(struct trace_seq *s, |
1237 | const struct trace_entry *ent) | |
c71a8961 | 1238 | { |
66de7792 LZ |
1239 | if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { |
1240 | int ret; | |
1241 | ||
1242 | ret = blk_log_dump_pdu(s, ent); | |
1243 | if (ret) | |
1244 | return trace_seq_printf(s, "[%d]\n", t_error(ent)); | |
1245 | return 0; | |
1246 | } else { | |
1247 | if (t_sec(ent)) | |
1248 | return trace_seq_printf(s, "%llu + %u [%d]\n", | |
1249 | t_sector(ent), | |
1250 | t_sec(ent), t_error(ent)); | |
1251 | return trace_seq_printf(s, "%llu [%d]\n", | |
1252 | t_sector(ent), t_error(ent)); | |
1253 | } | |
c71a8961 ACM |
1254 | } |
1255 | ||
1256 | static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent) | |
1257 | { | |
a42aaa3b | 1258 | struct blk_io_trace_remap r = { .device_from = 0, }; |
c71a8961 ACM |
1259 | |
1260 | get_pdu_remap(ent, &r); | |
1261 | return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n", | |
a42aaa3b AB |
1262 | t_sector(ent), t_sec(ent), |
1263 | MAJOR(r.device_from), MINOR(r.device_from), | |
1264 | (unsigned long long)r.sector_from); | |
c71a8961 ACM |
1265 | } |
1266 | ||
1267 | static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent) | |
1268 | { | |
4ca53085 SR |
1269 | char cmd[TASK_COMM_LEN]; |
1270 | ||
1271 | trace_find_cmdline(ent->pid, cmd); | |
1272 | ||
1273 | return trace_seq_printf(s, "[%s]\n", cmd); | |
c71a8961 ACM |
1274 | } |
1275 | ||
1276 | static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent) | |
1277 | { | |
4ca53085 SR |
1278 | char cmd[TASK_COMM_LEN]; |
1279 | ||
1280 | trace_find_cmdline(ent->pid, cmd); | |
1281 | ||
1282 | return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent)); | |
c71a8961 ACM |
1283 | } |
1284 | ||
1285 | static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent) | |
1286 | { | |
4ca53085 SR |
1287 | char cmd[TASK_COMM_LEN]; |
1288 | ||
1289 | trace_find_cmdline(ent->pid, cmd); | |
1290 | ||
c71a8961 | 1291 | return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent), |
4ca53085 | 1292 | get_pdu_int(ent), cmd); |
c71a8961 ACM |
1293 | } |
1294 | ||
18cea459 LZ |
1295 | static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent) |
1296 | { | |
1297 | int ret; | |
1298 | const struct blk_io_trace *t = te_blk_io_trace(ent); | |
1299 | ||
1300 | ret = trace_seq_putmem(s, t + 1, t->pdu_len); | |
1301 | if (ret) | |
1302 | return trace_seq_putc(s, '\n'); | |
1303 | return ret; | |
1304 | } | |
1305 | ||
c71a8961 ACM |
1306 | /* |
1307 | * struct tracer operations | |
1308 | */ | |
1309 | ||
1310 | static void blk_tracer_print_header(struct seq_file *m) | |
1311 | { | |
1312 | if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) | |
1313 | return; | |
1314 | seq_puts(m, "# DEV CPU TIMESTAMP PID ACT FLG\n" | |
1315 | "# | | | | | |\n"); | |
1316 | } | |
1317 | ||
1318 | static void blk_tracer_start(struct trace_array *tr) | |
1319 | { | |
ad5dd549 | 1320 | blk_tracer_enabled = true; |
c71a8961 ACM |
1321 | } |
1322 | ||
1323 | static int blk_tracer_init(struct trace_array *tr) | |
1324 | { | |
1325 | blk_tr = tr; | |
1326 | blk_tracer_start(tr); | |
c71a8961 ACM |
1327 | return 0; |
1328 | } | |
1329 | ||
1330 | static void blk_tracer_stop(struct trace_array *tr) | |
1331 | { | |
ad5dd549 | 1332 | blk_tracer_enabled = false; |
c71a8961 ACM |
1333 | } |
1334 | ||
1335 | static void blk_tracer_reset(struct trace_array *tr) | |
1336 | { | |
c71a8961 ACM |
1337 | blk_tracer_stop(tr); |
1338 | } | |
1339 | ||
e4955c99 | 1340 | static const struct { |
c71a8961 | 1341 | const char *act[2]; |
ef18012b | 1342 | int (*print)(struct trace_seq *s, const struct trace_entry *ent); |
e4955c99 | 1343 | } what2act[] = { |
ef18012b | 1344 | [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic }, |
c71a8961 ACM |
1345 | [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic }, |
1346 | [__BLK_TA_FRONTMERGE] = {{ "F", "frontmerge" }, blk_log_generic }, | |
1347 | [__BLK_TA_GETRQ] = {{ "G", "getrq" }, blk_log_generic }, | |
1348 | [__BLK_TA_SLEEPRQ] = {{ "S", "sleeprq" }, blk_log_generic }, | |
1349 | [__BLK_TA_REQUEUE] = {{ "R", "requeue" }, blk_log_with_error }, | |
1350 | [__BLK_TA_ISSUE] = {{ "D", "issue" }, blk_log_generic }, | |
1351 | [__BLK_TA_COMPLETE] = {{ "C", "complete" }, blk_log_with_error }, | |
1352 | [__BLK_TA_PLUG] = {{ "P", "plug" }, blk_log_plug }, | |
1353 | [__BLK_TA_UNPLUG_IO] = {{ "U", "unplug_io" }, blk_log_unplug }, | |
49cac01e | 1354 | [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug }, |
c71a8961 ACM |
1355 | [__BLK_TA_INSERT] = {{ "I", "insert" }, blk_log_generic }, |
1356 | [__BLK_TA_SPLIT] = {{ "X", "split" }, blk_log_split }, | |
1357 | [__BLK_TA_BOUNCE] = {{ "B", "bounce" }, blk_log_generic }, | |
1358 | [__BLK_TA_REMAP] = {{ "A", "remap" }, blk_log_remap }, | |
1359 | }; | |
1360 | ||
b6a4b0c3 LZ |
1361 | static enum print_line_t print_one_line(struct trace_iterator *iter, |
1362 | bool classic) | |
c71a8961 | 1363 | { |
2c9b238e | 1364 | struct trace_seq *s = &iter->seq; |
b6a4b0c3 LZ |
1365 | const struct blk_io_trace *t; |
1366 | u16 what; | |
c71a8961 | 1367 | int ret; |
b6a4b0c3 LZ |
1368 | bool long_act; |
1369 | blk_log_action_t *log_action; | |
c71a8961 | 1370 | |
b6a4b0c3 LZ |
1371 | t = te_blk_io_trace(iter->ent); |
1372 | what = t->action & ((1 << BLK_TC_SHIFT) - 1); | |
1373 | long_act = !!(trace_flags & TRACE_ITER_VERBOSE); | |
1374 | log_action = classic ? &blk_log_action_classic : &blk_log_action; | |
08a06b83 | 1375 | |
18cea459 LZ |
1376 | if (t->action == BLK_TN_MESSAGE) { |
1377 | ret = log_action(iter, long_act ? "message" : "m"); | |
1378 | if (ret) | |
1379 | ret = blk_log_msg(s, iter->ent); | |
1380 | goto out; | |
1381 | } | |
1382 | ||
eb08f8eb | 1383 | if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act))) |
b78825d6 | 1384 | ret = trace_seq_printf(s, "Unknown action %x\n", what); |
c71a8961 | 1385 | else { |
b6a4b0c3 | 1386 | ret = log_action(iter, what2act[what].act[long_act]); |
c71a8961 | 1387 | if (ret) |
2c9b238e | 1388 | ret = what2act[what].print(s, iter->ent); |
c71a8961 | 1389 | } |
18cea459 | 1390 | out: |
c71a8961 ACM |
1391 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; |
1392 | } | |
1393 | ||
b6a4b0c3 | 1394 | static enum print_line_t blk_trace_event_print(struct trace_iterator *iter, |
a9a57763 | 1395 | int flags, struct trace_event *event) |
b6a4b0c3 | 1396 | { |
b6a4b0c3 LZ |
1397 | return print_one_line(iter, false); |
1398 | } | |
1399 | ||
08a06b83 ACM |
1400 | static int blk_trace_synthesize_old_trace(struct trace_iterator *iter) |
1401 | { | |
1402 | struct trace_seq *s = &iter->seq; | |
1403 | struct blk_io_trace *t = (struct blk_io_trace *)iter->ent; | |
1404 | const int offset = offsetof(struct blk_io_trace, sector); | |
1405 | struct blk_io_trace old = { | |
1406 | .magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION, | |
6c051ce0 | 1407 | .time = iter->ts, |
08a06b83 ACM |
1408 | }; |
1409 | ||
1410 | if (!trace_seq_putmem(s, &old, offset)) | |
1411 | return 0; | |
1412 | return trace_seq_putmem(s, &t->sector, | |
1413 | sizeof(old) - offset + t->pdu_len); | |
1414 | } | |
1415 | ||
ae7462b4 | 1416 | static enum print_line_t |
a9a57763 SR |
1417 | blk_trace_event_print_binary(struct trace_iterator *iter, int flags, |
1418 | struct trace_event *event) | |
08a06b83 ACM |
1419 | { |
1420 | return blk_trace_synthesize_old_trace(iter) ? | |
1421 | TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | |
1422 | } | |
1423 | ||
c71a8961 ACM |
1424 | static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter) |
1425 | { | |
c71a8961 ACM |
1426 | if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) |
1427 | return TRACE_TYPE_UNHANDLED; | |
1428 | ||
b6a4b0c3 | 1429 | return print_one_line(iter, true); |
c71a8961 ACM |
1430 | } |
1431 | ||
8c1a49ae SRRH |
1432 | static int |
1433 | blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | |
f3948f88 LZ |
1434 | { |
1435 | /* don't output context-info for blk_classic output */ | |
1436 | if (bit == TRACE_BLK_OPT_CLASSIC) { | |
1437 | if (set) | |
1438 | trace_flags &= ~TRACE_ITER_CONTEXT_INFO; | |
1439 | else | |
1440 | trace_flags |= TRACE_ITER_CONTEXT_INFO; | |
1441 | } | |
1442 | return 0; | |
1443 | } | |
1444 | ||
c71a8961 ACM |
1445 | static struct tracer blk_tracer __read_mostly = { |
1446 | .name = "blk", | |
1447 | .init = blk_tracer_init, | |
1448 | .reset = blk_tracer_reset, | |
1449 | .start = blk_tracer_start, | |
1450 | .stop = blk_tracer_stop, | |
1451 | .print_header = blk_tracer_print_header, | |
1452 | .print_line = blk_tracer_print_line, | |
1453 | .flags = &blk_tracer_flags, | |
f3948f88 | 1454 | .set_flag = blk_tracer_set_flag, |
c71a8961 ACM |
1455 | }; |
1456 | ||
a9a57763 | 1457 | static struct trace_event_functions trace_blk_event_funcs = { |
c71a8961 | 1458 | .trace = blk_trace_event_print, |
08a06b83 | 1459 | .binary = blk_trace_event_print_binary, |
c71a8961 ACM |
1460 | }; |
1461 | ||
a9a57763 SR |
1462 | static struct trace_event trace_blk_event = { |
1463 | .type = TRACE_BLK, | |
1464 | .funcs = &trace_blk_event_funcs, | |
1465 | }; | |
1466 | ||
c71a8961 ACM |
1467 | static int __init init_blk_tracer(void) |
1468 | { | |
1469 | if (!register_ftrace_event(&trace_blk_event)) { | |
1470 | pr_warning("Warning: could not register block events\n"); | |
1471 | return 1; | |
1472 | } | |
1473 | ||
1474 | if (register_tracer(&blk_tracer) != 0) { | |
1475 | pr_warning("Warning: could not register the block tracer\n"); | |
1476 | unregister_ftrace_event(&trace_blk_event); | |
1477 | return 1; | |
1478 | } | |
1479 | ||
1480 | return 0; | |
1481 | } | |
1482 | ||
1483 | device_initcall(init_blk_tracer); | |
1484 | ||
1485 | static int blk_trace_remove_queue(struct request_queue *q) | |
1486 | { | |
1487 | struct blk_trace *bt; | |
1488 | ||
1489 | bt = xchg(&q->blk_trace, NULL); | |
1490 | if (bt == NULL) | |
1491 | return -EINVAL; | |
1492 | ||
17ba97e3 LZ |
1493 | if (atomic_dec_and_test(&blk_probes_ref)) |
1494 | blk_unregister_tracepoints(); | |
1495 | ||
a404d557 JK |
1496 | spin_lock_irq(&running_trace_lock); |
1497 | list_del(&bt->running_list); | |
1498 | spin_unlock_irq(&running_trace_lock); | |
ad5dd549 | 1499 | blk_trace_free(bt); |
c71a8961 ACM |
1500 | return 0; |
1501 | } | |
1502 | ||
1503 | /* | |
1504 | * Setup everything required to start tracing | |
1505 | */ | |
9908c309 LZ |
1506 | static int blk_trace_setup_queue(struct request_queue *q, |
1507 | struct block_device *bdev) | |
c71a8961 ACM |
1508 | { |
1509 | struct blk_trace *old_bt, *bt = NULL; | |
18cea459 | 1510 | int ret = -ENOMEM; |
c71a8961 | 1511 | |
c71a8961 ACM |
1512 | bt = kzalloc(sizeof(*bt), GFP_KERNEL); |
1513 | if (!bt) | |
15152e44 | 1514 | return -ENOMEM; |
c71a8961 | 1515 | |
18cea459 LZ |
1516 | bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char)); |
1517 | if (!bt->msg_data) | |
1518 | goto free_bt; | |
1519 | ||
9908c309 | 1520 | bt->dev = bdev->bd_dev; |
c71a8961 | 1521 | bt->act_mask = (u16)-1; |
9908c309 LZ |
1522 | |
1523 | blk_trace_setup_lba(bt, bdev); | |
c71a8961 ACM |
1524 | |
1525 | old_bt = xchg(&q->blk_trace, bt); | |
1526 | if (old_bt != NULL) { | |
1527 | (void)xchg(&q->blk_trace, old_bt); | |
18cea459 LZ |
1528 | ret = -EBUSY; |
1529 | goto free_bt; | |
c71a8961 | 1530 | } |
15152e44 | 1531 | |
17ba97e3 LZ |
1532 | if (atomic_inc_return(&blk_probes_ref) == 1) |
1533 | blk_register_tracepoints(); | |
c71a8961 | 1534 | return 0; |
18cea459 LZ |
1535 | |
1536 | free_bt: | |
1537 | blk_trace_free(bt); | |
1538 | return ret; | |
c71a8961 ACM |
1539 | } |
1540 | ||
1541 | /* | |
1542 | * sysfs interface to enable and configure tracing | |
1543 | */ | |
1544 | ||
c71a8961 ACM |
1545 | static ssize_t sysfs_blk_trace_attr_show(struct device *dev, |
1546 | struct device_attribute *attr, | |
1547 | char *buf); | |
1548 | static ssize_t sysfs_blk_trace_attr_store(struct device *dev, | |
1549 | struct device_attribute *attr, | |
1550 | const char *buf, size_t count); | |
1551 | #define BLK_TRACE_DEVICE_ATTR(_name) \ | |
1552 | DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \ | |
1553 | sysfs_blk_trace_attr_show, \ | |
1554 | sysfs_blk_trace_attr_store) | |
1555 | ||
cd649b8b | 1556 | static BLK_TRACE_DEVICE_ATTR(enable); |
c71a8961 ACM |
1557 | static BLK_TRACE_DEVICE_ATTR(act_mask); |
1558 | static BLK_TRACE_DEVICE_ATTR(pid); | |
1559 | static BLK_TRACE_DEVICE_ATTR(start_lba); | |
1560 | static BLK_TRACE_DEVICE_ATTR(end_lba); | |
1561 | ||
1562 | static struct attribute *blk_trace_attrs[] = { | |
1563 | &dev_attr_enable.attr, | |
1564 | &dev_attr_act_mask.attr, | |
1565 | &dev_attr_pid.attr, | |
1566 | &dev_attr_start_lba.attr, | |
1567 | &dev_attr_end_lba.attr, | |
1568 | NULL | |
1569 | }; | |
1570 | ||
1571 | struct attribute_group blk_trace_attr_group = { | |
1572 | .name = "trace", | |
1573 | .attrs = blk_trace_attrs, | |
1574 | }; | |
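/*
 * The attributes above are exposed per block device under the "trace"
 * group, e.g. (paths illustrative, assuming the default sysfs layout):
 *
 *   echo 1          > /sys/block/sda/trace/enable
 *   echo read,write > /sys/block/sda/trace/act_mask
 *   echo 0          > /sys/block/sda/trace/enable
 */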
1575 | ||
09341997 LZ |
1576 | static const struct { |
1577 | int mask; | |
1578 | const char *str; | |
1579 | } mask_maps[] = { | |
1580 | { BLK_TC_READ, "read" }, | |
1581 | { BLK_TC_WRITE, "write" }, | |
c09c47ca | 1582 | { BLK_TC_FLUSH, "flush" }, |
09341997 LZ |
1583 | { BLK_TC_SYNC, "sync" }, |
1584 | { BLK_TC_QUEUE, "queue" }, | |
1585 | { BLK_TC_REQUEUE, "requeue" }, | |
1586 | { BLK_TC_ISSUE, "issue" }, | |
1587 | { BLK_TC_COMPLETE, "complete" }, | |
1588 | { BLK_TC_FS, "fs" }, | |
1589 | { BLK_TC_PC, "pc" }, | |
1590 | { BLK_TC_AHEAD, "ahead" }, | |
1591 | { BLK_TC_META, "meta" }, | |
1592 | { BLK_TC_DISCARD, "discard" }, | |
1593 | { BLK_TC_DRV_DATA, "drv_data" }, | |
c09c47ca | 1594 | { BLK_TC_FUA, "fua" }, |
09341997 LZ |
1595 | }; |
1596 | ||
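/*
 * Parse a comma-separated, case-insensitive list of the category names
 * from mask_maps[] (e.g. "read,write,sync") into an act_mask value;
 * returns a negative errno on an unknown name or allocation failure.
 */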
1597 | static int blk_trace_str2mask(const char *str) | |
c71a8961 | 1598 | { |
09341997 | 1599 | int i; |
c71a8961 | 1600 | int mask = 0; |
9eb85125 | 1601 | char *buf, *s, *token; |
c71a8961 | 1602 | |
9eb85125 LZ |
1603 | buf = kstrdup(str, GFP_KERNEL); |
1604 | if (buf == NULL) | |
c71a8961 | 1605 | return -ENOMEM; |
9eb85125 | 1606 | s = strstrip(buf); |
c71a8961 ACM |
1607 | |
1608 | while (1) { | |
09341997 LZ |
1609 | token = strsep(&s, ","); |
1610 | if (token == NULL) | |
c71a8961 ACM |
1611 | break; |
1612 | ||
09341997 LZ |
1613 | if (*token == '\0') |
1614 | continue; | |
1615 | ||
1616 | for (i = 0; i < ARRAY_SIZE(mask_maps); i++) { | |
1617 | if (strcasecmp(token, mask_maps[i].str) == 0) { | |
1618 | mask |= mask_maps[i].mask; | |
1619 | break; | |
1620 | } | |
1621 | } | |
1622 | if (i == ARRAY_SIZE(mask_maps)) { | |
1623 | mask = -EINVAL; | |
1624 | break; | |
1625 | } | |
c71a8961 | 1626 | } |
9eb85125 | 1627 | kfree(buf); |
c71a8961 ACM |
1628 | |
1629 | return mask; | |
1630 | } | |
1631 | ||
09341997 LZ |
1632 | static ssize_t blk_trace_mask2str(char *buf, int mask) |
1633 | { | |
1634 | int i; | |
1635 | char *p = buf; | |
1636 | ||
1637 | for (i = 0; i < ARRAY_SIZE(mask_maps); i++) { | |
1638 | if (mask & mask_maps[i].mask) { | |
1639 | p += sprintf(p, "%s%s", | |
1640 | (p == buf) ? "" : ",", mask_maps[i].str); | |
1641 | } | |
1642 | } | |
1643 | *p++ = '\n'; | |
1644 | ||
1645 | return p - buf; | |
1646 | } | |
1647 | ||
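/* Return the request_queue only if the block_device has a gendisk attached. */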
b125130b LZ |
1648 | static struct request_queue *blk_trace_get_queue(struct block_device *bdev) |
1649 | { | |
1650 | if (bdev->bd_disk == NULL) | |
1651 | return NULL; | |
1652 | ||
1653 | return bdev_get_queue(bdev); | |
1654 | } | |
1655 | ||
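/*
 * Show handler for the trace/ attributes: 'enable' reports whether a
 * blk_trace is attached to the queue; the remaining attributes report
 * "disabled" until tracing has been set up.
 */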
c71a8961 ACM |
1656 | static ssize_t sysfs_blk_trace_attr_show(struct device *dev, |
1657 | struct device_attribute *attr, | |
1658 | char *buf) | |
1659 | { | |
1660 | struct hd_struct *p = dev_to_part(dev); | |
1661 | struct request_queue *q; | |
1662 | struct block_device *bdev; | |
1663 | ssize_t ret = -ENXIO; | |
1664 | ||
c71a8961 ACM |
1665 | bdev = bdget(part_devt(p)); |
1666 | if (bdev == NULL) | |
01b284f9 | 1667 | goto out; |
c71a8961 | 1668 | |
b125130b | 1669 | q = blk_trace_get_queue(bdev); |
c71a8961 ACM |
1670 | if (q == NULL) |
1671 | goto out_bdput; | |
b125130b | 1672 | |
c71a8961 | 1673 | mutex_lock(&bdev->bd_mutex); |
cd649b8b LZ |
1674 | |
1675 | if (attr == &dev_attr_enable) { | |
1676 | ret = sprintf(buf, "%u\n", !!q->blk_trace); | |
1677 | goto out_unlock_bdev; | |
1678 | } | |
1679 | ||
c71a8961 ACM |
1680 | if (q->blk_trace == NULL) |
1681 | ret = sprintf(buf, "disabled\n"); | |
1682 | else if (attr == &dev_attr_act_mask) | |
09341997 | 1683 | ret = blk_trace_mask2str(buf, q->blk_trace->act_mask); |
c71a8961 ACM |
1684 | else if (attr == &dev_attr_pid) |
1685 | ret = sprintf(buf, "%u\n", q->blk_trace->pid); | |
1686 | else if (attr == &dev_attr_start_lba) | |
1687 | ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba); | |
1688 | else if (attr == &dev_attr_end_lba) | |
1689 | ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba); | |
cd649b8b LZ |
1690 | |
1691 | out_unlock_bdev: | |
c71a8961 ACM |
1692 | mutex_unlock(&bdev->bd_mutex); |
1693 | out_bdput: | |
1694 | bdput(bdev); | |
01b284f9 | 1695 | out: |
c71a8961 ACM |
1696 | return ret; |
1697 | } | |
1698 | ||
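/*
 * Store handler: writing to 'enable' sets up or removes queue tracing;
 * writing any other attribute implicitly sets tracing up first.
 * 'act_mask' accepts either a hex mask or a comma-separated list of
 * category names.
 */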
1699 | static ssize_t sysfs_blk_trace_attr_store(struct device *dev, | |
1700 | struct device_attribute *attr, | |
1701 | const char *buf, size_t count) | |
1702 | { | |
1703 | struct block_device *bdev; | |
1704 | struct request_queue *q; | |
1705 | struct hd_struct *p; | |
1706 | u64 value; | |
09341997 | 1707 | ssize_t ret = -EINVAL; |
c71a8961 ACM |
1708 | |
1709 | if (count == 0) | |
1710 | goto out; | |
1711 | ||
1712 | if (attr == &dev_attr_act_mask) { | |
1713 | if (sscanf(buf, "%llx", &value) != 1) { | |
1714 | /* Assume it is a list of trace category names */ | |
09341997 LZ |
1715 | ret = blk_trace_str2mask(buf); |
1716 | if (ret < 0) | |
c71a8961 | 1717 | goto out; |
09341997 | 1718 | value = ret; |
c71a8961 ACM |
1719 | } |
1720 | } else if (sscanf(buf, "%llu", &value) != 1) | |
1721 | goto out; | |
1722 | ||
09341997 LZ |
1723 | ret = -ENXIO; |
1724 | ||
c71a8961 ACM |
1725 | p = dev_to_part(dev); |
1726 | bdev = bdget(part_devt(p)); | |
1727 | if (bdev == NULL) | |
01b284f9 | 1728 | goto out; |
c71a8961 | 1729 | |
b125130b | 1730 | q = blk_trace_get_queue(bdev); |
c71a8961 ACM |
1731 | if (q == NULL) |
1732 | goto out_bdput; | |
1733 | ||
1734 | mutex_lock(&bdev->bd_mutex); | |
cd649b8b LZ |
1735 | |
1736 | if (attr == &dev_attr_enable) { | |
1737 | if (value) | |
9908c309 | 1738 | ret = blk_trace_setup_queue(q, bdev); |
cd649b8b LZ |
1739 | else |
1740 | ret = blk_trace_remove_queue(q); | |
1741 | goto out_unlock_bdev; | |
1742 | } | |
1743 | ||
c71a8961 ACM |
1744 | ret = 0; |
1745 | if (q->blk_trace == NULL) | |
9908c309 | 1746 | ret = blk_trace_setup_queue(q, bdev); |
c71a8961 ACM |
1747 | |
1748 | if (ret == 0) { | |
1749 | if (attr == &dev_attr_act_mask) | |
1750 | q->blk_trace->act_mask = value; | |
1751 | else if (attr == &dev_attr_pid) | |
1752 | q->blk_trace->pid = value; | |
1753 | else if (attr == &dev_attr_start_lba) | |
1754 | q->blk_trace->start_lba = value; | |
1755 | else if (attr == &dev_attr_end_lba) | |
1756 | q->blk_trace->end_lba = value; | |
c71a8961 | 1757 | } |
cd649b8b LZ |
1758 | |
1759 | out_unlock_bdev: | |
c71a8961 ACM |
1760 | mutex_unlock(&bdev->bd_mutex); |
1761 | out_bdput: | |
1762 | bdput(bdev); | |
c71a8961 | 1763 | out: |
cd649b8b | 1764 | return ret ? ret : count; |
c71a8961 | 1765 | } |
cd649b8b | 1766 | |
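/*
 * Create/remove the "trace" attribute group on the disk's device node;
 * called from the queue registration code when the disk is added to
 * and removed from sysfs.
 */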
1d54ad6d LZ |
1767 | int blk_trace_init_sysfs(struct device *dev) |
1768 | { | |
1769 | return sysfs_create_group(&dev->kobj, &blk_trace_attr_group); | |
1770 | } | |
1771 | ||
48c0d4d4 ZK |
1772 | void blk_trace_remove_sysfs(struct device *dev) |
1773 | { | |
1774 | sysfs_remove_group(&dev->kobj, &blk_trace_attr_group); | |
1775 | } | |
1776 | ||
55782138 LZ |
1777 | #endif /* CONFIG_BLK_DEV_IO_TRACE */ |
1778 | ||
1779 | #ifdef CONFIG_EVENT_TRACING | |
1780 | ||
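/*
 * Format the command bytes of a BLOCK_PC (SCSI passthrough) request as
 * space-separated hex for the block trace events; trailing zero bytes
 * are elided and marked with "..".  Filesystem requests get an empty
 * string.
 */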
1781 | void blk_dump_cmd(char *buf, struct request *rq) | |
1782 | { | |
1783 | int i, end; | |
1784 | int len = rq->cmd_len; | |
1785 | unsigned char *cmd = rq->cmd; | |
1786 | ||
33659ebb | 1787 | if (rq->cmd_type != REQ_TYPE_BLOCK_PC) { |
55782138 LZ |
1788 | buf[0] = '\0'; |
1789 | return; | |
1790 | } | |
1791 | ||
1792 | for (end = len - 1; end >= 0; end--) | |
1793 | if (cmd[end]) | |
1794 | break; | |
1795 | end++; | |
1796 | ||
1797 | for (i = 0; i < len; i++) { | |
1798 | buf += sprintf(buf, "%s%02x", i == 0 ? "" : " ", cmd[i]); | |
1799 | if (i == end && end != len - 1) { | |
1800 | sprintf(buf, " .."); | |
1801 | break; | |
1802 | } | |
1803 | } | |
1804 | } | |
1805 | ||
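/*
 * Build the RWBS action string used by the block trace events: 'F'
 * (flush) first, then one of 'W'/'D'/'R'/'N' for write, discard, read
 * or none, followed by 'F' (FUA), 'A' (readahead), 'S' (sync), 'M'
 * (metadata) and 'E' (secure discard) as applicable.  A sync FUA
 * write, for instance, yields "WFS".
 */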
1806 | void blk_fill_rwbs(char *rwbs, u32 rw, int bytes) | |
1807 | { | |
1808 | int i = 0; | |
1809 | ||
c09c47ca NK |
1810 | if (rw & REQ_FLUSH) |
1811 | rwbs[i++] = 'F'; | |
1812 | ||
55782138 LZ |
1813 | if (rw & WRITE) |
1814 | rwbs[i++] = 'W'; | |
7b6d91da | 1815 | else if (rw & REQ_DISCARD) |
55782138 LZ |
1816 | rwbs[i++] = 'D'; |
1817 | else if (bytes) | |
1818 | rwbs[i++] = 'R'; | |
1819 | else | |
1820 | rwbs[i++] = 'N'; | |
1821 | ||
c09c47ca NK |
1822 | if (rw & REQ_FUA) |
1823 | rwbs[i++] = 'F'; | |
7b6d91da | 1824 | if (rw & REQ_RAHEAD) |
55782138 | 1825 | rwbs[i++] = 'A'; |
7b6d91da | 1826 | if (rw & REQ_SYNC) |
55782138 | 1827 | rwbs[i++] = 'S'; |
7b6d91da | 1828 | if (rw & REQ_META) |
55782138 | 1829 | rwbs[i++] = 'M'; |
8d57a98c AH |
1830 | if (rw & REQ_SECURE) |
1831 | rwbs[i++] = 'E'; | |
55782138 LZ |
1832 | |
1833 | rwbs[i] = '\0'; | |
1834 | } | |
9ca8f8e5 | 1835 | EXPORT_SYMBOL_GPL(blk_fill_rwbs); |
55782138 | 1836 | |
55782138 LZ |
1837 | #endif /* CONFIG_EVENT_TRACING */ |
1838 |