kernel/trace/blktrace.c
1 /*
2  * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
16  *
17  */
18 #include <linux/kernel.h>
19 #include <linux/blkdev.h>
20 #include <linux/blktrace_api.h>
21 #include <linux/percpu.h>
22 #include <linux/init.h>
23 #include <linux/mutex.h>
24 #include <linux/slab.h>
25 #include <linux/debugfs.h>
26 #include <linux/time.h>
27 #include <linux/uaccess.h>
28
29 #include <trace/events/block.h>
30
31 #include "trace_output.h"
32
33 #ifdef CONFIG_BLK_DEV_IO_TRACE
34
35 static unsigned int blktrace_seq __read_mostly = 1;
36
37 static struct trace_array *blk_tr;
38 static bool blk_tracer_enabled __read_mostly;
39
40 /* Select an alternative, minimalistic output rather than the original one */
41 #define TRACE_BLK_OPT_CLASSIC   0x1
42
43 static struct tracer_opt blk_tracer_opts[] = {
44         /* The minimalistic output is disabled by default */
45         { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
46         { }
47 };
48
49 static struct tracer_flags blk_tracer_flags = {
50         .val  = 0,
51         .opts = blk_tracer_opts,
52 };
53
54 /* Global reference count of probes */
55 static atomic_t blk_probes_ref = ATOMIC_INIT(0);
56
57 static void blk_register_tracepoints(void);
58 static void blk_unregister_tracepoints(void);
59
60 /*
61  * Send out a notify message.
62  */
63 static void trace_note(struct blk_trace *bt, pid_t pid, int action,
64                        const void *data, size_t len)
65 {
66         struct blk_io_trace *t;
67         struct ring_buffer_event *event = NULL;
68         struct ring_buffer *buffer = NULL;
69         int pc = 0;
70         int cpu = smp_processor_id();
71         bool blk_tracer = blk_tracer_enabled;
72
73         if (blk_tracer) {
74                 buffer = blk_tr->buffer;
75                 pc = preempt_count();
76                 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
77                                                   sizeof(*t) + len,
78                                                   0, pc);
79                 if (!event)
80                         return;
81                 t = ring_buffer_event_data(event);
82                 goto record_it;
83         }
84
85         if (!bt->rchan)
86                 return;
87
88         t = relay_reserve(bt->rchan, sizeof(*t) + len);
89         if (t) {
90                 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
91                 t->time = ktime_to_ns(ktime_get());
92 record_it:
93                 t->device = bt->dev;
94                 t->action = action;
95                 t->pid = pid;
96                 t->cpu = cpu;
97                 t->pdu_len = len;
98                 memcpy((void *) t + sizeof(*t), data, len);
99
100                 if (blk_tracer)
101                         trace_buffer_unlock_commit(buffer, event, 0, pc);
102         }
103 }
104
105 /*
106  * Send out a notify for this process, if we haven't done so since a trace
107  * started
108  */
109 static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
110 {
111         tsk->btrace_seq = blktrace_seq;
112         trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
113 }
114
115 static void trace_note_time(struct blk_trace *bt)
116 {
117         struct timespec now;
118         unsigned long flags;
119         u32 words[2];
120
121         getnstimeofday(&now);
122         words[0] = now.tv_sec;
123         words[1] = now.tv_nsec;
124
125         local_irq_save(flags);
126         trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
127         local_irq_restore(flags);
128 }
129
130 void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
131 {
132         int n;
133         va_list args;
134         unsigned long flags;
135         char *buf;
136
137         if (unlikely(bt->trace_state != Blktrace_running &&
138                      !blk_tracer_enabled))
139                 return;
140
141         local_irq_save(flags);
142         buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
143         va_start(args, fmt);
144         n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
145         va_end(args);
146
147         trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
148         local_irq_restore(flags);
149 }
150 EXPORT_SYMBOL_GPL(__trace_note_message);
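/*
 * A hedged usage sketch: drivers normally reach this via the
 * blk_add_trace_msg() wrapper in <linux/blktrace_api.h>, which checks
 * q->blk_trace before calling here. The function and variable below are
 * invented names for illustration only:
 *
 *	blk_add_trace_msg(q, "mydrv: request completed, status %d", status);
 *
 * The message is emitted as a BLK_TN_MESSAGE note and shows up with the
 * 'N' RWBS marker and the "m"/"message" action in the formatted output
 * further down in this file.
 */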
151
152 static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
153                          pid_t pid)
154 {
155         if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
156                 return 1;
157         if (sector && (sector < bt->start_lba || sector > bt->end_lba))
158                 return 1;
159         if (bt->pid && pid != bt->pid)
160                 return 1;
161
162         return 0;
163 }
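/*
 * Example of the filtering above (values invented): with
 * bt->act_mask = BLK_TC_READ, bt->start_lba = 100, bt->end_lba = 200 and
 * bt->pid = 0, a write is dropped by the mask check, a read at sector 50
 * is dropped by the LBA range check, and a read at sector 150 from any
 * task passes. A non-zero return means "do not log this event".
 */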
164
165 /*
166  * Data direction bit lookup
167  */
168 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
169                                  BLK_TC_ACT(BLK_TC_WRITE) };
170
171 #define BLK_TC_HARDBARRIER      BLK_TC_BARRIER
172 #define BLK_TC_RAHEAD           BLK_TC_AHEAD
173
174 /* The ilog2() calls are folded away at compile time because their arguments are constants */
175 #define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
176           (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
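/*
 * Example expansion (a sketch; the real constants come from the block
 * layer headers): MASK_TC_BIT(rw, SYNC) becomes
 *
 *	(rw & REQ_SYNC) << (ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - __REQ_SYNC)
 *
 * which picks the REQ_SYNC bit out of the request flags and shifts it
 * into the BLK_TC_SYNC position in the category half of the blk_io_trace
 * action word.
 */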
177
178 /*
179  * The worker for the various blk_add_trace*() types. Fills out a
180  * blk_io_trace structure and places it in a per-cpu subbuffer.
181  */
182 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
183                      int rw, u32 what, int error, int pdu_len, void *pdu_data)
184 {
185         struct task_struct *tsk = current;
186         struct ring_buffer_event *event = NULL;
187         struct ring_buffer *buffer = NULL;
188         struct blk_io_trace *t;
189         unsigned long flags = 0;
190         unsigned long *sequence;
191         pid_t pid;
192         int cpu, pc = 0;
193         bool blk_tracer = blk_tracer_enabled;
194
195         if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
196                 return;
197
198         what |= ddir_act[rw & WRITE];
199         what |= MASK_TC_BIT(rw, HARDBARRIER);
200         what |= MASK_TC_BIT(rw, SYNC);
201         what |= MASK_TC_BIT(rw, RAHEAD);
202         what |= MASK_TC_BIT(rw, META);
203         what |= MASK_TC_BIT(rw, DISCARD);
204
205         pid = tsk->pid;
206         if (act_log_check(bt, what, sector, pid))
207                 return;
208         cpu = raw_smp_processor_id();
209
210         if (blk_tracer) {
211                 tracing_record_cmdline(current);
212
213                 buffer = blk_tr->buffer;
214                 pc = preempt_count();
215                 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
216                                                   sizeof(*t) + pdu_len,
217                                                   0, pc);
218                 if (!event)
219                         return;
220                 t = ring_buffer_event_data(event);
221                 goto record_it;
222         }
223
224         /*
225          * A word about the locking here - we disable interrupts to reserve
226          * some space in the relay per-cpu buffer, to prevent an irq
227          * from coming in and stepping on our toes.
228          */
229         local_irq_save(flags);
230
231         if (unlikely(tsk->btrace_seq != blktrace_seq))
232                 trace_note_tsk(bt, tsk);
233
234         t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
235         if (t) {
236                 sequence = per_cpu_ptr(bt->sequence, cpu);
237
238                 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
239                 t->sequence = ++(*sequence);
240                 t->time = ktime_to_ns(ktime_get());
241 record_it:
242                 /*
243                  * These two are not needed in ftrace, as they are already in
244                  * the generic trace_entry and filled by
245                  * tracing_generic_entry_update, but we also fill them in here
246                  * for the benefit of the trace_event->bin() synthesizer.
247                  */
248                 t->cpu = cpu;
249                 t->pid = pid;
250
251                 t->sector = sector;
252                 t->bytes = bytes;
253                 t->action = what;
254                 t->device = bt->dev;
255                 t->error = error;
256                 t->pdu_len = pdu_len;
257
258                 if (pdu_len)
259                         memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
260
261                 if (blk_tracer) {
262                         trace_buffer_unlock_commit(buffer, event, 0, pc);
263                         return;
264                 }
265         }
266
267         local_irq_restore(flags);
268 }
269
270 static struct dentry *blk_tree_root;
271 static DEFINE_MUTEX(blk_tree_mutex);
272
273 static void blk_trace_free(struct blk_trace *bt)
274 {
275         debugfs_remove(bt->msg_file);
276         debugfs_remove(bt->dropped_file);
277         relay_close(bt->rchan);
278         debugfs_remove(bt->dir);
279         free_percpu(bt->sequence);
280         free_percpu(bt->msg_data);
281         kfree(bt);
282 }
283
284 static void blk_trace_cleanup(struct blk_trace *bt)
285 {
286         blk_trace_free(bt);
287         if (atomic_dec_and_test(&blk_probes_ref))
288                 blk_unregister_tracepoints();
289 }
290
291 int blk_trace_remove(struct request_queue *q)
292 {
293         struct blk_trace *bt;
294
295         bt = xchg(&q->blk_trace, NULL);
296         if (!bt)
297                 return -EINVAL;
298
299         if (bt->trace_state != Blktrace_running)
300                 blk_trace_cleanup(bt);
301
302         return 0;
303 }
304 EXPORT_SYMBOL_GPL(blk_trace_remove);
305
306 static int blk_dropped_open(struct inode *inode, struct file *filp)
307 {
308         filp->private_data = inode->i_private;
309
310         return 0;
311 }
312
313 static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
314                                 size_t count, loff_t *ppos)
315 {
316         struct blk_trace *bt = filp->private_data;
317         char buf[16];
318
319         snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
320
321         return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
322 }
323
324 static const struct file_operations blk_dropped_fops = {
325         .owner =        THIS_MODULE,
326         .open =         blk_dropped_open,
327         .read =         blk_dropped_read,
328         .llseek =       default_llseek,
329 };
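/*
 * Userspace can read the drop counter to judge whether buf_size/buf_nr
 * were large enough, e.g. (assuming debugfs is mounted in the usual
 * place):
 *
 *	cat /sys/kernel/debug/block/sda/dropped
 */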
330
331 static int blk_msg_open(struct inode *inode, struct file *filp)
332 {
333         filp->private_data = inode->i_private;
334
335         return 0;
336 }
337
338 static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
339                                 size_t count, loff_t *ppos)
340 {
341         char *msg;
342         struct blk_trace *bt;
343
344         if (count >= BLK_TN_MAX_MSG)
345                 return -EINVAL;
346
347         msg = kmalloc(count + 1, GFP_KERNEL);
348         if (msg == NULL)
349                 return -ENOMEM;
350
351         if (copy_from_user(msg, buffer, count)) {
352                 kfree(msg);
353                 return -EFAULT;
354         }
355
356         msg[count] = '\0';
357         bt = filp->private_data;
358         __trace_note_message(bt, "%s", msg);
359         kfree(msg);
360
361         return count;
362 }
363
364 static const struct file_operations blk_msg_fops = {
365         .owner =        THIS_MODULE,
366         .open =         blk_msg_open,
367         .write =        blk_msg_write,
368         .llseek =       noop_llseek,
369 };
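/*
 * The msg file lets userspace inject free-form markers into the trace,
 * e.g. (again assuming the usual debugfs mount point):
 *
 *	echo "benchmark phase 2" > /sys/kernel/debug/block/sda/msg
 *
 * Such entries are logged via __trace_note_message() above and rendered
 * as message ("m") events.
 */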
370
371 /*
372  * Keep track of how many times we encountered a full subbuffer, to aid
373  * the user space app in telling how many lost events there were.
374  */
375 static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
376                                      void *prev_subbuf, size_t prev_padding)
377 {
378         struct blk_trace *bt;
379
380         if (!relay_buf_full(buf))
381                 return 1;
382
383         bt = buf->chan->private_data;
384         atomic_inc(&bt->dropped);
385         return 0;
386 }
387
388 static int blk_remove_buf_file_callback(struct dentry *dentry)
389 {
390         debugfs_remove(dentry);
391
392         return 0;
393 }
394
395 static struct dentry *blk_create_buf_file_callback(const char *filename,
396                                                    struct dentry *parent,
397                                                    int mode,
398                                                    struct rchan_buf *buf,
399                                                    int *is_global)
400 {
401         return debugfs_create_file(filename, mode, parent, buf,
402                                         &relay_file_operations);
403 }
404
405 static struct rchan_callbacks blk_relay_callbacks = {
406         .subbuf_start           = blk_subbuf_start_callback,
407         .create_buf_file        = blk_create_buf_file_callback,
408         .remove_buf_file        = blk_remove_buf_file_callback,
409 };
410
411 static void blk_trace_setup_lba(struct blk_trace *bt,
412                                 struct block_device *bdev)
413 {
414         struct hd_struct *part = NULL;
415
416         if (bdev)
417                 part = bdev->bd_part;
418
419         if (part) {
420                 bt->start_lba = part->start_sect;
421                 bt->end_lba = part->start_sect + part->nr_sects;
422         } else {
423                 bt->start_lba = 0;
424                 bt->end_lba = -1ULL;
425         }
426 }
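/*
 * Worked example (numbers invented): for a partition starting at sector
 * 2048 with 204800 sectors, only sectors 2048..206848 are traced; for a
 * whole disk, where no hd_struct is available, the range is 0..-1ULL,
 * i.e. effectively unlimited.
 */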
427
428 /*
429  * Set up everything required to start tracing
430  */
431 int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
432                        struct block_device *bdev,
433                        struct blk_user_trace_setup *buts)
434 {
435         struct blk_trace *old_bt, *bt = NULL;
436         struct dentry *dir = NULL;
437         int ret, i;
438
439         if (!buts->buf_size || !buts->buf_nr)
440                 return -EINVAL;
441
442         strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
443         buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
444
445         /*
446          * some device names contain slashes in their paths - convert the
447          * slashes to underscores so the per-device debugfs directory can be created
448          */
449         for (i = 0; i < strlen(buts->name); i++)
450                 if (buts->name[i] == '/')
451                         buts->name[i] = '_';
452
453         bt = kzalloc(sizeof(*bt), GFP_KERNEL);
454         if (!bt)
455                 return -ENOMEM;
456
457         ret = -ENOMEM;
458         bt->sequence = alloc_percpu(unsigned long);
459         if (!bt->sequence)
460                 goto err;
461
462         bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
463         if (!bt->msg_data)
464                 goto err;
465
466         ret = -ENOENT;
467
468         mutex_lock(&blk_tree_mutex);
469         if (!blk_tree_root) {
470                 blk_tree_root = debugfs_create_dir("block", NULL);
471                 if (!blk_tree_root) {
472                         mutex_unlock(&blk_tree_mutex);
473                         goto err;
474                 }
475         }
476         mutex_unlock(&blk_tree_mutex);
477
478         dir = debugfs_create_dir(buts->name, blk_tree_root);
479
480         if (!dir)
481                 goto err;
482
483         bt->dir = dir;
484         bt->dev = dev;
485         atomic_set(&bt->dropped, 0);
486
487         ret = -EIO;
488         bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
489                                                &blk_dropped_fops);
490         if (!bt->dropped_file)
491                 goto err;
492
493         bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
494         if (!bt->msg_file)
495                 goto err;
496
497         bt->rchan = relay_open("trace", dir, buts->buf_size,
498                                 buts->buf_nr, &blk_relay_callbacks, bt);
499         if (!bt->rchan)
500                 goto err;
501
502         bt->act_mask = buts->act_mask;
503         if (!bt->act_mask)
504                 bt->act_mask = (u16) -1;
505
506         blk_trace_setup_lba(bt, bdev);
507
508         /* overwrite with user settings */
509         if (buts->start_lba)
510                 bt->start_lba = buts->start_lba;
511         if (buts->end_lba)
512                 bt->end_lba = buts->end_lba;
513
514         bt->pid = buts->pid;
515         bt->trace_state = Blktrace_setup;
516
517         ret = -EBUSY;
518         old_bt = xchg(&q->blk_trace, bt);
519         if (old_bt) {
520                 (void) xchg(&q->blk_trace, old_bt);
521                 goto err;
522         }
523
524         if (atomic_inc_return(&blk_probes_ref) == 1)
525                 blk_register_tracepoints();
526
527         return 0;
528 err:
529         blk_trace_free(bt);
530         return ret;
531 }
532
533 int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
534                     struct block_device *bdev,
535                     char __user *arg)
536 {
537         struct blk_user_trace_setup buts;
538         int ret;
539
540         ret = copy_from_user(&buts, arg, sizeof(buts));
541         if (ret)
542                 return -EFAULT;
543
544         ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
545         if (ret)
546                 return ret;
547
548         if (copy_to_user(arg, &buts, sizeof(buts))) {
549                 blk_trace_remove(q);
550                 return -EFAULT;
551         }
552         return 0;
553 }
554 EXPORT_SYMBOL_GPL(blk_trace_setup);
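/*
 * A hedged sketch of how a blktrace-style tool drives this path from
 * userspace (buffer sizes invented; see the blktrace(8) sources for the
 * real sequence):
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,    (bytes per relay sub-buffer)
 *		.buf_nr   = 4,             (sub-buffers per CPU)
 *	};
 *	int fd = open("/dev/sda", O_RDONLY);
 *	ioctl(fd, BLKTRACESETUP, &buts);   -> do_blk_trace_setup()
 *	ioctl(fd, BLKTRACESTART);          -> blk_trace_startstop(q, 1)
 *	   ... consume /sys/kernel/debug/block/sda/trace<cpu> ...
 *	ioctl(fd, BLKTRACESTOP);           -> blk_trace_startstop(q, 0)
 *	ioctl(fd, BLKTRACETEARDOWN);       -> blk_trace_remove()
 */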
555
556 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
557 static int compat_blk_trace_setup(struct request_queue *q, char *name,
558                                   dev_t dev, struct block_device *bdev,
559                                   char __user *arg)
560 {
561         struct blk_user_trace_setup buts;
562         struct compat_blk_user_trace_setup cbuts;
563         int ret;
564
565         if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
566                 return -EFAULT;
567
568         buts = (struct blk_user_trace_setup) {
569                 .act_mask = cbuts.act_mask,
570                 .buf_size = cbuts.buf_size,
571                 .buf_nr = cbuts.buf_nr,
572                 .start_lba = cbuts.start_lba,
573                 .end_lba = cbuts.end_lba,
574                 .pid = cbuts.pid,
575         };
576         memcpy(&buts.name, &cbuts.name, 32);
577
578         ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
579         if (ret)
580                 return ret;
581
582         if (copy_to_user(arg, &buts.name, 32)) {
583                 blk_trace_remove(q);
584                 return -EFAULT;
585         }
586
587         return 0;
588 }
589 #endif
590
591 int blk_trace_startstop(struct request_queue *q, int start)
592 {
593         int ret;
594         struct blk_trace *bt = q->blk_trace;
595
596         if (bt == NULL)
597                 return -EINVAL;
598
599         /*
600          * For starting a trace, we can transition from a setup or stopped
601          * trace. For stopping a trace, the state must be running
602          */
603         ret = -EINVAL;
604         if (start) {
605                 if (bt->trace_state == Blktrace_setup ||
606                     bt->trace_state == Blktrace_stopped) {
607                         blktrace_seq++;
608                         smp_mb();
609                         bt->trace_state = Blktrace_running;
610
611                         trace_note_time(bt);
612                         ret = 0;
613                 }
614         } else {
615                 if (bt->trace_state == Blktrace_running) {
616                         bt->trace_state = Blktrace_stopped;
617                         relay_flush(bt->rchan);
618                         ret = 0;
619                 }
620         }
621
622         return ret;
623 }
624 EXPORT_SYMBOL_GPL(blk_trace_startstop);
625
626 /**
627  * blk_trace_ioctl: - handle the ioctls associated with tracing
628  * @bdev:       the block device
629  * @cmd:        the ioctl cmd
630  * @arg:        the argument data, if any
631  *
632  **/
633 int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
634 {
635         struct request_queue *q;
636         int ret, start = 0;
637         char b[BDEVNAME_SIZE];
638
639         q = bdev_get_queue(bdev);
640         if (!q)
641                 return -ENXIO;
642
643         mutex_lock(&bdev->bd_mutex);
644
645         switch (cmd) {
646         case BLKTRACESETUP:
647                 bdevname(bdev, b);
648                 ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
649                 break;
650 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
651         case BLKTRACESETUP32:
652                 bdevname(bdev, b);
653                 ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
654                 break;
655 #endif
656         case BLKTRACESTART:
657                 start = 1;
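                /* fall through to BLKTRACESTOP */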
658         case BLKTRACESTOP:
659                 ret = blk_trace_startstop(q, start);
660                 break;
661         case BLKTRACETEARDOWN:
662                 ret = blk_trace_remove(q);
663                 break;
664         default:
665                 ret = -ENOTTY;
666                 break;
667         }
668
669         mutex_unlock(&bdev->bd_mutex);
670         return ret;
671 }
672
673 /**
674  * blk_trace_shutdown: - stop and cleanup trace structures
675  * @q:    the request queue associated with the device
676  *
677  **/
678 void blk_trace_shutdown(struct request_queue *q)
679 {
680         if (q->blk_trace) {
681                 blk_trace_startstop(q, 0);
682                 blk_trace_remove(q);
683         }
684 }
685
686 /*
687  * blktrace probes
688  */
689
690 /**
691  * blk_add_trace_rq - Add a trace for a request oriented action
692  * @q:          queue the io is for
693  * @rq:         the source request
694  * @what:       the action
695  *
696  * Description:
697  *     Records an action against a request. Will log the request's offset + size.
698  *
699  **/
700 static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
701                                     u32 what)
702 {
703         struct blk_trace *bt = q->blk_trace;
704         int rw = rq->cmd_flags & 0x03;
705
706         if (likely(!bt))
707                 return;
708
709         if (rq->cmd_flags & REQ_DISCARD)
710                 rw |= REQ_DISCARD;
711
712         if (rq->cmd_flags & REQ_SECURE)
713                 rw |= REQ_SECURE;
714
715         if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
716                 what |= BLK_TC_ACT(BLK_TC_PC);
717                 __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
718                                 what, rq->errors, rq->cmd_len, rq->cmd);
719         } else  {
720                 what |= BLK_TC_ACT(BLK_TC_FS);
721                 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
722                                 what, rq->errors, 0, NULL);
723         }
724 }
725
726 static void blk_add_trace_rq_abort(void *ignore,
727                                    struct request_queue *q, struct request *rq)
728 {
729         blk_add_trace_rq(q, rq, BLK_TA_ABORT);
730 }
731
732 static void blk_add_trace_rq_insert(void *ignore,
733                                     struct request_queue *q, struct request *rq)
734 {
735         blk_add_trace_rq(q, rq, BLK_TA_INSERT);
736 }
737
738 static void blk_add_trace_rq_issue(void *ignore,
739                                    struct request_queue *q, struct request *rq)
740 {
741         blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
742 }
743
744 static void blk_add_trace_rq_requeue(void *ignore,
745                                      struct request_queue *q,
746                                      struct request *rq)
747 {
748         blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
749 }
750
751 static void blk_add_trace_rq_complete(void *ignore,
752                                       struct request_queue *q,
753                                       struct request *rq)
754 {
755         blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
756 }
757
758 /**
759  * blk_add_trace_bio - Add a trace for a bio oriented action
760  * @q:          queue the io is for
761  * @bio:        the source bio
762  * @what:       the action
763  *
764  * Description:
765  *     Records an action against a bio. Will log the bio offset + size.
766  *
767  **/
768 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
769                                      u32 what)
770 {
771         struct blk_trace *bt = q->blk_trace;
772
773         if (likely(!bt))
774                 return;
775
776         __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
777                         !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
778 }
779
780 static void blk_add_trace_bio_bounce(void *ignore,
781                                      struct request_queue *q, struct bio *bio)
782 {
783         blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
784 }
785
786 static void blk_add_trace_bio_complete(void *ignore,
787                                        struct request_queue *q, struct bio *bio)
788 {
789         blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
790 }
791
792 static void blk_add_trace_bio_backmerge(void *ignore,
793                                         struct request_queue *q,
794                                         struct bio *bio)
795 {
796         blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
797 }
798
799 static void blk_add_trace_bio_frontmerge(void *ignore,
800                                          struct request_queue *q,
801                                          struct bio *bio)
802 {
803         blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
804 }
805
806 static void blk_add_trace_bio_queue(void *ignore,
807                                     struct request_queue *q, struct bio *bio)
808 {
809         blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
810 }
811
812 static void blk_add_trace_getrq(void *ignore,
813                                 struct request_queue *q,
814                                 struct bio *bio, int rw)
815 {
816         if (bio)
817                 blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
818         else {
819                 struct blk_trace *bt = q->blk_trace;
820
821                 if (bt)
822                         __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
823         }
824 }
825
826
827 static void blk_add_trace_sleeprq(void *ignore,
828                                   struct request_queue *q,
829                                   struct bio *bio, int rw)
830 {
831         if (bio)
832                 blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
833         else {
834                 struct blk_trace *bt = q->blk_trace;
835
836                 if (bt)
837                         __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
838                                         0, 0, NULL);
839         }
840 }
841
842 static void blk_add_trace_plug(void *ignore, struct request_queue *q)
843 {
844         struct blk_trace *bt = q->blk_trace;
845
846         if (bt)
847                 __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
848 }
849
850 static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q)
851 {
852         struct blk_trace *bt = q->blk_trace;
853
854         if (bt) {
855                 unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
856                 __be64 rpdu = cpu_to_be64(pdu);
857
858                 __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
859                                 sizeof(rpdu), &rpdu);
860         }
861 }
862
863 static void blk_add_trace_unplug_timer(void *ignore, struct request_queue *q)
864 {
865         struct blk_trace *bt = q->blk_trace;
866
867         if (bt) {
868                 unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
869                 __be64 rpdu = cpu_to_be64(pdu);
870
871                 __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
872                                 sizeof(rpdu), &rpdu);
873         }
874 }
875
876 static void blk_add_trace_split(void *ignore,
877                                 struct request_queue *q, struct bio *bio,
878                                 unsigned int pdu)
879 {
880         struct blk_trace *bt = q->blk_trace;
881
882         if (bt) {
883                 __be64 rpdu = cpu_to_be64(pdu);
884
885                 __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
886                                 BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
887                                 sizeof(rpdu), &rpdu);
888         }
889 }
890
891 /**
892  * blk_add_trace_remap - Add a trace for a remap operation
893  * @ignore:     trace callback data parameter (not used)
894  * @q:          queue the io is for
895  * @bio:        the source bio
896  * @dev:        target device
897  * @from:       source sector
898  *
899  * Description:
900  *     Device mapper or raid targets sometimes need to split a bio because
901  *     it spans a stripe (or similar). Add a trace for that action.
902  *
903  **/
904 static void blk_add_trace_remap(void *ignore,
905                                 struct request_queue *q, struct bio *bio,
906                                 dev_t dev, sector_t from)
907 {
908         struct blk_trace *bt = q->blk_trace;
909         struct blk_io_trace_remap r;
910
911         if (likely(!bt))
912                 return;
913
914         r.device_from = cpu_to_be32(dev);
915         r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
916         r.sector_from = cpu_to_be64(from);
917
918         __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
919                         BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
920                         sizeof(r), &r);
921 }
922
923 /**
924  * blk_add_trace_rq_remap - Add a trace for a request-remap operation
925  * @ignore:     trace callback data parameter (not used)
926  * @q:          queue the io is for
927  * @rq:         the source request
928  * @dev:        target device
929  * @from:       source sector
930  *
931  * Description:
932  *     Device mapper remaps requests to other devices.
933  *     Add a trace for that action.
934  *
935  **/
936 static void blk_add_trace_rq_remap(void *ignore,
937                                    struct request_queue *q,
938                                    struct request *rq, dev_t dev,
939                                    sector_t from)
940 {
941         struct blk_trace *bt = q->blk_trace;
942         struct blk_io_trace_remap r;
943
944         if (likely(!bt))
945                 return;
946
947         r.device_from = cpu_to_be32(dev);
948         r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
949         r.sector_from = cpu_to_be64(from);
950
951         __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
952                         rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
953                         sizeof(r), &r);
954 }
955
956 /**
957  * blk_add_driver_data - Add binary message with driver-specific data
958  * @q:          queue the io is for
959  * @rq:         io request
960  * @data:       driver-specific data
961  * @len:        length of driver-specific data
962  *
963  * Description:
964  *     Some drivers might want to write driver-specific data per request.
965  *
966  **/
967 void blk_add_driver_data(struct request_queue *q,
968                          struct request *rq,
969                          void *data, size_t len)
970 {
971         struct blk_trace *bt = q->blk_trace;
972
973         if (likely(!bt))
974                 return;
975
976         if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
977                 __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
978                                 BLK_TA_DRV_DATA, rq->errors, len, data);
979         else
980                 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
981                                 BLK_TA_DRV_DATA, rq->errors, len, data);
982 }
983 EXPORT_SYMBOL_GPL(blk_add_driver_data);
984
985 static void blk_register_tracepoints(void)
986 {
987         int ret;
988
989         ret = register_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);
990         WARN_ON(ret);
991         ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
992         WARN_ON(ret);
993         ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
994         WARN_ON(ret);
995         ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
996         WARN_ON(ret);
997         ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
998         WARN_ON(ret);
999         ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1000         WARN_ON(ret);
1001         ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1002         WARN_ON(ret);
1003         ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1004         WARN_ON(ret);
1005         ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1006         WARN_ON(ret);
1007         ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1008         WARN_ON(ret);
1009         ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
1010         WARN_ON(ret);
1011         ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1012         WARN_ON(ret);
1013         ret = register_trace_block_plug(blk_add_trace_plug, NULL);
1014         WARN_ON(ret);
1015         ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
1016         WARN_ON(ret);
1017         ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
1018         WARN_ON(ret);
1019         ret = register_trace_block_split(blk_add_trace_split, NULL);
1020         WARN_ON(ret);
1021         ret = register_trace_block_remap(blk_add_trace_remap, NULL);
1022         WARN_ON(ret);
1023         ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1024         WARN_ON(ret);
1025 }
1026
1027 static void blk_unregister_tracepoints(void)
1028 {
1029         unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1030         unregister_trace_block_remap(blk_add_trace_remap, NULL);
1031         unregister_trace_block_split(blk_add_trace_split, NULL);
1032         unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
1033         unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
1034         unregister_trace_block_plug(blk_add_trace_plug, NULL);
1035         unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1036         unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
1037         unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1038         unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1039         unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1040         unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1041         unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1042         unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1043         unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1044         unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1045         unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1046         unregister_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);
1047
1048         tracepoint_synchronize_unregister();
1049 }
1050
1051 /*
1052  * struct blk_io_tracer formatting routines
1053  */
1054
1055 static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
1056 {
1057         int i = 0;
1058         int tc = t->action >> BLK_TC_SHIFT;
1059
1060         if (t->action == BLK_TN_MESSAGE) {
1061                 rwbs[i++] = 'N';
1062                 goto out;
1063         }
1064
1065         if (tc & BLK_TC_DISCARD)
1066                 rwbs[i++] = 'D';
1067         else if (tc & BLK_TC_WRITE)
1068                 rwbs[i++] = 'W';
1069         else if (t->bytes)
1070                 rwbs[i++] = 'R';
1071         else
1072                 rwbs[i++] = 'N';
1073
1074         if (tc & BLK_TC_AHEAD)
1075                 rwbs[i++] = 'A';
1076         if (tc & BLK_TC_BARRIER)
1077                 rwbs[i++] = 'B';
1078         if (tc & BLK_TC_SYNC)
1079                 rwbs[i++] = 'S';
1080         if (tc & BLK_TC_META)
1081                 rwbs[i++] = 'M';
1082 out:
1083         rwbs[i] = '\0';
1084 }
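/*
 * Example (a sketch): an action whose category bits contain
 * BLK_TC_WRITE | BLK_TC_SYNC yields "WS", a readahead read yields "RA",
 * and a BLK_TN_MESSAGE entry is rendered as just "N".
 */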
1085
1086 static inline
1087 const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
1088 {
1089         return (const struct blk_io_trace *)ent;
1090 }
1091
1092 static inline const void *pdu_start(const struct trace_entry *ent)
1093 {
1094         return te_blk_io_trace(ent) + 1;
1095 }
1096
1097 static inline u32 t_action(const struct trace_entry *ent)
1098 {
1099         return te_blk_io_trace(ent)->action;
1100 }
1101
1102 static inline u32 t_bytes(const struct trace_entry *ent)
1103 {
1104         return te_blk_io_trace(ent)->bytes;
1105 }
1106
1107 static inline u32 t_sec(const struct trace_entry *ent)
1108 {
1109         return te_blk_io_trace(ent)->bytes >> 9;
1110 }
1111
1112 static inline unsigned long long t_sector(const struct trace_entry *ent)
1113 {
1114         return te_blk_io_trace(ent)->sector;
1115 }
1116
1117 static inline __u16 t_error(const struct trace_entry *ent)
1118 {
1119         return te_blk_io_trace(ent)->error;
1120 }
1121
1122 static __u64 get_pdu_int(const struct trace_entry *ent)
1123 {
1124         const __u64 *val = pdu_start(ent);
1125         return be64_to_cpu(*val);
1126 }
1127
1128 static void get_pdu_remap(const struct trace_entry *ent,
1129                           struct blk_io_trace_remap *r)
1130 {
1131         const struct blk_io_trace_remap *__r = pdu_start(ent);
1132         __u64 sector_from = __r->sector_from;
1133
1134         r->device_from = be32_to_cpu(__r->device_from);
1135         r->device_to   = be32_to_cpu(__r->device_to);
1136         r->sector_from = be64_to_cpu(sector_from);
1137 }
1138
1139 typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);
1140
1141 static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
1142 {
1143         char rwbs[6];
1144         unsigned long long ts  = iter->ts;
1145         unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
1146         unsigned secs          = (unsigned long)ts;
1147         const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1148
1149         fill_rwbs(rwbs, t);
1150
1151         return trace_seq_printf(&iter->seq,
1152                                 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
1153                                 MAJOR(t->device), MINOR(t->device), iter->cpu,
1154                                 secs, nsec_rem, iter->ent->pid, act, rwbs);
1155 }
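/*
 * With the format string above, a classic-mode line begins like this
 * (example values invented):
 *
 *	  8,0    1    42.123456789  4711  Q   W ...
 *
 * i.e. major,minor device, CPU, seconds.nanoseconds, PID, action and
 * RWBS flags, followed by whatever the per-action print handler appends.
 */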
1156
1157 static int blk_log_action(struct trace_iterator *iter, const char *act)
1158 {
1159         char rwbs[6];
1160         const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1161
1162         fill_rwbs(rwbs, t);
1163         return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
1164                                 MAJOR(t->device), MINOR(t->device), act, rwbs);
1165 }
1166
1167 static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
1168 {
1169         const unsigned char *pdu_buf;
1170         int pdu_len;
1171         int i, end, ret;
1172
1173         pdu_buf = pdu_start(ent);
1174         pdu_len = te_blk_io_trace(ent)->pdu_len;
1175
1176         if (!pdu_len)
1177                 return 1;
1178
1179         /* find the last zero that needs to be printed (the rest of the trailing zeroes are elided as "..") */
1180         for (end = pdu_len - 1; end >= 0; end--)
1181                 if (pdu_buf[end])
1182                         break;
1183         end++;
1184
1185         if (!trace_seq_putc(s, '('))
1186                 return 0;
1187
1188         for (i = 0; i < pdu_len; i++) {
1189
1190                 ret = trace_seq_printf(s, "%s%02x",
1191                                        i == 0 ? "" : " ", pdu_buf[i]);
1192                 if (!ret)
1193                         return ret;
1194
1195                 /*
1196                  * stop when the rest is just zeroes and indicate so
1197                  * with a ".." appended
1198                  */
1199                 if (i == end && end != pdu_len - 1)
1200                         return trace_seq_puts(s, " ..) ");
1201         }
1202
1203         return trace_seq_puts(s, ") ");
1204 }
1205
1206 static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
1207 {
1208         char cmd[TASK_COMM_LEN];
1209
1210         trace_find_cmdline(ent->pid, cmd);
1211
1212         if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1213                 int ret;
1214
1215                 ret = trace_seq_printf(s, "%u ", t_bytes(ent));
1216                 if (!ret)
1217                         return 0;
1218                 ret = blk_log_dump_pdu(s, ent);
1219                 if (!ret)
1220                         return 0;
1221                 return trace_seq_printf(s, "[%s]\n", cmd);
1222         } else {
1223                 if (t_sec(ent))
1224                         return trace_seq_printf(s, "%llu + %u [%s]\n",
1225                                                 t_sector(ent), t_sec(ent), cmd);
1226                 return trace_seq_printf(s, "[%s]\n", cmd);
1227         }
1228 }
1229
1230 static int blk_log_with_error(struct trace_seq *s,
1231                               const struct trace_entry *ent)
1232 {
1233         if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1234                 int ret;
1235
1236                 ret = blk_log_dump_pdu(s, ent);
1237                 if (ret)
1238                         return trace_seq_printf(s, "[%d]\n", t_error(ent));
1239                 return 0;
1240         } else {
1241                 if (t_sec(ent))
1242                         return trace_seq_printf(s, "%llu + %u [%d]\n",
1243                                                 t_sector(ent),
1244                                                 t_sec(ent), t_error(ent));
1245                 return trace_seq_printf(s, "%llu [%d]\n",
1246                                         t_sector(ent), t_error(ent));
1247         }
1248 }
1249
1250 static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
1251 {
1252         struct blk_io_trace_remap r = { .device_from = 0, };
1253
1254         get_pdu_remap(ent, &r);
1255         return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
1256                                 t_sector(ent), t_sec(ent),
1257                                 MAJOR(r.device_from), MINOR(r.device_from),
1258                                 (unsigned long long)r.sector_from);
1259 }
1260
1261 static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
1262 {
1263         char cmd[TASK_COMM_LEN];
1264
1265         trace_find_cmdline(ent->pid, cmd);
1266
1267         return trace_seq_printf(s, "[%s]\n", cmd);
1268 }
1269
1270 static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
1271 {
1272         char cmd[TASK_COMM_LEN];
1273
1274         trace_find_cmdline(ent->pid, cmd);
1275
1276         return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
1277 }
1278
1279 static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
1280 {
1281         char cmd[TASK_COMM_LEN];
1282
1283         trace_find_cmdline(ent->pid, cmd);
1284
1285         return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
1286                                 get_pdu_int(ent), cmd);
1287 }
1288
1289 static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
1290 {
1291         int ret;
1292         const struct blk_io_trace *t = te_blk_io_trace(ent);
1293
1294         ret = trace_seq_putmem(s, t + 1, t->pdu_len);
1295         if (ret)
1296                 return trace_seq_putc(s, '\n');
1297         return ret;
1298 }
1299
1300 /*
1301  * struct tracer operations
1302  */
1303
1304 static void blk_tracer_print_header(struct seq_file *m)
1305 {
1306         if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1307                 return;
1308         seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
1309                     "#  |     |     |           |   |   |\n");
1310 }
1311
1312 static void blk_tracer_start(struct trace_array *tr)
1313 {
1314         blk_tracer_enabled = true;
1315 }
1316
1317 static int blk_tracer_init(struct trace_array *tr)
1318 {
1319         blk_tr = tr;
1320         blk_tracer_start(tr);
1321         return 0;
1322 }
1323
1324 static void blk_tracer_stop(struct trace_array *tr)
1325 {
1326         blk_tracer_enabled = false;
1327 }
1328
1329 static void blk_tracer_reset(struct trace_array *tr)
1330 {
1331         blk_tracer_stop(tr);
1332 }
1333
1334 static const struct {
1335         const char *act[2];
1336         int        (*print)(struct trace_seq *s, const struct trace_entry *ent);
1337 } what2act[] = {
1338         [__BLK_TA_QUEUE]        = {{  "Q", "queue" },      blk_log_generic },
1339         [__BLK_TA_BACKMERGE]    = {{  "M", "backmerge" },  blk_log_generic },
1340         [__BLK_TA_FRONTMERGE]   = {{  "F", "frontmerge" }, blk_log_generic },
1341         [__BLK_TA_GETRQ]        = {{  "G", "getrq" },      blk_log_generic },
1342         [__BLK_TA_SLEEPRQ]      = {{  "S", "sleeprq" },    blk_log_generic },
1343         [__BLK_TA_REQUEUE]      = {{  "R", "requeue" },    blk_log_with_error },
1344         [__BLK_TA_ISSUE]        = {{  "D", "issue" },      blk_log_generic },
1345         [__BLK_TA_COMPLETE]     = {{  "C", "complete" },   blk_log_with_error },
1346         [__BLK_TA_PLUG]         = {{  "P", "plug" },       blk_log_plug },
1347         [__BLK_TA_UNPLUG_IO]    = {{  "U", "unplug_io" },  blk_log_unplug },
1348         [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
1349         [__BLK_TA_INSERT]       = {{  "I", "insert" },     blk_log_generic },
1350         [__BLK_TA_SPLIT]        = {{  "X", "split" },      blk_log_split },
1351         [__BLK_TA_BOUNCE]       = {{  "B", "bounce" },     blk_log_generic },
1352         [__BLK_TA_REMAP]        = {{  "A", "remap" },      blk_log_remap },
1353 };
1354
1355 static enum print_line_t print_one_line(struct trace_iterator *iter,
1356                                         bool classic)
1357 {
1358         struct trace_seq *s = &iter->seq;
1359         const struct blk_io_trace *t;
1360         u16 what;
1361         int ret;
1362         bool long_act;
1363         blk_log_action_t *log_action;
1364
1365         t          = te_blk_io_trace(iter->ent);
1366         what       = t->action & ((1 << BLK_TC_SHIFT) - 1);
1367         long_act   = !!(trace_flags & TRACE_ITER_VERBOSE);
1368         log_action = classic ? &blk_log_action_classic : &blk_log_action;
1369
1370         if (t->action == BLK_TN_MESSAGE) {
1371                 ret = log_action(iter, long_act ? "message" : "m");
1372                 if (ret)
1373                         ret = blk_log_msg(s, iter->ent);
1374                 goto out;
1375         }
1376
1377         if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
1378                 ret = trace_seq_printf(s, "Unknown action %x\n", what);
1379         else {
1380                 ret = log_action(iter, what2act[what].act[long_act]);
1381                 if (ret)
1382                         ret = what2act[what].print(s, iter->ent);
1383         }
1384 out:
1385         return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
1386 }
1387
1388 static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
1389                                                int flags, struct trace_event *event)
1390 {
1391         return print_one_line(iter, false);
1392 }
1393
1394 static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
1395 {
1396         struct trace_seq *s = &iter->seq;
1397         struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
1398         const int offset = offsetof(struct blk_io_trace, sector);
1399         struct blk_io_trace old = {
1400                 .magic    = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
1401                 .time     = iter->ts,
1402         };
1403
1404         if (!trace_seq_putmem(s, &old, offset))
1405                 return 0;
1406         return trace_seq_putmem(s, &t->sector,
1407                                 sizeof(old) - offset + t->pdu_len);
1408 }
1409
1410 static enum print_line_t
1411 blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
1412                              struct trace_event *event)
1413 {
1414         return blk_trace_synthesize_old_trace(iter) ?
1415                         TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
1416 }
1417
1418 static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
1419 {
1420         if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1421                 return TRACE_TYPE_UNHANDLED;
1422
1423         return print_one_line(iter, true);
1424 }
1425
1426 static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
1427 {
1428         /* don't output context-info for blk_classic output */
1429         if (bit == TRACE_BLK_OPT_CLASSIC) {
1430                 if (set)
1431                         trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
1432                 else
1433                         trace_flags |= TRACE_ITER_CONTEXT_INFO;
1434         }
1435         return 0;
1436 }
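/*
 * A sketch of flipping this option from userspace, with the blk tracer
 * selected and debugfs mounted in the usual place:
 *
 *	echo 1 > /sys/kernel/debug/tracing/options/blk_classic
 *
 * which switches output to blk_log_action_classic() and suppresses the
 * generic context-info columns.
 */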
1437
1438 static struct tracer blk_tracer __read_mostly = {
1439         .name           = "blk",
1440         .init           = blk_tracer_init,
1441         .reset          = blk_tracer_reset,
1442         .start          = blk_tracer_start,
1443         .stop           = blk_tracer_stop,
1444         .print_header   = blk_tracer_print_header,
1445         .print_line     = blk_tracer_print_line,
1446         .flags          = &blk_tracer_flags,
1447         .set_flag       = blk_tracer_set_flag,
1448 };
1449
1450 static struct trace_event_functions trace_blk_event_funcs = {
1451         .trace          = blk_trace_event_print,
1452         .binary         = blk_trace_event_print_binary,
1453 };
1454
1455 static struct trace_event trace_blk_event = {
1456         .type           = TRACE_BLK,
1457         .funcs          = &trace_blk_event_funcs,
1458 };
1459
1460 static int __init init_blk_tracer(void)
1461 {
1462         if (!register_ftrace_event(&trace_blk_event)) {
1463                 pr_warning("Warning: could not register block events\n");
1464                 return 1;
1465         }
1466
1467         if (register_tracer(&blk_tracer) != 0) {
1468                 pr_warning("Warning: could not register the block tracer\n");
1469                 unregister_ftrace_event(&trace_blk_event);
1470                 return 1;
1471         }
1472
1473         return 0;
1474 }
1475
1476 device_initcall(init_blk_tracer);
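/*
 * Once registered, tracing can be driven entirely through ftrace instead
 * of the ioctl path, roughly (a sketch, usual mount points assumed):
 *
 *	echo blk > /sys/kernel/debug/tracing/current_tracer
 *	echo 1 > /sys/block/sda/trace/enable
 *	cat /sys/kernel/debug/tracing/trace
 */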
1477
1478 static int blk_trace_remove_queue(struct request_queue *q)
1479 {
1480         struct blk_trace *bt;
1481
1482         bt = xchg(&q->blk_trace, NULL);
1483         if (bt == NULL)
1484                 return -EINVAL;
1485
1486         if (atomic_dec_and_test(&blk_probes_ref))
1487                 blk_unregister_tracepoints();
1488
1489         blk_trace_free(bt);
1490         return 0;
1491 }
1492
1493 /*
1494  * Set up everything required to start tracing
1495  */
1496 static int blk_trace_setup_queue(struct request_queue *q,
1497                                  struct block_device *bdev)
1498 {
1499         struct blk_trace *old_bt, *bt = NULL;
1500         int ret = -ENOMEM;
1501
1502         bt = kzalloc(sizeof(*bt), GFP_KERNEL);
1503         if (!bt)
1504                 return -ENOMEM;
1505
1506         bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
1507         if (!bt->msg_data)
1508                 goto free_bt;
1509
1510         bt->dev = bdev->bd_dev;
1511         bt->act_mask = (u16)-1;
1512
1513         blk_trace_setup_lba(bt, bdev);
1514
1515         old_bt = xchg(&q->blk_trace, bt);
1516         if (old_bt != NULL) {
1517                 (void)xchg(&q->blk_trace, old_bt);
1518                 ret = -EBUSY;
1519                 goto free_bt;
1520         }
1521
1522         if (atomic_inc_return(&blk_probes_ref) == 1)
1523                 blk_register_tracepoints();
1524         return 0;
1525
1526 free_bt:
1527         blk_trace_free(bt);
1528         return ret;
1529 }
1530
1531 /*
1532  * sysfs interface to enable and configure tracing
1533  */
1534
1535 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1536                                          struct device_attribute *attr,
1537                                          char *buf);
1538 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1539                                           struct device_attribute *attr,
1540                                           const char *buf, size_t count);
1541 #define BLK_TRACE_DEVICE_ATTR(_name) \
1542         DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
1543                     sysfs_blk_trace_attr_show, \
1544                     sysfs_blk_trace_attr_store)
1545
1546 static BLK_TRACE_DEVICE_ATTR(enable);
1547 static BLK_TRACE_DEVICE_ATTR(act_mask);
1548 static BLK_TRACE_DEVICE_ATTR(pid);
1549 static BLK_TRACE_DEVICE_ATTR(start_lba);
1550 static BLK_TRACE_DEVICE_ATTR(end_lba);
1551
1552 static struct attribute *blk_trace_attrs[] = {
1553         &dev_attr_enable.attr,
1554         &dev_attr_act_mask.attr,
1555         &dev_attr_pid.attr,
1556         &dev_attr_start_lba.attr,
1557         &dev_attr_end_lba.attr,
1558         NULL
1559 };
1560
1561 struct attribute_group blk_trace_attr_group = {
1562         .name  = "trace",
1563         .attrs = blk_trace_attrs,
1564 };
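/*
 * These attributes appear as /sys/block/<disk>/trace/* (and under each
 * partition). A hedged example of configuring and enabling from a shell:
 *
 *	echo read,write > /sys/block/sda/trace/act_mask
 *	echo 1 > /sys/block/sda/trace/enable
 */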
1565
1566 static const struct {
1567         int mask;
1568         const char *str;
1569 } mask_maps[] = {
1570         { BLK_TC_READ,          "read"          },
1571         { BLK_TC_WRITE,         "write"         },
1572         { BLK_TC_BARRIER,       "barrier"       },
1573         { BLK_TC_SYNC,          "sync"          },
1574         { BLK_TC_QUEUE,         "queue"         },
1575         { BLK_TC_REQUEUE,       "requeue"       },
1576         { BLK_TC_ISSUE,         "issue"         },
1577         { BLK_TC_COMPLETE,      "complete"      },
1578         { BLK_TC_FS,            "fs"            },
1579         { BLK_TC_PC,            "pc"            },
1580         { BLK_TC_AHEAD,         "ahead"         },
1581         { BLK_TC_META,          "meta"          },
1582         { BLK_TC_DISCARD,       "discard"       },
1583         { BLK_TC_DRV_DATA,      "drv_data"      },
1584 };
1585
1586 static int blk_trace_str2mask(const char *str)
1587 {
1588         int i;
1589         int mask = 0;
1590         char *buf, *s, *token;
1591
1592         buf = kstrdup(str, GFP_KERNEL);
1593         if (buf == NULL)
1594                 return -ENOMEM;
1595         s = strstrip(buf);
1596
1597         while (1) {
1598                 token = strsep(&s, ",");
1599                 if (token == NULL)
1600                         break;
1601
1602                 if (*token == '\0')
1603                         continue;
1604
1605                 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1606                         if (strcasecmp(token, mask_maps[i].str) == 0) {
1607                                 mask |= mask_maps[i].mask;
1608                                 break;
1609                         }
1610                 }
1611                 if (i == ARRAY_SIZE(mask_maps)) {
1612                         mask = -EINVAL;
1613                         break;
1614                 }
1615         }
1616         kfree(buf);
1617
1618         return mask;
1619 }
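/*
 * Illustrative results of the parser above (not additional kernel
 * code, just the behaviour spelled out):
 *
 *	blk_trace_str2mask("read,write")  == BLK_TC_READ | BLK_TC_WRITE
 *	blk_trace_str2mask("  sync  ")    == BLK_TC_SYNC   (strstrip'ed)
 *	blk_trace_str2mask("read,,write") == BLK_TC_READ | BLK_TC_WRITE
 *						(empty tokens are skipped)
 *	blk_trace_str2mask("READ")        == BLK_TC_READ   (strcasecmp)
 *	blk_trace_str2mask("bogus")       == -EINVAL
 */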
1620
1621 static ssize_t blk_trace_mask2str(char *buf, int mask)
1622 {
1623         int i;
1624         char *p = buf;
1625
1626         for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1627                 if (mask & mask_maps[i].mask) {
1628                         p += sprintf(p, "%s%s",
1629                                     (p == buf) ? "" : ",", mask_maps[i].str);
1630                 }
1631         }
1632         *p++ = '\n';
1633
1634         return p - buf;
1635 }
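/*
 * blk_trace_mask2str() is the inverse: it writes a comma-separated,
 * newline-terminated list and returns its length, so a mask of
 * BLK_TC_READ | BLK_TC_SYNC comes back as "read,sync\n".  The buffer
 * is a PAGE_SIZE sysfs page, which easily holds the worst case (all
 * fourteen names plus separators), hence the unchecked sprintf().
 */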
1636
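/*
 * bdev_get_queue() dereferences bdev->bd_disk, so refuse devices that
 * have no gendisk attached instead of oopsing on them.
 */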
1637 static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
1638 {
1639         if (bdev->bd_disk == NULL)
1640                 return NULL;
1641
1642         return bdev_get_queue(bdev);
1643 }
1644
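/*
 * Read side: "enable" reports 0/1 depending on whether a blk_trace is
 * attached; the remaining attributes read back their current value, or
 * the literal string "disabled" while no trace exists.  The
 * dev_to_part() -> bdget(part_devt()) -> blk_trace_get_queue() chain
 * resolves the sysfs device back to its request queue, and bd_mutex
 * serialises against concurrent stores.
 */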
1645 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1646                                          struct device_attribute *attr,
1647                                          char *buf)
1648 {
1649         struct hd_struct *p = dev_to_part(dev);
1650         struct request_queue *q;
1651         struct block_device *bdev;
1652         ssize_t ret = -ENXIO;
1653
1654         bdev = bdget(part_devt(p));
1655         if (bdev == NULL)
1656                 goto out;
1657
1658         q = blk_trace_get_queue(bdev);
1659         if (q == NULL)
1660                 goto out_bdput;
1661
1662         mutex_lock(&bdev->bd_mutex);
1663
1664         if (attr == &dev_attr_enable) {
1665                 ret = sprintf(buf, "%u\n", !!q->blk_trace);
1666                 goto out_unlock_bdev;
1667         }
1668
1669         if (q->blk_trace == NULL)
1670                 ret = sprintf(buf, "disabled\n");
1671         else if (attr == &dev_attr_act_mask)
1672                 ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
1673         else if (attr == &dev_attr_pid)
1674                 ret = sprintf(buf, "%u\n", q->blk_trace->pid);
1675         else if (attr == &dev_attr_start_lba)
1676                 ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
1677         else if (attr == &dev_attr_end_lba)
1678                 ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
1679
1680 out_unlock_bdev:
1681         mutex_unlock(&bdev->bd_mutex);
1682 out_bdput:
1683         bdput(bdev);
1684 out:
1685         return ret;
1686 }
1687
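/*
 * Write side: "enable" maps 1/0 onto blk_trace_setup_queue() and
 * blk_trace_remove_queue(); writing any other attribute sets up a
 * trace first if none exists yet.  act_mask accepts either a hex mask
 * or a name list ("read,write,queue"): the sscanf("%llx") probe tries
 * the numeric form first and only falls back to blk_trace_str2mask().
 * Caveat: a name list whose first token starts with hex digits (e.g.
 * "fs" or "discard") will be picked up by the %llx conversion, so the
 * numeric form effectively shadows those names.
 */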
1688 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1689                                           struct device_attribute *attr,
1690                                           const char *buf, size_t count)
1691 {
1692         struct block_device *bdev;
1693         struct request_queue *q;
1694         struct hd_struct *p;
1695         u64 value;
1696         ssize_t ret = -EINVAL;
1697
1698         if (count == 0)
1699                 goto out;
1700
1701         if (attr == &dev_attr_act_mask) {
1702                 if (sscanf(buf, "%llx", &value) != 1) {
1703                         /* Assume it is a list of trace category names */
1704                         ret = blk_trace_str2mask(buf);
1705                         if (ret < 0)
1706                                 goto out;
1707                         value = ret;
1708                 }
1709         } else if (sscanf(buf, "%llu", &value) != 1)
1710                 goto out;
1711
1712         ret = -ENXIO;
1713
1714         p = dev_to_part(dev);
1715         bdev = bdget(part_devt(p));
1716         if (bdev == NULL)
1717                 goto out;
1718
1719         q = blk_trace_get_queue(bdev);
1720         if (q == NULL)
1721                 goto out_bdput;
1722
1723         mutex_lock(&bdev->bd_mutex);
1724
1725         if (attr == &dev_attr_enable) {
1726                 if (value)
1727                         ret = blk_trace_setup_queue(q, bdev);
1728                 else
1729                         ret = blk_trace_remove_queue(q);
1730                 goto out_unlock_bdev;
1731         }
1732
1733         ret = 0;
1734         if (q->blk_trace == NULL)
1735                 ret = blk_trace_setup_queue(q, bdev);
1736
1737         if (ret == 0) {
1738                 if (attr == &dev_attr_act_mask)
1739                         q->blk_trace->act_mask = value;
1740                 else if (attr == &dev_attr_pid)
1741                         q->blk_trace->pid = value;
1742                 else if (attr == &dev_attr_start_lba)
1743                         q->blk_trace->start_lba = value;
1744                 else if (attr == &dev_attr_end_lba)
1745                         q->blk_trace->end_lba = value;
1746         }
1747
1748 out_unlock_bdev:
1749         mutex_unlock(&bdev->bd_mutex);
1750 out_bdput:
1751         bdput(bdev);
1752 out:
1753         return ret ? ret : count;
1754 }
1755
1756 int blk_trace_init_sysfs(struct device *dev)
1757 {
1758         return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
1759 }
1760
1761 void blk_trace_remove_sysfs(struct device *dev)
1762 {
1763         sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
1764 }
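/*
 * These two helpers are the entry points the block core uses to attach
 * and detach the "trace" attribute group when a disk is registered; in
 * this tree they are expected to be called from blk_register_queue()
 * and blk_unregister_queue() respectively (an assumption based on the
 * usual call sites, which are not visible in this file).
 */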
1765
1766 #endif /* CONFIG_BLK_DEV_IO_TRACE */
1767
1768 #ifdef CONFIG_EVENT_TRACING
1769
1770 void blk_dump_cmd(char *buf, struct request *rq)
1771 {
1772         int i, end;
1773         int len = rq->cmd_len;
1774         unsigned char *cmd = rq->cmd;
1775
1776         if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
1777                 buf[0] = '\0';
1778                 return;
1779         }
1780
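	/* find 'end': one past the last non-zero command byte */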
1781         for (end = len - 1; end >= 0; end--)
1782                 if (cmd[end])
1783                         break;
1784         end++;
1785
1786         for (i = 0; i < len; i++) {
1787                 buf += sprintf(buf, "%s%02x", i == 0 ? "" : " ", cmd[i]);
1788                 if (i == end && end != len - 1) {
1789                         sprintf(buf, " ..");
1790                         break;
1791                 }
1792         }
1793 }
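/*
 * Illustrative output of blk_dump_cmd() (assuming a REQ_TYPE_BLOCK_PC
 * request with the given cmd[] bytes and cmd_len == 10):
 *
 *	{ 0x12, 0, 0, 0, 0x24, 0, 0, 0, 0, 0 } -> "12 00 00 00 24 00 .."
 *
 * i.e. a run of two or more trailing zero bytes is abbreviated: the
 * first of them is still printed, the rest collapse into " ..".
 */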
1794
1795 void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
1796 {
1797         int i = 0;
1798
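	/*
	 * The first character encodes the basic type: Write, Discard,
	 * Read (only when the request actually moves data) or None for
	 * an empty, flag-only request.  Everything after it is an
	 * independent modifier.
	 */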
1799         if (rw & WRITE)
1800                 rwbs[i++] = 'W';
1801         else if (rw & REQ_DISCARD)
1802                 rwbs[i++] = 'D';
1803         else if (bytes)
1804                 rwbs[i++] = 'R';
1805         else
1806                 rwbs[i++] = 'N';
1807
1808         if (rw & REQ_RAHEAD)
1809                 rwbs[i++] = 'A';
1810         if (rw & REQ_HARDBARRIER)
1811                 rwbs[i++] = 'B';
1812         if (rw & REQ_SYNC)
1813                 rwbs[i++] = 'S';
1814         if (rw & REQ_META)
1815                 rwbs[i++] = 'M';
1816         if (rw & REQ_SECURE)
1817                 rwbs[i++] = 'E';
1818
1819         rwbs[i] = '\0';
1820 }
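/*
 * Examples of the strings produced above (rw flags -> rwbs):
 *
 *	WRITE | REQ_SYNC           -> "WS"
 *	REQ_DISCARD                -> "D"
 *	0 (read), bytes > 0        -> "R"
 *	0 (read), bytes == 0       -> "N"
 *	REQ_RAHEAD, bytes > 0      -> "RA"
 */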
1821
1822 void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
1823 {
1824         int rw = rq->cmd_flags & 0x03;
1825         int bytes;
1826
1827         if (rq->cmd_flags & REQ_DISCARD)
1828                 rw |= REQ_DISCARD;
1829
1830         if (rq->cmd_flags & REQ_SECURE)
1831                 rw |= REQ_SECURE;
1832
1833         bytes = blk_rq_bytes(rq);
1834
1835         blk_fill_rwbs(rwbs, rw, bytes);
1836 }
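/*
 * blk_fill_rwbs_rq() reduces a request to the flags blk_fill_rwbs()
 * cares about: the 0x03 mask keeps only the low direction bits of
 * cmd_flags (a leftover of the historic READ/WRITE/READA encoding),
 * so REQ_DISCARD and REQ_SECURE, which the mask strips, are OR-ed
 * back in explicitly before delegating.
 */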
1837
1838 #endif /* CONFIG_EVENT_TRACING */
1839