[pandora-kernel.git] kernel/trace/blktrace.c
1 /*
2  * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
16  *
17  */
18 #include <linux/kernel.h>
19 #include <linux/blkdev.h>
20 #include <linux/blktrace_api.h>
21 #include <linux/percpu.h>
22 #include <linux/init.h>
23 #include <linux/mutex.h>
24 #include <linux/debugfs.h>
25 #include <linux/smp_lock.h>
26 #include <linux/time.h>
27 #include <linux/uaccess.h>
28
29 #include <trace/events/block.h>
30
31 #include "trace_output.h"
32
33 #ifdef CONFIG_BLK_DEV_IO_TRACE
34
35 static unsigned int blktrace_seq __read_mostly = 1;
36
37 static struct trace_array *blk_tr;
38 static bool blk_tracer_enabled __read_mostly;
39
40 /* Select an alternative, minimalistic output to the original one */
41 #define TRACE_BLK_OPT_CLASSIC   0x1
42
43 static struct tracer_opt blk_tracer_opts[] = {
44         /* The minimalistic (classic) output is disabled by default */
45         { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
46         { }
47 };
48
49 static struct tracer_flags blk_tracer_flags = {
50         .val  = 0,
51         .opts = blk_tracer_opts,
52 };
53
54 /* Global reference count of probes */
55 static atomic_t blk_probes_ref = ATOMIC_INIT(0);
56
57 static void blk_register_tracepoints(void);
58 static void blk_unregister_tracepoints(void);
59
60 /*
61  * Send out a notify message.
62  */
63 static void trace_note(struct blk_trace *bt, pid_t pid, int action,
64                        const void *data, size_t len)
65 {
66         struct blk_io_trace *t;
67         struct ring_buffer_event *event = NULL;
68         struct ring_buffer *buffer = NULL;
69         int pc = 0;
70         int cpu = smp_processor_id();
71         bool blk_tracer = blk_tracer_enabled;
72
73         if (blk_tracer) {
74                 buffer = blk_tr->buffer;
75                 pc = preempt_count();
76                 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
77                                                   sizeof(*t) + len,
78                                                   0, pc);
79                 if (!event)
80                         return;
81                 t = ring_buffer_event_data(event);
82                 goto record_it;
83         }
84
85         if (!bt->rchan)
86                 return;
87
88         t = relay_reserve(bt->rchan, sizeof(*t) + len);
89         if (t) {
90                 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
91                 t->time = ktime_to_ns(ktime_get());
92 record_it:
93                 t->device = bt->dev;
94                 t->action = action;
95                 t->pid = pid;
96                 t->cpu = cpu;
97                 t->pdu_len = len;
98                 memcpy((void *) t + sizeof(*t), data, len);
99
100                 if (blk_tracer)
101                         trace_buffer_unlock_commit(buffer, event, 0, pc);
102         }
103 }
104
105 /*
106  * Send out a notify for this process, if we haven't done so since a trace
107  * started
108  */
109 static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
110 {
111         tsk->btrace_seq = blktrace_seq;
112         trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
113 }
114
115 static void trace_note_time(struct blk_trace *bt)
116 {
117         struct timespec now;
118         unsigned long flags;
119         u32 words[2];
120
121         getnstimeofday(&now);
122         words[0] = now.tv_sec;
123         words[1] = now.tv_nsec;
124
125         local_irq_save(flags);
126         trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
127         local_irq_restore(flags);
128 }
129
130 void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
131 {
132         int n;
133         va_list args;
134         unsigned long flags;
135         char *buf;
136
137         if (unlikely(bt->trace_state != Blktrace_running &&
138                      !blk_tracer_enabled))
139                 return;
140
141         local_irq_save(flags);
142         buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
143         va_start(args, fmt);
144         n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
145         va_end(args);
146
147         trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
148         local_irq_restore(flags);
149 }
150 EXPORT_SYMBOL_GPL(__trace_note_message);
151
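/*
 * Event filter used by __blk_add_trace(): returns nonzero when the event
 * should be dropped because its action class is masked out, its sector lies
 * outside the traced LBA range, or a pid filter is set and does not match.
 */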
152 static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
153                          pid_t pid)
154 {
155         if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
156                 return 1;
157         if (sector && (sector < bt->start_lba || sector > bt->end_lba))
158                 return 1;
159         if (bt->pid && pid != bt->pid)
160                 return 1;
161
162         return 0;
163 }
164
165 /*
166  * Data direction bit lookup
167  */
168 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
169                                  BLK_TC_ACT(BLK_TC_WRITE) };
170
171 /* The ilog2() calls fall out because they're constant */
172 #define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
173           (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
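/*
 * MASK_TC_BIT(rw, X) takes the BIO_RW_X bit out of @rw and shifts it up to
 * the matching BLK_TC_X position above BLK_TC_SHIFT, so the request flags
 * map directly onto the action-class bits of the "what" word.
 */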
174
175 /*
176  * The worker for the various blk_add_trace*() types. Fills out a
177  * blk_io_trace structure and places it in a per-cpu subbuffer.
178  */
179 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
180                      int rw, u32 what, int error, int pdu_len, void *pdu_data)
181 {
182         struct task_struct *tsk = current;
183         struct ring_buffer_event *event = NULL;
184         struct ring_buffer *buffer = NULL;
185         struct blk_io_trace *t;
186         unsigned long flags = 0;
187         unsigned long *sequence;
188         pid_t pid;
189         int cpu, pc = 0;
190         bool blk_tracer = blk_tracer_enabled;
191
192         if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
193                 return;
194
195         what |= ddir_act[rw & WRITE];
196         what |= MASK_TC_BIT(rw, BARRIER);
197         what |= MASK_TC_BIT(rw, SYNCIO);
198         what |= MASK_TC_BIT(rw, AHEAD);
199         what |= MASK_TC_BIT(rw, META);
200         what |= MASK_TC_BIT(rw, DISCARD);
201
202         pid = tsk->pid;
203         if (act_log_check(bt, what, sector, pid))
204                 return;
205         cpu = raw_smp_processor_id();
206
207         if (blk_tracer) {
208                 tracing_record_cmdline(current);
209
210                 buffer = blk_tr->buffer;
211                 pc = preempt_count();
212                 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
213                                                   sizeof(*t) + pdu_len,
214                                                   0, pc);
215                 if (!event)
216                         return;
217                 t = ring_buffer_event_data(event);
218                 goto record_it;
219         }
220
221         /*
222          * A word about the locking here - we disable interrupts to reserve
223          * some space in the relay per-cpu buffer, to prevent an irq
224          * from coming in and stepping on our toes.
225          */
226         local_irq_save(flags);
227
228         if (unlikely(tsk->btrace_seq != blktrace_seq))
229                 trace_note_tsk(bt, tsk);
230
231         t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
232         if (t) {
233                 sequence = per_cpu_ptr(bt->sequence, cpu);
234
235                 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
236                 t->sequence = ++(*sequence);
237                 t->time = ktime_to_ns(ktime_get());
238 record_it:
239                 /*
240                  * These two are not needed in ftrace as they are in the
241                  * generic trace_entry, filled by tracing_generic_entry_update,
242                  * but for the trace_event->bin() synthesizer benefit we do it
243                  * here too.
244                  */
245                 t->cpu = cpu;
246                 t->pid = pid;
247
248                 t->sector = sector;
249                 t->bytes = bytes;
250                 t->action = what;
251                 t->device = bt->dev;
252                 t->error = error;
253                 t->pdu_len = pdu_len;
254
255                 if (pdu_len)
256                         memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
257
258                 if (blk_tracer) {
259                         trace_buffer_unlock_commit(buffer, event, 0, pc);
260                         return;
261                 }
262         }
263
264         local_irq_restore(flags);
265 }
266
267 static struct dentry *blk_tree_root;
268 static DEFINE_MUTEX(blk_tree_mutex);
269
270 static void blk_trace_free(struct blk_trace *bt)
271 {
272         debugfs_remove(bt->msg_file);
273         debugfs_remove(bt->dropped_file);
274         relay_close(bt->rchan);
275         debugfs_remove(bt->dir);
276         free_percpu(bt->sequence);
277         free_percpu(bt->msg_data);
278         kfree(bt);
279 }
280
281 static void blk_trace_cleanup(struct blk_trace *bt)
282 {
283         blk_trace_free(bt);
284         if (atomic_dec_and_test(&blk_probes_ref))
285                 blk_unregister_tracepoints();
286 }
287
288 int blk_trace_remove(struct request_queue *q)
289 {
290         struct blk_trace *bt;
291
292         bt = xchg(&q->blk_trace, NULL);
293         if (!bt)
294                 return -EINVAL;
295
296         if (bt->trace_state != Blktrace_running)
297                 blk_trace_cleanup(bt);
298
299         return 0;
300 }
301 EXPORT_SYMBOL_GPL(blk_trace_remove);
302
303 static int blk_dropped_open(struct inode *inode, struct file *filp)
304 {
305         filp->private_data = inode->i_private;
306
307         return 0;
308 }
309
310 static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
311                                 size_t count, loff_t *ppos)
312 {
313         struct blk_trace *bt = filp->private_data;
314         char buf[16];
315
316         snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
317
318         return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
319 }
320
321 static const struct file_operations blk_dropped_fops = {
322         .owner =        THIS_MODULE,
323         .open =         blk_dropped_open,
324         .read =         blk_dropped_read,
325 };
326
327 static int blk_msg_open(struct inode *inode, struct file *filp)
328 {
329         filp->private_data = inode->i_private;
330
331         return 0;
332 }
333
334 static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
335                                 size_t count, loff_t *ppos)
336 {
337         char *msg;
338         struct blk_trace *bt;
339
340         if (count >= BLK_TN_MAX_MSG)
341                 return -EINVAL;
342
343         msg = kmalloc(count + 1, GFP_KERNEL);
344         if (msg == NULL)
345                 return -ENOMEM;
346
347         if (copy_from_user(msg, buffer, count)) {
348                 kfree(msg);
349                 return -EFAULT;
350         }
351
352         msg[count] = '\0';
353         bt = filp->private_data;
354         __trace_note_message(bt, "%s", msg);
355         kfree(msg);
356
357         return count;
358 }
359
360 static const struct file_operations blk_msg_fops = {
361         .owner =        THIS_MODULE,
362         .open =         blk_msg_open,
363         .write =        blk_msg_write,
364 };
365
366 /*
367  * Keep track of how many times we encountered a full subbuffer, to aid
368  * the user space app in telling how many lost events there were.
369  */
370 static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
371                                      void *prev_subbuf, size_t prev_padding)
372 {
373         struct blk_trace *bt;
374
375         if (!relay_buf_full(buf))
376                 return 1;
377
378         bt = buf->chan->private_data;
379         atomic_inc(&bt->dropped);
380         return 0;
381 }
382
383 static int blk_remove_buf_file_callback(struct dentry *dentry)
384 {
385         debugfs_remove(dentry);
386
387         return 0;
388 }
389
390 static struct dentry *blk_create_buf_file_callback(const char *filename,
391                                                    struct dentry *parent,
392                                                    int mode,
393                                                    struct rchan_buf *buf,
394                                                    int *is_global)
395 {
396         return debugfs_create_file(filename, mode, parent, buf,
397                                         &relay_file_operations);
398 }
399
400 static struct rchan_callbacks blk_relay_callbacks = {
401         .subbuf_start           = blk_subbuf_start_callback,
402         .create_buf_file        = blk_create_buf_file_callback,
403         .remove_buf_file        = blk_remove_buf_file_callback,
404 };
405
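/*
 * Derive the traced LBA range from the opened partition, if any: trace only
 * that partition's sectors, otherwise cover the whole device (0 to -1ULL).
 */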
406 static void blk_trace_setup_lba(struct blk_trace *bt,
407                                 struct block_device *bdev)
408 {
409         struct hd_struct *part = NULL;
410
411         if (bdev)
412                 part = bdev->bd_part;
413
414         if (part) {
415                 bt->start_lba = part->start_sect;
416                 bt->end_lba = part->start_sect + part->nr_sects;
417         } else {
418                 bt->start_lba = 0;
419                 bt->end_lba = -1ULL;
420         }
421 }
422
423 /*
424  * Setup everything required to start tracing
425  */
426 int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
427                        struct block_device *bdev,
428                        struct blk_user_trace_setup *buts)
429 {
430         struct blk_trace *old_bt, *bt = NULL;
431         struct dentry *dir = NULL;
432         int ret, i;
433
434         if (!buts->buf_size || !buts->buf_nr)
435                 return -EINVAL;
436
437         strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
438         buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
439
440         /*
441          * some device names have larger paths - convert the slashes
442          * to underscores for this to work as expected
443          */
444         for (i = 0; i < strlen(buts->name); i++)
445                 if (buts->name[i] == '/')
446                         buts->name[i] = '_';
447
448         bt = kzalloc(sizeof(*bt), GFP_KERNEL);
449         if (!bt)
450                 return -ENOMEM;
451
452         ret = -ENOMEM;
453         bt->sequence = alloc_percpu(unsigned long);
454         if (!bt->sequence)
455                 goto err;
456
457         bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
458         if (!bt->msg_data)
459                 goto err;
460
461         ret = -ENOENT;
462
463         mutex_lock(&blk_tree_mutex);
464         if (!blk_tree_root) {
465                 blk_tree_root = debugfs_create_dir("block", NULL);
466                 if (!blk_tree_root) {
467                         mutex_unlock(&blk_tree_mutex);
468                         goto err;
469                 }
470         }
471         mutex_unlock(&blk_tree_mutex);
472
473         dir = debugfs_create_dir(buts->name, blk_tree_root);
474
475         if (!dir)
476                 goto err;
477
478         bt->dir = dir;
479         bt->dev = dev;
480         atomic_set(&bt->dropped, 0);
481
482         ret = -EIO;
483         bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
484                                                &blk_dropped_fops);
485         if (!bt->dropped_file)
486                 goto err;
487
488         bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
489         if (!bt->msg_file)
490                 goto err;
491
492         bt->rchan = relay_open("trace", dir, buts->buf_size,
493                                 buts->buf_nr, &blk_relay_callbacks, bt);
494         if (!bt->rchan)
495                 goto err;
496
497         bt->act_mask = buts->act_mask;
498         if (!bt->act_mask)
499                 bt->act_mask = (u16) -1;
500
501         blk_trace_setup_lba(bt, bdev);
502
503         /* overwrite with user settings */
504         if (buts->start_lba)
505                 bt->start_lba = buts->start_lba;
506         if (buts->end_lba)
507                 bt->end_lba = buts->end_lba;
508
509         bt->pid = buts->pid;
510         bt->trace_state = Blktrace_setup;
511
512         ret = -EBUSY;
513         old_bt = xchg(&q->blk_trace, bt);
514         if (old_bt) {
515                 (void) xchg(&q->blk_trace, old_bt);
516                 goto err;
517         }
518
519         if (atomic_inc_return(&blk_probes_ref) == 1)
520                 blk_register_tracepoints();
521
522         return 0;
523 err:
524         blk_trace_free(bt);
525         return ret;
526 }
527
528 int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
529                     struct block_device *bdev,
530                     char __user *arg)
531 {
532         struct blk_user_trace_setup buts;
533         int ret;
534
535         ret = copy_from_user(&buts, arg, sizeof(buts));
536         if (ret)
537                 return -EFAULT;
538
539         ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
540         if (ret)
541                 return ret;
542
543         if (copy_to_user(arg, &buts, sizeof(buts))) {
544                 blk_trace_remove(q);
545                 return -EFAULT;
546         }
547         return 0;
548 }
549 EXPORT_SYMBOL_GPL(blk_trace_setup);
550
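/*
 * Start or stop an already set up trace.  Bumping blktrace_seq on start
 * invalidates every task's cached btrace_seq, so __blk_add_trace() emits a
 * fresh BLK_TN_PROCESS note the next time each task submits I/O.
 */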
551 int blk_trace_startstop(struct request_queue *q, int start)
552 {
553         int ret;
554         struct blk_trace *bt = q->blk_trace;
555
556         if (bt == NULL)
557                 return -EINVAL;
558
559         /*
560          * For starting a trace, we can transition from a setup or stopped
561          * trace. For stopping a trace, the state must be running
562          */
563         ret = -EINVAL;
564         if (start) {
565                 if (bt->trace_state == Blktrace_setup ||
566                     bt->trace_state == Blktrace_stopped) {
567                         blktrace_seq++;
568                         smp_mb();
569                         bt->trace_state = Blktrace_running;
570
571                         trace_note_time(bt);
572                         ret = 0;
573                 }
574         } else {
575                 if (bt->trace_state == Blktrace_running) {
576                         bt->trace_state = Blktrace_stopped;
577                         relay_flush(bt->rchan);
578                         ret = 0;
579                 }
580         }
581
582         return ret;
583 }
584 EXPORT_SYMBOL_GPL(blk_trace_startstop);
585
586 /**
587  * blk_trace_ioctl: - handle the ioctls associated with tracing
588  * @bdev:       the block device
589  * @cmd:        the ioctl cmd
590  * @arg:        the argument data, if any
591  *
592  **/
593 int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
594 {
595         struct request_queue *q;
596         int ret, start = 0;
597         char b[BDEVNAME_SIZE];
598
599         q = bdev_get_queue(bdev);
600         if (!q)
601                 return -ENXIO;
602
603         mutex_lock(&bdev->bd_mutex);
604
605         switch (cmd) {
606         case BLKTRACESETUP:
607                 bdevname(bdev, b);
608                 ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
609                 break;
610         case BLKTRACESTART:
611                 start = 1;
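                /* fall through - START and STOP share blk_trace_startstop() */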
612         case BLKTRACESTOP:
613                 ret = blk_trace_startstop(q, start);
614                 break;
615         case BLKTRACETEARDOWN:
616                 ret = blk_trace_remove(q);
617                 break;
618         default:
619                 ret = -ENOTTY;
620                 break;
621         }
622
623         mutex_unlock(&bdev->bd_mutex);
624         return ret;
625 }
626
627 /**
628  * blk_trace_shutdown: - stop and cleanup trace structures
629  * @q:    the request queue associated with the device
630  *
631  **/
632 void blk_trace_shutdown(struct request_queue *q)
633 {
634         if (q->blk_trace) {
635                 blk_trace_startstop(q, 0);
636                 blk_trace_remove(q);
637         }
638 }
639
640 /*
641  * blktrace probes
642  */
643
644 /**
645  * blk_add_trace_rq - Add a trace for a request oriented action
646  * @q:          queue the io is for
647  * @rq:         the source request
648  * @what:       the action
649  *
650  * Description:
651  *     Records an action against a request. Will log the request offset + size.
652  *
653  **/
654 static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
655                                     u32 what)
656 {
657         struct blk_trace *bt = q->blk_trace;
658         int rw = rq->cmd_flags & 0x03;
659
660         if (likely(!bt))
661                 return;
662
663         if (blk_discard_rq(rq))
664                 rw |= (1 << BIO_RW_DISCARD);
665
666         if (blk_pc_request(rq)) {
667                 what |= BLK_TC_ACT(BLK_TC_PC);
668                 __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
669                                 what, rq->errors, rq->cmd_len, rq->cmd);
670         } else  {
671                 what |= BLK_TC_ACT(BLK_TC_FS);
672                 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
673                                 what, rq->errors, 0, NULL);
674         }
675 }
676
677 static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
678 {
679         blk_add_trace_rq(q, rq, BLK_TA_ABORT);
680 }
681
682 static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
683 {
684         blk_add_trace_rq(q, rq, BLK_TA_INSERT);
685 }
686
687 static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
688 {
689         blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
690 }
691
692 static void blk_add_trace_rq_requeue(struct request_queue *q,
693                                      struct request *rq)
694 {
695         blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
696 }
697
698 static void blk_add_trace_rq_complete(struct request_queue *q,
699                                       struct request *rq)
700 {
701         blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
702 }
703
704 /**
705  * blk_add_trace_bio - Add a trace for a bio oriented action
706  * @q:          queue the io is for
707  * @bio:        the source bio
708  * @what:       the action
709  *
710  * Description:
711  *     Records an action against a bio. Will log the bio offset + size.
712  *
713  **/
714 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
715                                      u32 what)
716 {
717         struct blk_trace *bt = q->blk_trace;
718
719         if (likely(!bt))
720                 return;
721
722         __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
723                         !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
724 }
725
726 static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
727 {
728         blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
729 }
730
731 static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
732 {
733         blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
734 }
735
736 static void blk_add_trace_bio_backmerge(struct request_queue *q,
737                                         struct bio *bio)
738 {
739         blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
740 }
741
742 static void blk_add_trace_bio_frontmerge(struct request_queue *q,
743                                          struct bio *bio)
744 {
745         blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
746 }
747
748 static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
749 {
750         blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
751 }
752
753 static void blk_add_trace_getrq(struct request_queue *q,
754                                 struct bio *bio, int rw)
755 {
756         if (bio)
757                 blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
758         else {
759                 struct blk_trace *bt = q->blk_trace;
760
761                 if (bt)
762                         __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
763         }
764 }
765
766
767 static void blk_add_trace_sleeprq(struct request_queue *q,
768                                   struct bio *bio, int rw)
769 {
770         if (bio)
771                 blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
772         else {
773                 struct blk_trace *bt = q->blk_trace;
774
775                 if (bt)
776                         __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
777                                         0, 0, NULL);
778         }
779 }
780
781 static void blk_add_trace_plug(struct request_queue *q)
782 {
783         struct blk_trace *bt = q->blk_trace;
784
785         if (bt)
786                 __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
787 }
788
789 static void blk_add_trace_unplug_io(struct request_queue *q)
790 {
791         struct blk_trace *bt = q->blk_trace;
792
793         if (bt) {
794                 unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
795                 __be64 rpdu = cpu_to_be64(pdu);
796
797                 __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
798                                 sizeof(rpdu), &rpdu);
799         }
800 }
801
802 static void blk_add_trace_unplug_timer(struct request_queue *q)
803 {
804         struct blk_trace *bt = q->blk_trace;
805
806         if (bt) {
807                 unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
808                 __be64 rpdu = cpu_to_be64(pdu);
809
810                 __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
811                                 sizeof(rpdu), &rpdu);
812         }
813 }
814
815 static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
816                                 unsigned int pdu)
817 {
818         struct blk_trace *bt = q->blk_trace;
819
820         if (bt) {
821                 __be64 rpdu = cpu_to_be64(pdu);
822
823                 __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
824                                 BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
825                                 sizeof(rpdu), &rpdu);
826         }
827 }
828
829 /**
830  * blk_add_trace_remap - Add a trace for a remap operation
831  * @q:          queue the io is for
832  * @bio:        the source bio
833  * @dev:        target device
834  * @from:       source sector
835  *
836  * Description:
837  *     Device mapper or raid targets sometimes need to split a bio because
838  *     it spans a stripe (or similar). Add a trace for that action.
839  *
840  **/
841 static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
842                                        dev_t dev, sector_t from)
843 {
844         struct blk_trace *bt = q->blk_trace;
845         struct blk_io_trace_remap r;
846
847         if (likely(!bt))
848                 return;
849
850         r.device_from = cpu_to_be32(dev);
851         r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
852         r.sector_from = cpu_to_be64(from);
853
854         __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
855                         BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
856                         sizeof(r), &r);
857 }
858
859 /**
860  * blk_add_trace_rq_remap - Add a trace for a request-remap operation
861  * @q:          queue the io is for
862  * @rq:         the source request
863  * @dev:        target device
864  * @from:       source sector
865  *
866  * Description:
867  *     Device mapper remaps requests to other devices.
868  *     Add a trace for that action.
869  *
870  **/
871 static void blk_add_trace_rq_remap(struct request_queue *q,
872                                    struct request *rq, dev_t dev,
873                                    sector_t from)
874 {
875         struct blk_trace *bt = q->blk_trace;
876         struct blk_io_trace_remap r;
877
878         if (likely(!bt))
879                 return;
880
881         r.device_from = cpu_to_be32(dev);
882         r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
883         r.sector_from = cpu_to_be64(from);
884
885         __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
886                         rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
887                         sizeof(r), &r);
888 }
889
890 /**
891  * blk_add_driver_data - Add binary message with driver-specific data
892  * @q:          queue the io is for
893  * @rq:         io request
894  * @data:       driver-specific data
895  * @len:        length of driver-specific data
896  *
897  * Description:
898  *     Some drivers might want to write driver-specific data per request.
899  *
900  **/
901 void blk_add_driver_data(struct request_queue *q,
902                          struct request *rq,
903                          void *data, size_t len)
904 {
905         struct blk_trace *bt = q->blk_trace;
906
907         if (likely(!bt))
908                 return;
909
910         if (blk_pc_request(rq))
911                 __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
912                                 BLK_TA_DRV_DATA, rq->errors, len, data);
913         else
914                 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
915                                 BLK_TA_DRV_DATA, rq->errors, len, data);
916 }
917 EXPORT_SYMBOL_GPL(blk_add_driver_data);
918
919 static void blk_register_tracepoints(void)
920 {
921         int ret;
922
923         ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
924         WARN_ON(ret);
925         ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
926         WARN_ON(ret);
927         ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
928         WARN_ON(ret);
929         ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
930         WARN_ON(ret);
931         ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
932         WARN_ON(ret);
933         ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
934         WARN_ON(ret);
935         ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
936         WARN_ON(ret);
937         ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
938         WARN_ON(ret);
939         ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
940         WARN_ON(ret);
941         ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
942         WARN_ON(ret);
943         ret = register_trace_block_getrq(blk_add_trace_getrq);
944         WARN_ON(ret);
945         ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
946         WARN_ON(ret);
947         ret = register_trace_block_plug(blk_add_trace_plug);
948         WARN_ON(ret);
949         ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
950         WARN_ON(ret);
951         ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
952         WARN_ON(ret);
953         ret = register_trace_block_split(blk_add_trace_split);
954         WARN_ON(ret);
955         ret = register_trace_block_remap(blk_add_trace_remap);
956         WARN_ON(ret);
957         ret = register_trace_block_rq_remap(blk_add_trace_rq_remap);
958         WARN_ON(ret);
959 }
960
961 static void blk_unregister_tracepoints(void)
962 {
963         unregister_trace_block_rq_remap(blk_add_trace_rq_remap);
964         unregister_trace_block_remap(blk_add_trace_remap);
965         unregister_trace_block_split(blk_add_trace_split);
966         unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
967         unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
968         unregister_trace_block_plug(blk_add_trace_plug);
969         unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
970         unregister_trace_block_getrq(blk_add_trace_getrq);
971         unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
972         unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
973         unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
974         unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
975         unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
976         unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
977         unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
978         unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
979         unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
980         unregister_trace_block_rq_abort(blk_add_trace_rq_abort);
981
982         tracepoint_synchronize_unregister();
983 }
984
985 /*
986  * struct blk_io_tracer formatting routines
987  */
988
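/*
 * Build the blkparse-style RWBS string: one of D/W/R/N for the data
 * direction (or 'N' for a notify message), followed by the A/B/S/M modifier
 * characters taken from the action's trace categories.
 */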
989 static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
990 {
991         int i = 0;
992         int tc = t->action >> BLK_TC_SHIFT;
993
994         if (t->action == BLK_TN_MESSAGE) {
995                 rwbs[i++] = 'N';
996                 goto out;
997         }
998
999         if (tc & BLK_TC_DISCARD)
1000                 rwbs[i++] = 'D';
1001         else if (tc & BLK_TC_WRITE)
1002                 rwbs[i++] = 'W';
1003         else if (t->bytes)
1004                 rwbs[i++] = 'R';
1005         else
1006                 rwbs[i++] = 'N';
1007
1008         if (tc & BLK_TC_AHEAD)
1009                 rwbs[i++] = 'A';
1010         if (tc & BLK_TC_BARRIER)
1011                 rwbs[i++] = 'B';
1012         if (tc & BLK_TC_SYNC)
1013                 rwbs[i++] = 'S';
1014         if (tc & BLK_TC_META)
1015                 rwbs[i++] = 'M';
1016 out:
1017         rwbs[i] = '\0';
1018 }
1019
1020 static inline
1021 const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
1022 {
1023         return (const struct blk_io_trace *)ent;
1024 }
1025
1026 static inline const void *pdu_start(const struct trace_entry *ent)
1027 {
1028         return te_blk_io_trace(ent) + 1;
1029 }
1030
1031 static inline u32 t_action(const struct trace_entry *ent)
1032 {
1033         return te_blk_io_trace(ent)->action;
1034 }
1035
1036 static inline u32 t_bytes(const struct trace_entry *ent)
1037 {
1038         return te_blk_io_trace(ent)->bytes;
1039 }
1040
1041 static inline u32 t_sec(const struct trace_entry *ent)
1042 {
1043         return te_blk_io_trace(ent)->bytes >> 9;
1044 }
1045
1046 static inline unsigned long long t_sector(const struct trace_entry *ent)
1047 {
1048         return te_blk_io_trace(ent)->sector;
1049 }
1050
1051 static inline __u16 t_error(const struct trace_entry *ent)
1052 {
1053         return te_blk_io_trace(ent)->error;
1054 }
1055
1056 static __u64 get_pdu_int(const struct trace_entry *ent)
1057 {
1058         const __u64 *val = pdu_start(ent);
1059         return be64_to_cpu(*val);
1060 }
1061
1062 static void get_pdu_remap(const struct trace_entry *ent,
1063                           struct blk_io_trace_remap *r)
1064 {
1065         const struct blk_io_trace_remap *__r = pdu_start(ent);
1066         __u64 sector_from = __r->sector_from;
1067
1068         r->device_from = be32_to_cpu(__r->device_from);
1069         r->device_to   = be32_to_cpu(__r->device_to);
1070         r->sector_from = be64_to_cpu(sector_from);
1071 }
1072
1073 typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);
1074
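/*
 * The classic format approximates a blkparse line built from the relay
 * output: "major,minor cpu seconds.nanoseconds pid action rwbs ...".
 */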
1075 static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
1076 {
1077         char rwbs[6];
1078         unsigned long long ts  = iter->ts;
1079         unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
1080         unsigned secs          = (unsigned long)ts;
1081         const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1082
1083         fill_rwbs(rwbs, t);
1084
1085         return trace_seq_printf(&iter->seq,
1086                                 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
1087                                 MAJOR(t->device), MINOR(t->device), iter->cpu,
1088                                 secs, nsec_rem, iter->ent->pid, act, rwbs);
1089 }
1090
1091 static int blk_log_action(struct trace_iterator *iter, const char *act)
1092 {
1093         char rwbs[6];
1094         const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1095
1096         fill_rwbs(rwbs, t);
1097         return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
1098                                 MAJOR(t->device), MINOR(t->device), act, rwbs);
1099 }
1100
1101 static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
1102 {
1103         const unsigned char *pdu_buf;
1104         int pdu_len;
1105         int i, end, ret;
1106
1107         pdu_buf = pdu_start(ent);
1108         pdu_len = te_blk_io_trace(ent)->pdu_len;
1109
1110         if (!pdu_len)
1111                 return 1;
1112
1113         /* find the last zero that needs to be printed */
1114         for (end = pdu_len - 1; end >= 0; end--)
1115                 if (pdu_buf[end])
1116                         break;
1117         end++;
1118
1119         if (!trace_seq_putc(s, '('))
1120                 return 0;
1121
1122         for (i = 0; i < pdu_len; i++) {
1123
1124                 ret = trace_seq_printf(s, "%s%02x",
1125                                        i == 0 ? "" : " ", pdu_buf[i]);
1126                 if (!ret)
1127                         return ret;
1128
1129                 /*
1130                  * stop when the rest is just zeroes and indicate so
1131                  * with a ".." appended
1132                  */
1133                 if (i == end && end != pdu_len - 1)
1134                         return trace_seq_puts(s, " ..) ");
1135         }
1136
1137         return trace_seq_puts(s, ") ");
1138 }
1139
1140 static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
1141 {
1142         char cmd[TASK_COMM_LEN];
1143
1144         trace_find_cmdline(ent->pid, cmd);
1145
1146         if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1147                 int ret;
1148
1149                 ret = trace_seq_printf(s, "%u ", t_bytes(ent));
1150                 if (!ret)
1151                         return 0;
1152                 ret = blk_log_dump_pdu(s, ent);
1153                 if (!ret)
1154                         return 0;
1155                 return trace_seq_printf(s, "[%s]\n", cmd);
1156         } else {
1157                 if (t_sec(ent))
1158                         return trace_seq_printf(s, "%llu + %u [%s]\n",
1159                                                 t_sector(ent), t_sec(ent), cmd);
1160                 return trace_seq_printf(s, "[%s]\n", cmd);
1161         }
1162 }
1163
1164 static int blk_log_with_error(struct trace_seq *s,
1165                               const struct trace_entry *ent)
1166 {
1167         if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1168                 int ret;
1169
1170                 ret = blk_log_dump_pdu(s, ent);
1171                 if (ret)
1172                         return trace_seq_printf(s, "[%d]\n", t_error(ent));
1173                 return 0;
1174         } else {
1175                 if (t_sec(ent))
1176                         return trace_seq_printf(s, "%llu + %u [%d]\n",
1177                                                 t_sector(ent),
1178                                                 t_sec(ent), t_error(ent));
1179                 return trace_seq_printf(s, "%llu [%d]\n",
1180                                         t_sector(ent), t_error(ent));
1181         }
1182 }
1183
1184 static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
1185 {
1186         struct blk_io_trace_remap r = { .device_from = 0, };
1187
1188         get_pdu_remap(ent, &r);
1189         return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
1190                                 t_sector(ent), t_sec(ent),
1191                                 MAJOR(r.device_from), MINOR(r.device_from),
1192                                 (unsigned long long)r.sector_from);
1193 }
1194
1195 static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
1196 {
1197         char cmd[TASK_COMM_LEN];
1198
1199         trace_find_cmdline(ent->pid, cmd);
1200
1201         return trace_seq_printf(s, "[%s]\n", cmd);
1202 }
1203
1204 static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
1205 {
1206         char cmd[TASK_COMM_LEN];
1207
1208         trace_find_cmdline(ent->pid, cmd);
1209
1210         return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
1211 }
1212
1213 static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
1214 {
1215         char cmd[TASK_COMM_LEN];
1216
1217         trace_find_cmdline(ent->pid, cmd);
1218
1219         return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
1220                                 get_pdu_int(ent), cmd);
1221 }
1222
1223 static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
1224 {
1225         int ret;
1226         const struct blk_io_trace *t = te_blk_io_trace(ent);
1227
1228         ret = trace_seq_putmem(s, t + 1, t->pdu_len);
1229         if (ret)
1230                 return trace_seq_putc(s, '\n');
1231         return ret;
1232 }
1233
1234 /*
1235  * struct tracer operations
1236  */
1237
1238 static void blk_tracer_print_header(struct seq_file *m)
1239 {
1240         if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1241                 return;
1242         seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
1243                     "#  |     |     |           |   |   |\n");
1244 }
1245
1246 static void blk_tracer_start(struct trace_array *tr)
1247 {
1248         blk_tracer_enabled = true;
1249 }
1250
1251 static int blk_tracer_init(struct trace_array *tr)
1252 {
1253         blk_tr = tr;
1254         blk_tracer_start(tr);
1255         return 0;
1256 }
1257
1258 static void blk_tracer_stop(struct trace_array *tr)
1259 {
1260         blk_tracer_enabled = false;
1261 }
1262
1263 static void blk_tracer_reset(struct trace_array *tr)
1264 {
1265         blk_tracer_stop(tr);
1266 }
1267
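/*
 * Map the action code (the bits below BLK_TC_SHIFT) to its short and verbose
 * names plus the routine that formats the remainder of the line.
 */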
1268 static const struct {
1269         const char *act[2];
1270         int        (*print)(struct trace_seq *s, const struct trace_entry *ent);
1271 } what2act[] = {
1272         [__BLK_TA_QUEUE]        = {{  "Q", "queue" },      blk_log_generic },
1273         [__BLK_TA_BACKMERGE]    = {{  "M", "backmerge" },  blk_log_generic },
1274         [__BLK_TA_FRONTMERGE]   = {{  "F", "frontmerge" }, blk_log_generic },
1275         [__BLK_TA_GETRQ]        = {{  "G", "getrq" },      blk_log_generic },
1276         [__BLK_TA_SLEEPRQ]      = {{  "S", "sleeprq" },    blk_log_generic },
1277         [__BLK_TA_REQUEUE]      = {{  "R", "requeue" },    blk_log_with_error },
1278         [__BLK_TA_ISSUE]        = {{  "D", "issue" },      blk_log_generic },
1279         [__BLK_TA_COMPLETE]     = {{  "C", "complete" },   blk_log_with_error },
1280         [__BLK_TA_PLUG]         = {{  "P", "plug" },       blk_log_plug },
1281         [__BLK_TA_UNPLUG_IO]    = {{  "U", "unplug_io" },  blk_log_unplug },
1282         [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
1283         [__BLK_TA_INSERT]       = {{  "I", "insert" },     blk_log_generic },
1284         [__BLK_TA_SPLIT]        = {{  "X", "split" },      blk_log_split },
1285         [__BLK_TA_BOUNCE]       = {{  "B", "bounce" },     blk_log_generic },
1286         [__BLK_TA_REMAP]        = {{  "A", "remap" },      blk_log_remap },
1287 };
1288
1289 static enum print_line_t print_one_line(struct trace_iterator *iter,
1290                                         bool classic)
1291 {
1292         struct trace_seq *s = &iter->seq;
1293         const struct blk_io_trace *t;
1294         u16 what;
1295         int ret;
1296         bool long_act;
1297         blk_log_action_t *log_action;
1298
1299         t          = te_blk_io_trace(iter->ent);
1300         what       = t->action & ((1 << BLK_TC_SHIFT) - 1);
1301         long_act   = !!(trace_flags & TRACE_ITER_VERBOSE);
1302         log_action = classic ? &blk_log_action_classic : &blk_log_action;
1303
1304         if (t->action == BLK_TN_MESSAGE) {
1305                 ret = log_action(iter, long_act ? "message" : "m");
1306                 if (ret)
1307                         ret = blk_log_msg(s, iter->ent);
1308                 goto out;
1309         }
1310
1311         if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
1312                 ret = trace_seq_printf(s, "Unknown action %x\n", what);
1313         else {
1314                 ret = log_action(iter, what2act[what].act[long_act]);
1315                 if (ret)
1316                         ret = what2act[what].print(s, iter->ent);
1317         }
1318 out:
1319         return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
1320 }
1321
1322 static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
1323                                                int flags)
1324 {
1325         return print_one_line(iter, false);
1326 }
1327
1328 static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
1329 {
1330         struct trace_seq *s = &iter->seq;
1331         struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
1332         const int offset = offsetof(struct blk_io_trace, sector);
1333         struct blk_io_trace old = {
1334                 .magic    = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
1335                 .time     = iter->ts,
1336         };
1337
1338         if (!trace_seq_putmem(s, &old, offset))
1339                 return 0;
1340         return trace_seq_putmem(s, &t->sector,
1341                                 sizeof(old) - offset + t->pdu_len);
1342 }
1343
1344 static enum print_line_t
1345 blk_trace_event_print_binary(struct trace_iterator *iter, int flags)
1346 {
1347         return blk_trace_synthesize_old_trace(iter) ?
1348                         TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
1349 }
1350
1351 static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
1352 {
1353         if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1354                 return TRACE_TYPE_UNHANDLED;
1355
1356         return print_one_line(iter, true);
1357 }
1358
1359 static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
1360 {
1361         /* don't output context-info for blk_classic output */
1362         if (bit == TRACE_BLK_OPT_CLASSIC) {
1363                 if (set)
1364                         trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
1365                 else
1366                         trace_flags |= TRACE_ITER_CONTEXT_INFO;
1367         }
1368         return 0;
1369 }
1370
1371 static struct tracer blk_tracer __read_mostly = {
1372         .name           = "blk",
1373         .init           = blk_tracer_init,
1374         .reset          = blk_tracer_reset,
1375         .start          = blk_tracer_start,
1376         .stop           = blk_tracer_stop,
1377         .print_header   = blk_tracer_print_header,
1378         .print_line     = blk_tracer_print_line,
1379         .flags          = &blk_tracer_flags,
1380         .set_flag       = blk_tracer_set_flag,
1381 };
1382
1383 static struct trace_event trace_blk_event = {
1384         .type           = TRACE_BLK,
1385         .trace          = blk_trace_event_print,
1386         .binary         = blk_trace_event_print_binary,
1387 };
1388
1389 static int __init init_blk_tracer(void)
1390 {
1391         if (!register_ftrace_event(&trace_blk_event)) {
1392                 pr_warning("Warning: could not register block events\n");
1393                 return 1;
1394         }
1395
1396         if (register_tracer(&blk_tracer) != 0) {
1397                 pr_warning("Warning: could not register the block tracer\n");
1398                 unregister_ftrace_event(&trace_blk_event);
1399                 return 1;
1400         }
1401
1402         return 0;
1403 }
1404
1405 device_initcall(init_blk_tracer);
1406
1407 static int blk_trace_remove_queue(struct request_queue *q)
1408 {
1409         struct blk_trace *bt;
1410
1411         bt = xchg(&q->blk_trace, NULL);
1412         if (bt == NULL)
1413                 return -EINVAL;
1414
1415         if (atomic_dec_and_test(&blk_probes_ref))
1416                 blk_unregister_tracepoints();
1417
1418         blk_trace_free(bt);
1419         return 0;
1420 }
1421
1422 /*
1423  * Setup everything required to start tracing
1424  */
1425 static int blk_trace_setup_queue(struct request_queue *q,
1426                                  struct block_device *bdev)
1427 {
1428         struct blk_trace *old_bt, *bt = NULL;
1429         int ret = -ENOMEM;
1430
1431         bt = kzalloc(sizeof(*bt), GFP_KERNEL);
1432         if (!bt)
1433                 return -ENOMEM;
1434
1435         bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
1436         if (!bt->msg_data)
1437                 goto free_bt;
1438
1439         bt->dev = bdev->bd_dev;
1440         bt->act_mask = (u16)-1;
1441
1442         blk_trace_setup_lba(bt, bdev);
1443
1444         old_bt = xchg(&q->blk_trace, bt);
1445         if (old_bt != NULL) {
1446                 (void)xchg(&q->blk_trace, old_bt);
1447                 ret = -EBUSY;
1448                 goto free_bt;
1449         }
1450
1451         if (atomic_inc_return(&blk_probes_ref) == 1)
1452                 blk_register_tracepoints();
1453         return 0;
1454
1455 free_bt:
1456         blk_trace_free(bt);
1457         return ret;
1458 }
1459
1460 /*
1461  * sysfs interface to enable and configure tracing
1462  */
1463
1464 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1465                                          struct device_attribute *attr,
1466                                          char *buf);
1467 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1468                                           struct device_attribute *attr,
1469                                           const char *buf, size_t count);
1470 #define BLK_TRACE_DEVICE_ATTR(_name) \
1471         DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
1472                     sysfs_blk_trace_attr_show, \
1473                     sysfs_blk_trace_attr_store)
1474
1475 static BLK_TRACE_DEVICE_ATTR(enable);
1476 static BLK_TRACE_DEVICE_ATTR(act_mask);
1477 static BLK_TRACE_DEVICE_ATTR(pid);
1478 static BLK_TRACE_DEVICE_ATTR(start_lba);
1479 static BLK_TRACE_DEVICE_ATTR(end_lba);
1480
1481 static struct attribute *blk_trace_attrs[] = {
1482         &dev_attr_enable.attr,
1483         &dev_attr_act_mask.attr,
1484         &dev_attr_pid.attr,
1485         &dev_attr_start_lba.attr,
1486         &dev_attr_end_lba.attr,
1487         NULL
1488 };
1489
1490 struct attribute_group blk_trace_attr_group = {
1491         .name  = "trace",
1492         .attrs = blk_trace_attrs,
1493 };
1494
1495 static const struct {
1496         int mask;
1497         const char *str;
1498 } mask_maps[] = {
1499         { BLK_TC_READ,          "read"          },
1500         { BLK_TC_WRITE,         "write"         },
1501         { BLK_TC_BARRIER,       "barrier"       },
1502         { BLK_TC_SYNC,          "sync"          },
1503         { BLK_TC_QUEUE,         "queue"         },
1504         { BLK_TC_REQUEUE,       "requeue"       },
1505         { BLK_TC_ISSUE,         "issue"         },
1506         { BLK_TC_COMPLETE,      "complete"      },
1507         { BLK_TC_FS,            "fs"            },
1508         { BLK_TC_PC,            "pc"            },
1509         { BLK_TC_AHEAD,         "ahead"         },
1510         { BLK_TC_META,          "meta"          },
1511         { BLK_TC_DISCARD,       "discard"       },
1512         { BLK_TC_DRV_DATA,      "drv_data"      },
1513 };
1514
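/*
 * Parse a comma-separated list of category names from mask_maps[] into an
 * act_mask value.  Returns -EINVAL for an unknown token and -ENOMEM if the
 * temporary copy of the string cannot be allocated.
 */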
1515 static int blk_trace_str2mask(const char *str)
1516 {
1517         int i;
1518         int mask = 0;
1519         char *buf, *s, *token;
1520
1521         buf = kstrdup(str, GFP_KERNEL);
1522         if (buf == NULL)
1523                 return -ENOMEM;
1524         s = strstrip(buf);
1525
1526         while (1) {
1527                 token = strsep(&s, ",");
1528                 if (token == NULL)
1529                         break;
1530
1531                 if (*token == '\0')
1532                         continue;
1533
1534                 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1535                         if (strcasecmp(token, mask_maps[i].str) == 0) {
1536                                 mask |= mask_maps[i].mask;
1537                                 break;
1538                         }
1539                 }
1540                 if (i == ARRAY_SIZE(mask_maps)) {
1541                         mask = -EINVAL;
1542                         break;
1543                 }
1544         }
1545         kfree(buf);
1546
1547         return mask;
1548 }
1549
1550 static ssize_t blk_trace_mask2str(char *buf, int mask)
1551 {
1552         int i;
1553         char *p = buf;
1554
1555         for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1556                 if (mask & mask_maps[i].mask) {
1557                         p += sprintf(p, "%s%s",
1558                                     (p == buf) ? "" : ",", mask_maps[i].str);
1559                 }
1560         }
1561         *p++ = '\n';
1562
1563         return p - buf;
1564 }
1565
1566 static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
1567 {
1568         if (bdev->bd_disk == NULL)
1569                 return NULL;
1570
1571         return bdev_get_queue(bdev);
1572 }
1573
1574 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1575                                          struct device_attribute *attr,
1576                                          char *buf)
1577 {
1578         struct hd_struct *p = dev_to_part(dev);
1579         struct request_queue *q;
1580         struct block_device *bdev;
1581         ssize_t ret = -ENXIO;
1582
1583         lock_kernel();
1584         bdev = bdget(part_devt(p));
1585         if (bdev == NULL)
1586                 goto out_unlock_kernel;
1587
1588         q = blk_trace_get_queue(bdev);
1589         if (q == NULL)
1590                 goto out_bdput;
1591
1592         mutex_lock(&bdev->bd_mutex);
1593
1594         if (attr == &dev_attr_enable) {
1595                 ret = sprintf(buf, "%u\n", !!q->blk_trace);
1596                 goto out_unlock_bdev;
1597         }
1598
1599         if (q->blk_trace == NULL)
1600                 ret = sprintf(buf, "disabled\n");
1601         else if (attr == &dev_attr_act_mask)
1602                 ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
1603         else if (attr == &dev_attr_pid)
1604                 ret = sprintf(buf, "%u\n", q->blk_trace->pid);
1605         else if (attr == &dev_attr_start_lba)
1606                 ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
1607         else if (attr == &dev_attr_end_lba)
1608                 ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
1609
1610 out_unlock_bdev:
1611         mutex_unlock(&bdev->bd_mutex);
1612 out_bdput:
1613         bdput(bdev);
1614 out_unlock_kernel:
1615         unlock_kernel();
1616         return ret;
1617 }
1618
1619 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1620                                           struct device_attribute *attr,
1621                                           const char *buf, size_t count)
1622 {
1623         struct block_device *bdev;
1624         struct request_queue *q;
1625         struct hd_struct *p;
1626         u64 value;
1627         ssize_t ret = -EINVAL;
1628
1629         if (count == 0)
1630                 goto out;
1631
1632         if (attr == &dev_attr_act_mask) {
1633                 if (sscanf(buf, "%llx", &value) != 1) {
1634                         /* Assume it is a list of trace category names */
1635                         ret = blk_trace_str2mask(buf);
1636                         if (ret < 0)
1637                                 goto out;
1638                         value = ret;
1639                 }
1640         } else if (sscanf(buf, "%llu", &value) != 1)
1641                 goto out;
1642
1643         ret = -ENXIO;
1644
1645         lock_kernel();
1646         p = dev_to_part(dev);
1647         bdev = bdget(part_devt(p));
1648         if (bdev == NULL)
1649                 goto out_unlock_kernel;
1650
1651         q = blk_trace_get_queue(bdev);
1652         if (q == NULL)
1653                 goto out_bdput;
1654
1655         mutex_lock(&bdev->bd_mutex);
1656
1657         if (attr == &dev_attr_enable) {
1658                 if (value)
1659                         ret = blk_trace_setup_queue(q, bdev);
1660                 else
1661                         ret = blk_trace_remove_queue(q);
1662                 goto out_unlock_bdev;
1663         }
1664
1665         ret = 0;
1666         if (q->blk_trace == NULL)
1667                 ret = blk_trace_setup_queue(q, bdev);
1668
1669         if (ret == 0) {
1670                 if (attr == &dev_attr_act_mask)
1671                         q->blk_trace->act_mask = value;
1672                 else if (attr == &dev_attr_pid)
1673                         q->blk_trace->pid = value;
1674                 else if (attr == &dev_attr_start_lba)
1675                         q->blk_trace->start_lba = value;
1676                 else if (attr == &dev_attr_end_lba)
1677                         q->blk_trace->end_lba = value;
1678         }
1679
1680 out_unlock_bdev:
1681         mutex_unlock(&bdev->bd_mutex);
1682 out_bdput:
1683         bdput(bdev);
1684 out_unlock_kernel:
1685         unlock_kernel();
1686 out:
1687         return ret ? ret : count;
1688 }
1689
1690 int blk_trace_init_sysfs(struct device *dev)
1691 {
1692         return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
1693 }
1694
1695 void blk_trace_remove_sysfs(struct device *dev)
1696 {
1697         sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
1698 }
1699
1700 #endif /* CONFIG_BLK_DEV_IO_TRACE */
1701
1702 #ifdef CONFIG_EVENT_TRACING
1703
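/*
 * Dump the command bytes of a SCSI/PC request as hex into @buf; filesystem
 * requests get an empty string, and trailing zero bytes are shortened to
 * " ..".
 */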
1704 void blk_dump_cmd(char *buf, struct request *rq)
1705 {
1706         int i, end;
1707         int len = rq->cmd_len;
1708         unsigned char *cmd = rq->cmd;
1709
1710         if (!blk_pc_request(rq)) {
1711                 buf[0] = '\0';
1712                 return;
1713         }
1714
1715         for (end = len - 1; end >= 0; end--)
1716                 if (cmd[end])
1717                         break;
1718         end++;
1719
1720         for (i = 0; i < len; i++) {
1721                 buf += sprintf(buf, "%s%02x", i == 0 ? "" : " ", cmd[i]);
1722                 if (i == end && end != len - 1) {
1723                         sprintf(buf, " ..");
1724                         break;
1725                 }
1726         }
1727 }
1728
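/*
 * Decode a bio/request flag word (plus byte count) into the same RWBS string
 * format that fill_rwbs() produces for raw blktrace events.
 */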
1729 void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
1730 {
1731         int i = 0;
1732
1733         if (rw & WRITE)
1734                 rwbs[i++] = 'W';
1735         else if (rw & 1 << BIO_RW_DISCARD)
1736                 rwbs[i++] = 'D';
1737         else if (bytes)
1738                 rwbs[i++] = 'R';
1739         else
1740                 rwbs[i++] = 'N';
1741
1742         if (rw & 1 << BIO_RW_AHEAD)
1743                 rwbs[i++] = 'A';
1744         if (rw & 1 << BIO_RW_BARRIER)
1745                 rwbs[i++] = 'B';
1746         if (rw & 1 << BIO_RW_SYNCIO)
1747                 rwbs[i++] = 'S';
1748         if (rw & 1 << BIO_RW_META)
1749                 rwbs[i++] = 'M';
1750
1751         rwbs[i] = '\0';
1752 }
1753
1754 void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
1755 {
1756         int rw = rq->cmd_flags & 0x03;
1757         int bytes;
1758
1759         if (blk_discard_rq(rq))
1760                 rw |= (1 << BIO_RW_DISCARD);
1761
1762         bytes = blk_rq_bytes(rq);
1763
1764         blk_fill_rwbs(rwbs, rw, bytes);
1765 }
1766
1767 #endif /* CONFIG_EVENT_TRACING */
1768