1 /*
2  * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
16  *
17  */
18 #include <linux/kernel.h>
19 #include <linux/blkdev.h>
20 #include <linux/blktrace_api.h>
21 #include <linux/percpu.h>
22 #include <linux/init.h>
23 #include <linux/mutex.h>
24 #include <linux/slab.h>
25 #include <linux/debugfs.h>
26 #include <linux/time.h>
27 #include <linux/uaccess.h>
28
29 #include <trace/events/block.h>
30
31 #include "trace_output.h"
32
33 #ifdef CONFIG_BLK_DEV_IO_TRACE
34
35 static unsigned int blktrace_seq __read_mostly = 1;
36
37 static struct trace_array *blk_tr;
38 static bool blk_tracer_enabled __read_mostly;
39
40 /* Select an alternative, minimalistic output format instead of the original one */
41 #define TRACE_BLK_OPT_CLASSIC   0x1
42
43 static struct tracer_opt blk_tracer_opts[] = {
44         /* Disable the minimalistic output by default */
45         { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
46         { }
47 };
48
49 static struct tracer_flags blk_tracer_flags = {
50         .val  = 0,
51         .opts = blk_tracer_opts,
52 };
53
54 /* Global reference count of probes */
55 static atomic_t blk_probes_ref = ATOMIC_INIT(0);
56
57 static void blk_register_tracepoints(void);
58 static void blk_unregister_tracepoints(void);
59
60 /*
61  * Send out a notify message.
62  */
63 static void trace_note(struct blk_trace *bt, pid_t pid, int action,
64                        const void *data, size_t len)
65 {
66         struct blk_io_trace *t;
67         struct ring_buffer_event *event = NULL;
68         struct ring_buffer *buffer = NULL;
69         int pc = 0;
70         int cpu = smp_processor_id();
71         bool blk_tracer = blk_tracer_enabled;
72
73         if (blk_tracer) {
74                 buffer = blk_tr->buffer;
75                 pc = preempt_count();
76                 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
77                                                   sizeof(*t) + len,
78                                                   0, pc);
79                 if (!event)
80                         return;
81                 t = ring_buffer_event_data(event);
82                 goto record_it;
83         }
84
85         if (!bt->rchan)
86                 return;
87
88         t = relay_reserve(bt->rchan, sizeof(*t) + len);
89         if (t) {
90                 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
91                 t->time = ktime_to_ns(ktime_get());
92 record_it:
93                 t->device = bt->dev;
94                 t->action = action;
95                 t->pid = pid;
96                 t->cpu = cpu;
97                 t->pdu_len = len;
98                 memcpy((void *) t + sizeof(*t), data, len);
99
100                 if (blk_tracer)
101                         trace_buffer_unlock_commit(buffer, event, 0, pc);
102         }
103 }
104
105 /*
106  * Send out a notify for this process, if we haven't done so since a trace
107  * started
108  */
109 static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
110 {
111         tsk->btrace_seq = blktrace_seq;
112         trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
113 }
114
115 static void trace_note_time(struct blk_trace *bt)
116 {
117         struct timespec now;
118         unsigned long flags;
119         u32 words[2];
120
121         getnstimeofday(&now);
122         words[0] = now.tv_sec;
123         words[1] = now.tv_nsec;
124
125         local_irq_save(flags);
126         trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
127         local_irq_restore(flags);
128 }
129
130 void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
131 {
132         int n;
133         va_list args;
134         unsigned long flags;
135         char *buf;
136
137         if (unlikely(bt->trace_state != Blktrace_running &&
138                      !blk_tracer_enabled))
139                 return;
140
141         /*
142          * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
143          * message to the trace.
144          */
145         if (!(bt->act_mask & BLK_TC_NOTIFY))
146                 return;
147
148         local_irq_save(flags);
149         buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
150         va_start(args, fmt);
151         n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
152         va_end(args);
153
154         trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
155         local_irq_restore(flags);
156 }
157 EXPORT_SYMBOL_GPL(__trace_note_message);
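
/*
 * Illustrative usage (a sketch, not a call site in this file): drivers and
 * other block-layer code normally reach this through the blk_add_trace_msg()
 * wrapper in <linux/blktrace_api.h>, which checks q->blk_trace before
 * calling __trace_note_message(), e.g.
 *
 *	blk_add_trace_msg(q, "my_driver: requeue on timeout, tag %d", tag);
 *
 * where "my_driver" and "tag" are hypothetical; each message is capped at
 * BLK_TN_MAX_MSG bytes by the vscnprintf() above.
 */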
158
159 static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
160                          pid_t pid)
161 {
162         if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
163                 return 1;
164         if (sector && (sector < bt->start_lba || sector > bt->end_lba))
165                 return 1;
166         if (bt->pid && pid != bt->pid)
167                 return 1;
168
169         return 0;
170 }
171
172 /*
173  * Data direction bit lookup
174  */
175 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
176                                  BLK_TC_ACT(BLK_TC_WRITE) };
177
178 #define BLK_TC_RAHEAD           BLK_TC_AHEAD
179
180 /* The ilog2() calls fall out because they're constant */
181 #define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
182           (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
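
/*
 * Worked example of the bit shuffling above (SYNC chosen for illustration):
 * MASK_TC_BIT(rw, SYNC) expands to
 *
 *	(rw & REQ_SYNC) << (ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - __REQ_SYNC)
 *
 * i.e. the REQ_SYNC bit is picked out of the request flags and shifted up
 * into the BLK_TC_SYNC position of the action's category field (the bits
 * above BLK_TC_SHIFT); the result is 0 when the flag is not set.
 */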
183
184 /*
185  * The worker for the various blk_add_trace*() types. Fills out a
186  * blk_io_trace structure and places it in a per-cpu subbuffer.
187  */
188 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
189                      int rw, u32 what, int error, int pdu_len, void *pdu_data)
190 {
191         struct task_struct *tsk = current;
192         struct ring_buffer_event *event = NULL;
193         struct ring_buffer *buffer = NULL;
194         struct blk_io_trace *t;
195         unsigned long flags = 0;
196         unsigned long *sequence;
197         pid_t pid;
198         int cpu, pc = 0;
199         bool blk_tracer = blk_tracer_enabled;
200
201         if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
202                 return;
203
204         what |= ddir_act[rw & WRITE];
205         what |= MASK_TC_BIT(rw, SYNC);
206         what |= MASK_TC_BIT(rw, RAHEAD);
207         what |= MASK_TC_BIT(rw, META);
208         what |= MASK_TC_BIT(rw, DISCARD);
209
210         pid = tsk->pid;
211         if (act_log_check(bt, what, sector, pid))
212                 return;
213         cpu = raw_smp_processor_id();
214
215         if (blk_tracer) {
216                 tracing_record_cmdline(current);
217
218                 buffer = blk_tr->buffer;
219                 pc = preempt_count();
220                 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
221                                                   sizeof(*t) + pdu_len,
222                                                   0, pc);
223                 if (!event)
224                         return;
225                 t = ring_buffer_event_data(event);
226                 goto record_it;
227         }
228
229         /*
230          * A word about the locking here - we disable interrupts to reserve
231          * some space in the relay per-cpu buffer, to prevent an irq
232          * from coming in and stepping on our toes.
233          */
234         local_irq_save(flags);
235
236         if (unlikely(tsk->btrace_seq != blktrace_seq))
237                 trace_note_tsk(bt, tsk);
238
239         t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
240         if (t) {
241                 sequence = per_cpu_ptr(bt->sequence, cpu);
242
243                 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
244                 t->sequence = ++(*sequence);
245                 t->time = ktime_to_ns(ktime_get());
246 record_it:
247                 /*
248                  * These two are not needed in ftrace as they are in the
249                  * generic trace_entry, filled by tracing_generic_entry_update,
250                  * but for the trace_event->bin() synthesizer benefit we do it
251                  * here too.
252                  */
253                 t->cpu = cpu;
254                 t->pid = pid;
255
256                 t->sector = sector;
257                 t->bytes = bytes;
258                 t->action = what;
259                 t->device = bt->dev;
260                 t->error = error;
261                 t->pdu_len = pdu_len;
262
263                 if (pdu_len)
264                         memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
265
266                 if (blk_tracer) {
267                         trace_buffer_unlock_commit(buffer, event, 0, pc);
268                         return;
269                 }
270         }
271
272         local_irq_restore(flags);
273 }
274
275 static struct dentry *blk_tree_root;
276 static DEFINE_MUTEX(blk_tree_mutex);
277
278 static void blk_trace_free(struct blk_trace *bt)
279 {
280         debugfs_remove(bt->msg_file);
281         debugfs_remove(bt->dropped_file);
282         relay_close(bt->rchan);
283         debugfs_remove(bt->dir);
284         free_percpu(bt->sequence);
285         free_percpu(bt->msg_data);
286         kfree(bt);
287 }
288
289 static void blk_trace_cleanup(struct blk_trace *bt)
290 {
291         blk_trace_free(bt);
292         if (atomic_dec_and_test(&blk_probes_ref))
293                 blk_unregister_tracepoints();
294 }
295
296 int blk_trace_remove(struct request_queue *q)
297 {
298         struct blk_trace *bt;
299
300         bt = xchg(&q->blk_trace, NULL);
301         if (!bt)
302                 return -EINVAL;
303
304         if (bt->trace_state != Blktrace_running)
305                 blk_trace_cleanup(bt);
306
307         return 0;
308 }
309 EXPORT_SYMBOL_GPL(blk_trace_remove);
310
311 static int blk_dropped_open(struct inode *inode, struct file *filp)
312 {
313         filp->private_data = inode->i_private;
314
315         return 0;
316 }
317
318 static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
319                                 size_t count, loff_t *ppos)
320 {
321         struct blk_trace *bt = filp->private_data;
322         char buf[16];
323
324         snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
325
326         return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
327 }
328
329 static const struct file_operations blk_dropped_fops = {
330         .owner =        THIS_MODULE,
331         .open =         blk_dropped_open,
332         .read =         blk_dropped_read,
333         .llseek =       default_llseek,
334 };
335
336 static int blk_msg_open(struct inode *inode, struct file *filp)
337 {
338         filp->private_data = inode->i_private;
339
340         return 0;
341 }
342
343 static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
344                                 size_t count, loff_t *ppos)
345 {
346         char *msg;
347         struct blk_trace *bt;
348
349         if (count >= BLK_TN_MAX_MSG)
350                 return -EINVAL;
351
352         msg = kmalloc(count + 1, GFP_KERNEL);
353         if (msg == NULL)
354                 return -ENOMEM;
355
356         if (copy_from_user(msg, buffer, count)) {
357                 kfree(msg);
358                 return -EFAULT;
359         }
360
361         msg[count] = '\0';
362         bt = filp->private_data;
363         __trace_note_message(bt, "%s", msg);
364         kfree(msg);
365
366         return count;
367 }
368
369 static const struct file_operations blk_msg_fops = {
370         .owner =        THIS_MODULE,
371         .open =         blk_msg_open,
372         .write =        blk_msg_write,
373         .llseek =       noop_llseek,
374 };
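
/*
 * Example of the user-space side (paths are illustrative and assume debugfs
 * is mounted at /sys/kernel/debug): a message can be injected into a running
 * trace by writing to the per-device "msg" file created in
 * do_blk_trace_setup() below, e.g.
 *
 *	int fd = open("/sys/kernel/debug/block/sda/msg", O_WRONLY);
 *	write(fd, "checkpoint A", 12);
 *	close(fd);
 *
 * which ends up in the trace stream as a BLK_TN_MESSAGE note via
 * blk_msg_write() -> __trace_note_message().
 */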
375
376 /*
377  * Keep track of how many times we encountered a full subbuffer, so the
378  * user space app can tell how many events were lost.
379  */
380 static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
381                                      void *prev_subbuf, size_t prev_padding)
382 {
383         struct blk_trace *bt;
384
385         if (!relay_buf_full(buf))
386                 return 1;
387
388         bt = buf->chan->private_data;
389         atomic_inc(&bt->dropped);
390         return 0;
391 }
392
393 static int blk_remove_buf_file_callback(struct dentry *dentry)
394 {
395         debugfs_remove(dentry);
396
397         return 0;
398 }
399
400 static struct dentry *blk_create_buf_file_callback(const char *filename,
401                                                    struct dentry *parent,
402                                                    int mode,
403                                                    struct rchan_buf *buf,
404                                                    int *is_global)
405 {
406         return debugfs_create_file(filename, mode, parent, buf,
407                                         &relay_file_operations);
408 }
409
410 static struct rchan_callbacks blk_relay_callbacks = {
411         .subbuf_start           = blk_subbuf_start_callback,
412         .create_buf_file        = blk_create_buf_file_callback,
413         .remove_buf_file        = blk_remove_buf_file_callback,
414 };
415
416 static void blk_trace_setup_lba(struct blk_trace *bt,
417                                 struct block_device *bdev)
418 {
419         struct hd_struct *part = NULL;
420
421         if (bdev)
422                 part = bdev->bd_part;
423
424         if (part) {
425                 bt->start_lba = part->start_sect;
426                 bt->end_lba = part->start_sect + part->nr_sects;
427         } else {
428                 bt->start_lba = 0;
429                 bt->end_lba = -1ULL;
430         }
431 }
432
433 /*
434  * Setup everything required to start tracing
435  */
436 int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
437                        struct block_device *bdev,
438                        struct blk_user_trace_setup *buts)
439 {
440         struct blk_trace *old_bt, *bt = NULL;
441         struct dentry *dir = NULL;
442         int ret, i;
443
444         if (!buts->buf_size || !buts->buf_nr)
445                 return -EINVAL;
446
447         strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
448         buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
449
450         /*
451          * some device names contain slashes - convert them to underscores
452          * so the name can be used as a debugfs directory name
453          */
454         for (i = 0; i < strlen(buts->name); i++)
455                 if (buts->name[i] == '/')
456                         buts->name[i] = '_';
457
458         bt = kzalloc(sizeof(*bt), GFP_KERNEL);
459         if (!bt)
460                 return -ENOMEM;
461
462         ret = -ENOMEM;
463         bt->sequence = alloc_percpu(unsigned long);
464         if (!bt->sequence)
465                 goto err;
466
467         bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
468         if (!bt->msg_data)
469                 goto err;
470
471         ret = -ENOENT;
472
473         mutex_lock(&blk_tree_mutex);
474         if (!blk_tree_root) {
475                 blk_tree_root = debugfs_create_dir("block", NULL);
476                 if (!blk_tree_root) {
477                         mutex_unlock(&blk_tree_mutex);
478                         goto err;
479                 }
480         }
481         mutex_unlock(&blk_tree_mutex);
482
483         dir = debugfs_create_dir(buts->name, blk_tree_root);
484
485         if (!dir)
486                 goto err;
487
488         bt->dir = dir;
489         bt->dev = dev;
490         atomic_set(&bt->dropped, 0);
491
492         ret = -EIO;
493         bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
494                                                &blk_dropped_fops);
495         if (!bt->dropped_file)
496                 goto err;
497
498         bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
499         if (!bt->msg_file)
500                 goto err;
501
502         bt->rchan = relay_open("trace", dir, buts->buf_size,
503                                 buts->buf_nr, &blk_relay_callbacks, bt);
504         if (!bt->rchan)
505                 goto err;
506
507         bt->act_mask = buts->act_mask;
508         if (!bt->act_mask)
509                 bt->act_mask = (u16) -1;
510
511         blk_trace_setup_lba(bt, bdev);
512
513         /* overwrite with user settings */
514         if (buts->start_lba)
515                 bt->start_lba = buts->start_lba;
516         if (buts->end_lba)
517                 bt->end_lba = buts->end_lba;
518
519         bt->pid = buts->pid;
520         bt->trace_state = Blktrace_setup;
521
522         ret = -EBUSY;
523         old_bt = xchg(&q->blk_trace, bt);
524         if (old_bt) {
525                 (void) xchg(&q->blk_trace, old_bt);
526                 goto err;
527         }
528
529         if (atomic_inc_return(&blk_probes_ref) == 1)
530                 blk_register_tracepoints();
531
532         return 0;
533 err:
534         blk_trace_free(bt);
535         return ret;
536 }
537
538 int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
539                     struct block_device *bdev,
540                     char __user *arg)
541 {
542         struct blk_user_trace_setup buts;
543         int ret;
544
545         ret = copy_from_user(&buts, arg, sizeof(buts));
546         if (ret)
547                 return -EFAULT;
548
549         ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
550         if (ret)
551                 return ret;
552
553         if (copy_to_user(arg, &buts, sizeof(buts))) {
554                 blk_trace_remove(q);
555                 return -EFAULT;
556         }
557         return 0;
558 }
559 EXPORT_SYMBOL_GPL(blk_trace_setup);
560
561 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
562 static int compat_blk_trace_setup(struct request_queue *q, char *name,
563                                   dev_t dev, struct block_device *bdev,
564                                   char __user *arg)
565 {
566         struct blk_user_trace_setup buts;
567         struct compat_blk_user_trace_setup cbuts;
568         int ret;
569
570         if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
571                 return -EFAULT;
572
573         buts = (struct blk_user_trace_setup) {
574                 .act_mask = cbuts.act_mask,
575                 .buf_size = cbuts.buf_size,
576                 .buf_nr = cbuts.buf_nr,
577                 .start_lba = cbuts.start_lba,
578                 .end_lba = cbuts.end_lba,
579                 .pid = cbuts.pid,
580         };
581         memcpy(&buts.name, &cbuts.name, 32);
582
583         ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
584         if (ret)
585                 return ret;
586
587         if (copy_to_user(arg, &buts.name, 32)) {
588                 blk_trace_remove(q);
589                 return -EFAULT;
590         }
591
592         return 0;
593 }
594 #endif
595
596 int blk_trace_startstop(struct request_queue *q, int start)
597 {
598         int ret;
599         struct blk_trace *bt = q->blk_trace;
600
601         if (bt == NULL)
602                 return -EINVAL;
603
604         /*
605          * For starting a trace, we can transition from a setup or stopped
606          * trace. For stopping a trace, the state must be running
607          */
608         ret = -EINVAL;
609         if (start) {
610                 if (bt->trace_state == Blktrace_setup ||
611                     bt->trace_state == Blktrace_stopped) {
612                         blktrace_seq++;
613                         smp_mb();
614                         bt->trace_state = Blktrace_running;
615
616                         trace_note_time(bt);
617                         ret = 0;
618                 }
619         } else {
620                 if (bt->trace_state == Blktrace_running) {
621                         bt->trace_state = Blktrace_stopped;
622                         relay_flush(bt->rchan);
623                         ret = 0;
624                 }
625         }
626
627         return ret;
628 }
629 EXPORT_SYMBOL_GPL(blk_trace_startstop);
630
631 /**
632  * blk_trace_ioctl: - handle the ioctls associated with tracing
633  * @bdev:       the block device
634  * @cmd:        the ioctl cmd
635  * @arg:        the argument data, if any
636  *
637  **/
638 int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
639 {
640         struct request_queue *q;
641         int ret, start = 0;
642         char b[BDEVNAME_SIZE];
643
644         q = bdev_get_queue(bdev);
645         if (!q)
646                 return -ENXIO;
647
648         mutex_lock(&bdev->bd_mutex);
649
650         switch (cmd) {
651         case BLKTRACESETUP:
652                 bdevname(bdev, b);
653                 ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
654                 break;
655 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
656         case BLKTRACESETUP32:
657                 bdevname(bdev, b);
658                 ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
659                 break;
660 #endif
661         case BLKTRACESTART:
662                 start = 1;
663         case BLKTRACESTOP:
664                 ret = blk_trace_startstop(q, start);
665                 break;
666         case BLKTRACETEARDOWN:
667                 ret = blk_trace_remove(q);
668                 break;
669         default:
670                 ret = -ENOTTY;
671                 break;
672         }
673
674         mutex_unlock(&bdev->bd_mutex);
675         return ret;
676 }
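
/*
 * Sketch of the user-space side of these ioctls (device name and buffer
 * sizes are illustrative; this mirrors what the blktrace utility does).
 * An act_mask of 0 is treated as "trace everything" by do_blk_trace_setup():
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,
 *		.buf_nr   = 4,
 *	};
 *	int fd = open("/dev/sda", O_RDONLY);
 *	ioctl(fd, BLKTRACESETUP, &buts);
 *	ioctl(fd, BLKTRACESTART);
 *	... consume the per-cpu relay files under debugfs ...
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);
 */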
677
678 /**
679  * blk_trace_shutdown: - stop and cleanup trace structures
680  * @q:    the request queue associated with the device
681  *
682  **/
683 void blk_trace_shutdown(struct request_queue *q)
684 {
685         if (q->blk_trace) {
686                 blk_trace_startstop(q, 0);
687                 blk_trace_remove(q);
688         }
689 }
690
691 /*
692  * blktrace probes
693  */
694
695 /**
696  * blk_add_trace_rq - Add a trace for a request oriented action
697  * @q:          queue the io is for
698  * @rq:         the source request
699  * @what:       the action
700  *
701  * Description:
702  *     Records an action against a request. Will log the request offset + size.
703  *
704  **/
705 static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
706                                     u32 what)
707 {
708         struct blk_trace *bt = q->blk_trace;
709         int rw = rq->cmd_flags & 0x03;
710
711         if (likely(!bt))
712                 return;
713
714         if (rq->cmd_flags & REQ_DISCARD)
715                 rw |= REQ_DISCARD;
716
717         if (rq->cmd_flags & REQ_SECURE)
718                 rw |= REQ_SECURE;
719
720         if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
721                 what |= BLK_TC_ACT(BLK_TC_PC);
722                 __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
723                                 what, rq->errors, rq->cmd_len, rq->cmd);
724         } else  {
725                 what |= BLK_TC_ACT(BLK_TC_FS);
726                 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
727                                 what, rq->errors, 0, NULL);
728         }
729 }
730
731 static void blk_add_trace_rq_abort(void *ignore,
732                                    struct request_queue *q, struct request *rq)
733 {
734         blk_add_trace_rq(q, rq, BLK_TA_ABORT);
735 }
736
737 static void blk_add_trace_rq_insert(void *ignore,
738                                     struct request_queue *q, struct request *rq)
739 {
740         blk_add_trace_rq(q, rq, BLK_TA_INSERT);
741 }
742
743 static void blk_add_trace_rq_issue(void *ignore,
744                                    struct request_queue *q, struct request *rq)
745 {
746         blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
747 }
748
749 static void blk_add_trace_rq_requeue(void *ignore,
750                                      struct request_queue *q,
751                                      struct request *rq)
752 {
753         blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
754 }
755
756 static void blk_add_trace_rq_complete(void *ignore,
757                                       struct request_queue *q,
758                                       struct request *rq)
759 {
760         blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
761 }
762
763 /**
764  * blk_add_trace_bio - Add a trace for a bio oriented action
765  * @q:          queue the io is for
766  * @bio:        the source bio
767  * @what:       the action
768  * @error:      error, if any
769  *
770  * Description:
771  *     Records an action against a bio. Will log the bio offset + size.
772  *
773  **/
774 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
775                               u32 what, int error)
776 {
777         struct blk_trace *bt = q->blk_trace;
778
779         if (likely(!bt))
780                 return;
781
782         if (!error && !bio_flagged(bio, BIO_UPTODATE))
783                 error = EIO;
784
785         __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
786                         error, 0, NULL);
787 }
788
789 static void blk_add_trace_bio_bounce(void *ignore,
790                                      struct request_queue *q, struct bio *bio)
791 {
792         blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
793 }
794
795 static void blk_add_trace_bio_complete(void *ignore,
796                                        struct request_queue *q, struct bio *bio,
797                                        int error)
798 {
799         blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
800 }
801
802 static void blk_add_trace_bio_backmerge(void *ignore,
803                                         struct request_queue *q,
804                                         struct bio *bio)
805 {
806         blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
807 }
808
809 static void blk_add_trace_bio_frontmerge(void *ignore,
810                                          struct request_queue *q,
811                                          struct bio *bio)
812 {
813         blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
814 }
815
816 static void blk_add_trace_bio_queue(void *ignore,
817                                     struct request_queue *q, struct bio *bio)
818 {
819         blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
820 }
821
822 static void blk_add_trace_getrq(void *ignore,
823                                 struct request_queue *q,
824                                 struct bio *bio, int rw)
825 {
826         if (bio)
827                 blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
828         else {
829                 struct blk_trace *bt = q->blk_trace;
830
831                 if (bt)
832                         __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
833         }
834 }
835
836
837 static void blk_add_trace_sleeprq(void *ignore,
838                                   struct request_queue *q,
839                                   struct bio *bio, int rw)
840 {
841         if (bio)
842                 blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
843         else {
844                 struct blk_trace *bt = q->blk_trace;
845
846                 if (bt)
847                         __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
848                                         0, 0, NULL);
849         }
850 }
851
852 static void blk_add_trace_plug(void *ignore, struct request_queue *q)
853 {
854         struct blk_trace *bt = q->blk_trace;
855
856         if (bt)
857                 __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
858 }
859
860 static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q)
861 {
862         struct blk_trace *bt = q->blk_trace;
863
864         if (bt) {
865                 unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
866                 __be64 rpdu = cpu_to_be64(pdu);
867
868                 __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
869                                 sizeof(rpdu), &rpdu);
870         }
871 }
872
873 static void blk_add_trace_unplug_timer(void *ignore, struct request_queue *q)
874 {
875         struct blk_trace *bt = q->blk_trace;
876
877         if (bt) {
878                 unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
879                 __be64 rpdu = cpu_to_be64(pdu);
880
881                 __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
882                                 sizeof(rpdu), &rpdu);
883         }
884 }
885
886 static void blk_add_trace_split(void *ignore,
887                                 struct request_queue *q, struct bio *bio,
888                                 unsigned int pdu)
889 {
890         struct blk_trace *bt = q->blk_trace;
891
892         if (bt) {
893                 __be64 rpdu = cpu_to_be64(pdu);
894
895                 __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
896                                 BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
897                                 sizeof(rpdu), &rpdu);
898         }
899 }
900
901 /**
902  * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
903  * @ignore:     trace callback data parameter (not used)
904  * @q:          queue the io is for
905  * @bio:        the source bio
906  * @dev:        target device
907  * @from:       source sector
908  *
909  * Description:
910  *     Device mapper or raid targets sometimes need to split a bio because
911  *     it spans a stripe (or similar). Add a trace for that action.
912  *
913  **/
914 static void blk_add_trace_bio_remap(void *ignore,
915                                     struct request_queue *q, struct bio *bio,
916                                     dev_t dev, sector_t from)
917 {
918         struct blk_trace *bt = q->blk_trace;
919         struct blk_io_trace_remap r;
920
921         if (likely(!bt))
922                 return;
923
924         r.device_from = cpu_to_be32(dev);
925         r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
926         r.sector_from = cpu_to_be64(from);
927
928         __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
929                         BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
930                         sizeof(r), &r);
931 }
932
933 /**
934  * blk_add_trace_rq_remap - Add a trace for a request-remap operation
935  * @ignore:     trace callback data parameter (not used)
936  * @q:          queue the io is for
937  * @rq:         the source request
938  * @dev:        target device
939  * @from:       source sector
940  *
941  * Description:
942  *     Device mapper remaps requests to other devices.
943  *     Add a trace for that action.
944  *
945  **/
946 static void blk_add_trace_rq_remap(void *ignore,
947                                    struct request_queue *q,
948                                    struct request *rq, dev_t dev,
949                                    sector_t from)
950 {
951         struct blk_trace *bt = q->blk_trace;
952         struct blk_io_trace_remap r;
953
954         if (likely(!bt))
955                 return;
956
957         r.device_from = cpu_to_be32(dev);
958         r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
959         r.sector_from = cpu_to_be64(from);
960
961         __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
962                         rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
963                         sizeof(r), &r);
964 }
965
966 /**
967  * blk_add_driver_data - Add binary message with driver-specific data
968  * @q:          queue the io is for
969  * @rq:         io request
970  * @data:       driver-specific data
971  * @len:        length of driver-specific data
972  *
973  * Description:
974  *     Some drivers might want to write driver-specific data per request.
975  *
976  **/
977 void blk_add_driver_data(struct request_queue *q,
978                          struct request *rq,
979                          void *data, size_t len)
980 {
981         struct blk_trace *bt = q->blk_trace;
982
983         if (likely(!bt))
984                 return;
985
986         if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
987                 __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
988                                 BLK_TA_DRV_DATA, rq->errors, len, data);
989         else
990                 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
991                                 BLK_TA_DRV_DATA, rq->errors, len, data);
992 }
993 EXPORT_SYMBOL_GPL(blk_add_driver_data);
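
/*
 * Illustrative driver-side call (the payload struct is hypothetical): a
 * low-level driver that wants its own state attached to the trace stream
 * for a request could do
 *
 *	struct my_hw_status st = { .phase = 2, .retries = 1 };
 *	blk_add_driver_data(q, rq, &st, sizeof(st));
 *
 * The bytes are emitted verbatim as the pdu of a BLK_TA_DRV_DATA event and
 * left for user-space tools to interpret.
 */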
994
995 static void blk_register_tracepoints(void)
996 {
997         int ret;
998
999         ret = register_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);
1000         WARN_ON(ret);
1001         ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1002         WARN_ON(ret);
1003         ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1004         WARN_ON(ret);
1005         ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1006         WARN_ON(ret);
1007         ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1008         WARN_ON(ret);
1009         ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1010         WARN_ON(ret);
1011         ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1012         WARN_ON(ret);
1013         ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1014         WARN_ON(ret);
1015         ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1016         WARN_ON(ret);
1017         ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1018         WARN_ON(ret);
1019         ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
1020         WARN_ON(ret);
1021         ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1022         WARN_ON(ret);
1023         ret = register_trace_block_plug(blk_add_trace_plug, NULL);
1024         WARN_ON(ret);
1025         ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
1026         WARN_ON(ret);
1027         ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
1028         WARN_ON(ret);
1029         ret = register_trace_block_split(blk_add_trace_split, NULL);
1030         WARN_ON(ret);
1031         ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1032         WARN_ON(ret);
1033         ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1034         WARN_ON(ret);
1035 }
1036
1037 static void blk_unregister_tracepoints(void)
1038 {
1039         unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1040         unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1041         unregister_trace_block_split(blk_add_trace_split, NULL);
1042         unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
1043         unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
1044         unregister_trace_block_plug(blk_add_trace_plug, NULL);
1045         unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1046         unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
1047         unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1048         unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1049         unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1050         unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1051         unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1052         unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1053         unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1054         unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1055         unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1056         unregister_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);
1057
1058         tracepoint_synchronize_unregister();
1059 }
1060
1061 /*
1062  * struct blk_io_tracer formatting routines
1063  */
1064
1065 static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
1066 {
1067         int i = 0;
1068         int tc = t->action >> BLK_TC_SHIFT;
1069
1070         if (t->action == BLK_TN_MESSAGE) {
1071                 rwbs[i++] = 'N';
1072                 goto out;
1073         }
1074
1075         if (tc & BLK_TC_DISCARD)
1076                 rwbs[i++] = 'D';
1077         else if (tc & BLK_TC_WRITE)
1078                 rwbs[i++] = 'W';
1079         else if (t->bytes)
1080                 rwbs[i++] = 'R';
1081         else
1082                 rwbs[i++] = 'N';
1083
1084         if (tc & BLK_TC_AHEAD)
1085                 rwbs[i++] = 'A';
1086         if (tc & BLK_TC_BARRIER)
1087                 rwbs[i++] = 'B';
1088         if (tc & BLK_TC_SYNC)
1089                 rwbs[i++] = 'S';
1090         if (tc & BLK_TC_META)
1091                 rwbs[i++] = 'M';
1092 out:
1093         rwbs[i] = '\0';
1094 }
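
/*
 * A few example encodings produced above (for illustration): a synchronous
 * metadata write yields "WSM", a readahead read yields "RA", a discard
 * yields "D", and a BLK_TN_MESSAGE note is always just "N".
 */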
1095
1096 static inline
1097 const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
1098 {
1099         return (const struct blk_io_trace *)ent;
1100 }
1101
1102 static inline const void *pdu_start(const struct trace_entry *ent)
1103 {
1104         return te_blk_io_trace(ent) + 1;
1105 }
1106
1107 static inline u32 t_action(const struct trace_entry *ent)
1108 {
1109         return te_blk_io_trace(ent)->action;
1110 }
1111
1112 static inline u32 t_bytes(const struct trace_entry *ent)
1113 {
1114         return te_blk_io_trace(ent)->bytes;
1115 }
1116
1117 static inline u32 t_sec(const struct trace_entry *ent)
1118 {
1119         return te_blk_io_trace(ent)->bytes >> 9;
1120 }
1121
1122 static inline unsigned long long t_sector(const struct trace_entry *ent)
1123 {
1124         return te_blk_io_trace(ent)->sector;
1125 }
1126
1127 static inline __u16 t_error(const struct trace_entry *ent)
1128 {
1129         return te_blk_io_trace(ent)->error;
1130 }
1131
1132 static __u64 get_pdu_int(const struct trace_entry *ent)
1133 {
1134         const __u64 *val = pdu_start(ent);
1135         return be64_to_cpu(*val);
1136 }
1137
1138 static void get_pdu_remap(const struct trace_entry *ent,
1139                           struct blk_io_trace_remap *r)
1140 {
1141         const struct blk_io_trace_remap *__r = pdu_start(ent);
1142         __u64 sector_from = __r->sector_from;
1143
1144         r->device_from = be32_to_cpu(__r->device_from);
1145         r->device_to   = be32_to_cpu(__r->device_to);
1146         r->sector_from = be64_to_cpu(sector_from);
1147 }
1148
1149 typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);
1150
1151 static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
1152 {
1153         char rwbs[6];
1154         unsigned long long ts  = iter->ts;
1155         unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
1156         unsigned secs          = (unsigned long)ts;
1157         const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1158
1159         fill_rwbs(rwbs, t);
1160
1161         return trace_seq_printf(&iter->seq,
1162                                 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
1163                                 MAJOR(t->device), MINOR(t->device), iter->cpu,
1164                                 secs, nsec_rem, iter->ent->pid, act, rwbs);
1165 }
1166
1167 static int blk_log_action(struct trace_iterator *iter, const char *act)
1168 {
1169         char rwbs[6];
1170         const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1171
1172         fill_rwbs(rwbs, t);
1173         return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
1174                                 MAJOR(t->device), MINOR(t->device), act, rwbs);
1175 }
1176
1177 static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
1178 {
1179         const unsigned char *pdu_buf;
1180         int pdu_len;
1181         int i, end, ret;
1182
1183         pdu_buf = pdu_start(ent);
1184         pdu_len = te_blk_io_trace(ent)->pdu_len;
1185
1186         if (!pdu_len)
1187                 return 1;
1188
1189         /* find the first of the trailing zeroes; the rest are elided with ".." below */
1190         for (end = pdu_len - 1; end >= 0; end--)
1191                 if (pdu_buf[end])
1192                         break;
1193         end++;
1194
1195         if (!trace_seq_putc(s, '('))
1196                 return 0;
1197
1198         for (i = 0; i < pdu_len; i++) {
1199
1200                 ret = trace_seq_printf(s, "%s%02x",
1201                                        i == 0 ? "" : " ", pdu_buf[i]);
1202                 if (!ret)
1203                         return ret;
1204
1205                 /*
1206                  * stop when the rest is just zeroes and indicate so
1207                  * with a ".." appended
1208                  */
1209                 if (i == end && end != pdu_len - 1)
1210                         return trace_seq_puts(s, " ..) ");
1211         }
1212
1213         return trace_seq_puts(s, ") ");
1214 }
1215
1216 static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
1217 {
1218         char cmd[TASK_COMM_LEN];
1219
1220         trace_find_cmdline(ent->pid, cmd);
1221
1222         if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1223                 int ret;
1224
1225                 ret = trace_seq_printf(s, "%u ", t_bytes(ent));
1226                 if (!ret)
1227                         return 0;
1228                 ret = blk_log_dump_pdu(s, ent);
1229                 if (!ret)
1230                         return 0;
1231                 return trace_seq_printf(s, "[%s]\n", cmd);
1232         } else {
1233                 if (t_sec(ent))
1234                         return trace_seq_printf(s, "%llu + %u [%s]\n",
1235                                                 t_sector(ent), t_sec(ent), cmd);
1236                 return trace_seq_printf(s, "[%s]\n", cmd);
1237         }
1238 }
1239
1240 static int blk_log_with_error(struct trace_seq *s,
1241                               const struct trace_entry *ent)
1242 {
1243         if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1244                 int ret;
1245
1246                 ret = blk_log_dump_pdu(s, ent);
1247                 if (ret)
1248                         return trace_seq_printf(s, "[%d]\n", t_error(ent));
1249                 return 0;
1250         } else {
1251                 if (t_sec(ent))
1252                         return trace_seq_printf(s, "%llu + %u [%d]\n",
1253                                                 t_sector(ent),
1254                                                 t_sec(ent), t_error(ent));
1255                 return trace_seq_printf(s, "%llu [%d]\n",
1256                                         t_sector(ent), t_error(ent));
1257         }
1258 }
1259
1260 static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
1261 {
1262         struct blk_io_trace_remap r = { .device_from = 0, };
1263
1264         get_pdu_remap(ent, &r);
1265         return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
1266                                 t_sector(ent), t_sec(ent),
1267                                 MAJOR(r.device_from), MINOR(r.device_from),
1268                                 (unsigned long long)r.sector_from);
1269 }
1270
1271 static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
1272 {
1273         char cmd[TASK_COMM_LEN];
1274
1275         trace_find_cmdline(ent->pid, cmd);
1276
1277         return trace_seq_printf(s, "[%s]\n", cmd);
1278 }
1279
1280 static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
1281 {
1282         char cmd[TASK_COMM_LEN];
1283
1284         trace_find_cmdline(ent->pid, cmd);
1285
1286         return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
1287 }
1288
1289 static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
1290 {
1291         char cmd[TASK_COMM_LEN];
1292
1293         trace_find_cmdline(ent->pid, cmd);
1294
1295         return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
1296                                 get_pdu_int(ent), cmd);
1297 }
1298
1299 static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
1300 {
1301         int ret;
1302         const struct blk_io_trace *t = te_blk_io_trace(ent);
1303
1304         ret = trace_seq_putmem(s, t + 1, t->pdu_len);
1305         if (ret)
1306                 return trace_seq_putc(s, '\n');
1307         return ret;
1308 }
1309
1310 /*
1311  * struct tracer operations
1312  */
1313
1314 static void blk_tracer_print_header(struct seq_file *m)
1315 {
1316         if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1317                 return;
1318         seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
1319                     "#  |     |     |           |   |   |\n");
1320 }
1321
1322 static void blk_tracer_start(struct trace_array *tr)
1323 {
1324         blk_tracer_enabled = true;
1325 }
1326
1327 static int blk_tracer_init(struct trace_array *tr)
1328 {
1329         blk_tr = tr;
1330         blk_tracer_start(tr);
1331         return 0;
1332 }
1333
1334 static void blk_tracer_stop(struct trace_array *tr)
1335 {
1336         blk_tracer_enabled = false;
1337 }
1338
1339 static void blk_tracer_reset(struct trace_array *tr)
1340 {
1341         blk_tracer_stop(tr);
1342 }
1343
1344 static const struct {
1345         const char *act[2];
1346         int        (*print)(struct trace_seq *s, const struct trace_entry *ent);
1347 } what2act[] = {
1348         [__BLK_TA_QUEUE]        = {{  "Q", "queue" },      blk_log_generic },
1349         [__BLK_TA_BACKMERGE]    = {{  "M", "backmerge" },  blk_log_generic },
1350         [__BLK_TA_FRONTMERGE]   = {{  "F", "frontmerge" }, blk_log_generic },
1351         [__BLK_TA_GETRQ]        = {{  "G", "getrq" },      blk_log_generic },
1352         [__BLK_TA_SLEEPRQ]      = {{  "S", "sleeprq" },    blk_log_generic },
1353         [__BLK_TA_REQUEUE]      = {{  "R", "requeue" },    blk_log_with_error },
1354         [__BLK_TA_ISSUE]        = {{  "D", "issue" },      blk_log_generic },
1355         [__BLK_TA_COMPLETE]     = {{  "C", "complete" },   blk_log_with_error },
1356         [__BLK_TA_PLUG]         = {{  "P", "plug" },       blk_log_plug },
1357         [__BLK_TA_UNPLUG_IO]    = {{  "U", "unplug_io" },  blk_log_unplug },
1358         [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
1359         [__BLK_TA_INSERT]       = {{  "I", "insert" },     blk_log_generic },
1360         [__BLK_TA_SPLIT]        = {{  "X", "split" },      blk_log_split },
1361         [__BLK_TA_BOUNCE]       = {{  "B", "bounce" },     blk_log_generic },
1362         [__BLK_TA_REMAP]        = {{  "A", "remap" },      blk_log_remap },
1363 };
1364
1365 static enum print_line_t print_one_line(struct trace_iterator *iter,
1366                                         bool classic)
1367 {
1368         struct trace_seq *s = &iter->seq;
1369         const struct blk_io_trace *t;
1370         u16 what;
1371         int ret;
1372         bool long_act;
1373         blk_log_action_t *log_action;
1374
1375         t          = te_blk_io_trace(iter->ent);
1376         what       = t->action & ((1 << BLK_TC_SHIFT) - 1);
1377         long_act   = !!(trace_flags & TRACE_ITER_VERBOSE);
1378         log_action = classic ? &blk_log_action_classic : &blk_log_action;
1379
1380         if (t->action == BLK_TN_MESSAGE) {
1381                 ret = log_action(iter, long_act ? "message" : "m");
1382                 if (ret)
1383                         ret = blk_log_msg(s, iter->ent);
1384                 goto out;
1385         }
1386
1387         if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
1388                 ret = trace_seq_printf(s, "Unknown action %x\n", what);
1389         else {
1390                 ret = log_action(iter, what2act[what].act[long_act]);
1391                 if (ret)
1392                         ret = what2act[what].print(s, iter->ent);
1393         }
1394 out:
1395         return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
1396 }
1397
1398 static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
1399                                                int flags, struct trace_event *event)
1400 {
1401         return print_one_line(iter, false);
1402 }
1403
1404 static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
1405 {
1406         struct trace_seq *s = &iter->seq;
1407         struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
1408         const int offset = offsetof(struct blk_io_trace, sector);
1409         struct blk_io_trace old = {
1410                 .magic    = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
1411                 .time     = iter->ts,
1412         };
1413
1414         if (!trace_seq_putmem(s, &old, offset))
1415                 return 0;
1416         return trace_seq_putmem(s, &t->sector,
1417                                 sizeof(old) - offset + t->pdu_len);
1418 }
1419
1420 static enum print_line_t
1421 blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
1422                              struct trace_event *event)
1423 {
1424         return blk_trace_synthesize_old_trace(iter) ?
1425                         TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
1426 }
1427
1428 static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
1429 {
1430         if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1431                 return TRACE_TYPE_UNHANDLED;
1432
1433         return print_one_line(iter, true);
1434 }
1435
1436 static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
1437 {
1438         /* don't output context-info for blk_classic output */
1439         if (bit == TRACE_BLK_OPT_CLASSIC) {
1440                 if (set)
1441                         trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
1442                 else
1443                         trace_flags |= TRACE_ITER_CONTEXT_INFO;
1444         }
1445         return 0;
1446 }
1447
1448 static struct tracer blk_tracer __read_mostly = {
1449         .name           = "blk",
1450         .init           = blk_tracer_init,
1451         .reset          = blk_tracer_reset,
1452         .start          = blk_tracer_start,
1453         .stop           = blk_tracer_stop,
1454         .print_header   = blk_tracer_print_header,
1455         .print_line     = blk_tracer_print_line,
1456         .flags          = &blk_tracer_flags,
1457         .set_flag       = blk_tracer_set_flag,
1458 };
1459
1460 static struct trace_event_functions trace_blk_event_funcs = {
1461         .trace          = blk_trace_event_print,
1462         .binary         = blk_trace_event_print_binary,
1463 };
1464
1465 static struct trace_event trace_blk_event = {
1466         .type           = TRACE_BLK,
1467         .funcs          = &trace_blk_event_funcs,
1468 };
1469
1470 static int __init init_blk_tracer(void)
1471 {
1472         if (!register_ftrace_event(&trace_blk_event)) {
1473                 pr_warning("Warning: could not register block events\n");
1474                 return 1;
1475         }
1476
1477         if (register_tracer(&blk_tracer) != 0) {
1478                 pr_warning("Warning: could not register the block tracer\n");
1479                 unregister_ftrace_event(&trace_blk_event);
1480                 return 1;
1481         }
1482
1483         return 0;
1484 }
1485
1486 device_initcall(init_blk_tracer);
1487
1488 static int blk_trace_remove_queue(struct request_queue *q)
1489 {
1490         struct blk_trace *bt;
1491
1492         bt = xchg(&q->blk_trace, NULL);
1493         if (bt == NULL)
1494                 return -EINVAL;
1495
1496         if (atomic_dec_and_test(&blk_probes_ref))
1497                 blk_unregister_tracepoints();
1498
1499         blk_trace_free(bt);
1500         return 0;
1501 }
1502
1503 /*
1504  * Setup everything required to start tracing
1505  */
1506 static int blk_trace_setup_queue(struct request_queue *q,
1507                                  struct block_device *bdev)
1508 {
1509         struct blk_trace *old_bt, *bt = NULL;
1510         int ret = -ENOMEM;
1511
1512         bt = kzalloc(sizeof(*bt), GFP_KERNEL);
1513         if (!bt)
1514                 return -ENOMEM;
1515
1516         bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
1517         if (!bt->msg_data)
1518                 goto free_bt;
1519
1520         bt->dev = bdev->bd_dev;
1521         bt->act_mask = (u16)-1;
1522
1523         blk_trace_setup_lba(bt, bdev);
1524
1525         old_bt = xchg(&q->blk_trace, bt);
1526         if (old_bt != NULL) {
1527                 (void)xchg(&q->blk_trace, old_bt);
1528                 ret = -EBUSY;
1529                 goto free_bt;
1530         }
1531
1532         if (atomic_inc_return(&blk_probes_ref) == 1)
1533                 blk_register_tracepoints();
1534         return 0;
1535
1536 free_bt:
1537         blk_trace_free(bt);
1538         return ret;
1539 }
1540
1541 /*
1542  * sysfs interface to enable and configure tracing
1543  */
1544
1545 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1546                                          struct device_attribute *attr,
1547                                          char *buf);
1548 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1549                                           struct device_attribute *attr,
1550                                           const char *buf, size_t count);
1551 #define BLK_TRACE_DEVICE_ATTR(_name) \
1552         DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
1553                     sysfs_blk_trace_attr_show, \
1554                     sysfs_blk_trace_attr_store)
1555
1556 static BLK_TRACE_DEVICE_ATTR(enable);
1557 static BLK_TRACE_DEVICE_ATTR(act_mask);
1558 static BLK_TRACE_DEVICE_ATTR(pid);
1559 static BLK_TRACE_DEVICE_ATTR(start_lba);
1560 static BLK_TRACE_DEVICE_ATTR(end_lba);
1561
1562 static struct attribute *blk_trace_attrs[] = {
1563         &dev_attr_enable.attr,
1564         &dev_attr_act_mask.attr,
1565         &dev_attr_pid.attr,
1566         &dev_attr_start_lba.attr,
1567         &dev_attr_end_lba.attr,
1568         NULL
1569 };
1570
1571 struct attribute_group blk_trace_attr_group = {
1572         .name  = "trace",
1573         .attrs = blk_trace_attrs,
1574 };
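
/*
 * The group above typically shows up in sysfs as (disk name illustrative)
 *
 *	/sys/block/sda/trace/{enable,act_mask,pid,start_lba,end_lba}
 *
 * and likewise under each partition directory; writing "1" to "enable" sets
 * up tracing on the queue via blk_trace_setup_queue() above.
 */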
1575
1576 static const struct {
1577         int mask;
1578         const char *str;
1579 } mask_maps[] = {
1580         { BLK_TC_READ,          "read"          },
1581         { BLK_TC_WRITE,         "write"         },
1582         { BLK_TC_BARRIER,       "barrier"       },
1583         { BLK_TC_SYNC,          "sync"          },
1584         { BLK_TC_QUEUE,         "queue"         },
1585         { BLK_TC_REQUEUE,       "requeue"       },
1586         { BLK_TC_ISSUE,         "issue"         },
1587         { BLK_TC_COMPLETE,      "complete"      },
1588         { BLK_TC_FS,            "fs"            },
1589         { BLK_TC_PC,            "pc"            },
1590         { BLK_TC_AHEAD,         "ahead"         },
1591         { BLK_TC_META,          "meta"          },
1592         { BLK_TC_DISCARD,       "discard"       },
1593         { BLK_TC_DRV_DATA,      "drv_data"      },
1594 };
1595
1596 static int blk_trace_str2mask(const char *str)
1597 {
1598         int i;
1599         int mask = 0;
1600         char *buf, *s, *token;
1601
1602         buf = kstrdup(str, GFP_KERNEL);
1603         if (buf == NULL)
1604                 return -ENOMEM;
1605         s = strstrip(buf);
1606
1607         while (1) {
1608                 token = strsep(&s, ",");
1609                 if (token == NULL)
1610                         break;
1611
1612                 if (*token == '\0')
1613                         continue;
1614
1615                 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1616                         if (strcasecmp(token, mask_maps[i].str) == 0) {
1617                                 mask |= mask_maps[i].mask;
1618                                 break;
1619                         }
1620                 }
1621                 if (i == ARRAY_SIZE(mask_maps)) {
1622                         mask = -EINVAL;
1623                         break;
1624                 }
1625         }
1626         kfree(buf);
1627
1628         return mask;
1629 }
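
/*
 * Example (hypothetical device name): a write such as
 *
 *	# echo "read,write,sync" > /sys/block/sda/trace/act_mask
 *
 * ends up here and yields BLK_TC_READ | BLK_TC_WRITE | BLK_TC_SYNC.
 * strstrip() removes the trailing newline added by echo, matching is
 * case-insensitive, empty tokens as in "read,,write" are skipped, and
 * any token not found in mask_maps makes the whole write fail with
 * -EINVAL.
 */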
1630
1631 static ssize_t blk_trace_mask2str(char *buf, int mask)
1632 {
1633         int i;
1634         char *p = buf;
1635
1636         for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1637                 if (mask & mask_maps[i].mask) {
1638                         p += sprintf(p, "%s%s",
1639                                     (p == buf) ? "" : ",", mask_maps[i].str);
1640                 }
1641         }
1642         *p++ = '\n';
1643
1644         return p - buf;
1645 }
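
/*
 * The inverse of the above: reading act_mask goes through mask2str(),
 * which prints the active categories as one comma-separated line, e.g.
 * a mask of BLK_TC_READ | BLK_TC_SYNC comes back as "read,sync\n".
 * buf is the PAGE_SIZE sysfs buffer, which comfortably holds every name
 * in mask_maps, so the unchecked sprintf()s cannot overrun it.
 */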
1646
1647 static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
1648 {
1649         if (bdev->bd_disk == NULL)
1650                 return NULL;
1651
1652         return bdev_get_queue(bdev);
1653 }
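
/*
 * A block_device obtained via bdget() may not have a gendisk attached
 * yet; treat that as "no queue" so both sysfs handlers below bail out
 * with -ENXIO instead of following a NULL disk pointer.
 */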
1654
1655 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1656                                          struct device_attribute *attr,
1657                                          char *buf)
1658 {
1659         struct hd_struct *p = dev_to_part(dev);
1660         struct request_queue *q;
1661         struct block_device *bdev;
1662         ssize_t ret = -ENXIO;
1663
1664         bdev = bdget(part_devt(p));
1665         if (bdev == NULL)
1666                 goto out;
1667
1668         q = blk_trace_get_queue(bdev);
1669         if (q == NULL)
1670                 goto out_bdput;
1671
1672         mutex_lock(&bdev->bd_mutex);
1673
1674         if (attr == &dev_attr_enable) {
1675                 ret = sprintf(buf, "%u\n", !!q->blk_trace);
1676                 goto out_unlock_bdev;
1677         }
1678
1679         if (q->blk_trace == NULL)
1680                 ret = sprintf(buf, "disabled\n");
1681         else if (attr == &dev_attr_act_mask)
1682                 ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
1683         else if (attr == &dev_attr_pid)
1684                 ret = sprintf(buf, "%u\n", q->blk_trace->pid);
1685         else if (attr == &dev_attr_start_lba)
1686                 ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
1687         else if (attr == &dev_attr_end_lba)
1688                 ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
1689
1690 out_unlock_bdev:
1691         mutex_unlock(&bdev->bd_mutex);
1692 out_bdput:
1693         bdput(bdev);
1694 out:
1695         return ret;
1696 }
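
/*
 * Read-side behaviour, e.g. (hypothetical disk sda):
 *
 *	# cat /sys/block/sda/trace/enable	-> "0" or "1"
 *	# cat /sys/block/sda/trace/act_mask	-> "disabled" until a trace
 *						   is attached, then the
 *						   comma-separated mask
 *
 * Everything is formatted under bd_mutex, so the trace cannot be set up
 * or torn down while an answer is being produced.
 */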
1697
1698 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1699                                           struct device_attribute *attr,
1700                                           const char *buf, size_t count)
1701 {
1702         struct block_device *bdev;
1703         struct request_queue *q;
1704         struct hd_struct *p;
1705         u64 value;
1706         ssize_t ret = -EINVAL;
1707
1708         if (count == 0)
1709                 goto out;
1710
1711         if (attr == &dev_attr_act_mask) {
1712                 if (sscanf(buf, "%llx", &value) != 1) {
1713                         /* Assume it is a list of trace category names */
1714                         ret = blk_trace_str2mask(buf);
1715                         if (ret < 0)
1716                                 goto out;
1717                         value = ret;
1718                 }
1719         } else if (sscanf(buf, "%llu", &value) != 1)
1720                 goto out;
1721
1722         ret = -ENXIO;
1723
1724         p = dev_to_part(dev);
1725         bdev = bdget(part_devt(p));
1726         if (bdev == NULL)
1727                 goto out;
1728
1729         q = blk_trace_get_queue(bdev);
1730         if (q == NULL)
1731                 goto out_bdput;
1732
1733         mutex_lock(&bdev->bd_mutex);
1734
1735         if (attr == &dev_attr_enable) {
1736                 if (value)
1737                         ret = blk_trace_setup_queue(q, bdev);
1738                 else
1739                         ret = blk_trace_remove_queue(q);
1740                 goto out_unlock_bdev;
1741         }
1742
1743         ret = 0;
1744         if (q->blk_trace == NULL)
1745                 ret = blk_trace_setup_queue(q, bdev);
1746
1747         if (ret == 0) {
1748                 if (attr == &dev_attr_act_mask)
1749                         q->blk_trace->act_mask = value;
1750                 else if (attr == &dev_attr_pid)
1751                         q->blk_trace->pid = value;
1752                 else if (attr == &dev_attr_start_lba)
1753                         q->blk_trace->start_lba = value;
1754                 else if (attr == &dev_attr_end_lba)
1755                         q->blk_trace->end_lba = value;
1756         }
1757
1758 out_unlock_bdev:
1759         mutex_unlock(&bdev->bd_mutex);
1760 out_bdput:
1761         bdput(bdev);
1762 out:
1763         return ret ? ret : count;
1764 }
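
/*
 * Write-side behaviour: "echo 1 > enable" attaches a trace with default
 * settings via blk_trace_setup_queue() and "echo 0 > enable" tears it
 * down again.  Writing any other attribute while no trace is attached
 * sets one up implicitly first, so for example (hypothetical disk sda):
 *
 *	# echo read,write > /sys/block/sda/trace/act_mask
 *
 * both creates the trace and narrows its mask in one step.  act_mask
 * also accepts a plain hex mask; the remaining attributes take decimal
 * values.
 */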
1765
1766 int blk_trace_init_sysfs(struct device *dev)
1767 {
1768         return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
1769 }
1770
1771 void blk_trace_remove_sysfs(struct device *dev)
1772 {
1773         sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
1774 }
1775
1776 #endif /* CONFIG_BLK_DEV_IO_TRACE */
1777
1778 #ifdef CONFIG_EVENT_TRACING
1779
1780 void blk_dump_cmd(char *buf, struct request *rq)
1781 {
1782         int i, end;
1783         int len = rq->cmd_len;
1784         unsigned char *cmd = rq->cmd;
1785
1786         if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
1787                 buf[0] = '\0';
1788                 return;
1789         }
1790
1791         for (end = len - 1; end >= 0; end--)
1792                 if (cmd[end])
1793                         break;
1794         end++;
1795
1796         for (i = 0; i < len; i++) {
1797                 buf += sprintf(buf, "%s%02x", i == 0 ? "" : " ", cmd[i]);
1798                 if (i == end && end != len - 1) {
1799                         sprintf(buf, " ..");
1800                         break;
1801                 }
1802         }
1803 }
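
/*
 * Only SCSI pass-through (REQ_TYPE_BLOCK_PC) requests carry a command
 * block worth dumping; anything else gets an empty string.  Runs of
 * trailing zero bytes are collapsed: for a 16-byte CDB whose last eight
 * bytes are all zero, this prints the first nine bytes and then " ..".
 */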
1804
1805 void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
1806 {
1807         int i = 0;
1808
1809         if (rw & WRITE)
1810                 rwbs[i++] = 'W';
1811         else if (rw & REQ_DISCARD)
1812                 rwbs[i++] = 'D';
1813         else if (bytes)
1814                 rwbs[i++] = 'R';
1815         else
1816                 rwbs[i++] = 'N';
1817
1818         if (rw & REQ_RAHEAD)
1819                 rwbs[i++] = 'A';
1820         if (rw & REQ_SYNC)
1821                 rwbs[i++] = 'S';
1822         if (rw & REQ_META)
1823                 rwbs[i++] = 'M';
1824         if (rw & REQ_SECURE)
1825                 rwbs[i++] = 'E';
1826
1827         rwbs[i] = '\0';
1828 }
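
/*
 * The rwbs string is the short action code filled into the block trace
 * events (and familiar from blktrace/blkparse output): one base
 * character (W/D/R/N) followed by any of A (readahead), S (sync),
 * M (meta) and E (secure), e.g. a synchronous write becomes "WS" and a
 * readahead read "RA".  The caller must provide at least 6 bytes: one
 * base character, up to four modifiers and the terminating NUL.
 */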
1829
1830 void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
1831 {
1832         int rw = rq->cmd_flags & 0x03;
1833         int bytes;
1834
1835         if (rq->cmd_flags & REQ_DISCARD)
1836                 rw |= REQ_DISCARD;
1837
1838         if (rq->cmd_flags & REQ_SECURE)
1839                 rw |= REQ_SECURE;
1840
1841         bytes = blk_rq_bytes(rq);
1842
1843         blk_fill_rwbs(rwbs, rw, bytes);
1844 }
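
/*
 * Request-based wrapper around blk_fill_rwbs(): the DISCARD and SECURE
 * flags are carried over from cmd_flags, and blk_rq_bytes() supplies
 * the size used to distinguish 'R' from 'N' for non-write requests.
 */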
1845
1846 #endif /* CONFIG_EVENT_TRACING */
1847