1 /*
2  * Performance events:
3  *
4  *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
5  *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
6  *    Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
7  *
8  * Data type definitions, declarations, prototypes.
9  *
10  *    Started by: Thomas Gleixner and Ingo Molnar
11  *
12  * For licensing details see kernel-base/COPYING
13  */
14 #ifndef _LINUX_PERF_EVENT_H
15 #define _LINUX_PERF_EVENT_H
16
17 #include <linux/types.h>
18 #include <linux/ioctl.h>
19 #include <asm/byteorder.h>
20
21 /*
22  * User-space ABI bits:
23  */
24
25 /*
26  * attr.type
27  */
28 enum perf_type_id {
29         PERF_TYPE_HARDWARE                      = 0,
30         PERF_TYPE_SOFTWARE                      = 1,
31         PERF_TYPE_TRACEPOINT                    = 2,
32         PERF_TYPE_HW_CACHE                      = 3,
33         PERF_TYPE_RAW                           = 4,
34         PERF_TYPE_BREAKPOINT                    = 5,
35
36         PERF_TYPE_MAX,                          /* non-ABI */
37 };
38
39 /*
40  * Generalized performance event event_id types, used by the
41  * attr.config parameter of the sys_perf_event_open()
42  * syscall:
43  */
44 enum perf_hw_id {
45         /*
46          * Common hardware events, generalized by the kernel:
47          */
48         PERF_COUNT_HW_CPU_CYCLES                = 0,
49         PERF_COUNT_HW_INSTRUCTIONS              = 1,
50         PERF_COUNT_HW_CACHE_REFERENCES          = 2,
51         PERF_COUNT_HW_CACHE_MISSES              = 3,
52         PERF_COUNT_HW_BRANCH_INSTRUCTIONS       = 4,
53         PERF_COUNT_HW_BRANCH_MISSES             = 5,
54         PERF_COUNT_HW_BUS_CYCLES                = 6,
55
56         PERF_COUNT_HW_MAX,                      /* non-ABI */
57 };
58
59 /*
60  * Generalized hardware cache events:
61  *
62  *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
63  *       { read, write, prefetch } x
64  *       { accesses, misses }
65  */
66 enum perf_hw_cache_id {
67         PERF_COUNT_HW_CACHE_L1D                 = 0,
68         PERF_COUNT_HW_CACHE_L1I                 = 1,
69         PERF_COUNT_HW_CACHE_LL                  = 2,
70         PERF_COUNT_HW_CACHE_DTLB                = 3,
71         PERF_COUNT_HW_CACHE_ITLB                = 4,
72         PERF_COUNT_HW_CACHE_BPU                 = 5,
73
74         PERF_COUNT_HW_CACHE_MAX,                /* non-ABI */
75 };
76
77 enum perf_hw_cache_op_id {
78         PERF_COUNT_HW_CACHE_OP_READ             = 0,
79         PERF_COUNT_HW_CACHE_OP_WRITE            = 1,
80         PERF_COUNT_HW_CACHE_OP_PREFETCH         = 2,
81
82         PERF_COUNT_HW_CACHE_OP_MAX,             /* non-ABI */
83 };
84
85 enum perf_hw_cache_op_result_id {
86         PERF_COUNT_HW_CACHE_RESULT_ACCESS       = 0,
87         PERF_COUNT_HW_CACHE_RESULT_MISS         = 1,
88
89         PERF_COUNT_HW_CACHE_RESULT_MAX,         /* non-ABI */
90 };
91
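/*
 * Illustrative sketch (not part of the enums above): for
 * PERF_TYPE_HW_CACHE events the three ids above are combined into
 * attr.config as
 *
 *      config = (perf_hw_cache_id) |
 *               (perf_hw_cache_op_id << 8) |
 *               (perf_hw_cache_op_result_id << 16);
 *
 * e.g. counting L1 data-cache read misses:
 *
 *      attr.type   = PERF_TYPE_HW_CACHE;
 *      attr.config = PERF_COUNT_HW_CACHE_L1D |
 *                    (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *                    (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */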
92 /*
93  * Special "software" events provided by the kernel, even if the hardware
94  * does not support performance events. These events measure various
95  * physical and software events of the kernel (and allow profiling
96  * them as well):
97  */
98 enum perf_sw_ids {
99         PERF_COUNT_SW_CPU_CLOCK                 = 0,
100         PERF_COUNT_SW_TASK_CLOCK                = 1,
101         PERF_COUNT_SW_PAGE_FAULTS               = 2,
102         PERF_COUNT_SW_CONTEXT_SWITCHES          = 3,
103         PERF_COUNT_SW_CPU_MIGRATIONS            = 4,
104         PERF_COUNT_SW_PAGE_FAULTS_MIN           = 5,
105         PERF_COUNT_SW_PAGE_FAULTS_MAJ           = 6,
106         PERF_COUNT_SW_ALIGNMENT_FAULTS          = 7,
107         PERF_COUNT_SW_EMULATION_FAULTS          = 8,
108
109         PERF_COUNT_SW_MAX,                      /* non-ABI */
110 };
111
112 /*
113  * Bits that can be set in attr.sample_type to request information
114  * in the overflow packets.
115  */
116 enum perf_event_sample_format {
117         PERF_SAMPLE_IP                          = 1U << 0,
118         PERF_SAMPLE_TID                         = 1U << 1,
119         PERF_SAMPLE_TIME                        = 1U << 2,
120         PERF_SAMPLE_ADDR                        = 1U << 3,
121         PERF_SAMPLE_READ                        = 1U << 4,
122         PERF_SAMPLE_CALLCHAIN                   = 1U << 5,
123         PERF_SAMPLE_ID                          = 1U << 6,
124         PERF_SAMPLE_CPU                         = 1U << 7,
125         PERF_SAMPLE_PERIOD                      = 1U << 8,
126         PERF_SAMPLE_STREAM_ID                   = 1U << 9,
127         PERF_SAMPLE_RAW                         = 1U << 10,
128
129         PERF_SAMPLE_MAX = 1U << 11,             /* non-ABI */
130 };
131
132 /*
133  * The format of the data returned by read() on a perf event fd,
134  * as specified by attr.read_format:
135  *
136  * struct read_format {
137  *      { u64           value;
138  *        { u64         time_enabled; } && PERF_FORMAT_ENABLED
139  *        { u64         time_running; } && PERF_FORMAT_RUNNING
140  *        { u64         id;           } && PERF_FORMAT_ID
141  *      } && !PERF_FORMAT_GROUP
142  *
143  *      { u64           nr;
144  *        { u64         time_enabled; } && PERF_FORMAT_ENABLED
145  *        { u64         time_running; } && PERF_FORMAT_RUNNING
146  *        { u64         value;
147  *          { u64       id;           } && PERF_FORMAT_ID
148  *        }             cntr[nr];
149  *      } && PERF_FORMAT_GROUP
150  * };
151  */
152 enum perf_event_read_format {
153         PERF_FORMAT_TOTAL_TIME_ENABLED          = 1U << 0,
154         PERF_FORMAT_TOTAL_TIME_RUNNING          = 1U << 1,
155         PERF_FORMAT_ID                          = 1U << 2,
156         PERF_FORMAT_GROUP                       = 1U << 3,
157
158         PERF_FORMAT_MAX = 1U << 4,              /* non-ABI */
159 };
160
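/*
 * Illustrative sketch of a user-space reader for the PERF_FORMAT_GROUP
 * layout above, with all four format bits set. Error handling is
 * elided; MAX_COUNTERS is a hypothetical upper bound on the group size
 * and group_fd is assumed to be a group leader opened with this
 * read_format:
 *
 *      struct {
 *              __u64 nr;
 *              __u64 time_enabled;
 *              __u64 time_running;
 *              struct { __u64 value, id; } cntr[MAX_COUNTERS];
 *      } buf;
 *      __u64 i;
 *
 *      read(group_fd, &buf, sizeof(buf));
 *      for (i = 0; i < buf.nr; i++)
 *              printf("id %llu: %llu\n", buf.cntr[i].id, buf.cntr[i].value);
 */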
161 #define PERF_ATTR_SIZE_VER0     64      /* sizeof first published struct */
162
163 /*
164  * Hardware event_id to monitor via a performance monitoring event:
165  */
166 struct perf_event_attr {
167
168         /*
169          * Major type: hardware/software/tracepoint/etc.
170          */
171         __u32                   type;
172
173         /*
174          * Size of the attr structure, for fwd/bwd compat.
175          */
176         __u32                   size;
177
178         /*
179          * Type specific configuration information.
180          */
181         __u64                   config;
182
183         union {
184                 __u64           sample_period;
185                 __u64           sample_freq;
186         };
187
188         __u64                   sample_type;
189         __u64                   read_format;
190
191         __u64                   disabled       :  1, /* off by default        */
192                                 inherit        :  1, /* children inherit it   */
193                                 pinned         :  1, /* must always be on PMU */
194                                 exclusive      :  1, /* only group on PMU     */
195                                 exclude_user   :  1, /* don't count user      */
196                                 exclude_kernel :  1, /* ditto kernel          */
197                                 exclude_hv     :  1, /* ditto hypervisor      */
198                                 exclude_idle   :  1, /* don't count when idle */
199                                 mmap           :  1, /* include mmap data     */
200                                 comm           :  1, /* include comm data     */
201                                 freq           :  1, /* use freq, not period  */
202                                 inherit_stat   :  1, /* per task counts       */
203                                 enable_on_exec :  1, /* next exec enables     */
204                                 task           :  1, /* trace fork/exit       */
205                                 watermark      :  1, /* wakeup_watermark      */
206                                 /*
207                                  * precise_ip:
208                                  *
209                                  *  0 - SAMPLE_IP can have arbitrary skid
210                                  *  1 - SAMPLE_IP must have constant skid
211                                  *  2 - SAMPLE_IP requested to have 0 skid
212                                  *  3 - SAMPLE_IP must have 0 skid
213                                  *
214                                  *  See also PERF_RECORD_MISC_EXACT_IP
215                                  */
216                                 precise_ip     :  2, /* skid constraint       */
217                                 mmap_data      :  1, /* non-exec mmap data    */
218
219                                 __reserved_1   : 46;
220
221         union {
222                 __u32           wakeup_events;    /* wakeup every n events */
223                 __u32           wakeup_watermark; /* bytes before wakeup   */
224         };
225
226         __u32                   bp_type;
227         __u64                   bp_addr;
228         __u64                   bp_len;
229 };
230
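/*
 * Illustrative user-space sketch (not part of this header): opening a
 * counter for the generalized instruction event on the calling task.
 * There is no glibc wrapper for this syscall, so syscall(2) is used
 * directly; error handling is elided:
 *
 *      struct perf_event_attr attr;
 *      int fd;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.type     = PERF_TYPE_HARDWARE;
 *      attr.size     = sizeof(attr);
 *      attr.config   = PERF_COUNT_HW_INSTRUCTIONS;
 *      attr.disabled = 1;
 *
 *      fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * The four trailing arguments are pid (0: calling task), cpu (-1: any
 * CPU), group_fd (-1: no group) and flags.
 */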
231 /*
232  * Ioctls that can be done on a perf event fd:
233  */
234 #define PERF_EVENT_IOC_ENABLE           _IO ('$', 0)
235 #define PERF_EVENT_IOC_DISABLE          _IO ('$', 1)
236 #define PERF_EVENT_IOC_REFRESH          _IO ('$', 2)
237 #define PERF_EVENT_IOC_RESET            _IO ('$', 3)
238 #define PERF_EVENT_IOC_PERIOD           _IOW('$', 4, __u64)
239 #define PERF_EVENT_IOC_SET_OUTPUT       _IO ('$', 5)
240 #define PERF_EVENT_IOC_SET_FILTER       _IOW('$', 6, char *)
241
242 enum perf_event_ioc_flags {
243         PERF_IOC_FLAG_GROUP             = 1U << 0,
244 };
245
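/*
 * Illustrative usage sketch: the usual reset/enable/disable/read
 * sequence on an event fd. Passing PERF_IOC_FLAG_GROUP as the ioctl
 * argument applies the operation to the whole event group instead of
 * just this event:
 *
 *      ioctl(fd, PERF_EVENT_IOC_RESET,  0);
 *      ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *      run_workload();
 *      ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *      read(fd, &count, sizeof(count));
 *
 * run_workload() stands for the code under measurement.
 */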
246 /*
247  * Structure of the page that can be mapped via mmap
248  */
249 struct perf_event_mmap_page {
250         __u32   version;                /* version number of this structure */
251         __u32   compat_version;         /* lowest version this is compat with */
252
253         /*
254          * Bits needed to read the hw events in user-space.
255          *
256          *   u32 seq;
257          *   s64 count;
258          *
259          *   do {
260          *     seq = pc->lock;
261          *
262          *     barrier()
263          *     if (pc->index) {
264          *       count = pmc_read(pc->index - 1);
265          *       count += pc->offset;
266          *     } else
267          *       goto regular_read;
268          *
269          *     barrier();
270          *   } while (pc->lock != seq);
271          *
272          * NOTE: for obvious reasons this only works on self-monitoring
273          *       processes.
274          */
275         __u32   lock;                   /* seqlock for synchronization */
276         __u32   index;                  /* hardware event identifier */
277         __s64   offset;                 /* add to hardware event value */
278         __u64   time_enabled;           /* time event active */
279         __u64   time_running;           /* time event on cpu */
280
281                 /*
282                  * Hole for extension of the self monitor capabilities
283                  */
284
285         __u64   __reserved[123];        /* align to 1k */
286
287         /*
288          * Control data for the mmap() data buffer.
289          *
290          * On SMP capable platforms, user-space should issue an rmb()
291          * after reading the @data_head value -- see
292          * perf_event_wakeup().
293          *
294          * When the mapping is PROT_WRITE the @data_tail value should be
295          * written by userspace to reflect the last read data. In this case
296          * the kernel will not over-write unread data.
297          */
298         __u64   data_head;              /* head in the data section */
299         __u64   data_tail;              /* user-space written tail */
300 };
301
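/*
 * Illustrative sketch of a user-space consumer honouring the
 * @data_head/@data_tail protocol above. "rmb()" and "mb()" stand for
 * suitable user-space memory barriers; "data" and "data_size" are the
 * mapped (power of two sized) data area following this page, and the
 * mapping is assumed to be PROT_READ|PROT_WRITE:
 *
 *      struct perf_event_mmap_page *pc = base;
 *      __u64 head, tail;
 *
 *      head = pc->data_head;
 *      rmb();
 *      for (tail = pc->data_tail; tail < head; ) {
 *              struct perf_event_header *hdr = data + (tail & (data_size - 1));
 *              consume(hdr);
 *              tail += hdr->size;
 *      }
 *      mb();
 *      pc->data_tail = tail;
 *
 * consume() is hypothetical; struct perf_event_header is defined below.
 */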
302 #define PERF_RECORD_MISC_CPUMODE_MASK           (7 << 0)
303 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN        (0 << 0)
304 #define PERF_RECORD_MISC_KERNEL                 (1 << 0)
305 #define PERF_RECORD_MISC_USER                   (2 << 0)
306 #define PERF_RECORD_MISC_HYPERVISOR             (3 << 0)
307 #define PERF_RECORD_MISC_GUEST_KERNEL           (4 << 0)
308 #define PERF_RECORD_MISC_GUEST_USER             (5 << 0)
309
310 /*
311  * Indicates that the content of PERF_SAMPLE_IP points to
312  * the actual instruction that triggered the event. See also
313  * perf_event_attr::precise_ip.
314  */
315 #define PERF_RECORD_MISC_EXACT_IP               (1 << 14)
316 /*
317  * Reserve the last bit to indicate some extended misc field
318  */
319 #define PERF_RECORD_MISC_EXT_RESERVED           (1 << 15)
320
321 struct perf_event_header {
322         __u32   type;
323         __u16   misc;
324         __u16   size;
325 };
326
327 enum perf_event_type {
328
329         /*
330          * The MMAP events record the PROT_EXEC mappings so that we can
331          * correlate userspace IPs to code. They have the following structure:
332          *
333          * struct {
334          *      struct perf_event_header        header;
335          *
336          *      u32                             pid, tid;
337          *      u64                             addr;
338          *      u64                             len;
339          *      u64                             pgoff;
340          *      char                            filename[];
341          * };
342          */
343         PERF_RECORD_MMAP                        = 1,
344
345         /*
346          * struct {
347          *      struct perf_event_header        header;
348          *      u64                             id;
349          *      u64                             lost;
350          * };
351          */
352         PERF_RECORD_LOST                        = 2,
353
354         /*
355          * struct {
356          *      struct perf_event_header        header;
357          *
358          *      u32                             pid, tid;
359          *      char                            comm[];
360          * };
361          */
362         PERF_RECORD_COMM                        = 3,
363
364         /*
365          * struct {
366          *      struct perf_event_header        header;
367          *      u32                             pid, ppid;
368          *      u32                             tid, ptid;
369          *      u64                             time;
370          * };
371          */
372         PERF_RECORD_EXIT                        = 4,
373
374         /*
375          * struct {
376          *      struct perf_event_header        header;
377          *      u64                             time;
378          *      u64                             id;
379          *      u64                             stream_id;
380          * };
381          */
382         PERF_RECORD_THROTTLE                    = 5,
383         PERF_RECORD_UNTHROTTLE                  = 6,
384
385         /*
386          * struct {
387          *      struct perf_event_header        header;
388          *      u32                             pid, ppid;
389          *      u32                             tid, ptid;
390          *      u64                             time;
391          * };
392          */
393         PERF_RECORD_FORK                        = 7,
394
395         /*
396          * struct {
397          *      struct perf_event_header        header;
398          *      u32                             pid, tid;
399          *
400          *      struct read_format              values;
401          * };
402          */
403         PERF_RECORD_READ                        = 8,
404
405         /*
406          * struct {
407          *      struct perf_event_header        header;
408          *
409          *      { u64                   ip;       } && PERF_SAMPLE_IP
410          *      { u32                   pid, tid; } && PERF_SAMPLE_TID
411          *      { u64                   time;     } && PERF_SAMPLE_TIME
412          *      { u64                   addr;     } && PERF_SAMPLE_ADDR
413          *      { u64                   id;       } && PERF_SAMPLE_ID
414          *      { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
415          *      { u32                   cpu, res; } && PERF_SAMPLE_CPU
416          *      { u64                   period;   } && PERF_SAMPLE_PERIOD
417          *
418          *      { struct read_format    values;   } && PERF_SAMPLE_READ
419          *
420          *      { u64                   nr,
421          *        u64                   ips[nr];  } && PERF_SAMPLE_CALLCHAIN
422          *
423          *      #
424          *      # The RAW record below is opaque data wrt the ABI
425          *      #
426          *      # That is, the ABI doesn't make any promises wrt
427          *      # the stability of its content, it may vary depending
428          *      # on event, hardware, kernel version and phase of
429          *      # the moon.
430          *      #
431          *      # In other words, PERF_SAMPLE_RAW contents are not an ABI.
432          *      #
433          *
434          *      { u32                   size;
435          *        char                  data[size];}&& PERF_SAMPLE_RAW
436          * };
437          */
438         PERF_RECORD_SAMPLE                      = 9,
439
440         PERF_RECORD_MAX,                        /* non-ABI */
441 };
442
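/*
 * Illustrative sketch of walking a stream of the records above
 * (user-space, simplified: assumes a linear copy in which no record
 * wraps). buf and len describe the copied byte range:
 *
 *      struct perf_event_header *hdr = buf;
 *
 *      while ((char *)hdr < (char *)buf + len) {
 *              switch (hdr->type) {
 *              case PERF_RECORD_SAMPLE:
 *                      ...decode fields per attr.sample_type...
 *                      break;
 *              case PERF_RECORD_MMAP:
 *              case PERF_RECORD_LOST:
 *              default:
 *                      break;
 *              }
 *              hdr = (void *)hdr + hdr->size;
 *      }
 *
 * Note the SAMPLE payload layout follows the order of the bits listed
 * in the PERF_RECORD_SAMPLE comment above.
 */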
443 enum perf_callchain_context {
444         PERF_CONTEXT_HV                 = (__u64)-32,
445         PERF_CONTEXT_KERNEL             = (__u64)-128,
446         PERF_CONTEXT_USER               = (__u64)-512,
447
448         PERF_CONTEXT_GUEST              = (__u64)-2048,
449         PERF_CONTEXT_GUEST_KERNEL       = (__u64)-2176,
450         PERF_CONTEXT_GUEST_USER         = (__u64)-2560,
451
452         PERF_CONTEXT_MAX                = (__u64)-4095,
453 };
454
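/*
 * Illustrative example: these context values appear inline in a
 * PERF_SAMPLE_CALLCHAIN ips[] array, marking the domain of the
 * addresses that follow them, e.g.:
 *
 *      ips[] = { PERF_CONTEXT_KERNEL, kip0, kip1,
 *                PERF_CONTEXT_USER,   uip0, uip1 };
 *
 * where kipN and uipN are hypothetical kernel and user return
 * addresses.
 */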
455 #define PERF_FLAG_FD_NO_GROUP   (1U << 0)
456 #define PERF_FLAG_FD_OUTPUT     (1U << 1)
457
458 #ifdef __KERNEL__
459 /*
460  * Kernel-internal data types and definitions:
461  */
462
463 #ifdef CONFIG_PERF_EVENTS
464 # include <asm/perf_event.h>
465 # include <asm/local64.h>
466 #endif
467
468 struct perf_guest_info_callbacks {
469         int (*is_in_guest) (void);
470         int (*is_user_mode) (void);
471         unsigned long (*get_guest_ip) (void);
472 };
473
474 #ifdef CONFIG_HAVE_HW_BREAKPOINT
475 #include <asm/hw_breakpoint.h>
476 #endif
477
478 #include <linux/list.h>
479 #include <linux/mutex.h>
480 #include <linux/rculist.h>
481 #include <linux/rcupdate.h>
482 #include <linux/spinlock.h>
483 #include <linux/hrtimer.h>
484 #include <linux/fs.h>
485 #include <linux/pid_namespace.h>
486 #include <linux/workqueue.h>
487 #include <linux/ftrace.h>
488 #include <linux/cpu.h>
489 #include <linux/irq_work.h>
490 #include <linux/jump_label_ref.h>
491 #include <asm/atomic.h>
492 #include <asm/local.h>
493
494 #define PERF_MAX_STACK_DEPTH            255
495
496 struct perf_callchain_entry {
497         __u64                           nr;
498         __u64                           ip[PERF_MAX_STACK_DEPTH];
499 };
500
501 struct perf_raw_record {
502         u32                             size;
503         void                            *data;
504 };
505
506 struct perf_branch_entry {
507         __u64                           from;
508         __u64                           to;
509         __u64                           flags;
510 };
511
512 struct perf_branch_stack {
513         __u64                           nr;
514         struct perf_branch_entry        entries[0];
515 };
516
517 struct task_struct;
518
519 /**
520  * struct hw_perf_event - performance event hardware details:
521  */
522 struct hw_perf_event {
523 #ifdef CONFIG_PERF_EVENTS
524         union {
525                 struct { /* hardware */
526                         u64             config;
527                         u64             last_tag;
528                         unsigned long   config_base;
529                         unsigned long   event_base;
530                         int             idx;
531                         int             last_cpu;
532                 };
533                 struct { /* software */
534                         struct hrtimer  hrtimer;
535                 };
536 #ifdef CONFIG_HAVE_HW_BREAKPOINT
537                 struct { /* breakpoint */
538                         struct arch_hw_breakpoint       info;
539                         struct list_head                bp_list;
540                         /*
541                          * Crufty hack to avoid the chicken and egg
542                          * problem hw_breakpoint has with context
543                          * creation and event initialization.
544                          */
545                         struct task_struct              *bp_target;
546                 };
547 #endif
548         };
549         int                             state;
550         local64_t                       prev_count;
551         u64                             sample_period;
552         u64                             last_period;
553         local64_t                       period_left;
554         u64                             interrupts;
555
556         u64                             freq_time_stamp;
557         u64                             freq_count_stamp;
558 #endif
559 };
560
561 /*
562  * hw_perf_event::state flags
563  */
564 #define PERF_HES_STOPPED        0x01 /* the counter is stopped */
565 #define PERF_HES_UPTODATE       0x02 /* event->count up-to-date */
566 #define PERF_HES_ARCH           0x04
567
568 struct perf_event;
569
570 /*
571  * Common implementation detail of pmu::{start,commit,cancel}_txn
572  */
573 #define PERF_EVENT_TXN 0x1
574
575 /**
576  * struct pmu - generic performance monitoring unit
577  */
578 struct pmu {
579         struct list_head                entry;
580
581         int * __percpu                  pmu_disable_count;
582         struct perf_cpu_context * __percpu pmu_cpu_context;
583         int                             task_ctx_nr;
584
585         /*
586          * Fully disable/enable this PMU; this can be used to protect
587          * against the PMI as well as for lazy/batch writing of the MSRs.
588          */
589         void (*pmu_enable)              (struct pmu *pmu); /* optional */
590         void (*pmu_disable)             (struct pmu *pmu); /* optional */
591
592         /*
593          * Try and initialize the event for this PMU.
594          * Should return -ENOENT when the @event doesn't match this PMU.
595          */
596         int (*event_init)               (struct perf_event *event);
597
598 #define PERF_EF_START   0x01            /* start the counter when adding    */
599 #define PERF_EF_RELOAD  0x02            /* reload the counter when starting */
600 #define PERF_EF_UPDATE  0x04            /* update the counter when stopping */
601
602         /*
603          * Adds/Removes a counter to/from the PMU; this can be done inside
604          * a transaction, see the ->*_txn() methods.
605          */
606         int  (*add)                     (struct perf_event *event, int flags);
607         void (*del)                     (struct perf_event *event, int flags);
608
609         /*
610          * Starts/Stops a counter present on the PMU. The PMI handler
611          * should stop the counter when perf_event_overflow() returns
612          * !0. ->start() will be used to continue.
613          */
614         void (*start)                   (struct perf_event *event, int flags);
615         void (*stop)                    (struct perf_event *event, int flags);
616
617         /*
618          * Updates the counter value of the event.
619          */
620         void (*read)                    (struct perf_event *event);
621
622         /*
623          * Group event scheduling is treated as a transaction: add the
624          * group's events as a whole and perform one schedulability test.
625          * If the test fails, roll back the whole group.
626          *
627          * Start the transaction, after this ->add() doesn't need to
628          * do schedulability tests.
629          */
630         void (*start_txn)       (struct pmu *pmu); /* optional */
631         /*
632          * If ->start_txn() disabled the ->add() schedulability test
633          * then ->commit_txn() is required to perform one. On success
634          * the transaction is closed. On error the transaction is kept
635          * open until ->cancel_txn() is called.
636          */
637         int  (*commit_txn)      (struct pmu *pmu); /* optional */
638         /*
639          * Will cancel the transaction, assumes ->del() is called
640          * for each successful ->add() during the transaction.
641          */
642         void (*cancel_txn)      (struct pmu *pmu); /* optional */
643 };
644
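/*
 * Minimal registration sketch (illustrative and simplified; the my_*
 * names are hypothetical and a real driver also has to program the
 * actual counters). event_init() must return -ENOENT for events that
 * belong to another PMU so the core can keep looking:
 *
 *      static int my_event_init(struct perf_event *event)
 *      {
 *              if (event->attr.type != PERF_TYPE_RAW)
 *                      return -ENOENT;
 *              return 0;
 *      }
 *
 *      static struct pmu my_pmu = {
 *              .event_init     = my_event_init,
 *              .add            = my_add,
 *              .del            = my_del,
 *              .start          = my_start,
 *              .stop           = my_stop,
 *              .read           = my_read,
 *      };
 *
 *      perf_pmu_register(&my_pmu);
 */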
645 /**
646  * enum perf_event_active_state - the states of an event
647  */
648 enum perf_event_active_state {
649         PERF_EVENT_STATE_ERROR          = -2,
650         PERF_EVENT_STATE_OFF            = -1,
651         PERF_EVENT_STATE_INACTIVE       =  0,
652         PERF_EVENT_STATE_ACTIVE         =  1,
653 };
654
655 struct file;
656
657 #define PERF_BUFFER_WRITABLE            0x01
658
659 struct perf_buffer {
660         atomic_t                        refcount;
661         struct rcu_head                 rcu_head;
662 #ifdef CONFIG_PERF_USE_VMALLOC
663         struct work_struct              work;
664         int                             page_order;     /* allocation order  */
665 #endif
666         int                             nr_pages;       /* nr of data pages  */
667         int                             writable;       /* are we writable   */
668
669         atomic_t                        poll;           /* POLL_ for wakeups */
670
671         local_t                         head;           /* write position    */
672         local_t                         nest;           /* nested writers    */
673         local_t                         events;         /* event limit       */
674         local_t                         wakeup;         /* wakeup stamp      */
675         local_t                         lost;           /* nr records lost   */
676
677         long                            watermark;      /* wakeup watermark  */
678
679         struct perf_event_mmap_page     *user_page;
680         void                            *data_pages[0];
681 };
682
683 struct perf_sample_data;
684
685 typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
686                                         struct perf_sample_data *,
687                                         struct pt_regs *regs);
688
689 enum perf_group_flag {
690         PERF_GROUP_SOFTWARE = 0x1,
691 };
692
693 #define SWEVENT_HLIST_BITS      8
694 #define SWEVENT_HLIST_SIZE      (1 << SWEVENT_HLIST_BITS)
695
696 struct swevent_hlist {
697         struct hlist_head       heads[SWEVENT_HLIST_SIZE];
698         struct rcu_head         rcu_head;
699 };
700
701 #define PERF_ATTACH_CONTEXT     0x01
702 #define PERF_ATTACH_GROUP       0x02
703 #define PERF_ATTACH_TASK        0x04
704
705 /**
706  * struct perf_event - performance event kernel representation:
707  */
708 struct perf_event {
709 #ifdef CONFIG_PERF_EVENTS
710         struct list_head                group_entry;
711         struct list_head                event_entry;
712         struct list_head                sibling_list;
713         struct hlist_node               hlist_entry;
714         int                             nr_siblings;
715         int                             group_flags;
716         struct perf_event               *group_leader;
717         struct pmu                      *pmu;
718
719         enum perf_event_active_state    state;
720         unsigned int                    attach_state;
721         local64_t                       count;
722         atomic64_t                      child_count;
723
724         /*
725          * These are the total time in nanoseconds that the event
726          * has been enabled (i.e. eligible to run, and the task has
727          * been scheduled in, if this is a per-task event)
728          * and running (scheduled onto the CPU), respectively.
729          *
730          * They are computed from tstamp_enabled, tstamp_running and
731          * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
732          */
733         u64                             total_time_enabled;
734         u64                             total_time_running;
735
736         /*
737          * These are timestamps used for computing total_time_enabled
738          * and total_time_running when the event is in INACTIVE or
739          * ACTIVE state, measured in nanoseconds from an arbitrary point
740          * in time.
741          * tstamp_enabled: the notional time when the event was enabled
742          * tstamp_running: the notional time when the event was scheduled on
743          * tstamp_stopped: in INACTIVE state, the notional time when the
744          *      event was scheduled off.
745          */
746         u64                             tstamp_enabled;
747         u64                             tstamp_running;
748         u64                             tstamp_stopped;
749
750         struct perf_event_attr          attr;
751         struct hw_perf_event            hw;
752
753         struct perf_event_context       *ctx;
754         struct file                     *filp;
755
756         /*
757          * These accumulate total time (in nanoseconds) that children
758          * events have been enabled and running, respectively.
759          */
760         atomic64_t                      child_total_time_enabled;
761         atomic64_t                      child_total_time_running;
762
763         /*
764          * Protect attach/detach and child_list:
765          */
766         struct mutex                    child_mutex;
767         struct list_head                child_list;
768         struct perf_event               *parent;
769
770         int                             oncpu;
771         int                             cpu;
772
773         struct list_head                owner_entry;
774         struct task_struct              *owner;
775
776         /* mmap bits */
777         struct mutex                    mmap_mutex;
778         atomic_t                        mmap_count;
779         int                             mmap_locked;
780         struct user_struct              *mmap_user;
781         struct perf_buffer              *buffer;
782
783         /* poll related */
784         wait_queue_head_t               waitq;
785         struct fasync_struct            *fasync;
786
787         /* delayed work for NMIs and such */
788         int                             pending_wakeup;
789         int                             pending_kill;
790         int                             pending_disable;
791         struct irq_work                 pending;
792
793         atomic_t                        event_limit;
794
795         void (*destroy)(struct perf_event *);
796         struct rcu_head                 rcu_head;
797
798         struct pid_namespace            *ns;
799         u64                             id;
800
801         perf_overflow_handler_t         overflow_handler;
802
803 #ifdef CONFIG_EVENT_TRACING
804         struct ftrace_event_call        *tp_event;
805         struct event_filter             *filter;
806 #endif
807
808 #endif /* CONFIG_PERF_EVENTS */
809 };
810
811 enum perf_event_context_type {
812         task_context,
813         cpu_context,
814 };
815
816 /**
817  * struct perf_event_context - event context structure
818  *
819  * Used as a container for task events and CPU events as well:
820  */
821 struct perf_event_context {
822         enum perf_event_context_type    type;
823         struct pmu                      *pmu;
824         /*
825          * Protect the states of the events in the list,
826          * nr_active, and the list:
827          */
828         raw_spinlock_t                  lock;
829         /*
830          * Protect the list of events.  Locking either mutex or lock
831          * is sufficient to ensure the list doesn't change; to change
832          * the list you need to lock both the mutex and the spinlock.
833          */
834         struct mutex                    mutex;
835
836         struct list_head                pinned_groups;
837         struct list_head                flexible_groups;
838         struct list_head                event_list;
839         int                             nr_events;
840         int                             nr_active;
841         int                             is_active;
842         int                             nr_stat;
843         atomic_t                        refcount;
844         struct task_struct              *task;
845
846         /*
847          * Context clock, runs when context enabled.
848          */
849         u64                             time;
850         u64                             timestamp;
851
852         /*
853          * These fields let us detect when two contexts have both
854          * been cloned (inherited) from a common ancestor.
855          */
856         struct perf_event_context       *parent_ctx;
857         u64                             parent_gen;
858         u64                             generation;
859         int                             pin_count;
860         struct rcu_head                 rcu_head;
861 };
862
863 /*
864  * Number of contexts where an event can trigger:
865  *      task, softirq, hardirq, nmi.
866  */
867 #define PERF_NR_CONTEXTS        4
868
869 /**
870  * struct perf_cpu_context - per cpu event context structure
871  */
872 struct perf_cpu_context {
873         struct perf_event_context       ctx;
874         struct perf_event_context       *task_ctx;
875         int                             active_oncpu;
876         int                             exclusive;
877         struct list_head                rotation_list;
878         int                             jiffies_interval;
879 };
880
881 struct perf_output_handle {
882         struct perf_event               *event;
883         struct perf_buffer              *buffer;
884         unsigned long                   wakeup;
885         unsigned long                   size;
886         void                            *addr;
887         int                             page;
888         int                             nmi;
889         int                             sample;
890 };
891
892 #ifdef CONFIG_PERF_EVENTS
893
894 extern int perf_pmu_register(struct pmu *pmu);
895 extern void perf_pmu_unregister(struct pmu *pmu);
896
897 extern int perf_num_counters(void);
898 extern const char *perf_pmu_name(void);
899 extern void __perf_event_task_sched_in(struct task_struct *task);
900 extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
901
902 extern atomic_t perf_task_events;
903
904 static inline void perf_event_task_sched_in(struct task_struct *task)
905 {
906         COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
907 }
908
909 static inline
910 void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
911 {
912         COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
913 }
914
915 extern int perf_event_init_task(struct task_struct *child);
916 extern void perf_event_exit_task(struct task_struct *child);
917 extern void perf_event_free_task(struct task_struct *task);
918 extern void perf_event_delayed_put(struct task_struct *task);
919 extern void perf_event_print_debug(void);
920 extern void perf_pmu_disable(struct pmu *pmu);
921 extern void perf_pmu_enable(struct pmu *pmu);
922 extern int perf_event_task_disable(void);
923 extern int perf_event_task_enable(void);
924 extern void perf_event_update_userpage(struct perf_event *event);
925 extern int perf_event_release_kernel(struct perf_event *event);
926 extern struct perf_event *
927 perf_event_create_kernel_counter(struct perf_event_attr *attr,
928                                 int cpu,
929                                 struct task_struct *task,
930                                 perf_overflow_handler_t callback);
931 extern u64 perf_event_read_value(struct perf_event *event,
932                                  u64 *enabled, u64 *running);
933
934 struct perf_sample_data {
935         u64                             type;
936
937         u64                             ip;
938         struct {
939                 u32     pid;
940                 u32     tid;
941         }                               tid_entry;
942         u64                             time;
943         u64                             addr;
944         u64                             id;
945         u64                             stream_id;
946         struct {
947                 u32     cpu;
948                 u32     reserved;
949         }                               cpu_entry;
950         u64                             period;
951         struct perf_callchain_entry     *callchain;
952         struct perf_raw_record          *raw;
953 };
954
955 static inline
956 void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
957 {
958         data->addr = addr;
959         data->raw  = NULL;
960 }
961
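/*
 * Typical call-site sketch: initialize the on-stack sample data and
 * hand it to perf_event_overflow() from the overflow/(N)MI handler;
 * a non-zero return asks the handler to stop the event:
 *
 *      struct perf_sample_data data;
 *
 *      perf_sample_data_init(&data, 0);
 *      data.period = event->hw.last_period;
 *      if (perf_event_overflow(event, 1, &data, regs))
 *              event->pmu->stop(event, 0);
 */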
962 extern void perf_output_sample(struct perf_output_handle *handle,
963                                struct perf_event_header *header,
964                                struct perf_sample_data *data,
965                                struct perf_event *event);
966 extern void perf_prepare_sample(struct perf_event_header *header,
967                                 struct perf_sample_data *data,
968                                 struct perf_event *event,
969                                 struct pt_regs *regs);
970
971 extern int perf_event_overflow(struct perf_event *event, int nmi,
972                                  struct perf_sample_data *data,
973                                  struct pt_regs *regs);
974
975 /*
976  * Return 1 for a software event, 0 for a hardware event
977  */
978 static inline int is_software_event(struct perf_event *event)
979 {
980         return event->pmu->task_ctx_nr == perf_sw_context;
981 }
982
983 extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
984
985 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
986
987 #ifndef perf_arch_fetch_caller_regs
988 static inline void
989 perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
990 #endif
991
992 /*
993  * Take a snapshot of the regs. Skip ip and frame pointer to
994  * the nth caller. We only need a few of the regs:
995  * - ip for PERF_SAMPLE_IP
996  * - cs for user_mode() tests
997  * - bp for callchains
998  * - eflags, for future purposes, just in case
999  */
1000 static inline void perf_fetch_caller_regs(struct pt_regs *regs)
1001 {
1002         memset(regs, 0, sizeof(*regs));
1003
1004         perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
1005 }
1006
1007 static __always_inline void
1008 perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
1009 {
1010         struct pt_regs hot_regs;
1011
1012         JUMP_LABEL(&perf_swevent_enabled[event_id], have_event);
1013         return;
1014
1015 have_event:
1016         if (!regs) {
1017                 perf_fetch_caller_regs(&hot_regs);
1018                 regs = &hot_regs;
1019         }
1020         __perf_sw_event(event_id, nr, nmi, regs, addr);
1021 }
1022
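/*
 * Typical call site (sketch), e.g. from an architecture's page fault
 * path; thanks to the JUMP_LABEL above this compiles down to a
 * patched-out NOP while no such software event exists:
 *
 *      perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 */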
1023 extern void perf_event_mmap(struct vm_area_struct *vma);
1024 extern struct perf_guest_info_callbacks *perf_guest_cbs;
1025 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1026 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1027
1028 extern void perf_event_comm(struct task_struct *tsk);
1029 extern void perf_event_fork(struct task_struct *tsk);
1030
1031 /* Callchains */
1032 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
1033
1034 extern void perf_callchain_user(struct perf_callchain_entry *entry,
1035                                 struct pt_regs *regs);
1036 extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
1037                                   struct pt_regs *regs);
1038
1039
1040 static inline void
1041 perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
1042 {
1043         if (entry->nr < PERF_MAX_STACK_DEPTH)
1044                 entry->ip[entry->nr++] = ip;
1045 }
1046
1047 extern int sysctl_perf_event_paranoid;
1048 extern int sysctl_perf_event_mlock;
1049 extern int sysctl_perf_event_sample_rate;
1050
1051 static inline bool perf_paranoid_tracepoint_raw(void)
1052 {
1053         return sysctl_perf_event_paranoid > -1;
1054 }
1055
1056 static inline bool perf_paranoid_cpu(void)
1057 {
1058         return sysctl_perf_event_paranoid > 0;
1059 }
1060
1061 static inline bool perf_paranoid_kernel(void)
1062 {
1063         return sysctl_perf_event_paranoid > 1;
1064 }
1065
1066 extern void perf_event_init(void);
1067 extern void perf_tp_event(u64 addr, u64 count, void *record,
1068                           int entry_size, struct pt_regs *regs,
1069                           struct hlist_head *head, int rctx);
1070 extern void perf_bp_event(struct perf_event *event, void *data);
1071
1072 #ifndef perf_misc_flags
1073 #define perf_misc_flags(regs)   (user_mode(regs) ? PERF_RECORD_MISC_USER : \
1074                                  PERF_RECORD_MISC_KERNEL)
1075 #define perf_instruction_pointer(regs)  instruction_pointer(regs)
1076 #endif
1077
1078 extern int perf_output_begin(struct perf_output_handle *handle,
1079                              struct perf_event *event, unsigned int size,
1080                              int nmi, int sample);
1081 extern void perf_output_end(struct perf_output_handle *handle);
1082 extern void perf_output_copy(struct perf_output_handle *handle,
1083                              const void *buf, unsigned int len);
1084 extern int perf_swevent_get_recursion_context(void);
1085 extern void perf_swevent_put_recursion_context(int rctx);
1086 extern void perf_event_enable(struct perf_event *event);
1087 extern void perf_event_disable(struct perf_event *event);
1088 extern void perf_event_task_tick(void);
1089 #else
1090 static inline void
1091 perf_event_task_sched_in(struct task_struct *task)                      { }
1092 static inline void
1093 perf_event_task_sched_out(struct task_struct *task,
1094                             struct task_struct *next)                   { }
1095 static inline int perf_event_init_task(struct task_struct *child)       { return 0; }
1096 static inline void perf_event_exit_task(struct task_struct *child)      { }
1097 static inline void perf_event_free_task(struct task_struct *task)       { }
1098 static inline void perf_event_delayed_put(struct task_struct *task)     { }
1099 static inline void perf_event_print_debug(void)                         { }
1100 static inline int perf_event_task_disable(void)                         { return -EINVAL; }
1101 static inline int perf_event_task_enable(void)                          { return -EINVAL; }
1102
1103 static inline void
1104 perf_sw_event(u32 event_id, u64 nr, int nmi,
1105                      struct pt_regs *regs, u64 addr)                    { }
1106 static inline void
1107 perf_bp_event(struct perf_event *event, void *data)                     { }
1108
1109 static inline int perf_register_guest_info_callbacks
1110 (struct perf_guest_info_callbacks *callbacks) { return 0; }
1111 static inline int perf_unregister_guest_info_callbacks
1112 (struct perf_guest_info_callbacks *callbacks) { return 0; }
1113
1114 static inline void perf_event_mmap(struct vm_area_struct *vma)          { }
1115 static inline void perf_event_comm(struct task_struct *tsk)             { }
1116 static inline void perf_event_fork(struct task_struct *tsk)             { }
1117 static inline void perf_event_init(void)                                { }
1118 static inline int  perf_swevent_get_recursion_context(void)             { return -1; }
1119 static inline void perf_swevent_put_recursion_context(int rctx)         { }
1120 static inline void perf_event_enable(struct perf_event *event)          { }
1121 static inline void perf_event_disable(struct perf_event *event)         { }
1122 static inline void perf_event_task_tick(void)                           { }
1123 #endif
1124
1125 #define perf_output_put(handle, x) \
1126         perf_output_copy((handle), &(x), sizeof(x))
1127
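/*
 * Typical record-emission sketch built on the helpers above:
 * perf_output_begin() reserves space in the ring buffer and
 * perf_output_end() publishes it; header, nmi and sample come from
 * the caller:
 *
 *      struct perf_output_handle handle;
 *
 *      if (perf_output_begin(&handle, event, header.size, nmi, sample))
 *              return;
 *      perf_output_put(&handle, header);
 *      ...further perf_output_put() and perf_output_copy() calls...
 *      perf_output_end(&handle);
 */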
1128 /*
1129  * This has to have a higher priority than migration_notifier in sched.c.
1130  */
1131 #define perf_cpu_notifier(fn)                                   \
1132 do {                                                            \
1133         static struct notifier_block fn##_nb __cpuinitdata =    \
1134                 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
1135         fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,             \
1136                 (void *)(unsigned long)smp_processor_id());     \
1137         fn(&fn##_nb, (unsigned long)CPU_STARTING,               \
1138                 (void *)(unsigned long)smp_processor_id());     \
1139         fn(&fn##_nb, (unsigned long)CPU_ONLINE,                 \
1140                 (void *)(unsigned long)smp_processor_id());     \
1141         register_cpu_notifier(&fn##_nb);                        \
1142 } while (0)
1143
1144 #endif /* __KERNEL__ */
1145 #endif /* _LINUX_PERF_EVENT_H */