diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index ee9f1e7..e0786e3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -2,8 +2,8 @@
  * Performance events:
  *
  *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
- *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
- *    Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
+ *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
+ *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
  *
  * Data type definitions, declarations, prototypes.
  *
@@ -52,6 +52,8 @@ enum perf_hw_id {
        PERF_COUNT_HW_BRANCH_INSTRUCTIONS       = 4,
        PERF_COUNT_HW_BRANCH_MISSES             = 5,
        PERF_COUNT_HW_BUS_CYCLES                = 6,
+       PERF_COUNT_HW_STALLED_CYCLES_FRONTEND   = 7,
+       PERF_COUNT_HW_STALLED_CYCLES_BACKEND    = 8,
 
        PERF_COUNT_HW_MAX,                      /* non-ABI */
 };
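
The two new generic ids give tooling a portable name for front-end (fetch/decode) and back-end (execute) stall cycles; whether they count anything depends on the architecture PMU mapping them. A minimal user-space sketch of opening one such counter, assuming installed kernel headers and the usual hand-rolled syscall wrapper (glibc has none); on a PMU that does not map the event, perf_event_open() fails with ENOENT:

	#include <linux/perf_event.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <string.h>
	#include <stdio.h>

	static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
					int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
	}

	int main(void)
	{
		struct perf_event_attr attr;
		unsigned long long count;
		volatile int i;
		long fd;

		memset(&attr, 0, sizeof(attr));
		attr.type           = PERF_TYPE_HARDWARE;
		attr.size           = sizeof(attr);
		attr.config         = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND;
		attr.disabled       = 1;
		attr.exclude_kernel = 1;

		fd = sys_perf_event_open(&attr, 0, -1, -1, 0);	/* this task, any CPU */
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}

		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
		for (i = 0; i < 1000000; i++)
			;				/* workload under test */
		ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("front-end stall cycles: %llu\n", count);
		close(fd);
		return 0;
	}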
@@ -135,14 +137,14 @@ enum perf_event_sample_format {
  *
  * struct read_format {
  *     { u64           value;
- *       { u64         time_enabled; } && PERF_FORMAT_ENABLED
- *       { u64         time_running; } && PERF_FORMAT_RUNNING
+ *       { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
+ *       { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
  *       { u64         id;           } && PERF_FORMAT_ID
  *     } && !PERF_FORMAT_GROUP
  *
  *     { u64           nr;
- *       { u64         time_enabled; } && PERF_FORMAT_ENABLED
- *       { u64         time_running; } && PERF_FORMAT_RUNNING
+ *       { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
+ *       { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
  *       { u64         value;
  *         { u64       id;           } && PERF_FORMAT_ID
  *       }             cntr[nr];
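
Read-side sketch of the layout documented above for the single-event (!PERF_FORMAT_GROUP) case, for a counter opened with read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING; the struct and helper names below are illustrative, not part of the ABI. Scaling by time_enabled/time_running compensates for counter multiplexing:

	#include <stdint.h>
	#include <unistd.h>

	struct single_read {			/* !PERF_FORMAT_GROUP layout */
		uint64_t value;
		uint64_t time_enabled;		/* && PERF_FORMAT_TOTAL_TIME_ENABLED */
		uint64_t time_running;		/* && PERF_FORMAT_TOTAL_TIME_RUNNING */
	};

	static int read_scaled_count(int fd, uint64_t *scaled)
	{
		struct single_read r;

		if (read(fd, &r, sizeof(r)) != sizeof(r))
			return -1;

		/* Multiplexed counters run for only part of time_enabled. */
		if (r.time_running)
			*scaled = (uint64_t)((double)r.value *
					     r.time_enabled / r.time_running);
		else
			*scaled = r.value;
		return 0;
	}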
@@ -468,9 +470,9 @@ enum perf_callchain_context {
        PERF_CONTEXT_MAX                = (__u64)-4095,
 };
 
-#define PERF_FLAG_FD_NO_GROUP  (1U << 0)
-#define PERF_FLAG_FD_OUTPUT    (1U << 1)
-#define PERF_FLAG_PID_CGROUP   (1U << 2) /* pid=cgroup id, per-cpu mode only */
+#define PERF_FLAG_FD_NO_GROUP          (1U << 0)
+#define PERF_FLAG_FD_OUTPUT            (1U << 1)
+#define PERF_FLAG_PID_CGROUP           (1U << 2) /* pid=cgroup id, per-cpu mode only */
 
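With PERF_FLAG_PID_CGROUP the pid argument is reinterpreted as a file descriptor open on the monitored cgroup's directory in the perf_event hierarchy, and a real CPU number is mandatory (per-cpu mode only). A small sketch; the helper name is made up and the hierarchy's mount point is a property of the local setup:

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/*
	 * Attach one per-CPU counter to the cgroup whose perf_event directory
	 * is already open as cgrp_fd, e.g. open("<mount>/mygroup", O_RDONLY).
	 */
	static int open_cgroup_counter(struct perf_event_attr *attr,
				       int cgrp_fd, int cpu)
	{
		return syscall(__NR_perf_event_open, attr, cgrp_fd, cpu,
			       -1 /* no group */, PERF_FLAG_PID_CGROUP);
	}
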
 #ifdef __KERNEL__
 /*
@@ -484,9 +486,9 @@ enum perf_callchain_context {
 #endif
 
 struct perf_guest_info_callbacks {
-       int (*is_in_guest) (void);
-       int (*is_user_mode) (void);
-       unsigned long (*get_guest_ip) (void);
+       int                             (*is_in_guest)(void);
+       int                             (*is_user_mode)(void);
+       unsigned long                   (*get_guest_ip)(void);
 };
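
These hooks let a hypervisor tell perf whether a PMU interrupt arrived while a guest was executing, and at which guest IP, so samples can be tagged PERF_RECORD_MISC_GUEST_*. A kernel-side sketch of registering them, loosely modelled on how KVM wires this up; the my_hv_* functions are hypothetical stubs standing in for real vCPU state queries:

	#include <linux/perf_event.h>
	#include <linux/init.h>

	static int my_hv_is_in_guest(void)
	{
		return 0;	/* real code: is a vCPU executing on this CPU? */
	}

	static int my_hv_is_user_mode(void)
	{
		return 0;	/* real code: was the guest at CPL > 0? */
	}

	static unsigned long my_hv_get_guest_ip(void)
	{
		return 0;	/* real code: saved guest instruction pointer */
	}

	static struct perf_guest_info_callbacks my_hv_guest_cbs = {
		.is_in_guest	= my_hv_is_in_guest,
		.is_user_mode	= my_hv_is_user_mode,
		.get_guest_ip	= my_hv_get_guest_ip,
	};

	static int __init my_hv_init(void)
	{
		return perf_register_guest_info_callbacks(&my_hv_guest_cbs);
	}

	static void __exit my_hv_exit(void)
	{
		perf_unregister_guest_info_callbacks(&my_hv_guest_cbs);
	}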
 
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
@@ -505,7 +507,7 @@ struct perf_guest_info_callbacks {
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/irq_work.h>
-#include <linux/jump_label_ref.h>
+#include <linux/jump_label.h>
 #include <asm/atomic.h>
 #include <asm/local.h>
 
@@ -652,19 +654,19 @@ struct pmu {
         * Start the transaction, after this ->add() doesn't need to
         * do schedulability tests.
         */
-       void (*start_txn)       (struct pmu *pmu); /* optional */
+       void (*start_txn)               (struct pmu *pmu); /* optional */
        /*
         * If ->start_txn() disabled the ->add() schedulability test
         * then ->commit_txn() is required to perform one. On success
         * the transaction is closed. On error the transaction is kept
         * open until ->cancel_txn() is called.
         */
-       int  (*commit_txn)      (struct pmu *pmu); /* optional */
+       int  (*commit_txn)              (struct pmu *pmu); /* optional */
        /*
         * Will cancel the transaction, assumes ->del() is called
         * for each successful ->add() during the transaction.
         */
-       void (*cancel_txn)      (struct pmu *pmu); /* optional */
+       void (*cancel_txn)              (struct pmu *pmu); /* optional */
 };
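
A condensed sketch of the caller side of this transaction contract (the real logic is group_sched_in() in kernel/events/core.c): the group leader and its siblings are ->add()ed inside one transaction, ->commit_txn() performs the deferred schedulability test, and on any failure every successful ->add() is undone with ->del() before ->cancel_txn(). perf_pmu_register() installs no-op defaults for the optional hooks, so they can be called unconditionally:

	#include <linux/perf_event.h>
	#include <linux/list.h>
	#include <linux/errno.h>

	static int my_group_sched_in(struct pmu *pmu, struct perf_event *leader)
	{
		struct perf_event *event, *failed = NULL;

		pmu->start_txn(pmu);		/* ->add() may skip its own tests now */

		if (pmu->add(leader, PERF_EF_START))
			goto cancel;

		list_for_each_entry(event, &leader->sibling_list, group_entry) {
			if (pmu->add(event, PERF_EF_START)) {
				failed = event;
				goto undo;
			}
		}

		if (!pmu->commit_txn(pmu))	/* deferred schedulability test */
			return 0;
	undo:
		/* Back out every sibling that did get added ... */
		list_for_each_entry(event, &leader->sibling_list, group_entry) {
			if (event == failed)
				break;
			pmu->del(event, 0);
		}
		pmu->del(leader, 0);		/* ... and the leader itself */
	cancel:
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}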
 
 /**
@@ -712,15 +714,15 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
                                        struct pt_regs *regs);
 
 enum perf_group_flag {
-       PERF_GROUP_SOFTWARE = 0x1,
+       PERF_GROUP_SOFTWARE             = 0x1,
 };
 
-#define SWEVENT_HLIST_BITS     8
-#define SWEVENT_HLIST_SIZE     (1 << SWEVENT_HLIST_BITS)
+#define SWEVENT_HLIST_BITS             8
+#define SWEVENT_HLIST_SIZE             (1 << SWEVENT_HLIST_BITS)
 
 struct swevent_hlist {
-       struct hlist_head       heads[SWEVENT_HLIST_SIZE];
-       struct rcu_head         rcu_head;
+       struct hlist_head               heads[SWEVENT_HLIST_SIZE];
+       struct rcu_head                 rcu_head;
 };
 
 #define PERF_ATTACH_CONTEXT    0x01
@@ -733,13 +735,13 @@ struct swevent_hlist {
  * This is a per-cpu dynamically allocated data structure.
  */
 struct perf_cgroup_info {
-       u64 time;
-       u64 timestamp;
+       u64                             time;
+       u64                             timestamp;
 };
 
 struct perf_cgroup {
-       struct cgroup_subsys_state css;
-       struct perf_cgroup_info *info;  /* timing info, one per cpu */
+       struct                          cgroup_subsys_state css;
+       struct                          perf_cgroup_info *info; /* timing info, one per cpu */
 };
 #endif
 
@@ -923,7 +925,7 @@ struct perf_event_context {
 
 /*
  * Number of contexts where an event can trigger:
- *     task, softirq, hardirq, nmi.
+ *     task, softirq, hardirq, nmi.
  */
 #define PERF_NR_CONTEXTS       4
 
@@ -1001,8 +1003,7 @@ struct perf_sample_data {
        struct perf_raw_record          *raw;
 };
 
-static inline
-void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
+static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
 {
        data->addr = addr;
        data->raw  = NULL;
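
Typical driver-side use, sketched after the x86 PMI handler: the on-stack sample data is re-initialized on every overflow before being handed to perf_event_overflow(), whose second argument is 1 here because the handler runs in NMI context. my_pmu_handle_overflow() is a hypothetical wrapper:

	#include <linux/perf_event.h>
	#include <linux/ptrace.h>

	static void my_pmu_handle_overflow(struct perf_event *event,
					   struct pt_regs *regs)
	{
		struct perf_sample_data data;

		perf_sample_data_init(&data, 0);	/* addr = 0, raw = NULL */
		data.period = event->hw.last_period;

		/* Non-zero return: the event hit its limit, stop the counter. */
		if (perf_event_overflow(event, 1, &data, regs))
			event->pmu->stop(event, 0);
	}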
@@ -1034,13 +1035,12 @@ static inline int is_software_event(struct perf_event *event)
        return event->pmu->task_ctx_nr == perf_sw_context;
 }
 
-extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
 #ifndef perf_arch_fetch_caller_regs
-static inline void
-perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
+static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
 #endif
 
 /*
@@ -1063,26 +1063,24 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 {
        struct pt_regs hot_regs;
 
-       JUMP_LABEL(&perf_swevent_enabled[event_id], have_event);
-       return;
-
-have_event:
-       if (!regs) {
-               perf_fetch_caller_regs(&hot_regs);
-               regs = &hot_regs;
+       if (static_branch(&perf_swevent_enabled[event_id])) {
+               if (!regs) {
+                       perf_fetch_caller_regs(&hot_regs);
+                       regs = &hot_regs;
+               }
+               __perf_sw_event(event_id, nr, nmi, regs, addr);
        }
-       __perf_sw_event(event_id, nr, nmi, regs, addr);
 }
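
A representative call site for the fast path above, modelled on the architecture page-fault handlers: until at least one PERF_COUNT_SW_PAGE_FAULTS event exists, static_branch() keeps the whole call a patched-out NOP. The wrapper name is hypothetical:

	#include <linux/perf_event.h>
	#include <linux/ptrace.h>

	/* nmi = 0: ordinary process context; addr carries the faulting address. */
	static void my_account_page_fault(struct pt_regs *regs, unsigned long address)
	{
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
	}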
 
-extern atomic_t perf_sched_events;
+extern struct jump_label_key perf_sched_events;
 
 static inline void perf_event_task_sched_in(struct task_struct *task)
 {
-       COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task));
+       if (static_branch(&perf_sched_events))
+               __perf_event_task_sched_in(task);
 }
 
-static inline
-void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
 {
        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
 
@@ -1100,14 +1098,10 @@ extern void perf_event_fork(struct task_struct *tsk);
 /* Callchains */
 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
 
-extern void perf_callchain_user(struct perf_callchain_entry *entry,
-                               struct pt_regs *regs);
-extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
-                                 struct pt_regs *regs);
-
+extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
 
-static inline void
-perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
 {
        if (entry->nr < PERF_MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
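
An architecture backend fills the per-CPU entry through this helper. A hypothetical frame-pointer walker as a sketch: the frame layout and my_first_frame() are invented, and a real port must validate every frame address before dereferencing it:

	#include <linux/perf_event.h>
	#include <linux/ptrace.h>

	struct my_frame {			/* hypothetical frame record */
		struct my_frame	*next;
		unsigned long	return_addr;
	};

	static struct my_frame *my_first_frame(struct pt_regs *regs)
	{
		return NULL;	/* arch-specific: derive from the frame pointer */
	}

	void perf_callchain_kernel(struct perf_callchain_entry *entry,
				   struct pt_regs *regs)
	{
		struct my_frame *frame = my_first_frame(regs);

		/* The interrupted IP is always recorded first. */
		perf_callchain_store(entry, instruction_pointer(regs));

		while (frame && entry->nr < PERF_MAX_STACK_DEPTH) {
			perf_callchain_store(entry, frame->return_addr);
			frame = frame->next;
		}
	}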
@@ -1143,9 +1137,9 @@ extern void perf_tp_event(u64 addr, u64 count, void *record,
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
-#define perf_misc_flags(regs)  (user_mode(regs) ? PERF_RECORD_MISC_USER : \
-                                PERF_RECORD_MISC_KERNEL)
-#define perf_instruction_pointer(regs) instruction_pointer(regs)
+# define perf_misc_flags(regs) \
+               (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
+# define perf_instruction_pointer(regs)        instruction_pointer(regs)
 #endif
 
 extern int perf_output_begin(struct perf_output_handle *handle,
@@ -1180,9 +1174,9 @@ static inline void
 perf_bp_event(struct perf_event *event, void *data)                    { }
 
 static inline int perf_register_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
+(struct perf_guest_info_callbacks *callbacks)                          { return 0; }
 static inline int perf_unregister_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
+(struct perf_guest_info_callbacks *callbacks)                          { return 0; }
 
 static inline void perf_event_mmap(struct vm_area_struct *vma)         { }
 static inline void perf_event_comm(struct task_struct *tsk)            { }
@@ -1195,23 +1189,22 @@ static inline void perf_event_disable(struct perf_event *event)         { }
 static inline void perf_event_task_tick(void)                          { }
 #endif
 
-#define perf_output_put(handle, x) \
-       perf_output_copy((handle), &(x), sizeof(x))
+#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
 
 /*
  * This has to have a higher priority than migration_notifier in sched.c.
  */
-#define perf_cpu_notifier(fn)                                  \
-do {                                                           \
-       static struct notifier_block fn##_nb __cpuinitdata =    \
-               { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
-       fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,             \
-               (void *)(unsigned long)smp_processor_id());     \
-       fn(&fn##_nb, (unsigned long)CPU_STARTING,               \
-               (void *)(unsigned long)smp_processor_id());     \
-       fn(&fn##_nb, (unsigned long)CPU_ONLINE,                 \
-               (void *)(unsigned long)smp_processor_id());     \
-       register_cpu_notifier(&fn##_nb);                        \
+#define perf_cpu_notifier(fn)                                          \
+do {                                                                   \
+       static struct notifier_block fn##_nb __cpuinitdata =            \
+               { .notifier_call = fn, .priority = CPU_PRI_PERF };      \
+       fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,                     \
+               (void *)(unsigned long)smp_processor_id());             \
+       fn(&fn##_nb, (unsigned long)CPU_STARTING,                       \
+               (void *)(unsigned long)smp_processor_id());             \
+       fn(&fn##_nb, (unsigned long)CPU_ONLINE,                         \
+               (void *)(unsigned long)smp_processor_id());             \
+       register_cpu_notifier(&fn##_nb);                                \
 } while (0)
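
The macro invokes the callback by hand for the booting CPU (UP_PREPARE, STARTING, ONLINE) before registering it, so per-CPU state also exists for the CPU that was already up when the caller initialized. A sketch of a PMU driver using it; the my_pmu_* per-CPU helpers are hypothetical:

	#include <linux/perf_event.h>
	#include <linux/cpu.h>
	#include <linux/notifier.h>
	#include <linux/init.h>

	static void my_pmu_setup_cpu(unsigned int cpu)
	{
		/* hypothetical: allocate/program per-CPU PMU state */
	}

	static void my_pmu_teardown_cpu(unsigned int cpu)
	{
		/* hypothetical: release per-CPU PMU state */
	}

	static int __cpuinit my_pmu_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
	{
		unsigned int cpu = (long)hcpu;

		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_UP_PREPARE:
			my_pmu_setup_cpu(cpu);
			break;
		case CPU_DYING:
			my_pmu_teardown_cpu(cpu);
			break;
		default:
			break;
		}
		return NOTIFY_OK;
	}

	static int __init my_pmu_init(void)
	{
		perf_cpu_notifier(my_pmu_cpu_notify);
		return 0;
	}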
 
 #endif /* __KERNEL__ */