perf_counter: Rename L2 to LL cache
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 282d8cc..20cf5af 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
 /*
  * attr.type
  */
-enum perf_event_types {
+enum perf_type_id {
        PERF_TYPE_HARDWARE              = 0,
        PERF_TYPE_SOFTWARE              = 1,
        PERF_TYPE_TRACEPOINT            = 2,
        PERF_TYPE_HW_CACHE              = 3,
+       PERF_TYPE_RAW                   = 4,
 
-       /*
-        * available TYPE space, raw is the max value.
-        */
-
-       PERF_TYPE_RAW                   = 128,
+       PERF_TYPE_MAX,                  /* non ABI */
 };
 
 /*
  * Generalized performance counter event types, used by the attr.event_id
  * parameter of the sys_perf_counter_open() syscall:
  */
-enum attr_ids {
+enum perf_hw_id {
        /*
         * Common hardware events, generalized by the kernel:
         */
-       PERF_COUNT_CPU_CYCLES           = 0,
-       PERF_COUNT_INSTRUCTIONS         = 1,
-       PERF_COUNT_CACHE_REFERENCES     = 2,
-       PERF_COUNT_CACHE_MISSES         = 3,
-       PERF_COUNT_BRANCH_INSTRUCTIONS  = 4,
-       PERF_COUNT_BRANCH_MISSES        = 5,
-       PERF_COUNT_BUS_CYCLES           = 6,
-
-       PERF_HW_EVENTS_MAX              = 7,
+       PERF_COUNT_HW_CPU_CYCLES                = 0,
+       PERF_COUNT_HW_INSTRUCTIONS              = 1,
+       PERF_COUNT_HW_CACHE_REFERENCES          = 2,
+       PERF_COUNT_HW_CACHE_MISSES              = 3,
+       PERF_COUNT_HW_BRANCH_INSTRUCTIONS       = 4,
+       PERF_COUNT_HW_BRANCH_MISSES             = 5,
+       PERF_COUNT_HW_BUS_CYCLES                = 6,
+
+       PERF_COUNT_HW_MAX,              /* non ABI */
 };
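
A minimal user-space sketch of how one of these generalized hardware ids would be
requested against the renamed enum. Note the assumptions: struct perf_counter_attr,
its .type/.config fields and __NR_perf_counter_open belong to this kernel series as
I understand it, but none of them appear in the hunks of this patch.

    /*
     * Sketch only (user space): attr layout and syscall number are
     * assumptions -- only the enum values come from this patch.
     */
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #include <linux/perf_counter.h>   /* the header being patched */

    static int open_counter(uint32_t type, uint64_t config, pid_t pid, int cpu)
    {
            struct perf_counter_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.type   = type;       /* e.g. PERF_TYPE_HARDWARE        */
            attr.config = config;     /* e.g. PERF_COUNT_HW_CPU_CYCLES  */

            /* group_fd = -1 (no group), flags = 0 */
            return syscall(__NR_perf_counter_open, &attr, pid, cpu, -1, 0);
    }

For example, open_counter(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES, 0, -1)
would open a cycle counter for the calling task on any CPU.
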
 
 /*
  * Generalized hardware cache counters:
  *
- *       { L1-D, L1-I, L2, LLC, ITLB, DTLB, BPU } x
+ *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
  *       { read, write, prefetch } x
  *       { accesses, misses }
  */
-enum hw_cache_id {
-       PERF_COUNT_HW_CACHE_L1D,
-       PERF_COUNT_HW_CACHE_L1I,
-       PERF_COUNT_HW_CACHE_L2,
-       PERF_COUNT_HW_CACHE_DTLB,
-       PERF_COUNT_HW_CACHE_ITLB,
-       PERF_COUNT_HW_CACHE_BPU,
-
-       PERF_COUNT_HW_CACHE_MAX,
+enum perf_hw_cache_id {
+       PERF_COUNT_HW_CACHE_L1D         = 0,
+       PERF_COUNT_HW_CACHE_L1I         = 1,
+       PERF_COUNT_HW_CACHE_LL          = 2,
+       PERF_COUNT_HW_CACHE_DTLB        = 3,
+       PERF_COUNT_HW_CACHE_ITLB        = 4,
+       PERF_COUNT_HW_CACHE_BPU         = 5,
+
+       PERF_COUNT_HW_CACHE_MAX,        /* non ABI */
 };
 
-enum hw_cache_op_id {
-       PERF_COUNT_HW_CACHE_OP_READ,
-       PERF_COUNT_HW_CACHE_OP_WRITE,
-       PERF_COUNT_HW_CACHE_OP_PREFETCH,
+enum perf_hw_cache_op_id {
+       PERF_COUNT_HW_CACHE_OP_READ     = 0,
+       PERF_COUNT_HW_CACHE_OP_WRITE    = 1,
+       PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
 
-       PERF_COUNT_HW_CACHE_OP_MAX,
+       PERF_COUNT_HW_CACHE_OP_MAX,     /* non ABI */
 };
 
-enum hw_cache_op_result_id {
-       PERF_COUNT_HW_CACHE_RESULT_ACCESS,
-       PERF_COUNT_HW_CACHE_RESULT_MISS,
+enum perf_hw_cache_op_result_id {
+       PERF_COUNT_HW_CACHE_RESULT_ACCESS       = 0,
+       PERF_COUNT_HW_CACHE_RESULT_MISS         = 1,
 
-       PERF_COUNT_HW_CACHE_RESULT_MAX,
+       PERF_COUNT_HW_CACHE_RESULT_MAX,         /* non ABI */
 };
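
How the three cache enums combine into a single attr.config value is not shown in
these hunks; the byte packing below (cache id in bits 0-7, op in bits 8-15, result
in bits 16-23) reflects the generalized cache event encoding in this series as I
understand it, so treat it as an assumption. It reuses the includes from the
open_counter() sketch above.

    /* Assumed packing: cache id | (op << 8) | (result << 16). */
    static uint64_t hw_cache_config(enum perf_hw_cache_id id,
                                    enum perf_hw_cache_op_id op,
                                    enum perf_hw_cache_op_result_id res)
    {
            return (uint64_t)id | ((uint64_t)op << 8) | ((uint64_t)res << 16);
    }

With the rename, last-level-cache read misses are selected by
hw_cache_config(PERF_COUNT_HW_CACHE_LL, PERF_COUNT_HW_CACHE_OP_READ,
PERF_COUNT_HW_CACHE_RESULT_MISS), paired with attr.type = PERF_TYPE_HW_CACHE.
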
 
 /*
@@ -95,16 +92,16 @@ enum hw_cache_op_result_id {
  * physical and sw events of the kernel (and allow the profiling of them as
  * well):
  */
-enum sw_event_ids {
-       PERF_COUNT_CPU_CLOCK            = 0,
-       PERF_COUNT_TASK_CLOCK           = 1,
-       PERF_COUNT_PAGE_FAULTS          = 2,
-       PERF_COUNT_CONTEXT_SWITCHES     = 3,
-       PERF_COUNT_CPU_MIGRATIONS       = 4,
-       PERF_COUNT_PAGE_FAULTS_MIN      = 5,
-       PERF_COUNT_PAGE_FAULTS_MAJ      = 6,
-
-       PERF_SW_EVENTS_MAX              = 7,
+enum perf_sw_ids {
+       PERF_COUNT_SW_CPU_CLOCK         = 0,
+       PERF_COUNT_SW_TASK_CLOCK        = 1,
+       PERF_COUNT_SW_PAGE_FAULTS       = 2,
+       PERF_COUNT_SW_CONTEXT_SWITCHES  = 3,
+       PERF_COUNT_SW_CPU_MIGRATIONS    = 4,
+       PERF_COUNT_SW_PAGE_FAULTS_MIN   = 5,
+       PERF_COUNT_SW_PAGE_FAULTS_MAJ   = 6,
+
+       PERF_COUNT_SW_MAX,              /* non ABI */
 };
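
The software ids pair with PERF_TYPE_SOFTWARE in exactly the same way; reusing the
hypothetical open_counter() helper sketched after the hardware ids above:

    /* Count context switches for the calling task on any CPU (sketch). */
    int fd = open_counter(PERF_TYPE_SOFTWARE,
                          PERF_COUNT_SW_CONTEXT_SWITCHES,
                          0 /* pid: self */, -1 /* cpu: any */);
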
 
 /*
@@ -366,6 +363,7 @@ struct hw_perf_counter {
        };
        atomic64_t                      prev_count;
        u64                             sample_period;
+       u64                             last_period;
        atomic64_t                      period_left;
        u64                             interrupts;
 
@@ -605,8 +603,15 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
               struct perf_counter_context *ctx, int cpu);
 extern void perf_counter_update_userpage(struct perf_counter *counter);
 
-extern int perf_counter_overflow(struct perf_counter *counter,
-                                int nmi, struct pt_regs *regs, u64 addr);
+struct perf_sample_data {
+       struct pt_regs          *regs;
+       u64                     addr;
+       u64                     period;
+};
+
+extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
+                                struct perf_sample_data *data);
+
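
On the in-kernel side, a hedged sketch of how an overflow path would use the new
interface: the perf_sample_data fields and the perf_counter_overflow() prototype
come from this hunk, but the handler itself and taking the period from
counter->hw.last_period (the field added above) are illustrative, not part of this
patch.

    #include <linux/perf_counter.h>
    #include <linux/ptrace.h>

    /* Illustrative only: not a handler introduced by this patch. */
    static void example_pmu_overflow(struct perf_counter *counter, int nmi,
                                     struct pt_regs *regs, u64 addr)
    {
            struct perf_sample_data data = {
                    .regs   = regs,
                    .addr   = addr,
                    .period = counter->hw.last_period, /* new field above */
            };

            /* A non-zero return asks the caller to throttle the counter. */
            if (perf_counter_overflow(counter, nmi, &data))
                    ; /* arch code would disable the counter here */
    }
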
 /*
  * Return 1 for a software counter, 0 for a hardware counter
  */
@@ -640,9 +645,9 @@ struct perf_callchain_entry {
 
 extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
 
-extern int sysctl_perf_counter_priv;
+extern int sysctl_perf_counter_paranoid;
 extern int sysctl_perf_counter_mlock;
-extern int sysctl_perf_counter_limit;
+extern int sysctl_perf_counter_sample_rate;
 
 extern void perf_counter_init(void);