Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
[pandora-kernel.git] arch/x86/include/asm/perf_event.h
index befd172..254883d 100644
@@ -5,7 +5,7 @@
  * Performance event hw details:
  */
 
-#define X86_PMC_MAX_GENERIC                                    8
+#define X86_PMC_MAX_GENERIC                                   32
 #define X86_PMC_MAX_FIXED                                      3
 
 #define X86_PMC_IDX_GENERIC                                    0
@@ -15,39 +15,31 @@
 #define MSR_ARCH_PERFMON_EVENTSEL0                          0x186
 #define MSR_ARCH_PERFMON_EVENTSEL1                          0x187
 
-#define ARCH_PERFMON_EVENTSEL0_ENABLE                    (1 << 22)
-#define ARCH_PERFMON_EVENTSEL_ANY                        (1 << 21)
-#define ARCH_PERFMON_EVENTSEL_INT                        (1 << 20)
-#define ARCH_PERFMON_EVENTSEL_OS                         (1 << 17)
-#define ARCH_PERFMON_EVENTSEL_USR                        (1 << 16)
-
-/*
- * Includes eventsel and unit mask as well:
- */
-
-
-#define INTEL_ARCH_EVTSEL_MASK         0x000000FFULL
-#define INTEL_ARCH_UNIT_MASK           0x0000FF00ULL
-#define INTEL_ARCH_EDGE_MASK           0x00040000ULL
-#define INTEL_ARCH_INV_MASK            0x00800000ULL
-#define INTEL_ARCH_CNT_MASK            0xFF000000ULL
-#define INTEL_ARCH_EVENT_MASK  (INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK)
-
-/*
- * filter mask to validate fixed counter events.
- * the following filters disqualify for fixed counters:
- *  - inv
- *  - edge
- *  - cnt-mask
- *  The other filters are supported by fixed counters.
- *  The any-thread option is supported starting with v3.
- */
-#define INTEL_ARCH_FIXED_MASK \
-       (INTEL_ARCH_CNT_MASK| \
-        INTEL_ARCH_INV_MASK| \
-        INTEL_ARCH_EDGE_MASK|\
-        INTEL_ARCH_UNIT_MASK|\
-        INTEL_ARCH_EVTSEL_MASK)
+#define ARCH_PERFMON_EVENTSEL_EVENT                    0x000000FFULL
+#define ARCH_PERFMON_EVENTSEL_UMASK                    0x0000FF00ULL
+#define ARCH_PERFMON_EVENTSEL_USR                      (1ULL << 16)
+#define ARCH_PERFMON_EVENTSEL_OS                       (1ULL << 17)
+#define ARCH_PERFMON_EVENTSEL_EDGE                     (1ULL << 18)
+#define ARCH_PERFMON_EVENTSEL_INT                      (1ULL << 20)
+#define ARCH_PERFMON_EVENTSEL_ANY                      (1ULL << 21)
+#define ARCH_PERFMON_EVENTSEL_ENABLE                   (1ULL << 22)
+#define ARCH_PERFMON_EVENTSEL_INV                      (1ULL << 23)
+#define ARCH_PERFMON_EVENTSEL_CMASK                    0xFF000000ULL
+
+#define AMD64_EVENTSEL_EVENT   \
+       (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
+#define INTEL_ARCH_EVENT_MASK  \
+       (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
+
+#define X86_RAW_EVENT_MASK             \
+       (ARCH_PERFMON_EVENTSEL_EVENT |  \
+        ARCH_PERFMON_EVENTSEL_UMASK |  \
+        ARCH_PERFMON_EVENTSEL_EDGE  |  \
+        ARCH_PERFMON_EVENTSEL_INV   |  \
+        ARCH_PERFMON_EVENTSEL_CMASK)
+#define AMD64_RAW_EVENT_MASK           \
+       (X86_RAW_EVENT_MASK          |  \
+        AMD64_EVENTSEL_EVENT)
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL                0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK                (0x00 << 8)
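/*
 * Illustrative sketch, not part of this header or commit: composing an
 * event-select value for the architectural "unhalted core cycles" event
 * from the renamed ARCH_PERFMON_EVENTSEL_* bits above, and masking a
 * user-supplied raw config with the *_RAW_EVENT_MASK macros.  Assumes
 * kernel types (u64) from <linux/types.h>; the function names are made
 * up for the example.
 */
static inline u64 example_unhalted_cycles_evtsel(void)
{
	u64 val = ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL |	/* event code 0x3c */
		  ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK;	/* unit mask 0x00 */

	val |= ARCH_PERFMON_EVENTSEL_USR;	/* count in user mode */
	val |= ARCH_PERFMON_EVENTSEL_OS;	/* count in kernel mode */
	val |= ARCH_PERFMON_EVENTSEL_INT;	/* interrupt on counter overflow */
	val |= ARCH_PERFMON_EVENTSEL_ENABLE;	/* enable the counter */

	return val;
}

static inline u64 example_sanitize_raw_config(u64 config, int is_amd)
{
	/* AMD64_EVENTSEL_EVENT also covers the extended event-code bits 35:32. */
	return config & (is_amd ? AMD64_RAW_EVENT_MASK : X86_RAW_EVENT_MASK);
}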
@@ -67,7 +59,7 @@
 union cpuid10_eax {
        struct {
                unsigned int version_id:8;
-               unsigned int num_events:8;
+               unsigned int num_counters:8;
                unsigned int bit_width:8;
                unsigned int mask_length:8;
        } split;
@@ -76,7 +68,7 @@ union cpuid10_eax {
 
 union cpuid10_edx {
        struct {
-               unsigned int num_events_fixed:4;
+               unsigned int num_counters_fixed:4;
                unsigned int reserved:28;
        } split;
        unsigned int full;
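/*
 * Illustrative sketch, not part of this header or commit: the renamed
 * num_counters/num_counters_fixed fields are filled from CPUID leaf 0xa.
 * Assumes cpuid() from <asm/processor.h>; the function name is made up
 * for the example.
 */
static inline void example_read_arch_perfmon_caps(void)
{
	union cpuid10_eax eax;
	union cpuid10_edx edx;
	unsigned int ebx, ecx;

	cpuid(0xa, &eax.full, &ebx, &ecx, &edx.full);

	/*
	 * eax.split.num_counters:       general-purpose counters per core
	 * eax.split.bit_width:          width of each counter in bits
	 * edx.split.num_counters_fixed: fixed-function counters (version >= 2)
	 */
}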
@@ -117,6 +109,18 @@ union cpuid10_edx {
  */
 #define X86_PMC_IDX_FIXED_BTS                          (X86_PMC_IDX_FIXED + 16)
 
+/* IbsFetchCtl bits/masks */
+#define IBS_FETCH_RAND_EN              (1ULL<<57)
+#define IBS_FETCH_VAL                  (1ULL<<49)
+#define IBS_FETCH_ENABLE               (1ULL<<48)
+#define IBS_FETCH_CNT                  0xFFFF0000ULL
+#define IBS_FETCH_MAX_CNT              0x0000FFFFULL
+
+/* IbsOpCtl bits */
+#define IBS_OP_CNT_CTL                 (1ULL<<19)
+#define IBS_OP_VAL                     (1ULL<<18)
+#define IBS_OP_ENABLE                  (1ULL<<17)
+#define IBS_OP_MAX_CNT                 0x0000FFFFULL
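/*
 * Illustrative sketch, not part of this header or commit: starting an IBS
 * op-sampling period from the bits above.  MSR_AMD64_IBSOPCTL and wrmsrl()
 * are assumed to come from <asm/msr-index.h>/<asm/msr.h>, and the caller is
 * assumed to pass a max count already encoded for the 16-bit IBS_OP_MAX_CNT
 * field; the function name is made up for the example.
 */
static inline void example_ibs_op_start(u64 max_cnt)
{
	u64 ctl = (max_cnt & IBS_OP_MAX_CNT) | IBS_OP_ENABLE;

	/* Count dispatched ops rather than cycles, where supported: */
	ctl |= IBS_OP_CNT_CTL;

	wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
}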
 
 #ifdef CONFIG_PERF_EVENTS
 extern void init_hw_perf_events(void);
@@ -124,6 +128,18 @@ extern void perf_events_lapic_init(void);
 
 #define PERF_EVENT_INDEX_OFFSET                        0
 
+/*
+ * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
+ * This flag is otherwise unused and ABI specified to be 0, so nobody should
+ * care what we do with it.
+ */
+#define PERF_EFLAGS_EXACT      (1UL << 3)
+
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs)  perf_misc_flags(regs)
+
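/*
 * Illustrative sketch, not part of this header or commit: how a
 * perf_misc_flags() implementation might consume PERF_EFLAGS_EXACT to mark
 * a sample IP as precise.  PERF_RECORD_MISC_* values are assumed to come
 * from <linux/perf_event.h> and user_mode() from <asm/ptrace.h>; the
 * function name is made up for the example.
 */
static inline unsigned long example_misc_flags(struct pt_regs *regs)
{
	unsigned long misc = user_mode(regs) ? PERF_RECORD_MISC_USER :
					       PERF_RECORD_MISC_KERNEL;

	if (regs->flags & PERF_EFLAGS_EXACT)
		misc |= PERF_RECORD_MISC_EXACT_IP;

	return misc;
}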
 #else
 static inline void init_hw_perf_events(void)           { }
 static inline void perf_events_lapic_init(void)        { }