/* arch/x86/kernel/cpu/perf_event_p6.c */
#ifdef CONFIG_CPU_SUP_INTEL

/*
 * Not sure about some of these
 */
static const u64 p6_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
};
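
/*
 * Layout of the map entries above (as I read the P6 PERFEVTSEL encoding):
 * bits 0-7 are the event select and bits 8-15 the unit mask.  For example,
 * 0x0f2e is event 0x2E (L2_RQSTS) with unit mask 0x0F, i.e. all four MESI
 * states, which is what makes it usable as a cache-references count.
 */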

static u64 p6_pmu_event_map(int hw_event)
{
	return p6_perfmon_event_map[hw_event];
}

/*
 * An event setting that counts nothing; we use it to effectively
 * disable a counter.
 *
 * L2_RQSTS with a zero (empty) MESI unit mask.
 */
#define P6_NOP_EVENT			0x0000002EULL

static u64 p6_pmu_raw_event(u64 hw_event)
{
#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
#define P6_EVNTSEL_INV_MASK		0x00800000ULL
#define P6_EVNTSEL_REG_MASK		0xFF000000ULL

#define P6_EVNTSEL_MASK			\
	(P6_EVNTSEL_EVENT_MASK |	\
	 P6_EVNTSEL_UNIT_MASK  |	\
	 P6_EVNTSEL_EDGE_MASK  |	\
	 P6_EVNTSEL_INV_MASK   |	\
	 P6_EVNTSEL_REG_MASK)

	return hw_event & P6_EVNTSEL_MASK;
}
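
/*
 * A worked example of the filtering above (illustrative only): a raw
 * config of 0x0047002e keeps the event select (0x2e) and the EDGE bit
 * (bit 18), but drops the USR/OS/ENABLE bits (16, 17 and 22), which are
 * presumably managed by the generic x86 code, leaving 0x0004002e.
 */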

static struct event_constraint p6_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),	/* FLOPS */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x1),	/* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
	EVENT_CONSTRAINT_END
};
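
/*
 * The second argument of INTEL_EVENT_CONSTRAINT() is, as far as I can
 * tell, the bitmask of counters the event may be scheduled on: FLOPS,
 * FP_COMP_OPS_EXE, FP_ASSIST and CYCLES_DIV_BUSY (0x1) are tied to
 * counter 0, while MUL and DIV (0x2) are tied to counter 1, matching
 * the counter restrictions documented for the P6 PMU.
 */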

static void p6_pmu_disable_all(void)
{
	u64 val;

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void p6_pmu_enable_all(void)
{
	u64 val;

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val |= ARCH_PERFMON_EVENTSEL_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}
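
/*
 * Background for the "one enable register" comment above: on P6 the
 * enable bit (ARCH_PERFMON_EVENTSEL_ENABLE, bit 22) is only honoured in
 * EVNTSEL0 and, when clear, stops both counters, so toggling that single
 * bit is enough to start or stop the whole PMU (at least as the SDM
 * describes it).
 */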

static inline void
p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val = P6_NOP_EVENT;

	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;

	(void)checking_wrmsrl(hwc->config_base + idx, val);
}

static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	val = hwc->config;
	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;

	(void)checking_wrmsrl(hwc->config_base + idx, val);
}
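
/*
 * In both helpers above hwc->config_base is MSR_P6_EVNTSEL0 (as set up
 * by the generic code) and idx is the counter number; config_base + idx
 * works because the two P6 event-select MSRs (0x186, 0x187) are
 * consecutive, as are the counters starting at MSR_P6_PERFCTR0.
 */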

static __initconst struct x86_pmu p6_pmu = {
	.name			= "p6",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= p6_pmu_disable_all,
	.enable_all		= p6_pmu_enable_all,
	.enable			= p6_pmu_enable_event,
	.disable		= p6_pmu_disable_event,
	.eventsel		= MSR_P6_EVNTSEL0,
	.perfctr		= MSR_P6_PERFCTR0,
	.event_map		= p6_pmu_event_map,
	.raw_event		= p6_pmu_raw_event,
	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
	.apic			= 1,
	.max_period		= (1ULL << 31) - 1,
	.version		= 0,
	.num_events		= 2,
	/*
	 * Event counters have 40 bits implemented.  However, they are
	 * designed such that bits [32-39] are sign extensions of bit 31.
	 * As such, the effective width of an event counter for P6-like
	 * PMUs is 32 bits only.
	 *
	 * See the IA-32 Intel Architecture Software Developer's Manual,
	 * Vol. 3B.
	 */
	.event_bits		= 32,
	.event_mask		= (1ULL << 32) - 1,
	.get_event_constraints	= x86_get_event_constraints,
	.event_constraints	= p6_event_constraints,
};
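
/*
 * Note on .max_period above: since the usable counter width is 32 bits
 * and the generic code programs the counter with the negated period,
 * the period itself has to fit in 31 bits, hence (1ULL << 31) - 1.
 */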

static __init int p6_pmu_init(void)
{
	switch (boot_cpu_data.x86_model) {
	case  1: /* Pentium Pro */
	case  3: /* Pentium II - Klamath */
	case  5: /* Pentium II - Deschutes */
	case  6: /* Pentium II - Mendocino */
	case  7: /* Pentium III - Katmai */
	case  8: /* Pentium III - Coppermine */
	case 11: /* Pentium III - Tualatin */
	case  9: /* Pentium M - Banias */
	case 13: /* Pentium M - Dothan */
		break;
	default:
		pr_cont("unsupported p6 CPU model %d ",
			boot_cpu_data.x86_model);
		return -ENODEV;
	}

	x86_pmu = p6_pmu;

	return 0;
}
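
/*
 * As far as I can tell, p6_pmu_init() is reached from the common Intel
 * PMU setup code when a family-6 CPU does not advertise architectural
 * perfmon via CPUID leaf 0xa; on success the p6_pmu description above
 * simply becomes the global x86_pmu.
 */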

#endif /* CONFIG_CPU_SUP_INTEL */