#ifdef CONFIG_CPU_SUP_INTEL

/*
 * Intel PerfMon, used on Core and later.
 */
static const u64 intel_perfmon_event_map[] =
{
        [PERF_COUNT_HW_CPU_CYCLES]              = 0x003c,
        [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x4f2e,
        [PERF_COUNT_HW_CACHE_MISSES]            = 0x412e,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c4,
        [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c5,
        [PERF_COUNT_HW_BUS_CYCLES]              = 0x013c,
};
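
/*
 * Each entry above is a raw (unit-mask << 8) | event-select encoding.
 * For example, 0x4f2e is umask 0x4f on event 0x2e ("LLC references"),
 * and 0x412e is umask 0x41 on the same event ("LLC misses").
 */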

static struct event_constraint intel_core_event_constraints[] =
{
        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
        INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
        INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
        INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
        EVENT_CONSTRAINT_END
};
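
/*
 * Constraint format: INTEL_EVENT_CONSTRAINT(event, counter-mask), where
 * the second argument is a bitmap of the counters the event may use.
 * E.g. 0x2 above pins FP_ASSIST (0x11) to PMC1 only, while 0x1 pins
 * CYCLES_DIV_BUSY (0x14) to PMC0.
 */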

static struct event_constraint intel_core2_event_constraints[] =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        /*
         * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
         * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
         * ratio between these counters.
         */
        /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
        INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
        INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
        INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
        INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
        INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
        INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
        EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
        INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
        INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
        INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
        INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
        INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
        EVENT_CONSTRAINT_END
};

static struct event_constraint intel_westmere_event_constraints[] =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
        INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
        INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
        EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
        return intel_perfmon_event_map[hw_event];
}
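
/*
 * E.g. intel_pmu_event_map(PERF_COUNT_HW_CPU_CYCLES) returns 0x003c:
 * event select 0x3c, umask 0x00, the architectural "UnHalted Core
 * Cycles" event.
 */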

static __initconst const u64 westmere_hw_cache_event_ids
                        [PERF_COUNT_HW_CACHE_MAX]
                        [PERF_COUNT_HW_CACHE_OP_MAX]
                        [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
                [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
                [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
        },
 },
 [ C(L1I) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(LL) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
                [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
                [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
                [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses */
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
                [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
                [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
                [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};
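
/*
 * Table convention, as interpreted by the generic x86 perf code: a value
 * of -1 marks an op/result combination that is not supported at all
 * (rejected with -EINVAL), while 0 marks a combination for which no
 * suitable hardware event exists (-ENOENT).
 */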

static __initconst const u64 nehalem_hw_cache_event_ids
                        [PERF_COUNT_HW_CACHE_MAX]
                        [PERF_COUNT_HW_CACHE_OP_MAX]
                        [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
                [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
                [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
                [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
        },
 },
 [ C(L1I) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(LL) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
                [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
                [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
                [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses */
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
                [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
                [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
                [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};

static __initconst const u64 core2_hw_cache_event_ids
                        [PERF_COUNT_HW_CACHE_MAX]
                        [PERF_COUNT_HW_CACHE_OP_MAX]
                        [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
                [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
                [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(L1I) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
                [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(LL) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
                [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
                [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
                [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
                [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
                [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};

static __initconst const u64 atom_hw_cache_event_ids
                        [PERF_COUNT_HW_CACHE_MAX]
                        [PERF_COUNT_HW_CACHE_OP_MAX]
                        [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(L1I) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(LL) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
                [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
                [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
                [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
                [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
                [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};

static void intel_pmu_disable_all(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

        if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
                intel_pmu_disable_bts();

        intel_pmu_pebs_disable_all();
        intel_pmu_lbr_disable_all();
}

static void intel_pmu_enable_all(int added)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        intel_pmu_pebs_enable_all();
        intel_pmu_lbr_enable_all();
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

        if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
                struct perf_event *event =
                        cpuc->events[X86_PMC_IDX_FIXED_BTS];

                if (WARN_ON_ONCE(!event))
                        return;

                intel_pmu_enable_bts(event->hw.config);
        }
}

/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
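/*
 * Each magic value is itself a raw PERFEVTSEL encoding: e.g. 0x4300B5 is
 * event select 0xB5, umask 0x00, with USR|OS|EN (0x43) in bits 16-23.
 */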
static void intel_pmu_nhm_workaround(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        static const unsigned long nhm_magic[4] = {
                0x4300B5,
                0x4300D2,
                0x4300B1,
                0x4300B1
        };
        struct perf_event *event;
        int i;

        /*
         * The errata requires the following steps:
         * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
         * 2) Configure 4 PERFEVTSELx with the magic events and clear
         *    the corresponding PMCx;
         * 3) set bits 0-3 of MSR_CORE_PERF_GLOBAL_CTRL;
         * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
         * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
         */

        /*
         * The real steps we choose are a little different from above.
         * A) To reduce MSR operations, we don't run step 1) as they
         *    are already cleared before this function is called;
         * B) Call x86_perf_event_update to save PMCx before configuring
         *    PERFEVTSELx with magic number;
         * C) With step 5), we do clear only when the PERFEVTSELx is
         *    not used currently.
         * D) Call x86_perf_event_set_period to restore PMCx;
         */

        /* We always operate 4 pairs of PERF Counters */
        for (i = 0; i < 4; i++) {
                event = cpuc->events[i];
                if (event)
                        x86_perf_event_update(event);
        }

        for (i = 0; i < 4; i++) {
                wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
                wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
        }

        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

        for (i = 0; i < 4; i++) {
                event = cpuc->events[i];

                if (event) {
                        x86_perf_event_set_period(event);
                        __x86_pmu_enable_event(&event->hw,
                                        ARCH_PERFMON_EVENTSEL_ENABLE);
                } else
                        wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
        }
}

static void intel_pmu_nhm_enable_all(int added)
{
        if (added)
                intel_pmu_nhm_workaround();
        intel_pmu_enable_all(added);
}

static inline u64 intel_pmu_get_status(void)
{
        u64 status;

        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

        return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
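
/*
 * Each fixed counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
 * disabling fixed counter idx clears bits [idx*4+3 : idx*4], e.g. mask
 * 0xf0 for fixed counter 1.
 */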
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
        int idx = hwc->idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, mask;

        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_disable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
                intel_pmu_disable_bts();
                intel_pmu_drain_bts_buffer();
                return;
        }

        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                intel_pmu_disable_fixed(hwc);
                return;
        }

        x86_pmu_disable_event(event);

        if (unlikely(event->attr.precise_ip))
                intel_pmu_pebs_disable(event);
}
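
/*
 * Worked example: enabling fixed counter 1 for user+kernel with PMI
 * delivery sets bits = 0xb (PMI|USR|OS), shifted into field 1, i.e.
 * 0xb0 in MSR_ARCH_PERFMON_FIXED_CTR_CTRL.
 */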
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
        int idx = hwc->idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, bits, mask;

        /*
         * Enable IRQ generation (0x8),
         * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
         * if requested:
         */
        bits = 0x8ULL;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
                bits |= 0x2;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
                bits |= 0x1;

        /*
         * ANY bit is supported in v3 and up
         */
        if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
                bits |= 0x4;

        bits <<= (idx * 4);
        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        ctrl_val |= bits;
        wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
                if (!__this_cpu_read(cpu_hw_events.enabled))
                        return;

                intel_pmu_enable_bts(hwc->config);
                return;
        }

        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                intel_pmu_enable_fixed(hwc);
                return;
        }

        if (unlikely(event->attr.precise_ip))
                intel_pmu_pebs_enable(event);

        __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
        x86_perf_event_update(event);
        return x86_perf_event_set_period(event);
}

static void intel_pmu_reset(void)
{
        struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
        unsigned long flags;
        int idx;

        if (!x86_pmu.num_counters)
                return;

        local_irq_save(flags);

        printk("clearing PMU state on CPU#%d\n", smp_processor_id());

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
                checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
                checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

        if (ds)
                ds->bts_index = ds->bts_buffer_base;

        local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        int bit, loops;
        u64 status;
        int handled;

        perf_sample_data_init(&data, 0);

        cpuc = &__get_cpu_var(cpu_hw_events);

        intel_pmu_disable_all();
        handled = intel_pmu_drain_bts_buffer();
        status = intel_pmu_get_status();
        if (!status) {
                intel_pmu_enable_all(0);
                return handled;
        }

        loops = 0;
again:
        intel_pmu_ack_status(status);
        if (++loops > 100) {
                WARN_ONCE(1, "perfevents: irq loop stuck!\n");
                perf_event_print_debug();
                intel_pmu_reset();
                goto done;
        }

        inc_irq_stat(apic_perf_irqs);

        intel_pmu_lbr_read();

        /*
         * PEBS overflow sets bit 62 in the global status register
         */
        if (__test_and_clear_bit(62, (unsigned long *)&status)) {
                handled++;
                x86_pmu.drain_pebs(regs);
        }

        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_event *event = cpuc->events[bit];

                handled++;

                if (!test_bit(bit, cpuc->active_mask))
                        continue;

                if (!intel_pmu_save_and_restart(event))
                        continue;

                data.period = event->hw.last_period;

                if (perf_event_overflow(event, 1, &data, regs))
                        x86_pmu_stop(event, 0);
        }

        /*
         * Repeat if there is more work to be done:
         */
        status = intel_pmu_get_status();
        if (status)
                goto again;

done:
        intel_pmu_enable_all(0);
        return handled;
}
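
/*
 * A branch-instructions event with a sample period of 1 maps onto the
 * BTS (Branch Trace Store) facility rather than a generic counter:
 */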
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        unsigned int hw_event, bts_event;

        hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
        bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

        if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
                return &bts_constraint;

        return NULL;
}

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
        struct event_constraint *c;

        c = intel_bts_constraints(event);
        if (c)
                return c;

        c = intel_pebs_constraints(event);
        if (c)
                return c;

        return x86_get_event_constraints(cpuc, event);
}

static int intel_pmu_hw_config(struct perf_event *event)
{
        int ret = x86_pmu_hw_config(event);

        if (ret)
                return ret;

        if (event->attr.precise_ip &&
            (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
                /*
                 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
                 * (0x003c) so that we can use it with PEBS.
                 *
                 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
                 * PEBS capable. However we can use INST_RETIRED.ANY_P
                 * (0x00c0), which is a PEBS capable event, to get the same
                 * count.
                 *
                 * INST_RETIRED.ANY_P counts the number of cycles that retire
                 * CNTMASK instructions. By setting CNTMASK to a value (16)
                 * larger than the maximum number of instructions that can be
                 * retired per cycle (4) and then inverting the condition, we
                 * count all cycles that retire 16 or fewer instructions, which
                 * is every cycle.
                 *
                 * Thereby we gain a PEBS capable cycle counter.
                 */
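                /*
                 * Decoding the raw value: 0x108000c0 ==
                 * (0x10 << 24) | (1 << 23) | 0x00c0, i.e. CMASK = 16,
                 * INV = 1, event select = 0xc0 (INST_RETIRED.ANY_P).
                 */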
                u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */

                alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
                event->hw.config = alt_config;
        }

        if (event->attr.type != PERF_TYPE_RAW)
                return 0;

        if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
                return 0;

        if (x86_pmu.version < 3)
                return -EINVAL;

        if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
                return -EACCES;

        event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

        return 0;
}

static __initconst const struct x86_pmu core_pmu = {
        .name                   = "core",
        .handle_irq             = x86_pmu_handle_irq,
        .disable_all            = x86_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
        .hw_config              = x86_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
        .event_map              = intel_pmu_event_map,
        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
        .apic                   = 1,
        /*
         * Intel PMCs cannot be accessed sanely above 32-bit width,
         * so we install an artificial 1<<31 period regardless of
         * the generic event period:
         */
        .max_period             = (1ULL << 31) - 1,
        .get_event_constraints  = intel_get_event_constraints,
        .event_constraints      = intel_core_event_constraints,
};

static void intel_pmu_cpu_starting(int cpu)
{
        init_debug_store_on_cpu(cpu);
        /*
         * Deal with CPUs that don't clear their LBRs on power-up.
         */
        intel_pmu_lbr_reset();
}

static void intel_pmu_cpu_dying(int cpu)
{
        fini_debug_store_on_cpu(cpu);
}

static __initconst const struct x86_pmu intel_pmu = {
        .name                   = "Intel",
        .handle_irq             = intel_pmu_handle_irq,
        .disable_all            = intel_pmu_disable_all,
        .enable_all             = intel_pmu_enable_all,
        .enable                 = intel_pmu_enable_event,
        .disable                = intel_pmu_disable_event,
        .hw_config              = intel_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
        .event_map              = intel_pmu_event_map,
        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
        .apic                   = 1,
        /*
         * Intel PMCs cannot be accessed sanely above 32-bit width,
         * so we install an artificial 1<<31 period regardless of
         * the generic event period:
         */
        .max_period             = (1ULL << 31) - 1,
        .get_event_constraints  = intel_get_event_constraints,

        .cpu_starting           = intel_pmu_cpu_starting,
        .cpu_dying              = intel_pmu_cpu_dying,
};

static void intel_clovertown_quirks(void)
{
        /*
         * PEBS is unreliable due to:
         *
         *   AJ67  - PEBS may experience CPL leaks
         *   AJ68  - PEBS PMI may be delayed by one event
         *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
         *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
         *
         * AJ67  could be worked around by restricting the OS/USR flags.
         * AJ69  could be worked around by setting PMU_FREEZE_ON_PMI.
         *
         * AJ106 could possibly be worked around by not allowing LBR
         *       usage from PEBS, including the fixup.
         * AJ68  could possibly be worked around by always programming
         *       a pebs_event_reset[0] value and coping with the lost events.
         *
         * But taken together it might just make sense to not enable PEBS on
         * these chips.
         */
        printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
        x86_pmu.pebs = 0;
        x86_pmu.pebs_constraints = NULL;
}

static __init int intel_pmu_init(void)
{
        union cpuid10_edx edx;
        union cpuid10_eax eax;
        unsigned int unused;
        unsigned int ebx;
        int version;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
                switch (boot_cpu_data.x86) {
                case 0x6:
                        return p6_pmu_init();
                case 0xf:
                        return p4_pmu_init();
                }
                return -ENODEV;
        }

        /*
         * Check whether the Architectural PerfMon supports
         * Branch Misses Retired hw_event or not.
         */
        cpuid(10, &eax.full, &ebx, &unused, &edx.full);
        if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
                return -ENODEV;

        version = eax.split.version_id;
        if (version < 2)
                x86_pmu = core_pmu;
        else
                x86_pmu = intel_pmu;

        x86_pmu.version                 = version;
        x86_pmu.num_counters            = eax.split.num_counters;
        x86_pmu.cntval_bits             = eax.split.bit_width;
        x86_pmu.cntval_mask             = (1ULL << eax.split.bit_width) - 1;

        /*
         * Quirk: v2 perfmon does not report fixed-purpose events, so
         * assume at least 3 events:
         */
        if (version > 1)
                x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

        /*
         * v2 and above have a perf capabilities MSR
         */
        if (version > 1) {
                u64 capabilities;

                rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
                x86_pmu.intel_cap.capabilities = capabilities;
        }

        intel_ds_init();

        /*
         * Install the hw-cache-events table:
         */
        switch (boot_cpu_data.x86_model) {
        case 14: /* 65 nm core solo/duo, "Yonah" */
                pr_cont("Core events, ");
                break;

        case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
                x86_pmu.quirks = intel_clovertown_quirks;
        case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
        case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
        case 29: /* six-core 45 nm xeon "Dunnington" */
                memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                intel_pmu_lbr_init_core();

                x86_pmu.event_constraints = intel_core2_event_constraints;
                pr_cont("Core2 events, ");
                break;

        case 26: /* 45 nm nehalem, "Bloomfield" */
        case 30: /* 45 nm nehalem, "Lynnfield" */
        case 46: /* 45 nm nehalem-ex, "Beckton" */
                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                intel_pmu_lbr_init_nhm();

                x86_pmu.event_constraints = intel_nehalem_event_constraints;
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
                pr_cont("Nehalem events, ");
                break;

        case 28: /* Atom */
                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                intel_pmu_lbr_init_atom();

                x86_pmu.event_constraints = intel_gen_event_constraints;
                pr_cont("Atom events, ");
                break;

        case 37: /* 32 nm nehalem, "Clarkdale" */
        case 44: /* 32 nm nehalem, "Gulftown" */
                memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                intel_pmu_lbr_init_nhm();

                x86_pmu.event_constraints = intel_westmere_event_constraints;
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
                pr_cont("Westmere events, ");
                break;

        default:
                /*
                 * default constraints for v2 and up
                 */
                x86_pmu.event_constraints = intel_gen_event_constraints;
                pr_cont("generic architected perfmon, ");
        }

        return 0;
}

#else /* CONFIG_CPU_SUP_INTEL */

static int intel_pmu_init(void)
{
        return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */