2 * Performance events x86 architecture code
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2009 Jaswinder Singh Rajput
7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10 * Copyright (C) 2009 Google, Inc., Stephane Eranian
12 * For licensing details see kernel-base/COPYING
15 #include <linux/perf_event.h>
16 #include <linux/capability.h>
17 #include <linux/notifier.h>
18 #include <linux/hardirq.h>
19 #include <linux/kprobes.h>
20 #include <linux/module.h>
21 #include <linux/kdebug.h>
22 #include <linux/sched.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/cpu.h>
28 #include <asm/stacktrace.h>
31 static u64 perf_event_mask __read_mostly;
33 /* The maximal number of PEBS events: */
34 #define MAX_PEBS_EVENTS 4
36 /* The size of a BTS record in bytes: */
37 #define BTS_RECORD_SIZE 24
39 /* The size of a per-cpu BTS buffer in bytes: */
40 #define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 2048)
42 /* The BTS overflow threshold in bytes from the end of the buffer: */
43 #define BTS_OVFL_TH (BTS_RECORD_SIZE * 128)
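/*
 * I.e. each BTS record is three u64 fields (branch-from, branch-to, flags),
 * so the per-cpu buffer holds BTS_BUFFER_SIZE / BTS_RECORD_SIZE = 2048
 * records and the interrupt threshold sits 128 records before its end.
 */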
47 * Bits in the debugctlmsr controlling branch tracing.
49 #define X86_DEBUGCTL_TR (1 << 6)
50 #define X86_DEBUGCTL_BTS (1 << 7)
51 #define X86_DEBUGCTL_BTINT (1 << 8)
52 #define X86_DEBUGCTL_BTS_OFF_OS (1 << 9)
53 #define X86_DEBUGCTL_BTS_OFF_USR (1 << 10)
56 * A debug store configuration.
58 * We only support architectures that use 64bit fields.
63 u64 bts_absolute_maximum;
64 u64 bts_interrupt_threshold;
67 u64 pebs_absolute_maximum;
68 u64 pebs_interrupt_threshold;
69 u64 pebs_event_reset[MAX_PEBS_EVENTS];
72 #define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
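/*
 * BITS_TO_U64(nr) is simply DIV_ROUND_UP(nr, 64): e.g. BITS_TO_U64(64) == 1,
 * so a constraint's idxmsk[] needs one u64 word per 64 counter indices.
 */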
74 struct event_constraint {
75 u64 idxmsk[BITS_TO_U64(X86_PMC_IDX_MAX)];
80 struct cpu_hw_events {
81 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
82 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
83 unsigned long interrupts;
85 struct debug_store *ds;
89 int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
90 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
93 #define EVENT_CONSTRAINT(c, n, m) { \
98 #define EVENT_CONSTRAINT_END \
99 { .code = 0, .cmask = 0, .idxmsk[0] = 0 }
101 #define for_each_event_constraint(e, c) \
102 for ((e) = (c); (e)->cmask; (e)++)
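/*
 * Constraint tables are terminated by EVENT_CONSTRAINT_END; its zero cmask
 * is what stops the for_each_event_constraint() loop above.
 */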
105 * struct x86_pmu - generic x86 pmu
110 int (*handle_irq)(struct pt_regs *);
111 void (*disable_all)(void);
112 void (*enable_all)(void);
113 void (*enable)(struct hw_perf_event *, int);
114 void (*disable)(struct hw_perf_event *, int);
117 u64 (*event_map)(int);
118 u64 (*raw_event)(u64);
121 int num_events_fixed;
127 void (*enable_bts)(u64 config);
128 void (*disable_bts)(void);
129 void (*get_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event, u64 *idxmsk);
130 void (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event);
131 const struct event_constraint *event_constraints;
134 static struct x86_pmu x86_pmu __read_mostly;
136 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
140 static int x86_perf_event_set_period(struct perf_event *event,
141 struct hw_perf_event *hwc, int idx);
144 * Not sure about some of these
146 static const u64 p6_perfmon_event_map[] =
148 [PERF_COUNT_HW_CPU_CYCLES] = 0x0079,
149 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
150 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e,
151 [PERF_COUNT_HW_CACHE_MISSES] = 0x012e,
152 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
153 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
154 [PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
157 static u64 p6_pmu_event_map(int hw_event)
159 return p6_perfmon_event_map[hw_event];
163 * Event setting that is specified not to count anything.
164 * We use this to effectively disable a counter.
166 * L2_RQSTS with 0 MESI unit mask.
168 #define P6_NOP_EVENT 0x0000002EULL
170 static u64 p6_pmu_raw_event(u64 hw_event)
172 #define P6_EVNTSEL_EVENT_MASK 0x000000FFULL
173 #define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
174 #define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
175 #define P6_EVNTSEL_INV_MASK 0x00800000ULL
176 #define P6_EVNTSEL_REG_MASK 0xFF000000ULL
178 #define P6_EVNTSEL_MASK \
179 (P6_EVNTSEL_EVENT_MASK | \
180 P6_EVNTSEL_UNIT_MASK | \
181 P6_EVNTSEL_EDGE_MASK | \
182 P6_EVNTSEL_INV_MASK | \
185 return hw_event & P6_EVNTSEL_MASK;
188 static struct event_constraint intel_p6_event_constraints[] =
190 EVENT_CONSTRAINT(0xc1, 0x1, INTEL_ARCH_EVENT_MASK), /* FLOPS */
191 EVENT_CONSTRAINT(0x10, 0x1, INTEL_ARCH_EVENT_MASK), /* FP_COMP_OPS_EXE */
192 EVENT_CONSTRAINT(0x11, 0x1, INTEL_ARCH_EVENT_MASK), /* FP_ASSIST */
193 EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK), /* MUL */
194 EVENT_CONSTRAINT(0x13, 0x2, INTEL_ARCH_EVENT_MASK), /* DIV */
195 EVENT_CONSTRAINT(0x14, 0x1, INTEL_ARCH_EVENT_MASK), /* CYCLES_DIV_BUSY */
200 * Intel PerfMon v3. Used on Core2 and later.
202 static const u64 intel_perfmon_event_map[] =
204 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
205 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
206 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
207 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
208 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
209 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
210 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
213 static struct event_constraint intel_core_event_constraints[] =
215 EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
216 EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
217 EVENT_CONSTRAINT(0x10, 0x1, INTEL_ARCH_EVENT_MASK), /* FP_COMP_OPS_EXE */
218 EVENT_CONSTRAINT(0x11, 0x2, INTEL_ARCH_EVENT_MASK), /* FP_ASSIST */
219 EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK), /* MUL */
220 EVENT_CONSTRAINT(0x13, 0x2, INTEL_ARCH_EVENT_MASK), /* DIV */
221 EVENT_CONSTRAINT(0x14, 0x1, INTEL_ARCH_EVENT_MASK), /* CYCLES_DIV_BUSY */
222 EVENT_CONSTRAINT(0x18, 0x1, INTEL_ARCH_EVENT_MASK), /* IDLE_DURING_DIV */
223 EVENT_CONSTRAINT(0x19, 0x2, INTEL_ARCH_EVENT_MASK), /* DELAYED_BYPASS */
224 EVENT_CONSTRAINT(0xa1, 0x1, INTEL_ARCH_EVENT_MASK), /* RS_UOPS_DISPATCH_CYCLES */
225 EVENT_CONSTRAINT(0xcb, 0x1, INTEL_ARCH_EVENT_MASK), /* MEM_LOAD_RETIRED */
229 static struct event_constraint intel_nehalem_event_constraints[] =
231 EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
232 EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
233 EVENT_CONSTRAINT(0x40, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LD */
234 EVENT_CONSTRAINT(0x41, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_ST */
235 EVENT_CONSTRAINT(0x42, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LOCK */
236 EVENT_CONSTRAINT(0x43, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_ALL_REF */
237 EVENT_CONSTRAINT(0x4e, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_PREFETCH */
238 EVENT_CONSTRAINT(0x4c, 0x3, INTEL_ARCH_EVENT_MASK), /* LOAD_HIT_PRE */
239 EVENT_CONSTRAINT(0x51, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D */
240 EVENT_CONSTRAINT(0x52, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
241 EVENT_CONSTRAINT(0x53, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LOCK_FB_HIT */
242 EVENT_CONSTRAINT(0xc5, 0x3, INTEL_ARCH_EVENT_MASK), /* CACHE_LOCK_CYCLES */
246 static struct event_constraint intel_gen_event_constraints[] =
248 EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
249 EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
253 static u64 intel_pmu_event_map(int hw_event)
255 return intel_perfmon_event_map[hw_event];
259 * Generalized hw caching related hw_event table, filled
260 * in on a per model basis. A value of 0 means
261 * 'not supported', -1 means 'hw_event makes no sense on
262 * this CPU', any other value means the raw hw_event ID.
266 #define C(x) PERF_COUNT_HW_CACHE_##x
268 static u64 __read_mostly hw_cache_event_ids
269 [PERF_COUNT_HW_CACHE_MAX]
270 [PERF_COUNT_HW_CACHE_OP_MAX]
271 [PERF_COUNT_HW_CACHE_RESULT_MAX];
273 static __initconst u64 nehalem_hw_cache_event_ids
274 [PERF_COUNT_HW_CACHE_MAX]
275 [PERF_COUNT_HW_CACHE_OP_MAX]
276 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
280 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
281 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
284 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
285 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
287 [ C(OP_PREFETCH) ] = {
288 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
289 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
294 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
295 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
298 [ C(RESULT_ACCESS) ] = -1,
299 [ C(RESULT_MISS) ] = -1,
301 [ C(OP_PREFETCH) ] = {
302 [ C(RESULT_ACCESS) ] = 0x0,
303 [ C(RESULT_MISS) ] = 0x0,
308 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
309 [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
312 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
313 [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
315 [ C(OP_PREFETCH) ] = {
316 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
317 [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
322 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
323 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
326 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
327 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
329 [ C(OP_PREFETCH) ] = {
330 [ C(RESULT_ACCESS) ] = 0x0,
331 [ C(RESULT_MISS) ] = 0x0,
336 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
337 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
340 [ C(RESULT_ACCESS) ] = -1,
341 [ C(RESULT_MISS) ] = -1,
343 [ C(OP_PREFETCH) ] = {
344 [ C(RESULT_ACCESS) ] = -1,
345 [ C(RESULT_MISS) ] = -1,
350 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
351 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
354 [ C(RESULT_ACCESS) ] = -1,
355 [ C(RESULT_MISS) ] = -1,
357 [ C(OP_PREFETCH) ] = {
358 [ C(RESULT_ACCESS) ] = -1,
359 [ C(RESULT_MISS) ] = -1,
364 static __initconst u64 core2_hw_cache_event_ids
365 [PERF_COUNT_HW_CACHE_MAX]
366 [PERF_COUNT_HW_CACHE_OP_MAX]
367 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
371 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
372 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
375 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
376 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
378 [ C(OP_PREFETCH) ] = {
379 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
380 [ C(RESULT_MISS) ] = 0,
385 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
386 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
389 [ C(RESULT_ACCESS) ] = -1,
390 [ C(RESULT_MISS) ] = -1,
392 [ C(OP_PREFETCH) ] = {
393 [ C(RESULT_ACCESS) ] = 0,
394 [ C(RESULT_MISS) ] = 0,
399 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
400 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
403 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
404 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
406 [ C(OP_PREFETCH) ] = {
407 [ C(RESULT_ACCESS) ] = 0,
408 [ C(RESULT_MISS) ] = 0,
413 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
414 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
417 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
418 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
420 [ C(OP_PREFETCH) ] = {
421 [ C(RESULT_ACCESS) ] = 0,
422 [ C(RESULT_MISS) ] = 0,
427 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
428 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
431 [ C(RESULT_ACCESS) ] = -1,
432 [ C(RESULT_MISS) ] = -1,
434 [ C(OP_PREFETCH) ] = {
435 [ C(RESULT_ACCESS) ] = -1,
436 [ C(RESULT_MISS) ] = -1,
441 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
442 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
445 [ C(RESULT_ACCESS) ] = -1,
446 [ C(RESULT_MISS) ] = -1,
448 [ C(OP_PREFETCH) ] = {
449 [ C(RESULT_ACCESS) ] = -1,
450 [ C(RESULT_MISS) ] = -1,
455 static __initconst u64 atom_hw_cache_event_ids
456 [PERF_COUNT_HW_CACHE_MAX]
457 [PERF_COUNT_HW_CACHE_OP_MAX]
458 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
462 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
463 [ C(RESULT_MISS) ] = 0,
466 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
467 [ C(RESULT_MISS) ] = 0,
469 [ C(OP_PREFETCH) ] = {
470 [ C(RESULT_ACCESS) ] = 0x0,
471 [ C(RESULT_MISS) ] = 0,
476 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
477 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
480 [ C(RESULT_ACCESS) ] = -1,
481 [ C(RESULT_MISS) ] = -1,
483 [ C(OP_PREFETCH) ] = {
484 [ C(RESULT_ACCESS) ] = 0,
485 [ C(RESULT_MISS) ] = 0,
490 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
491 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
494 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
495 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
497 [ C(OP_PREFETCH) ] = {
498 [ C(RESULT_ACCESS) ] = 0,
499 [ C(RESULT_MISS) ] = 0,
504 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
505 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
508 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
509 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
511 [ C(OP_PREFETCH) ] = {
512 [ C(RESULT_ACCESS) ] = 0,
513 [ C(RESULT_MISS) ] = 0,
518 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
519 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
522 [ C(RESULT_ACCESS) ] = -1,
523 [ C(RESULT_MISS) ] = -1,
525 [ C(OP_PREFETCH) ] = {
526 [ C(RESULT_ACCESS) ] = -1,
527 [ C(RESULT_MISS) ] = -1,
532 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
533 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
536 [ C(RESULT_ACCESS) ] = -1,
537 [ C(RESULT_MISS) ] = -1,
539 [ C(OP_PREFETCH) ] = {
540 [ C(RESULT_ACCESS) ] = -1,
541 [ C(RESULT_MISS) ] = -1,
546 static u64 intel_pmu_raw_event(u64 hw_event)
548 #define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
549 #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
550 #define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
551 #define CORE_EVNTSEL_INV_MASK 0x00800000ULL
552 #define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
554 #define CORE_EVNTSEL_MASK \
555 (INTEL_ARCH_EVTSEL_MASK | \
556 INTEL_ARCH_UNIT_MASK | \
557 INTEL_ARCH_EDGE_MASK | \
558 INTEL_ARCH_INV_MASK | \
561 return hw_event & CORE_EVNTSEL_MASK;
564 static __initconst u64 amd_hw_cache_event_ids
565 [PERF_COUNT_HW_CACHE_MAX]
566 [PERF_COUNT_HW_CACHE_OP_MAX]
567 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
571 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
572 [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
575 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
576 [ C(RESULT_MISS) ] = 0,
578 [ C(OP_PREFETCH) ] = {
579 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
580 [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
585 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
586 [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
589 [ C(RESULT_ACCESS) ] = -1,
590 [ C(RESULT_MISS) ] = -1,
592 [ C(OP_PREFETCH) ] = {
593 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
594 [ C(RESULT_MISS) ] = 0,
599 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
600 [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
603 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
604 [ C(RESULT_MISS) ] = 0,
606 [ C(OP_PREFETCH) ] = {
607 [ C(RESULT_ACCESS) ] = 0,
608 [ C(RESULT_MISS) ] = 0,
613 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
614 [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DTLB Miss */
617 [ C(RESULT_ACCESS) ] = 0,
618 [ C(RESULT_MISS) ] = 0,
620 [ C(OP_PREFETCH) ] = {
621 [ C(RESULT_ACCESS) ] = 0,
622 [ C(RESULT_MISS) ] = 0,
627 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
628 [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
631 [ C(RESULT_ACCESS) ] = -1,
632 [ C(RESULT_MISS) ] = -1,
634 [ C(OP_PREFETCH) ] = {
635 [ C(RESULT_ACCESS) ] = -1,
636 [ C(RESULT_MISS) ] = -1,
641 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
642 [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
645 [ C(RESULT_ACCESS) ] = -1,
646 [ C(RESULT_MISS) ] = -1,
648 [ C(OP_PREFETCH) ] = {
649 [ C(RESULT_ACCESS) ] = -1,
650 [ C(RESULT_MISS) ] = -1,
656 * AMD Performance Monitor K7 and later.
658 static const u64 amd_perfmon_event_map[] =
660 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
661 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
662 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
663 [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
664 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
665 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
668 static u64 amd_pmu_event_map(int hw_event)
670 return amd_perfmon_event_map[hw_event];
673 static u64 amd_pmu_raw_event(u64 hw_event)
675 #define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL
676 #define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
677 #define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
678 #define K7_EVNTSEL_INV_MASK 0x000800000ULL
679 #define K7_EVNTSEL_REG_MASK 0x0FF000000ULL
681 #define K7_EVNTSEL_MASK \
682 (K7_EVNTSEL_EVENT_MASK | \
683 K7_EVNTSEL_UNIT_MASK | \
684 K7_EVNTSEL_EDGE_MASK | \
685 K7_EVNTSEL_INV_MASK | \
688 return hw_event & K7_EVNTSEL_MASK;
692 * Propagate event elapsed time into the generic event.
693 * Can only be executed on the CPU where the event is active.
694 * Returns the delta events processed.
697 x86_perf_event_update(struct perf_event *event,
698 struct hw_perf_event *hwc, int idx)
700 int shift = 64 - x86_pmu.event_bits;
701 u64 prev_raw_count, new_raw_count;
704 if (idx == X86_PMC_IDX_FIXED_BTS)
708 * Careful: an NMI might modify the previous event value.
710 * Our tactic to handle this is to first atomically read and
711 * exchange a new raw count - then add that new-prev delta
712 * count to the generic event atomically:
715 prev_raw_count = atomic64_read(&hwc->prev_count);
716 rdmsrl(hwc->event_base + idx, new_raw_count);
718 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
719 new_raw_count) != prev_raw_count)
723 * Now we have the new raw value and have updated the prev
724 * timestamp already. We can now calculate the elapsed delta
725 * (event-)time and add that to the generic event.
727 * Careful, not all hw sign-extends above the physical width of the counter.
730 delta = (new_raw_count << shift) - (prev_raw_count << shift);
733 atomic64_add(delta, &event->count);
734 atomic64_sub(delta, &hwc->period_left);
736 return new_raw_count;
739 static atomic_t active_events;
740 static DEFINE_MUTEX(pmc_reserve_mutex);
742 static bool reserve_pmc_hardware(void)
744 #ifdef CONFIG_X86_LOCAL_APIC
747 if (nmi_watchdog == NMI_LOCAL_APIC)
748 disable_lapic_nmi_watchdog();
750 for (i = 0; i < x86_pmu.num_events; i++) {
751 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
755 for (i = 0; i < x86_pmu.num_events; i++) {
756 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
763 #ifdef CONFIG_X86_LOCAL_APIC
765 for (i--; i >= 0; i--)
766 release_evntsel_nmi(x86_pmu.eventsel + i);
768 i = x86_pmu.num_events;
771 for (i--; i >= 0; i--)
772 release_perfctr_nmi(x86_pmu.perfctr + i);
774 if (nmi_watchdog == NMI_LOCAL_APIC)
775 enable_lapic_nmi_watchdog();
781 static void release_pmc_hardware(void)
783 #ifdef CONFIG_X86_LOCAL_APIC
786 for (i = 0; i < x86_pmu.num_events; i++) {
787 release_perfctr_nmi(x86_pmu.perfctr + i);
788 release_evntsel_nmi(x86_pmu.eventsel + i);
791 if (nmi_watchdog == NMI_LOCAL_APIC)
792 enable_lapic_nmi_watchdog();
796 static inline bool bts_available(void)
798 return x86_pmu.enable_bts != NULL;
801 static inline void init_debug_store_on_cpu(int cpu)
803 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
808 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
809 (u32)((u64)(unsigned long)ds),
810 (u32)((u64)(unsigned long)ds >> 32));
813 static inline void fini_debug_store_on_cpu(int cpu)
815 if (!per_cpu(cpu_hw_events, cpu).ds)
818 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
821 static void release_bts_hardware(void)
825 if (!bts_available())
830 for_each_online_cpu(cpu)
831 fini_debug_store_on_cpu(cpu);
833 for_each_possible_cpu(cpu) {
834 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
839 per_cpu(cpu_hw_events, cpu).ds = NULL;
841 kfree((void *)(unsigned long)ds->bts_buffer_base);
848 static int reserve_bts_hardware(void)
852 if (!bts_available())
857 for_each_possible_cpu(cpu) {
858 struct debug_store *ds;
862 buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
863 if (unlikely(!buffer))
866 ds = kzalloc(sizeof(*ds), GFP_KERNEL);
872 ds->bts_buffer_base = (u64)(unsigned long)buffer;
873 ds->bts_index = ds->bts_buffer_base;
874 ds->bts_absolute_maximum =
875 ds->bts_buffer_base + BTS_BUFFER_SIZE;
876 ds->bts_interrupt_threshold =
877 ds->bts_absolute_maximum - BTS_OVFL_TH;
879 per_cpu(cpu_hw_events, cpu).ds = ds;
884 release_bts_hardware();
886 for_each_online_cpu(cpu)
887 init_debug_store_on_cpu(cpu);
895 static void hw_perf_event_destroy(struct perf_event *event)
897 if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
898 release_pmc_hardware();
899 release_bts_hardware();
900 mutex_unlock(&pmc_reserve_mutex);
904 static inline int x86_pmu_initialized(void)
906 return x86_pmu.handle_irq != NULL;
910 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
912 unsigned int cache_type, cache_op, cache_result;
915 config = attr->config;
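/*
 * The generic cache-event encoding packs three selectors into attr->config:
 * bits 0-7 cache type, bits 8-15 operation, bits 16-23 result.  For example,
 * PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) selects L1D read misses.
 */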
917 cache_type = (config >> 0) & 0xff;
918 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
921 cache_op = (config >> 8) & 0xff;
922 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
925 cache_result = (config >> 16) & 0xff;
926 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
929 val = hw_cache_event_ids[cache_type][cache_op][cache_result];
942 static void intel_pmu_enable_bts(u64 config)
944 unsigned long debugctlmsr;
946 debugctlmsr = get_debugctlmsr();
948 debugctlmsr |= X86_DEBUGCTL_TR;
949 debugctlmsr |= X86_DEBUGCTL_BTS;
950 debugctlmsr |= X86_DEBUGCTL_BTINT;
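/*
 * TR enables branch trace messages, BTS stores them into the DS-area buffer
 * set up by reserve_bts_hardware(), and BTINT raises an interrupt once the
 * buffer fills up to bts_interrupt_threshold.
 */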
952 if (!(config & ARCH_PERFMON_EVENTSEL_OS))
953 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
955 if (!(config & ARCH_PERFMON_EVENTSEL_USR))
956 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
958 update_debugctlmsr(debugctlmsr);
961 static void intel_pmu_disable_bts(void)
963 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
964 unsigned long debugctlmsr;
969 debugctlmsr = get_debugctlmsr();
972 ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
973 X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
975 update_debugctlmsr(debugctlmsr);
979 * Setup the hardware configuration for a given attr_type
981 static int __hw_perf_event_init(struct perf_event *event)
983 struct perf_event_attr *attr = &event->attr;
984 struct hw_perf_event *hwc = &event->hw;
988 if (!x86_pmu_initialized())
992 if (!atomic_inc_not_zero(&active_events)) {
993 mutex_lock(&pmc_reserve_mutex);
994 if (atomic_read(&active_events) == 0) {
995 if (!reserve_pmc_hardware())
998 err = reserve_bts_hardware();
1001 atomic_inc(&active_events);
1002 mutex_unlock(&pmc_reserve_mutex);
1007 event->destroy = hw_perf_event_destroy;
1010 * Generate PMC IRQs:
1011 * (keep 'enabled' bit clear for now)
1013 hwc->config = ARCH_PERFMON_EVENTSEL_INT;
1018 * Count user and OS events unless requested not to.
1020 if (!attr->exclude_user)
1021 hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
1022 if (!attr->exclude_kernel)
1023 hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
1025 if (!hwc->sample_period) {
1026 hwc->sample_period = x86_pmu.max_period;
1027 hwc->last_period = hwc->sample_period;
1028 atomic64_set(&hwc->period_left, hwc->sample_period);
1031 * If we have a PMU initialized but no APIC
1032 * interrupts, we cannot sample hardware
1033 * events (user-space has to fall back and
1034 * sample via a hrtimer based software event):
1041 * Raw hw_event types provide the config in the hw_event structure
1043 if (attr->type == PERF_TYPE_RAW) {
1044 hwc->config |= x86_pmu.raw_event(attr->config);
1048 if (attr->type == PERF_TYPE_HW_CACHE)
1049 return set_ext_hw_attr(hwc, attr);
1051 if (attr->config >= x86_pmu.max_events)
1057 config = x86_pmu.event_map(attr->config);
1068 if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
1069 (hwc->sample_period == 1)) {
1070 /* BTS is not supported by this architecture. */
1071 if (!bts_available())
1074 /* BTS is currently only allowed for user-mode. */
1075 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1079 hwc->config |= config;
1084 static void p6_pmu_disable_all(void)
1086 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1095 /* p6 only has one enable register */
1096 rdmsrl(MSR_P6_EVNTSEL0, val);
1097 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1098 wrmsrl(MSR_P6_EVNTSEL0, val);
1101 static void intel_pmu_disable_all(void)
1103 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1111 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1113 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1114 intel_pmu_disable_bts();
1117 static void amd_pmu_disable_all(void)
1119 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1127 * ensure we write the disable before we start disabling the
1128 * events proper, so that amd_pmu_enable_event() does the
1133 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1136 if (!test_bit(idx, cpuc->active_mask))
1138 rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
1139 if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
1141 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1142 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1146 void hw_perf_disable(void)
1148 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1150 if (!x86_pmu_initialized())
1156 x86_pmu.disable_all();
1159 static void p6_pmu_enable_all(void)
1161 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1170 /* p6 only has one enable register */
1171 rdmsrl(MSR_P6_EVNTSEL0, val);
1172 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1173 wrmsrl(MSR_P6_EVNTSEL0, val);
1176 static void intel_pmu_enable_all(void)
1178 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1186 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
1188 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1189 struct perf_event *event =
1190 cpuc->events[X86_PMC_IDX_FIXED_BTS];
1192 if (WARN_ON_ONCE(!event))
1195 intel_pmu_enable_bts(event->hw.config);
1199 static void amd_pmu_enable_all(void)
1201 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1210 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1211 struct perf_event *event = cpuc->events[idx];
1214 if (!test_bit(idx, cpuc->active_mask))
1217 val = event->hw.config;
1218 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1219 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1223 static const struct pmu pmu;
1225 static inline int is_x86_event(struct perf_event *event)
1227 return event->pmu == &pmu;
1230 static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
1235 u64 constraints[X86_PMC_IDX_MAX][BITS_TO_LONGS(X86_PMC_IDX_MAX)];
1236 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
1237 struct hw_perf_event *hwc;
1239 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1241 for (i = 0; i < n; i++) {
1242 x86_pmu.get_event_constraints(cpuc,
1243 cpuc->event_list[i],
1248 * weight = number of possible counters
1250 * 1 = most constrained, only works on one counter
1251 * wmax = least constrained, works on any counter
1253 * assign events to counters starting with most
1254 * constrained events.
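 *
 * For example, an event whose constraint mask allows only counters
 * {0,1} has weight 2 and is placed in an earlier pass than a fully
 * flexible event (weight == num_events), so the flexible event cannot
 * grab the only counters the constrained one could use.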
1256 wmax = x86_pmu.num_events;
1259 * when fixed event counters are present,
1260 * wmax is incremented by 1 to account
1261 * for one more choice
1263 if (x86_pmu.num_events_fixed)
1267 for (w = 1; num && w <= wmax; w++) {
1268 /* for each event */
1269 for (i = 0; i < n; i++) {
1270 c = (unsigned long *)constraints[i];
1271 hwc = &cpuc->event_list[i]->hw;
1273 weight = bitmap_weight(c, X86_PMC_IDX_MAX);
1278 * try to reuse previous assignment
1280 * This is possible despite the fact that
1281 * the events or their order may have changed.
1283 * What matters is the level of constraints
1284 * of an event and this is constant for now.
1286 * This is possible also because we always
1287 * scan from most to least constrained. Thus,
1288 * if a counter can be reused, it means no
1289 * more-constrained event needed it. And
1290 * subsequent events will either compete for it
1291 * (which cannot be solved anyway) or they
1292 * have fewer constraints and can use another free counter.
1296 if (j != -1 && !test_bit(j, used_mask))
1299 for_each_bit(j, c, X86_PMC_IDX_MAX) {
1300 if (!test_bit(j, used_mask))
1304 if (j == X86_PMC_IDX_MAX)
1307 set_bit(j, used_mask);
1310 pr_debug("CPU%d config=0x%llx idx=%d assign=%c\n",
1314 assign ? 'y' : 'n');
1323 * scheduling failed or is just a simulation,
1324 * free resources if necessary
1326 if (!assign || num) {
1327 for (i = 0; i < n; i++) {
1328 if (x86_pmu.put_event_constraints)
1329 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
1332 return num ? -ENOSPC : 0;
1336 * dogrp: true if we must also collect the group's sibling events
1337 * returns total number of events and error code
1339 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
1341 struct perf_event *event;
1344 max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
1346 /* current number of events already accepted */
1349 if (is_x86_event(leader)) {
1352 cpuc->event_list[n] = leader;
1358 list_for_each_entry(event, &leader->sibling_list, group_entry) {
1359 if (!is_x86_event(event) ||
1360 event->state == PERF_EVENT_STATE_OFF)
1366 cpuc->event_list[n] = event;
1373 static inline void x86_assign_hw_event(struct perf_event *event,
1374 struct hw_perf_event *hwc, int idx)
1378 if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
1379 hwc->config_base = 0;
1380 hwc->event_base = 0;
1381 } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
1382 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1384 * We set it so that event_base + idx in wrmsr/rdmsr maps to
1385 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1388 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1390 hwc->config_base = x86_pmu.eventsel;
1391 hwc->event_base = x86_pmu.perfctr;
1395 void hw_perf_enable(void)
1397 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1398 struct perf_event *event;
1399 struct hw_perf_event *hwc;
1402 if (!x86_pmu_initialized())
1404 if (cpuc->n_added) {
1406 * apply assignment obtained either from
1407 * hw_perf_group_sched_in() or x86_pmu_enable()
1409 * step1: save events moving to new counters
1410 * step2: reprogram moved events into new counters
1412 for (i = 0; i < cpuc->n_events; i++) {
1414 event = cpuc->event_list[i];
1417 if (hwc->idx == -1 || hwc->idx == cpuc->assign[i])
1420 x86_pmu.disable(hwc, hwc->idx);
1422 clear_bit(hwc->idx, cpuc->active_mask);
1424 cpuc->events[hwc->idx] = NULL;
1426 x86_perf_event_update(event, hwc, hwc->idx);
1431 for (i = 0; i < cpuc->n_events; i++) {
1433 event = cpuc->event_list[i];
1436 if (hwc->idx == -1) {
1437 x86_assign_hw_event(event, hwc, cpuc->assign[i]);
1438 x86_perf_event_set_period(event, hwc, hwc->idx);
1441 * need to mark as active because x86_pmu_disable()
1442 * clears active_mask and events[] yet it preserves hwc->idx.
1445 set_bit(hwc->idx, cpuc->active_mask);
1446 cpuc->events[hwc->idx] = event;
1448 x86_pmu.enable(hwc, hwc->idx);
1449 perf_event_update_userpage(event);
1452 perf_events_lapic_init();
1454 x86_pmu.enable_all();
1457 static inline u64 intel_pmu_get_status(void)
1461 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1466 static inline void intel_pmu_ack_status(u64 ack)
1468 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1471 static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1473 (void)checking_wrmsrl(hwc->config_base + idx,
1474 hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
1477 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1479 (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
1483 intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
1485 int idx = __idx - X86_PMC_IDX_FIXED;
1488 mask = 0xfULL << (idx * 4);
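/*
 * Each fixed counter owns a 4-bit control field in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL; clearing that field disables the
 * counter without disturbing its neighbours.
 */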
1490 rdmsrl(hwc->config_base, ctrl_val);
1492 (void)checking_wrmsrl(hwc->config_base, ctrl_val);
1496 p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1498 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1499 u64 val = P6_NOP_EVENT;
1502 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1504 (void)checking_wrmsrl(hwc->config_base + idx, val);
1508 intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1510 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1511 intel_pmu_disable_bts();
1515 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1516 intel_pmu_disable_fixed(hwc, idx);
1520 x86_pmu_disable_event(hwc, idx);
1524 amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1526 x86_pmu_disable_event(hwc, idx);
1529 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1532 * Set the next IRQ period, based on the hwc->period_left value.
1533 * To be called with the event disabled in hw:
1536 x86_perf_event_set_period(struct perf_event *event,
1537 struct hw_perf_event *hwc, int idx)
1539 s64 left = atomic64_read(&hwc->period_left);
1540 s64 period = hwc->sample_period;
1543 if (idx == X86_PMC_IDX_FIXED_BTS)
1547 * If we are way outside a reasonable range then just skip forward:
1549 if (unlikely(left <= -period)) {
1551 atomic64_set(&hwc->period_left, left);
1552 hwc->last_period = period;
1556 if (unlikely(left <= 0)) {
1558 atomic64_set(&hwc->period_left, left);
1559 hwc->last_period = period;
1563 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
1565 if (unlikely(left < 2))
1568 if (left > x86_pmu.max_period)
1569 left = x86_pmu.max_period;
1571 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1574 * The hw event starts counting from this event offset,
1575 * mark it to be able to compute future deltas:
1577 atomic64_set(&hwc->prev_count, (u64)-left);
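/*
 * The counter is programmed with the negated period so that it overflows
 * (and raises a PMI) after exactly 'left' increments.  E.g. with a
 * 32-bit-wide counter and left == 100000 the value written is 0xfffe7960.
 */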
1579 err = checking_wrmsrl(hwc->event_base + idx,
1580 (u64)(-left) & x86_pmu.event_mask);
1582 perf_event_update_userpage(event);
1588 intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
1590 int idx = __idx - X86_PMC_IDX_FIXED;
1591 u64 ctrl_val, bits, mask;
1595 * Enable IRQ generation (0x8),
1596 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1600 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1602 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1605 mask = 0xfULL << (idx * 4);
1607 rdmsrl(hwc->config_base, ctrl_val);
1610 err = checking_wrmsrl(hwc->config_base, ctrl_val);
1613 static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1615 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1620 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1622 (void)checking_wrmsrl(hwc->config_base + idx, val);
1626 static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1628 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1629 if (!__get_cpu_var(cpu_hw_events).enabled)
1632 intel_pmu_enable_bts(hwc->config);
1636 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1637 intel_pmu_enable_fixed(hwc, idx);
1641 x86_pmu_enable_event(hwc, idx);
1644 static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1646 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1649 x86_pmu_enable_event(hwc, idx);
1653 * activate a single event
1655 * The event is added to the group of enabled events
1656 * but only if it can be scheduled with existing events.
1658 * Called with PMU disabled. If successful and return value 1,
1659 * then guaranteed to call perf_enable() and hw_perf_enable()
1661 static int x86_pmu_enable(struct perf_event *event)
1663 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1664 struct hw_perf_event *hwc;
1665 int assign[X86_PMC_IDX_MAX];
1670 n0 = cpuc->n_events;
1671 n = collect_events(cpuc, event, false);
1675 ret = x86_schedule_events(cpuc, n, assign);
1679 * copy the new assignment; now we know it is possible
1680 * it will be used by hw_perf_enable()
1682 memcpy(cpuc->assign, assign, n*sizeof(int));
1685 cpuc->n_added = n - n0;
1688 x86_perf_event_set_period(event, hwc, hwc->idx);
1693 static void x86_pmu_unthrottle(struct perf_event *event)
1695 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1696 struct hw_perf_event *hwc = &event->hw;
1698 if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
1699 cpuc->events[hwc->idx] != event))
1702 x86_pmu.enable(hwc, hwc->idx);
1705 void perf_event_print_debug(void)
1707 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1708 struct cpu_hw_events *cpuc;
1709 unsigned long flags;
1712 if (!x86_pmu.num_events)
1715 local_irq_save(flags);
1717 cpu = smp_processor_id();
1718 cpuc = &per_cpu(cpu_hw_events, cpu);
1720 if (x86_pmu.version >= 2) {
1721 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1722 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1723 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1724 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1727 pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
1728 pr_info("CPU#%d: status: %016llx\n", cpu, status);
1729 pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
1730 pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
1732 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1734 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1735 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1736 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
1738 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1740 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
1741 cpu, idx, pmc_ctrl);
1742 pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
1743 cpu, idx, pmc_count);
1744 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
1745 cpu, idx, prev_left);
1747 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1748 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1750 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1751 cpu, idx, pmc_count);
1753 local_irq_restore(flags);
1756 static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
1758 struct debug_store *ds = cpuc->ds;
1764 struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
1765 struct bts_record *at, *top;
1766 struct perf_output_handle handle;
1767 struct perf_event_header header;
1768 struct perf_sample_data data;
1769 struct pt_regs regs;
1777 at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1778 top = (struct bts_record *)(unsigned long)ds->bts_index;
1783 ds->bts_index = ds->bts_buffer_base;
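/*
 * at..top covers every record the CPU has logged since the last drain;
 * resetting bts_index first lets the hardware refill the buffer from the
 * base while we turn the saved records into samples below.
 */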
1786 data.period = event->hw.last_period;
1792 * Prepare a generic sample, i.e. fill in the invariant fields.
1793 * We will overwrite the from and to address before we output the sample.
1796 perf_prepare_sample(&header, &data, event, &regs);
1798 if (perf_output_begin(&handle, event,
1799 header.size * (top - at), 1, 1))
1802 for (; at < top; at++) {
1806 perf_output_sample(&handle, &header, &data, event);
1809 perf_output_end(&handle);
1811 /* There's new data available. */
1812 event->hw.interrupts++;
1813 event->pending_kill = POLL_IN;
1816 static void x86_pmu_disable(struct perf_event *event)
1818 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1819 struct hw_perf_event *hwc = &event->hw;
1820 int i, idx = hwc->idx;
1823 * Must be done before we disable, otherwise the nmi handler
1824 * could reenable again:
1826 clear_bit(idx, cpuc->active_mask);
1827 x86_pmu.disable(hwc, idx);
1830 * Make sure the cleared pointer becomes visible before we
1831 * (potentially) free the event:
1836 * Drain the remaining delta count out of an event
1837 * that we are disabling:
1839 x86_perf_event_update(event, hwc, idx);
1841 /* Drain the remaining BTS records. */
1842 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1843 intel_pmu_drain_bts_buffer(cpuc);
1845 cpuc->events[idx] = NULL;
1847 for (i = 0; i < cpuc->n_events; i++) {
1848 if (event == cpuc->event_list[i]) {
1850 if (x86_pmu.put_event_constraints)
1851 x86_pmu.put_event_constraints(cpuc, event);
1853 while (++i < cpuc->n_events)
1854 cpuc->event_list[i-1] = cpuc->event_list[i];
1859 perf_event_update_userpage(event);
1863 * Save and restart an expired event. Called by NMI contexts,
1864 * so it has to be careful about preempting normal event ops:
1866 static int intel_pmu_save_and_restart(struct perf_event *event)
1868 struct hw_perf_event *hwc = &event->hw;
1872 x86_perf_event_update(event, hwc, idx);
1873 ret = x86_perf_event_set_period(event, hwc, idx);
1875 if (event->state == PERF_EVENT_STATE_ACTIVE)
1876 intel_pmu_enable_event(hwc, idx);
1881 static void intel_pmu_reset(void)
1883 struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
1884 unsigned long flags;
1887 if (!x86_pmu.num_events)
1890 local_irq_save(flags);
1892 printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1894 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1895 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1896 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
1898 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1899 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1902 ds->bts_index = ds->bts_buffer_base;
1904 local_irq_restore(flags);
1907 static int p6_pmu_handle_irq(struct pt_regs *regs)
1909 struct perf_sample_data data;
1910 struct cpu_hw_events *cpuc;
1911 struct perf_event *event;
1912 struct hw_perf_event *hwc;
1913 int idx, handled = 0;
1919 cpuc = &__get_cpu_var(cpu_hw_events);
1921 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1922 if (!test_bit(idx, cpuc->active_mask))
1925 event = cpuc->events[idx];
1928 val = x86_perf_event_update(event, hwc, idx);
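/*
 * Counters count up from the negative value programmed in
 * x86_perf_event_set_period(), so the top implemented bit stays set
 * until the counter crosses zero: a set MSB means this counter has
 * not overflowed and did not cause the interrupt.
 */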
1929 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1936 data.period = event->hw.last_period;
1938 if (!x86_perf_event_set_period(event, hwc, idx))
1941 if (perf_event_overflow(event, 1, &data, regs))
1942 p6_pmu_disable_event(hwc, idx);
1946 inc_irq_stat(apic_perf_irqs);
1952 * This handler is triggered by the local APIC, so the APIC IRQ handling rules apply:
1955 static int intel_pmu_handle_irq(struct pt_regs *regs)
1957 struct perf_sample_data data;
1958 struct cpu_hw_events *cpuc;
1965 cpuc = &__get_cpu_var(cpu_hw_events);
1968 intel_pmu_drain_bts_buffer(cpuc);
1969 status = intel_pmu_get_status();
1977 if (++loops > 100) {
1978 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1979 perf_event_print_debug();
1985 inc_irq_stat(apic_perf_irqs);
1987 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
1988 struct perf_event *event = cpuc->events[bit];
1990 clear_bit(bit, (unsigned long *) &status);
1991 if (!test_bit(bit, cpuc->active_mask))
1994 if (!intel_pmu_save_and_restart(event))
1997 data.period = event->hw.last_period;
1999 if (perf_event_overflow(event, 1, &data, regs))
2000 intel_pmu_disable_event(&event->hw, bit);
2003 intel_pmu_ack_status(ack);
2006 * Repeat if there is more work to be done:
2008 status = intel_pmu_get_status();
2017 static int amd_pmu_handle_irq(struct pt_regs *regs)
2019 struct perf_sample_data data;
2020 struct cpu_hw_events *cpuc;
2021 struct perf_event *event;
2022 struct hw_perf_event *hwc;
2023 int idx, handled = 0;
2029 cpuc = &__get_cpu_var(cpu_hw_events);
2031 for (idx = 0; idx < x86_pmu.num_events; idx++) {
2032 if (!test_bit(idx, cpuc->active_mask))
2035 event = cpuc->events[idx];
2038 val = x86_perf_event_update(event, hwc, idx);
2039 if (val & (1ULL << (x86_pmu.event_bits - 1)))
2046 data.period = event->hw.last_period;
2048 if (!x86_perf_event_set_period(event, hwc, idx))
2051 if (perf_event_overflow(event, 1, &data, regs))
2052 amd_pmu_disable_event(hwc, idx);
2056 inc_irq_stat(apic_perf_irqs);
2061 void smp_perf_pending_interrupt(struct pt_regs *regs)
2065 inc_irq_stat(apic_pending_irqs);
2066 perf_event_do_pending();
2070 void set_perf_event_pending(void)
2072 #ifdef CONFIG_X86_LOCAL_APIC
2073 if (!x86_pmu.apic || !x86_pmu_initialized())
2076 apic->send_IPI_self(LOCAL_PENDING_VECTOR);
2080 void perf_events_lapic_init(void)
2082 #ifdef CONFIG_X86_LOCAL_APIC
2083 if (!x86_pmu.apic || !x86_pmu_initialized())
2087 * Always use NMI for PMU
2089 apic_write(APIC_LVTPC, APIC_DM_NMI);
2093 static int __kprobes
2094 perf_event_nmi_handler(struct notifier_block *self,
2095 unsigned long cmd, void *__args)
2097 struct die_args *args = __args;
2098 struct pt_regs *regs;
2100 if (!atomic_read(&active_events))
2114 #ifdef CONFIG_X86_LOCAL_APIC
2115 apic_write(APIC_LVTPC, APIC_DM_NMI);
2118 * Can't rely on the handled return value to say it was our NMI, two
2119 * events could trigger 'simultaneously' raising two back-to-back NMIs.
2121 * If the first NMI handles both, the latter will be empty and daze the CPU.
2124 x86_pmu.handle_irq(regs);
2129 static struct event_constraint bts_constraint = {
2132 .idxmsk[0] = 1ULL << X86_PMC_IDX_FIXED_BTS
2135 static int intel_special_constraints(struct perf_event *event,
2138 unsigned int hw_event;
2140 hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
2142 if (unlikely((hw_event ==
2143 x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
2144 (event->hw.sample_period == 1))) {
2146 bitmap_copy((unsigned long *)idxmsk,
2147 (unsigned long *)bts_constraint.idxmsk,
2154 static void intel_get_event_constraints(struct cpu_hw_events *cpuc,
2155 struct perf_event *event,
2158 const struct event_constraint *c;
2163 bitmap_zero((unsigned long *)idxmsk, X86_PMC_IDX_MAX);
2165 if (intel_special_constraints(event, idxmsk))
2168 if (x86_pmu.event_constraints) {
2169 for_each_event_constraint(c, x86_pmu.event_constraints) {
2170 if ((event->hw.config & c->cmask) == c->code) {
2172 bitmap_copy((unsigned long *)idxmsk,
2173 (unsigned long *)c->idxmsk,
2179 /* no constraints means the event supports all generic counters */
2180 bitmap_fill((unsigned long *)idxmsk, x86_pmu.num_events);
2183 static void amd_get_event_constraints(struct cpu_hw_events *cpuc,
2184 struct perf_event *event,
2189 static int x86_event_sched_in(struct perf_event *event,
2190 struct perf_cpu_context *cpuctx, int cpu)
2194 event->state = PERF_EVENT_STATE_ACTIVE;
2196 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
2198 if (!is_x86_event(event))
2199 ret = event->pmu->enable(event);
2201 if (!ret && !is_software_event(event))
2202 cpuctx->active_oncpu++;
2204 if (!ret && event->attr.exclusive)
2205 cpuctx->exclusive = 1;
2210 static void x86_event_sched_out(struct perf_event *event,
2211 struct perf_cpu_context *cpuctx, int cpu)
2213 event->state = PERF_EVENT_STATE_INACTIVE;
2216 if (!is_x86_event(event))
2217 event->pmu->disable(event);
2219 event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
2221 if (!is_software_event(event))
2222 cpuctx->active_oncpu--;
2224 if (event->attr.exclusive || !cpuctx->active_oncpu)
2225 cpuctx->exclusive = 0;
2229 * Called to enable a whole group of events.
2230 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
2231 * Assumes the caller has disabled interrupts and has
2232 * frozen the PMU with hw_perf_save_disable.
2234 * called with PMU disabled. If successful and return value 1,
2235 * then guaranteed to call perf_enable() and hw_perf_enable()
2237 int hw_perf_group_sched_in(struct perf_event *leader,
2238 struct perf_cpu_context *cpuctx,
2239 struct perf_event_context *ctx, int cpu)
2241 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2242 struct perf_event *sub;
2243 int assign[X86_PMC_IDX_MAX];
2246 /* n0 = total number of events */
2247 n0 = collect_events(cpuc, leader, true);
2251 ret = x86_schedule_events(cpuc, n0, assign);
2255 ret = x86_event_sched_in(leader, cpuctx, cpu);
2260 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2261 if (sub->state != PERF_EVENT_STATE_OFF) {
2262 ret = x86_event_sched_in(sub, cpuctx, cpu);
2269 * copy the new assignment; now we know it is possible
2270 * it will be used by hw_perf_enable()
2272 memcpy(cpuc->assign, assign, n0*sizeof(int));
2274 cpuc->n_events = n0;
2276 ctx->nr_active += n1;
2279 * 1 means successful and events are active
2280 * This is not quite true because we defer
2281 * actual activation until hw_perf_enable() but
2282 * this way we ensure the caller won't try to enable individual events
2287 x86_event_sched_out(leader, cpuctx, cpu);
2289 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2290 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
2291 x86_event_sched_out(sub, cpuctx, cpu);
2299 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
2300 .notifier_call = perf_event_nmi_handler,
2305 static __initconst struct x86_pmu p6_pmu = {
2307 .handle_irq = p6_pmu_handle_irq,
2308 .disable_all = p6_pmu_disable_all,
2309 .enable_all = p6_pmu_enable_all,
2310 .enable = p6_pmu_enable_event,
2311 .disable = p6_pmu_disable_event,
2312 .eventsel = MSR_P6_EVNTSEL0,
2313 .perfctr = MSR_P6_PERFCTR0,
2314 .event_map = p6_pmu_event_map,
2315 .raw_event = p6_pmu_raw_event,
2316 .max_events = ARRAY_SIZE(p6_perfmon_event_map),
2318 .max_period = (1ULL << 31) - 1,
2322 * Events have 40 bits implemented. However they are designed such
2323 * that bits [32-39] are sign extensions of bit 31. As such the
2324 * effective width of an event for a P6-like PMU is 32 bits only.
2326 * See IA-32 Intel Architecture Software developer manual Vol 3B
2329 .event_mask = (1ULL << 32) - 1,
2330 .get_event_constraints = intel_get_event_constraints,
2331 .event_constraints = intel_p6_event_constraints
2334 static __initconst struct x86_pmu intel_pmu = {
2336 .handle_irq = intel_pmu_handle_irq,
2337 .disable_all = intel_pmu_disable_all,
2338 .enable_all = intel_pmu_enable_all,
2339 .enable = intel_pmu_enable_event,
2340 .disable = intel_pmu_disable_event,
2341 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2342 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
2343 .event_map = intel_pmu_event_map,
2344 .raw_event = intel_pmu_raw_event,
2345 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
2348 * Intel PMCs cannot be accessed sanely above 32 bit width,
2349 * so we install an artificial 1<<31 period regardless of
2350 * the generic event period:
2352 .max_period = (1ULL << 31) - 1,
2353 .enable_bts = intel_pmu_enable_bts,
2354 .disable_bts = intel_pmu_disable_bts,
2355 .get_event_constraints = intel_get_event_constraints
2358 static __initconst struct x86_pmu amd_pmu = {
2360 .handle_irq = amd_pmu_handle_irq,
2361 .disable_all = amd_pmu_disable_all,
2362 .enable_all = amd_pmu_enable_all,
2363 .enable = amd_pmu_enable_event,
2364 .disable = amd_pmu_disable_event,
2365 .eventsel = MSR_K7_EVNTSEL0,
2366 .perfctr = MSR_K7_PERFCTR0,
2367 .event_map = amd_pmu_event_map,
2368 .raw_event = amd_pmu_raw_event,
2369 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
2372 .event_mask = (1ULL << 48) - 1,
2374 /* use highest bit to detect overflow */
2375 .max_period = (1ULL << 47) - 1,
2376 .get_event_constraints = amd_get_event_constraints
2379 static __init int p6_pmu_init(void)
2381 switch (boot_cpu_data.x86_model) {
2383 case 3: /* Pentium Pro */
2385 case 6: /* Pentium II */
2388 case 11: /* Pentium III */
2394 pr_cont("unsupported p6 CPU model %d ",
2395 boot_cpu_data.x86_model);
2404 static __init int intel_pmu_init(void)
2406 union cpuid10_edx edx;
2407 union cpuid10_eax eax;
2408 unsigned int unused;
2412 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2413 /* check for P6 processor family */
2414 if (boot_cpu_data.x86 == 6) {
2415 return p6_pmu_init();
2422 * Check whether the Architectural PerfMon supports
2423 * Branch Misses Retired hw_event or not.
2425 cpuid(10, &eax.full, &ebx, &unused, &edx.full);
2426 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
2429 version = eax.split.version_id;
2433 x86_pmu = intel_pmu;
2434 x86_pmu.version = version;
2435 x86_pmu.num_events = eax.split.num_events;
2436 x86_pmu.event_bits = eax.split.bit_width;
2437 x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
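/*
 * These fields come straight out of CPUID leaf 0xA as described by
 * union cpuid10_eax: version in bits 0-7, number of generic counters
 * in bits 8-15 and their bit width in bits 16-23; the fixed-counter
 * count lives in the low bits of EDX (union cpuid10_edx).
 */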
2440 * Quirk: v2 perfmon does not report fixed-purpose events, so
2441 * assume at least 3 events:
2443 x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
2446 * Install the hw-cache-events table:
2448 switch (boot_cpu_data.x86_model) {
2449 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2450 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2451 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2452 case 29: /* six-core 45 nm xeon "Dunnington" */
2453 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2454 sizeof(hw_cache_event_ids));
2456 x86_pmu.event_constraints = intel_core_event_constraints;
2457 pr_cont("Core2 events, ");
2460 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2461 sizeof(hw_cache_event_ids));
2463 x86_pmu.event_constraints = intel_nehalem_event_constraints;
2464 pr_cont("Nehalem/Corei7 events, ");
2467 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2468 sizeof(hw_cache_event_ids));
2470 x86_pmu.event_constraints = intel_gen_event_constraints;
2471 pr_cont("Atom events, ");
2475 * default constraints for v2 and up
2477 x86_pmu.event_constraints = intel_gen_event_constraints;
2478 pr_cont("generic architected perfmon, ");
2483 static __init int amd_pmu_init(void)
2485 /* Performance-monitoring supported from K7 and later: */
2486 if (boot_cpu_data.x86 < 6)
2491 /* Events are common for all AMDs */
2492 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2493 sizeof(hw_cache_event_ids));
2498 static void __init pmu_check_apic(void)
2504 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2505 pr_info("no hardware sampling interrupt available.\n");
2508 void __init init_hw_perf_events(void)
2512 pr_info("Performance Events: ");
2514 switch (boot_cpu_data.x86_vendor) {
2515 case X86_VENDOR_INTEL:
2516 err = intel_pmu_init();
2518 case X86_VENDOR_AMD:
2519 err = amd_pmu_init();
2525 pr_cont("no PMU driver, software events only.\n");
2531 pr_cont("%s PMU driver.\n", x86_pmu.name);
2533 if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
2534 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2535 x86_pmu.num_events, X86_PMC_MAX_GENERIC);
2536 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
2538 perf_event_mask = (1 << x86_pmu.num_events) - 1;
2539 perf_max_events = x86_pmu.num_events;
2541 if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
2542 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2543 x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
2544 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
2548 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
2549 x86_pmu.intel_ctrl = perf_event_mask;
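/*
 * perf_event_mask now mirrors the layout of MSR_CORE_PERF_GLOBAL_CTRL:
 * one bit per generic counter in the low bits plus one bit per fixed
 * counter starting at X86_PMC_IDX_FIXED (bit 32).
 */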
2551 perf_events_lapic_init();
2552 register_die_notifier(&perf_event_nmi_notifier);
2554 pr_info("... version: %d\n", x86_pmu.version);
2555 pr_info("... bit width: %d\n", x86_pmu.event_bits);
2556 pr_info("... generic registers: %d\n", x86_pmu.num_events);
2557 pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
2558 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
2559 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
2560 pr_info("... event mask: %016Lx\n", perf_event_mask);
2563 static inline void x86_pmu_read(struct perf_event *event)
2565 x86_perf_event_update(event, &event->hw, event->hw.idx);
2568 static const struct pmu pmu = {
2569 .enable = x86_pmu_enable,
2570 .disable = x86_pmu_disable,
2571 .read = x86_pmu_read,
2572 .unthrottle = x86_pmu_unthrottle,
2576 * validate a single event group
2578 * validation includes:
2579 * - check events are compatible with each other
2580 * - events do not compete for the same counter
2581 * - number of events <= number of counters
2583 * validation ensures the group can be loaded onto the
2584 * PMU if it was the only group available.
2586 static int validate_group(struct perf_event *event)
2588 struct perf_event *leader = event->group_leader;
2589 struct cpu_hw_events fake_cpuc;
2592 memset(&fake_cpuc, 0, sizeof(fake_cpuc));
2595 * the event is not yet connected with its
2596 * siblings therefore we must first collect
2597 * existing siblings, then add the new event
2598 * before we can simulate the scheduling
2600 n = collect_events(&fake_cpuc, leader, true);
2604 fake_cpuc.n_events = n;
2605 n = collect_events(&fake_cpuc, event, false);
2609 fake_cpuc.n_events = n;
2611 return x86_schedule_events(&fake_cpuc, n, NULL);
2614 const struct pmu *hw_perf_event_init(struct perf_event *event)
2618 err = __hw_perf_event_init(event);
2620 if (event->group_leader != event)
2621 err = validate_group(event);
2625 event->destroy(event);
2626 return ERR_PTR(err);
2637 void callchain_store(struct perf_callchain_entry *entry, u64 ip)
2639 if (entry->nr < PERF_MAX_STACK_DEPTH)
2640 entry->ip[entry->nr++] = ip;
2643 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2644 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
2648 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
2650 /* Ignore warnings */
2653 static void backtrace_warning(void *data, char *msg)
2655 /* Ignore warnings */
2658 static int backtrace_stack(void *data, char *name)
2663 static void backtrace_address(void *data, unsigned long addr, int reliable)
2665 struct perf_callchain_entry *entry = data;
2668 callchain_store(entry, addr);
2671 static const struct stacktrace_ops backtrace_ops = {
2672 .warning = backtrace_warning,
2673 .warning_symbol = backtrace_warning_symbol,
2674 .stack = backtrace_stack,
2675 .address = backtrace_address,
2676 .walk_stack = print_context_stack_bp,
2679 #include "../dumpstack.h"
2682 perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
2684 callchain_store(entry, PERF_CONTEXT_KERNEL);
2685 callchain_store(entry, regs->ip);
2687 dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
2691 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
2693 static unsigned long
2694 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
2696 unsigned long offset, addr = (unsigned long)from;
2697 int type = in_nmi() ? KM_NMI : KM_IRQ0;
2698 unsigned long size, len = 0;
2704 ret = __get_user_pages_fast(addr, 1, 0, &page);
2708 offset = addr & (PAGE_SIZE - 1);
2709 size = min(PAGE_SIZE - offset, n - len);
2711 map = kmap_atomic(page, type);
2712 memcpy(to, map+offset, size);
2713 kunmap_atomic(map, type);
2725 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
2727 unsigned long bytes;
2729 bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
2731 return bytes == sizeof(*frame);
2735 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
2737 struct stack_frame frame;
2738 const void __user *fp;
2740 if (!user_mode(regs))
2741 regs = task_pt_regs(current);
2743 fp = (void __user *)regs->bp;
2745 callchain_store(entry, PERF_CONTEXT_USER);
2746 callchain_store(entry, regs->ip);
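/*
 * The loop below walks the user stack by following saved frame pointers
 * (struct stack_frame is { next_frame, return_address }), so it only
 * produces full chains for binaries compiled with frame pointers.
 */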
2748 while (entry->nr < PERF_MAX_STACK_DEPTH) {
2749 frame.next_frame = NULL;
2750 frame.return_address = 0;
2752 if (!copy_stack_frame(fp, &frame))
2755 if ((unsigned long)fp < regs->sp)
2758 callchain_store(entry, frame.return_address);
2759 fp = frame.next_frame;
2764 perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
2771 is_user = user_mode(regs);
2773 if (is_user && current->state != TASK_RUNNING)
2777 perf_callchain_kernel(regs, entry);
2780 perf_callchain_user(regs, entry);
2783 struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2785 struct perf_callchain_entry *entry;
2788 entry = &__get_cpu_var(pmc_nmi_entry);
2790 entry = &__get_cpu_var(pmc_irq_entry);
2794 perf_do_callchain(regs, entry);
2799 void hw_perf_event_setup_online(int cpu)
2801 init_debug_store_on_cpu(cpu);