#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>

#include <asm/kaiser.h>
#include <asm/perf_event.h>

#include "perf_event.h"
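
/*
 * The CPU writes BTS/PEBS records through the virtual address programmed
 * into MSR_IA32_DS_AREA, and it may do so while the user (shadow) page
 * tables are active. Under CONFIG_PAGE_TABLE_ISOLATION the debug store
 * must therefore stay mapped in both page-table copies (_USER_MAPPED).
 */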
static
DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(struct debug_store, cpu_debug_store);

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE	PAGE_SIZE

/*
 * pebs_record_32 for p4 and core not supported

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bx, cx, dx;
	u32 si, di, bp, sp;
};

 */

struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};

struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};

void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

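/*
 * With page-table isolation the buffers must also be visible while the
 * shadow page tables are active, so allocate whole pages and enter them
 * into the kaiser mapping; otherwise a plain kmalloc_node() suffices.
 */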
static void *dsalloc(size_t size, gfp_t flags, int node)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	unsigned int order = get_order(size);
	struct page *page;
	unsigned long addr;

	page = alloc_pages_node(node, flags | __GFP_ZERO, order);
	if (!page)
		return NULL;
	addr = (unsigned long)page_address(page);
	if (kaiser_add_mapping(addr, size, __PAGE_KERNEL) < 0) {
		__free_pages(page, order);
		addr = 0;
	}
	return (void *)addr;
#else
	return kmalloc_node(size, flags | __GFP_ZERO, node);
#endif
}

static void dsfree(const void *buffer, size_t size)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (!buffer)
		return;
	kaiser_remove_mapping((unsigned long)buffer, size);
	free_pages((unsigned long)buffer, get_order(size));
#else
	kfree(buffer);
#endif
}

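/*
 * Program the PEBS buffer bounds into this CPU's DS area. The interrupt
 * threshold is left at a single record, so every PEBS write raises a PMI.
 */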
static int alloc_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh = 1; /* always use a single PEBS record */
	void *buffer;

	if (!x86_pmu.pebs)
		return 0;

	buffer = dsalloc(PEBS_BUFFER_SIZE, GFP_KERNEL, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;

	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
	ds->pebs_index = ds->pebs_buffer_base;
	ds->pebs_absolute_maximum = ds->pebs_buffer_base +
		max * x86_pmu.pebs_record_size;

	ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
		thresh * x86_pmu.pebs_record_size;

	return 0;
}

static void release_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.pebs)
		return;

	dsfree((void *)(unsigned long)ds->pebs_buffer_base, PEBS_BUFFER_SIZE);
	ds->pebs_buffer_base = 0;
}

static int alloc_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh;
	void *buffer;

	if (!x86_pmu.bts)
		return 0;

	buffer = dsalloc(BTS_BUFFER_SIZE, GFP_KERNEL, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	thresh = max / 16;

	ds->bts_buffer_base = (u64)(unsigned long)buffer;
	ds->bts_index = ds->bts_buffer_base;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
		max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
		thresh * BTS_RECORD_SIZE;

	return 0;
}

static void release_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.bts)
		return;

	dsfree((void *)(unsigned long)ds->bts_buffer_base, BTS_BUFFER_SIZE);
	ds->bts_buffer_base = 0;
}

static int alloc_ds_buffer(int cpu)
{
	struct debug_store *ds = per_cpu_ptr(&cpu_debug_store, cpu);

	memset(ds, 0, sizeof(*ds));
	per_cpu(cpu_hw_events, cpu).ds = ds;

	return 0;
}

static void release_ds_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	per_cpu(cpu_hw_events, cpu).ds = NULL;
}

void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	get_online_cpus();
	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
		release_ds_buffer(cpu);
	}
	put_online_cpus();
}

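/*
 * BTS and PEBS fail independently here: a failed BTS allocation only
 * disables BTS, a failed PEBS allocation only disables PEBS, and the DS
 * area itself is released only if both are unusable.
 */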
void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;
	if (!x86_pmu.pebs)
		pebs_err = 1;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;
		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();
}

/*
 * BTS
 */

struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);

void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

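/*
 * Drain every branch record the hardware has logged into the BTS buffer
 * and emit one perf sample per record, reusing a single prepared sample
 * header and patching in the from/to addresses.
 */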
int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return 0;

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;
	regs.ip     = 0;

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size * (top - at)))
		return 1;

	for (; at < top; at++) {
		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
	return 1;
}

/*
 * PEBS
 */

struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
	INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_snb_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8),    /* MEM_TRANS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
	INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
	INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
	INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
	EVENT_CONSTRAINT_END
};

struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &emptyconstraint;
}

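/*
 * A PEBS counter delivers its samples through the DS area instead of a
 * regular counter-overflow PMI, so the counter's own interrupt enable
 * bit is cleared while PEBS is active and restored on disable. For
 * precise_ip > 1 the LBR is enabled as well, so the exact IP can later
 * be reconstructed by intel_pmu_pebs_fixup_ip().
 */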
void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;
	WARN_ON_ONCE(cpuc->enabled);

	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		intel_pmu_lbr_enable(event);
}

void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;

	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		intel_pmu_lbr_disable(event);
}

void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}

void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}

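/*
 * On machines where the PEBS assist is trap-like, the reported IP is that
 * of the instruction *after* the sampled one. The code below decodes
 * forward from the last LBR branch target until it reaches the reported
 * IP; the preceding instruction is the one that actually caused the event.
 */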
#include <asm/insn.h>

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}

static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;

	/*
	 * We don't need to fixup if the PEBS assist is fault like
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * unsigned math, either ip is before the start (impossible) or
	 * the basic block is larger than 1 page (sanity)
	 */
	if ((ip - to) > PAGE_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack
	 */
	if (ip == to) {
		regs->ip = from;
		return 1;
	}

	do {
		struct insn insn;
		u8 buf[MAX_INSN_SIZE];
		void *kaddr;
		int is_64bit = 0;

		old_to = to;
		if (!kernel_ip(ip)) {
			int bytes, size = MAX_INSN_SIZE;

			bytes = copy_from_user_nmi(buf, (void __user *)to, size);
			if (bytes != size)
				return 0;

			kaddr = buf;
		} else
			kaddr = (void *)to;

#ifdef CONFIG_X86_64
		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
#endif
		insn_init(&insn, kaddr, is_64bit);
		insn_get_length(&insn);
		to += insn.length;
	} while (to < ip);

	if (to == ip) {
		regs->ip = old_to;
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * never matched the given IP, either the TO or the IP got corrupted.
	 */
	return 0;
}

static void __intel_pmu_pebs_event(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs)
{
	/*
	 * We cast to pebs_record_core since that is a subset of
	 * both formats and we don't use the other fields in this
	 * routine.
	 */
	struct pebs_record_core *pebs = __pebs;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!intel_pmu_save_and_restart(event))
		return;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;

	/*
	 * We use the interrupt regs as a base because the PEBS record
	 * does not contain a full regs set, specifically it seems to
	 * lack segment descriptors, which get used by things like
	 * user_mode().
	 *
	 * In the simple case fix up only the IP and BP,SP regs, for
	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
	 */
	regs = *iregs;
	regs.ip = pebs->ip;
	regs.bp = pebs->bp;
	regs.sp = pebs->sp;

	if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
		regs.flags |= PERF_EFLAGS_EXACT;
	else
		regs.flags &= ~PERF_EFLAGS_EXACT;

	if (perf_event_overflow(event, &data, &regs))
		x86_pmu_stop(event, 0);
}

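/*
 * Core-era PEBS: only one counter (PMC0) can do PEBS, so every record in
 * the buffer belongs to cpuc->events[0].
 */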
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
	struct pebs_record_core *at, *top;
	int n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

	/*
	 * Whatever else happens, drain the thing
	 */
	ds->pebs_index = ds->pebs_buffer_base;

	if (!test_bit(0, cpuc->active_mask))
		return;

	WARN_ON_ONCE(!event);

	if (!event->attr.precise_ip)
		return;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ON_ONCE(n > 1);
	at += n - 1;

	__intel_pmu_pebs_event(event, iregs, at);
}

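/*
 * Nehalem and later can log PEBS records for several counters into the
 * same buffer; the status field of each record tells which counter(s)
 * triggered it.
 */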
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct pebs_record_nhm *at, *top;
	struct perf_event *event = NULL;
	u64 status = 0;
	int bit, n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ON_ONCE(n > MAX_PEBS_EVENTS);

	for ( ; at < top; at++) {
		for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
			event = cpuc->events[bit];
			if (!test_bit(bit, cpuc->active_mask))
				continue;

			WARN_ON_ONCE(!event);

			if (!event->attr.precise_ip)
				continue;

			if (__test_and_set_bit(bit, (unsigned long *)&status))
				continue;

			break;
		}

		if (!event || bit >= MAX_PEBS_EVENTS)
			continue;

		__intel_pmu_pebs_event(event, iregs, at);
	}
}

/*
 * BTS, PEBS probe and setup
 */

void intel_ds_init(void)
{
	/*
	 * No support for 32bit formats
	 */
	if (!boot_cpu_has(X86_FEATURE_DTES64))
		return;

	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
		int format = x86_pmu.intel_cap.pebs_format;

		switch (format) {
		case 0:
			printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
			break;

		case 1:
			printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		default:
			printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
			x86_pmu.pebs = 0;
		}
	}
}

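/*
 * Re-program the DS area MSR for this CPU; needed when the MSR contents
 * have been lost, e.g. on resume from suspend.
 */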
void perf_restore_debug_store(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
}