/*
 * Performance counter x86 architecture code
 *
 * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>

#include <asm/perf_counter.h>
#include <asm/apic.h>

static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
static int nr_counters_generic __read_mostly;
static u64 perf_counter_mask __read_mostly;
static u64 counter_value_mask __read_mostly;

static int nr_counters_fixed __read_mostly;

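/*
 * Per-CPU bookkeeping: 'counters' and 'used' below are indexed by PMC index,
 * where slots 0..nr_counters_generic-1 are the generic PMCs and the slots
 * starting at X86_PMC_IDX_FIXED are the fixed-function counters, so a single
 * bitmap covers both allocation spaces.
 */
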
struct cpu_hw_counters {
        struct perf_counter     *counters[X86_PMC_IDX_MAX];
        unsigned long           used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long           interrupts;
        u64                     global_enable;
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

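/*
 * Each entry below is the raw architectural EVENTSEL encoding for the
 * corresponding generic event: unit mask in bits 15-8, event select in
 * bits 7-0. For example 0x412e is "LLC Misses": event 0x2e, umask 0x41.
 */
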
static const int intel_perfmon_event_map[] =
{
        [PERF_COUNT_CPU_CYCLES]                 = 0x003c,
        [PERF_COUNT_INSTRUCTIONS]               = 0x00c0,
        [PERF_COUNT_CACHE_REFERENCES]           = 0x4f2e,
        [PERF_COUNT_CACHE_MISSES]               = 0x412e,
        [PERF_COUNT_BRANCH_INSTRUCTIONS]        = 0x00c4,
        [PERF_COUNT_BRANCH_MISSES]              = 0x00c5,
        [PERF_COUNT_BUS_CYCLES]                 = 0x013c,
};

static const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 */
static void
x86_perf_counter_update(struct perf_counter *counter,
                        struct hw_perf_counter *hwc, int idx)
{
        u64 prev_raw_count, new_raw_count, delta;

        /*
         * Careful: an NMI might modify the previous counter value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic counter atomically:
         */
again:
        prev_raw_count = atomic64_read(&hwc->prev_count);
        rdmsrl(hwc->counter_base + idx, new_raw_count);

        if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
                goto again;

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (counter-)time and add that to the generic counter.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count, so we do that by clipping the delta to 32 bits:
         */
        delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);

        atomic64_add(delta, &counter->count);
        atomic64_sub(delta, &hwc->period_left);
}

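/*
 * The hwc->prev_count and hwc->period_left bookkeeping above pairs with
 * __hw_perf_counter_set_period() further down, which re-arms the hardware
 * counter with the remaining period.
 */
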
/*
 * Setup the hardware configuration for a given hw_event_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
        struct perf_counter_hw_event *hw_event = &counter->hw_event;
        struct hw_perf_counter *hwc = &counter->hw;

        if (unlikely(!perf_counters_initialized))
                return -EINVAL;

        /*
         * Count user events, and generate PMC IRQs:
         * (keep 'enabled' bit clear for now)
         */
        hwc->config = ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT;

        /*
         * If privileged enough, count OS events too, and allow
         * NMI events as well:
         */
        hwc->nmi = 0;
        if (capable(CAP_SYS_ADMIN)) {
                hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
                if (hw_event->nmi)
                        hwc->nmi = 1;
        }

        hwc->irq_period = hw_event->irq_period;
        /*
         * Intel PMCs cannot be accessed sanely above 32-bit width,
         * so we install an artificial 1<<31 period regardless of
         * the generic counter period:
         */
        if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
                hwc->irq_period = 0x7FFFFFFF;

        atomic64_set(&hwc->period_left, hwc->irq_period);

        /*
         * Raw event types provide the config directly in the event structure:
         */
        if (hw_event->raw) {
                hwc->config |= hw_event->type;
        } else {
                if (hw_event->type >= max_intel_perfmon_events)
                        return -EINVAL;
                hwc->config |= intel_perfmon_event_map[hw_event->type];
        }
        counter->wakeup_pending = 0;

        return 0;
}

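/*
 * hw_perf_save_disable()/hw_perf_restore() below are the hooks the generic
 * perf_counter code uses to quiesce the whole PMU around counter scheduling:
 * they save/clear and later restore MSR_CORE_PERF_GLOBAL_CTRL.
 */
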
u64 hw_perf_save_disable(void)
{
        u64 ctrl;

        if (unlikely(!perf_counters_initialized))
                return 0;

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

        return ctrl;
}
EXPORT_SYMBOL_GPL(hw_perf_save_disable);

void hw_perf_restore(u64 ctrl)
{
        if (unlikely(!perf_counters_initialized))
                return;

        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
}
EXPORT_SYMBOL_GPL(hw_perf_restore);

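/*
 * Fixed-function counters are controlled through a single MSR
 * (MSR_ARCH_PERFMON_FIXED_CTR_CTRL) that holds one 4-bit field per counter:
 * bit 0 enables ring-0 counting, bit 1 ring-3 counting and bit 3 PMI
 * generation. The helpers below rewrite only the field of the given counter.
 */
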
static inline void
__pmc_fixed_disable(struct perf_counter *counter,
                    struct hw_perf_counter *hwc, unsigned int __idx)
{
        int idx = __idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, mask;
        int err;

        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
__pmc_generic_disable(struct perf_counter *counter,
                      struct hw_perf_counter *hwc, unsigned int idx)
{
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
                __pmc_fixed_disable(counter, hwc, idx);
        else
                wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
}

static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);

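/*
 * prev_left[] only feeds perf_counter_print_debug(): it records the period
 * that was last programmed into each PMC on this CPU.
 */
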
/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
__hw_perf_counter_set_period(struct perf_counter *counter,
                             struct hw_perf_counter *hwc, int idx)
{
        s64 left = atomic64_read(&hwc->period_left);
        s32 period = hwc->irq_period;
        int err;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
                atomic64_set(&hwc->period_left, left);
        }

        if (unlikely(left <= 0)) {
                left += period;
                atomic64_set(&hwc->period_left, left);
        }

        per_cpu(prev_left[idx], smp_processor_id()) = left;

        /*
         * The hw counter starts counting from this counter offset,
         * mark it to be able to extract future deltas:
         */
        atomic64_set(&hwc->prev_count, (u64)-left);

        err = checking_wrmsrl(hwc->counter_base + idx,
                              (u64)(-left) & counter_value_mask);
}

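/*
 * Programming (u64)-left into the PMC means the counter overflows and
 * raises a PMI after 'left' more events, which is how the next sampling
 * interrupt gets scheduled.
 */
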
static inline void
__pmc_fixed_enable(struct perf_counter *counter,
                   struct hw_perf_counter *hwc, unsigned int __idx)
{
        int idx = __idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, bits, mask;
        int err;

        /*
         * Enable IRQ generation (0x8) and ring-3 counting (0x2),
         * and enable ring-0 counting if allowed:
         */
        bits = 0x8ULL | 0x2ULL;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
                bits |= 0x1;
        bits <<= (idx * 4);
        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        ctrl_val |= bits;
        err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void
__pmc_generic_enable(struct perf_counter *counter,
                     struct hw_perf_counter *hwc, int idx)
{
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
                __pmc_fixed_enable(counter, hwc, idx);
        else
                wrmsr(hwc->config_base + idx,
                      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
}

static inline int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
        unsigned int event;

        if (unlikely(hwc->nmi))
                return -1;

        event = hwc->config & ARCH_PERFMON_EVENT_MASK;

        if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_INSTRUCTIONS]))
                return X86_PMC_IDX_FIXED_INSTRUCTIONS;
        if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_CPU_CYCLES]))
                return X86_PMC_IDX_FIXED_CPU_CYCLES;
        if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_BUS_CYCLES]))
                return X86_PMC_IDX_FIXED_BUS_CYCLES;

        return -1;
}

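/*
 * Only the three architectural events above have fixed-function
 * equivalents; everything else (and any counter that wants NMI delivery)
 * falls back to a generic PMC, signalled by returning -1.
 */
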
/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int pmc_generic_enable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        int idx;

        idx = fixed_mode_idx(counter, hwc);
        if (idx >= 0) {
                /*
                 * Try to get the fixed counter, if that is already taken
                 * then try to get a generic counter:
                 */
                if (test_and_set_bit(idx, cpuc->used))
                        goto try_generic;

                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                /*
                 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
                 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
                 */
                hwc->counter_base =
                        MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
                hwc->idx = idx;
        } else {
                idx = hwc->idx;
                /* Try to get the previous generic counter again */
                if (test_and_set_bit(idx, cpuc->used)) {
try_generic:
                        idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
                        if (idx == nr_counters_generic)
                                return -EAGAIN;
                        set_bit(idx, cpuc->used);
                        hwc->idx = idx;
                }
                hwc->config_base  = MSR_ARCH_PERFMON_EVENTSEL0;
                hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;
        }

        perf_counters_lapic_init(hwc->nmi);
        __pmc_generic_disable(counter, hwc, idx);

        cpuc->counters[idx] = counter;
        /*
         * Make it visible before enabling the hw:
         */
        smp_wmb();

        __hw_perf_counter_set_period(counter, hwc, idx);
        __pmc_generic_enable(counter, hwc, idx);

        return 0;
}

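/*
 * Note the ordering above: the PMC is kept disabled while it is being
 * (re)programmed - disable, publish cpuc->counters[idx], set the period,
 * and only then enable it.
 */
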
void perf_counter_print_debug(void)
{
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
        struct cpu_hw_counters *cpuc;
        int cpu, idx;

        if (!nr_counters_generic)
                return;

        local_irq_disable();

        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
        rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

        printk(KERN_INFO "\n");
        printk(KERN_INFO "CPU#%d: ctrl:     %016llx\n", cpu, ctrl);
        printk(KERN_INFO "CPU#%d: status:   %016llx\n", cpu, status);
        printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow);
        printk(KERN_INFO "CPU#%d: fixed:    %016llx\n", cpu, fixed);
        printk(KERN_INFO "CPU#%d: used:     %016llx\n", cpu, *(u64 *)cpuc->used);

        for (idx = 0; idx < nr_counters_generic; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
                rdmsrl(MSR_ARCH_PERFMON_PERFCTR0 + idx, pmc_count);

                prev_left = per_cpu(prev_left[idx], cpu);

                printk(KERN_INFO "CPU#%d: gen-PMC%d ctrl:  %016llx\n",
                        cpu, idx, pmc_ctrl);
                printk(KERN_INFO "CPU#%d: gen-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                printk(KERN_INFO "CPU#%d: gen-PMC%d left:  %016llx\n",
                        cpu, idx, prev_left);
        }
        for (idx = 0; idx < nr_counters_fixed; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

                printk(KERN_INFO "CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
        }
        local_irq_enable();
}

static void pmc_generic_disable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        unsigned int idx = hwc->idx;

        __pmc_generic_disable(counter, hwc, idx);

        clear_bit(idx, cpuc->used);
        cpuc->counters[idx] = NULL;
        /*
         * Make sure the cleared pointer becomes visible before we
         * (potentially) free the counter:
         */
        smp_wmb();

        /*
         * Drain the remaining delta count out of a counter
         * that we are disabling:
         */
        x86_perf_counter_update(counter, hwc, idx);
}

static void perf_store_irq_data(struct perf_counter *counter, u64 data)
{
        struct perf_data *irqdata = counter->irqdata;

        if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
                irqdata->overrun++;
        } else {
                u64 *p = (u64 *) &irqdata->data[irqdata->len];

                *p = data;
                irqdata->len += sizeof(u64);
        }
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void perf_save_and_restart(struct perf_counter *counter)
{
        struct hw_perf_counter *hwc = &counter->hw;
        int idx = hwc->idx;

        x86_perf_counter_update(counter, hwc, idx);
        __hw_perf_counter_set_period(counter, hwc, idx);

        if (counter->state == PERF_COUNTER_STATE_ACTIVE)
                __pmc_generic_enable(counter, hwc, idx);
}

static void
perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
{
        struct perf_counter *counter, *group_leader = sibling->group_leader;

        /*
         * Store the type and current count of each sibling counter (if any):
         */
        list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
                x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
                perf_store_irq_data(sibling, counter->hw_event.type);
                perf_store_irq_data(sibling, atomic64_read(&counter->count));
        }
}

/*
 * Maximum interrupt frequency of 100KHz per CPU
 */
#define PERFMON_MAX_INTERRUPTS (100000/HZ)

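/*
 * cpuc->interrupts is incremented once per handled PMI and reset by
 * perf_counter_unthrottle(), which the timer tick path is expected to call,
 * so this works out to roughly a 100 KHz per-CPU PMI budget.
 */
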
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
        int bit, cpu = smp_processor_id();
        u64 ack, status;
        struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);

        /* Disable counters globally */
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
        ack_APIC_irq();

        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        if (!status)
                goto out;

again:
        inc_irq_stat(apic_perf_irqs);
        ack = status;
        for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_counter *counter = cpuc->counters[bit];

                clear_bit(bit, (unsigned long *) &status);
                if (!counter)
                        continue;

                perf_save_and_restart(counter);

                switch (counter->hw_event.record_type) {
                case PERF_RECORD_SIMPLE:
                        continue;
                case PERF_RECORD_IRQ:
                        perf_store_irq_data(counter, instruction_pointer(regs));
                        break;
                case PERF_RECORD_GROUP:
                        perf_handle_group(counter, &status, &ack);
                        break;
                }
                /*
                 * From NMI context we cannot call into the scheduler to
                 * do a task wakeup - but we mark these counters as
                 * wakeup_pending and initiate a wakeup callback:
                 */
                if (nmi) {
                        counter->wakeup_pending = 1;
                        set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
                } else {
                        wake_up(&counter->waitq);
                }
        }

        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);

        /*
         * Repeat if there is more work to be done:
         */
        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        if (status)
                goto again;
out:
        /*
         * Restore - do not reenable when global enable is off or throttled:
         */
        if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
}

void perf_counter_unthrottle(void)
{
        struct cpu_hw_counters *cpuc;
        u64 global_enable;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                return;

        if (unlikely(!perf_counters_initialized))
                return;

        cpuc = &per_cpu(cpu_hw_counters, smp_processor_id());
        if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
                if (printk_ratelimit())
                        printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n");
                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
        }
        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global_enable);
        if (unlikely(cpuc->global_enable && !global_enable))
                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
        cpuc->interrupts = 0;
}

void smp_perf_counter_interrupt(struct pt_regs *regs)
{
        irq_enter();
        apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        __smp_perf_counter_interrupt(regs, 0);
        irq_exit();
}

/*
 * Wakeups deferred from NMI context (wakeup_pending) are completed here:
 */
void perf_counter_notify(struct pt_regs *regs)
{
        struct cpu_hw_counters *cpuc;
        unsigned long flags;
        int bit, cpu;

        local_irq_save(flags);
        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);

        for_each_bit(bit, cpuc->used, X86_PMC_IDX_MAX) {
                struct perf_counter *counter = cpuc->counters[bit];

                if (!counter)
                        continue;

                if (counter->wakeup_pending) {
                        counter->wakeup_pending = 0;
                        wake_up(&counter->waitq);
                }
        }
        local_irq_restore(flags);
}

void perf_counters_lapic_init(int nmi)
{
        u32 apic_val;

        if (!perf_counters_initialized)
                return;
        /*
         * Enable the performance counter vector in the APIC LVT:
         */
        apic_val = apic_read(APIC_LVTERR);

        apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
        if (nmi)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        else
                apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        apic_write(APIC_LVTERR, apic_val);
}

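/*
 * Note: the LVTERR entry is masked while the perf-counter LVT entry is
 * rewritten above and restored afterwards; depending on 'nmi' the PMI is
 * delivered either through LOCAL_PERF_VECTOR or as an NMI.
 */
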
static int
perf_counter_nmi_handler(struct notifier_block *self,
                         unsigned long cmd, void *__args)
{
        struct die_args *args = __args;
        struct pt_regs *regs;

        if (likely(cmd != DIE_NMI_IPI))
                return NOTIFY_DONE;

        regs = args->regs;

        apic_write(APIC_LVTPC, APIC_DM_NMI);
        __smp_perf_counter_interrupt(regs, 1);

        return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
        .notifier_call = perf_counter_nmi_handler,
};

void __init init_hw_perf_counters(void)
{
        union cpuid10_eax eax;
        unsigned int ebx;
        unsigned int unused;
        union cpuid10_edx edx;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                return;

        /*
         * Check whether the Architectural PerfMon supports
         * Branch Misses Retired Event or not.
         */
        cpuid(10, &eax.full, &ebx, &unused, &edx.full);
        if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
                return;

        printk(KERN_INFO "Intel Performance Monitoring support detected.\n");

        printk(KERN_INFO "... version:        %d\n", eax.split.version_id);
        printk(KERN_INFO "... num counters:   %d\n", eax.split.num_counters);
        nr_counters_generic = eax.split.num_counters;
        if (nr_counters_generic > X86_PMC_MAX_GENERIC) {
                nr_counters_generic = X86_PMC_MAX_GENERIC;
                WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
                     nr_counters_generic, X86_PMC_MAX_GENERIC);
        }
        perf_counter_mask = (1 << nr_counters_generic) - 1;
        perf_max_counters = nr_counters_generic;

        printk(KERN_INFO "... bit width:      %d\n", eax.split.bit_width);
        counter_value_mask = (1ULL << eax.split.bit_width) - 1;
        printk(KERN_INFO "... value mask:     %016Lx\n", counter_value_mask);

        printk(KERN_INFO "... mask length:    %d\n", eax.split.mask_length);

        nr_counters_fixed = edx.split.num_counters_fixed;
        if (nr_counters_fixed > X86_PMC_MAX_FIXED) {
                nr_counters_fixed = X86_PMC_MAX_FIXED;
                WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
                     nr_counters_fixed, X86_PMC_MAX_FIXED);
        }
        printk(KERN_INFO "... fixed counters: %d\n", nr_counters_fixed);

        perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED;

        printk(KERN_INFO "... counter mask:   %016Lx\n", perf_counter_mask);
        perf_counters_initialized = true;

        perf_counters_lapic_init(0);
        register_die_notifier(&perf_counter_nmi_notifier);
}

static void pmc_generic_read(struct perf_counter *counter)
{
        x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

static const struct hw_perf_counter_ops x86_perf_counter_ops = {
        .enable         = pmc_generic_enable,
        .disable        = pmc_generic_disable,
        .read           = pmc_generic_read,
};

const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
        int err;

        err = __hw_perf_counter_init(counter);
        if (err)
                return NULL;

        return &x86_perf_counter_ops;
}