/*
 * Performance counter x86 architecture code
 *
 * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>

#include <asm/perf_counter.h>
#include <asm/apic.h>
static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
static int nr_hw_counters __read_mostly;
static u32 perf_counter_mask __read_mostly;

static int nr_hw_counters_fixed __read_mostly;
struct cpu_hw_counters {
	struct perf_counter	*generic[X86_PMC_MAX_GENERIC];
	unsigned long		used[BITS_TO_LONGS(X86_PMC_MAX_GENERIC)];

	struct perf_counter	*fixed[X86_PMC_MAX_FIXED];
	unsigned long		used_fixed[BITS_TO_LONGS(X86_PMC_MAX_FIXED)];
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
static const int intel_perfmon_event_map[] =
{
	[PERF_COUNT_CYCLES]			= 0x003c,
	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
	[PERF_COUNT_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

static const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);
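
/*
 * For reference: the raw values above use the architectural EVENTSEL
 * encoding, (unit mask << 8) | event select. E.g. 0x00c0 is event 0xc0,
 * umask 0x00 (instructions retired) and 0x412e is event 0x2e, umask 0x41
 * (LLC misses), per the architectural performance monitoring events.
 */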
/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static void
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count, delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count, so we do that by clipping the delta to 32 bits:
	 */
	delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}
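
/*
 * Worked example of the clipping above (illustrative numbers): a counter
 * programmed with -left = -0x1000 leaves prev_count at 0xfffffffffffff000,
 * while the hardware (e.g. a 40-bit counter that does not sign-extend) may
 * later report 0x000000fffffff800. Truncated to 32 bits the subtraction is
 * 0xfffff800 - 0xfffff000 = 0x800 elapsed events, independent of how the
 * high bits were reported.
 */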
/*
 * Setup the hardware configuration for a given hw_event_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
	struct hw_perf_counter *hwc = &counter->hw;

	if (unlikely(!perf_counters_initialized))
		return -EINVAL;

	/*
	 * Count user events, and generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * If privileged enough, count OS events too, and allow
	 * NMI events as well:
	 */
	hwc->nmi = 0;
	if (capable(CAP_SYS_ADMIN)) {
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
		if (hw_event->nmi)
			hwc->nmi = 1;
	}
	hwc->config_base	= MSR_ARCH_PERFMON_EVENTSEL0;
	hwc->counter_base	= MSR_ARCH_PERFMON_PERFCTR0;

	hwc->irq_period		= hw_event->irq_period;
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
	if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
		hwc->irq_period = 0x7FFFFFFF;

	atomic64_set(&hwc->period_left, hwc->irq_period);

	/*
	 * Raw event types provide the config in the event structure;
	 * otherwise map the generic event type via the Intel event map:
	 */
	if (hw_event->raw) {
		hwc->config |= hw_event->type;
	} else {
		if (hw_event->type >= max_intel_perfmon_events)
			return -EINVAL;
		hwc->config |= intel_perfmon_event_map[hw_event->type];
	}
	counter->wakeup_pending = 0;

	return 0;
}
void hw_perf_enable_all(void)
{
	if (unlikely(!perf_counters_initialized))
		return;

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
}
u64 hw_perf_save_disable(void)
{
	u64 ctrl;

	if (unlikely(!perf_counters_initialized))
		return 0;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
	return ctrl;
}
EXPORT_SYMBOL_GPL(hw_perf_save_disable);
void hw_perf_restore(u64 ctrl)
{
	if (unlikely(!perf_counters_initialized))
		return;

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);
}
EXPORT_SYMBOL_GPL(hw_perf_restore);
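
/*
 * Typical usage of the pair above, as a sketch of how the generic perf
 * counter core is expected to call it (not code from this file):
 *
 *	u64 perf_flags;
 *
 *	perf_flags = hw_perf_save_disable();
 *	... manipulate counter state with the PMU quiesced ...
 *	hw_perf_restore(perf_flags);
 */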
static void
__pmc_generic_disable(struct perf_counter *counter,
		      struct hw_perf_counter *hwc, unsigned int idx)
{
	int err;

	err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
}

static DEFINE_PER_CPU(u64, prev_left[X86_PMC_MAX_GENERIC]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
__hw_perf_counter_set_period(struct perf_counter *counter,
			     struct hw_perf_counter *hwc, int idx)
{
	s32 left = atomic64_read(&hwc->period_left);
	s32 period = hwc->irq_period;
	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
	}

	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)(s64)-left);

	wrmsr(hwc->counter_base + idx, -left, 0);
}
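
/*
 * Example of the period arithmetic above (illustrative numbers): with
 * irq_period = 0x1000 and period_left = 0x400, the counter is written
 * with -0x400, so the next overflow interrupt fires after the remaining
 * 0x400 events. A period_left of -0x200 is first pulled back to 0xe00,
 * and anything at or below -period is reset to a full period.
 */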
static void
__pmc_generic_enable(struct perf_counter *counter,
		     struct hw_perf_counter *hwc, int idx)
{
	wrmsr(hwc->config_base + idx,
	      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
}
/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static void pmc_generic_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	/* Try to get the previous counter again */
	if (test_and_set_bit(idx, cpuc->used)) {
		idx = find_first_zero_bit(cpuc->used, nr_hw_counters);
		set_bit(idx, cpuc->used);
		hwc->idx = idx;
	}

	perf_counters_lapic_init(hwc->nmi);

	__pmc_generic_disable(counter, hwc, idx);

	cpuc->generic[idx] = counter;

	__hw_perf_counter_set_period(counter, hwc, idx);
	__pmc_generic_enable(counter, hwc, idx);
}
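
/*
 * Slot-allocation sketch (illustrative): with four generic PMCs and
 * cpuc->used == 0b0101, a counter whose cached hwc->idx is 0 finds that
 * bit already taken, falls back to find_first_zero_bit() and is moved
 * into slot 1.
 */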
void perf_counter_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left;
	int cpu, idx;

	if (!nr_hw_counters)
		return;

	local_irq_disable();

	cpu = smp_processor_id();

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);

	printk(KERN_INFO "\n");
	printk(KERN_INFO "CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
	printk(KERN_INFO "CPU#%d: status:     %016llx\n", cpu, status);
	printk(KERN_INFO "CPU#%d: overflow:   %016llx\n", cpu, overflow);

	for (idx = 0; idx < nr_hw_counters; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
		rdmsrl(MSR_ARCH_PERFMON_PERFCTR0 + idx, pmc_count);

		prev_left = per_cpu(prev_left[idx], cpu);

		printk(KERN_INFO "CPU#%d: PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		printk(KERN_INFO "CPU#%d: PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		printk(KERN_INFO "CPU#%d: PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	local_irq_enable();
}
static void pmc_generic_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	unsigned int idx = hwc->idx;

	__pmc_generic_disable(counter, hwc, idx);

	clear_bit(idx, cpuc->used);
	cpuc->generic[idx] = NULL;

	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
}
static void perf_store_irq_data(struct perf_counter *counter, u64 data)
{
	struct perf_data *irqdata = counter->irqdata;

	if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
		irqdata->overrun++;
	} else {
		u64 *p = (u64 *) &irqdata->data[irqdata->len];

		*p = data;
		irqdata->len += sizeof(u64);
	}
}
/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void perf_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;
	u64 pmc_ctrl;

	rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);

	x86_perf_counter_update(counter, hwc, idx);
	__hw_perf_counter_set_period(counter, hwc, idx);

	if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE)
		__pmc_generic_enable(counter, hwc, idx);
}
static void
perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
{
	struct perf_counter *counter, *group_leader = sibling->group_leader;

	/*
	 * Store sibling timestamps (if any):
	 */
	list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
		x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
		perf_store_irq_data(sibling, counter->hw_event.type);
		perf_store_irq_data(sibling, atomic64_read(&counter->count));
	}
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
	int bit, cpu = smp_processor_id();
	u64 ack, status, saved_global;
	struct cpu_hw_counters *cpuc;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);

	/* Disable counters globally */
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
	ack_APIC_irq();

	cpuc = &per_cpu(cpu_hw_counters, cpu);

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	if (!status)
		goto out;

again:
	ack = status;
	for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) {
		struct perf_counter *counter = cpuc->generic[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!counter)
			continue;

		perf_save_and_restart(counter);
		switch (counter->hw_event.record_type) {
		case PERF_RECORD_SIMPLE:
			continue;
		case PERF_RECORD_IRQ:
			perf_store_irq_data(counter, instruction_pointer(regs));
			break;
		case PERF_RECORD_GROUP:
			perf_handle_group(counter, &status, &ack);
			break;
		}
		/*
		 * From NMI context we cannot call into the scheduler to
		 * do a task wakeup - but we mark these generic as
		 * wakeup_pending and initiate a wakeup callback:
		 */
		if (nmi) {
			counter->wakeup_pending = 1;
			set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
		} else {
			wake_up(&counter->waitq);
		}
	}

	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack, 0);

	/*
	 * Repeat if there is more work to be done:
	 */
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	if (status)
		goto again;
out:
	/*
	 * Restore - do not reenable when global enable is off:
	 */
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, saved_global, 0);
}
void smp_perf_counter_interrupt(struct pt_regs *regs)
{
	irq_enter();
	inc_irq_stat(apic_perf_irqs);
	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	__smp_perf_counter_interrupt(regs, 0);
	irq_exit();
}
/*
 * This handler is triggered by NMI contexts:
 */
void perf_counter_notify(struct pt_regs *regs)
{
	struct cpu_hw_counters *cpuc;
	unsigned long flags;
	int bit, cpu;

	local_irq_save(flags);
	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	for_each_bit(bit, cpuc->used, nr_hw_counters) {
		struct perf_counter *counter = cpuc->generic[bit];

		if (!counter)
			continue;

		if (counter->wakeup_pending) {
			counter->wakeup_pending = 0;
			wake_up(&counter->waitq);
		}
	}

	local_irq_restore(flags);
}
void __cpuinit perf_counters_lapic_init(int nmi)
{
	u32 apic_val;

	if (!perf_counters_initialized)
		return;
	/*
	 * Enable the performance counter vector in the APIC LVT:
	 */
	apic_val = apic_read(APIC_LVTERR);

	apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
	if (nmi)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	else
		apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	apic_write(APIC_LVTERR, apic_val);
}
static int
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (likely(cmd != DIE_NMI_IPI))
		return NOTIFY_DONE;

	regs = args->regs;

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	__smp_perf_counter_interrupt(regs, 1);

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call		= perf_counter_nmi_handler
};
void __init init_hw_perf_counters(void)
{
	union cpuid10_eax eax;
	unsigned int ebx;
	unsigned int unused;
	union cpuid10_edx edx;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return;

	printk(KERN_INFO "Intel Performance Monitoring support detected.\n");

	printk(KERN_INFO "... version:         %d\n", eax.split.version_id);
	printk(KERN_INFO "... num counters:    %d\n", eax.split.num_counters);
	nr_hw_counters = eax.split.num_counters;
	if (nr_hw_counters > X86_PMC_MAX_GENERIC) {
		nr_hw_counters = X86_PMC_MAX_GENERIC;
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
			nr_hw_counters, X86_PMC_MAX_GENERIC);
	}
	perf_counter_mask = (1 << nr_hw_counters) - 1;
	perf_max_counters = nr_hw_counters;

	printk(KERN_INFO "... bit width:       %d\n", eax.split.bit_width);
	printk(KERN_INFO "... mask length:     %d\n", eax.split.mask_length);

	nr_hw_counters_fixed = edx.split.num_counters_fixed;
	if (nr_hw_counters_fixed > X86_PMC_MAX_FIXED) {
		nr_hw_counters_fixed = X86_PMC_MAX_FIXED;
		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
			nr_hw_counters_fixed, X86_PMC_MAX_FIXED);
	}
	printk(KERN_INFO "... fixed counters:  %d\n", nr_hw_counters_fixed);

	perf_counters_initialized = true;

	perf_counters_lapic_init(0);
	register_die_notifier(&perf_counter_nmi_notifier);
}
static void pmc_generic_read(struct perf_counter *counter)
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

static const struct hw_perf_counter_ops x86_perf_counter_ops = {
	.hw_perf_counter_enable		= pmc_generic_enable,
	.hw_perf_counter_disable	= pmc_generic_disable,
	.hw_perf_counter_read		= pmc_generic_read,
};

const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return NULL;

	return &x86_perf_counter_ops;
}
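
/*
 * How this plugs into the generic layer, as a sketch (assuming the call
 * sites in kernel/perf_counter.c): counter creation calls
 * hw_perf_counter_init() and keeps the returned &x86_perf_counter_ops;
 * the core then drives the counter through ->hw_perf_counter_enable(),
 * ->hw_perf_counter_disable() and ->hw_perf_counter_read() as it is
 * scheduled onto a CPU, scheduled out, or read.
 */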