/*
 * Performance counter x86 architecture code
 *
 *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2009 Jaswinder Singh Rajput
 *
 *  For licensing details see kernel-base/COPYING
 */
#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>

#include <asm/perf_counter.h>
#include <asm/apic.h>
static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
static int nr_counters_generic __read_mostly;
static u64 perf_counter_mask __read_mostly;
static u64 counter_value_mask __read_mostly;

static int nr_counters_fixed __read_mostly;
struct cpu_hw_counters {
        struct perf_counter     *counters[X86_PMC_IDX_MAX];
        unsigned long           used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long           interrupts;
        u64                     global_enable;
};
/*
 * struct pmc_x86_ops - performance counter x86 ops
 */
struct pmc_x86_ops {
        u64             (*save_disable_all)(void);
        void            (*restore_all)(u64 ctrl);
        unsigned        eventsel;
        unsigned        perfctr;
        int             (*event_map)(int event);
        int             max_events;
};

static struct pmc_x86_ops *pmc_ops;

static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const int intel_perfmon_event_map[] =
{
        [PERF_COUNT_CPU_CYCLES]                 = 0x003c,
        [PERF_COUNT_INSTRUCTIONS]               = 0x00c0,
        [PERF_COUNT_CACHE_REFERENCES]           = 0x4f2e,
        [PERF_COUNT_CACHE_MISSES]               = 0x412e,
        [PERF_COUNT_BRANCH_INSTRUCTIONS]        = 0x00c4,
        [PERF_COUNT_BRANCH_MISSES]              = 0x00c5,
        [PERF_COUNT_BUS_CYCLES]                 = 0x013c,
};
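/*
 * Note: in each entry above the low byte is the architectural event-select
 * code and the second byte is the unit mask, as programmed into
 * IA32_PERFEVTSELx; 0x412e, for example, selects LLC Misses
 * (event 0x2e, umask 0x41).
 */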
static int pmc_intel_event_map(int event)
{
        return intel_perfmon_event_map[event];
}
/*
 * AMD Performance Monitor K7 and later.
 */
static const int amd_perfmon_event_map[] =
{
        [PERF_COUNT_CPU_CYCLES]                 = 0x0076,
        [PERF_COUNT_INSTRUCTIONS]               = 0x00c0,
        [PERF_COUNT_CACHE_REFERENCES]           = 0x0080,
        [PERF_COUNT_CACHE_MISSES]               = 0x0081,
        [PERF_COUNT_BRANCH_INSTRUCTIONS]        = 0x00c4,
        [PERF_COUNT_BRANCH_MISSES]              = 0x00c5,
};
static int pmc_amd_event_map(int event)
{
        return amd_perfmon_event_map[event];
}
/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static void
x86_perf_counter_update(struct perf_counter *counter,
                        struct hw_perf_counter *hwc, int idx)
{
        u64 prev_raw_count, new_raw_count, delta;

        /*
         * Careful: an NMI might modify the previous counter value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic counter atomically:
         */
again:
        prev_raw_count = atomic64_read(&hwc->prev_count);
        rdmsrl(hwc->counter_base + idx, new_raw_count);

        if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
                goto again;
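        /*
         * Note: the cmpxchg retry above lets this function race safely with
         * the NMI path - whoever publishes the new prev_count first wins,
         * the loser re-reads the counter and tries again, so no delta is
         * accounted twice.
         */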
        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (counter-)time and add that to the generic counter.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count, so we do that by clipping the delta to 32 bits:
         */
        delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);

        atomic64_add(delta, &counter->count);
        atomic64_sub(delta, &hwc->period_left);
}
/*
 * Setup the hardware configuration for a given hw_event_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
        struct perf_counter_hw_event *hw_event = &counter->hw_event;
        struct hw_perf_counter *hwc = &counter->hw;

        if (unlikely(!perf_counters_initialized))
                return -EINVAL;

        /*
         * Generate PMC IRQs:
         * (keep 'enabled' bit clear for now)
         */
        hwc->config = ARCH_PERFMON_EVENTSEL_INT;

        /*
         * Count user and OS events unless requested not to.
         */
        if (!hw_event->exclude_user)
                hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
        if (!hw_event->exclude_kernel)
                hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

        /*
         * If privileged enough, allow NMI events:
         */
        hwc->nmi = 0;
        if (capable(CAP_SYS_ADMIN) && hw_event->nmi)
                hwc->nmi = 1;

        hwc->irq_period = hw_event->irq_period;
        /*
         * Intel PMCs cannot be accessed sanely above 32 bit width,
         * so we install an artificial 1<<31 period regardless of
         * the generic counter period:
         */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
                        hwc->irq_period = 0x7FFFFFFF;

        atomic64_set(&hwc->period_left, hwc->irq_period);
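        /*
         * Note: capping the period at 2^31-1 above also keeps the deltas
         * computed in x86_perf_counter_update() within 32 bits, so the
         * 32-bit clipping done there cannot lose counts between overflows.
         */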
        /*
         * Raw event type provides the config in the hw_event structure
         */
        if (hw_event->raw) {
                hwc->config |= hw_event->type;
        } else {
                if (hw_event->type >= pmc_ops->max_events)
                        return -EINVAL;
                /*
                 * The generic map:
                 */
                hwc->config |= pmc_ops->event_map(hw_event->type);
        }
        counter->wakeup_pending = 0;

        return 0;
}
static u64 pmc_intel_save_disable_all(void)
{
        u64 ctrl;

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

        return ctrl;
}
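/*
 * AMD K7/K8 PMUs have no global control MSR comparable to
 * MSR_CORE_PERF_GLOBAL_CTRL, so counters are disabled by clearing the
 * enable bit in each EVNTSEL individually; the returned mask records,
 * one bit per counter, which ones were enabled so that
 * pmc_amd_restore_all() can re-enable exactly those.
 */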
static u64 pmc_amd_save_disable_all(void)
{
        u64 val, ctrl = 0;
        int idx;

        for (idx = 0; idx < nr_counters_generic; idx++) {
                rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
                if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
                        ctrl |= (1 << idx);
                val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
                wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
        }

        return ctrl;
}
u64 hw_perf_save_disable(void)
{
        if (unlikely(!perf_counters_initialized))
                return 0;

        return pmc_ops->save_disable_all();
}
EXPORT_SYMBOL_GPL(hw_perf_save_disable);
static void pmc_intel_restore_all(u64 ctrl)
{
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
}
static void pmc_amd_restore_all(u64 ctrl)
{
        u64 val;
        int idx;

        for (idx = 0; idx < nr_counters_generic; idx++) {
                if (ctrl & (1 << idx)) {
                        rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
                        val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
                        wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
                }
        }
}
void hw_perf_restore(u64 ctrl)
{
        if (unlikely(!perf_counters_initialized))
                return;

        pmc_ops->restore_all(ctrl);
}
EXPORT_SYMBOL_GPL(hw_perf_restore);
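/*
 * Fixed-function counters are controlled through
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL: each counter owns a 4-bit field
 * (enable-OS, enable-USR, PMI-on-overflow), which is why the fixed
 * enable/disable helpers below operate on 0xf << (idx * 4).
 */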
static inline void
__pmc_fixed_disable(struct perf_counter *counter,
                    struct hw_perf_counter *hwc, unsigned int __idx)
{
        int idx = __idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, mask;
        int err;

        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        err = checking_wrmsrl(hwc->config_base, ctrl_val);
}
static inline void
__pmc_generic_disable(struct perf_counter *counter,
                      struct hw_perf_counter *hwc, unsigned int idx)
{
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
                __pmc_fixed_disable(counter, hwc, idx);
        else
                wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
}
static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
__hw_perf_counter_set_period(struct perf_counter *counter,
                             struct hw_perf_counter *hwc, int idx)
{
        s64 left = atomic64_read(&hwc->period_left);
        s32 period = hwc->irq_period;
        int err;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
                atomic64_set(&hwc->period_left, left);
        }

        if (unlikely(left <= 0)) {
                left += period;
                atomic64_set(&hwc->period_left, left);
        }

        per_cpu(prev_left[idx], smp_processor_id()) = left;

        /*
         * The hw counter starts counting from this counter offset,
         * mark it to be able to extract future deltas:
         */
        atomic64_set(&hwc->prev_count, (u64)-left);
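        /*
         * Note: programming the counter with -left means it overflows, and
         * raises a PMI, after exactly 'left' more events - that is how the
         * IRQ period is realized in hardware.
         */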
        err = checking_wrmsrl(hwc->counter_base + idx,
                              (u64)(-left) & counter_value_mask);
}
static inline void
__pmc_fixed_enable(struct perf_counter *counter,
                   struct hw_perf_counter *hwc, unsigned int __idx)
{
        int idx = __idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, bits, mask;
        int err;

        /*
         * Enable IRQ generation (0x8),
         * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
         * if requested:
         */
        bits = 0x8ULL;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
                bits |= 0x2;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
                bits |= 0x1;
        bits <<= (idx * 4);
        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        ctrl_val |= bits;
        err = checking_wrmsrl(hwc->config_base, ctrl_val);
}
static void
__pmc_generic_enable(struct perf_counter *counter,
                     struct hw_perf_counter *hwc, int idx)
{
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
                __pmc_fixed_enable(counter, hwc, idx);
        else
                wrmsr(hwc->config_base + idx,
                      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
}
static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
        unsigned int event;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
                return -1;

        if (unlikely(hwc->nmi))
                return -1;

        event = hwc->config & ARCH_PERFMON_EVENT_MASK;

        if (unlikely(event == pmc_ops->event_map(PERF_COUNT_INSTRUCTIONS)))
                return X86_PMC_IDX_FIXED_INSTRUCTIONS;
        if (unlikely(event == pmc_ops->event_map(PERF_COUNT_CPU_CYCLES)))
                return X86_PMC_IDX_FIXED_CPU_CYCLES;
        if (unlikely(event == pmc_ops->event_map(PERF_COUNT_BUS_CYCLES)))
                return X86_PMC_IDX_FIXED_BUS_CYCLES;

        return -1;
}
/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int pmc_generic_enable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        int idx;

        idx = fixed_mode_idx(counter, hwc);
        if (idx >= 0) {
                /*
                 * Try to get the fixed counter, if that is already taken
                 * then try to get a generic counter:
                 */
                if (test_and_set_bit(idx, cpuc->used))
                        goto try_generic;

                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                /*
                 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
                 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
                 */
                hwc->counter_base =
                        MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
                hwc->idx = idx;
        } else {
                idx = hwc->idx;
                /* Try to get the previous generic counter again */
                if (test_and_set_bit(idx, cpuc->used)) {
try_generic:
                        idx = find_first_zero_bit(cpuc->used,
                                                  nr_counters_generic);
                        if (idx == nr_counters_generic)
                                return -EAGAIN;

                        set_bit(idx, cpuc->used);
                        hwc->idx = idx;
                }
                hwc->config_base  = pmc_ops->eventsel;
                hwc->counter_base = pmc_ops->perfctr;
        }

        perf_counters_lapic_init(hwc->nmi);

        __pmc_generic_disable(counter, hwc, idx);

        cpuc->counters[idx] = counter;
        /*
         * Make it visible before enabling the hw:
         */
        smp_wmb();

        __hw_perf_counter_set_period(counter, hwc, idx);
        __pmc_generic_enable(counter, hwc, idx);

        return 0;
}
void perf_counter_print_debug(void)
{
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
        struct cpu_hw_counters *cpuc;
        int cpu, idx;

        if (!nr_counters_generic)
                return;

        local_irq_disable();

        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);

        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
                rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
                rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

                pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
                pr_info("CPU#%d: status:     %016llx\n", cpu, status);
                pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
                pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
        }
        pr_info("CPU#%d: used:       %016llx\n", cpu, *(u64 *)cpuc->used);

        for (idx = 0; idx < nr_counters_generic; idx++) {
                rdmsrl(pmc_ops->eventsel + idx, pmc_ctrl);
                rdmsrl(pmc_ops->perfctr  + idx, pmc_count);

                prev_left = per_cpu(prev_left[idx], cpu);

                pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
                        cpu, idx, pmc_ctrl);
                pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
                        cpu, idx, prev_left);
        }
        for (idx = 0; idx < nr_counters_fixed; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

                pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
        }
        local_irq_enable();
}
static void pmc_generic_disable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        unsigned int idx = hwc->idx;

        __pmc_generic_disable(counter, hwc, idx);

        clear_bit(idx, cpuc->used);
        cpuc->counters[idx] = NULL;
        /*
         * Make sure the cleared pointer becomes visible before we
         * (potentially) free the counter:
         */
        smp_wmb();

        /*
         * Drain the remaining delta count out of a counter
         * that we are disabling:
         */
        x86_perf_counter_update(counter, hwc, idx);
}
static void perf_store_irq_data(struct perf_counter *counter, u64 data)
{
        struct perf_data *irqdata = counter->irqdata;

        if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
                irqdata->overrun++;
        } else {
                u64 *p = (u64 *) &irqdata->data[irqdata->len];

                *p = data;
                irqdata->len += sizeof(u64);
        }
}
/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void perf_save_and_restart(struct perf_counter *counter)
{
        struct hw_perf_counter *hwc = &counter->hw;
        int idx = hwc->idx;

        x86_perf_counter_update(counter, hwc, idx);
        __hw_perf_counter_set_period(counter, hwc, idx);

        if (counter->state == PERF_COUNTER_STATE_ACTIVE)
                __pmc_generic_enable(counter, hwc, idx);
}
static void
perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
{
        struct perf_counter *counter, *group_leader = sibling->group_leader;

        /*
         * Store sibling timestamps (if any):
         */
        list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {

                x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
                perf_store_irq_data(sibling, counter->hw_event.type);
                perf_store_irq_data(sibling, atomic64_read(&counter->count));
        }
}
/*
 * Maximum interrupt frequency of 100KHz per CPU
 */
#define PERFMON_MAX_INTERRUPTS (100000/HZ)
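/*
 * Note: with HZ=1000 this allows up to 100 PMIs per tick. Once the limit is
 * reached, __smp_perf_counter_interrupt() leaves the PMU globally disabled,
 * and perf_counter_unthrottle() - expected to be called periodically from
 * the timer tick - re-enables it and resets the count.
 */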
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
        int bit, cpu = smp_processor_id();
        u64 ack, status;
        struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);

        /* Disable counters globally */
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
        ack_APIC_irq();

        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        if (!status)
                goto out;

again:
        inc_irq_stat(apic_perf_irqs);
        ack = status;
        for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_counter *counter = cpuc->counters[bit];

                clear_bit(bit, (unsigned long *) &status);
                if (!counter)
                        continue;

                perf_save_and_restart(counter);

                switch (counter->hw_event.record_type) {
                case PERF_RECORD_SIMPLE:
                        continue;
                case PERF_RECORD_IRQ:
                        perf_store_irq_data(counter, instruction_pointer(regs));
                        break;
                case PERF_RECORD_GROUP:
                        perf_handle_group(counter, &status, &ack);
                        break;
                }
                /*
                 * From NMI context we cannot call into the scheduler to
                 * do a task wakeup - but we mark the counter as
                 * wakeup_pending and initiate a wakeup callback:
                 */
                if (nmi) {
                        counter->wakeup_pending = 1;
                        set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
                } else {
                        wake_up(&counter->waitq);
                }
        }

        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);

        /*
         * Repeat if there is more work to be done:
         */
        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        if (status)
                goto again;
out:
        /*
         * Restore - do not reenable when global enable is off or throttled:
         */
        if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
}
void perf_counter_unthrottle(void)
{
        struct cpu_hw_counters *cpuc;
        u64 global_enable;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                return;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
                return;

        if (unlikely(!perf_counters_initialized))
                return;

        cpuc = &per_cpu(cpu_hw_counters, smp_processor_id());
        if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
                if (printk_ratelimit())
                        printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n");
                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
        }
        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global_enable);
        if (unlikely(cpuc->global_enable && !global_enable))
                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
        cpuc->interrupts = 0;
}
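/*
 * Vector-based (non-NMI) PMI entry point. The local APIC masks the LVTPC
 * entry each time a counter-overflow interrupt is delivered, so LVTPC has
 * to be re-armed with the vector before the overflows are handled.
 */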
void smp_perf_counter_interrupt(struct pt_regs *regs)
{
        irq_enter();
        apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        __smp_perf_counter_interrupt(regs, 0);
        irq_exit();
}
/*
 * This handler is triggered by NMI contexts:
 */
void perf_counter_notify(struct pt_regs *regs)
{
        struct cpu_hw_counters *cpuc;
        unsigned long flags;
        int bit, cpu;

        local_irq_save(flags);
        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);

        for_each_bit(bit, cpuc->used, X86_PMC_IDX_MAX) {
                struct perf_counter *counter = cpuc->counters[bit];

                if (!counter)
                        continue;

                if (counter->wakeup_pending) {
                        counter->wakeup_pending = 0;
                        wake_up(&counter->waitq);
                }
        }

        local_irq_restore(flags);
}
void perf_counters_lapic_init(int nmi)
{
        u32 apic_val;

        if (!perf_counters_initialized)
                return;
        /*
         * Enable the performance counter vector in the APIC LVT:
         */
        apic_val = apic_read(APIC_LVTERR);

        apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
        if (nmi)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        else
                apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        apic_write(APIC_LVTERR, apic_val);
}
static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
                         unsigned long cmd, void *__args)
{
        struct die_args *args = __args;
        struct pt_regs *regs;

        if (likely(cmd != DIE_NMI_IPI))
                return NOTIFY_DONE;

        regs = args->regs;

        apic_write(APIC_LVTPC, APIC_DM_NMI);
        __smp_perf_counter_interrupt(regs, 1);

        return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
        .notifier_call          = perf_counter_nmi_handler,
};
static struct pmc_x86_ops pmc_intel_ops = {
        .save_disable_all       = pmc_intel_save_disable_all,
        .restore_all            = pmc_intel_restore_all,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
        .event_map              = pmc_intel_event_map,
        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
};
static struct pmc_x86_ops pmc_amd_ops = {
        .save_disable_all       = pmc_amd_save_disable_all,
        .restore_all            = pmc_amd_restore_all,
        .eventsel               = MSR_K7_EVNTSEL0,
        .perfctr                = MSR_K7_PERFCTR0,
        .event_map              = pmc_amd_event_map,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
};
static struct pmc_x86_ops *pmc_intel_init(void)
{
        union cpuid10_eax eax;
        unsigned int ebx;
        unsigned int unused;
        union cpuid10_edx edx;

        /*
         * Check whether the Architectural PerfMon supports
         * Branch Misses Retired Event or not.
         */
        cpuid(10, &eax.full, &ebx, &unused, &edx.full);
        if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
                return NULL;

        pr_info("Intel Performance Monitoring support detected.\n");
        pr_info("... version:         %d\n", eax.split.version_id);
        pr_info("... bit width:       %d\n", eax.split.bit_width);
        pr_info("... mask length:     %d\n", eax.split.mask_length);

        nr_counters_generic = eax.split.num_counters;
        nr_counters_fixed = edx.split.num_counters_fixed;
        counter_value_mask = (1ULL << eax.split.bit_width) - 1;

        return &pmc_intel_ops;
}
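/*
 * AMD K7 and later cores expose four generic counters and no fixed-function
 * ones, and there is no CPUID leaf to enumerate them, hence the hard-coded
 * values below.
 */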
static struct pmc_x86_ops *pmc_amd_init(void)
{
        nr_counters_generic = 4;
        nr_counters_fixed = 0;

        pr_info("AMD Performance Monitoring support detected.\n");

        return &pmc_amd_ops;
}
void __init init_hw_perf_counters(void)
{
        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                return;

        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_INTEL:
                pmc_ops = pmc_intel_init();
                break;
        case X86_VENDOR_AMD:
                pmc_ops = pmc_amd_init();
                break;
        }
        if (!pmc_ops)
                return;

        pr_info("... num counters:    %d\n", nr_counters_generic);
        if (nr_counters_generic > X86_PMC_MAX_GENERIC) {
                nr_counters_generic = X86_PMC_MAX_GENERIC;
                WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
                     nr_counters_generic, X86_PMC_MAX_GENERIC);
        }
        perf_counter_mask = (1 << nr_counters_generic) - 1;
        perf_max_counters = nr_counters_generic;

        pr_info("... value mask:      %016Lx\n", counter_value_mask);

        if (nr_counters_fixed > X86_PMC_MAX_FIXED) {
                nr_counters_fixed = X86_PMC_MAX_FIXED;
                WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
                     nr_counters_fixed, X86_PMC_MAX_FIXED);
        }
        pr_info("... fixed counters:  %d\n", nr_counters_fixed);

        perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED;
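        /*
         * perf_counter_mask now has one bit per usable counter: generic
         * counters in the low bits, fixed-function counters starting at bit
         * X86_PMC_IDX_FIXED, matching the global ctrl/status MSR layout.
         */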
        pr_info("... counter mask:    %016Lx\n", perf_counter_mask);
        perf_counters_initialized = true;

        perf_counters_lapic_init(0);
        register_die_notifier(&perf_counter_nmi_notifier);
}
static void pmc_generic_read(struct perf_counter *counter)
{
        x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}
static const struct hw_perf_counter_ops x86_perf_counter_ops = {
        .enable         = pmc_generic_enable,
        .disable        = pmc_generic_disable,
        .read           = pmc_generic_read,
};
const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
        int err;

        err = __hw_perf_counter_init(counter);
        if (err)
                return NULL;

        return &x86_perf_counter_ops;
}