*
* Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
+ * Copyright(C) 2009 Jaswinder Singh Rajput
*
* For licencing details see kernel-base/COPYING
*/
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
+#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
-#include <asm/intel_arch_perfmon.h>
#include <asm/apic.h>
static bool perf_counters_initialized __read_mostly;
/*
* Number of (generic) HW counters:
*/
-static int nr_hw_counters __read_mostly;
-static u32 perf_counter_mask __read_mostly;
+static int nr_counters_generic __read_mostly;
+static u64 perf_counter_mask __read_mostly;
+static u64 counter_value_mask __read_mostly;
+static int counter_value_bits __read_mostly;
-/* No support for fixed function counters yet */
-
-#define MAX_HW_COUNTERS 8
+static int nr_counters_fixed __read_mostly;
struct cpu_hw_counters {
- struct perf_counter *counters[MAX_HW_COUNTERS];
- unsigned long used[BITS_TO_LONGS(MAX_HW_COUNTERS)];
- int enable_all;
+ struct perf_counter *counters[X86_PMC_IDX_MAX];
+ unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ unsigned long interrupts;
+ u64 throttle_ctrl;
+ unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ int enabled;
};
/*
- * Intel PerfMon v3. Used on Core2 and later.
+ * struct pmc_x86_ops - performance counter x86 ops
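+ *
+ * Per-vendor (Intel/AMD) callbacks and MSR base addresses; the generic
+ * code below dispatches through the global 'pmc_ops' pointer.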
*/
-static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
+struct pmc_x86_ops {
+ u64 (*save_disable_all)(void);
+ void (*restore_all)(u64);
+ u64 (*get_status)(u64);
+ void (*ack_status)(u64);
+ void (*enable)(int, u64);
+ void (*disable)(int, u64);
+ unsigned eventsel;
+ unsigned perfctr;
+ u64 (*event_map)(int);
+ u64 (*raw_event)(u64);
+ int max_events;
+};
-const int intel_perfmon_event_map[] =
+static struct pmc_x86_ops *pmc_ops;
+
+static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
+ .enabled = 1,
+};
+
+/*
+ * Intel PerfMon v3. Used on Core2 and later.
+ */
+static const u64 intel_perfmon_event_map[] =
{
- [PERF_COUNT_CYCLES] = 0x003c,
+ [PERF_COUNT_CPU_CYCLES] = 0x003c,
[PERF_COUNT_INSTRUCTIONS] = 0x00c0,
[PERF_COUNT_CACHE_REFERENCES] = 0x4f2e,
[PERF_COUNT_CACHE_MISSES] = 0x412e,
[PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x00c4,
[PERF_COUNT_BRANCH_MISSES] = 0x00c5,
+ [PERF_COUNT_BUS_CYCLES] = 0x013c,
};
-const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);
+static u64 pmc_intel_event_map(int event)
+{
+ return intel_perfmon_event_map[event];
+}
+
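+/*
+ * Keep only the fields of a user-supplied raw config that the Core
+ * event-select register honours (event select, unit mask, counter mask):
+ */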
+static u64 pmc_intel_raw_event(u64 event)
+{
+#define CORE_EVNTSEL_EVENT_MASK 0x000000FF
+#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00
+#define CORE_EVNTSEL_COUNTER_MASK 0xFF000000
+
+#define CORE_EVNTSEL_MASK \
+ (CORE_EVNTSEL_EVENT_MASK | \
+ CORE_EVNTSEL_UNIT_MASK | \
+ CORE_EVNTSEL_COUNTER_MASK)
+
+ return event & CORE_EVNTSEL_MASK;
+}
+
+/*
+ * AMD Performance Monitor K7 and later.
+ */
+static const u64 amd_perfmon_event_map[] =
+{
+ [PERF_COUNT_CPU_CYCLES] = 0x0076,
+ [PERF_COUNT_INSTRUCTIONS] = 0x00c0,
+ [PERF_COUNT_CACHE_REFERENCES] = 0x0080,
+ [PERF_COUNT_CACHE_MISSES] = 0x0081,
+ [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x00c4,
+ [PERF_COUNT_BRANCH_MISSES] = 0x00c5,
+};
+
+static u64 pmc_amd_event_map(int event)
+{
+ return amd_perfmon_event_map[event];
+}
+
+static u64 pmc_amd_raw_event(u64 event)
+{
+#define K7_EVNTSEL_EVENT_MASK 0x7000000FF
+#define K7_EVNTSEL_UNIT_MASK 0x00000FF00
+#define K7_EVNTSEL_COUNTER_MASK 0x0FF000000
+
+#define K7_EVNTSEL_MASK \
+ (K7_EVNTSEL_EVENT_MASK | \
+ K7_EVNTSEL_UNIT_MASK | \
+ K7_EVNTSEL_COUNTER_MASK)
+
+ return event & K7_EVNTSEL_MASK;
+}
+
+/*
+ * Propagate counter elapsed time into the generic counter.
+ * Can only be executed on the CPU where the counter is active.
+ * The computed delta is added to counter->count.
+ */
+static void
+x86_perf_counter_update(struct perf_counter *counter,
+ struct hw_perf_counter *hwc, int idx)
+{
+ u64 prev_raw_count, new_raw_count, delta;
+
+ /*
+ * Careful: an NMI might modify the previous counter value.
+ *
+ * Our tactic to handle this is to first atomically read and
+ * exchange a new raw count - then add that new-prev delta
+ * count to the generic counter atomically:
+ */
+again:
+ prev_raw_count = atomic64_read(&hwc->prev_count);
+ rdmsrl(hwc->counter_base + idx, new_raw_count);
+
+ if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ /*
+ * Now we have the new raw value and have updated the prev
+ * timestamp already. We can now calculate the elapsed delta
+ * (counter-)time and add that to the generic counter.
+ *
+ * Careful, not all hw sign-extends above the physical width
+ * of the count, so we handle that by clipping the delta to 32 bits:
+ */
+ delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);
+
+ atomic64_add(delta, &counter->count);
+ atomic64_sub(delta, &hwc->period_left);
+}
/*
* Setup the hardware configuration for a given hw_event_type
*/
-int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type)
+static int __hw_perf_counter_init(struct perf_counter *counter)
{
+ struct perf_counter_hw_event *hw_event = &counter->hw_event;
struct hw_perf_counter *hwc = &counter->hw;
if (unlikely(!perf_counters_initialized))
return -EINVAL;
/*
- * Count user events, and generate PMC IRQs:
+ * Generate PMC IRQs:
* (keep 'enabled' bit clear for now)
*/
- hwc->config = ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT;
+ hwc->config = ARCH_PERFMON_EVENTSEL_INT;
/*
- * If privileged enough, count OS events too, and allow
- * NMI events as well:
+ * Count user and OS events unless requested not to.
*/
- hwc->nmi = 0;
- if (capable(CAP_SYS_ADMIN)) {
+ if (!hw_event->exclude_user)
+ hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
+ if (!hw_event->exclude_kernel)
hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
- if (hw_event_type & PERF_COUNT_NMI)
- hwc->nmi = 1;
- }
- hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0;
- hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;
+ /*
+ * If privileged enough, allow NMI events:
+ */
+ hwc->nmi = 0;
+ if (capable(CAP_SYS_ADMIN) && hw_event->nmi)
+ hwc->nmi = 1;
- hwc->irq_period = counter->__irq_period;
+ hwc->irq_period = hw_event->irq_period;
/*
* Intel PMCs cannot be accessed sanely above 32 bit width,
* so we install an artificial 1<<31 period regardless of
* the generic counter period:
*/
- if (!hwc->irq_period)
- hwc->irq_period = 0x7FFFFFFF;
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
+ hwc->irq_period = 0x7FFFFFFF;
- hwc->next_count = -((s32) hwc->irq_period);
+ atomic64_set(&hwc->period_left, hwc->irq_period);
/*
- * Negative event types mean raw encoded event+umask values:
+ * Raw event types provide the config directly in the event structure:
*/
- if (hw_event_type < 0) {
- counter->hw_event_type = -hw_event_type;
- counter->hw_event_type &= ~PERF_COUNT_NMI;
+ if (hw_event->raw) {
+ hwc->config |= pmc_ops->raw_event(hw_event->type);
} else {
- hw_event_type &= ~PERF_COUNT_NMI;
- if (hw_event_type >= max_intel_perfmon_events)
+ if (hw_event->type >= pmc_ops->max_events)
return -EINVAL;
/*
* The generic map:
*/
- counter->hw_event_type = intel_perfmon_event_map[hw_event_type];
+ hwc->config |= pmc_ops->event_map(hw_event->type);
}
- hwc->config |= counter->hw_event_type;
counter->wakeup_pending = 0;
return 0;
}
-static void __hw_perf_enable_all(void)
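+/*
+ * Disable all counters in one go via MSR_CORE_PERF_GLOBAL_CTRL and
+ * return its previous value so hw_perf_restore() can undo this:
+ */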
+static u64 pmc_intel_save_disable_all(void)
{
- wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
+ u64 ctrl;
+
+ rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+
+ return ctrl;
}
-void hw_perf_enable_all(void)
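+/*
+ * AMD has no global control MSR: remember the software 'enabled' state
+ * and clear the enable bit in each event-select register individually:
+ */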
+static u64 pmc_amd_save_disable_all(void)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ int enabled, idx;
+
+ enabled = cpuc->enabled;
+ cpuc->enabled = 0;
+ barrier();
+
+ for (idx = 0; idx < nr_counters_generic; idx++) {
+ u64 val;
+
+ rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
+ if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) {
+ val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
+ }
+ }
+
+ return enabled;
+}
- cpuc->enable_all = 1;
- __hw_perf_enable_all();
+u64 hw_perf_save_disable(void)
+{
+ if (unlikely(!perf_counters_initialized))
+ return 0;
+
+ return pmc_ops->save_disable_all();
+}
+/*
+ * Exported because of ACPI idle
+ */
+EXPORT_SYMBOL_GPL(hw_perf_save_disable);
+
+static void pmc_intel_restore_all(u64 ctrl)
+{
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
}
-void hw_perf_disable_all(void)
+static void pmc_amd_restore_all(u64 ctrl)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ int idx;
- cpuc->enable_all = 0;
- wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
+ cpuc->enabled = ctrl;
+ barrier();
+ if (!ctrl)
+ return;
+
+ for (idx = 0; idx < nr_counters_generic; idx++) {
+ if (test_bit(idx, cpuc->active_mask)) {
+ u64 val;
+
+ rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
+ val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
+ }
+ }
}
-static inline void
-__hw_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx)
+void hw_perf_restore(u64 ctrl)
{
- wrmsr(hwc->config_base + idx, hwc->config, 0);
+ if (unlikely(!perf_counters_initialized))
+ return;
+
+ pmc_ops->restore_all(ctrl);
}
+/*
+ * Exported because of ACPI idle
+ */
+EXPORT_SYMBOL_GPL(hw_perf_restore);
-static DEFINE_PER_CPU(u64, prev_next_count[MAX_HW_COUNTERS]);
+static u64 pmc_intel_get_status(u64 mask)
+{
+ u64 status;
+
+ rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
-static void __hw_perf_counter_set_period(struct hw_perf_counter *hwc, int idx)
+ return status;
+}
+
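+/*
+ * Emulate the Intel global status register: a counter is considered
+ * overflowed once its top implemented bit has cleared (the counters are
+ * programmed with negative values and count up towards overflow):
+ */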
+static u64 pmc_amd_get_status(u64 mask)
{
- per_cpu(prev_next_count[idx], smp_processor_id()) = hwc->next_count;
+ u64 status = 0;
+ int idx;
- wrmsr(hwc->counter_base + idx, hwc->next_count, 0);
+ for (idx = 0; idx < nr_counters_generic; idx++) {
+ s64 val;
+
+ if (!(mask & (1 << idx)))
+ continue;
+
+ rdmsrl(MSR_K7_PERFCTR0 + idx, val);
+ val <<= (64 - counter_value_bits);
+ if (val >= 0)
+ status |= (1 << idx);
+ }
+
+ return status;
}
-static void __hw_perf_counter_enable(struct hw_perf_counter *hwc, int idx)
+static u64 hw_perf_get_status(u64 mask)
{
- wrmsr(hwc->config_base + idx,
- hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
+ if (unlikely(!perf_counters_initialized))
+ return 0;
+
+ return pmc_ops->get_status(mask);
+}
+
+static void pmc_intel_ack_status(u64 ack)
+{
+ wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
+}
+
+static void pmc_amd_ack_status(u64 ack)
+{
+}
+
+static void hw_perf_ack_status(u64 ack)
+{
+ if (unlikely(!perf_counters_initialized))
+ return;
+
+ pmc_ops->ack_status(ack);
}
-void hw_perf_counter_enable(struct perf_counter *counter)
+static void pmc_intel_enable(int idx, u64 config)
+{
+ wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx,
+ config | ARCH_PERFMON_EVENTSEL0_ENABLE);
+}
+
+static void pmc_amd_enable(int idx, u64 config)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- struct hw_perf_counter *hwc = &counter->hw;
- int idx = hwc->idx;
- /* Try to get the previous counter again */
- if (test_and_set_bit(idx, cpuc->used)) {
- idx = find_first_zero_bit(cpuc->used, nr_hw_counters);
- set_bit(idx, cpuc->used);
- hwc->idx = idx;
- }
+ set_bit(idx, cpuc->active_mask);
+ if (cpuc->enabled)
+ config |= ARCH_PERFMON_EVENTSEL0_ENABLE;
- perf_counters_lapic_init(hwc->nmi);
+ wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
+}
- __hw_perf_counter_disable(hwc, idx);
+static void hw_perf_enable(int idx, u64 config)
+{
+ if (unlikely(!perf_counters_initialized))
+ return;
- cpuc->counters[idx] = counter;
+ pmc_ops->enable(idx, config);
+}
- __hw_perf_counter_set_period(hwc, idx);
- __hw_perf_counter_enable(hwc, idx);
+static void pmc_intel_disable(int idx, u64 config)
+{
+ wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, config);
}
-#ifdef CONFIG_X86_64
-static inline void atomic64_counter_set(struct perf_counter *counter, u64 val)
+static void pmc_amd_disable(int idx, u64 config)
{
- atomic64_set(&counter->count, val);
+ struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+
+ clear_bit(idx, cpuc->active_mask);
+ wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
+
}
-static inline u64 atomic64_counter_read(struct perf_counter *counter)
+static void hw_perf_disable(int idx, u64 config)
{
- return atomic64_read(&counter->count);
+ if (unlikely(!perf_counters_initialized))
+ return;
+
+ pmc_ops->disable(idx, config);
}
-#else
-/*
- * Todo: add proper atomic64_t support to 32-bit x86:
- */
-static inline void atomic64_counter_set(struct perf_counter *counter, u64 val64)
+
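+/*
+ * All fixed counters share MSR_ARCH_PERFMON_FIXED_CTR_CTRL; clear this
+ * counter's 4-bit control field to stop it:
+ */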
+static inline void
+__pmc_fixed_disable(struct perf_counter *counter,
+ struct hw_perf_counter *hwc, unsigned int __idx)
{
- u32 *val32 = (void *)&val64;
+ int idx = __idx - X86_PMC_IDX_FIXED;
+ u64 ctrl_val, mask;
+ int err;
- atomic_set(counter->count32 + 0, *(val32 + 0));
- atomic_set(counter->count32 + 1, *(val32 + 1));
+ mask = 0xfULL << (idx * 4);
+
+ rdmsrl(hwc->config_base, ctrl_val);
+ ctrl_val &= ~mask;
+ err = checking_wrmsrl(hwc->config_base, ctrl_val);
}
-static inline u64 atomic64_counter_read(struct perf_counter *counter)
+static inline void
+__pmc_generic_disable(struct perf_counter *counter,
+ struct hw_perf_counter *hwc, unsigned int idx)
{
- return atomic_read(counter->count32 + 0) |
- (u64) atomic_read(counter->count32 + 1) << 32;
+ if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
+ __pmc_fixed_disable(counter, hwc, idx);
+ else
+ hw_perf_disable(idx, hwc->config);
}
-#endif
-static void __hw_perf_save_counter(struct perf_counter *counter,
- struct hw_perf_counter *hwc, int idx)
+static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
+
+/*
+ * Set the next IRQ period, based on the hwc->period_left value.
+ * To be called with the counter disabled in hw:
+ */
+static void
+__hw_perf_counter_set_period(struct perf_counter *counter,
+ struct hw_perf_counter *hwc, int idx)
{
- s64 raw = -1;
- s64 delta;
+ s64 left = atomic64_read(&hwc->period_left);
+ s32 period = hwc->irq_period;
int err;
/*
- * Get the raw hw counter value:
+ * If we are way outside a reasonable range then just skip forward:
*/
- err = rdmsrl_safe(hwc->counter_base + idx, &raw);
- WARN_ON_ONCE(err);
+ if (unlikely(left <= -period)) {
+ left = period;
+ atomic64_set(&hwc->period_left, left);
+ }
+
+ if (unlikely(left <= 0)) {
+ left += period;
+ atomic64_set(&hwc->period_left, left);
+ }
+
+ per_cpu(prev_left[idx], smp_processor_id()) = left;
/*
- * Rebase it to zero (it started counting at -irq_period),
- * to see the delta since ->prev_count:
+ * The hw counter starts counting from this counter offset,
+ * mark it to be able to extract future deltas:
*/
- delta = (s64)hwc->irq_period + (s64)(s32)raw;
+ atomic64_set(&hwc->prev_count, (u64)-left);
- atomic64_counter_set(counter, hwc->prev_count + delta);
+ err = checking_wrmsrl(hwc->counter_base + idx,
+ (u64)(-left) & counter_value_mask);
+}
+
+static inline void
+__pmc_fixed_enable(struct perf_counter *counter,
+ struct hw_perf_counter *hwc, unsigned int __idx)
+{
+ int idx = __idx - X86_PMC_IDX_FIXED;
+ u64 ctrl_val, bits, mask;
+ int err;
/*
- * Adjust the ->prev_count offset - if we went beyond
- * irq_period of units, then we got an IRQ and the counter
- * was set back to -irq_period:
+ * Enable IRQ generation (0x8),
+ * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
+ * if requested:
*/
- while (delta >= (s64)hwc->irq_period) {
- hwc->prev_count += hwc->irq_period;
- delta -= (s64)hwc->irq_period;
+ bits = 0x8ULL;
+ if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
+ bits |= 0x2;
+ if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
+ bits |= 0x1;
+ bits <<= (idx * 4);
+ mask = 0xfULL << (idx * 4);
+
+ rdmsrl(hwc->config_base, ctrl_val);
+ ctrl_val &= ~mask;
+ ctrl_val |= bits;
+ err = checking_wrmsrl(hwc->config_base, ctrl_val);
+}
+
+static void
+__pmc_generic_enable(struct perf_counter *counter,
+ struct hw_perf_counter *hwc, int idx)
+{
+ if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
+ __pmc_fixed_enable(counter, hwc, idx);
+ else
+ hw_perf_enable(idx, hwc->config);
+}
+
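+/*
+ * Map the generic hw event onto one of the Intel fixed-function
+ * counters, or return -1 if it must use a generic counter:
+ */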
+static int
+fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
+{
+ unsigned int event;
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ return -1;
+
+ if (unlikely(hwc->nmi))
+ return -1;
+
+ event = hwc->config & ARCH_PERFMON_EVENT_MASK;
+
+ if (unlikely(event == pmc_ops->event_map(PERF_COUNT_INSTRUCTIONS)))
+ return X86_PMC_IDX_FIXED_INSTRUCTIONS;
+ if (unlikely(event == pmc_ops->event_map(PERF_COUNT_CPU_CYCLES)))
+ return X86_PMC_IDX_FIXED_CPU_CYCLES;
+ if (unlikely(event == pmc_ops->event_map(PERF_COUNT_BUS_CYCLES)))
+ return X86_PMC_IDX_FIXED_BUS_CYCLES;
+
+ return -1;
+}
+
+/*
+ * Find a PMC slot for the freshly enabled / scheduled in counter:
+ */
+static int pmc_generic_enable(struct perf_counter *counter)
+{
+ struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct hw_perf_counter *hwc = &counter->hw;
+ int idx;
+
+ idx = fixed_mode_idx(counter, hwc);
+ if (idx >= 0) {
+ /*
+ * Try to get the fixed counter; if it is already taken,
+ * fall back to a generic counter:
+ */
+ if (test_and_set_bit(idx, cpuc->used))
+ goto try_generic;
+
+ hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
+ /*
+ * We set it so that counter_base + idx in wrmsr/rdmsr maps to
+ * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
+ */
+ hwc->counter_base =
+ MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
+ hwc->idx = idx;
+ } else {
+ idx = hwc->idx;
+ /* Try to get the previous generic counter again */
+ if (test_and_set_bit(idx, cpuc->used)) {
+try_generic:
+ idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
+ if (idx == nr_counters_generic)
+ return -EAGAIN;
+
+ set_bit(idx, cpuc->used);
+ hwc->idx = idx;
+ }
+ hwc->config_base = pmc_ops->eventsel;
+ hwc->counter_base = pmc_ops->perfctr;
}
+ perf_counters_lapic_init(hwc->nmi);
+
+ __pmc_generic_disable(counter, hwc, idx);
+
+ cpuc->counters[idx] = counter;
/*
- * Calculate the next raw counter value we'll write into
- * the counter at the next sched-in time:
+ * Make it visible before enabling the hw:
*/
- delta -= (s64)hwc->irq_period;
+ smp_wmb();
+
+ __hw_perf_counter_set_period(counter, hwc, idx);
+ __pmc_generic_enable(counter, hwc, idx);
- hwc->next_count = (s32)delta;
+ return 0;
}
void perf_counter_print_debug(void)
{
- u64 ctrl, status, overflow, pmc_ctrl, pmc_count, next_count;
- int cpu, err, idx;
+ u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
+ struct cpu_hw_counters *cpuc;
+ int cpu, idx;
+
+ if (!nr_counters_generic)
+ return;
local_irq_disable();
cpu = smp_processor_id();
+ cpuc = &per_cpu(cpu_hw_counters, cpu);
- err = rdmsrl_safe(MSR_CORE_PERF_GLOBAL_CTRL, &ctrl);
- WARN_ON_ONCE(err);
-
- err = rdmsrl_safe(MSR_CORE_PERF_GLOBAL_STATUS, &status);
- WARN_ON_ONCE(err);
-
- err = rdmsrl_safe(MSR_CORE_PERF_GLOBAL_OVF_CTRL, &overflow);
- WARN_ON_ONCE(err);
-
- printk(KERN_INFO "\n");
- printk(KERN_INFO "CPU#%d: ctrl: %016llx\n", cpu, ctrl);
- printk(KERN_INFO "CPU#%d: status: %016llx\n", cpu, status);
- printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow);
-
- for (idx = 0; idx < nr_hw_counters; idx++) {
- err = rdmsrl_safe(MSR_ARCH_PERFMON_EVENTSEL0 + idx, &pmc_ctrl);
- WARN_ON_ONCE(err);
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+ rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
+ rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
+ rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
+ rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
+
+ pr_info("\n");
+ pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
+ pr_info("CPU#%d: status: %016llx\n", cpu, status);
+ pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
+ pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
+ }
+ pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used);
- err = rdmsrl_safe(MSR_ARCH_PERFMON_PERFCTR0 + idx, &pmc_count);
- WARN_ON_ONCE(err);
+ for (idx = 0; idx < nr_counters_generic; idx++) {
+ rdmsrl(pmc_ops->eventsel + idx, pmc_ctrl);
+ rdmsrl(pmc_ops->perfctr + idx, pmc_count);
- next_count = per_cpu(prev_next_count[idx], cpu);
+ prev_left = per_cpu(prev_left[idx], cpu);
- printk(KERN_INFO "CPU#%d: PMC%d ctrl: %016llx\n",
+ pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
cpu, idx, pmc_ctrl);
- printk(KERN_INFO "CPU#%d: PMC%d count: %016llx\n",
+ pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
+ cpu, idx, pmc_count);
+ pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
+ cpu, idx, prev_left);
+ }
+ for (idx = 0; idx < nr_counters_fixed; idx++) {
+ rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
+
+ pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
cpu, idx, pmc_count);
- printk(KERN_INFO "CPU#%d: PMC%d next: %016llx\n",
- cpu, idx, next_count);
}
local_irq_enable();
}
-void hw_perf_counter_disable(struct perf_counter *counter)
+static void pmc_generic_disable(struct perf_counter *counter)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
struct hw_perf_counter *hwc = &counter->hw;
unsigned int idx = hwc->idx;
- __hw_perf_counter_disable(hwc, idx);
+ __pmc_generic_disable(counter, hwc, idx);
clear_bit(idx, cpuc->used);
cpuc->counters[idx] = NULL;
- __hw_perf_save_counter(counter, hwc, idx);
-}
-
-void hw_perf_counter_read(struct perf_counter *counter)
-{
- struct hw_perf_counter *hwc = &counter->hw;
- unsigned long addr = hwc->counter_base + hwc->idx;
- s64 offs, val = -1LL;
- s32 val32;
- int err;
-
- /* Careful: NMI might modify the counter offset */
- do {
- offs = hwc->prev_count;
- err = rdmsrl_safe(addr, &val);
- WARN_ON_ONCE(err);
- } while (offs != hwc->prev_count);
+ /*
+ * Make sure the cleared pointer becomes visible before we
+ * (potentially) free the counter:
+ */
+ smp_wmb();
- val32 = (s32) val;
- val = (s64)hwc->irq_period + (s64)val32;
- atomic64_counter_set(counter, hwc->prev_count + val);
+ /*
+ * Drain the remaining delta count out of a counter
+ * that we are disabling:
+ */
+ x86_perf_counter_update(counter, hwc, idx);
}
static void perf_store_irq_data(struct perf_counter *counter, u64 data)
}
/*
- * NMI-safe enable method:
+ * Save and restart an expired counter. Called by NMI contexts,
+ * so it has to be careful about preempting normal counter ops:
*/
static void perf_save_and_restart(struct perf_counter *counter)
{
struct hw_perf_counter *hwc = &counter->hw;
int idx = hwc->idx;
- u64 pmc_ctrl;
- int err;
-
- err = rdmsrl_safe(MSR_ARCH_PERFMON_EVENTSEL0 + idx, &pmc_ctrl);
- WARN_ON_ONCE(err);
- __hw_perf_save_counter(counter, hwc, idx);
- __hw_perf_counter_set_period(hwc, idx);
+ x86_perf_counter_update(counter, hwc, idx);
+ __hw_perf_counter_set_period(counter, hwc, idx);
- if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE)
- __hw_perf_counter_enable(hwc, idx);
+ if (counter->state == PERF_COUNTER_STATE_ACTIVE)
+ __pmc_generic_enable(counter, hwc, idx);
}
static void
-perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown)
+perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
{
- struct perf_counter_context *ctx = leader->ctx;
- struct perf_counter *counter;
- int bit;
+ struct perf_counter *counter, *group_leader = sibling->group_leader;
- list_for_each_entry(counter, &ctx->counters, list) {
- if (counter->record_type != PERF_RECORD_SIMPLE ||
- counter == leader)
- continue;
+ /*
+ * Store the sibling event ids and counter values (if any):
+ */
+ list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
- if (counter->active) {
- /*
- * When counter was not in the overflow mask, we have to
- * read it from hardware. We read it as well, when it
- * has not been read yet and clear the bit in the
- * status mask.
- */
- bit = counter->hw.idx;
- if (!test_bit(bit, (unsigned long *) overflown) ||
- test_bit(bit, (unsigned long *) status)) {
- clear_bit(bit, (unsigned long *) status);
- perf_save_and_restart(counter);
- }
- }
- perf_store_irq_data(leader, counter->hw_event_type);
- perf_store_irq_data(leader, atomic64_counter_read(counter));
+ x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
+ perf_store_irq_data(sibling, counter->hw_event.type);
+ perf_store_irq_data(sibling, atomic64_read(&counter->count));
}
}
+/*
+ * Maximum interrupt frequency of 100KHz per CPU
+ */
+#define PERFMON_MAX_INTERRUPTS (100000/HZ)
+
/*
* This handler is triggered by the local APIC, so the APIC IRQ handling
* rules apply:
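+ *
+ * Returns non-zero if any counter overflows were handled (the NMI
+ * notifier uses this to decide whether to swallow the NMI).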
*/
-static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
+static int __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
int bit, cpu = smp_processor_id();
- struct cpu_hw_counters *cpuc;
u64 ack, status;
+ struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
+ int ret = 0;
- /* Disable counters globally */
- wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
- ack_APIC_irq();
-
- cpuc = &per_cpu(cpu_hw_counters, cpu);
+ cpuc->throttle_ctrl = hw_perf_save_disable();
- rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
+ status = hw_perf_get_status(cpuc->throttle_ctrl);
if (!status)
goto out;
+ ret = 1;
again:
+ inc_irq_stat(apic_perf_irqs);
ack = status;
- for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) {
+ for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_counter *counter = cpuc->counters[bit];
clear_bit(bit, (unsigned long *) &status);
perf_save_and_restart(counter);
- switch (counter->record_type) {
+ switch (counter->hw_event.record_type) {
case PERF_RECORD_SIMPLE:
continue;
case PERF_RECORD_IRQ:
perf_store_irq_data(counter, instruction_pointer(regs));
break;
case PERF_RECORD_GROUP:
- perf_store_irq_data(counter, counter->hw_event_type);
- perf_store_irq_data(counter,
- atomic64_counter_read(counter));
perf_handle_group(counter, &status, &ack);
break;
}
/*
* From NMI context we cannot call into the scheduler to
- * do a task wakeup - but we mark these counters as
+ * do a task wakeup - but we mark these generic counters as
* wakeup_pending and initate a wakeup callback:
*/
if (nmi) {
}
}
- wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack, 0);
+ hw_perf_ack_status(ack);
/*
* Repeat if there is more work to be done:
*/
- rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
+ status = hw_perf_get_status(cpuc->throttle_ctrl);
if (status)
goto again;
out:
/*
- * Do not reenable when global enable is off:
+ * Restore - do not reenable when global enable is off or throttled:
*/
- if (cpuc->enable_all)
- __hw_perf_enable_all();
+ if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
+ hw_perf_restore(cpuc->throttle_ctrl);
+
+ return ret;
+}
+
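+/*
+ * Re-enable counters that hit the PERFMON_MAX_INTERRUPTS throttle and
+ * reset the interrupt count; meant to be called periodically (the
+ * 100 KHz limit above assumes roughly once per tick):
+ */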
+void perf_counter_unthrottle(void)
+{
+ struct cpu_hw_counters *cpuc;
+
+ if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+ return;
+
+ if (unlikely(!perf_counters_initialized))
+ return;
+
+ cpuc = &__get_cpu_var(cpu_hw_counters);
+ if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n");
+ hw_perf_restore(cpuc->throttle_ctrl);
+ }
+ cpuc->interrupts = 0;
}
void smp_perf_counter_interrupt(struct pt_regs *regs)
{
irq_enter();
-#ifdef CONFIG_X86_64
- add_pda(apic_perf_irqs, 1);
-#else
- per_cpu(irq_stat, smp_processor_id()).apic_perf_irqs++;
-#endif
apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
+ ack_APIC_irq();
__smp_perf_counter_interrupt(regs, 0);
-
irq_exit();
}
cpu = smp_processor_id();
cpuc = &per_cpu(cpu_hw_counters, cpu);
- for_each_bit(bit, cpuc->used, nr_hw_counters) {
+ for_each_bit(bit, cpuc->used, X86_PMC_IDX_MAX) {
struct perf_counter *counter = cpuc->counters[bit];
if (!counter)
local_irq_restore(flags);
}
-void __cpuinit perf_counters_lapic_init(int nmi)
+void perf_counters_lapic_init(int nmi)
{
u32 apic_val;
{
struct die_args *args = __args;
struct pt_regs *regs;
+ int ret;
+
+ switch (cmd) {
+ case DIE_NMI:
+ case DIE_NMI_IPI:
+ break;
- if (likely(cmd != DIE_NMI_IPI))
+ default:
return NOTIFY_DONE;
+ }
regs = args->regs;
apic_write(APIC_LVTPC, APIC_DM_NMI);
- __smp_perf_counter_interrupt(regs, 1);
+ ret = __smp_perf_counter_interrupt(regs, 1);
- return NOTIFY_STOP;
+ return ret ? NOTIFY_STOP : NOTIFY_OK;
}
static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
- .notifier_call = perf_counter_nmi_handler
+ .notifier_call = perf_counter_nmi_handler,
+ .next = NULL,
+ .priority = 1
};
-void __init init_hw_perf_counters(void)
+static struct pmc_x86_ops pmc_intel_ops = {
+ .save_disable_all = pmc_intel_save_disable_all,
+ .restore_all = pmc_intel_restore_all,
+ .get_status = pmc_intel_get_status,
+ .ack_status = pmc_intel_ack_status,
+ .enable = pmc_intel_enable,
+ .disable = pmc_intel_disable,
+ .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
+ .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
+ .event_map = pmc_intel_event_map,
+ .raw_event = pmc_intel_raw_event,
+ .max_events = ARRAY_SIZE(intel_perfmon_event_map),
+};
+
+static struct pmc_x86_ops pmc_amd_ops = {
+ .save_disable_all = pmc_amd_save_disable_all,
+ .restore_all = pmc_amd_restore_all,
+ .get_status = pmc_amd_get_status,
+ .ack_status = pmc_amd_ack_status,
+ .enable = pmc_amd_enable,
+ .disable = pmc_amd_disable,
+ .eventsel = MSR_K7_EVNTSEL0,
+ .perfctr = MSR_K7_PERFCTR0,
+ .event_map = pmc_amd_event_map,
+ .raw_event = pmc_amd_raw_event,
+ .max_events = ARRAY_SIZE(amd_perfmon_event_map),
+};
+
+static struct pmc_x86_ops *pmc_intel_init(void)
{
union cpuid10_eax eax;
- unsigned int unused;
unsigned int ebx;
-
- if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
- return;
+ unsigned int unused;
+ union cpuid10_edx edx;
/*
* Check whether the Architectural PerfMon supports
* Branch Misses Retired Event or not.
*/
- cpuid(10, &(eax.full), &ebx, &unused, &unused);
+ cpuid(10, &eax.full, &ebx, &unused, &edx.full);
if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
+ return NULL;
+
+ pr_info("Intel Performance Monitoring support detected.\n");
+ pr_info("... version: %d\n", eax.split.version_id);
+ pr_info("... bit width: %d\n", eax.split.bit_width);
+ pr_info("... mask length: %d\n", eax.split.mask_length);
+
+ nr_counters_generic = eax.split.num_counters;
+ nr_counters_fixed = edx.split.num_counters_fixed;
+ counter_value_mask = (1ULL << eax.split.bit_width) - 1;
+
+ return &pmc_intel_ops;
+}
+
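+/*
+ * AMD does not enumerate the PMU via CPUID leaf 0xA; assume the usual
+ * K7/K8 setup of four 48-bit generic counters and no fixed counters:
+ */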
+static struct pmc_x86_ops *pmc_amd_init(void)
+{
+ nr_counters_generic = 4;
+ nr_counters_fixed = 0;
+ counter_value_mask = 0x0000FFFFFFFFFFFFULL;
+ counter_value_bits = 48;
+
+ pr_info("AMD Performance Monitoring support detected.\n");
+
+ return &pmc_amd_ops;
+}
+
+void __init init_hw_perf_counters(void)
+{
+ if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
return;
- printk(KERN_INFO "Intel Performance Monitoring support detected.\n");
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+ pmc_ops = pmc_intel_init();
+ break;
+ case X86_VENDOR_AMD:
+ pmc_ops = pmc_amd_init();
+ break;
+ }
+ if (!pmc_ops)
+ return;
- printk(KERN_INFO "... version: %d\n", eax.split.version_id);
- printk(KERN_INFO "... num_counters: %d\n", eax.split.num_counters);
- nr_hw_counters = eax.split.num_counters;
- if (nr_hw_counters > MAX_HW_COUNTERS) {
- nr_hw_counters = MAX_HW_COUNTERS;
+ pr_info("... num counters: %d\n", nr_counters_generic);
+ if (nr_counters_generic > X86_PMC_MAX_GENERIC) {
+ nr_counters_generic = X86_PMC_MAX_GENERIC;
WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
- nr_hw_counters, MAX_HW_COUNTERS);
+ nr_counters_generic, X86_PMC_MAX_GENERIC);
}
- perf_counter_mask = (1 << nr_hw_counters) - 1;
- perf_max_counters = nr_hw_counters;
+ perf_counter_mask = (1 << nr_counters_generic) - 1;
+ perf_max_counters = nr_counters_generic;
+
+ pr_info("... value mask: %016Lx\n", counter_value_mask);
- printk(KERN_INFO "... bit_width: %d\n", eax.split.bit_width);
- printk(KERN_INFO "... mask_length: %d\n", eax.split.mask_length);
+ if (nr_counters_fixed > X86_PMC_MAX_FIXED) {
+ nr_counters_fixed = X86_PMC_MAX_FIXED;
+ WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
+ nr_counters_fixed, X86_PMC_MAX_FIXED);
+ }
+ pr_info("... fixed counters: %d\n", nr_counters_fixed);
+
+ perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED;
+
+ pr_info("... counter mask: %016Lx\n", perf_counter_mask);
+ perf_counters_initialized = true;
perf_counters_lapic_init(0);
register_die_notifier(&perf_counter_nmi_notifier);
+}
- perf_counters_initialized = true;
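+/*
+ * ->read() method: fold the current hw count into the generic counter:
+ */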
+static void pmc_generic_read(struct perf_counter *counter)
+{
+ x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
+}
+
+static const struct hw_perf_counter_ops x86_perf_counter_ops = {
+ .enable = pmc_generic_enable,
+ .disable = pmc_generic_disable,
+ .read = pmc_generic_read,
+};
+
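+/*
+ * Set up the hw state for a new counter and return the x86 counter ops,
+ * or NULL if initialization failed:
+ */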
+const struct hw_perf_counter_ops *
+hw_perf_counter_init(struct perf_counter *counter)
+{
+ int err;
+
+ err = __hw_perf_counter_init(counter);
+ if (err)
+ return NULL;
+
+ return &x86_perf_counter_ops;
}