#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>
/*
 * ARMv6 supports a maximum of 3 events, starting from index 0. If we add
 * another platform that supports more, we need to increase this to be the
 * largest of all platforms.
 *
 * ARMv7 supports up to 32 events:
 *  cycle counter CCNT + 31 event counters CNT0..30.
 *  Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
 */
#define ARMPMU_MAX_HWEVENTS	32

static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
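/*
 * These per-CPU arrays provide the backing storage for the CPU PMU's
 * pmu_hw_events: cpu_pmu_init() below points each CPU's events and
 * used_mask fields at them and initialises the per-CPU pmu_lock.
 */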
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;

enum arm_perf_pmu_ids
armpmu_get_pmu_id(void)
{
	int id = -ENODEV;

	if (cpu_pmu != NULL)
		id = cpu_pmu->id;

	return id;
}
EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);

int
armpmu_get_max_events(void)
{
	int max_events = 0;

	if (cpu_pmu != NULL)
		max_events = cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(armpmu_get_max_events);

int perf_num_counters(void)
{
	return armpmu_get_max_events();
}
EXPORT_SYMBOL_GPL(perf_num_counters);
#define HW_OP_UNSUPPORTED	0xFFFF

#define C(_x) \
	PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED	0xFFFF
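/*
 * A PERF_TYPE_HW_CACHE config packs the cache type, operation and result
 * into its low three bytes; each field indexes the PMU's cache_map to
 * yield a hardware event number, with 0xFFFF marking unsupported
 * combinations.
 */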
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}
static int
armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -ENOENT;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}
static int map_cpu_event(struct perf_event *event,
			 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
			 const unsigned (*cache_map)
					[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX],
			 u32 raw_event_mask)
{
	u64 config = event->attr.config;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}
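/*
 * Program the counter for the next sample period. The counter is written
 * with the negated "left" value so that it overflows, and raises the PMU
 * interrupt, after "left" more events; prev_count is set to the same value
 * so that armpmu_event_update() computes the correct delta.
 */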
int
armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
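/*
 * Fold the current hardware counter value into the 64-bit event count.
 * prev_count is updated with a cmpxchg-and-retry loop so a racing overflow
 * interrupt cannot cause events to be counted twice; the delta is masked
 * to the counter width (max_period).
 */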
u64
armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
static void
armpmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	armpmu_event_update(event, hwc, hwc->idx);
}
static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(hwc, hwc->idx);
		barrier(); /* why? */
		armpmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}
static void
armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event, hwc, hwc->idx);
	armpmu->enable(hwc, hwc->idx);
}
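/*
 * add()/del() bind an event to a hardware counter: get_event_idx() picks a
 * free index from used_mask when the event is added, and del() stops the
 * event and releases that index again.
 */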
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0);

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);

	perf_event_update_userpage(event);
}
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(hwc, idx);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}
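/*
 * Group validation: schedule the group leader and all of its siblings onto
 * a fake PMU with an empty used_mask. If get_event_idx() can place every
 * event, the group will also fit on the real hardware.
 */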
static int
validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event fake_event = event->hw;
	struct pmu *leader_pmu = event->group_leader->pmu;

	if (is_software_event(event))
		return 1;

	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
}
static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;
	DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(fake_used_mask, 0, sizeof(fake_used_mask));
	fake_pmu.used_mask = fake_used_mask;

	if (!validate_event(&fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(&fake_pmu, event))
		return -EINVAL;

	return 0;
}
static irqreturn_t armpmu_platform_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
	struct platform_device *plat_device = armpmu->plat_device;
	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);

	return plat->handle_irq(irq, dev, armpmu->handle_irq);
}
static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = armpmu->plat_device;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	for (i = 0; i < irqs; ++i) {
		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
			continue;
		irq = platform_get_irq(pmu_device, i);
		if (irq >= 0)
			free_irq(irq, armpmu);
	}

	release_pmu(armpmu->type);
}
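/*
 * Claim the PMU hardware and request its interrupts, one per CPU where
 * possible. This runs when the first event is created;
 * armpmu_release_hardware() undoes it when the last event is destroyed.
 */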
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	struct arm_pmu_platdata *plat;
	irq_handler_t handle_irq;
	int i, err, irq, irqs;
	struct platform_device *pmu_device = armpmu->plat_device;

	if (!pmu_device)
		return -ENODEV;

	err = reserve_pmu(armpmu->type);
	if (err) {
		pr_warning("unable to reserve pmu\n");
		return err;
	}

	plat = dev_get_platdata(&pmu_device->dev);
	if (plat && plat->handle_irq)
		handle_irq = armpmu_platform_irq;
	else
		handle_irq = armpmu->handle_irq;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}

	for (i = 0; i < irqs; ++i) {
		err = 0;
		irq = platform_get_irq(pmu_device, i);
		if (irq < 0)
			continue;

		/*
		 * If we have a single PMU interrupt that we can't shift,
		 * assume that we're running on a uniprocessor machine and
		 * continue. Otherwise, continue without this interrupt.
		 */
		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
				   irq, i);
			continue;
		}

		err = request_irq(irq, handle_irq,
				  IRQF_DISABLED | IRQF_NOBALANCING,
				  "arm-pmu", armpmu);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
			       irq);
			armpmu_release_hardware(armpmu);
			return err;
		}

		cpumask_set_cpu(i, &armpmu->active_irqs);
	}

	return 0;
}
static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}
static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping, err;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EPERM;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base	|= (unsigned long)mapping;

	if (!hwc->sample_period) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period  = armpmu->max_period >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	return err;
}
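/*
 * The first event to be initialised reserves the PMU hardware and its
 * IRQs; active_events and reserve_mutex reference count that reservation
 * so it is performed exactly once.
 */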
static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}
static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (enabled)
		armpmu->start();
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	armpmu->stop();
}
static void __init armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
	};
}

int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
{
	armpmu_init(armpmu);
	return perf_pmu_register(&armpmu->pmu, name, type);
}
/* Include the PMU-specific implementations. */
#include "perf_event_xscale.c"
#include "perf_event_v6.c"
#include "perf_event_v7.c"
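/*
 * The files included above provide the xscale*_pmu_init(), armv6*_pmu_init()
 * and armv7_*_pmu_init() constructors used by init_hw_perf_events() below.
 */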
/*
 * Ensure the PMU has sane values out of reset.
 * This requires SMP to be available, so exists as a separate initcall.
 */
static int __init
cpu_pmu_reset(void)
{
	if (cpu_pmu && cpu_pmu->reset)
		return on_each_cpu(cpu_pmu->reset, NULL, 1);
	return 0;
}
arch_initcall(cpu_pmu_reset);
/*
 * PMU platform driver and devicetree bindings.
 */
static struct of_device_id armpmu_of_device_ids[] = {
	{.compatible = "arm,cortex-a9-pmu"},
	{.compatible = "arm,cortex-a8-pmu"},
	{.compatible = "arm,arm1136-pmu"},
	{.compatible = "arm,arm1176-pmu"},
	{},
};

static struct platform_device_id armpmu_plat_device_ids[] = {
	{.name = "arm-pmu"},
	{},
};
static int __devinit armpmu_device_probe(struct platform_device *pdev)
{
	if (!cpu_pmu)
		return -ENODEV;

	cpu_pmu->plat_device = pdev;
	return 0;
}

static struct platform_driver armpmu_driver = {
	.driver		= {
		.name	= "arm-pmu",
		.of_match_table = armpmu_of_device_ids,
	},
	.probe		= armpmu_device_probe,
	.id_table	= armpmu_plat_device_ids,
};

static int __init register_pmu_driver(void)
{
	return platform_driver_register(&armpmu_driver);
}
device_initcall(register_pmu_driver);
static struct pmu_hw_events *armpmu_get_cpu_events(void)
{
	return &__get_cpu_var(cpu_hw_events);
}

static void __init cpu_pmu_init(struct arm_pmu *armpmu)
{
	int cpu;
	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
		events->events = per_cpu(hw_events, cpu);
		events->used_mask = per_cpu(used_mask, cpu);
		raw_spin_lock_init(&events->pmu_lock);
	}
	armpmu->get_hw_events = armpmu_get_cpu_events;
	armpmu->type = ARM_PMU_DEVICE_CPU;
}
/*
 * CPU PMU identification and registration.
 */
static int __init
init_hw_perf_events(void)
{
	unsigned long cpuid = read_cpuid_id();
	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
	unsigned long part_number = (cpuid & 0xFFF0);
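	/*
	 * Decode the MIDR: bits [31:24] hold the implementer code and bits
	 * [15:4] the primary part number, which is what we switch on below.
	 * 0x41 is ARM Ltd, 0x69 is Intel (XScale).
	 */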
	if (0x41 == implementor) {
		switch (part_number) {
		case 0xB360:	/* ARM1136 */
		case 0xB560:	/* ARM1156 */
		case 0xB760:	/* ARM1176 */
			cpu_pmu = armv6pmu_init();
			break;
		case 0xB020:	/* ARM11mpcore */
			cpu_pmu = armv6mpcore_pmu_init();
			break;
		case 0xC080:	/* Cortex-A8 */
			cpu_pmu = armv7_a8_pmu_init();
			break;
		case 0xC090:	/* Cortex-A9 */
			cpu_pmu = armv7_a9_pmu_init();
			break;
		case 0xC050:	/* Cortex-A5 */
			cpu_pmu = armv7_a5_pmu_init();
			break;
		case 0xC0F0:	/* Cortex-A15 */
			cpu_pmu = armv7_a15_pmu_init();
			break;
		}
	/* Intel CPUs [xscale]. */
	} else if (0x69 == implementor) {
		part_number = (cpuid >> 13) & 0x7;
		switch (part_number) {
		case 1:
			cpu_pmu = xscale1pmu_init();
			break;
		case 2:
			cpu_pmu = xscale2pmu_init();
			break;
		}
	}

	if (cpu_pmu) {
		pr_info("enabled with %s PMU driver, %d counters available\n",
			cpu_pmu->name, cpu_pmu->num_events);
		cpu_pmu_init(cpu_pmu);
		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
	} else {
		pr_info("no hardware support available\n");
	}

	return 0;
}
early_initcall(init_hw_perf_events);
/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct frame_tail __user *tail;

	perf_callchain_store(entry, regs->ARM_pc);
	tail = (struct frame_tail __user *)regs->ARM_fp - 1;

	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
	       tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}
/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
		void *data)
{
	struct perf_callchain_entry *entry = data;
	perf_callchain_store(entry, fr->pc);
	return 0;
}
void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stackframe fr;

	fr.fp = regs->ARM_fp;
	fr.sp = regs->ARM_sp;
	fr.lr = regs->ARM_lr;
	fr.pc = regs->ARM_pc;
	walk_stackframe(&fr, callchain_trace, entry);
}