/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>

static struct platform_device *pmu_device;

/*
 * Hardware lock to serialize accesses to PMU registers. Needed for the
 * read/modify/write sequences.
 */
static DEFINE_RAW_SPINLOCK(pmu_lock);

/*
 * ARMv6 supports a maximum of 3 events, starting from index 0. If we add
 * another platform that supports more, we need to increase this to be the
 * largest of all platforms.
 *
 * ARMv7 supports up to 32 events:
 *  cycle counter CCNT + 31 event counters CNT0..30.
 * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
 */
#define ARMPMU_MAX_HWEVENTS 32

/* The events for a given CPU. */
struct cpu_hw_events {
        /*
         * The events that are active on the CPU for the given index.
         */
        struct perf_event *events[ARMPMU_MAX_HWEVENTS];

        /*
         * A 1 bit for an index indicates that the counter is being used for
         * an event. A 0 means that the counter can be used.
         */
        unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
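
/*
 * Illustration (not part of this file): the per-CPU backends included
 * further down typically claim a counter in their get_event_idx() hook
 * by atomically setting a bit in used_mask, roughly:
 *
 *	if (test_and_set_bit(counter, cpuc->used_mask))
 *		return -EAGAIN;		(counter already in use)
 *	return counter;
 *
 * The exact allocation policy (cycle counter vs. event counters) is
 * backend-specific; this is only a sketch.
 */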

/* Per-CPU-type PMU description, filled in by the implementations below. */
struct arm_pmu {
        enum arm_perf_pmu_ids id;
        cpumask_t active_irqs;
        const char *name;
        irqreturn_t (*handle_irq)(int irq_num, void *dev);
        void (*enable)(struct hw_perf_event *evt, int idx);
        void (*disable)(struct hw_perf_event *evt, int idx);
        int (*get_event_idx)(struct cpu_hw_events *cpuc,
                             struct hw_perf_event *hwc);
        int (*set_event_filter)(struct hw_perf_event *evt,
                                struct perf_event_attr *attr);
        u32 (*read_counter)(int idx);
        void (*write_counter)(int idx, u32 val);
        void (*start)(void);
        void (*stop)(void);
        void (*reset)(void *);
        const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
                                   [PERF_COUNT_HW_CACHE_OP_MAX]
                                   [PERF_COUNT_HW_CACHE_RESULT_MAX];
        const unsigned (*event_map)[PERF_COUNT_HW_MAX];
        u32 raw_event_mask;
        int num_events;
        u64 max_period;
};

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *armpmu;

enum arm_perf_pmu_ids
armpmu_get_pmu_id(void)
{
        return armpmu ? armpmu->id : -ENODEV;
}
EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);

int
armpmu_get_max_events(void)
{
        return armpmu ? armpmu->num_events : 0;
}
EXPORT_SYMBOL_GPL(armpmu_get_max_events);

int perf_num_counters(void)
{
        return armpmu_get_max_events();
}
EXPORT_SYMBOL_GPL(perf_num_counters);

/* Used when the generic mapping has no corresponding hardware event. */
#define HW_OP_UNSUPPORTED 0xFFFF

#define C(_x) \
        PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED 0xFFFF

static int
armpmu_map_cache_event(u64 config)
{
        unsigned int cache_type, cache_op, cache_result, ret;

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];
        if (ret == CACHE_OP_UNSUPPORTED)
                return -ENOENT;

        return ret;
}
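
/*
 * Example of the HW_CACHE config encoding decoded above: an L1-dcache
 * read miss is expressed by the perf core as
 *
 *	config = (PERF_COUNT_HW_CACHE_L1D         <<  0) |
 *		 (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
 *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
 *
 * and is then looked up in the per-CPU cache_map; entries marked
 * CACHE_OP_UNSUPPORTED are rejected with -ENOENT.
 */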

static int
armpmu_map_event(u64 config)
{
        int mapping = (*armpmu->event_map)[config];
        return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
}

static int
armpmu_map_raw_event(u64 config)
{
        return (int)(config & armpmu->raw_event_mask);
}

static int
armpmu_event_set_period(struct perf_event *event,
                        struct hw_perf_event *hwc,
                        int idx)
{
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;

        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (left > (s64)armpmu->max_period)
                left = armpmu->max_period;

        local64_set(&hwc->prev_count, (u64)-left);

        armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

        perf_event_update_userpage(event);

        return ret;
}
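
/*
 * The counter is programmed with the negated period so that it overflows
 * after 'left' increments. For example, with a 32-bit counter and
 * left = 0x1000, we write 0xfffff000; after 0x1000 events the counter
 * wraps to zero and raises the overflow interrupt.
 */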

static u64
armpmu_event_update(struct perf_event *event,
                    struct hw_perf_event *hwc,
                    int idx, int overflow)
{
        u64 delta, prev_raw_count, new_raw_count;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = armpmu->read_counter(idx);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        new_raw_count &= armpmu->max_period;
        prev_raw_count &= armpmu->max_period;

        if (overflow)
                delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
        else
                delta = new_raw_count - prev_raw_count;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}
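
/*
 * Worked example of the overflow case above, for a 32-bit counter
 * (max_period = 0xffffffff): if prev = 0xfffffff0 and the counter has
 * wrapped to new = 0x10, then
 *
 *	delta = 0xffffffff - 0xfffffff0 + 0x10 + 1 = 0x20
 *
 * i.e. exactly the 0x20 events counted across the wrap.
 */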

static void
armpmu_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        /* Don't read disabled counters! */
        if (hwc->idx < 0)
                return;

        armpmu_event_update(event, hwc, hwc->idx, 0);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to update the counter, so ignore
         * PERF_EF_UPDATE, see comments in armpmu_start().
         */
        if (!(hwc->state & PERF_HES_STOPPED)) {
                armpmu->disable(hwc, hwc->idx);
                barrier(); /* why? */
                armpmu_event_update(event, hwc, hwc->idx, 0);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
}

static void
armpmu_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to reprogram the period, so ignore
         * PERF_EF_RELOAD, see the comment below.
         */
        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        hwc->state = 0;
        /*
         * Set the period again. Some counters can't be stopped, so when we
         * were stopped we simply disabled the IRQ source and the counter
         * may have been left counting. If we don't do this step then we may
         * get an interrupt too soon or *way* too late if the overflow has
         * happened since disabling.
         */
        armpmu_event_set_period(event, hwc, hwc->idx);
        armpmu->enable(hwc, hwc->idx);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        WARN_ON(idx < 0);

        armpmu_stop(event, PERF_EF_UPDATE);
        cpuc->events[idx] = NULL;
        clear_bit(idx, cpuc->used_mask);

        perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int err = 0;

        perf_pmu_disable(event->pmu);

        /* If we don't have a space for the counter then finish early. */
        idx = armpmu->get_event_idx(cpuc, hwc);
        if (idx < 0) {
                err = idx;
                goto out;
        }

        /*
         * If there is an event in the counter we are going to use then make
         * sure it is disabled.
         */
        event->hw.idx = idx;
        armpmu->disable(hwc, idx);
        cpuc->events[idx] = event;

        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        if (flags & PERF_EF_START)
                armpmu_start(event, PERF_EF_RELOAD);

        /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);

out:
        perf_pmu_enable(event->pmu);
        return err;
}

static struct pmu pmu;

static int
validate_event(struct cpu_hw_events *cpuc,
               struct perf_event *event)
{
        struct hw_perf_event fake_event = event->hw;
        struct pmu *leader_pmu = event->group_leader->pmu;

        if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
                return 1;

        return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
        struct perf_event *sibling, *leader = event->group_leader;
        struct cpu_hw_events fake_pmu;

        memset(&fake_pmu, 0, sizeof(fake_pmu));

        if (!validate_event(&fake_pmu, leader))
                return -ENOSPC;

        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(&fake_pmu, sibling))
                        return -ENOSPC;
        }

        if (!validate_event(&fake_pmu, event))
                return -ENOSPC;

        return 0;
}
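
/*
 * For example, on a Cortex-A8 (cycle counter + 4 event counters, see the
 * comment near ARMPMU_MAX_HWEVENTS) a group of six hardware events can
 * never be scheduled simultaneously, so validate_group() rejects it with
 * -ENOSPC at event creation time rather than failing silently later.
 */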

static irqreturn_t armpmu_platform_irq(int irq, void *dev)
{
        struct arm_pmu_platdata *plat = dev_get_platdata(&pmu_device->dev);

        return plat->handle_irq(irq, dev, armpmu->handle_irq);
}

static void
armpmu_release_hardware(void)
{
        int i, irq, irqs;

        irqs = min(pmu_device->num_resources, num_possible_cpus());

        for (i = 0; i < irqs; ++i) {
                if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
                        continue;
                irq = platform_get_irq(pmu_device, i);
                if (irq >= 0)
                        free_irq(irq, NULL);
        }

        release_pmu(ARM_PMU_DEVICE_CPU);
}

static int
armpmu_reserve_hardware(void)
{
        struct arm_pmu_platdata *plat;
        irq_handler_t handle_irq;
        int i, err, irq, irqs;

        err = reserve_pmu(ARM_PMU_DEVICE_CPU);
        if (err) {
                pr_warning("unable to reserve pmu\n");
                return err;
        }

        plat = dev_get_platdata(&pmu_device->dev);
        if (plat && plat->handle_irq)
                handle_irq = armpmu_platform_irq;
        else
                handle_irq = armpmu->handle_irq;

        irqs = min(pmu_device->num_resources, num_possible_cpus());
        if (irqs < 1) {
                pr_err("no irqs for PMUs defined\n");
                return -ENODEV;
        }

        for (i = 0; i < irqs; ++i) {
                irq = platform_get_irq(pmu_device, i);
                if (irq < 0)
                        continue;

                /*
                 * If we have a single PMU interrupt that we can't shift,
                 * assume that we're running on a uniprocessor machine and
                 * continue. Otherwise, continue without this interrupt.
                 */
                if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
                        pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
                                   irq, i);
                        continue;
                }

                err = request_irq(irq, handle_irq,
                                  IRQF_DISABLED | IRQF_NOBALANCING,
                                  "arm-pmu", NULL);
                if (err) {
                        pr_err("unable to request IRQ%d for ARM PMU counters\n",
                               irq);
                        armpmu_release_hardware();
                        return err;
                }

                cpumask_set_cpu(i, &armpmu->active_irqs);
        }

        return 0;
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);

static void
hw_perf_event_destroy(struct perf_event *event)
{
        if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
                armpmu_release_hardware();
                mutex_unlock(&pmu_reserve_mutex);
        }
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
        return attr->exclude_idle || attr->exclude_user ||
               attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int mapping, err;

        /* Decode the generic type into an ARM event identifier. */
        if (PERF_TYPE_HARDWARE == event->attr.type) {
                mapping = armpmu_map_event(event->attr.config);
        } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
                mapping = armpmu_map_cache_event(event->attr.config);
        } else if (PERF_TYPE_RAW == event->attr.type) {
                mapping = armpmu_map_raw_event(event->attr.config);
        } else {
                pr_debug("event type %x not supported\n", event->attr.type);
                return -EOPNOTSUPP;
        }

        if (mapping < 0) {
                pr_debug("event %x:%llx not supported\n", event->attr.type,
                         event->attr.config);
                return mapping;
        }

        /*
         * We don't assign an index until we actually place the event onto
         * hardware. Use -1 to signify that we haven't decided where to put it
         * yet. For SMP systems, each core has its own PMU so we can't do any
         * clever allocation or constraints checking at this point.
         */
        hwc->idx = -1;
        hwc->config_base = 0;
        hwc->config = 0;
        hwc->event_base = 0;

        /*
         * Check whether we need to exclude the counter from certain modes.
         */
        if ((!armpmu->set_event_filter ||
             armpmu->set_event_filter(hwc, &event->attr)) &&
             event_requires_mode_exclusion(&event->attr)) {
                pr_debug("ARM performance counters do not support mode exclusion\n");
                return -EPERM;
        }

        /*
         * Store the event encoding into the config_base field.
         */
        hwc->config_base |= (unsigned long)mapping;

        if (!hwc->sample_period) {
                hwc->sample_period = armpmu->max_period;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }

        err = 0;
        if (event->group_leader != event) {
                err = validate_group(event);
                if (err)
                        return -EINVAL;
        }

        return err;
}

static int armpmu_event_init(struct perf_event *event)
{
        int err = 0;

        switch (event->attr.type) {
        case PERF_TYPE_RAW:
        case PERF_TYPE_HARDWARE:
        case PERF_TYPE_HW_CACHE:
                break;
        default:
                return -ENOENT;
        }

        event->destroy = hw_perf_event_destroy;

        if (!atomic_inc_not_zero(&active_events)) {
                mutex_lock(&pmu_reserve_mutex);
                if (atomic_read(&active_events) == 0)
                        err = armpmu_reserve_hardware();
                if (!err)
                        atomic_inc(&active_events);
                mutex_unlock(&pmu_reserve_mutex);
        }

        if (err)
                return err;

        err = __hw_perf_event_init(event);
        if (err)
                hw_perf_event_destroy(event);

        return err;
}

static void armpmu_enable(struct pmu *pmu)
{
        /* Enable all of the perf events on hardware. */
        int idx, enabled = 0;
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        for (idx = 0; idx < armpmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];

                if (!event)
                        continue;

                armpmu->enable(&event->hw, idx);
                enabled = 1;
        }

        if (enabled)
                armpmu->start();
}

static void armpmu_disable(struct pmu *pmu)
{
        armpmu->stop();
}

static struct pmu pmu = {
        .pmu_enable     = armpmu_enable,
        .pmu_disable    = armpmu_disable,
        .event_init     = armpmu_event_init,
        .add            = armpmu_add,
        .del            = armpmu_del,
        .start          = armpmu_start,
        .stop           = armpmu_stop,
        .read           = armpmu_read,
};

/* Include the PMU-specific implementations. */
#include "perf_event_xscale.c"
#include "perf_event_v6.c"
#include "perf_event_v7.c"

/*
 * Ensure the PMU has sane values out of reset.
 * This requires SMP to be available, so exists as a separate initcall.
 */
static int __init
armpmu_reset(void)
{
        if (armpmu && armpmu->reset)
                return on_each_cpu(armpmu->reset, NULL, 1);
        return 0;
}
arch_initcall(armpmu_reset);

/*
 * PMU platform driver and devicetree bindings.
 */
static struct of_device_id armpmu_of_device_ids[] = {
        {.compatible = "arm,cortex-a9-pmu"},
        {.compatible = "arm,cortex-a8-pmu"},
        {.compatible = "arm,arm1136-pmu"},
        {.compatible = "arm,arm1176-pmu"},
        {},
};

static struct platform_device_id armpmu_plat_device_ids[] = {
        {.name = "arm-pmu"},
        {},
};

static int __devinit armpmu_device_probe(struct platform_device *pdev)
{
        pmu_device = pdev;
        return 0;
}

static struct platform_driver armpmu_driver = {
        .driver         = {
                .name   = "arm-pmu",
                .of_match_table = armpmu_of_device_ids,
        },
        .probe          = armpmu_device_probe,
        .id_table       = armpmu_plat_device_ids,
};

static int __init register_pmu_driver(void)
{
        return platform_driver_register(&armpmu_driver);
}
device_initcall(register_pmu_driver);
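
/*
 * An illustrative devicetree node matching the bindings above (the
 * interrupt numbers are board-specific and shown only as an example):
 *
 *	pmu {
 *		compatible = "arm,cortex-a9-pmu";
 *		interrupts = <100 101>;
 *	};
 */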

/*
 * CPU PMU identification and registration.
 */
static int __init
init_hw_perf_events(void)
{
        unsigned long cpuid = read_cpuid_id();
        unsigned long implementor = (cpuid & 0xFF000000) >> 24;
        unsigned long part_number = (cpuid & 0xFFF0);
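
        /*
         * Example decode: a Cortex-A9 r2p2 reports a MIDR of 0x412fc092,
         * so implementor = 0x41 (ARM Ltd) and the masked, unshifted part
         * number (cpuid & 0xFFF0) = 0xC090, matching the case below.
         */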

        /* ARM Ltd CPUs. */
        if (0x41 == implementor) {
                switch (part_number) {
                case 0xB360:    /* ARM1136 */
                case 0xB560:    /* ARM1156 */
                case 0xB760:    /* ARM1176 */
                        armpmu = armv6pmu_init();
                        break;
                case 0xB020:    /* ARM11mpcore */
                        armpmu = armv6mpcore_pmu_init();
                        break;
                case 0xC080:    /* Cortex-A8 */
                        armpmu = armv7_a8_pmu_init();
                        break;
                case 0xC090:    /* Cortex-A9 */
                        armpmu = armv7_a9_pmu_init();
                        break;
                case 0xC050:    /* Cortex-A5 */
                        armpmu = armv7_a5_pmu_init();
                        break;
                case 0xC0F0:    /* Cortex-A15 */
                        armpmu = armv7_a15_pmu_init();
                        break;
                }
        /* Intel CPUs [xscale]. */
        } else if (0x69 == implementor) {
                part_number = (cpuid >> 13) & 0x7;
                switch (part_number) {
                case 1:
                        armpmu = xscale1pmu_init();
                        break;
                case 2:
                        armpmu = xscale2pmu_init();
                        break;
                }
        }

        if (armpmu) {
                pr_info("enabled with %s PMU driver, %d counters available\n",
                        armpmu->name, armpmu->num_events);
                perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
        } else {
                pr_info("no hardware support available\n");
        }

        return 0;
}
early_initcall(init_hw_perf_events);
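
/*
 * Once registered, the counters are driven through the normal perf
 * syscall interface, e.g. from userspace:
 *
 *	perf stat -e cycles,instructions ./some_program
 *	perf record -g ./some_program     (exercises the callchain code below)
 *
 * (Illustrative commands only; they assume the perf tool is available.)
 */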

/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
        struct frame_tail __user *fp;
        unsigned long sp;
        unsigned long lr;
} __attribute__((packed));
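
/*
 * Concretely: the saved {fp, sp, lr} words of an APCS frame end at the
 * address held in the fp register, so the tail of the current frame is
 *
 *	tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 *
 * which is exactly how perf_callchain_user() below seeds the walk.
 */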

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
               struct perf_callchain_entry *entry)
{
        struct frame_tail buftail;

        /* Also check accessibility of one struct frame_tail beyond */
        if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
                return NULL;
        if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
                return NULL;

        perf_callchain_store(entry, buftail.lr);

        /*
         * Frame pointers should strictly progress back up the stack
         * (towards higher addresses).
         */
        if (tail + 1 >= buftail.fp)
                return NULL;

        return buftail.fp - 1;
}

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
        struct frame_tail __user *tail;

        tail = (struct frame_tail __user *)regs->ARM_fp - 1;

        while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
               tail && !((unsigned long)tail & 0x3))
                tail = user_backtrace(tail, entry);
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
                void *data)
{
        struct perf_callchain_entry *entry = data;
        perf_callchain_store(entry, fr->pc);
        return 0;
}

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
        struct stackframe fr;

        fr.fp = regs->ARM_fp;
        fr.sp = regs->ARM_sp;
        fr.lr = regs->ARM_lr;
        fr.pc = regs->ARM_pc;
        walk_stackframe(&fr, callchain_trace, entry);
}