4 * ARM performance counter support.
6 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
8 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
9 * 2010 (c) MontaVista Software, LLC.
11 * This code is based on the sparc64 perf event code, which is in turn based
12 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
15 #define pr_fmt(fmt) "hw perfevents: " fmt
17 #include <linux/interrupt.h>
18 #include <linux/kernel.h>
19 #include <linux/perf_event.h>
20 #include <linux/spinlock.h>
21 #include <linux/uaccess.h>
23 #include <asm/cputype.h>
25 #include <asm/irq_regs.h>
27 #include <asm/stacktrace.h>
29 static const struct pmu_irqs *pmu_irqs;
32 * Hardware lock to serialize accesses to PMU registers. Needed for the
33 * read/modify/write sequences.
35 DEFINE_SPINLOCK(pmu_lock);
38 * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
39 * another platform that supports more, we need to increase this to be the
40 * largest of all platforms.
42 * ARMv7 supports up to 32 events:
43 * cycle counter CCNT + 31 event counters CNT0..30.
44 * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
46 #define ARMPMU_MAX_HWEVENTS 33
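/*
 * Worked example of the sizing above: index 0 of the per-CPU arrays is
 * left unused, the ARMv7 cycle counter CCNT takes index 1 and the event
 * counters CNT0..CNT30 take indices 2..32, so 1 + 1 + 31 = 33 slots are
 * required.
 */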
48 /* The events for a given CPU. */
49 struct cpu_hw_events {
51 * The events that are active on the CPU for the given index. Index 0
54 struct perf_event *events[ARMPMU_MAX_HWEVENTS];
57 * A 1 bit for an index indicates that the counter is being used for
58 * an event. A 0 means that the counter can be used.
60 unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
63 * A 1 bit for an index indicates that the counter is actively being
66 unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
68 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
72 irqreturn_t (*handle_irq)(int irq_num, void *dev);
73 void (*enable)(struct hw_perf_event *evt, int idx);
74 void (*disable)(struct hw_perf_event *evt, int idx);
75 int (*event_map)(int evt);
76 u64 (*raw_event)(u64);
77 int (*get_event_idx)(struct cpu_hw_events *cpuc,
78 struct hw_perf_event *hwc);
79 u32 (*read_counter)(int idx);
80 void (*write_counter)(int idx, u32 val);
87 /* Set at runtime when we know what CPU type we are. */
88 static const struct arm_pmu *armpmu;
90 #define HW_OP_UNSUPPORTED 0xFFFF
93 PERF_COUNT_HW_CACHE_##_x
95 #define CACHE_OP_UNSUPPORTED 0xFFFF
97 static unsigned armpmu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
98 [PERF_COUNT_HW_CACHE_OP_MAX]
99 [PERF_COUNT_HW_CACHE_RESULT_MAX];
102 armpmu_map_cache_event(u64 config)
104 unsigned int cache_type, cache_op, cache_result, ret;
106 cache_type = (config >> 0) & 0xff;
107 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
110 cache_op = (config >> 8) & 0xff;
111 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
114 cache_result = (config >> 16) & 0xff;
115 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
118 ret = (int)armpmu_perf_cache_map[cache_type][cache_op][cache_result];
120 if (ret == CACHE_OP_UNSUPPORTED)
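/*
 * Illustrative sketch of the encoding decoded above, mirroring the shifts
 * in armpmu_map_cache_event(): a PERF_TYPE_HW_CACHE config packs the
 * cache type in bits 0-7, the operation in bits 8-15 and the result in
 * bits 16-23, so an L1 data-cache read miss would be requested as:
 *
 *	config = PERF_COUNT_HW_CACHE_L1D |
 *		 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */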
127 armpmu_event_set_period(struct perf_event *event,
128 struct hw_perf_event *hwc,
131 s64 left = atomic64_read(&hwc->period_left);
132 s64 period = hwc->sample_period;
135 if (unlikely(left <= -period)) {
137 atomic64_set(&hwc->period_left, left);
138 hwc->last_period = period;
142 if (unlikely(left <= 0)) {
144 atomic64_set(&hwc->period_left, left);
145 hwc->last_period = period;
149 if (left > (s64)armpmu->max_period)
150 left = armpmu->max_period;
152 atomic64_set(&hwc->prev_count, (u64)-left);
154 armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
156 perf_event_update_userpage(event);
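/*
 * Worked example of the programming trick above (a sketch assuming a
 * 32-bit counter and a max_period of 0xffffffff): with a sample_period of
 * 1000, left is 1000 and the counter is programmed with
 * (u64)-1000 & 0xffffffff == 0xfffffc18, so it wraps and signals its
 * overflow interrupt after exactly 1000 events.
 */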
162 armpmu_event_update(struct perf_event *event,
163 struct hw_perf_event *hwc,
167 s64 prev_raw_count, new_raw_count;
171 prev_raw_count = atomic64_read(&hwc->prev_count);
172 new_raw_count = armpmu->read_counter(idx);
174 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
175 new_raw_count) != prev_raw_count)
178 delta = (new_raw_count << shift) - (prev_raw_count << shift);
181 atomic64_add(delta, &event->count);
182 atomic64_sub(delta, &hwc->period_left);
184 return new_raw_count;
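/*
 * Illustrative arithmetic for the delta computation above (the same
 * wrap-around trick used by the x86/sparc64 code this file is based on):
 * shifting both raw counts up into the top 32 bits before subtracting
 * makes the result insensitive to 32-bit counter wrap-around, so a
 * counter that moved from 0xfffffff0 to 0x00000010 yields a delta of
 * 0x20 (32 events) rather than a huge negative number.
 */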
188 armpmu_disable(struct perf_event *event)
190 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
191 struct hw_perf_event *hwc = &event->hw;
196 clear_bit(idx, cpuc->active_mask);
197 armpmu->disable(hwc, idx);
201 armpmu_event_update(event, hwc, idx);
202 cpuc->events[idx] = NULL;
203 clear_bit(idx, cpuc->used_mask);
205 perf_event_update_userpage(event);
209 armpmu_read(struct perf_event *event)
211 struct hw_perf_event *hwc = &event->hw;
213 /* Don't read disabled counters! */
217 armpmu_event_update(event, hwc, hwc->idx);
221 armpmu_unthrottle(struct perf_event *event)
223 struct hw_perf_event *hwc = &event->hw;
226 * Set the period again. Some counters can't be stopped, so when we
227 * were throttled we simply disabled the IRQ source and the counter
228 * may have been left counting. If we don't do this step then we may
229 * get an interrupt too soon or *way* too late if the overflow has
230 * happened since disabling.
232 armpmu_event_set_period(event, hwc, hwc->idx);
233 armpmu->enable(hwc, hwc->idx);
237 armpmu_enable(struct perf_event *event)
239 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
240 struct hw_perf_event *hwc = &event->hw;
244 /* If we don't have a space for the counter then finish early. */
245 idx = armpmu->get_event_idx(cpuc, hwc);
252 * If there is an event in the counter we are going to use then make
253 * sure it is disabled.
256 armpmu->disable(hwc, idx);
257 cpuc->events[idx] = event;
258 set_bit(idx, cpuc->active_mask);
260 /* Set the period for the event. */
261 armpmu_event_set_period(event, hwc, idx);
263 /* Enable the event. */
264 armpmu->enable(hwc, idx);
266 /* Propagate our changes to the userspace mapping. */
267 perf_event_update_userpage(event);
273 static struct pmu pmu = {
274 .enable = armpmu_enable,
275 .disable = armpmu_disable,
276 .unthrottle = armpmu_unthrottle,
281 validate_event(struct cpu_hw_events *cpuc,
282 struct perf_event *event)
284 struct hw_perf_event fake_event = event->hw;
286 if (event->pmu && event->pmu != &pmu)
289 return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
293 validate_group(struct perf_event *event)
295 struct perf_event *sibling, *leader = event->group_leader;
296 struct cpu_hw_events fake_pmu;
298 memset(&fake_pmu, 0, sizeof(fake_pmu));
300 if (!validate_event(&fake_pmu, leader))
303 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
304 if (!validate_event(&fake_pmu, sibling))
308 if (!validate_event(&fake_pmu, event))
315 armpmu_reserve_hardware(void)
320 pmu_irqs = reserve_pmu();
321 if (IS_ERR(pmu_irqs)) {
322 pr_warning("unable to reserve pmu\n");
323 return PTR_ERR(pmu_irqs);
328 if (pmu_irqs->num_irqs < 1) {
329 pr_err("no irqs for PMUs defined\n");
333 for (i = 0; i < pmu_irqs->num_irqs; ++i) {
334 err = request_irq(pmu_irqs->irqs[i], armpmu->handle_irq,
335 IRQF_DISABLED, "armpmu", NULL);
337 pr_warning("unable to request IRQ%d for ARM "
338 "perf counters\n", pmu_irqs->irqs[i]);
344 for (i = i - 1; i >= 0; --i)
345 free_irq(pmu_irqs->irqs[i], NULL);
346 release_pmu(pmu_irqs);
354 armpmu_release_hardware(void)
358 for (i = pmu_irqs->num_irqs - 1; i >= 0; --i)
359 free_irq(pmu_irqs->irqs[i], NULL);
362 release_pmu(pmu_irqs);
366 static atomic_t active_events = ATOMIC_INIT(0);
367 static DEFINE_MUTEX(pmu_reserve_mutex);
370 hw_perf_event_destroy(struct perf_event *event)
372 if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
373 armpmu_release_hardware();
374 mutex_unlock(&pmu_reserve_mutex);
379 __hw_perf_event_init(struct perf_event *event)
381 struct hw_perf_event *hwc = &event->hw;
384 /* Decode the generic type into an ARM event identifier. */
385 if (PERF_TYPE_HARDWARE == event->attr.type) {
386 mapping = armpmu->event_map(event->attr.config);
387 } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
388 mapping = armpmu_map_cache_event(event->attr.config);
389 } else if (PERF_TYPE_RAW == event->attr.type) {
390 mapping = armpmu->raw_event(event->attr.config);
392 pr_debug("event type %x not supported\n", event->attr.type);
397 pr_debug("event %x:%llx not supported\n", event->attr.type,
403 * Check whether we need to exclude the counter from certain modes.
404 * The ARM performance counters are on all of the time so if someone
405 * has asked us for some excludes then we have to fail.
407 if (event->attr.exclude_kernel || event->attr.exclude_user ||
408 event->attr.exclude_hv || event->attr.exclude_idle) {
409 pr_debug("ARM performance counters do not support "
415 * We don't assign an index until we actually place the event onto
416 * hardware. Use -1 to signify that we haven't decided where to put it
417 yet. For SMP systems, each core has its own PMU so we can't do any
418 * clever allocation or constraints checking at this point.
423 * Store the event encoding into the config_base field. config and
424 * event_base are unused as the only 2 things we need to know are
425 * the event mapping and the counter to use. The counter to use is
426 also the index and the config_base is the event type.
428 hwc->config_base = (unsigned long)mapping;
432 if (!hwc->sample_period) {
433 hwc->sample_period = armpmu->max_period;
434 hwc->last_period = hwc->sample_period;
435 atomic64_set(&hwc->period_left, hwc->sample_period);
439 if (event->group_leader != event) {
440 err = validate_group(event);
449 hw_perf_event_init(struct perf_event *event)
454 return ERR_PTR(-ENODEV);
456 event->destroy = hw_perf_event_destroy;
458 if (!atomic_inc_not_zero(&active_events)) {
459 if (atomic_read(&active_events) > perf_max_events) {
460 atomic_dec(&active_events);
461 return ERR_PTR(-ENOSPC);
464 mutex_lock(&pmu_reserve_mutex);
465 if (atomic_read(&active_events) == 0) {
466 err = armpmu_reserve_hardware();
470 atomic_inc(&active_events);
471 mutex_unlock(&pmu_reserve_mutex);
477 err = __hw_perf_event_init(event);
479 hw_perf_event_destroy(event);
481 return err ? ERR_PTR(err) : &pmu;
487 /* Enable all of the perf events on hardware. */
489 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
494 for (idx = 0; idx <= armpmu->num_events; ++idx) {
495 struct perf_event *event = cpuc->events[idx];
500 armpmu->enable(&event->hw, idx);
507 hw_perf_disable(void)
514 * ARMv6 Performance counter handling code.
516 * ARMv6 has 2 configurable performance counters and a single cycle counter.
517 * They all share a single reset bit but can be written to zero so we can use
520 * The counters can't be individually enabled or disabled so when we remove
521 * one event and replace it with another we could get spurious counts from the
522 * wrong event. However, we can take advantage of the fact that the
523 * performance counters can export events to the event bus, and the event bus
524 * itself can be monitored. This requires that we *don't* export the events to
525 * the event bus. The procedure for disabling a configurable counter is:
526 * - change the counter to count the ETMEXTOUT[0] signal (0x20). This
527 * effectively stops the counter from counting.
528 * - disable the counter's interrupt generation (each counter has its
529 * own interrupt enable bit).
530 * Once stopped, the counter value can be written as 0 to reset.
532 * To enable a counter:
533 * - enable the counter's interrupt generation.
534 * - set the new event type.
536 * Note: the dedicated cycle counter only counts cycles and can't be
537 * enabled/disabled independently of the others. When we want to disable the
538 * cycle counter, we have to just disable the interrupt reporting and start
539 * ignoring that counter. When re-enabling, we have to reset the value and
540 * enable the interrupt.
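/*
 * A compact sketch of the disable procedure described above for counter 0,
 * written in terms of the PMCR bit definitions that appear further down
 * (this is what armv6pmu_disable_event() does; it is repeated here purely
 * to illustrate the two steps):
 *
 *	val = armv6_pmcr_read();
 *	val &= ~(ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK);
 *	val |= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
 *	armv6_pmcr_write(val);
 */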
543 enum armv6_perf_types {
544 ARMV6_PERFCTR_ICACHE_MISS = 0x0,
545 ARMV6_PERFCTR_IBUF_STALL = 0x1,
546 ARMV6_PERFCTR_DDEP_STALL = 0x2,
547 ARMV6_PERFCTR_ITLB_MISS = 0x3,
548 ARMV6_PERFCTR_DTLB_MISS = 0x4,
549 ARMV6_PERFCTR_BR_EXEC = 0x5,
550 ARMV6_PERFCTR_BR_MISPREDICT = 0x6,
551 ARMV6_PERFCTR_INSTR_EXEC = 0x7,
552 ARMV6_PERFCTR_DCACHE_HIT = 0x9,
553 ARMV6_PERFCTR_DCACHE_ACCESS = 0xA,
554 ARMV6_PERFCTR_DCACHE_MISS = 0xB,
555 ARMV6_PERFCTR_DCACHE_WBACK = 0xC,
556 ARMV6_PERFCTR_SW_PC_CHANGE = 0xD,
557 ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF,
558 ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10,
559 ARMV6_PERFCTR_LSU_FULL_STALL = 0x11,
560 ARMV6_PERFCTR_WBUF_DRAINED = 0x12,
561 ARMV6_PERFCTR_CPU_CYCLES = 0xFF,
562 ARMV6_PERFCTR_NOP = 0x20,
565 enum armv6_counters {
566 ARMV6_CYCLE_COUNTER = 1,
572 * The hardware events that we support. We do support cache operations but
573 * we have Harvard caches and no way to combine instruction and data
574 * accesses/misses in hardware.
576 static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
577 [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
578 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
579 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
580 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
581 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
582 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
583 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
586 static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
587 [PERF_COUNT_HW_CACHE_OP_MAX]
588 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
591 * The performance counters don't differentiate between read
592 * and write accesses/misses so this isn't strictly correct,
593 * but it's the best we can do. Writes and reads get
597 [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
598 [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
601 [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
602 [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
605 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
606 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
611 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
612 [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
615 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
616 [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
619 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
620 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
625 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
626 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
629 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
630 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
633 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
634 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
639 * The ARM performance counters can count micro DTLB misses,
640 * micro ITLB misses and main TLB misses. There isn't an event
641 * for TLB misses, so use the micro misses here and if users
642 * want the main TLB misses they can use a raw counter.
645 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
646 [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
649 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
650 [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
653 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
654 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
659 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
660 [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
663 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
664 [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
667 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
668 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
673 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
674 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
677 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
678 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
681 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
682 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
687 enum armv6mpcore_perf_types {
688 ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0,
689 ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1,
690 ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2,
691 ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3,
692 ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4,
693 ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5,
694 ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6,
695 ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7,
696 ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8,
697 ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
698 ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB,
699 ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
700 ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD,
701 ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
702 ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF,
703 ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10,
704 ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
705 ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12,
706 ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13,
707 ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF,
711 * The hardware events that we support. We do support cache operations but
712 * we have Harvard caches and no way to combine instruction and data
713 * accesses/misses in hardware.
715 static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
716 [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
717 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
718 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
719 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
720 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
721 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
722 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
725 static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
726 [PERF_COUNT_HW_CACHE_OP_MAX]
727 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
731 ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
733 ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
737 ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
739 ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
742 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
743 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
748 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
749 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
752 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
753 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
756 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
757 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
762 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
763 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
766 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
767 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
770 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
771 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
776 * The ARM performance counters can count micro DTLB misses,
777 * micro ITLB misses and main TLB misses. There isn't an event
778 * for TLB misses, so use the micro misses here and if users
779 * want the main TLB misses they can use a raw counter.
782 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
783 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
786 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
787 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
790 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
791 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
796 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
797 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
800 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
801 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
804 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
805 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
810 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
811 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
814 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
815 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
818 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
819 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
824 static inline unsigned long
825 armv6_pmcr_read(void)
828 asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val));
833 armv6_pmcr_write(unsigned long val)
835 asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val));
838 #define ARMV6_PMCR_ENABLE (1 << 0)
839 #define ARMV6_PMCR_CTR01_RESET (1 << 1)
840 #define ARMV6_PMCR_CCOUNT_RESET (1 << 2)
841 #define ARMV6_PMCR_CCOUNT_DIV (1 << 3)
842 #define ARMV6_PMCR_COUNT0_IEN (1 << 4)
843 #define ARMV6_PMCR_COUNT1_IEN (1 << 5)
844 #define ARMV6_PMCR_CCOUNT_IEN (1 << 6)
845 #define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8)
846 #define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9)
847 #define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10)
848 #define ARMV6_PMCR_EVT_COUNT0_SHIFT 20
849 #define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
850 #define ARMV6_PMCR_EVT_COUNT1_SHIFT 12
851 #define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)
853 #define ARMV6_PMCR_OVERFLOWED_MASK \
854 (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
855 ARMV6_PMCR_CCOUNT_OVERFLOW)
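/*
 * Illustrative worked value (a sketch, not used by the driver): a PMCR
 * with the PMU globally enabled and counter 0 counting event 0x7
 * (instruction executed) with its interrupt enabled would read as
 * ARMV6_PMCR_ENABLE | ARMV6_PMCR_COUNT0_IEN |
 * (0x7 << ARMV6_PMCR_EVT_COUNT0_SHIFT) == 0x00700011.
 */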
858 armv6_pmcr_has_overflowed(unsigned long pmcr)
860 return (pmcr & ARMV6_PMCR_OVERFLOWED_MASK);
864 armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
865 enum armv6_counters counter)
869 if (ARMV6_CYCLE_COUNTER == counter)
870 ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
871 else if (ARMV6_COUNTER0 == counter)
872 ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
873 else if (ARMV6_COUNTER1 == counter)
874 ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
876 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
882 armv6pmu_read_counter(int counter)
884 unsigned long value = 0;
886 if (ARMV6_CYCLE_COUNTER == counter)
887 asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value));
888 else if (ARMV6_COUNTER0 == counter)
889 asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value));
890 else if (ARMV6_COUNTER1 == counter)
891 asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value));
893 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
899 armv6pmu_write_counter(int counter,
902 if (ARMV6_CYCLE_COUNTER == counter)
903 asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
904 else if (ARMV6_COUNTER0 == counter)
905 asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value));
906 else if (ARMV6_COUNTER1 == counter)
907 asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value));
909 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
913 armv6pmu_enable_event(struct hw_perf_event *hwc,
916 unsigned long val, mask, evt, flags;
918 if (ARMV6_CYCLE_COUNTER == idx) {
920 evt = ARMV6_PMCR_CCOUNT_IEN;
921 } else if (ARMV6_COUNTER0 == idx) {
922 mask = ARMV6_PMCR_EVT_COUNT0_MASK;
923 evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
924 ARMV6_PMCR_COUNT0_IEN;
925 } else if (ARMV6_COUNTER1 == idx) {
926 mask = ARMV6_PMCR_EVT_COUNT1_MASK;
927 evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
928 ARMV6_PMCR_COUNT1_IEN;
930 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
935 * Mask out the current event and set the counter to count the event
936 * that we're interested in.
938 spin_lock_irqsave(&pmu_lock, flags);
939 val = armv6_pmcr_read();
942 armv6_pmcr_write(val);
943 spin_unlock_irqrestore(&pmu_lock, flags);
947 armv6pmu_handle_irq(int irq_num,
950 unsigned long pmcr = armv6_pmcr_read();
951 struct perf_sample_data data;
952 struct cpu_hw_events *cpuc;
953 struct pt_regs *regs;
956 if (!armv6_pmcr_has_overflowed(pmcr))
959 regs = get_irq_regs();
962 * The interrupts are cleared by writing the overflow flags back to
963 * the control register. None of the other bits have any effect
964 * if they are rewritten, so write the whole value back.
966 armv6_pmcr_write(pmcr);
970 cpuc = &__get_cpu_var(cpu_hw_events);
971 for (idx = 0; idx <= armpmu->num_events; ++idx) {
972 struct perf_event *event = cpuc->events[idx];
973 struct hw_perf_event *hwc;
975 if (!test_bit(idx, cpuc->active_mask))
979 * We have a single interrupt for all counters. Check that
980 * each counter has overflowed before we process it.
982 if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
986 armpmu_event_update(event, hwc, idx);
987 data.period = event->hw.last_period;
988 if (!armpmu_event_set_period(event, hwc, idx))
991 if (perf_event_overflow(event, 0, &data, regs))
992 armpmu->disable(hwc, idx);
996 * Handle the pending perf events.
998 * Note: this call *must* be run with interrupts enabled. For
999 * platforms that can have the PMU interrupts raised as a PMI, this
1002 perf_event_do_pending();
1008 armv6pmu_start(void)
1010 unsigned long flags, val;
1012 spin_lock_irqsave(&pmu_lock, flags);
1013 val = armv6_pmcr_read();
1014 val |= ARMV6_PMCR_ENABLE;
1015 armv6_pmcr_write(val);
1016 spin_unlock_irqrestore(&pmu_lock, flags);
1022 unsigned long flags, val;
1024 spin_lock_irqsave(&pmu_lock, flags);
1025 val = armv6_pmcr_read();
1026 val &= ~ARMV6_PMCR_ENABLE;
1027 armv6_pmcr_write(val);
1028 spin_unlock_irqrestore(&pmu_lock, flags);
1032 armv6pmu_event_map(int config)
1034 int mapping = armv6_perf_map[config];
1035 if (HW_OP_UNSUPPORTED == mapping)
1036 mapping = -EOPNOTSUPP;
1041 armv6mpcore_pmu_event_map(int config)
1043 int mapping = armv6mpcore_perf_map[config];
1044 if (HW_OP_UNSUPPORTED == mapping)
1045 mapping = -EOPNOTSUPP;
1050 armv6pmu_raw_event(u64 config)
1052 return config & 0xff;
1056 armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
1057 struct hw_perf_event *event)
1059 /* Always place a cycle-count event on the cycle counter. */
1060 if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
1061 if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
1064 return ARMV6_CYCLE_COUNTER;
1067 * For anything other than a cycle counter, try and use
1068 * counter0 and counter1.
1070 if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) {
1071 return ARMV6_COUNTER1;
1074 if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) {
1075 return ARMV6_COUNTER0;
1078 /* The counters are all in use. */
1084 armv6pmu_disable_event(struct hw_perf_event *hwc,
1087 unsigned long val, mask, evt, flags;
1089 if (ARMV6_CYCLE_COUNTER == idx) {
1090 mask = ARMV6_PMCR_CCOUNT_IEN;
1092 } else if (ARMV6_COUNTER0 == idx) {
1093 mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
1094 evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
1095 } else if (ARMV6_COUNTER1 == idx) {
1096 mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
1097 evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
1099 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
1104 * Mask out the current event and set the counter to count the number
1105 * of ETM bus signal assertion cycles. The external reporting should
1106 * be disabled and so this should never increment.
1108 spin_lock_irqsave(&pmu_lock, flags);
1109 val = armv6_pmcr_read();
1112 armv6_pmcr_write(val);
1113 spin_unlock_irqrestore(&pmu_lock, flags);
1117 armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
1120 unsigned long val, mask, flags, evt = 0;
1122 if (ARMV6_CYCLE_COUNTER == idx) {
1123 mask = ARMV6_PMCR_CCOUNT_IEN;
1124 } else if (ARMV6_COUNTER0 == idx) {
1125 mask = ARMV6_PMCR_COUNT0_IEN;
1126 } else if (ARMV6_COUNTER1 == idx) {
1127 mask = ARMV6_PMCR_COUNT1_IEN;
1129 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
1134 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
1135 * simply disable the interrupt reporting.
1137 spin_lock_irqsave(&pmu_lock, flags);
1138 val = armv6_pmcr_read();
1141 armv6_pmcr_write(val);
1142 spin_unlock_irqrestore(&pmu_lock, flags);
1145 static const struct arm_pmu armv6pmu = {
1147 .handle_irq = armv6pmu_handle_irq,
1148 .enable = armv6pmu_enable_event,
1149 .disable = armv6pmu_disable_event,
1150 .event_map = armv6pmu_event_map,
1151 .raw_event = armv6pmu_raw_event,
1152 .read_counter = armv6pmu_read_counter,
1153 .write_counter = armv6pmu_write_counter,
1154 .get_event_idx = armv6pmu_get_event_idx,
1155 .start = armv6pmu_start,
1156 .stop = armv6pmu_stop,
1158 .max_period = (1LLU << 32) - 1,
1162 * ARMv6mpcore is almost identical to single core ARMv6 with the exception
1163 * that some of the events have different enumerations and that there is no
1164 * *hack* to stop the programmable counters. To stop the counters we simply
1165 * disable the interrupt reporting and update the event. When unthrottling we
1166 * reset the period and enable the interrupt reporting.
1168 static const struct arm_pmu armv6mpcore_pmu = {
1170 .handle_irq = armv6pmu_handle_irq,
1171 .enable = armv6pmu_enable_event,
1172 .disable = armv6mpcore_pmu_disable_event,
1173 .event_map = armv6mpcore_pmu_event_map,
1174 .raw_event = armv6pmu_raw_event,
1175 .read_counter = armv6pmu_read_counter,
1176 .write_counter = armv6pmu_write_counter,
1177 .get_event_idx = armv6pmu_get_event_idx,
1178 .start = armv6pmu_start,
1179 .stop = armv6pmu_stop,
1181 .max_period = (1LLU << 32) - 1,
1185 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
1187 * Copied from the ARMv6 code, with the low-level code inspired
1188 * by the ARMv7 OProfile code.
1190 * Cortex-A8 has up to 4 configurable performance counters and
1191 * a single cycle counter.
1192 * Cortex-A9 has up to 31 configurable performance counters and
1193 * a single cycle counter.
1195 * All counters can be enabled/disabled and IRQ masked separately. The cycle
1196 * counter and all 4 performance counters together can be reset separately.
1199 #define ARMV7_PMU_CORTEX_A8_NAME "ARMv7 Cortex-A8"
1201 #define ARMV7_PMU_CORTEX_A9_NAME "ARMv7 Cortex-A9"
1203 /* Common ARMv7 event types */
1204 enum armv7_perf_types {
1205 ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
1206 ARMV7_PERFCTR_IFETCH_MISS = 0x01,
1207 ARMV7_PERFCTR_ITLB_MISS = 0x02,
1208 ARMV7_PERFCTR_DCACHE_REFILL = 0x03,
1209 ARMV7_PERFCTR_DCACHE_ACCESS = 0x04,
1210 ARMV7_PERFCTR_DTLB_REFILL = 0x05,
1211 ARMV7_PERFCTR_DREAD = 0x06,
1212 ARMV7_PERFCTR_DWRITE = 0x07,
1214 ARMV7_PERFCTR_EXC_TAKEN = 0x09,
1215 ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
1216 ARMV7_PERFCTR_CID_WRITE = 0x0B,
1217 /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
1219 * - all branch instructions,
1220 * - instructions that explicitly write the PC,
1221 * - exception generating instructions.
1223 ARMV7_PERFCTR_PC_WRITE = 0x0C,
1224 ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
1225 ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F,
1226 ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
1227 ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
1229 ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12,
1231 ARMV7_PERFCTR_CPU_CYCLES = 0xFF
1234 /* ARMv7 Cortex-A8 specific event types */
1235 enum armv7_a8_perf_types {
1236 ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
1238 ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
1240 ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40,
1241 ARMV7_PERFCTR_L2_STORE_MERGED = 0x41,
1242 ARMV7_PERFCTR_L2_STORE_BUFF = 0x42,
1243 ARMV7_PERFCTR_L2_ACCESS = 0x43,
1244 ARMV7_PERFCTR_L2_CACH_MISS = 0x44,
1245 ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45,
1246 ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46,
1247 ARMV7_PERFCTR_MEMORY_REPLAY = 0x47,
1248 ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48,
1249 ARMV7_PERFCTR_L1_DATA_MISS = 0x49,
1250 ARMV7_PERFCTR_L1_INST_MISS = 0x4A,
1251 ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B,
1252 ARMV7_PERFCTR_L1_NEON_DATA = 0x4C,
1253 ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D,
1254 ARMV7_PERFCTR_L2_NEON = 0x4E,
1255 ARMV7_PERFCTR_L2_NEON_HIT = 0x4F,
1256 ARMV7_PERFCTR_L1_INST = 0x50,
1257 ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51,
1258 ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52,
1259 ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53,
1260 ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54,
1261 ARMV7_PERFCTR_OP_EXECUTED = 0x55,
1262 ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56,
1263 ARMV7_PERFCTR_CYCLES_INST = 0x57,
1264 ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58,
1265 ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59,
1266 ARMV7_PERFCTR_NEON_CYCLES = 0x5A,
1268 ARMV7_PERFCTR_PMU0_EVENTS = 0x70,
1269 ARMV7_PERFCTR_PMU1_EVENTS = 0x71,
1270 ARMV7_PERFCTR_PMU_EVENTS = 0x72,
1273 /* ARMv7 Cortex-A9 specific event types */
1274 enum armv7_a9_perf_types {
1275 ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40,
1276 ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41,
1277 ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42,
1279 ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50,
1280 ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51,
1282 ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60,
1283 ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61,
1284 ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
1285 ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63,
1286 ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64,
1287 ARMV7_PERFCTR_DATA_EVICTION = 0x65,
1288 ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66,
1289 ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67,
1290 ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68,
1292 ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,
1294 ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70,
1295 ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
1296 ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72,
1297 ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73,
1298 ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74,
1300 ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
1301 ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81,
1302 ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82,
1303 ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83,
1304 ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84,
1305 ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85,
1306 ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86,
1308 ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A,
1309 ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,
1311 ARMV7_PERFCTR_ISB_INST = 0x90,
1312 ARMV7_PERFCTR_DSB_INST = 0x91,
1313 ARMV7_PERFCTR_DMB_INST = 0x92,
1314 ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93,
1316 ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0,
1317 ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1,
1318 ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2,
1319 ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3,
1320 ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4,
1321 ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
1325 * Cortex-A8 HW events mapping
1327 * The hardware events that we support. We do support cache operations but
1328 * we have Harvard caches and no way to combine instruction and data
1329 * accesses/misses in hardware.
1331 static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
1332 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
1333 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
1334 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
1335 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
1336 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
1337 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1338 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
1341 static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
1342 [PERF_COUNT_HW_CACHE_OP_MAX]
1343 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1346 * The performance counters don't differentiate between read
1347 * and write accesses/misses so this isn't strictly correct,
1348 * but it's the best we can do. Writes and reads get
1352 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
1353 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
1356 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
1357 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
1359 [C(OP_PREFETCH)] = {
1360 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1361 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1366 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
1367 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
1370 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
1371 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
1373 [C(OP_PREFETCH)] = {
1374 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1375 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1380 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
1381 [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
1384 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
1385 [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
1387 [C(OP_PREFETCH)] = {
1388 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1389 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1394 * Only ITLB misses and DTLB refills are supported.
1395 * If users want the DTLB refill misses, a raw counter
1399 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1400 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
1403 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1404 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
1406 [C(OP_PREFETCH)] = {
1407 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1408 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1413 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1414 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
1417 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1418 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
1420 [C(OP_PREFETCH)] = {
1421 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1422 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1427 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
1429 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1432 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
1434 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1436 [C(OP_PREFETCH)] = {
1437 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1438 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1444 * Cortex-A9 HW events mapping
1446 static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
1447 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
1448 [PERF_COUNT_HW_INSTRUCTIONS] =
1449 ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
1450 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
1451 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
1452 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
1453 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1454 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
1457 static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
1458 [PERF_COUNT_HW_CACHE_OP_MAX]
1459 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1462 * The performance counters don't differentiate between read
1463 * and write accesses/misses so this isn't strictly correct,
1464 * but it's the best we can do. Writes and reads get
1468 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
1469 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
1472 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
1473 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
1475 [C(OP_PREFETCH)] = {
1476 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1477 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1482 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1483 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
1486 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1487 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
1489 [C(OP_PREFETCH)] = {
1490 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1491 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1496 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1497 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1500 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1501 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1503 [C(OP_PREFETCH)] = {
1504 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1505 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1510 * Only ITLB misses and DTLB refills are supported.
1511 * If users want the DTLB refill misses, a raw counter
1515 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1516 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
1519 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1520 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
1522 [C(OP_PREFETCH)] = {
1523 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1524 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1529 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1530 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
1533 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1534 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
1536 [C(OP_PREFETCH)] = {
1537 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1538 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1543 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
1545 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1548 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
1550 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1552 [C(OP_PREFETCH)] = {
1553 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1554 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1560 * Perf Events counters
1562 enum armv7_counters {
1563 ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */
1564 ARMV7_COUNTER0 = 2, /* First event counter */
1568 * The cycle counter is ARMV7_CYCLE_COUNTER.
1569 * The first event counter is ARMV7_COUNTER0.
1570 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
1572 #define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1)
1575 * ARMv7 low level PMNC access
1579 * Per-CPU PMNC: config reg
1581 #define ARMV7_PMNC_E (1 << 0) /* Enable all counters */
1582 #define ARMV7_PMNC_P (1 << 1) /* Reset all counters */
1583 #define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */
1584 #define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
1585 #define ARMV7_PMNC_X (1 << 4) /* Export to ETM */
1586 #define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug */
1587 #define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */
1588 #define ARMV7_PMNC_N_MASK 0x1f
1589 #define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */
1592 * Available counters
1594 #define ARMV7_CNT0 0 /* First event counter */
1595 #define ARMV7_CCNT 31 /* Cycle counter */
1597 /* Perf Event to low level counters mapping */
1598 #define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0)
1601 * CNTENS: counters enable reg
1603 #define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1604 #define ARMV7_CNTENS_C (1 << ARMV7_CCNT)
1607 * CNTENC: counters disable reg
1609 #define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1610 #define ARMV7_CNTENC_C (1 << ARMV7_CCNT)
1613 * INTENS: counters overflow interrupt enable reg
1615 #define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1616 #define ARMV7_INTENS_C (1 << ARMV7_CCNT)
1619 * INTENC: counters overflow interrupt disable reg
1621 #define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1622 #define ARMV7_INTENC_C (1 << ARMV7_CCNT)
1625 * EVTSEL: Event selection reg
1627 #define ARMV7_EVTSEL_MASK 0x7f /* Mask for writable bits */
1630 * SELECT: Counter selection reg
1632 #define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */
1635 * FLAG: counters overflow flag status reg
1637 #define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1638 #define ARMV7_FLAG_C (1 << ARMV7_CCNT)
1639 #define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
1640 #define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
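/*
 * Worked example of the index mapping used by the macros above: with
 * ARMV7_EVENT_CNT_TO_CNTx == ARMV7_COUNTER0 - ARMV7_CNT0 == 2, perf
 * counter index ARMV7_COUNTER0 (2) maps to hardware counter CNT0 and so
 * to bit 0 of CNTENS/CNTENC/INTENS/INTENC/FLAG, while the cycle counter
 * always uses bit 31 (ARMV7_CCNT).
 */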
1642 static inline unsigned long armv7_pmnc_read(void)
1645 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
1649 static inline void armv7_pmnc_write(unsigned long val)
1651 val &= ARMV7_PMNC_MASK;
1652 asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
1655 static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
1657 return pmnc & ARMV7_OVERFLOWED_MASK;
1660 static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
1661 enum armv7_counters counter)
1665 if (counter == ARMV7_CYCLE_COUNTER)
1666 ret = pmnc & ARMV7_FLAG_C;
1667 else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
1668 ret = pmnc & ARMV7_FLAG_P(counter);
1670 pr_err("CPU%u checking wrong counter %d overflow status\n",
1671 smp_processor_id(), counter);
1676 static inline int armv7_pmnc_select_counter(unsigned int idx)
1680 if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
1681 pr_err("CPU%u selecting wrong PMNC counter"
1682 " %d\n", smp_processor_id(), idx);
1686 val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
1687 asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
1692 static inline u32 armv7pmu_read_counter(int idx)
1694 unsigned long value = 0;
1696 if (idx == ARMV7_CYCLE_COUNTER)
1697 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
1698 else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
1699 if (armv7_pmnc_select_counter(idx) == idx)
1700 asm volatile("mrc p15, 0, %0, c9, c13, 2"
1703 pr_err("CPU%u reading wrong counter %d\n",
1704 smp_processor_id(), idx);
1709 static inline void armv7pmu_write_counter(int idx, u32 value)
1711 if (idx == ARMV7_CYCLE_COUNTER)
1712 asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
1713 else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
1714 if (armv7_pmnc_select_counter(idx) == idx)
1715 asm volatile("mcr p15, 0, %0, c9, c13, 2"
1718 pr_err("CPU%u writing wrong counter %d\n",
1719 smp_processor_id(), idx);
1722 static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
1724 if (armv7_pmnc_select_counter(idx) == idx) {
1725 val &= ARMV7_EVTSEL_MASK;
1726 asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
1730 static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
1734 if ((idx != ARMV7_CYCLE_COUNTER) &&
1735 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1736 pr_err("CPU%u enabling wrong PMNC counter"
1737 " %d\n", smp_processor_id(), idx);
1741 if (idx == ARMV7_CYCLE_COUNTER)
1742 val = ARMV7_CNTENS_C;
1744 val = ARMV7_CNTENS_P(idx);
1746 asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
1751 static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
1756 if ((idx != ARMV7_CYCLE_COUNTER) &&
1757 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1758 pr_err("CPU%u disabling wrong PMNC counter"
1759 " %d\n", smp_processor_id(), idx);
1763 if (idx == ARMV7_CYCLE_COUNTER)
1764 val = ARMV7_CNTENC_C;
1766 val = ARMV7_CNTENC_P(idx);
1768 asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
1773 static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
1777 if ((idx != ARMV7_CYCLE_COUNTER) &&
1778 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1779 pr_err("CPU%u enabling wrong PMNC counter"
1780 " interrupt enable %d\n", smp_processor_id(), idx);
1784 if (idx == ARMV7_CYCLE_COUNTER)
1785 val = ARMV7_INTENS_C;
1787 val = ARMV7_INTENS_P(idx);
1789 asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
1794 static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
1798 if ((idx != ARMV7_CYCLE_COUNTER) &&
1799 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1800 pr_err("CPU%u disabling wrong PMNC counter"
1801 " interrupt enable %d\n", smp_processor_id(), idx);
1805 if (idx == ARMV7_CYCLE_COUNTER)
1806 val = ARMV7_INTENC_C;
1808 val = ARMV7_INTENC_P(idx);
1810 asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
1815 static inline u32 armv7_pmnc_getreset_flags(void)
1820 asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
1822 /* Write to clear flags */
1823 val &= ARMV7_FLAG_MASK;
1824 asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
1830 static void armv7_pmnc_dump_regs(void)
1835 printk(KERN_INFO "PMNC registers dump:\n");
1837 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
1838 printk(KERN_INFO "PMNC =0x%08x\n", val);
1840 asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
1841 printk(KERN_INFO "CNTENS=0x%08x\n", val);
1843 asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
1844 printk(KERN_INFO "INTENS=0x%08x\n", val);
1846 asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
1847 printk(KERN_INFO "FLAGS =0x%08x\n", val);
1849 asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
1850 printk(KERN_INFO "SELECT=0x%08x\n", val);
1852 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
1853 printk(KERN_INFO "CCNT =0x%08x\n", val);
1855 for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
1856 armv7_pmnc_select_counter(cnt);
1857 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
1858 printk(KERN_INFO "CNT[%d] count =0x%08x\n",
1859 cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
1860 asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
1861 printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
1862 cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
1867 void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
1869 unsigned long flags;
1872 * Enable counter and interrupt, and set the counter to count
1873 * the event that we're interested in.
1875 spin_lock_irqsave(&pmu_lock, flags);
1880 armv7_pmnc_disable_counter(idx);
1883 * Set event (if destined for PMNx counters)
1884 * We don't need to set the event if it's a cycle count
1886 if (idx != ARMV7_CYCLE_COUNTER)
1887 armv7_pmnc_write_evtsel(idx, hwc->config_base);
1890 * Enable interrupt for this counter
1892 armv7_pmnc_enable_intens(idx);
1897 armv7_pmnc_enable_counter(idx);
1899 spin_unlock_irqrestore(&pmu_lock, flags);
1902 static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
1904 unsigned long flags;
1907 * Disable counter and interrupt
1909 spin_lock_irqsave(&pmu_lock, flags);
1914 armv7_pmnc_disable_counter(idx);
1917 * Disable interrupt for this counter
1919 armv7_pmnc_disable_intens(idx);
1921 spin_unlock_irqrestore(&pmu_lock, flags);
1924 static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1927 struct perf_sample_data data;
1928 struct cpu_hw_events *cpuc;
1929 struct pt_regs *regs;
1933 * Get and reset the IRQ flags
1935 pmnc = armv7_pmnc_getreset_flags();
1938 * Did an overflow occur?
1940 if (!armv7_pmnc_has_overflowed(pmnc))
1944 * Handle the counter(s) overflow(s)
1946 regs = get_irq_regs();
1950 cpuc = &__get_cpu_var(cpu_hw_events);
1951 for (idx = 0; idx <= armpmu->num_events; ++idx) {
1952 struct perf_event *event = cpuc->events[idx];
1953 struct hw_perf_event *hwc;
1955 if (!test_bit(idx, cpuc->active_mask))
1959 * We have a single interrupt for all counters. Check that
1960 * each counter has overflowed before we process it.
1962 if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
1966 armpmu_event_update(event, hwc, idx);
1967 data.period = event->hw.last_period;
1968 if (!armpmu_event_set_period(event, hwc, idx))
1971 if (perf_event_overflow(event, 0, &data, regs))
1972 armpmu->disable(hwc, idx);
1976 * Handle the pending perf events.
1978 * Note: this call *must* be run with interrupts enabled. For
1979 * platforms that can have the PMU interrupts raised as a PMI, this
1982 perf_event_do_pending();
1987 static void armv7pmu_start(void)
1989 unsigned long flags;
1991 spin_lock_irqsave(&pmu_lock, flags);
1992 /* Enable all counters */
1993 armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
1994 spin_unlock_irqrestore(&pmu_lock, flags);
1997 static void armv7pmu_stop(void)
1999 unsigned long flags;
2001 spin_lock_irqsave(&pmu_lock, flags);
2002 /* Disable all counters */
2003 armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
2004 spin_unlock_irqrestore(&pmu_lock, flags);
2007 static inline int armv7_a8_pmu_event_map(int config)
2009 int mapping = armv7_a8_perf_map[config];
2010 if (HW_OP_UNSUPPORTED == mapping)
2011 mapping = -EOPNOTSUPP;
2015 static inline int armv7_a9_pmu_event_map(int config)
2017 int mapping = armv7_a9_perf_map[config];
2018 if (HW_OP_UNSUPPORTED == mapping)
2019 mapping = -EOPNOTSUPP;
2023 static u64 armv7pmu_raw_event(u64 config)
2025 return config & 0xff;
2028 static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
2029 struct hw_perf_event *event)
2033 /* Always place a cycle-count event on the cycle counter. */
2034 if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
2035 if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
2038 return ARMV7_CYCLE_COUNTER;
2041 * For anything other than a cycle counter, try and use
2042 * the event counters
2044 for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
2045 if (!test_and_set_bit(idx, cpuc->used_mask))
2049 /* The counters are all in use. */
2054 static struct arm_pmu armv7pmu = {
2055 .handle_irq = armv7pmu_handle_irq,
2056 .enable = armv7pmu_enable_event,
2057 .disable = armv7pmu_disable_event,
2058 .raw_event = armv7pmu_raw_event,
2059 .read_counter = armv7pmu_read_counter,
2060 .write_counter = armv7pmu_write_counter,
2061 .get_event_idx = armv7pmu_get_event_idx,
2062 .start = armv7pmu_start,
2063 .stop = armv7pmu_stop,
2064 .max_period = (1LLU << 32) - 1,
2067 static u32 __init armv7_reset_read_pmnc(void)
2071 /* Initialize & Reset PMNC: C and P bits */
2072 armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
2074 /* Read the number of CNTx counters supported from PMNC */
2075 nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
2077 /* Add the CPU cycles counter and return */
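/*
 * For example, on a Cortex-A8 the N field read above is 4, so adding the
 * cycle counter gives five counters in total, matching the "1+4 counters"
 * noted at the top of this file.
 */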
2082 init_hw_perf_events(void)
2084 unsigned long cpuid = read_cpuid_id();
2085 unsigned long implementor = (cpuid & 0xFF000000) >> 24;
2086 unsigned long part_number = (cpuid & 0xFFF0);
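/*
 * For example, a Cortex-A8 MIDR of the form 0x41xFC08x decodes to an
 * implementor of 0x41 (ARM) and a part_number of 0xC080, matching the
 * case handled below.
 */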
2088 /* We only support ARM CPUs implemented by ARM at the moment. */
2089 if (0x41 == implementor) {
2090 switch (part_number) {
2091 case 0xB360: /* ARM1136 */
2092 case 0xB560: /* ARM1156 */
2093 case 0xB760: /* ARM1176 */
2095 memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
2096 sizeof(armv6_perf_cache_map));
2097 perf_max_events = armv6pmu.num_events;
2099 case 0xB020: /* ARM11mpcore */
2100 armpmu = &armv6mpcore_pmu;
2101 memcpy(armpmu_perf_cache_map,
2102 armv6mpcore_perf_cache_map,
2103 sizeof(armv6mpcore_perf_cache_map));
2104 perf_max_events = armv6mpcore_pmu.num_events;
2106 case 0xC080: /* Cortex-A8 */
2107 armv7pmu.name = ARMV7_PMU_CORTEX_A8_NAME;
2108 memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map,
2109 sizeof(armv7_a8_perf_cache_map));
2110 armv7pmu.event_map = armv7_a8_pmu_event_map;
2113 /* Reset PMNC and read the number of CNTx counters
2115 armv7pmu.num_events = armv7_reset_read_pmnc();
2116 perf_max_events = armv7pmu.num_events;
2118 case 0xC090: /* Cortex-A9 */
2119 armv7pmu.name = ARMV7_PMU_CORTEX_A9_NAME;
2120 memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map,
2121 sizeof(armv7_a9_perf_cache_map));
2122 armv7pmu.event_map = armv7_a9_pmu_event_map;
2125 /* Reset PMNC and read the number of CNTx counters
2127 armv7pmu.num_events = armv7_reset_read_pmnc();
2128 perf_max_events = armv7pmu.num_events;
2131 pr_info("no hardware support available\n");
2132 perf_max_events = -1;
2137 pr_info("enabled with %s PMU driver, %d counters available\n",
2138 armpmu->name, armpmu->num_events);
2142 arch_initcall(init_hw_perf_events);
2145 * Callchain handling code.
2148 callchain_store(struct perf_callchain_entry *entry,
2151 if (entry->nr < PERF_MAX_STACK_DEPTH)
2152 entry->ip[entry->nr++] = ip;
2156 * The registers we're interested in are at the end of the variable
2157 * length saved register structure. The fp points at the end of this
2158 * structure so the address of this struct is:
2159 * (struct frame_tail *)(xxx->fp)-1
2161 * This code has been adapted from the ARM OProfile support.
2164 struct frame_tail *fp;
2167 } __attribute__((packed));
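/*
 * Worked example of the layout described above (a sketch assuming the
 * usual three-word {fp, sp, lr} tail, i.e. 12 bytes on 32-bit ARM): with
 * a saved frame pointer of 0xbeffff68, the tail starts at
 * 0xbeffff68 - 12 == 0xbeffff5c, which is exactly what
 * (struct frame_tail *)(xxx->fp) - 1 computes.
 */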
2170 * Get the return address for a single stackframe and return a pointer to the
2173 static struct frame_tail *
2174 user_backtrace(struct frame_tail *tail,
2175 struct perf_callchain_entry *entry)
2177 struct frame_tail buftail;
2179 /* Also check accessibility of one struct frame_tail beyond */
2180 if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
2182 if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
2185 callchain_store(entry, buftail.lr);
2188 * Frame pointers should strictly progress back up the stack
2189 * (towards higher addresses).
2191 if (tail >= buftail.fp)
2194 return buftail.fp - 1;
2198 perf_callchain_user(struct pt_regs *regs,
2199 struct perf_callchain_entry *entry)
2201 struct frame_tail *tail;
2203 callchain_store(entry, PERF_CONTEXT_USER);
2205 if (!user_mode(regs))
2206 regs = task_pt_regs(current);
2208 tail = (struct frame_tail *)regs->ARM_fp - 1;
2210 while (tail && !((unsigned long)tail & 0x3))
2211 tail = user_backtrace(tail, entry);
2215 * Gets called by walk_stackframe() for every stackframe. This will be called
2216 * whilst unwinding the stackframe and is like a subroutine return so we use
2220 callchain_trace(struct stackframe *fr,
2223 struct perf_callchain_entry *entry = data;
2224 callchain_store(entry, fr->pc);
2229 perf_callchain_kernel(struct pt_regs *regs,
2230 struct perf_callchain_entry *entry)
2232 struct stackframe fr;
2234 callchain_store(entry, PERF_CONTEXT_KERNEL);
2235 fr.fp = regs->ARM_fp;
2236 fr.sp = regs->ARM_sp;
2237 fr.lr = regs->ARM_lr;
2238 fr.pc = regs->ARM_pc;
2239 walk_stackframe(&fr, callchain_trace, entry);
2243 perf_do_callchain(struct pt_regs *regs,
2244 struct perf_callchain_entry *entry)
2251 is_user = user_mode(regs);
2253 if (!current || !current->pid)
2256 if (is_user && current->state != TASK_RUNNING)
2260 perf_callchain_kernel(regs, entry);
2263 perf_callchain_user(regs, entry);
2266 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2268 struct perf_callchain_entry *
2269 perf_callchain(struct pt_regs *regs)
2271 struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
2274 perf_do_callchain(regs, entry);