2 * Linux performance counter support for MIPS.
4 * Copyright (C) 2010 MIPS Technologies, Inc.
5 * Copyright (C) 2011 Cavium Networks, Inc.
6 * Author: Deng-Cheng Zhu
8 * This code is based on the implementation for ARM, which is in turn
9 * based on the sparc64 perf event code and the x86 code. Performance
* counter access is based on the MIPS Oprofile code, and the callchain
* support is based on the MIPS stacktrace.c code.
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #include <linux/cpumask.h>
19 #include <linux/interrupt.h>
20 #include <linux/smp.h>
21 #include <linux/kernel.h>
22 #include <linux/perf_event.h>
23 #include <linux/uaccess.h>
26 #include <asm/irq_regs.h>
27 #include <asm/stacktrace.h>
28 #include <asm/time.h> /* For perf_irq */
30 #define MIPS_MAX_HWEVENTS 4
32 struct cpu_hw_events {
33 /* Array of events on this cpu. */
34 struct perf_event *events[MIPS_MAX_HWEVENTS];
37 * Set the bit (indexed by the counter number) when the counter
38 * is used for an event.
40 unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
43 * Software copy of the control register for each performance counter.
* MIPS CPUs vary in their performance counters; different CPUs use
* this field differently, and some may not use it at all.
47 unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
49 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
53 /* The description of MIPS performance events. */
54 struct mips_perf_event {
55 unsigned int event_id;
57 * MIPS performance counters are indexed starting from 0.
* CNTR_EVEN indicates that the indexes of the counters to be used are
* even numbers.
61 unsigned int cntr_mask;
62 #define CNTR_EVEN 0x55555555
63 #define CNTR_ODD 0xaaaaaaaa
64 #define CNTR_ALL 0xffffffff
65 #ifdef CONFIG_MIPS_MT_SMP
78 static struct mips_perf_event raw_event;
79 static DEFINE_MUTEX(raw_event_mutex);
81 #define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
82 #define C(x) PERF_COUNT_HW_CACHE_##x
90 u64 (*read_counter)(unsigned int idx);
91 void (*write_counter)(unsigned int idx, u64 val);
92 const struct mips_perf_event *(*map_raw_event)(u64 config);
93 const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
94 const struct mips_perf_event (*cache_event_map)
95 [PERF_COUNT_HW_CACHE_MAX]
96 [PERF_COUNT_HW_CACHE_OP_MAX]
97 [PERF_COUNT_HW_CACHE_RESULT_MAX];
98 unsigned int num_counters;
101 static struct mips_pmu mipspmu;
103 #define M_CONFIG1_PC (1 << 4)
105 #define M_PERFCTL_EXL (1 << 0)
106 #define M_PERFCTL_KERNEL (1 << 1)
107 #define M_PERFCTL_SUPERVISOR (1 << 2)
108 #define M_PERFCTL_USER (1 << 3)
109 #define M_PERFCTL_INTERRUPT_ENABLE (1 << 4)
110 #define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)
111 #define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
112 #define M_PERFCTL_MT_EN(filter) ((filter) << 20)
113 #define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
114 #define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
115 #define M_TC_EN_TC M_PERFCTL_MT_EN(2)
116 #define M_PERFCTL_TCID(tcid) ((tcid) << 22)
117 #define M_PERFCTL_WIDE (1 << 30)
118 #define M_PERFCTL_MORE (1 << 31)
120 #define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \
123 M_PERFCTL_SUPERVISOR | \
124 M_PERFCTL_INTERRUPT_ENABLE)
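/*
 * Illustrative example only: a control word that counts, say, event 5
 * in both user and kernel mode with overflow interrupts enabled would
 * be M_PERFCTL_EVENT(5) | M_PERFCTL_USER | M_PERFCTL_KERNEL |
 * M_PERFCTL_INTERRUPT_ENABLE; see mipsxx_pmu_enable_event() below for
 * how the real control words are assembled.
 */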
126 #ifdef CONFIG_MIPS_MT_SMP
127 #define M_PERFCTL_CONFIG_MASK 0x3fff801f
129 #define M_PERFCTL_CONFIG_MASK 0x1f
131 #define M_PERFCTL_EVENT_MASK 0xfe0
134 #ifdef CONFIG_MIPS_MT_SMP
135 static int cpu_has_mipsmt_pertccounters;
137 static DEFINE_RWLOCK(pmuint_rwlock);
140 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
141 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
143 #if defined(CONFIG_HW_PERF_EVENTS)
144 #define vpe_id() (cpu_has_mipsmt_pertccounters ? \
145 0 : smp_processor_id())
147 #define vpe_id() (cpu_has_mipsmt_pertccounters ? \
148 0 : cpu_data[smp_processor_id()].vpe_id)
151 /* Copied from op_model_mipsxx.c */
152 static unsigned int vpe_shift(void)
154 if (num_possible_cpus() > 1)
160 static unsigned int counters_total_to_per_cpu(unsigned int counters)
162 return counters >> vpe_shift();
165 #else /* !CONFIG_MIPS_MT_SMP */
168 #endif /* CONFIG_MIPS_MT_SMP */
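/*
 * Worked example for the helpers above: with CONFIG_MIPS_MT_SMP and no
 * per-TC counters, a core reporting 4 counters in total ends up with
 * 4 >> vpe_shift() == 2 counters per logical CPU.
 */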
170 static void resume_local_counters(void);
171 static void pause_local_counters(void);
172 static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
173 static int mipsxx_pmu_handle_shared_irq(void);
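/*
 * Map a perf counter index onto the hardware counter used by this VPE;
 * on MT cores the physical counters are split between the VPEs.
 */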
175 static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
182 static u64 mipsxx_pmu_read_counter(unsigned int idx)
184 idx = mipsxx_pmu_swizzle_perf_idx(idx);
* The counters are unsigned; cast to avoid sign extension when
* the 32-bit value is returned as a u64.
192 return (u32)read_c0_perfcntr0();
194 return (u32)read_c0_perfcntr1();
196 return (u32)read_c0_perfcntr2();
198 return (u32)read_c0_perfcntr3();
200 WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
205 static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
207 idx = mipsxx_pmu_swizzle_perf_idx(idx);
211 return read_c0_perfcntr0_64();
213 return read_c0_perfcntr1_64();
215 return read_c0_perfcntr2_64();
217 return read_c0_perfcntr3_64();
219 WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
224 static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
226 idx = mipsxx_pmu_swizzle_perf_idx(idx);
230 write_c0_perfcntr0(val);
233 write_c0_perfcntr1(val);
236 write_c0_perfcntr2(val);
239 write_c0_perfcntr3(val);
244 static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
246 idx = mipsxx_pmu_swizzle_perf_idx(idx);
250 write_c0_perfcntr0_64(val);
253 write_c0_perfcntr1_64(val);
256 write_c0_perfcntr2_64(val);
259 write_c0_perfcntr3_64(val);
264 static unsigned int mipsxx_pmu_read_control(unsigned int idx)
266 idx = mipsxx_pmu_swizzle_perf_idx(idx);
270 return read_c0_perfctrl0();
272 return read_c0_perfctrl1();
274 return read_c0_perfctrl2();
276 return read_c0_perfctrl3();
278 WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
283 static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
285 idx = mipsxx_pmu_swizzle_perf_idx(idx);
289 write_c0_perfctrl0(val);
292 write_c0_perfctrl1(val);
295 write_c0_perfctrl2(val);
298 write_c0_perfctrl3(val);
303 static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
304 struct hw_perf_event *hwc)
* We only need to care about the counter mask here; the range has
* already been checked.
312 unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;
314 for (i = mipspmu.num_counters - 1; i >= 0; i--) {
* Note that some MIPS perf events can be counted by both
* even and odd counters, whereas many others can only be
* counted by even _or_ odd counters. This introduces an
* issue: when an event of the former kind occupies the
* counter that an event of the latter kind needs, counter
* allocation for the latter event fails, even though the
* two could in principle be swapped dynamically. We leave
* this issue alone for now.
325 if (test_bit(i, &cntr_mask) &&
326 !test_and_set_bit(i, cpuc->used_mask))
333 static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
335 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
337 WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
339 cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
340 (evt->config_base & M_PERFCTL_CONFIG_MASK) |
/* Make sure the interrupt is enabled. */
342 M_PERFCTL_INTERRUPT_ENABLE;
344 * We do not actually let the counter run. Leave it until start().
348 static void mipsxx_pmu_disable_event(int idx)
350 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
353 WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
355 local_irq_save(flags);
356 cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
357 ~M_PERFCTL_COUNT_EVENT_WHENEVER;
358 mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
359 local_irq_restore(flags);
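/*
 * Arm the counter for the next sample period: the counter is preloaded
 * with (overflow - left), so the overflow bit is reached after "left"
 * more events.
 */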
362 static int mipspmu_event_set_period(struct perf_event *event,
363 struct hw_perf_event *hwc,
366 u64 left = local64_read(&hwc->period_left);
367 u64 period = hwc->sample_period;
370 if (unlikely((left + period) & (1ULL << 63))) {
371 /* left underflowed by more than period. */
373 local64_set(&hwc->period_left, left);
374 hwc->last_period = period;
376 } else if (unlikely((left + period) <= period)) {
377 /* left underflowed by less than period. */
379 local64_set(&hwc->period_left, left);
380 hwc->last_period = period;
384 if (left > mipspmu.max_period) {
385 left = mipspmu.max_period;
386 local64_set(&hwc->period_left, left);
389 local64_set(&hwc->prev_count, mipspmu.overflow - left);
391 mipspmu.write_counter(idx, mipspmu.overflow - left);
393 perf_event_update_userpage(event);
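/*
 * Fold the delta accumulated since prev_count into the event count and
 * the remaining period. The cmpxchg() protects against a racing update
 * of prev_count, e.g. from the counter overflow interrupt.
 */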
398 static void mipspmu_event_update(struct perf_event *event,
399 struct hw_perf_event *hwc,
402 u64 prev_raw_count, new_raw_count;
406 prev_raw_count = local64_read(&hwc->prev_count);
407 new_raw_count = mipspmu.read_counter(idx);
409 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
410 new_raw_count) != prev_raw_count)
413 delta = new_raw_count - prev_raw_count;
415 local64_add(delta, &event->count);
416 local64_sub(delta, &hwc->period_left);
419 static void mipspmu_start(struct perf_event *event, int flags)
421 struct hw_perf_event *hwc = &event->hw;
423 if (flags & PERF_EF_RELOAD)
424 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
428 /* Set the period for the event. */
429 mipspmu_event_set_period(event, hwc, hwc->idx);
431 /* Enable the event. */
432 mipsxx_pmu_enable_event(hwc, hwc->idx);
435 static void mipspmu_stop(struct perf_event *event, int flags)
437 struct hw_perf_event *hwc = &event->hw;
439 if (!(hwc->state & PERF_HES_STOPPED)) {
440 /* We are working on a local event. */
441 mipsxx_pmu_disable_event(hwc->idx);
443 mipspmu_event_update(event, hwc, hwc->idx);
444 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
448 static int mipspmu_add(struct perf_event *event, int flags)
450 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
451 struct hw_perf_event *hwc = &event->hw;
455 perf_pmu_disable(event->pmu);
/* Look for a free counter for this event. */
458 idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
465 * If there is an event in the counter we are going to use then
466 * make sure it is disabled.
469 mipsxx_pmu_disable_event(idx);
470 cpuc->events[idx] = event;
472 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
473 if (flags & PERF_EF_START)
474 mipspmu_start(event, PERF_EF_RELOAD);
476 /* Propagate our changes to the userspace mapping. */
477 perf_event_update_userpage(event);
480 perf_pmu_enable(event->pmu);
484 static void mipspmu_del(struct perf_event *event, int flags)
486 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
487 struct hw_perf_event *hwc = &event->hw;
490 WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
492 mipspmu_stop(event, PERF_EF_UPDATE);
493 cpuc->events[idx] = NULL;
494 clear_bit(idx, cpuc->used_mask);
496 perf_event_update_userpage(event);
499 static void mipspmu_read(struct perf_event *event)
501 struct hw_perf_event *hwc = &event->hw;
503 /* Don't read disabled counters! */
507 mipspmu_event_update(event, hwc, hwc->idx);
510 static void mipspmu_enable(struct pmu *pmu)
512 #ifdef CONFIG_MIPS_MT_SMP
513 write_unlock(&pmuint_rwlock);
515 resume_local_counters();
* MIPS performance counters can be per-TC. The control registers cannot
* be accessed directly across CPUs, so global control would require
* cross-CPU calls. on_each_cpu() could help, but we cannot guarantee
* that this function is called with interrupts enabled. So instead we
* pause the local counters, then grab a rwlock and leave the counters
* on other CPUs alone. If a counter interrupt is raised on another CPU
* while we hold the write lock, that CPU simply pauses its local
* counters and spins in its handler. Note that we cannot be migrated
* to another CPU between pausing the local counters and grabbing the
* lock.
529 static void mipspmu_disable(struct pmu *pmu)
531 pause_local_counters();
532 #ifdef CONFIG_MIPS_MT_SMP
533 write_lock(&pmuint_rwlock);
537 static atomic_t active_events = ATOMIC_INIT(0);
538 static DEFINE_MUTEX(pmu_reserve_mutex);
539 static int (*save_perf_irq)(void);
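/*
 * Hook up the counter overflow interrupt: either a dedicated IRQ line,
 * or, when cp0_perfcount_irq < 0, the line shared with the CP0 timer,
 * in which case perf_irq is redirected to our shared handler.
 */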
541 static int mipspmu_get_irq(void)
545 if (mipspmu.irq >= 0) {
546 /* Request my own irq handler. */
547 err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
548 IRQF_PERCPU | IRQF_NOBALANCING,
549 "mips_perf_pmu", NULL);
551 pr_warning("Unable to request IRQ%d for MIPS "
552 "performance counters!\n", mipspmu.irq);
554 } else if (cp0_perfcount_irq < 0) {
556 * We are sharing the irq number with the timer interrupt.
558 save_perf_irq = perf_irq;
559 perf_irq = mipsxx_pmu_handle_shared_irq;
562 pr_warning("The platform hasn't properly defined its "
563 "interrupt controller.\n");
570 static void mipspmu_free_irq(void)
572 if (mipspmu.irq >= 0)
573 free_irq(mipspmu.irq, NULL);
574 else if (cp0_perfcount_irq < 0)
575 perf_irq = save_perf_irq;
* mipsxx/rm9000/loongson2 have different performance counters, so each
* has its own low-level init routine.
582 static void reset_counters(void *arg);
583 static int __hw_perf_event_init(struct perf_event *event);
585 static void hw_perf_event_destroy(struct perf_event *event)
587 if (atomic_dec_and_mutex_lock(&active_events,
588 &pmu_reserve_mutex)) {
* We must not call the destroy function with interrupts
* disabled.
593 on_each_cpu(reset_counters,
594 (void *)(long)mipspmu.num_counters, 1);
596 mutex_unlock(&pmu_reserve_mutex);
600 static int mipspmu_event_init(struct perf_event *event)
604 switch (event->attr.type) {
606 case PERF_TYPE_HARDWARE:
607 case PERF_TYPE_HW_CACHE:
614 if (event->cpu >= nr_cpumask_bits ||
615 (event->cpu >= 0 && !cpu_online(event->cpu)))
618 if (!atomic_inc_not_zero(&active_events)) {
619 if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
620 atomic_dec(&active_events);
624 mutex_lock(&pmu_reserve_mutex);
625 if (atomic_read(&active_events) == 0)
626 err = mipspmu_get_irq();
629 atomic_inc(&active_events);
630 mutex_unlock(&pmu_reserve_mutex);
636 err = __hw_perf_event_init(event);
638 hw_perf_event_destroy(event);
643 static struct pmu pmu = {
644 .pmu_enable = mipspmu_enable,
645 .pmu_disable = mipspmu_disable,
646 .event_init = mipspmu_event_init,
649 .start = mipspmu_start,
650 .stop = mipspmu_stop,
651 .read = mipspmu_read,
654 static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
* Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits
* for event_id.
660 #ifdef CONFIG_MIPS_MT_SMP
661 return ((unsigned int)pev->range << 24) |
662 (pev->cntr_mask & 0xffff00) |
663 (pev->event_id & 0xff);
665 return (pev->cntr_mask & 0xffff00) |
666 (pev->event_id & 0xff);
670 static const struct mips_perf_event *mipspmu_map_general_event(int idx)
672 const struct mips_perf_event *pev;
674 pev = ((*mipspmu.general_event_map)[idx].event_id ==
675 UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
676 &(*mipspmu.general_event_map)[idx]);
681 static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
683 unsigned int cache_type, cache_op, cache_result;
684 const struct mips_perf_event *pev;
686 cache_type = (config >> 0) & 0xff;
687 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
688 return ERR_PTR(-EINVAL);
690 cache_op = (config >> 8) & 0xff;
691 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
692 return ERR_PTR(-EINVAL);
694 cache_result = (config >> 16) & 0xff;
695 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
696 return ERR_PTR(-EINVAL);
698 pev = &((*mipspmu.cache_event_map)
703 if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
704 return ERR_PTR(-EOPNOTSUPP);
710 static int validate_event(struct cpu_hw_events *cpuc,
711 struct perf_event *event)
713 struct hw_perf_event fake_hwc = event->hw;
/* Allow mixed event groups, so return 1 to pass the validation. */
716 if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
719 return mipsxx_pmu_alloc_counter(cpuc, &fake_hwc) >= 0;
722 static int validate_group(struct perf_event *event)
724 struct perf_event *sibling, *leader = event->group_leader;
725 struct cpu_hw_events fake_cpuc;
727 memset(&fake_cpuc, 0, sizeof(fake_cpuc));
729 if (!validate_event(&fake_cpuc, leader))
732 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
733 if (!validate_event(&fake_cpuc, sibling))
737 if (!validate_event(&fake_cpuc, event))
743 /* This is needed by specific irq handlers in perf_event_*.c */
744 static void handle_associated_event(struct cpu_hw_events *cpuc,
745 int idx, struct perf_sample_data *data,
746 struct pt_regs *regs)
748 struct perf_event *event = cpuc->events[idx];
749 struct hw_perf_event *hwc = &event->hw;
751 mipspmu_event_update(event, hwc, idx);
752 data->period = event->hw.last_period;
753 if (!mipspmu_event_set_period(event, hwc, idx))
756 if (perf_event_overflow(event, data, regs))
757 mipsxx_pmu_disable_event(idx);
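/*
 * Probe how many counters the core implements: Config1.PC tells us
 * whether any are present, and each control register's M (MORE) bit
 * tells us whether a further counter follows.
 */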
761 static int __n_counters(void)
763 if (!(read_c0_config1() & M_CONFIG1_PC))
765 if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
767 if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
769 if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
775 static int n_counters(void)
779 switch (current_cpu_type()) {
790 counters = __n_counters();
796 static void reset_counters(void *arg)
798 int counters = (int)(long)arg;
801 mipsxx_pmu_write_control(3, 0);
802 mipspmu.write_counter(3, 0);
804 mipsxx_pmu_write_control(2, 0);
805 mipspmu.write_counter(2, 0);
807 mipsxx_pmu_write_control(1, 0);
808 mipspmu.write_counter(1, 0);
810 mipsxx_pmu_write_control(0, 0);
811 mipspmu.write_counter(0, 0);
815 /* 24K/34K/1004K cores can share the same event map. */
816 static const struct mips_perf_event mipsxxcore_event_map
817 [PERF_COUNT_HW_MAX] = {
818 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
819 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
820 [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
821 [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
822 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
823 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
824 [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
827 /* 74K core has different branch event code. */
828 static const struct mips_perf_event mipsxx74Kcore_event_map
829 [PERF_COUNT_HW_MAX] = {
830 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
831 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
832 [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
833 [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
834 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
835 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
836 [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
839 static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
840 [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
841 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
842 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
843 [PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
844 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
845 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
846 [PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
849 /* 24K/34K/1004K cores can share the same cache event map. */
850 static const struct mips_perf_event mipsxxcore_cache_map
851 [PERF_COUNT_HW_CACHE_MAX]
852 [PERF_COUNT_HW_CACHE_OP_MAX]
853 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
856 * Like some other architectures (e.g. ARM), the performance
857 * counters don't differentiate between read and write
858 * accesses/misses, so this isn't strictly correct, but it's the
859 * best we can do. Writes and reads get combined.
862 [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
863 [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
866 [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
867 [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
870 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
871 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
876 [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
877 [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
880 [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
881 [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
884 [C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T },
886 * Note that MIPS has only "hit" events countable for
887 * the prefetch operation.
889 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
894 [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
895 [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
898 [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
899 [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
902 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
903 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
908 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
909 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
912 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
913 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
916 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
917 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
922 [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
923 [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
926 [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
927 [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
930 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
931 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
935 /* Using the same code for *HW_BRANCH* */
937 [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
938 [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
941 [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
942 [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
945 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
946 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
951 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
952 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
955 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
956 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
959 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
960 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
965 /* 74K core has completely different cache event map. */
966 static const struct mips_perf_event mipsxx74Kcore_cache_map
967 [PERF_COUNT_HW_CACHE_MAX]
968 [PERF_COUNT_HW_CACHE_OP_MAX]
969 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
972 * Like some other architectures (e.g. ARM), the performance
973 * counters don't differentiate between read and write
974 * accesses/misses, so this isn't strictly correct, but it's the
975 * best we can do. Writes and reads get combined.
978 [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
979 [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
982 [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
983 [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
986 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
987 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
992 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
993 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
996 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
997 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
1000 [C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T },
1002 * Note that MIPS has only "hit" events countable for
1003 * the prefetch operation.
1005 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1010 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
1011 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
1014 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
1015 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
1017 [C(OP_PREFETCH)] = {
1018 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1019 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1023 /* 74K core does not have specific DTLB events. */
1025 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1026 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1029 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1030 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1032 [C(OP_PREFETCH)] = {
1033 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1034 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1039 [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
1040 [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
1043 [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
1044 [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
1046 [C(OP_PREFETCH)] = {
1047 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1048 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1052 /* Using the same code for *HW_BRANCH* */
1054 [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
1055 [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
1058 [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
1059 [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
1061 [C(OP_PREFETCH)] = {
1062 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1063 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1068 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1069 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1072 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1073 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1075 [C(OP_PREFETCH)] = {
1076 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1077 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1083 static const struct mips_perf_event octeon_cache_map
1084 [PERF_COUNT_HW_CACHE_MAX]
1085 [PERF_COUNT_HW_CACHE_OP_MAX]
1086 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1089 [C(RESULT_ACCESS)] = { 0x2b, CNTR_ALL },
1090 [C(RESULT_MISS)] = { 0x2e, CNTR_ALL },
1093 [C(RESULT_ACCESS)] = { 0x30, CNTR_ALL },
1094 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1096 [C(OP_PREFETCH)] = {
1097 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1098 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1103 [C(RESULT_ACCESS)] = { 0x18, CNTR_ALL },
1104 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1107 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1108 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1110 [C(OP_PREFETCH)] = {
1111 [C(RESULT_ACCESS)] = { 0x19, CNTR_ALL },
1112 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1117 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1118 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1121 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1122 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1124 [C(OP_PREFETCH)] = {
1125 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1126 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
* Only general DTLB misses are counted; use the same event for
* loads and stores.
1135 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1136 [C(RESULT_MISS)] = { 0x35, CNTR_ALL },
1139 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1140 [C(RESULT_MISS)] = { 0x35, CNTR_ALL },
1142 [C(OP_PREFETCH)] = {
1143 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1144 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1149 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1150 [C(RESULT_MISS)] = { 0x37, CNTR_ALL },
1153 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1154 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1156 [C(OP_PREFETCH)] = {
1157 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1158 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1162 /* Using the same code for *HW_BRANCH* */
1164 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1165 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1168 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1169 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1171 [C(OP_PREFETCH)] = {
1172 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1173 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1178 #ifdef CONFIG_MIPS_MT_SMP
1179 static void check_and_calc_range(struct perf_event *event,
1180 const struct mips_perf_event *pev)
1182 struct hw_perf_event *hwc = &event->hw;
1184 if (event->cpu >= 0) {
1185 if (pev->range > V) {
1187 * The user selected an event that is processor
1188 * wide, while expecting it to be VPE wide.
1190 hwc->config_base |= M_TC_EN_ALL;
* FIXME: cpu_data[event->cpu].vpe_id reports 0
* for both CPUs.
1196 hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
1197 hwc->config_base |= M_TC_EN_VPE;
1200 hwc->config_base |= M_TC_EN_ALL;
1203 static void check_and_calc_range(struct perf_event *event,
1204 const struct mips_perf_event *pev)
1209 static int __hw_perf_event_init(struct perf_event *event)
1211 struct perf_event_attr *attr = &event->attr;
1212 struct hw_perf_event *hwc = &event->hw;
1213 const struct mips_perf_event *pev;
/* Return the MIPS event descriptor for the generic perf event. */
1217 if (PERF_TYPE_HARDWARE == event->attr.type) {
1218 if (event->attr.config >= PERF_COUNT_HW_MAX)
1220 pev = mipspmu_map_general_event(event->attr.config);
1221 } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
1222 pev = mipspmu_map_cache_event(event->attr.config);
1223 } else if (PERF_TYPE_RAW == event->attr.type) {
1224 /* We are working on the global raw event. */
1225 mutex_lock(&raw_event_mutex);
1226 pev = mipspmu.map_raw_event(event->attr.config);
1228 /* The event type is not (yet) supported. */
1233 if (PERF_TYPE_RAW == event->attr.type)
1234 mutex_unlock(&raw_event_mutex);
1235 return PTR_ERR(pev);
* We allow maximum flexibility in how each individual counter shared
* by a single CPU operates (mode exclusion and range).
1242 hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;
1244 /* Calculate range bits and validate it. */
1245 if (num_possible_cpus() > 1)
1246 check_and_calc_range(event, pev);
1248 hwc->event_base = mipspmu_perf_event_encode(pev);
1249 if (PERF_TYPE_RAW == event->attr.type)
1250 mutex_unlock(&raw_event_mutex);
1252 if (!attr->exclude_user)
1253 hwc->config_base |= M_PERFCTL_USER;
1254 if (!attr->exclude_kernel) {
1255 hwc->config_base |= M_PERFCTL_KERNEL;
1256 /* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
1257 hwc->config_base |= M_PERFCTL_EXL;
1259 if (!attr->exclude_hv)
1260 hwc->config_base |= M_PERFCTL_SUPERVISOR;
1262 hwc->config_base &= M_PERFCTL_CONFIG_MASK;
* The event may belong to another CPU. We do not assign a local
* counter to it for now.
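/*
 * A counting (non-sampling) event gets the longest possible period,
 * so that overflow interrupts stay rare.
 */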
1270 if (!hwc->sample_period) {
1271 hwc->sample_period = mipspmu.max_period;
1272 hwc->last_period = hwc->sample_period;
1273 local64_set(&hwc->period_left, hwc->sample_period);
1277 if (event->group_leader != event) {
1278 err = validate_group(event);
1283 event->destroy = hw_perf_event_destroy;
1287 static void pause_local_counters(void)
1289 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1290 int ctr = mipspmu.num_counters;
1291 unsigned long flags;
1293 local_irq_save(flags);
1296 cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
1297 mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
1298 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
1300 local_irq_restore(flags);
1303 static void resume_local_counters(void)
1305 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1306 int ctr = mipspmu.num_counters;
1310 mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
1314 static int mipsxx_pmu_handle_shared_irq(void)
1316 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1317 struct perf_sample_data data;
1318 unsigned int counters = mipspmu.num_counters;
1320 int handled = IRQ_NONE;
1321 struct pt_regs *regs;
1323 if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
* First we pause the local counters, so that when we are blocked
* on the lock here, the counters are all paused. When the lock is
* held because of mipspmu_disable(), the timer interrupt handler
* will simply be delayed.
*
* See also mipspmu_disable()/mipspmu_enable().
1332 pause_local_counters();
1333 #ifdef CONFIG_MIPS_MT_SMP
1334 read_lock(&pmuint_rwlock);
1337 regs = get_irq_regs();
1339 perf_sample_data_init(&data, 0);
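/*
 * For every counter in use, check whether its value has reached the
 * overflow threshold programmed by mipspmu_event_set_period() and, if
 * so, process the associated event.
 */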
1342 #define HANDLE_COUNTER(n) \
1344 if (test_bit(n, cpuc->used_mask)) { \
1345 counter = mipspmu.read_counter(n); \
1346 if (counter & mipspmu.overflow) { \
1347 handle_associated_event(cpuc, n, &data, regs); \
1348 handled = IRQ_HANDLED; \
1358 * Do all the work for the pending perf events. We can do this
1359 * in here because the performance counter interrupt is a regular
1360 * interrupt, not NMI.
1362 if (handled == IRQ_HANDLED)
1365 #ifdef CONFIG_MIPS_MT_SMP
1366 read_unlock(&pmuint_rwlock);
1368 resume_local_counters();
1372 static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
1374 return mipsxx_pmu_handle_shared_irq();
1378 #define IS_UNSUPPORTED_24K_EVENT(r, b) \
1379 ((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 || \
1380 (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 || \
1381 (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 || \
1382 (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) || \
1383 ((b) >= 68 && (b) <= 127))
1384 #define IS_BOTH_COUNTERS_24K_EVENT(b) \
1385 ((b) == 0 || (b) == 1 || (b) == 11)
1388 #define IS_UNSUPPORTED_34K_EVENT(r, b) \
1389 ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 || \
1390 (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) || \
1391 ((b) >= 68 && (b) <= 127))
1392 #define IS_BOTH_COUNTERS_34K_EVENT(b) \
1393 ((b) == 0 || (b) == 1 || (b) == 11)
1394 #ifdef CONFIG_MIPS_MT_SMP
1395 #define IS_RANGE_P_34K_EVENT(r, b) \
1396 ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
1397 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \
1398 (r) == 176 || ((b) >= 50 && (b) <= 55) || \
1399 ((b) >= 64 && (b) <= 67))
1400 #define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
1404 #define IS_UNSUPPORTED_74K_EVENT(r, b) \
1405 ((r) == 5 || ((r) >= 135 && (r) <= 137) || \
1406 ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 || \
1407 (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) || \
1408 (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 || \
1409 (b) == 61 || (r) == 62 || (r) == 191 || \
1410 ((b) >= 64 && (b) <= 127))
1411 #define IS_BOTH_COUNTERS_74K_EVENT(b) \
1412 ((b) == 0 || (b) == 1)
1415 #define IS_UNSUPPORTED_1004K_EVENT(r, b) \
1416 ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 || \
1417 (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127))
1418 #define IS_BOTH_COUNTERS_1004K_EVENT(b) \
1419 ((b) == 0 || (b) == 1 || (b) == 11)
1420 #ifdef CONFIG_MIPS_MT_SMP
1421 #define IS_RANGE_P_1004K_EVENT(r, b) \
1422 ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
1423 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 || \
1424 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) || \
1425 (r) == 188 || (b) == 61 || (b) == 62 || \
1426 ((b) >= 64 && (b) <= 67))
1427 #define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
* Users can request raw events 0-255, where 0-127 select events on the
* even counters and 128-255 select events on the odd counters; bit 7
* indicates the parity. For example, to count Event Num 15 (as listed
* in the user manual) on an odd counter, add 128 to 15 and pass 143
* (0x8F) as the event config.
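/*
 * Assuming the standard perf tool raw-event syntax, that odd-counter
 * event could then be requested as, e.g., "perf stat -e r8f ...".
 */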
1438 static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
1440 unsigned int raw_id = config & 0xff;
1441 unsigned int base_id = raw_id & 0x7f;
1443 switch (current_cpu_type()) {
1445 if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id))
1446 return ERR_PTR(-EOPNOTSUPP);
1447 raw_event.event_id = base_id;
1448 if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
1449 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1451 raw_event.cntr_mask =
1452 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1453 #ifdef CONFIG_MIPS_MT_SMP
* This actually does nothing: non-multithreading CPUs will not
* check or calculate the range.
1458 raw_event.range = P;
1462 if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id))
1463 return ERR_PTR(-EOPNOTSUPP);
1464 raw_event.event_id = base_id;
1465 if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
1466 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1468 raw_event.cntr_mask =
1469 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1470 #ifdef CONFIG_MIPS_MT_SMP
1471 if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
1472 raw_event.range = P;
1473 else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
1474 raw_event.range = V;
1476 raw_event.range = T;
1480 if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id))
1481 return ERR_PTR(-EOPNOTSUPP);
1482 raw_event.event_id = base_id;
1483 if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
1484 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1486 raw_event.cntr_mask =
1487 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1488 #ifdef CONFIG_MIPS_MT_SMP
1489 raw_event.range = P;
1493 if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id))
1494 return ERR_PTR(-EOPNOTSUPP);
1495 raw_event.event_id = base_id;
1496 if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
1497 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1499 raw_event.cntr_mask =
1500 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1501 #ifdef CONFIG_MIPS_MT_SMP
1502 if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
1503 raw_event.range = P;
1504 else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
1505 raw_event.range = V;
1507 raw_event.range = T;
1515 static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
1517 unsigned int raw_id = config & 0xff;
1518 unsigned int base_id = raw_id & 0x7f;
1521 raw_event.cntr_mask = CNTR_ALL;
1522 raw_event.event_id = base_id;
1524 if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
1526 return ERR_PTR(-EOPNOTSUPP);
1529 return ERR_PTR(-EOPNOTSUPP);
1540 return ERR_PTR(-EOPNOTSUPP);
1549 init_hw_perf_events(void)
1554 pr_info("Performance counters: ");
1556 counters = n_counters();
1557 if (counters == 0) {
1558 pr_cont("No available PMU.\n");
1562 #ifdef CONFIG_MIPS_MT_SMP
1563 cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
1564 if (!cpu_has_mipsmt_pertccounters)
1565 counters = counters_total_to_per_cpu(counters);
1568 #ifdef MSC01E_INT_BASE
* Use the platform-specific interrupt controller defines.
1573 irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
1576 if (cp0_perfcount_irq >= 0)
1577 irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
1580 #ifdef MSC01E_INT_BASE
1584 mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
1586 switch (current_cpu_type()) {
1588 mipspmu.name = "mips/24K";
1589 mipspmu.general_event_map = &mipsxxcore_event_map;
1590 mipspmu.cache_event_map = &mipsxxcore_cache_map;
1593 mipspmu.name = "mips/34K";
1594 mipspmu.general_event_map = &mipsxxcore_event_map;
1595 mipspmu.cache_event_map = &mipsxxcore_cache_map;
1598 mipspmu.name = "mips/74K";
1599 mipspmu.general_event_map = &mipsxx74Kcore_event_map;
1600 mipspmu.cache_event_map = &mipsxx74Kcore_cache_map;
1603 mipspmu.name = "mips/1004K";
1604 mipspmu.general_event_map = &mipsxxcore_event_map;
1605 mipspmu.cache_event_map = &mipsxxcore_cache_map;
1607 case CPU_CAVIUM_OCTEON:
1608 case CPU_CAVIUM_OCTEON_PLUS:
1609 case CPU_CAVIUM_OCTEON2:
1610 mipspmu.name = "octeon";
1611 mipspmu.general_event_map = &octeon_event_map;
1612 mipspmu.cache_event_map = &octeon_cache_map;
1613 mipspmu.map_raw_event = octeon_pmu_map_raw_event;
1616 pr_cont("Either hardware does not support performance "
1617 "counters, or not yet implemented.\n");
1621 mipspmu.num_counters = counters;
1624 if (read_c0_perfctrl0() & M_PERFCTL_WIDE) {
1625 mipspmu.max_period = (1ULL << 63) - 1;
1626 mipspmu.valid_count = (1ULL << 63) - 1;
1627 mipspmu.overflow = 1ULL << 63;
1628 mipspmu.read_counter = mipsxx_pmu_read_counter_64;
1629 mipspmu.write_counter = mipsxx_pmu_write_counter_64;
1632 mipspmu.max_period = (1ULL << 31) - 1;
1633 mipspmu.valid_count = (1ULL << 31) - 1;
1634 mipspmu.overflow = 1ULL << 31;
1635 mipspmu.read_counter = mipsxx_pmu_read_counter;
1636 mipspmu.write_counter = mipsxx_pmu_write_counter;
1640 on_each_cpu(reset_counters, (void *)(long)counters, 1);
1642 pr_cont("%s PMU enabled, %d %d-bit counters available to each "
1643 "CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
1644 irq < 0 ? " (share with timer interrupt)" : "");
1646 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1650 early_initcall(init_hw_perf_events);