From 90f91ae1598200e5f650c5ddb1d7165398cf7f5a Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Wed, 15 Feb 2012 18:48:22 +0000
Subject: [PATCH] powerpc/perf: power_pmu_start restores incorrect values,
 breaking frequency events

commit 9a45a9407c69d068500923480884661e2b9cc421 upstream.

perf on POWER stopped working after commit e050e3f0a71b (perf: Fix
broken interrupt rate throttling). That patch exposed a bug in the
POWER perf_events code.

Since the PMCs count upwards and take an exception when the top bit
is set, we want to write 0x80000000 - left in power_pmu_start. We
were instead programming in left which effectively disables the
counter until we eventually hit 0x80000000. This could take seconds
or longer.

With the patch applied I get the expected number of samples:

          SAMPLE events:       9948

Signed-off-by: Anton Blanchard
Acked-by: Paul Mackerras
Signed-off-by: Benjamin Herrenschmidt
Signed-off-by: Greg Kroah-Hartman
---
 arch/powerpc/kernel/perf_event.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 10a140f82cb8..64483fde95c6 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -865,6 +865,7 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
 {
 	unsigned long flags;
 	s64 left;
+	unsigned long val;
 
 	if (!event->hw.idx || !event->hw.sample_period)
 		return;
@@ -880,7 +881,12 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
 	event->hw.state = 0;
 
 	left = local64_read(&event->hw.period_left);
-	write_pmc(event->hw.idx, left);
+
+	val = 0;
+	if (left < 0x80000000L)
+		val = 0x80000000L - left;
+
+	write_pmc(event->hw.idx, val);
 
 	perf_event_update_userpage(event);
 	perf_pmu_enable(event->pmu);
--
2.39.2
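
For illustration only, and not part of the patch above: a minimal standalone C
sketch of the counter-programming arithmetic the fix performs. It uses a
hypothetical write_pmc_sketch() stand-in for the kernel's write_pmc(), and
shows why programming 'left' directly delays the overflow interrupt while
programming 0x80000000 - left makes it fire after exactly 'left' more events.

#include <stdio.h>

/* Hypothetical stand-in for the kernel's write_pmc(); just prints what
 * would be programmed and how many events remain until the top bit sets. */
static void write_pmc_sketch(int idx, unsigned long val)
{
	printf("PMC%d <- 0x%08lx (overflow interrupt after %lu events)\n",
	       idx, val, 0x80000000UL - val);
}

int main(void)
{
	long long left = 100000;	/* events remaining in the sample period */
	unsigned long val = 0;

	/* Buggy behaviour: programming 'left' directly means the counter has
	 * to count from 100000 all the way up to 0x80000000 before the
	 * performance monitor exception is taken -- seconds or longer. */
	write_pmc_sketch(1, (unsigned long)left);

	/* Fixed behaviour: program 0x80000000 - left so bit 31 becomes set
	 * (and the interrupt is taken) after exactly 'left' more events. */
	if (left < 0x80000000LL)
		val = 0x80000000UL - left;
	write_pmc_sketch(1, val);

	return 0;
}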