perf_counter, x86: rework pmc_amd_save_disable_all() and pmc_amd_restore_all()
author    Robert Richter <robert.richter@amd.com>
          Wed, 29 Apr 2009 10:47:01 +0000 (12:47 +0200)
committer Ingo Molnar <mingo@elte.hu>
          Wed, 29 Apr 2009 12:51:02 +0000 (14:51 +0200)
MSR reads and writes are expensive. This patch adds checks to avoid
their use where possible.

[ Impact: micro-optimization on AMD CPUs ]
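
For readability, here are the two reworked loops pieced together from the
hunks below, with comments marking which MSR access each check avoids (the
surrounding function bodies are unchanged and omitted):

	/* pmc_amd_save_disable_all(): disable active, currently enabled counters */
	for (idx = 0; idx < nr_counters_generic; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;		/* counter unused: no MSR access at all */
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;		/* already disabled: skip the MSR write */
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}

	/* pmc_amd_restore_all(): re-enable active counters that are still off */
	for (idx = 0; idx < nr_counters_generic; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;		/* counter unused: no MSR access at all */
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
			continue;		/* already enabled: skip the MSR write */
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}

In both loops the rdmsrl() is now done only for counters marked active, and
the wrmsrl() only when the enable bit actually needs to change.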

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-5-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index d6d6529..75a0903 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -334,11 +334,13 @@ static u64 pmc_amd_save_disable_all(void)
        for (idx = 0; idx < nr_counters_generic; idx++) {
                u64 val;
 
+               if (!test_bit(idx, cpuc->active_mask))
+                       continue;
                rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-               if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) {
-                       val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
-                       wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
-               }
+               if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
+                       continue;
+               val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+               wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
        }
 
        return enabled;
@@ -372,13 +374,15 @@ static void pmc_amd_restore_all(u64 ctrl)
                return;
 
        for (idx = 0; idx < nr_counters_generic; idx++) {
-               if (test_bit(idx, cpuc->active_mask)) {
-                       u64 val;
+               u64 val;
 
-                       rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-                       val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-                       wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
-               }
+               if (!test_bit(idx, cpuc->active_mask))
+                       continue;
+               rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
+               if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+                       continue;
+               val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+               wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
        }
 }
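
To make the saving concrete, here is a minimal, self-contained user-space
sketch of the disable pass that counts register accesses; the
fake_rdmsrl()/fake_wrmsrl() helpers, the msr[] array and the bit layout are
hypothetical stand-ins for illustration only, not kernel code:

#include <stdio.h>
#include <stdint.h>

#define NR_COUNTERS	4
#define EVNTSEL_ENABLE	(1ULL << 22)	/* stand-in for ARCH_PERFMON_EVENTSEL0_ENABLE */

/* Hypothetical stand-ins for rdmsrl()/wrmsrl(); each access is counted. */
static uint64_t msr[NR_COUNTERS];
static unsigned int nr_reads, nr_writes;

static uint64_t fake_rdmsrl(int idx)
{
	nr_reads++;
	return msr[idx];
}

static void fake_wrmsrl(int idx, uint64_t val)
{
	nr_writes++;
	msr[idx] = val;
}

int main(void)
{
	unsigned long active_mask = 0x3;	/* only counters 0 and 1 are in use */
	int idx;

	msr[0] = EVNTSEL_ENABLE;		/* counter 0 enabled, counter 1 already off */

	/* Disable pass, shaped like the reworked pmc_amd_save_disable_all() loop:
	 * inactive counters are skipped entirely, and the write is skipped when
	 * the enable bit is already clear. */
	for (idx = 0; idx < NR_COUNTERS; idx++) {
		uint64_t val;

		if (!(active_mask & (1UL << idx)))
			continue;
		val = fake_rdmsrl(idx);
		if (!(val & EVNTSEL_ENABLE))
			continue;
		val &= ~EVNTSEL_ENABLE;
		fake_wrmsrl(idx, val);
	}

	/* Prints "MSR reads: 2, writes: 1": the active_mask check avoided two
	 * reads (counters 2 and 3) and the enable-bit check avoided one write
	 * (counter 1 was already disabled). */
	printf("MSR reads: %u, writes: %u\n", nr_reads, nr_writes);
	return 0;
}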