MSR reads and writes are expensive. This patch adds checks to avoid
them where possible: counters that are not active are skipped
entirely, and the event-select MSR is only written back when its
enable bit actually has to change.
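
A minimal sketch of the pattern in plain C (not the kernel code
itself; NR_COUNTERS, ENABLE_BIT, read_msr() and write_msr() are
hypothetical stand-ins for nr_counters_generic,
ARCH_PERFMON_EVENTSEL0_ENABLE, rdmsrl() and wrmsrl()):

	#include <stdbool.h>
	#include <stdint.h>

	#define NR_COUNTERS 4
	#define ENABLE_BIT  (1ULL << 22)	/* stand-in enable bit */

	static uint64_t fake_msr[NR_COUNTERS];

	/* Stand-ins for rdmsrl()/wrmsrl(); on real hardware each
	 * access costs on the order of tens to hundreds of cycles,
	 * which is what the checks below avoid. */
	static uint64_t read_msr(int idx)          { return fake_msr[idx]; }
	static void write_msr(int idx, uint64_t v) { fake_msr[idx] = v; }

	static void disable_all(const bool *active)
	{
		int idx;

		for (idx = 0; idx < NR_COUNTERS; idx++) {
			uint64_t val;

			if (!active[idx])		/* skip the read entirely */
				continue;
			val = read_msr(idx);
			if (!(val & ENABLE_BIT))	/* skip the write if already off */
				continue;
			write_msr(idx, val & ~ENABLE_BIT);
		}
	}

	int main(void)
	{
		bool active[NR_COUNTERS] = { true, false, true, false };

		fake_msr[0] = ENABLE_BIT;
		disable_all(active);
		return 0;
	}
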
[ Impact: micro-optimization on AMD CPUs ]
Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-5-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 	for (idx = 0; idx < nr_counters_generic; idx++) {
 		u64 val;
 
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
 		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) {
-			val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
-			wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		}
+		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
+			continue;
+		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 		return;
 
 	for (idx = 0; idx < nr_counters_generic; idx++) {
-		if (test_bit(idx, cpuc->active_mask)) {
-			u64 val;
-
-			rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-			val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-			wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		}
+		u64 val;
+
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
+		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+			continue;
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
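
For reference, the enable loop assembled from the hunk above reads as
follows once the patch is applied:

	for (idx = 0; idx < nr_counters_generic; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
			continue;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}

Both the disable and the enable path now follow the same shape: bail
out early for counters that are not active, and only write the
event-select MSR when the enable bit actually has to change.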