Since constraints are specified on the event number, not on the event
number and unit mask combined, shorten the constraint masks so that we'll
actually match something.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
LKML-Reference: <20100127221121.967610372@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
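
The constraint cmask selects which bits of the event's config are compared
against the constraint code; if the cmask also covers the unit-mask bits, an
event programmed with a non-zero umask can never equal a constraint code that
carries only the event number. Below is a minimal stand-alone sketch of that
matching logic (not the kernel's actual code; the struct, mask values and the
example event/umask are purely illustrative):

/*
 * Sketch of cmask-based constraint matching; names and values are
 * illustrative, not the kernel's definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define EVTSEL_MASK  0x000000FFULL              /* event number bits only   */
#define UNIT_MASK    0x0000FF00ULL              /* unit mask (umask) bits   */
#define EVENT_MASK   (EVTSEL_MASK | UNIT_MASK)  /* event number + umask     */

struct constraint {
	uint64_t code;   /* event number the constraint applies to          */
	uint64_t cmask;  /* which config bits participate in the comparison */
};

static int matches(const struct constraint *c, uint64_t config)
{
	return (config & c->cmask) == c->code;
}

int main(void)
{
	/* hypothetical event: number 0x0b with umask 0x01 -> config 0x010b */
	uint64_t config = 0x0b | (0x01 << 8);

	struct constraint wide   = { .code = 0x0b, .cmask = EVENT_MASK };
	struct constraint narrow = { .code = 0x0b, .cmask = EVTSEL_MASK };

	/* Wide cmask keeps the umask bits: 0x010b != 0x0b, so no match.   */
	printf("EVENT_MASK  match: %d\n", matches(&wide, config));   /* 0 */
	/* Narrow cmask compares the event number alone: matches.          */
	printf("EVTSEL_MASK match: %d\n", matches(&narrow, config)); /* 1 */
	return 0;
}

With the cmask narrowed to the event-select bits, the comparison ignores the
umask and the constraint matches as intended.
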
INTEL_ARCH_INV_MASK| \
INTEL_ARCH_EDGE_MASK|\
INTEL_ARCH_UNIT_MASK|\
+ INTEL_ARCH_EVTSEL_MASK)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
.weight = HWEIGHT64((u64)(n)), \
}
-#define INTEL_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
+#define INTEL_EVENT_CONSTRAINT(c, n) \
+	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
-#define FIXED_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
+#define FIXED_EVENT_CONSTRAINT(c, n) \
+	EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
-#define EVENT_CONSTRAINT_END EVENT_CONSTRAINT(0, 0, 0)
+#define EVENT_CONSTRAINT_END \
+	EVENT_CONSTRAINT(0, 0, 0)
+
-#define for_each_event_constraint(e, c) for ((e) = (c); (e)->cmask; (e)++)
+#define for_each_event_constraint(e, c) \
+	for ((e) = (c); (e)->cmask; (e)++)
/*
* struct x86_pmu - generic x86 pmu