#ifdef CONFIG_CPU_SUP_AMD

static DEFINE_RAW_SPINLOCK(amd_nb_lock);

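/*
 * Note (added for clarity, not part of the original file): amd_nb_lock
 * serializes allocation, sharing and teardown of the per-node amd_nb
 * structures in the CPU hotplug callbacks further down in this file.
 */
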
static __initconst u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses         */
		[ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses           */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system  */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts   */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled  */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches   */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses :IC+DC      */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses         */
		[ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DTLB Miss    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches         */
		[ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.       */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

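/*
 * Note (added for clarity, based on how the generic x86 perf code
 * consumes such tables): an entry of 0 marks a cache/op/result
 * combination that is not supported on AMD, while -1 marks one that is
 * not a valid combination at all; amd_pmu_init() below copies this
 * table into the generic hw_cache_event_ids table.
 */
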
/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0080,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x0081,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}
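
/*
 * Example (added for illustration): the generic event
 * PERF_COUNT_HW_CPU_CYCLES is mapped by amd_pmu_event_map() to the AMD
 * event select 0x0076 ("CPU Clocks not Halted"), which the common x86
 * code then programs into one of the EVNTSEL MSRs referenced below.
 */
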
static u64 amd_pmu_raw_event(u64 hw_event)
{
#define K7_EVNTSEL_EVENT_MASK	0xF000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
#define K7_EVNTSEL_INV_MASK	0x000800000ULL
#define K7_EVNTSEL_REG_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_EDGE_MASK  |	\
	 K7_EVNTSEL_INV_MASK   |	\
	 K7_EVNTSEL_REG_MASK)

	return hw_event & K7_EVNTSEL_MASK;
}

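/*
 * Illustrative example (values made up for this comment): a raw config
 * of 0x5300c0 carries event select 0xc0 plus the USR, OS, INT and EN
 * control bits; amd_pmu_raw_event(0x5300c0) returns 0x00c0 because
 * only the event select, unit mask, edge, invert and counter-mask
 * fields survive the filter, while the privilege and enable bits are
 * managed by the generic x86 perf code.
 */
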
/* AMD64 events are detected based on their event codes. */
static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}

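/*
 * Example (illustrative, not part of the original comment): an event
 * select whose low byte falls in the 0xe0-0xff range, such as 0xe0
 * "DRAM Accesses" on Family 10h, is treated as a NorthBridge event
 * here, whereas 0x76 "CPU Clocks not Halted" is not.
 */
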
static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/* only care about NB events */
	if (!(nb && amd_is_nb_event(hwc)))
		return;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_events; i++) {
		if (nb->owners[i] == event) {
			cmpxchg(nb->owners+i, event, NULL);
			break;
		}
	}
}
/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12.
 *
 * NB events are events measuring L3 cache, HyperTransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBridge which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When a NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the
 * same counters to host NB events; this is why we use atomic ops.
 * Some multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling amd_put_event_constraints().
 *
 * Non-NB events are not impacted by this restriction.
 */
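/*
 * Illustration (hypothetical example, not part of the original
 * comment): assume a node with 4 counters shared by cores A and B. If
 * core A has already claimed slot 0 for an NB event, the shared table
 * looks like
 *
 *	nb->owners[] = { eventA, NULL, NULL, NULL }
 *
 * and core B's cmpxchg() can only succeed on slots 1-3, so the two
 * cores never program the same shared NB counter.
 */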
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old = NULL;
	int max = x86_pmu.num_events;
	int i, j, k = -1;

	/* if not NB event or no NB, then no constraints */
	if (!(nb && amd_is_nb_event(hwc)))
		return &unconstrained;
	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for (i = 0; i < max; i++) {
		/* keep track of first free slot */
		if (k == -1 && !nb->owners[i])
			k = i;
		/* already present, reuse */
		if (nb->owners[i] == event)
			goto done;
	}
	/*
	 * not present, so grab a new slot
	 * starting either at:
	 */
	if (hwc->idx != -1) {
		/* previous assignment */
		i = hwc->idx;
	} else if (k != -1) {
		/* start from free slot found */
		i = k;
	} else {
		/*
		 * event not found, no slot found in
		 * first pass, try again from the
		 * beginning
		 */
		i = 0;
	}
	j = i;
	do {
		old = cmpxchg(nb->owners+i, NULL, event);
		if (!old)
			break;
		if (++i == max)
			i = 0;
	} while (i != j);
done:
	if (!old)
		return &nb->event_constraints[i];

	return &emptyconstraint;
}

static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
{
	struct amd_nb *nb;
	int i;

	nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL);
	if (!nb)
		return NULL;
	memset(nb, 0, sizeof(*nb));
	nb->nb_id = nb_id;
	/* initialize all possible NB constraints */
	for (i = 0; i < x86_pmu.num_events; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}
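
/*
 * Note (added for clarity, not part of the original comments): after
 * the loop above, constraint i allows exactly one counter: only bit i
 * of idxmsk is set and its weight is 1, so an NB event that wins slot
 * i in nb->owners[] can only be scheduled on hardware counter i on
 * every core of the node.
 */
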
static void amd_pmu_cpu_online(int cpu)
{
	struct cpu_hw_events *cpu1, *cpu2;
	struct amd_nb *nb = NULL;
	int i, nb_id;

	if (boot_cpu_data.x86_max_cores < 2)
		return;
	/*
	 * function may be called too early in the
	 * boot process, in which case nb_id is bogus
	 */
	nb_id = amd_get_nb_id(cpu);
	if (nb_id == BAD_APICID)
		return;
	cpu1 = &per_cpu(cpu_hw_events, cpu);
	raw_spin_lock(&amd_nb_lock);
	for_each_online_cpu(i) {
		cpu2 = &per_cpu(cpu_hw_events, i);
		nb = cpu2->amd_nb;
		if (!nb)
			continue;
		if (nb->nb_id == nb_id)
			goto found;
	}
	nb = amd_alloc_nb(cpu, nb_id);
	if (!nb) {
		pr_err("perf_events: failed NB allocation for CPU%d\n", cpu);
		raw_spin_unlock(&amd_nb_lock);
		return;
	}
found:
	nb->refcnt++;
	cpu1->amd_nb = nb;
	raw_spin_unlock(&amd_nb_lock);
}

static void amd_pmu_cpu_offline(int cpu)
{
	struct cpu_hw_events *cpuhw;

	if (boot_cpu_data.x86_max_cores < 2)
		return;
	cpuhw = &per_cpu(cpu_hw_events, cpu);
	raw_spin_lock(&amd_nb_lock);
	if (cpuhw->amd_nb) {
		if (--cpuhw->amd_nb->refcnt == 0)
			kfree(cpuhw->amd_nb);
		cpuhw->amd_nb = NULL;
	}
	raw_spin_unlock(&amd_nb_lock);
}

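/*
 * Note (added for clarity): amd_pmu_cpu_online() and
 * amd_pmu_cpu_offline() pair up through nb->refcnt; the last core of a
 * node to go offline drops the reference count to zero and frees the
 * shared amd_nb structure, all under amd_nb_lock.
 */
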
static __initconst struct x86_pmu amd_pmu = {
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
	.raw_event		= amd_pmu_raw_event,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.event_mask		= (1ULL << 48) - 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
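	/*
	 * Note (added for clarity): the counters are 48 bits wide (see
	 * event_mask above) and are programmed with -period, so capping
	 * max_period at 2^47 - 1 keeps the start value's bit 47 set and
	 * lets the overflow handler treat a cleared bit 47 as the
	 * "counter wrapped" indicator mentioned in the comment above.
	 */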
	.get_event_constraints	= amd_get_event_constraints,
	.put_event_constraints	= amd_put_event_constraints,

	.cpu_prepare		= amd_pmu_cpu_online,
	.cpu_dead		= amd_pmu_cpu_offline,
};

static __init int amd_pmu_init(void)
{
	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;
	x86_pmu = amd_pmu;
	/* Events are common for all AMDs */
	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));

	return 0;
}

#else /* CONFIG_CPU_SUP_AMD */

static int amd_pmu_init(void)