#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>

#include "perf_event.h"
static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses          */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
		[ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]			= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]			= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]		= 0x0080,
	[PERF_COUNT_HW_CACHE_MISSES]			= 0x0081,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]		= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]			= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]		= 0x00d0, /* "Decoder empty" event */
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]		= 0x00d1, /* "Dispatch stalls" event */
};
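
/*
 * Translate a generic hardware event id (PERF_COUNT_HW_*) into the raw
 * AMD event-select value from the table above.
 */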
static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}
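
/*
 * Validate and complete an event's configuration: run the generic x86
 * setup first, then for raw events merge in the user-supplied config
 * bits, limited to the fields valid in an AMD64 event-select register.
 */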
static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return 0;
}

/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}
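
/*
 * NB (NorthBridge) events are those whose event select low byte lies in
 * the 0xE0-0xFF range; see the block comment above
 * amd_get_event_constraints() for why they need special handling.
 */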
static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}
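
/*
 * Release the shared NB counter slot owned by this event, if any, so that
 * other cores on the node can claim it. Counterpart of the allocation done
 * in amd_get_event_constraints() below.
 */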
static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * only care about NB events
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (nb->owners[i] == event) {
			cmpxchg(nb->owners+i, event, NULL);
			break;
		}
	}
}

/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12.
 *
 * NB events are events measuring L3 cache, HyperTransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBridge which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When a NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the same
 * counters to host NB events, this is why we use atomic ops. Some
 * multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling amd_put_event_constraints().
 *
 * Non-NB events are not impacted by this restriction.
 */
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old = NULL;
	int max = x86_pmu.num_counters;
	int i, j, k = -1;

	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return &unconstrained;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for (i = 0; i < max; i++) {
		/*
		 * keep track of first free slot
		 */
		if (k == -1 && !nb->owners[i])
			k = i;

		/* already present, reuse */
		if (nb->owners[i] == event)
			goto done;
	}
	/*
	 * not present, so grab a new slot
	 * starting either at:
	 */
	if (hwc->idx != -1) {
		/* previous assignment */
		i = hwc->idx;
	} else if (k != -1) {
		/* start from free slot found */
		i = k;
	} else {
		/*
		 * event not found, no slot found in
		 * first pass, try again from the
		 * beginning
		 */
		i = 0;
	}
	j = i;
	do {
		old = cmpxchg(nb->owners+i, NULL, event);
		if (!old)
			break;
		if (++i == max)
			i = 0;
	} while (i != j);
done:
	if (!old)
		return &nb->event_constraints[i];

	return &emptyconstraint;
}
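
/*
 * Allocate and initialize the per-northbridge bookkeeping structure for a
 * CPU. Every counter gets a one-bit constraint of weight 1, so an NB event
 * ends up pinned to exactly one slot of the shared counter set.
 */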
static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
			  cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}
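
/*
 * CPU hotplug "prepare" callback: allocate this CPU's NB structure before
 * it comes online. Single-core parts have nothing to share, so the
 * allocation is skipped there.
 */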
static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	WARN_ON_ONCE(cpuc->amd_nb);

	if (boot_cpu_data.x86_max_cores < 2)
		return NOTIFY_OK;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (!cpuc->amd_nb)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}
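
/*
 * CPU hotplug "starting" callback: if another online CPU already carries
 * the amd_nb for this node id, adopt it (the structure allocated in
 * amd_pmu_cpu_prepare() is queued for freeing via kfree_on_online);
 * otherwise this CPU's structure becomes the node's and gets its id.
 */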
static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct amd_nb *nb;
	int i, nb_id;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			cpuc->kfree_on_online = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}
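
/*
 * CPU hotplug "dead" callback: drop this CPU's reference on the shared NB
 * structure and free it when the last user goes away.
 */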
static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}

static __initconst const struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= 4,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints,
	.put_event_constraints	= amd_put_event_constraints,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,
};

/* AMD Family 15h */
#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0A0	---
 * 0x0B0	---
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	CU	PERF_CTL[2:0]
 * 0x046	CU	PERF_CTL[2:0]
 * 0x054	CU	PERF_CTL[2:0]
 * 0x055	CU	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*) depending on the umask all FPU counters may be used
 */
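
/*
 * Counter masks for family 15h core events: the second argument of
 * EVENT_CONSTRAINT() is a bitmask of the PERF_CTL counters an event may
 * use, e.g. 0x01 = counter 0 only, 0x09 = counters 3 and 0, 0x38 =
 * counters 5:3, 0x3F = any of counters 5:0.
 */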
static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
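
/*
 * Map a family 15h core event to one of the constraint sets above, using
 * the type nibble of its event code plus the exceptions listed in the
 * mapping table.
 */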
static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		/* not yet implemented */
		return &emptyconstraint;
	default:
		return &emptyconstraint;
	}
}

static __initconst const struct x86_pmu amd_pmu_f15h = {
	.name			= "AMD Family 15h",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_F15H_PERF_CTL,
	.perfctr		= MSR_F15H_PERF_CTR,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= 6,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints_f15h,
	/* northbridge counters not yet implemented: */
#if 0
	.put_event_constraints	= amd_put_event_constraints,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,
#endif
};

__init int amd_pmu_init(void)
{
	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	/*
	 * If core performance counter extensions exist, it must be
	 * family 15h, otherwise fail. See x86_pmu_addr_offset().
	 */
	switch (boot_cpu_data.x86) {
	case 0x15:
		if (!cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu_f15h;
		break;
	default:
		if (cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu;
		break;
	}

	/* Events are common for all AMDs */
	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));

	return 0;
}