/* arch/x86/kernel/cpu/perf_event_amd.c */

#ifdef CONFIG_CPU_SUP_AMD

static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses          */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0080,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x0081,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
};

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}

static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return 0;
}
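
/*
 * Hedged note: AMD64_RAW_EVENT_MASK (defined in the perf headers) is
 * assumed here to cover the full AMD64 field layout - unit mask,
 * modifiers and the 12-bit event select split across config bits
 * [7:0] and [35:32] - so OR-ing the masked raw config into hw.config
 * lets raw events reach the extended event space.
 */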

/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}
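
/*
 * Worked example (illustrative config value): 0x1000000d6 has low
 * event-select byte 0xd6 and extended event-select bits [35:32] = 0x1:
 *
 *	((0x1000000d6ULL >> 24) & 0x0f00) = 0x100
 *	( 0x1000000d6ULL        & 0x00ff) = 0x0d6
 *
 * giving the 12-bit event code 0x1d6.
 */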

static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}
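
/*
 * I.e. any low event-select byte in the 0xe0-0xff range - the
 * NorthBridge (DRAM, HyperTransport, ...) events on these parts -
 * satisfies (config & 0xe0) == 0xe0.
 */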

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * only care about NB events
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return;

	/*
	 * need to scan the whole list because the event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because an event can only
	 * be removed on one CPU at a time AND the PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (nb->owners[i] == event) {
			cmpxchg(nb->owners+i, event, NULL);
			break;
		}
	}
}

/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12
 *
 * NB events are events measuring L3 cache, HyperTransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBridge, which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When an NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the
 * same counters to host NB events; this is why we use atomic
 * ops. Some multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling amd_put_event_constraints().
 *
 * Non-NB events are not impacted by this restriction.
 */
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old = NULL;
	int max = x86_pmu.num_counters;
	int i, j, k = -1;

	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return &unconstrained;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for (i = 0; i < max; i++) {
		/*
		 * keep track of first free slot
		 */
		if (k == -1 && !nb->owners[i])
			k = i;

		/* already present, reuse */
		if (nb->owners[i] == event)
			goto done;
	}
	/*
	 * not present, so grab a new slot
	 * starting either at:
	 */
	if (hwc->idx != -1) {
		/* previous assignment */
		i = hwc->idx;
	} else if (k != -1) {
		/* start from free slot found */
		i = k;
	} else {
		/*
		 * event not found, no slot found in
		 * first pass, try again from the
		 * beginning
		 */
		i = 0;
	}
	j = i;
	do {
		old = cmpxchg(nb->owners+i, NULL, event);
		if (!old)
			break;
		if (++i == max)
			i = 0;
	} while (i != j);
done:
	if (!old)
		return &nb->event_constraints[i];

	return &emptyconstraint;
}
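
/*
 * Illustrative scenario (hypothetical events A and B): cores 0 and 1
 * share one NB with owners[] all NULL.  Core 0 schedules NB event A:
 * the scan finds free slot k = 0, the cmpxchg claims owners[0], and
 * &nb->event_constraints[0] pins A to counter 0 on every core.  Core 1
 * then schedules NB event B: slot 0 is taken, so B claims owners[1]
 * and is constrained to counter 1.  Once all slots are owned, a
 * further NB event loses every cmpxchg and gets &emptyconstraint.
 */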

static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
			  cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}
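
/*
 * With num_counters == 4 this yields four single-counter constraints:
 *
 *	event_constraints[0].idxmsk = 0x1	(counter 0 only)
 *	event_constraints[1].idxmsk = 0x2	(counter 1 only)
 *	event_constraints[2].idxmsk = 0x4	(counter 2 only)
 *	event_constraints[3].idxmsk = 0x8	(counter 3 only)
 *
 * so an NB event owning slot i can only be scheduled on counter i,
 * on every core attached to this NB.
 */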

static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	WARN_ON_ONCE(cpuc->amd_nb);

	if (boot_cpu_data.x86_max_cores < 2)
		return NOTIFY_OK;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (!cpuc->amd_nb)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct amd_nb *nb;
	int i, nb_id;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			kfree(cpuc->amd_nb);
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}
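
/*
 * E.g. the first core of a package to come online finds no online
 * sibling with a matching nb_id, keeps the amd_nb it allocated in
 * amd_pmu_cpu_prepare() and stamps it with its nb_id; each sibling
 * that follows finds that shared struct, frees its own pre-allocated
 * copy and bumps the refcnt.
 */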

static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}

static __initconst const struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= 4,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints,
	.put_event_constraints	= amd_put_event_constraints,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,
};

/* AMD Family 15h */

#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL
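
/*
 * The '...' ranges rely on GCC's case-range extension, so each
 * AMD_EVENT_* macro can be used directly as a switch case label,
 * e.g.:
 *
 *	case AMD_EVENT_FP:	=>  case 0x00000000ULL ... 0x00000010ULL:
 */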

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0A0	---
 * 0x0B0	---
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	CU	PERF_CTL[2:0]
 * 0x046	CU	PERF_CTL[2:0]
 * 0x054	CU	PERF_CTL[2:0]
 * 0x055	CU	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*) depending on the umask all FPU counters may be used
 */

static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
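
/*
 * The counter masks above decode as:
 *
 *	PMC0  = 0x01 -> PERF_CTL[0]
 *	PMC20 = 0x07 -> PERF_CTL[2:0]
 *	PMC3  = 0x08 -> PERF_CTL[3]
 *	PMC30 = 0x09 -> PERF_CTL[3] and PERF_CTL[0]
 *	PMC50 = 0x3F -> PERF_CTL[5:0]
 *	PMC53 = 0x38 -> PERF_CTL[5:3]
 */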

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		/* not yet implemented */
		return &emptyconstraint;
	default:
		return &emptyconstraint;
	}
}
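
/*
 * Worked example: event code 0x02E ("LS" group) lands in the
 * AMD_EVENT_LS arm, hits the 0x02E exception and returns
 * &amd_f15_PMC30, i.e. it may only run on PERF_CTL[3] or
 * PERF_CTL[0] - exactly the "0x02E LS PERF_CTL[3,0]" row in the
 * mapping table above.
 */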

static __initconst const struct x86_pmu amd_pmu_f15h = {
	.name			= "AMD Family 15h",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_F15H_PERF_CTL,
	.perfctr		= MSR_F15H_PERF_CTR,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= 6,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints_f15h,
	/* northbridge counters not yet implemented: */
#if 0
	.put_event_constraints	= amd_put_event_constraints,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,
#endif
};

static __init int amd_pmu_init(void)
{
	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	/*
	 * If the core performance counter extensions exist, it must be
	 * family 15h, otherwise fail. See x86_pmu_addr_offset().
	 */
	switch (boot_cpu_data.x86) {
	case 0x15:
		if (!cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu_f15h;
		break;
	default:
		if (cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu;
		break;
	}

	/* Events are common for all AMDs */
	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));

	return 0;
}

#else /* CONFIG_CPU_SUP_AMD */

static int amd_pmu_init(void)
{
	return 0;
}

#endif