/* arch/x86/kernel/cpu/perf_event_amd.c */
#include <linux/perf_event.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/apicdef.h>

#include "perf_event.h"

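/*
 * Each nonzero value below is a raw event-select encoding from the
 * AMD BKDGs: event code in bits 7:0, unit mask in bits 15:8 (e.g.
 * 0x0141 is event 0x41, "Data Cache Misses", with umask 0x01). By
 * x86 perf convention, 0 marks a combination the hardware does not
 * support (-ENOENT) and -1 a combination that makes no sense for
 * this cache (-EINVAL).
 */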
static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses          */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
		[ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]			= 0x0076,
  [PERF_COUNT_HW_INSTRUCTIONS]			= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]		= 0x0080,
  [PERF_COUNT_HW_CACHE_MISSES]			= 0x0081,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]		= 0x00c2,
  [PERF_COUNT_HW_BRANCH_MISSES]			= 0x00c3,
  [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
  [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
};

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}

static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.exclude_host && event->attr.exclude_guest)
		/*
		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
		 * and will count in both modes. We don't want to count in that
		 * case so we emulate no-counting by setting US = OS = 0.
		 */
		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
				      ARCH_PERFMON_EVENTSEL_OS);
	else if (event->attr.exclude_host)
		event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
	else if (event->attr.exclude_guest)
		event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return 0;
}

/*
 * AMD64 events are detected based on their event codes.
 */
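/*
 * Illustrative sketch: the 12-bit AMD64 event code is split across the
 * event-select register, bits 7:0 plus extended bits 35:32, which the
 * helper below folds into bits 11:8. A raw config of
 * ((1ULL << 32) | 0xd6), for instance, yields event code 0x1d6.
 */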
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}

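/*
 * NB events occupy event codes 0xEx/0xFx, so testing that bits 7:5 of
 * the event-select low byte are all set is enough to recognize them.
 */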
static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * only care about NB events
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (nb->owners[i] == event) {
			cmpxchg(nb->owners+i, event, NULL);
			break;
		}
	}
}

 /*
  * AMD64 NorthBridge events need special treatment because
  * counter access needs to be synchronized across all cores
  * of a package. Refer to BKDG section 3.12
  *
  * NB events are events measuring L3 cache and HyperTransport
  * traffic. They are identified by an event code >= 0xe0.
  * They measure events on the NorthBridge, which is shared
  * by all cores on a package. NB events are counted on a
  * shared set of counters. When a NB event is programmed
  * in a counter, the data actually comes from a shared
  * counter. Thus, access to those counters needs to be
  * synchronized.
  *
  * We implement the synchronization such that no two cores
  * can be measuring NB events using the same counters. Thus,
  * we maintain a per-NB allocation table. The available slot
  * is propagated using the event_constraint structure.
  *
  * We provide only one choice for each NB event based on
  * the fact that only NB events have restrictions. Consequently,
  * if a counter is available, there is a guarantee the NB event
  * will be assigned to it. If no slot is available, an empty
  * constraint is returned and scheduling will eventually fail
  * for this event.
  *
  * Note that all cores attached to the same NB compete for the
  * same counters to host NB events, which is why we use atomic
  * ops. Some multi-chip CPUs may have more than one NB.
  *
  * Given that resources are allocated (cmpxchg), they must be
  * eventually freed for others to use. This is accomplished by
  * calling amd_put_event_constraints().
  *
  * Non-NB events are not impacted by this restriction.
  */
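 /*
  * A small scenario, for illustration: with two cores racing to
  * schedule an NB event on a four-counter NB, both scan owners[]
  * below; the cmpxchg() guarantees that only one core claims a
  * given slot, and the loser simply advances to the next index,
  * wrapping around the table once before giving up.
  */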
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old = NULL;
	int max = x86_pmu.num_counters;
	int i, j, k = -1;

	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return &unconstrained;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for (i = 0; i < max; i++) {
		/*
		 * keep track of first free slot
		 */
		if (k == -1 && !nb->owners[i])
			k = i;

		/* already present, reuse */
		if (nb->owners[i] == event)
			goto done;
	}
	/*
	 * not present, so grab a new slot, starting at:
	 */
	if (hwc->idx != -1) {
		/* previous assignment */
		i = hwc->idx;
	} else if (k != -1) {
		/* start from free slot found */
		i = k;
	} else {
		/*
		 * event not found, no slot found in
		 * first pass, try again from the
		 * beginning
		 */
		i = 0;
	}
	j = i;
	do {
		old = cmpxchg(nb->owners+i, NULL, event);
		if (!old)
			break;
		if (++i == max)
			i = 0;
	} while (i != j);
done:
	if (!old)
		return &nb->event_constraints[i];

	return &emptyconstraint;
}

static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
			  cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}

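/*
 * CPU hotplug preparation: runs before the CPU comes online. A single
 * amd_nb structure is pre-allocated per CPU here; whether it is kept
 * or replaced by a shared one is decided later in
 * amd_pmu_cpu_starting(). Returning NOTIFY_BAD aborts the hotplug
 * operation.
 */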
static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	WARN_ON_ONCE(cpuc->amd_nb);

	if (boot_cpu_data.x86_max_cores < 2)
		return NOTIFY_OK;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (!cpuc->amd_nb)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

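/*
 * On bring-up, look for another online CPU that sits on the same
 * northbridge; if one exists, adopt its shared amd_nb and queue our
 * pre-allocated copy for freeing (kfree_on_online), otherwise keep
 * our own and stamp it with this node's id.
 */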
static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct amd_nb *nb;
	int i, nb_id;

	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;

	if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			cpuc->kfree_on_online = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}

static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}

static __initconst const struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints,
	.put_event_constraints	= amd_put_event_constraints,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,
};

/* AMD Family 15h */

#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL
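/*
 * Note: the "..." in the defines above is the GCC case-range
 * extension; each macro expands inside a switch label, so e.g.
 * "case AMD_EVENT_FP:" in amd_get_event_constraints_f15h() becomes
 * "case 0x000 ... 0x010:".
 */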

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0A0	---
 * 0x0B0	---
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x031	LS	PERF_CTL[2:0] (**)
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	CU	PERF_CTL[2:0]
 * 0x046	CU	PERF_CTL[2:0]
 * 0x054	CU	PERF_CTL[2:0]
 * 0x055	CU	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1C0	EX	PERF_CTL[5:3]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*)  depending on the umask all FPU counters may be used
 * (**) only one unitmask enabled at a time
 */

static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
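/*
 * The second EVENT_CONSTRAINT() argument is a counter bitmask that
 * matches the PERF_CTL ranges in the table above: 0x07 = counters 2-0
 * (PMC20), 0x09 = counters 3 and 0 (PMC30), 0x3F = counters 5-0
 * (PMC50) and 0x38 = counters 5-3 (PMC53).
 */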

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		case 0x031:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				return &amd_f15_PMC20;
			return &emptyconstraint;
		case 0x1C0:
			return &amd_f15_PMC53;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		/* not yet implemented */
		return &emptyconstraint;
	default:
		return &emptyconstraint;
	}
}

static __initconst const struct x86_pmu amd_pmu_f15h = {
	.name			= "AMD Family 15h",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_F15H_PERF_CTL,
	.perfctr		= MSR_F15H_PERF_CTR,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS_F15H,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints_f15h,
	/* northbridge counters not yet implemented: */
#if 0
	.put_event_constraints	= amd_put_event_constraints,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_dead		= amd_pmu_cpu_dead,
#endif
	.cpu_starting		= amd_pmu_cpu_starting,
};

__init int amd_pmu_init(void)
{
	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	/*
	 * If the core performance counter extensions exist, it must be
	 * family 15h, otherwise fail. See x86_pmu_addr_offset().
	 */
	switch (boot_cpu_data.x86) {
	case 0x15:
		if (!cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu_f15h;
		break;
	default:
		if (cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu;
		break;
	}

	/* Events are common for all AMDs */
	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));

	return 0;
}

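/*
 * The two helpers below are exported for the host-side SVM code.
 * perf_ctr_virt_mask is a disable mask applied in the shared x86
 * enable path when the event-select MSR is written: with
 * virtualization enabled it is cleared so the host-only/guest-only
 * bits from amd_pmu_hw_config() reach the hardware; with it disabled,
 * the Host-only bit is stripped so host-only counting keeps working
 * without SVM (see the comment in amd_pmu_disable_virt()).
 */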
void amd_pmu_enable_virt(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	cpuc->perf_ctr_virt_mask = 0;

	/* Reload all events */
	x86_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);

void amd_pmu_disable_virt(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * We only mask out the Host-only bit so that host-only counting works
	 * when SVM is disabled. If someone sets up a guest-only counter when
	 * SVM is disabled the Guest-only bit still gets set and the counter
	 * will not count anything.
	 */
	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;

	/* Reload all events */
	x86_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);