[pandora-kernel.git] arch/x86/kernel/cpu/perf_event_intel.c
/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/hardirq.h>
#include <asm/apic.h>

#include "perf_event.h"

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
  [PERF_COUNT_HW_CPU_CYCLES]            = 0x003c,
  [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x4f2e,
  [PERF_COUNT_HW_CACHE_MISSES]          = 0x412e,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
  [PERF_COUNT_HW_BUS_CYCLES]            = 0x013c,
};
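
/*
 * Note on the encodings above: in the architectural PERFEVTSEL layout
 * the low byte is the event select and bits 15:8 are the unit mask,
 * so e.g. 0x412e decodes as event 0x2e with umask 0x41 (LLC misses)
 * and 0x4f2e as event 0x2e with umask 0x4f (LLC references).
 */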

static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
        INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
        INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
        INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
        EVENT_CONSTRAINT_END
};
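
/*
 * In these tables, the second argument of INTEL_EVENT_CONSTRAINT() is a
 * bitmask of the general-purpose counters the event may be scheduled on:
 * 0x1 restricts it to counter 0, 0x2 to counter 1, 0x3 to either of the
 * first two. FIXED_EVENT_CONSTRAINT(event, idx) instead pins the event
 * to fixed-function counter idx.
 */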

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        /*
         * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
         * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
         * ratio between these counters.
         */
        /* FIXED_EVENT_CONSTRAINT(0x013c, 2),  CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
        INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
        INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
        INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
        INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
        INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
        INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
        EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
        INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
        INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
        INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
        INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
        INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
        EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
        INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
        EVENT_EXTRA_END
};

static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
        INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
        INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
        EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
        INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
        EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
        INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
        INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
        EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
        EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        EVENT_CONSTRAINT_END
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
        INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
        INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
        EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
        INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
        INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
        EVENT_EXTRA_END
};
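
/*
 * The extra_reg tables describe events (e.g. 0xb7, OFFCORE_RESPONSE_0)
 * that need a companion MSR programmed with a response-type bitmask;
 * the third argument of INTEL_EVENT_EXTRA_REG() is the mask of bits
 * that are valid to set in that MSR on the given microarchitecture.
 */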

static u64 intel_pmu_event_map(int hw_event)
{
        return intel_perfmon_event_map[hw_event];
}

static __initconst const u64 snb_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS        */
                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT              */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES       */
                [ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT        */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS          */
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_WRITE) ] = {
                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_PREFETCH) ] = {
                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
                [ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT         */
                [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK    */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
                [ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(NODE) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },

};

static __initconst const u64 westmere_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
                [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
                [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        /*
         * Use RFO, not WRITEBACK, because a write miss would typically occur
         * on RFO.
         */
        [ C(OP_WRITE) ] = {
                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_PREFETCH) ] = {
                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
                [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
                [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
                [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(NODE) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x01b7,
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x01b7,
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x01b7,
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
 },
};

/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD        (1 << 0)
#define NHM_DMND_RFO            (1 << 1)
#define NHM_DMND_IFETCH         (1 << 2)
#define NHM_DMND_WB             (1 << 3)
#define NHM_PF_DATA_RD          (1 << 4)
#define NHM_PF_DATA_RFO         (1 << 5)
#define NHM_PF_IFETCH           (1 << 6)
#define NHM_OFFCORE_OTHER       (1 << 7)
#define NHM_UNCORE_HIT          (1 << 8)
#define NHM_OTHER_CORE_HIT_SNP  (1 << 9)
#define NHM_OTHER_CORE_HITM     (1 << 10)
                                /* reserved */
#define NHM_REMOTE_CACHE_FWD    (1 << 12)
#define NHM_REMOTE_DRAM         (1 << 13)
#define NHM_LOCAL_DRAM          (1 << 14)
#define NHM_NON_DRAM            (1 << 15)

#define NHM_LOCAL               (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE              (NHM_REMOTE_DRAM)

#define NHM_DMND_READ           (NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE          (NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH       (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT      (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS     (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS   (NHM_L3_HIT|NHM_L3_MISS)
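
/*
 * Worked example of composing an OFFCORE_RESPONSE mask from the bits
 * above: a demand-load LLC miss is NHM_DMND_READ|NHM_L3_MISS, i.e.
 * bit 0 plus bits 12-15, giving 0xf001 for MSR_OFFCORE_RSP_0.
 */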

static __initconst const u64 nehalem_hw_cache_extra_regs
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
                [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
                [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
                [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
        },
 },
 [ C(NODE) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
                [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
                [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
                [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
        },
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
                [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
                [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        /*
         * Use RFO, not WRITEBACK, because a write miss would typically occur
         * on RFO.
         */
        [ C(OP_WRITE) ] = {
                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_PREFETCH) ] = {
                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
                [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
                [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
                [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(NODE) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x01b7,
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x01b7,
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x01b7,
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
 },
};

static __initconst const u64 core2_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
                [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
                [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
                [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
                [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
                [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
                [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
                [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
                [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};

static __initconst const u64 atom_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
                [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
                [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
                [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
                [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
                [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};

static void intel_pmu_disable_all(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

        if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
                intel_pmu_disable_bts();

        intel_pmu_pebs_disable_all();
        intel_pmu_lbr_disable_all();
}

static void intel_pmu_enable_all(int added)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        intel_pmu_pebs_enable_all();
        intel_pmu_lbr_enable_all();
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
                        x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

        if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
                struct perf_event *event =
                        cpuc->events[X86_PMC_IDX_FIXED_BTS];

                if (WARN_ON_ONCE(!event))
                        return;

                intel_pmu_enable_bts(event->hw.config);
        }
}

/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        static const unsigned long nhm_magic[4] = {
                0x4300B5,
                0x4300D2,
                0x4300B1,
                0x4300B1
        };
        struct perf_event *event;
        int i;

        /*
         * The erratum requires the following steps:
         * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
         * 2) Configure 4 PERFEVTSELx with the magic events and clear
         *    the corresponding PMCx;
         * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
         * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
         * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
         */

        /*
         * The real steps we choose are a little different from above.
         * A) To reduce MSR operations, we don't run step 1) as the
         *    registers are already cleared before this function is called;
         * B) Call x86_perf_event_update to save PMCx before configuring
         *    PERFEVTSELx with the magic number;
         * C) With step 5), we clear only when the PERFEVTSELx is
         *    not currently in use;
         * D) Call x86_perf_event_set_period to restore PMCx;
         */

        /* We always operate 4 pairs of PERF Counters */
        for (i = 0; i < 4; i++) {
                event = cpuc->events[i];
                if (event)
                        x86_perf_event_update(event);
        }

        for (i = 0; i < 4; i++) {
                wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
                wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
        }

        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

        for (i = 0; i < 4; i++) {
                event = cpuc->events[i];

                if (event) {
                        x86_perf_event_set_period(event);
                        __x86_pmu_enable_event(&event->hw,
                                        ARCH_PERFMON_EVENTSEL_ENABLE);
                } else
                        wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
        }
}

static void intel_pmu_nhm_enable_all(int added)
{
        if (added)
                intel_pmu_nhm_workaround();
        intel_pmu_enable_all(added);
}

static inline u64 intel_pmu_get_status(void)
{
        u64 status;

        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

        return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
        int idx = hwc->idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, mask;

        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_disable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
                intel_pmu_disable_bts();
                intel_pmu_drain_bts_buffer();
                return;
        }

        cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
        cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);

        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                intel_pmu_disable_fixed(hwc);
                return;
        }

        x86_pmu_disable_event(event);

        if (unlikely(event->attr.precise_ip))
                intel_pmu_pebs_disable(event);
}

static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
        int idx = hwc->idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, bits, mask;

        /*
         * Enable IRQ generation (0x8),
         * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
         * if requested:
         */
        bits = 0x8ULL;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
                bits |= 0x2;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
                bits |= 0x1;

        /*
         * ANY bit is supported in v3 and up
         */
        if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
                bits |= 0x4;

        bits <<= (idx * 4);
        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        ctrl_val |= bits;
        wrmsrl(hwc->config_base, ctrl_val);
}
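
/*
 * For instance, fixed counter 0 counting in both USR and OS mode with
 * PMI enabled yields bits = 0xb above; each fixed counter idx owns the
 * 4-bit field at bit position idx * 4 of MSR_ARCH_PERFMON_FIXED_CTR_CTRL,
 * which is why bits and mask are shifted by idx * 4.
 */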

static void intel_pmu_enable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
                if (!__this_cpu_read(cpu_hw_events.enabled))
                        return;

                intel_pmu_enable_bts(hwc->config);
                return;
        }

        if (event->attr.exclude_host)
                cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
        if (event->attr.exclude_guest)
                cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);

        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                intel_pmu_enable_fixed(hwc);
                return;
        }

        if (unlikely(event->attr.precise_ip))
                intel_pmu_pebs_enable(event);

        __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
int intel_pmu_save_and_restart(struct perf_event *event)
{
        x86_perf_event_update(event);
        return x86_perf_event_set_period(event);
}

static void intel_pmu_reset(void)
{
        struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
        unsigned long flags;
        int idx;

        if (!x86_pmu.num_counters)
                return;

        local_irq_save(flags);

        printk("clearing PMU state on CPU#%d\n", smp_processor_id());

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
                checking_wrmsrl(x86_pmu_event_addr(idx),  0ull);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
                checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

        if (ds)
                ds->bts_index = ds->bts_buffer_base;

        local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        int bit, loops;
        u64 status;
        int handled;

        perf_sample_data_init(&data, 0);

        cpuc = &__get_cpu_var(cpu_hw_events);

        /*
         * Some chipsets need to unmask the LVTPC in a particular spot
         * inside the nmi handler.  As a result, the unmasking was pushed
         * into all the nmi handlers.
         *
         * This handler doesn't seem to have any issues with the unmasking
         * so it was left at the top.
         */
        apic_write(APIC_LVTPC, APIC_DM_NMI);

        intel_pmu_disable_all();
        handled = intel_pmu_drain_bts_buffer();
        status = intel_pmu_get_status();
        if (!status) {
                intel_pmu_enable_all(0);
                return handled;
        }

        loops = 0;
again:
        intel_pmu_ack_status(status);
        if (++loops > 100) {
                WARN_ONCE(1, "perfevents: irq loop stuck!\n");
                perf_event_print_debug();
                intel_pmu_reset();
                goto done;
        }

        inc_irq_stat(apic_perf_irqs);

        intel_pmu_lbr_read();

        /*
         * CondChgd bit 63 doesn't mean any overflow status. Ignore
         * and clear the bit.
         */
        if (__test_and_clear_bit(63, (unsigned long *)&status)) {
                if (!status)
                        goto done;
        }

        /*
         * PEBS overflow sets bit 62 in the global status register
         */
        if (__test_and_clear_bit(62, (unsigned long *)&status)) {
                handled++;
                x86_pmu.drain_pebs(regs);
        }

        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_event *event = cpuc->events[bit];

                handled++;

                if (!test_bit(bit, cpuc->active_mask))
                        continue;

                if (!intel_pmu_save_and_restart(event))
                        continue;

                data.period = event->hw.last_period;

                if (perf_event_overflow(event, &data, regs))
                        x86_pmu_stop(event, 0);
        }

        /*
         * Repeat if there is more work to be done:
         */
        status = intel_pmu_get_status();
        if (status)
                goto again;

done:
        intel_pmu_enable_all(0);
        return handled;
}

static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        unsigned int hw_event, bts_event;

        if (event->attr.freq)
                return NULL;

        hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
        bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

        if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
                return &bts_constraint;

        return NULL;
}

static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
{
        if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
                return false;

        if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
                event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
                event->hw.config |= 0x01bb;
                event->hw.extra_reg.idx = EXTRA_REG_RSP_1;
                event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
        } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) {
                event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
                event->hw.config |= 0x01b7;
                event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
                event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
        }

        if (event->hw.extra_reg.idx == orig_idx)
                return false;

        return true;
}
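
/*
 * The 0x01b7/0x01bb encodings above are event 0xb7/0xbb with umask 0x01,
 * i.e. OFFCORE_RESPONSE_0 and OFFCORE_RESPONSE_1; rewriting the event
 * code is what retargets the config to the alternate response MSR.
 */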

/*
 * manage allocation of shared extra msr for certain events
 *
 * sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
                                   struct perf_event *event)
{
        struct event_constraint *c = &emptyconstraint;
        struct hw_perf_event_extra *reg = &event->hw.extra_reg;
        struct er_account *era;
        unsigned long flags;
        int orig_idx = reg->idx;

        /* already allocated shared msr */
        if (reg->alloc)
                return &unconstrained;

again:
        era = &cpuc->shared_regs->regs[reg->idx];
        /*
         * we use spin_lock_irqsave() to avoid lockdep issues when
         * passing a fake cpuc
         */
        raw_spin_lock_irqsave(&era->lock, flags);

        if (!atomic_read(&era->ref) || era->config == reg->config) {

                /* lock in msr value */
                era->config = reg->config;
                era->reg = reg->reg;

                /* one more user */
                atomic_inc(&era->ref);

                /* no need to reallocate during incremental event scheduling */
                reg->alloc = 1;

                /*
                 * All events using extra_reg are unconstrained.
                 * Avoids calling x86_get_event_constraints()
                 *
                 * Must revisit if extra_reg controlling events
                 * ever have constraints. Worst case we go through
                 * the regular event constraint table.
                 */
                c = &unconstrained;
        } else if (intel_try_alt_er(event, orig_idx)) {
                raw_spin_unlock_irqrestore(&era->lock, flags);
                goto again;
        }
        raw_spin_unlock_irqrestore(&era->lock, flags);

        return c;
}

static void
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
                                   struct hw_perf_event_extra *reg)
{
        struct er_account *era;

        /*
         * only put the constraint if the extra reg was actually
         * allocated. Also takes care of events which do
         * not use an extra shared reg.
         */
        if (!reg->alloc)
                return;

        era = &cpuc->shared_regs->regs[reg->idx];

        /* one fewer user */
        atomic_dec(&era->ref);

        /* allocate again next time */
        reg->alloc = 0;
}

static struct event_constraint *
intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
                              struct perf_event *event)
{
        struct event_constraint *c = NULL;

        if (event->hw.extra_reg.idx != EXTRA_REG_NONE)
                c = __intel_shared_reg_get_constraints(cpuc, event);

        return c;
}

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
        struct event_constraint *c;

        if (x86_pmu.event_constraints) {
                for_each_event_constraint(c, x86_pmu.event_constraints) {
                        if ((event->hw.config & c->cmask) == c->code)
                                return c;
                }
        }

        return &unconstrained;
}

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
        struct event_constraint *c;

        c = intel_bts_constraints(event);
        if (c)
                return c;

        c = intel_pebs_constraints(event);
        if (c)
                return c;

        c = intel_shared_regs_constraints(cpuc, event);
        if (c)
                return c;

        return x86_get_event_constraints(cpuc, event);
}

static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
                                        struct perf_event *event)
{
        struct hw_perf_event_extra *reg;

        reg = &event->hw.extra_reg;
        if (reg->idx != EXTRA_REG_NONE)
                __intel_shared_reg_put_constraints(cpuc, reg);
}

static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
                                        struct perf_event *event)
{
        intel_put_shared_regs_event_constraints(cpuc, event);
}

static int intel_pmu_hw_config(struct perf_event *event)
{
        int ret = x86_pmu_hw_config(event);

        if (ret)
                return ret;

        if (event->attr.precise_ip &&
            (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
                /*
                 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
                 * (0x003c) so that we can use it with PEBS.
                 *
                 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
                 * PEBS capable. However we can use INST_RETIRED.ANY_P
                 * (0x00c0), which is a PEBS capable event, to get the same
                 * count.
                 *
                 * INST_RETIRED.ANY_P counts the number of cycles that retire
                 * CNTMASK instructions. By setting CNTMASK to a value (16)
                 * larger than the maximum number of instructions that can be
                 * retired per cycle (4) and then inverting the condition, we
                 * count all cycles that retire 16 or fewer instructions, which
                 * is every cycle.
                 *
                 * Thereby we gain a PEBS capable cycle counter.
                 */
                u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */

                alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
                event->hw.config = alt_config;
        }
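
        /*
         * Decoding the constant above: 0x108000c0 is event 0xc0
         * (INST_RETIRED.ANY_P) with the invert bit (bit 23) set and a
         * counter mask of 0x10 in bits 31:24, matching the comment.
         */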

        if (event->attr.type != PERF_TYPE_RAW)
                return 0;

        if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
                return 0;

        if (x86_pmu.version < 3)
                return -EINVAL;

        if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
                return -EACCES;

        event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

        return 0;
}

struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
        if (x86_pmu.guest_get_msrs)
                return x86_pmu.guest_get_msrs(nr);
        *nr = 0;
        return NULL;
}
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);

static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;

        arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
        arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
        arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;

        *nr = 1;
        return arr;
}
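
/*
 * The host/guest pair above is meant for the hypervisor side; KVM, for
 * one, pulls it via perf_guest_get_msrs() to switch
 * MSR_CORE_PERF_GLOBAL_CTRL across VM-entry/exit, so events marked
 * exclude_guest or exclude_host are masked out of the respective value.
 */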

static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
        int idx;

        for (idx = 0; idx < x86_pmu.num_counters; idx++)  {
                struct perf_event *event = cpuc->events[idx];

                arr[idx].msr = x86_pmu_config_addr(idx);
                arr[idx].host = arr[idx].guest = 0;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                arr[idx].host = arr[idx].guest =
                        event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;

                if (event->attr.exclude_host)
                        arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
                else if (event->attr.exclude_guest)
                        arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
        }

        *nr = x86_pmu.num_counters;
        return arr;
}

static void core_pmu_enable_event(struct perf_event *event)
{
        if (!event->attr.exclude_host)
                x86_pmu_enable_event(event);
}

static void core_pmu_enable_all(int added)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

                if (!test_bit(idx, cpuc->active_mask) ||
                                cpuc->events[idx]->attr.exclude_host)
                        continue;

                __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
        }
}

static __initconst const struct x86_pmu core_pmu = {
        .name                   = "core",
        .handle_irq             = x86_pmu_handle_irq,
        .disable_all            = x86_pmu_disable_all,
        .enable_all             = core_pmu_enable_all,
        .enable                 = core_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
        .hw_config              = x86_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
        .event_map              = intel_pmu_event_map,
        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
        .apic                   = 1,
        /*
         * Intel PMCs cannot be accessed sanely above 32 bit width,
         * so we install an artificial 1<<31 period regardless of
         * the generic event period:
         */
        .max_period             = (1ULL << 31) - 1,
        .get_event_constraints  = intel_get_event_constraints,
        .put_event_constraints  = intel_put_event_constraints,
        .event_constraints      = intel_core_event_constraints,
        .guest_get_msrs         = core_guest_get_msrs,
};

struct intel_shared_regs *allocate_shared_regs(int cpu)
{
        struct intel_shared_regs *regs;
        int i;

        regs = kzalloc_node(sizeof(struct intel_shared_regs),
                            GFP_KERNEL, cpu_to_node(cpu));
        if (regs) {
                /*
                 * initialize the locks to keep lockdep happy
                 */
                for (i = 0; i < EXTRA_REG_MAX; i++)
                        raw_spin_lock_init(&regs->regs[i].lock);

                regs->core_id = -1;
        }
        return regs;
}

static int intel_pmu_cpu_prepare(int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

        if (!x86_pmu.extra_regs)
                return NOTIFY_OK;

        cpuc->shared_regs = allocate_shared_regs(cpu);
        if (!cpuc->shared_regs)
                return NOTIFY_BAD;

        return NOTIFY_OK;
}

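/*
 * On HT systems, sibling threads of a core share one intel_shared_regs
 * structure: the CPU that comes online second adopts its sibling's
 * allocation and queues its own copy for freeing (kfree_on_online).
 */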
1464 static void intel_pmu_cpu_starting(int cpu)
1465 {
1466         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1467         int core_id = topology_core_id(cpu);
1468         int i;
1469
1470         init_debug_store_on_cpu(cpu);
1471         /*
1472          * Deal with CPUs that don't clear their LBRs on power-up.
1473          */
1474         intel_pmu_lbr_reset();
1475
1476         if (!cpuc->shared_regs || (x86_pmu.er_flags & ERF_NO_HT_SHARING))
1477                 return;
1478
1479         for_each_cpu(i, topology_thread_cpumask(cpu)) {
1480                 struct intel_shared_regs *pc;
1481
1482                 pc = per_cpu(cpu_hw_events, i).shared_regs;
1483                 if (pc && pc->core_id == core_id) {
1484                         cpuc->kfree_on_online = cpuc->shared_regs;
1485                         cpuc->shared_regs = pc;
1486                         break;
1487                 }
1488         }
1489
1490         cpuc->shared_regs->core_id = core_id;
1491         cpuc->shared_regs->refcnt++;
1492 }
1493
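/*
 * CPU-offline: drop our reference on the shared-regs structure and free
 * it once the last user is gone (or if it was never claimed by a core).
 */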
1494 static void intel_pmu_cpu_dying(int cpu)
1495 {
1496         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1497         struct intel_shared_regs *pc;
1498
1499         pc = cpuc->shared_regs;
1500         if (pc) {
1501                 if (pc->core_id == -1 || --pc->refcnt == 0)
1502                         kfree(pc);
1503                 cpuc->shared_regs = NULL;
1504         }
1505
1506         fini_debug_store_on_cpu(cpu);
1507 }
1508
1509 static __initconst const struct x86_pmu intel_pmu = {
1510         .name                   = "Intel",
1511         .handle_irq             = intel_pmu_handle_irq,
1512         .disable_all            = intel_pmu_disable_all,
1513         .enable_all             = intel_pmu_enable_all,
1514         .enable                 = intel_pmu_enable_event,
1515         .disable                = intel_pmu_disable_event,
1516         .hw_config              = intel_pmu_hw_config,
1517         .schedule_events        = x86_schedule_events,
1518         .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
1519         .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
1520         .event_map              = intel_pmu_event_map,
1521         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
1522         .apic                   = 1,
1523         /*
1524          * Intel PMCs cannot be accessed sanely above 32 bit width,
1525          * so we install an artificial 1<<31 period regardless of
1526          * the generic event period:
1527          */
1528         .max_period             = (1ULL << 31) - 1,
1529         .get_event_constraints  = intel_get_event_constraints,
1530         .put_event_constraints  = intel_put_event_constraints,
1531
1532         .cpu_prepare            = intel_pmu_cpu_prepare,
1533         .cpu_starting           = intel_pmu_cpu_starting,
1534         .cpu_dying              = intel_pmu_cpu_dying,
1535         .guest_get_msrs         = intel_guest_get_msrs,
1536 };
1537
1538 static void intel_clovertown_quirks(void)
1539 {
1540         /*
1541          * PEBS is unreliable due to:
1542          *
1543          *   AJ67  - PEBS may experience CPL leaks
1544          *   AJ68  - PEBS PMI may be delayed by one event
1545          *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12] is set
1546          *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
1547          *
1548          * AJ67 could be worked around by restricting the OS/USR flags.
1549          * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
1550          *
1551          * AJ106 could possibly be worked around by not allowing LBR
1552          *       usage from PEBS, including the fixup.
1553          * AJ68  could possibly be worked around by always programming
1554          *       a pebs_event_reset[0] value and coping with the lost events.
1555          *
1556          * But taken together it might just make sense to not enable PEBS on
1557          * these chips.
1558          */
1559         printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
1560         x86_pmu.pebs = 0;
1561         x86_pmu.pebs_constraints = NULL;
1562 }
1563
1564 static void intel_sandybridge_quirks(void)
1565 {
1566         printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
1567         x86_pmu.pebs = 0;
1568         x86_pmu.pebs_constraints = NULL;
1569 }
1570
1571 __init int intel_pmu_init(void)
1572 {
1573         union cpuid10_edx edx;
1574         union cpuid10_eax eax;
1575         unsigned int unused;
1576         unsigned int ebx;
1577         int version;
1578
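        /*
         * No architectural perfmon: fall back to the model-specific
         * P6 (family 6) or P4/NetBurst (family 0xf) drivers.
         */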
1579         if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
1580                 switch (boot_cpu_data.x86) {
1581                 case 0x6:
1582                         return p6_pmu_init();
1583                 case 0xf:
1584                         return p4_pmu_init();
1585                 }
1586                 return -ENODEV;
1587         }
1588
1589         /*
1590          * Check whether the Architectural PerfMon enumerates the Branch
1591          * Misses Retired hw_event (the event mask must be long enough).
1592          */
1593         cpuid(10, &eax.full, &ebx, &unused, &edx.full);
1594         if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
1595                 return -ENODEV;
1596
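        /*
         * v1 has no global enable/status MSRs and no fixed counters,
         * so use the simpler core PMU; v2 and later get the full
         * intel_pmu.
         */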
1597         version = eax.split.version_id;
1598         if (version < 2)
1599                 x86_pmu = core_pmu;
1600         else
1601                 x86_pmu = intel_pmu;
1602
1603         x86_pmu.version                 = version;
1604         x86_pmu.num_counters            = eax.split.num_counters;
1605         x86_pmu.cntval_bits             = eax.split.bit_width;
1606         x86_pmu.cntval_mask             = (1ULL << eax.split.bit_width) - 1;
1607
1608         /*
1609          * Quirk: v2 perfmon does not report fixed-purpose counters, so
1610          * assume at least 3 counters:
1611          */
1612         if (version > 1)
1613                 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
1614
1615         /*
1616          * v2 and above have a perf capabilities MSR
1617          */
1618         if (version > 1) {
1619                 u64 capabilities;
1620
1621                 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
1622                 x86_pmu.intel_cap.capabilities = capabilities;
1623         }
1624
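        /* set up the debug store: BTS and, where supported, PEBS */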
1625         intel_ds_init();
1626
1627         /*
1628          * Install the hw-cache-events table:
1629          */
1630         switch (boot_cpu_data.x86_model) {
1631         case 14: /* 65 nm core solo/duo, "Yonah" */
1632                 pr_cont("Core events, ");
1633                 break;
1634
1635         case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
1636                 x86_pmu.quirks = intel_clovertown_quirks;
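                /* fall through */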
1637         case 22: /* single-core 65 nm celeron/core2 solo "Merom-L"/"Conroe-L" */
1638         case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
1639         case 29: /* six-core 45 nm xeon "Dunnington" */
1640                 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
1641                        sizeof(hw_cache_event_ids));
1642
1643                 intel_pmu_lbr_init_core();
1644
1645                 x86_pmu.event_constraints = intel_core2_event_constraints;
1646                 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
1647                 pr_cont("Core2 events, ");
1648                 break;
1649
1650         case 26: /* 45 nm nehalem, "Bloomfield" */
1651         case 30: /* 45 nm nehalem, "Lynnfield" */
1652         case 46: /* 45 nm nehalem-ex, "Beckton" */
1653                 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
1654                        sizeof(hw_cache_event_ids));
1655                 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
1656                        sizeof(hw_cache_extra_regs));
1657
1658                 intel_pmu_lbr_init_nhm();
1659
1660                 x86_pmu.event_constraints = intel_nehalem_event_constraints;
1661                 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
1662                 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
1663                 x86_pmu.extra_regs = intel_nehalem_extra_regs;
1664
1665                 /* UOPS_ISSUED.STALLED_CYCLES */
1666                 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
1667                 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
1668                 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
1669
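                /*
                 * CPUID 0xa EBX bit 6 set means the architectural
                 * branch-misses-retired event is flagged as unavailable:
                 */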
1670                 if (ebx & 0x40) {
1671                         /*
1672                          * Erratum AAJ80 detected, we work around it by using
1673                          * the BR_MISP_EXEC.ANY event. This will over-count
1674                          * branch-misses, but it's still much better than the
1675                          * architectural event which is often completely bogus:
1676                          */
1677                         intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
1678
1679                         pr_cont("erratum AAJ80 worked around, ");
1680                 }
1681                 pr_cont("Nehalem events, ");
1682                 break;
1683
1684         case 28: /* Atom */
1685                 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
1686                        sizeof(hw_cache_event_ids));
1687
1688                 intel_pmu_lbr_init_atom();
1689
1690                 x86_pmu.event_constraints = intel_gen_event_constraints;
1691                 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
1692                 pr_cont("Atom events, ");
1693                 break;
1694
1695         case 37: /* 32 nm nehalem, "Clarkdale" */
1696         case 44: /* 32 nm nehalem, "Gulftown" */
1697         case 47: /* 32 nm Xeon E7 */
1698                 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
1699                        sizeof(hw_cache_event_ids));
1700                 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
1701                        sizeof(hw_cache_extra_regs));
1702
1703                 intel_pmu_lbr_init_nhm();
1704
1705                 x86_pmu.event_constraints = intel_westmere_event_constraints;
1706                 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
1707                 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
1708                 x86_pmu.extra_regs = intel_westmere_extra_regs;
1709                 x86_pmu.er_flags |= ERF_HAS_RSP_1;
1710
1711                 /* UOPS_ISSUED.STALLED_CYCLES */
1712                 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
1713                 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
1714                 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
1715
1716                 pr_cont("Westmere events, ");
1717                 break;
1718
1719         case 42: /* SandyBridge */
1720                 x86_pmu.quirks = intel_sandybridge_quirks;
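                /* fall through */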
1721         case 45: /* SandyBridge, "Romley-EP" */
1722                 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
1723                        sizeof(hw_cache_event_ids));
1724
1725                 intel_pmu_lbr_init_nhm();
1726
1727                 x86_pmu.event_constraints = intel_snb_event_constraints;
1728                 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
1729                 if (boot_cpu_data.x86_model == 45)
1730                         x86_pmu.extra_regs = intel_snbep_extra_regs;
1731                 else
1732                         x86_pmu.extra_regs = intel_snb_extra_regs;
1733                 /* all extra regs are per-cpu when HT is on */
1734                 x86_pmu.er_flags |= ERF_HAS_RSP_1;
1735                 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
1736
1737                 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
1738                 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
1739                 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
1740                 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1;
1741
1742                 pr_cont("SandyBridge events, ");
1743                 break;
1744
1745         default:
1746                 switch (x86_pmu.version) {
1747                 case 1:
1748                         x86_pmu.event_constraints = intel_v1_event_constraints;
1749                         pr_cont("generic architected perfmon v1, ");
1750                         break;
1751                 default:
1752                         /*
1753                          * default constraints for v2 and up
1754                          */
1755                         x86_pmu.event_constraints = intel_gen_event_constraints;
1756                         pr_cont("generic architected perfmon, ");
1757                         break;
1758                 }
1759         }
1760         return 0;
1761 }