1 /*
2  * Per core/cpu state
3  *
4  * Used to coordinate shared registers between HT threads or
5  * among events on a single PMU.
6  */
7
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10 #include <linux/stddef.h>
11 #include <linux/types.h>
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <linux/export.h>
15
16 #include <asm/hardirq.h>
17 #include <asm/apic.h>
18
19 #include "perf_event.h"
20
21 /*
22  * Intel PerfMon, used on Core and later.
23  */
24 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
25 {
26   [PERF_COUNT_HW_CPU_CYCLES]            = 0x003c,
27   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
28   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x4f2e,
29   [PERF_COUNT_HW_CACHE_MISSES]          = 0x412e,
30   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
31   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
32   [PERF_COUNT_HW_BUS_CYCLES]            = 0x013c,
33   [PERF_COUNT_HW_REF_CPU_CYCLES]        = 0x0300, /* pseudo-encoding */
34 };
35
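/*
 * Illustrative sketch (hypothetical, for exposition): each entry above
 * packs an architectural event select in bits 0-7 and a unit mask in
 * bits 8-15; e.g. 0x412e is event 0x2e with umask 0x41, the
 * architectural LLC-misses encoding. A minimal decoder, assuming the
 * ARCH_PERFMON_EVENTSEL_EVENT/UMASK masks from <asm/perf_event.h>:
 */
#if 0
static void decode_hw_event(u64 config)
{
	u8 event = config & ARCH_PERFMON_EVENTSEL_EVENT;		/* bits 0-7  */
	u8 umask = (config & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;	/* bits 8-15 */

	pr_debug("event=0x%02x umask=0x%02x\n", event, umask);
}
#endif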
36 static struct event_constraint intel_core_event_constraints[] __read_mostly =
37 {
38         INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
39         INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
40         INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
41         INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
42         INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
43         INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
44         EVENT_CONSTRAINT_END
45 };
46
47 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
48 {
49         FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
50         FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
51         FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
52         INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
53         INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
54         INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
55         INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
56         INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
57         INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
58         INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
59         INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
60         INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
61         INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
62         EVENT_CONSTRAINT_END
63 };
64
65 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
66 {
67         FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
68         FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
69         FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
70         INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
71         INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
72         INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
73         INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
74         INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
75         INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
76         INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
77         INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
78         EVENT_CONSTRAINT_END
79 };
80
81 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
82 {
83         INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
84         EVENT_EXTRA_END
85 };
86
87 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
88 {
89         FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
90         FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
91         FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
92         INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
93         INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
94         INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
95         INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
96         EVENT_CONSTRAINT_END
97 };
98
99 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
100 {
101         FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
102         FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
103         FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
104         INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
105         INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
106         INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
107         EVENT_CONSTRAINT_END
108 };
109
110 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
111 {
112         INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
113         INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
114         EVENT_EXTRA_END
115 };
116
117 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
118 {
119         EVENT_CONSTRAINT_END
120 };
121
122 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
123 {
124         FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
125         FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
126         FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
127         EVENT_CONSTRAINT_END
128 };
129
130 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
131         INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
132         INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
133         EVENT_EXTRA_END
134 };
135
136 static u64 intel_pmu_event_map(int hw_event)
137 {
138         return intel_perfmon_event_map[hw_event];
139 }
140
141 static __initconst const u64 snb_hw_cache_event_ids
142                                 [PERF_COUNT_HW_CACHE_MAX]
143                                 [PERF_COUNT_HW_CACHE_OP_MAX]
144                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
145 {
146  [ C(L1D) ] = {
147         [ C(OP_READ) ] = {
148                 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS        */
149                 [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT              */
150         },
151         [ C(OP_WRITE) ] = {
152                 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES       */
153                 [ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT        */
154         },
155         [ C(OP_PREFETCH) ] = {
156                 [ C(RESULT_ACCESS) ] = 0x0,
157                 [ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS          */
158         },
159  },
160  [ C(L1I ) ] = {
161         [ C(OP_READ) ] = {
162                 [ C(RESULT_ACCESS) ] = 0x0,
163                 [ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
164         },
165         [ C(OP_WRITE) ] = {
166                 [ C(RESULT_ACCESS) ] = -1,
167                 [ C(RESULT_MISS)   ] = -1,
168         },
169         [ C(OP_PREFETCH) ] = {
170                 [ C(RESULT_ACCESS) ] = 0x0,
171                 [ C(RESULT_MISS)   ] = 0x0,
172         },
173  },
174  [ C(LL  ) ] = {
175         [ C(OP_READ) ] = {
176                 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
177                 [ C(RESULT_ACCESS) ] = 0x01b7,
178                 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
179                 [ C(RESULT_MISS)   ] = 0x01b7,
180         },
181         [ C(OP_WRITE) ] = {
182                 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
183                 [ C(RESULT_ACCESS) ] = 0x01b7,
184                 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
185                 [ C(RESULT_MISS)   ] = 0x01b7,
186         },
187         [ C(OP_PREFETCH) ] = {
188                 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
189                 [ C(RESULT_ACCESS) ] = 0x01b7,
190                 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
191                 [ C(RESULT_MISS)   ] = 0x01b7,
192         },
193  },
194  [ C(DTLB) ] = {
195         [ C(OP_READ) ] = {
196                 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
197                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
198         },
199         [ C(OP_WRITE) ] = {
200                 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
201                 [ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
202         },
203         [ C(OP_PREFETCH) ] = {
204                 [ C(RESULT_ACCESS) ] = 0x0,
205                 [ C(RESULT_MISS)   ] = 0x0,
206         },
207  },
208  [ C(ITLB) ] = {
209         [ C(OP_READ) ] = {
210                 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT         */
211                 [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK    */
212         },
213         [ C(OP_WRITE) ] = {
214                 [ C(RESULT_ACCESS) ] = -1,
215                 [ C(RESULT_MISS)   ] = -1,
216         },
217         [ C(OP_PREFETCH) ] = {
218                 [ C(RESULT_ACCESS) ] = -1,
219                 [ C(RESULT_MISS)   ] = -1,
220         },
221  },
222  [ C(BPU ) ] = {
223         [ C(OP_READ) ] = {
224                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
225                 [ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
226         },
227         [ C(OP_WRITE) ] = {
228                 [ C(RESULT_ACCESS) ] = -1,
229                 [ C(RESULT_MISS)   ] = -1,
230         },
231         [ C(OP_PREFETCH) ] = {
232                 [ C(RESULT_ACCESS) ] = -1,
233                 [ C(RESULT_MISS)   ] = -1,
234         },
235  },
236  [ C(NODE) ] = {
237         [ C(OP_READ) ] = {
238                 [ C(RESULT_ACCESS) ] = -1,
239                 [ C(RESULT_MISS)   ] = -1,
240         },
241         [ C(OP_WRITE) ] = {
242                 [ C(RESULT_ACCESS) ] = -1,
243                 [ C(RESULT_MISS)   ] = -1,
244         },
245         [ C(OP_PREFETCH) ] = {
246                 [ C(RESULT_ACCESS) ] = -1,
247                 [ C(RESULT_MISS)   ] = -1,
248         },
249  },
250
251 };
252
253 static __initconst const u64 westmere_hw_cache_event_ids
254                                 [PERF_COUNT_HW_CACHE_MAX]
255                                 [PERF_COUNT_HW_CACHE_OP_MAX]
256                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
257 {
258  [ C(L1D) ] = {
259         [ C(OP_READ) ] = {
260                 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
261                 [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
262         },
263         [ C(OP_WRITE) ] = {
264                 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
265                 [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
266         },
267         [ C(OP_PREFETCH) ] = {
268                 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
269                 [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
270         },
271  },
272  [ C(L1I ) ] = {
273         [ C(OP_READ) ] = {
274                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
275                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
276         },
277         [ C(OP_WRITE) ] = {
278                 [ C(RESULT_ACCESS) ] = -1,
279                 [ C(RESULT_MISS)   ] = -1,
280         },
281         [ C(OP_PREFETCH) ] = {
282                 [ C(RESULT_ACCESS) ] = 0x0,
283                 [ C(RESULT_MISS)   ] = 0x0,
284         },
285  },
286  [ C(LL  ) ] = {
287         [ C(OP_READ) ] = {
288                 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
289                 [ C(RESULT_ACCESS) ] = 0x01b7,
290                 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
291                 [ C(RESULT_MISS)   ] = 0x01b7,
292         },
293         /*
294          * Use RFO, not WRITEBACK, because a write miss would typically occur
295          * on RFO.
296          */
297         [ C(OP_WRITE) ] = {
298                 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
299                 [ C(RESULT_ACCESS) ] = 0x01b7,
300                 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
301                 [ C(RESULT_MISS)   ] = 0x01b7,
302         },
303         [ C(OP_PREFETCH) ] = {
304                 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
305                 [ C(RESULT_ACCESS) ] = 0x01b7,
306                 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
307                 [ C(RESULT_MISS)   ] = 0x01b7,
308         },
309  },
310  [ C(DTLB) ] = {
311         [ C(OP_READ) ] = {
312                 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
313                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
314         },
315         [ C(OP_WRITE) ] = {
316                 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
317                 [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
318         },
319         [ C(OP_PREFETCH) ] = {
320                 [ C(RESULT_ACCESS) ] = 0x0,
321                 [ C(RESULT_MISS)   ] = 0x0,
322         },
323  },
324  [ C(ITLB) ] = {
325         [ C(OP_READ) ] = {
326                 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
327                 [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
328         },
329         [ C(OP_WRITE) ] = {
330                 [ C(RESULT_ACCESS) ] = -1,
331                 [ C(RESULT_MISS)   ] = -1,
332         },
333         [ C(OP_PREFETCH) ] = {
334                 [ C(RESULT_ACCESS) ] = -1,
335                 [ C(RESULT_MISS)   ] = -1,
336         },
337  },
338  [ C(BPU ) ] = {
339         [ C(OP_READ) ] = {
340                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
341                 [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
342         },
343         [ C(OP_WRITE) ] = {
344                 [ C(RESULT_ACCESS) ] = -1,
345                 [ C(RESULT_MISS)   ] = -1,
346         },
347         [ C(OP_PREFETCH) ] = {
348                 [ C(RESULT_ACCESS) ] = -1,
349                 [ C(RESULT_MISS)   ] = -1,
350         },
351  },
352  [ C(NODE) ] = {
353         [ C(OP_READ) ] = {
354                 [ C(RESULT_ACCESS) ] = 0x01b7,
355                 [ C(RESULT_MISS)   ] = 0x01b7,
356         },
357         [ C(OP_WRITE) ] = {
358                 [ C(RESULT_ACCESS) ] = 0x01b7,
359                 [ C(RESULT_MISS)   ] = 0x01b7,
360         },
361         [ C(OP_PREFETCH) ] = {
362                 [ C(RESULT_ACCESS) ] = 0x01b7,
363                 [ C(RESULT_MISS)   ] = 0x01b7,
364         },
365  },
366 };
367
368 /*
369  * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
370  * See IA32 SDM Vol 3B 30.6.1.3
371  */
372
373 #define NHM_DMND_DATA_RD        (1 << 0)
374 #define NHM_DMND_RFO            (1 << 1)
375 #define NHM_DMND_IFETCH         (1 << 2)
376 #define NHM_DMND_WB             (1 << 3)
377 #define NHM_PF_DATA_RD          (1 << 4)
378 #define NHM_PF_DATA_RFO         (1 << 5)
379 #define NHM_PF_IFETCH           (1 << 6)
380 #define NHM_OFFCORE_OTHER       (1 << 7)
381 #define NHM_UNCORE_HIT          (1 << 8)
382 #define NHM_OTHER_CORE_HIT_SNP  (1 << 9)
383 #define NHM_OTHER_CORE_HITM     (1 << 10)
384                                 /* reserved */
385 #define NHM_REMOTE_CACHE_FWD    (1 << 12)
386 #define NHM_REMOTE_DRAM         (1 << 13)
387 #define NHM_LOCAL_DRAM          (1 << 14)
388 #define NHM_NON_DRAM            (1 << 15)
389
390 #define NHM_LOCAL               (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
391 #define NHM_REMOTE              (NHM_REMOTE_DRAM)
392
393 #define NHM_DMND_READ           (NHM_DMND_DATA_RD)
394 #define NHM_DMND_WRITE          (NHM_DMND_RFO|NHM_DMND_WB)
395 #define NHM_DMND_PREFETCH       (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
396
397 #define NHM_L3_HIT      (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
398 #define NHM_L3_MISS     (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
399 #define NHM_L3_ACCESS   (NHM_L3_HIT|NHM_L3_MISS)
400
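/*
 * Illustrative sketch (hypothetical, for exposition): an offcore
 * response config is one or more request-type bits OR'd with one or
 * more response-type bits, exactly as the composites above are built.
 * For example, "demand data reads that missed the L3" would be:
 */
#if 0
static const u64 nhm_dmnd_read_l3_miss = NHM_DMND_READ | NHM_L3_MISS;
/* written to MSR_OFFCORE_RSP_0 via the extra_reg machinery */
#endif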
401 static __initconst const u64 nehalem_hw_cache_extra_regs
402                                 [PERF_COUNT_HW_CACHE_MAX]
403                                 [PERF_COUNT_HW_CACHE_OP_MAX]
404                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
405 {
406  [ C(LL  ) ] = {
407         [ C(OP_READ) ] = {
408                 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
409                 [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
410         },
411         [ C(OP_WRITE) ] = {
412                 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
413                 [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
414         },
415         [ C(OP_PREFETCH) ] = {
416                 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
417                 [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
418         },
419  },
420  [ C(NODE) ] = {
421         [ C(OP_READ) ] = {
422                 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
423                 [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
424         },
425         [ C(OP_WRITE) ] = {
426                 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
427                 [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
428         },
429         [ C(OP_PREFETCH) ] = {
430                 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
431                 [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
432         },
433  },
434 };
435
436 static __initconst const u64 nehalem_hw_cache_event_ids
437                                 [PERF_COUNT_HW_CACHE_MAX]
438                                 [PERF_COUNT_HW_CACHE_OP_MAX]
439                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
440 {
441  [ C(L1D) ] = {
442         [ C(OP_READ) ] = {
443                 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
444                 [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
445         },
446         [ C(OP_WRITE) ] = {
447                 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
448                 [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
449         },
450         [ C(OP_PREFETCH) ] = {
451                 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
452                 [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
453         },
454  },
455  [ C(L1I ) ] = {
456         [ C(OP_READ) ] = {
457                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
458                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
459         },
460         [ C(OP_WRITE) ] = {
461                 [ C(RESULT_ACCESS) ] = -1,
462                 [ C(RESULT_MISS)   ] = -1,
463         },
464         [ C(OP_PREFETCH) ] = {
465                 [ C(RESULT_ACCESS) ] = 0x0,
466                 [ C(RESULT_MISS)   ] = 0x0,
467         },
468  },
469  [ C(LL  ) ] = {
470         [ C(OP_READ) ] = {
471                 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
472                 [ C(RESULT_ACCESS) ] = 0x01b7,
473                 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
474                 [ C(RESULT_MISS)   ] = 0x01b7,
475         },
476         /*
477          * Use RFO, not WRITEBACK, because a write miss would typically occur
478          * on RFO.
479          */
480         [ C(OP_WRITE) ] = {
481                 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
482                 [ C(RESULT_ACCESS) ] = 0x01b7,
483                 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
484                 [ C(RESULT_MISS)   ] = 0x01b7,
485         },
486         [ C(OP_PREFETCH) ] = {
487                 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
488                 [ C(RESULT_ACCESS) ] = 0x01b7,
489                 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
490                 [ C(RESULT_MISS)   ] = 0x01b7,
491         },
492  },
493  [ C(DTLB) ] = {
494         [ C(OP_READ) ] = {
495                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
496                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
497         },
498         [ C(OP_WRITE) ] = {
499                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
500                 [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
501         },
502         [ C(OP_PREFETCH) ] = {
503                 [ C(RESULT_ACCESS) ] = 0x0,
504                 [ C(RESULT_MISS)   ] = 0x0,
505         },
506  },
507  [ C(ITLB) ] = {
508         [ C(OP_READ) ] = {
509                 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
510                 [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
511         },
512         [ C(OP_WRITE) ] = {
513                 [ C(RESULT_ACCESS) ] = -1,
514                 [ C(RESULT_MISS)   ] = -1,
515         },
516         [ C(OP_PREFETCH) ] = {
517                 [ C(RESULT_ACCESS) ] = -1,
518                 [ C(RESULT_MISS)   ] = -1,
519         },
520  },
521  [ C(BPU ) ] = {
522         [ C(OP_READ) ] = {
523                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
524                 [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
525         },
526         [ C(OP_WRITE) ] = {
527                 [ C(RESULT_ACCESS) ] = -1,
528                 [ C(RESULT_MISS)   ] = -1,
529         },
530         [ C(OP_PREFETCH) ] = {
531                 [ C(RESULT_ACCESS) ] = -1,
532                 [ C(RESULT_MISS)   ] = -1,
533         },
534  },
535  [ C(NODE) ] = {
536         [ C(OP_READ) ] = {
537                 [ C(RESULT_ACCESS) ] = 0x01b7,
538                 [ C(RESULT_MISS)   ] = 0x01b7,
539         },
540         [ C(OP_WRITE) ] = {
541                 [ C(RESULT_ACCESS) ] = 0x01b7,
542                 [ C(RESULT_MISS)   ] = 0x01b7,
543         },
544         [ C(OP_PREFETCH) ] = {
545                 [ C(RESULT_ACCESS) ] = 0x01b7,
546                 [ C(RESULT_MISS)   ] = 0x01b7,
547         },
548  },
549 };
550
551 static __initconst const u64 core2_hw_cache_event_ids
552                                 [PERF_COUNT_HW_CACHE_MAX]
553                                 [PERF_COUNT_HW_CACHE_OP_MAX]
554                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
555 {
556  [ C(L1D) ] = {
557         [ C(OP_READ) ] = {
558                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
559                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
560         },
561         [ C(OP_WRITE) ] = {
562                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
563                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
564         },
565         [ C(OP_PREFETCH) ] = {
566                 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
567                 [ C(RESULT_MISS)   ] = 0,
568         },
569  },
570  [ C(L1I ) ] = {
571         [ C(OP_READ) ] = {
572                 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
573                 [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
574         },
575         [ C(OP_WRITE) ] = {
576                 [ C(RESULT_ACCESS) ] = -1,
577                 [ C(RESULT_MISS)   ] = -1,
578         },
579         [ C(OP_PREFETCH) ] = {
580                 [ C(RESULT_ACCESS) ] = 0,
581                 [ C(RESULT_MISS)   ] = 0,
582         },
583  },
584  [ C(LL  ) ] = {
585         [ C(OP_READ) ] = {
586                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
587                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
588         },
589         [ C(OP_WRITE) ] = {
590                 [ C(RESULT_ACCESS) ] = 0x4f2a, /* L2_ST.MESI                 */
591                 [ C(RESULT_MISS)   ] = 0x412a, /* L2_ST.ISTATE               */
592         },
593         [ C(OP_PREFETCH) ] = {
594                 [ C(RESULT_ACCESS) ] = 0,
595                 [ C(RESULT_MISS)   ] = 0,
596         },
597  },
598  [ C(DTLB) ] = {
599         [ C(OP_READ) ] = {
600                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
601                 [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
602         },
603         [ C(OP_WRITE) ] = {
604                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
605                 [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
606         },
607         [ C(OP_PREFETCH) ] = {
608                 [ C(RESULT_ACCESS) ] = 0,
609                 [ C(RESULT_MISS)   ] = 0,
610         },
611  },
612  [ C(ITLB) ] = {
613         [ C(OP_READ) ] = {
614                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
615                 [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
616         },
617         [ C(OP_WRITE) ] = {
618                 [ C(RESULT_ACCESS) ] = -1,
619                 [ C(RESULT_MISS)   ] = -1,
620         },
621         [ C(OP_PREFETCH) ] = {
622                 [ C(RESULT_ACCESS) ] = -1,
623                 [ C(RESULT_MISS)   ] = -1,
624         },
625  },
626  [ C(BPU ) ] = {
627         [ C(OP_READ) ] = {
628                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
629                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
630         },
631         [ C(OP_WRITE) ] = {
632                 [ C(RESULT_ACCESS) ] = -1,
633                 [ C(RESULT_MISS)   ] = -1,
634         },
635         [ C(OP_PREFETCH) ] = {
636                 [ C(RESULT_ACCESS) ] = -1,
637                 [ C(RESULT_MISS)   ] = -1,
638         },
639  },
640 };
641
642 static __initconst const u64 atom_hw_cache_event_ids
643                                 [PERF_COUNT_HW_CACHE_MAX]
644                                 [PERF_COUNT_HW_CACHE_OP_MAX]
645                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
646 {
647  [ C(L1D) ] = {
648         [ C(OP_READ) ] = {
649                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
650                 [ C(RESULT_MISS)   ] = 0,
651         },
652         [ C(OP_WRITE) ] = {
653                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
654                 [ C(RESULT_MISS)   ] = 0,
655         },
656         [ C(OP_PREFETCH) ] = {
657                 [ C(RESULT_ACCESS) ] = 0x0,
658                 [ C(RESULT_MISS)   ] = 0,
659         },
660  },
661  [ C(L1I ) ] = {
662         [ C(OP_READ) ] = {
663                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
664                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
665         },
666         [ C(OP_WRITE) ] = {
667                 [ C(RESULT_ACCESS) ] = -1,
668                 [ C(RESULT_MISS)   ] = -1,
669         },
670         [ C(OP_PREFETCH) ] = {
671                 [ C(RESULT_ACCESS) ] = 0,
672                 [ C(RESULT_MISS)   ] = 0,
673         },
674  },
675  [ C(LL  ) ] = {
676         [ C(OP_READ) ] = {
677                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
678                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
679         },
680         [ C(OP_WRITE) ] = {
681                 [ C(RESULT_ACCESS) ] = 0x4f2a, /* L2_ST.MESI                 */
682                 [ C(RESULT_MISS)   ] = 0x412a, /* L2_ST.ISTATE               */
683         },
684         [ C(OP_PREFETCH) ] = {
685                 [ C(RESULT_ACCESS) ] = 0,
686                 [ C(RESULT_MISS)   ] = 0,
687         },
688  },
689  [ C(DTLB) ] = {
690         [ C(OP_READ) ] = {
691                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
692                 [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
693         },
694         [ C(OP_WRITE) ] = {
695                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
696                 [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
697         },
698         [ C(OP_PREFETCH) ] = {
699                 [ C(RESULT_ACCESS) ] = 0,
700                 [ C(RESULT_MISS)   ] = 0,
701         },
702  },
703  [ C(ITLB) ] = {
704         [ C(OP_READ) ] = {
705                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
706                 [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
707         },
708         [ C(OP_WRITE) ] = {
709                 [ C(RESULT_ACCESS) ] = -1,
710                 [ C(RESULT_MISS)   ] = -1,
711         },
712         [ C(OP_PREFETCH) ] = {
713                 [ C(RESULT_ACCESS) ] = -1,
714                 [ C(RESULT_MISS)   ] = -1,
715         },
716  },
717  [ C(BPU ) ] = {
718         [ C(OP_READ) ] = {
719                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
720                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
721         },
722         [ C(OP_WRITE) ] = {
723                 [ C(RESULT_ACCESS) ] = -1,
724                 [ C(RESULT_MISS)   ] = -1,
725         },
726         [ C(OP_PREFETCH) ] = {
727                 [ C(RESULT_ACCESS) ] = -1,
728                 [ C(RESULT_MISS)   ] = -1,
729         },
730  },
731 };
732
733 static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
734 {
735         /* user explicitly requested branch sampling */
736         if (has_branch_stack(event))
737                 return true;
738
739         /* implicit branch sampling to correct PEBS skid */
740         if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
741                 return true;
742
743         return false;
744 }
745
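/*
 * Illustrative sketch (hypothetical, for exposition): the two cases
 * above fire either when userspace asks for branch samples explicitly
 * (attr.branch_sample_type) or implicitly via precise_ip > 1, where
 * the LBR is used to correct PEBS skid. A hypothetical attr for the
 * implicit case:
 */
#if 0
struct perf_event_attr attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.precise_ip	= 2,	/* 0-skid request; implies LBR sampling here */
};
#endif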
746 static void intel_pmu_disable_all(void)
747 {
748         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
749
750         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
751
752         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
753                 intel_pmu_disable_bts();
754
755         intel_pmu_pebs_disable_all();
756         intel_pmu_lbr_disable_all();
757 }
758
759 static void intel_pmu_enable_all(int added)
760 {
761         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
762
763         intel_pmu_pebs_enable_all();
764         intel_pmu_lbr_enable_all();
765         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
766                         x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
767
768         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
769                 struct perf_event *event =
770                         cpuc->events[X86_PMC_IDX_FIXED_BTS];
771
772                 if (WARN_ON_ONCE(!event))
773                         return;
774
775                 intel_pmu_enable_bts(event->hw.config);
776         }
777 }
778
779 /*
780  * Workaround for:
781  *   Intel Errata AAK100 (model 26)
782  *   Intel Errata AAP53  (model 30)
783  *   Intel Errata BD53   (model 44)
784  *
785  * The official story:
786  *   These chips need to be 'reset' when adding counters by programming the
787  *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
788  *   in sequence on the same PMC or on different PMCs.
789  *
790  * In practice it appears some of these events do in fact count, and
791  * we need to program all 4 events.
792  */
793 static void intel_pmu_nhm_workaround(void)
794 {
795         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
796         static const unsigned long nhm_magic[4] = {
797                 0x4300B5,
798                 0x4300D2,
799                 0x4300B1,
800                 0x4300B1
801         };
802         struct perf_event *event;
803         int i;
804
805         /*
806          * The errata requires the following steps:
807          * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
808          * 2) Configure 4 PERFEVTSELx with the magic events and clear
809          *    the corresponding PMCx;
810          * 3) Set bits 0-3 of MSR_CORE_PERF_GLOBAL_CTRL;
811          * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
812          * 5) Clear the 4 pairs of PERFEVTSELx and PMCx;
813          */
814
815         /*
816          * The real steps we take are a little different from the above:
817          * A) To reduce MSR operations, we don't run step 1) as those
818          *    MSRs are already cleared before this function is called;
819          * B) Call x86_perf_event_update to save PMCx before configuring
820          *    PERFEVTSELx with the magic number;
821          * C) For step 5), we only clear a PERFEVTSELx when it is not
822          *    currently in use;
823          * D) Call x86_perf_event_set_period to restore PMCx.
824          */
825
826         /* We always operate on 4 pairs of PERF counters */
827         for (i = 0; i < 4; i++) {
828                 event = cpuc->events[i];
829                 if (event)
830                         x86_perf_event_update(event);
831         }
832
833         for (i = 0; i < 4; i++) {
834                 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
835                 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
836         }
837
838         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
839         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
840
841         for (i = 0; i < 4; i++) {
842                 event = cpuc->events[i];
843
844                 if (event) {
845                         x86_perf_event_set_period(event);
846                         __x86_pmu_enable_event(&event->hw,
847                                         ARCH_PERFMON_EVENTSEL_ENABLE);
848                 } else
849                         wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
850         }
851 }
852
853 static void intel_pmu_nhm_enable_all(int added)
854 {
855         if (added)
856                 intel_pmu_nhm_workaround();
857         intel_pmu_enable_all(added);
858 }
859
860 static inline u64 intel_pmu_get_status(void)
861 {
862         u64 status;
863
864         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
865
866         return status;
867 }
868
869 static inline void intel_pmu_ack_status(u64 ack)
870 {
871         wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
872 }
873
874 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
875 {
876         int idx = hwc->idx - X86_PMC_IDX_FIXED;
877         u64 ctrl_val, mask;
878
879         mask = 0xfULL << (idx * 4);
880
881         rdmsrl(hwc->config_base, ctrl_val);
882         ctrl_val &= ~mask;
883         wrmsrl(hwc->config_base, ctrl_val);
884 }
885
886 static void intel_pmu_disable_event(struct perf_event *event)
887 {
888         struct hw_perf_event *hwc = &event->hw;
889         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
890
891         if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
892                 intel_pmu_disable_bts();
893                 intel_pmu_drain_bts_buffer();
894                 return;
895         }
896
897         cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
898         cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
899
900         /*
901          * Must be disabled before any actual event,
902          * because any event may be combined with LBR.
903          */
904         if (intel_pmu_needs_lbr_smpl(event))
905                 intel_pmu_lbr_disable(event);
906
907         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
908                 intel_pmu_disable_fixed(hwc);
909                 return;
910         }
911
912         x86_pmu_disable_event(event);
913
914         if (unlikely(event->attr.precise_ip))
915                 intel_pmu_pebs_disable(event);
916 }
917
918 static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
919 {
920         int idx = hwc->idx - X86_PMC_IDX_FIXED;
921         u64 ctrl_val, bits, mask;
922
923         /*
924          * Enable IRQ generation (0x8),
925          * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
926          * if requested:
927          */
928         bits = 0x8ULL;
929         if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
930                 bits |= 0x2;
931         if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
932                 bits |= 0x1;
933
934         /*
935          * ANY bit is supported in v3 and up
936          */
937         if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
938                 bits |= 0x4;
939
940         bits <<= (idx * 4);
941         mask = 0xfULL << (idx * 4);
942
943         rdmsrl(hwc->config_base, ctrl_val);
944         ctrl_val &= ~mask;
945         ctrl_val |= bits;
946         wrmsrl(hwc->config_base, ctrl_val);
947 }
948
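/*
 * Illustrative sketch (hypothetical, for exposition): each fixed
 * counter owns one 4-bit nibble of MSR_ARCH_PERFMON_FIXED_CTR_CTRL,
 * laid out as above: bit 0 = ring-0, bit 1 = ring-3, bit 2 = ANY
 * (v3+), bit 3 = PMI. E.g. fixed counter 1 counting user+kernel with
 * an interrupt on overflow:
 */
#if 0
static const u64 fixed1_os_usr_pmi = 0xbULL << (1 * 4);	/* == 0xb0 */
#endif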
949 static void intel_pmu_enable_event(struct perf_event *event)
950 {
951         struct hw_perf_event *hwc = &event->hw;
952         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
953
954         if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
955                 if (!__this_cpu_read(cpu_hw_events.enabled))
956                         return;
957
958                 intel_pmu_enable_bts(hwc->config);
959                 return;
960         }
961         /*
962          * Must be enabled before any actual event,
963          * because any event may be combined with LBR.
964          */
965         if (intel_pmu_needs_lbr_smpl(event))
966                 intel_pmu_lbr_enable(event);
967
968         if (event->attr.exclude_host)
969                 cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
970         if (event->attr.exclude_guest)
971                 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
972
973         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
974                 intel_pmu_enable_fixed(hwc);
975                 return;
976         }
977
978         if (unlikely(event->attr.precise_ip))
979                 intel_pmu_pebs_enable(event);
980
981         __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
982 }
983
984 /*
985  * Save and restart an expired event. Called by NMI contexts,
986  * so it has to be careful about preempting normal event ops:
987  */
988 int intel_pmu_save_and_restart(struct perf_event *event)
989 {
990         x86_perf_event_update(event);
991         return x86_perf_event_set_period(event);
992 }
993
994 static void intel_pmu_reset(void)
995 {
996         struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
997         unsigned long flags;
998         int idx;
999
1000         if (!x86_pmu.num_counters)
1001                 return;
1002
1003         local_irq_save(flags);
1004
1005         pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
1006
1007         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1008                 checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
1009                 checking_wrmsrl(x86_pmu_event_addr(idx),  0ull);
1010         }
1011         for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
1012                 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1013
1014         if (ds)
1015                 ds->bts_index = ds->bts_buffer_base;
1016
1017         local_irq_restore(flags);
1018 }
1019
1020 /*
1021  * This handler is triggered by the local APIC, so the APIC IRQ handling
1022  * rules apply:
1023  */
1024 static int intel_pmu_handle_irq(struct pt_regs *regs)
1025 {
1026         struct perf_sample_data data;
1027         struct cpu_hw_events *cpuc;
1028         int bit, loops;
1029         u64 status;
1030         int handled;
1031
1032         cpuc = &__get_cpu_var(cpu_hw_events);
1033
1034         /*
1035          * Some chipsets need to unmask the LVTPC in a particular spot
1036          * inside the NMI handler.  As a result, the unmasking was pushed
1037          * into all the NMI handlers.
1038          *
1039          * This handler doesn't seem to have any issues with the unmasking
1040          * so it was left at the top.
1041          */
1042         apic_write(APIC_LVTPC, APIC_DM_NMI);
1043
1044         intel_pmu_disable_all();
1045         handled = intel_pmu_drain_bts_buffer();
1046         status = intel_pmu_get_status();
1047         if (!status) {
1048                 intel_pmu_enable_all(0);
1049                 return handled;
1050         }
1051
1052         loops = 0;
1053 again:
1054         intel_pmu_ack_status(status);
1055         if (++loops > 100) {
1056                 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1057                 perf_event_print_debug();
1058                 intel_pmu_reset();
1059                 goto done;
1060         }
1061
1062         inc_irq_stat(apic_perf_irqs);
1063
1064         intel_pmu_lbr_read();
1065
1066         /*
1067          * PEBS overflow sets bit 62 in the global status register
1068          */
1069         if (__test_and_clear_bit(62, (unsigned long *)&status)) {
1070                 handled++;
1071                 x86_pmu.drain_pebs(regs);
1072         }
1073
1074         for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
1075                 struct perf_event *event = cpuc->events[bit];
1076
1077                 handled++;
1078
1079                 if (!test_bit(bit, cpuc->active_mask))
1080                         continue;
1081
1082                 if (!intel_pmu_save_and_restart(event))
1083                         continue;
1084
1085                 perf_sample_data_init(&data, 0, event->hw.last_period);
1086
1087                 if (has_branch_stack(event))
1088                         data.br_stack = &cpuc->lbr_stack;
1089
1090                 if (perf_event_overflow(event, &data, regs))
1091                         x86_pmu_stop(event, 0);
1092         }
1093
1094         /*
1095          * Repeat if there is more work to be done:
1096          */
1097         status = intel_pmu_get_status();
1098         if (status)
1099                 goto again;
1100
1101 done:
1102         intel_pmu_enable_all(0);
1103         return handled;
1104 }
1105
1106 static struct event_constraint *
1107 intel_bts_constraints(struct perf_event *event)
1108 {
1109         struct hw_perf_event *hwc = &event->hw;
1110         unsigned int hw_event, bts_event;
1111
1112         if (event->attr.freq)
1113                 return NULL;
1114
1115         hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
1116         bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
1117
1118         if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
1119                 return &bts_constraint;
1120
1121         return NULL;
1122 }
1123
1124 static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
1125 {
1126         if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
1127                 return false;
1128
1129         if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
1130                 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1131                 event->hw.config |= 0x01bb;
1132                 event->hw.extra_reg.idx = EXTRA_REG_RSP_1;
1133                 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
1134         } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) {
1135                 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1136                 event->hw.config |= 0x01b7;
1137                 event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
1138                 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
1139         }
1140
1141         if (event->hw.extra_reg.idx == orig_idx)
1142                 return false;
1143
1144         return true;
1145 }
1146
1147 /*
1148  * manage allocation of shared extra msr for certain events
1149  *
1150  * sharing can be:
1151  * per-cpu: to be shared between the various events on a single PMU
1152  * per-core: per-cpu + shared by HT threads
1153  */
1154 static struct event_constraint *
1155 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
1156                                    struct perf_event *event,
1157                                    struct hw_perf_event_extra *reg)
1158 {
1159         struct event_constraint *c = &emptyconstraint;
1160         struct er_account *era;
1161         unsigned long flags;
1162         int orig_idx = reg->idx;
1163
1164         /* already allocated shared msr */
1165         if (reg->alloc)
1166                 return NULL; /* call x86_get_event_constraint() */
1167
1168 again:
1169         era = &cpuc->shared_regs->regs[reg->idx];
1170         /*
1171          * we use raw_spin_lock_irqsave() to avoid lockdep issues when
1172          * passing a fake cpuc
1173          */
1174         raw_spin_lock_irqsave(&era->lock, flags);
1175
1176         if (!atomic_read(&era->ref) || era->config == reg->config) {
1177
1178                 /* lock in msr value */
1179                 era->config = reg->config;
1180                 era->reg = reg->reg;
1181
1182                 /* one more user */
1183                 atomic_inc(&era->ref);
1184
1185                 /* no need to reallocate during incremental event scheduling */
1186                 reg->alloc = 1;
1187
1188                 /*
1189                  * need to call x86_get_event_constraint()
1190                  * to check if associated event has constraints
1191                  */
1192                 c = NULL;
1193         } else if (intel_try_alt_er(event, orig_idx)) {
1194                 raw_spin_unlock_irqrestore(&era->lock, flags);
1195                 goto again;
1196         }
1197         raw_spin_unlock_irqrestore(&era->lock, flags);
1198
1199         return c;
1200 }
1201
1202 static void
1203 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
1204                                    struct hw_perf_event_extra *reg)
1205 {
1206         struct er_account *era;
1207
1208         /*
1209          * Only put the constraint if the extra reg was actually
1210          * allocated. This also takes care of events which do
1211          * not use an extra shared reg.
1212          */
1213         if (!reg->alloc)
1214                 return;
1215
1216         era = &cpuc->shared_regs->regs[reg->idx];
1217
1218         /* one fewer user */
1219         atomic_dec(&era->ref);
1220
1221         /* allocate again next time */
1222         reg->alloc = 0;
1223 }
1224
1225 static struct event_constraint *
1226 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
1227                               struct perf_event *event)
1228 {
1229         struct event_constraint *c = NULL, *d;
1230         struct hw_perf_event_extra *xreg, *breg;
1231
1232         xreg = &event->hw.extra_reg;
1233         if (xreg->idx != EXTRA_REG_NONE) {
1234                 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
1235                 if (c == &emptyconstraint)
1236                         return c;
1237         }
1238         breg = &event->hw.branch_reg;
1239         if (breg->idx != EXTRA_REG_NONE) {
1240                 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
1241                 if (d == &emptyconstraint) {
1242                         __intel_shared_reg_put_constraints(cpuc, xreg);
1243                         c = d;
1244                 }
1245         }
1246         return c;
1247 }
1248
1249 struct event_constraint *
1250 x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1251 {
1252         struct event_constraint *c;
1253
1254         if (x86_pmu.event_constraints) {
1255                 for_each_event_constraint(c, x86_pmu.event_constraints) {
1256                         if ((event->hw.config & c->cmask) == c->code)
1257                                 return c;
1258                 }
1259         }
1260
1261         return &unconstrained;
1262 }
1263
1264 static struct event_constraint *
1265 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1266 {
1267         struct event_constraint *c;
1268
1269         c = intel_bts_constraints(event);
1270         if (c)
1271                 return c;
1272
1273         c = intel_pebs_constraints(event);
1274         if (c)
1275                 return c;
1276
1277         c = intel_shared_regs_constraints(cpuc, event);
1278         if (c)
1279                 return c;
1280
1281         return x86_get_event_constraints(cpuc, event);
1282 }
1283
1284 static void
1285 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
1286                                         struct perf_event *event)
1287 {
1288         struct hw_perf_event_extra *reg;
1289
1290         reg = &event->hw.extra_reg;
1291         if (reg->idx != EXTRA_REG_NONE)
1292                 __intel_shared_reg_put_constraints(cpuc, reg);
1293
1294         reg = &event->hw.branch_reg;
1295         if (reg->idx != EXTRA_REG_NONE)
1296                 __intel_shared_reg_put_constraints(cpuc, reg);
1297 }
1298
1299 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
1300                                         struct perf_event *event)
1301 {
1302         intel_put_shared_regs_event_constraints(cpuc, event);
1303 }
1304
1305 static int intel_pmu_hw_config(struct perf_event *event)
1306 {
1307         int ret = x86_pmu_hw_config(event);
1308
1309         if (ret)
1310                 return ret;
1311
1312         if (event->attr.precise_ip &&
1313             (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1314                 /*
1315                  * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1316                  * (0x003c) so that we can use it with PEBS.
1317                  *
1318                  * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1319                  * PEBS capable. However we can use INST_RETIRED.ANY_P
1320                  * (0x00c0), which is a PEBS capable event, to get the same
1321                  * count.
1322                  *
1323                  * With a count mask, INST_RETIRED.ANY_P counts the cycles in
1324                  * which at least CNTMASK instructions retire. By setting
1325                  * CNTMASK to a value (16) larger than the maximum number of
1326                  * instructions that can be retired per cycle (4) and then
1327                  * inverting the condition, we count all cycles that retire
1328                  * 16 or fewer instructions, which is every cycle.
1329                  *
1330                  * Thereby we gain a PEBS capable cycle counter.
1331                  */
1332                 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
1333
1334
1335                 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1336                 event->hw.config = alt_config;
1337         }
1338
1339         if (intel_pmu_needs_lbr_smpl(event)) {
1340                 ret = intel_pmu_setup_lbr_filter(event);
1341                 if (ret)
1342                         return ret;
1343         }
1344
1345         if (event->attr.type != PERF_TYPE_RAW)
1346                 return 0;
1347
1348         if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
1349                 return 0;
1350
1351         if (x86_pmu.version < 3)
1352                 return -EINVAL;
1353
1354         if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1355                 return -EACCES;
1356
1357         event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
1358
1359         return 0;
1360 }
1361
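/*
 * Illustrative sketch (hypothetical, for exposition): with the event
 * select in bits 0-7, inv in bit 23 and cmask in bits 24-31, the
 * alternative encoding above expands to a single raw config value:
 */
#if 0
/* X86_CONFIG(.event=0xc0, .inv=1, .cmask=16) == 0x108000c0 */
static const u64 pebs_cycles_alt = (16ULL << 24) | (1ULL << 23) | 0xc0;
#endif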
1362 struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
1363 {
1364         if (x86_pmu.guest_get_msrs)
1365                 return x86_pmu.guest_get_msrs(nr);
1366         *nr = 0;
1367         return NULL;
1368 }
1369 EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
1370
1371 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
1372 {
1373         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1374         struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1375
1376         arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
1377         arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
1378         arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
1379
1380         *nr = 1;
1381         return arr;
1382 }
1383
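/*
 * Illustrative sketch (hypothetical, for exposition): a hypervisor
 * caller would walk the returned array around VM entry, loading the
 * .guest values before entering the guest and the .host values on the
 * way back out:
 */
#if 0
static void load_guest_perf_msrs(void)
{
	struct perf_guest_switch_msr *msrs;
	int i, nr;

	msrs = perf_guest_get_msrs(&nr);
	for (i = 0; i < nr; i++)
		wrmsrl(msrs[i].msr, msrs[i].guest);
}
#endif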
1384 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
1385 {
1386         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1387         struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1388         int idx;
1389
1390         for (idx = 0; idx < x86_pmu.num_counters; idx++)  {
1391                 struct perf_event *event = cpuc->events[idx];
1392
1393                 arr[idx].msr = x86_pmu_config_addr(idx);
1394                 arr[idx].host = arr[idx].guest = 0;
1395
1396                 if (!test_bit(idx, cpuc->active_mask))
1397                         continue;
1398
1399                 arr[idx].host = arr[idx].guest =
1400                         event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
1401
1402                 if (event->attr.exclude_host)
1403                         arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
1404                 else if (event->attr.exclude_guest)
1405                         arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
1406         }
1407
1408         *nr = x86_pmu.num_counters;
1409         return arr;
1410 }
1411
1412 static void core_pmu_enable_event(struct perf_event *event)
1413 {
1414         if (!event->attr.exclude_host)
1415                 x86_pmu_enable_event(event);
1416 }
1417
1418 static void core_pmu_enable_all(int added)
1419 {
1420         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1421         int idx;
1422
1423         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1424                 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
1425
1426                 if (!test_bit(idx, cpuc->active_mask) ||
1427                                 cpuc->events[idx]->attr.exclude_host)
1428                         continue;
1429
1430                 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
1431         }
1432 }
1433
1434 PMU_FORMAT_ATTR(event,  "config:0-7"    );
1435 PMU_FORMAT_ATTR(umask,  "config:8-15"   );
1436 PMU_FORMAT_ATTR(edge,   "config:18"     );
1437 PMU_FORMAT_ATTR(pc,     "config:19"     );
1438 PMU_FORMAT_ATTR(any,    "config:21"     ); /* v3 + */
1439 PMU_FORMAT_ATTR(inv,    "config:23"     );
1440 PMU_FORMAT_ATTR(cmask,  "config:24-31"  );
1441
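/*
 * Illustrative note (hypothetical, for exposition): each
 * PMU_FORMAT_ATTR() above exports a bit-range spec under
 * /sys/bus/event_source/devices/cpu/format/, which lets tools build
 * attr.config from symbolic fields, e.g.:
 *
 *	$ cat /sys/bus/event_source/devices/cpu/format/umask
 *	config:8-15
 *	$ perf stat -e cpu/event=0x2e,umask=0x41/ ...
 */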
1442 static struct attribute *intel_arch_formats_attr[] = {
1443         &format_attr_event.attr,
1444         &format_attr_umask.attr,
1445         &format_attr_edge.attr,
1446         &format_attr_pc.attr,
1447         &format_attr_inv.attr,
1448         &format_attr_cmask.attr,
1449         NULL,
1450 };
1451
1452 static __initconst const struct x86_pmu core_pmu = {
1453         .name                   = "core",
1454         .handle_irq             = x86_pmu_handle_irq,
1455         .disable_all            = x86_pmu_disable_all,
1456         .enable_all             = core_pmu_enable_all,
1457         .enable                 = core_pmu_enable_event,
1458         .disable                = x86_pmu_disable_event,
1459         .hw_config              = x86_pmu_hw_config,
1460         .schedule_events        = x86_schedule_events,
1461         .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
1462         .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
1463         .event_map              = intel_pmu_event_map,
1464         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
1465         .apic                   = 1,
1466         /*
1467          * Intel PMCs cannot be accessed sanely above 32-bit width,
1468          * so we install an artificial 1<<31 period regardless of
1469          * the generic event period:
1470          */
1471         .max_period             = (1ULL << 31) - 1,
1472         .get_event_constraints  = intel_get_event_constraints,
1473         .put_event_constraints  = intel_put_event_constraints,
1474         .event_constraints      = intel_core_event_constraints,
1475         .guest_get_msrs         = core_guest_get_msrs,
1476         .format_attrs           = intel_arch_formats_attr,
1477 };
1478
struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	struct intel_shared_regs *regs;
	int i;

	regs = kzalloc_node(sizeof(struct intel_shared_regs),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (regs) {
		/*
		 * initialize the locks to keep lockdep happy
		 */
		for (i = 0; i < EXTRA_REG_MAX; i++)
			raw_spin_lock_init(&regs->regs[i].lock);

		regs->core_id = -1;
	}
	return regs;
}

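/*
 * Runs early in CPU hotplug, in process context, so allocate_shared_regs()
 * may sleep; only models with extra regs or an LBR select map need the
 * shared state at all.
 */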
static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
		return NOTIFY_OK;

	cpuc->shared_regs = allocate_shared_regs(cpu);
	if (!cpuc->shared_regs)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

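/*
 * When the extra MSRs are shared between HT siblings, look for a sibling
 * that already owns a shared_regs structure for this core and attach to
 * it; our own copy is then queued on kfree_on_online for disposal once
 * the CPU is fully online.
 */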
static void intel_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int core_id = topology_core_id(cpu);
	int i;

	init_debug_store_on_cpu(cpu);
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
	intel_pmu_lbr_reset();

	cpuc->lbr_sel = NULL;

	if (!cpuc->shared_regs)
		return;

	if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
		for_each_cpu(i, topology_thread_cpumask(cpu)) {
			struct intel_shared_regs *pc;

			pc = per_cpu(cpu_hw_events, i).shared_regs;
			if (pc && pc->core_id == core_id) {
				cpuc->kfree_on_online = cpuc->shared_regs;
				cpuc->shared_regs = pc;
				break;
			}
		}
		cpuc->shared_regs->core_id = core_id;
		cpuc->shared_regs->refcnt++;
	}

	if (x86_pmu.lbr_sel_map)
		cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
}

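/*
 * Drop our reference on the shared state; free it when we were the last
 * user or when it was never bound to a core (core_id == -1).
 */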
static void intel_pmu_cpu_dying(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_shared_regs *pc;

	pc = cpuc->shared_regs;
	if (pc) {
		if (pc->core_id == -1 || --pc->refcnt == 0)
			kfree(pc);
		cpuc->shared_regs = NULL;
	}

	fini_debug_store_on_cpu(cpu);
}

static void intel_pmu_flush_branch_stack(void)
{
	/*
	 * The Intel LBR does not tag entries with the PID of the
	 * current task, so we need to flush it on context switch.
	 * For now, we simply reset it.
	 */
	if (x86_pmu.lbr_nr)
		intel_pmu_lbr_reset();
}

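/*
 * OFFCORE_RESPONSE events take a full 64-bit mask in a separate MSR,
 * which is carried in config1 (see the extra_regs tables).
 */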
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");

static struct attribute *intel_arch3_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_any.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,

	&format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
	NULL,
};

static __initconst const struct x86_pmu intel_pmu = {
	.name			= "Intel",
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,

	.format_attrs		= intel_arch3_formats_attr,

	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
	.guest_get_msrs		= intel_guest_get_msrs,
	.flush_branch_stack	= intel_pmu_flush_branch_stack,
};

static __init void intel_clovertown_quirk(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12] is set
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67 could be worked around by restricting the OS/USR flags.
	 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *       a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	pr_warn("PEBS disabled due to CPU errata\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}

static __init void intel_sandybridge_quirk(void)
{
	pr_warn("PEBS disabled due to CPU errata\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}

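/*
 * Architectural events, ordered by their bit position in CPUID.0xA EBX;
 * a set bit there means the event is *not* available.
 */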
static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
	{ PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
	{ PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
	{ PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
	{ PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
	{ PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
	{ PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
};

static __init void intel_arch_events_quirk(void)
{
	int bit;

	/* disable events reported as not present by CPUID */
	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
		pr_warn("CPUID marked event: '%s' unavailable\n",
			intel_arch_events_map[bit].name);
	}
}

static __init void intel_nehalem_quirk(void)
{
	union cpuid10_ebx ebx;

	ebx.full = x86_pmu.events_maskl;
	if (ebx.split.no_branch_misses_retired) {
		/*
		 * Erratum AAJ80 detected, we work it around by using
		 * the BR_MISP_EXEC.ANY event. This will over-count
		 * branch-misses, but it's still much better than the
		 * architectural event which is often completely bogus:
		 */
		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
		ebx.split.no_branch_misses_retired = 0;
		x86_pmu.events_maskl = ebx.full;
		pr_info("CPU erratum AAJ80 worked around\n");
	}
}

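/*
 * Probe CPUID leaf 0xA for the architectural perfmon version, pick the
 * v1 core PMU or the v2+ Intel PMU accordingly, then install the
 * model-specific event tables, constraints and quirks.
 */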
__init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	unsigned int unused;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version			= version;
	x86_pmu.num_counters		= eax.split.num_counters;
	x86_pmu.cntval_bits		= eax.split.bit_width;
	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;

	x86_pmu.events_maskl		= ebx.full;
	x86_pmu.events_mask_len		= eax.split.mask_length;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	/*
	 * v2 and above have a perf capabilities MSR
	 */
	if (version > 1) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	intel_ds_init();

	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
		x86_add_quirk(intel_clovertown_quirk);
		/* fall through */
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		x86_add_quirk(intel_nehalem_quirk);

		pr_cont("Nehalem events, ");
		break;

	case 28: /* Atom */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
	case 47: /* 32 nm Xeon E7 */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.er_flags |= ERF_HAS_RSP_1;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		pr_cont("Westmere events, ");
		break;

	case 42: /* SandyBridge */
		x86_add_quirk(intel_sandybridge_quirk);
		/* fall through */
	case 45: /* SandyBridge, "Romley-EP" */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("SandyBridge events, ");
		break;

	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			break;
		default:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			break;
		}
	}

	return 0;
}