perf_events, x86: Improve x86 event scheduling
[pandora-kernel.git] arch/x86/kernel/cpu/perf_event.c
1 /*
2  * Performance events x86 architecture code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstra@redhat.com>
9  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  *  For licensing details see kernel-base/COPYING
13  */
14
15 #include <linux/perf_event.h>
16 #include <linux/capability.h>
17 #include <linux/notifier.h>
18 #include <linux/hardirq.h>
19 #include <linux/kprobes.h>
20 #include <linux/module.h>
21 #include <linux/kdebug.h>
22 #include <linux/sched.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/cpu.h>
26
27 #include <asm/apic.h>
28 #include <asm/stacktrace.h>
29 #include <asm/nmi.h>
30
31 static u64 perf_event_mask __read_mostly;
32
33 /* The maximal number of PEBS events: */
34 #define MAX_PEBS_EVENTS 4
35
36 /* The size of a BTS record in bytes: */
37 #define BTS_RECORD_SIZE         24
38
39 /* The size of a per-cpu BTS buffer in bytes: */
40 #define BTS_BUFFER_SIZE         (BTS_RECORD_SIZE * 2048)
41
42 /* The BTS overflow threshold in bytes from the end of the buffer: */
43 #define BTS_OVFL_TH             (BTS_RECORD_SIZE * 128)
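/*
 * With the sizes above each per-cpu buffer holds 2048 BTS records
 * (24 * 2048 = 49152 bytes) and the interrupt threshold sits 128
 * records (3072 bytes) before the end of the buffer.
 */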
44
45
46 /*
47  * Bits in the debugctlmsr controlling branch tracing.
48  */
49 #define X86_DEBUGCTL_TR                 (1 << 6)
50 #define X86_DEBUGCTL_BTS                (1 << 7)
51 #define X86_DEBUGCTL_BTINT              (1 << 8)
52 #define X86_DEBUGCTL_BTS_OFF_OS         (1 << 9)
53 #define X86_DEBUGCTL_BTS_OFF_USR        (1 << 10)
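/*
 * TR enables branch trace messages, BTS stores them into the DS buffer
 * and BTINT raises an interrupt when the buffer threshold is reached;
 * the OFF_OS/OFF_USR bits suppress tracing in ring 0 and ring 3
 * respectively (see intel_pmu_enable_bts()).
 */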
54
55 /*
56  * A debug store configuration.
57  *
58  * We only support architectures that use 64bit fields.
59  */
60 struct debug_store {
61         u64     bts_buffer_base;
62         u64     bts_index;
63         u64     bts_absolute_maximum;
64         u64     bts_interrupt_threshold;
65         u64     pebs_buffer_base;
66         u64     pebs_index;
67         u64     pebs_absolute_maximum;
68         u64     pebs_interrupt_threshold;
69         u64     pebs_event_reset[MAX_PEBS_EVENTS];
70 };
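/*
 * The field layout above matches the 64-bit DS save area expected by the
 * hardware; init_debug_store_on_cpu() below points MSR_IA32_DS_AREA at
 * this structure.
 */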
71
72 #define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
73
74 struct event_constraint {
75         u64     idxmsk[BITS_TO_U64(X86_PMC_IDX_MAX)];
76         int     code;
77         int     cmask;
78 };
79
80 struct cpu_hw_events {
81         struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
82         unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
83         unsigned long           interrupts;
84         int                     enabled;
85         struct debug_store      *ds;
86
87         int                     n_events;
88         int                     n_added;
89         int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
90         struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
91 };
92
93 #define EVENT_CONSTRAINT(c, n, m) { \
94         .code = (c),    \
95         .cmask = (m),   \
96         .idxmsk[0] = (n) }
97
98 #define EVENT_CONSTRAINT_END \
99         { .code = 0, .cmask = 0, .idxmsk[0] = 0 }
100
101 #define for_each_event_constraint(e, c) \
102         for ((e) = (c); (e)->cmask; (e)++)
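/*
 * Constraint tables are built from EVENT_CONSTRAINT() entries, e.g.
 * EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK) restricts event
 * 0x12 (MUL) to counter 1 (idxmsk bit 1), and are walked with
 * for_each_event_constraint() up to the all-zero terminator.
 */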
103
104 /*
105  * struct x86_pmu - generic x86 pmu
106  */
107 struct x86_pmu {
108         const char      *name;
109         int             version;
110         int             (*handle_irq)(struct pt_regs *);
111         void            (*disable_all)(void);
112         void            (*enable_all)(void);
113         void            (*enable)(struct hw_perf_event *, int);
114         void            (*disable)(struct hw_perf_event *, int);
115         unsigned        eventsel;
116         unsigned        perfctr;
117         u64             (*event_map)(int);
118         u64             (*raw_event)(u64);
119         int             max_events;
120         int             num_events;
121         int             num_events_fixed;
122         int             event_bits;
123         u64             event_mask;
124         int             apic;
125         u64             max_period;
126         u64             intel_ctrl;
127         void            (*enable_bts)(u64 config);
128         void            (*disable_bts)(void);
129         void            (*get_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event, u64 *idxmsk);
130         void            (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event);
131         const struct event_constraint *event_constraints;
132 };
133
134 static struct x86_pmu x86_pmu __read_mostly;
135
136 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
137         .enabled = 1,
138 };
139
140 static int x86_perf_event_set_period(struct perf_event *event,
141                              struct hw_perf_event *hwc, int idx);
142
143 /*
144  * Not sure about some of these
145  */
146 static const u64 p6_perfmon_event_map[] =
147 {
148   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0079,
149   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
150   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0f2e,
151   [PERF_COUNT_HW_CACHE_MISSES]          = 0x012e,
152   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
153   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
154   [PERF_COUNT_HW_BUS_CYCLES]            = 0x0062,
155 };
156
157 static u64 p6_pmu_event_map(int hw_event)
158 {
159         return p6_perfmon_event_map[hw_event];
160 }
161
162 /*
163  * Event setting that is specified not to count anything.
164  * We use this to effectively disable a counter.
165  *
166  * L2_RQSTS with 0 MESI unit mask.
167  */
168 #define P6_NOP_EVENT                    0x0000002EULL
169
170 static u64 p6_pmu_raw_event(u64 hw_event)
171 {
172 #define P6_EVNTSEL_EVENT_MASK           0x000000FFULL
173 #define P6_EVNTSEL_UNIT_MASK            0x0000FF00ULL
174 #define P6_EVNTSEL_EDGE_MASK            0x00040000ULL
175 #define P6_EVNTSEL_INV_MASK             0x00800000ULL
176 #define P6_EVNTSEL_REG_MASK             0xFF000000ULL
177
178 #define P6_EVNTSEL_MASK                 \
179         (P6_EVNTSEL_EVENT_MASK |        \
180          P6_EVNTSEL_UNIT_MASK  |        \
181          P6_EVNTSEL_EDGE_MASK  |        \
182          P6_EVNTSEL_INV_MASK   |        \
183          P6_EVNTSEL_REG_MASK)
184
185         return hw_event & P6_EVNTSEL_MASK;
186 }
187
188 static struct event_constraint intel_p6_event_constraints[] =
189 {
190         EVENT_CONSTRAINT(0xc1, 0x1, INTEL_ARCH_EVENT_MASK),     /* FLOPS */
191         EVENT_CONSTRAINT(0x10, 0x1, INTEL_ARCH_EVENT_MASK),     /* FP_COMP_OPS_EXE */
192         EVENT_CONSTRAINT(0x11, 0x1, INTEL_ARCH_EVENT_MASK),     /* FP_ASSIST */
193         EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK),     /* MUL */
194         EVENT_CONSTRAINT(0x13, 0x2, INTEL_ARCH_EVENT_MASK),     /* DIV */
195         EVENT_CONSTRAINT(0x14, 0x1, INTEL_ARCH_EVENT_MASK),     /* CYCLES_DIV_BUSY */
196         EVENT_CONSTRAINT_END
197 };
198
199 /*
200  * Intel PerfMon v3. Used on Core2 and later.
201  */
202 static const u64 intel_perfmon_event_map[] =
203 {
204   [PERF_COUNT_HW_CPU_CYCLES]            = 0x003c,
205   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
206   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x4f2e,
207   [PERF_COUNT_HW_CACHE_MISSES]          = 0x412e,
208   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
209   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
210   [PERF_COUNT_HW_BUS_CYCLES]            = 0x013c,
211 };
212
213 static struct event_constraint intel_core_event_constraints[] =
214 {
215         EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
216         EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
217         EVENT_CONSTRAINT(0x10, 0x1, INTEL_ARCH_EVENT_MASK), /* FP_COMP_OPS_EXE */
218         EVENT_CONSTRAINT(0x11, 0x2, INTEL_ARCH_EVENT_MASK), /* FP_ASSIST */
219         EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK), /* MUL */
220         EVENT_CONSTRAINT(0x13, 0x2, INTEL_ARCH_EVENT_MASK), /* DIV */
221         EVENT_CONSTRAINT(0x14, 0x1, INTEL_ARCH_EVENT_MASK), /* CYCLES_DIV_BUSY */
222         EVENT_CONSTRAINT(0x18, 0x1, INTEL_ARCH_EVENT_MASK), /* IDLE_DURING_DIV */
223         EVENT_CONSTRAINT(0x19, 0x2, INTEL_ARCH_EVENT_MASK), /* DELAYED_BYPASS */
224         EVENT_CONSTRAINT(0xa1, 0x1, INTEL_ARCH_EVENT_MASK), /* RS_UOPS_DISPATCH_CYCLES */
225         EVENT_CONSTRAINT(0xcb, 0x1, INTEL_ARCH_EVENT_MASK), /* MEM_LOAD_RETIRED */
226         EVENT_CONSTRAINT_END
227 };
228
229 static struct event_constraint intel_nehalem_event_constraints[] =
230 {
231         EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
232         EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
233         EVENT_CONSTRAINT(0x40, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LD */
234         EVENT_CONSTRAINT(0x41, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_ST */
235         EVENT_CONSTRAINT(0x42, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LOCK */
236         EVENT_CONSTRAINT(0x43, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_ALL_REF */
237         EVENT_CONSTRAINT(0x4e, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_PREFETCH */
238         EVENT_CONSTRAINT(0x4c, 0x3, INTEL_ARCH_EVENT_MASK), /* LOAD_HIT_PRE */
239         EVENT_CONSTRAINT(0x51, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D */
240         EVENT_CONSTRAINT(0x52, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
241         EVENT_CONSTRAINT(0x53, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LOCK_FB_HIT */
242         EVENT_CONSTRAINT(0xc5, 0x3, INTEL_ARCH_EVENT_MASK), /* CACHE_LOCK_CYCLES */
243         EVENT_CONSTRAINT_END
244 };
245
246 static struct event_constraint intel_gen_event_constraints[] =
247 {
248         EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
249         EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
250         EVENT_CONSTRAINT_END
251 };
252
253 static u64 intel_pmu_event_map(int hw_event)
254 {
255         return intel_perfmon_event_map[hw_event];
256 }
257
258 /*
259  * Generalized hw caching related hw_event table, filled
260  * in on a per model basis. A value of 0 means
261  * 'not supported', -1 means 'hw_event makes no sense on
262  * this CPU', any other value means the raw hw_event
263  * ID.
264  */
265
266 #define C(x) PERF_COUNT_HW_CACHE_##x
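/*
 * For PERF_TYPE_HW_CACHE, attr->config packs the three table indices
 * into bytes: cache type in bits 0-7, operation in bits 8-15 and result
 * in bits 16-23 (decoded in set_ext_hw_attr() below). An L1D read miss,
 * for example, is C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16).
 */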
267
268 static u64 __read_mostly hw_cache_event_ids
269                                 [PERF_COUNT_HW_CACHE_MAX]
270                                 [PERF_COUNT_HW_CACHE_OP_MAX]
271                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
272
273 static __initconst u64 nehalem_hw_cache_event_ids
274                                 [PERF_COUNT_HW_CACHE_MAX]
275                                 [PERF_COUNT_HW_CACHE_OP_MAX]
276                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
277 {
278  [ C(L1D) ] = {
279         [ C(OP_READ) ] = {
280                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
281                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */
282         },
283         [ C(OP_WRITE) ] = {
284                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
285                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */
286         },
287         [ C(OP_PREFETCH) ] = {
288                 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
289                 [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
290         },
291  },
292  [ C(L1I ) ] = {
293         [ C(OP_READ) ] = {
294                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
295                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
296         },
297         [ C(OP_WRITE) ] = {
298                 [ C(RESULT_ACCESS) ] = -1,
299                 [ C(RESULT_MISS)   ] = -1,
300         },
301         [ C(OP_PREFETCH) ] = {
302                 [ C(RESULT_ACCESS) ] = 0x0,
303                 [ C(RESULT_MISS)   ] = 0x0,
304         },
305  },
306  [ C(LL  ) ] = {
307         [ C(OP_READ) ] = {
308                 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
309                 [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
310         },
311         [ C(OP_WRITE) ] = {
312                 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
313                 [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
314         },
315         [ C(OP_PREFETCH) ] = {
316                 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
317                 [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
318         },
319  },
320  [ C(DTLB) ] = {
321         [ C(OP_READ) ] = {
322                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
323                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
324         },
325         [ C(OP_WRITE) ] = {
326                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
327                 [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
328         },
329         [ C(OP_PREFETCH) ] = {
330                 [ C(RESULT_ACCESS) ] = 0x0,
331                 [ C(RESULT_MISS)   ] = 0x0,
332         },
333  },
334  [ C(ITLB) ] = {
335         [ C(OP_READ) ] = {
336                 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
337                 [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
338         },
339         [ C(OP_WRITE) ] = {
340                 [ C(RESULT_ACCESS) ] = -1,
341                 [ C(RESULT_MISS)   ] = -1,
342         },
343         [ C(OP_PREFETCH) ] = {
344                 [ C(RESULT_ACCESS) ] = -1,
345                 [ C(RESULT_MISS)   ] = -1,
346         },
347  },
348  [ C(BPU ) ] = {
349         [ C(OP_READ) ] = {
350                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
351                 [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
352         },
353         [ C(OP_WRITE) ] = {
354                 [ C(RESULT_ACCESS) ] = -1,
355                 [ C(RESULT_MISS)   ] = -1,
356         },
357         [ C(OP_PREFETCH) ] = {
358                 [ C(RESULT_ACCESS) ] = -1,
359                 [ C(RESULT_MISS)   ] = -1,
360         },
361  },
362 };
363
364 static __initconst u64 core2_hw_cache_event_ids
365                                 [PERF_COUNT_HW_CACHE_MAX]
366                                 [PERF_COUNT_HW_CACHE_OP_MAX]
367                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
368 {
369  [ C(L1D) ] = {
370         [ C(OP_READ) ] = {
371                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
372                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
373         },
374         [ C(OP_WRITE) ] = {
375                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
376                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
377         },
378         [ C(OP_PREFETCH) ] = {
379                 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
380                 [ C(RESULT_MISS)   ] = 0,
381         },
382  },
383  [ C(L1I ) ] = {
384         [ C(OP_READ) ] = {
385                 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
386                 [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
387         },
388         [ C(OP_WRITE) ] = {
389                 [ C(RESULT_ACCESS) ] = -1,
390                 [ C(RESULT_MISS)   ] = -1,
391         },
392         [ C(OP_PREFETCH) ] = {
393                 [ C(RESULT_ACCESS) ] = 0,
394                 [ C(RESULT_MISS)   ] = 0,
395         },
396  },
397  [ C(LL  ) ] = {
398         [ C(OP_READ) ] = {
399                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
400                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
401         },
402         [ C(OP_WRITE) ] = {
403                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
404                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
405         },
406         [ C(OP_PREFETCH) ] = {
407                 [ C(RESULT_ACCESS) ] = 0,
408                 [ C(RESULT_MISS)   ] = 0,
409         },
410  },
411  [ C(DTLB) ] = {
412         [ C(OP_READ) ] = {
413                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
414                 [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
415         },
416         [ C(OP_WRITE) ] = {
417                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
418                 [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
419         },
420         [ C(OP_PREFETCH) ] = {
421                 [ C(RESULT_ACCESS) ] = 0,
422                 [ C(RESULT_MISS)   ] = 0,
423         },
424  },
425  [ C(ITLB) ] = {
426         [ C(OP_READ) ] = {
427                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
428                 [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
429         },
430         [ C(OP_WRITE) ] = {
431                 [ C(RESULT_ACCESS) ] = -1,
432                 [ C(RESULT_MISS)   ] = -1,
433         },
434         [ C(OP_PREFETCH) ] = {
435                 [ C(RESULT_ACCESS) ] = -1,
436                 [ C(RESULT_MISS)   ] = -1,
437         },
438  },
439  [ C(BPU ) ] = {
440         [ C(OP_READ) ] = {
441                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
442                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
443         },
444         [ C(OP_WRITE) ] = {
445                 [ C(RESULT_ACCESS) ] = -1,
446                 [ C(RESULT_MISS)   ] = -1,
447         },
448         [ C(OP_PREFETCH) ] = {
449                 [ C(RESULT_ACCESS) ] = -1,
450                 [ C(RESULT_MISS)   ] = -1,
451         },
452  },
453 };
454
455 static __initconst u64 atom_hw_cache_event_ids
456                                 [PERF_COUNT_HW_CACHE_MAX]
457                                 [PERF_COUNT_HW_CACHE_OP_MAX]
458                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
459 {
460  [ C(L1D) ] = {
461         [ C(OP_READ) ] = {
462                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
463                 [ C(RESULT_MISS)   ] = 0,
464         },
465         [ C(OP_WRITE) ] = {
466                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
467                 [ C(RESULT_MISS)   ] = 0,
468         },
469         [ C(OP_PREFETCH) ] = {
470                 [ C(RESULT_ACCESS) ] = 0x0,
471                 [ C(RESULT_MISS)   ] = 0,
472         },
473  },
474  [ C(L1I ) ] = {
475         [ C(OP_READ) ] = {
476                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
477                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
478         },
479         [ C(OP_WRITE) ] = {
480                 [ C(RESULT_ACCESS) ] = -1,
481                 [ C(RESULT_MISS)   ] = -1,
482         },
483         [ C(OP_PREFETCH) ] = {
484                 [ C(RESULT_ACCESS) ] = 0,
485                 [ C(RESULT_MISS)   ] = 0,
486         },
487  },
488  [ C(LL  ) ] = {
489         [ C(OP_READ) ] = {
490                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
491                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
492         },
493         [ C(OP_WRITE) ] = {
494                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
495                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
496         },
497         [ C(OP_PREFETCH) ] = {
498                 [ C(RESULT_ACCESS) ] = 0,
499                 [ C(RESULT_MISS)   ] = 0,
500         },
501  },
502  [ C(DTLB) ] = {
503         [ C(OP_READ) ] = {
504                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
505                 [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
506         },
507         [ C(OP_WRITE) ] = {
508                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
509                 [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
510         },
511         [ C(OP_PREFETCH) ] = {
512                 [ C(RESULT_ACCESS) ] = 0,
513                 [ C(RESULT_MISS)   ] = 0,
514         },
515  },
516  [ C(ITLB) ] = {
517         [ C(OP_READ) ] = {
518                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
519                 [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
520         },
521         [ C(OP_WRITE) ] = {
522                 [ C(RESULT_ACCESS) ] = -1,
523                 [ C(RESULT_MISS)   ] = -1,
524         },
525         [ C(OP_PREFETCH) ] = {
526                 [ C(RESULT_ACCESS) ] = -1,
527                 [ C(RESULT_MISS)   ] = -1,
528         },
529  },
530  [ C(BPU ) ] = {
531         [ C(OP_READ) ] = {
532                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
533                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
534         },
535         [ C(OP_WRITE) ] = {
536                 [ C(RESULT_ACCESS) ] = -1,
537                 [ C(RESULT_MISS)   ] = -1,
538         },
539         [ C(OP_PREFETCH) ] = {
540                 [ C(RESULT_ACCESS) ] = -1,
541                 [ C(RESULT_MISS)   ] = -1,
542         },
543  },
544 };
545
546 static u64 intel_pmu_raw_event(u64 hw_event)
547 {
548 #define CORE_EVNTSEL_EVENT_MASK         0x000000FFULL
549 #define CORE_EVNTSEL_UNIT_MASK          0x0000FF00ULL
550 #define CORE_EVNTSEL_EDGE_MASK          0x00040000ULL
551 #define CORE_EVNTSEL_INV_MASK           0x00800000ULL
552 #define CORE_EVNTSEL_REG_MASK           0xFF000000ULL
553
554 #define CORE_EVNTSEL_MASK               \
555         (INTEL_ARCH_EVTSEL_MASK |       \
556          INTEL_ARCH_UNIT_MASK   |       \
557          INTEL_ARCH_EDGE_MASK   |       \
558          INTEL_ARCH_INV_MASK    |       \
559          INTEL_ARCH_CNT_MASK)
560
561         return hw_event & CORE_EVNTSEL_MASK;
562 }
563
564 static __initconst u64 amd_hw_cache_event_ids
565                                 [PERF_COUNT_HW_CACHE_MAX]
566                                 [PERF_COUNT_HW_CACHE_OP_MAX]
567                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
568 {
569  [ C(L1D) ] = {
570         [ C(OP_READ) ] = {
571                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
572                 [ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses          */
573         },
574         [ C(OP_WRITE) ] = {
575                 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
576                 [ C(RESULT_MISS)   ] = 0,
577         },
578         [ C(OP_PREFETCH) ] = {
579                 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
580                 [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
581         },
582  },
583  [ C(L1I ) ] = {
584         [ C(OP_READ) ] = {
585                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
586                 [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
587         },
588         [ C(OP_WRITE) ] = {
589                 [ C(RESULT_ACCESS) ] = -1,
590                 [ C(RESULT_MISS)   ] = -1,
591         },
592         [ C(OP_PREFETCH) ] = {
593                 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
594                 [ C(RESULT_MISS)   ] = 0,
595         },
596  },
597  [ C(LL  ) ] = {
598         [ C(OP_READ) ] = {
599                 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
600                 [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
601         },
602         [ C(OP_WRITE) ] = {
603                 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
604                 [ C(RESULT_MISS)   ] = 0,
605         },
606         [ C(OP_PREFETCH) ] = {
607                 [ C(RESULT_ACCESS) ] = 0,
608                 [ C(RESULT_MISS)   ] = 0,
609         },
610  },
611  [ C(DTLB) ] = {
612         [ C(OP_READ) ] = {
613                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
614                 [ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DLTB Miss   */
615         },
616         [ C(OP_WRITE) ] = {
617                 [ C(RESULT_ACCESS) ] = 0,
618                 [ C(RESULT_MISS)   ] = 0,
619         },
620         [ C(OP_PREFETCH) ] = {
621                 [ C(RESULT_ACCESS) ] = 0,
622                 [ C(RESULT_MISS)   ] = 0,
623         },
624  },
625  [ C(ITLB) ] = {
626         [ C(OP_READ) ] = {
627                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
628                 [ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses   */
629         },
630         [ C(OP_WRITE) ] = {
631                 [ C(RESULT_ACCESS) ] = -1,
632                 [ C(RESULT_MISS)   ] = -1,
633         },
634         [ C(OP_PREFETCH) ] = {
635                 [ C(RESULT_ACCESS) ] = -1,
636                 [ C(RESULT_MISS)   ] = -1,
637         },
638  },
639  [ C(BPU ) ] = {
640         [ C(OP_READ) ] = {
641                 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
642                 [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
643         },
644         [ C(OP_WRITE) ] = {
645                 [ C(RESULT_ACCESS) ] = -1,
646                 [ C(RESULT_MISS)   ] = -1,
647         },
648         [ C(OP_PREFETCH) ] = {
649                 [ C(RESULT_ACCESS) ] = -1,
650                 [ C(RESULT_MISS)   ] = -1,
651         },
652  },
653 };
654
655 /*
656  * AMD Performance Monitor K7 and later.
657  */
658 static const u64 amd_perfmon_event_map[] =
659 {
660   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0076,
661   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
662   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0080,
663   [PERF_COUNT_HW_CACHE_MISSES]          = 0x0081,
664   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
665   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
666 };
667
668 static u64 amd_pmu_event_map(int hw_event)
669 {
670         return amd_perfmon_event_map[hw_event];
671 }
672
673 static u64 amd_pmu_raw_event(u64 hw_event)
674 {
675 #define K7_EVNTSEL_EVENT_MASK   0x7000000FFULL
676 #define K7_EVNTSEL_UNIT_MASK    0x00000FF00ULL
677 #define K7_EVNTSEL_EDGE_MASK    0x000040000ULL
678 #define K7_EVNTSEL_INV_MASK     0x000800000ULL
679 #define K7_EVNTSEL_REG_MASK     0x0FF000000ULL
680
681 #define K7_EVNTSEL_MASK                 \
682         (K7_EVNTSEL_EVENT_MASK |        \
683          K7_EVNTSEL_UNIT_MASK  |        \
684          K7_EVNTSEL_EDGE_MASK  |        \
685          K7_EVNTSEL_INV_MASK   |        \
686          K7_EVNTSEL_REG_MASK)
687
688         return hw_event & K7_EVNTSEL_MASK;
689 }
690
691 /*
692  * Propagate event elapsed time into the generic event.
693  * Can only be executed on the CPU where the event is active.
694  * Returns the delta events processed.
695  */
696 static u64
697 x86_perf_event_update(struct perf_event *event,
698                         struct hw_perf_event *hwc, int idx)
699 {
700         int shift = 64 - x86_pmu.event_bits;
701         u64 prev_raw_count, new_raw_count;
702         s64 delta;
703
704         if (idx == X86_PMC_IDX_FIXED_BTS)
705                 return 0;
706
707         /*
708          * Careful: an NMI might modify the previous event value.
709          *
710          * Our tactic to handle this is to first atomically read and
711          * exchange a new raw count - then add that new-prev delta
712          * count to the generic event atomically:
713          */
714 again:
715         prev_raw_count = atomic64_read(&hwc->prev_count);
716         rdmsrl(hwc->event_base + idx, new_raw_count);
717
718         if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
719                                         new_raw_count) != prev_raw_count)
720                 goto again;
721
722         /*
723          * Now we have the new raw value and have updated the prev
724          * timestamp already. We can now calculate the elapsed delta
725          * (event-)time and add that to the generic event.
726          *
727          * Careful, not all hw sign-extends above the physical width
728          * of the count.
729          */
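        /*
         * Shifting both raw counts up by (64 - event_bits) discards any
         * bits the hardware may have left above the counter width; the
         * subtraction and the arithmetic shift back down then yield the
         * delta as a properly sign-extended event_bits-wide value
         * (shift is 16 for 48-bit counters, for example).
         */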
730         delta = (new_raw_count << shift) - (prev_raw_count << shift);
731         delta >>= shift;
732
733         atomic64_add(delta, &event->count);
734         atomic64_sub(delta, &hwc->period_left);
735
736         return new_raw_count;
737 }
738
739 static atomic_t active_events;
740 static DEFINE_MUTEX(pmc_reserve_mutex);
741
742 static bool reserve_pmc_hardware(void)
743 {
744 #ifdef CONFIG_X86_LOCAL_APIC
745         int i;
746
747         if (nmi_watchdog == NMI_LOCAL_APIC)
748                 disable_lapic_nmi_watchdog();
749
750         for (i = 0; i < x86_pmu.num_events; i++) {
751                 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
752                         goto perfctr_fail;
753         }
754
755         for (i = 0; i < x86_pmu.num_events; i++) {
756                 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
757                         goto eventsel_fail;
758         }
759 #endif
760
761         return true;
762
763 #ifdef CONFIG_X86_LOCAL_APIC
764 eventsel_fail:
765         for (i--; i >= 0; i--)
766                 release_evntsel_nmi(x86_pmu.eventsel + i);
767
768         i = x86_pmu.num_events;
769
770 perfctr_fail:
771         for (i--; i >= 0; i--)
772                 release_perfctr_nmi(x86_pmu.perfctr + i);
773
774         if (nmi_watchdog == NMI_LOCAL_APIC)
775                 enable_lapic_nmi_watchdog();
776
777         return false;
778 #endif
779 }
780
781 static void release_pmc_hardware(void)
782 {
783 #ifdef CONFIG_X86_LOCAL_APIC
784         int i;
785
786         for (i = 0; i < x86_pmu.num_events; i++) {
787                 release_perfctr_nmi(x86_pmu.perfctr + i);
788                 release_evntsel_nmi(x86_pmu.eventsel + i);
789         }
790
791         if (nmi_watchdog == NMI_LOCAL_APIC)
792                 enable_lapic_nmi_watchdog();
793 #endif
794 }
795
796 static inline bool bts_available(void)
797 {
798         return x86_pmu.enable_bts != NULL;
799 }
800
801 static inline void init_debug_store_on_cpu(int cpu)
802 {
803         struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
804
805         if (!ds)
806                 return;
807
808         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
809                      (u32)((u64)(unsigned long)ds),
810                      (u32)((u64)(unsigned long)ds >> 32));
811 }
812
813 static inline void fini_debug_store_on_cpu(int cpu)
814 {
815         if (!per_cpu(cpu_hw_events, cpu).ds)
816                 return;
817
818         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
819 }
820
821 static void release_bts_hardware(void)
822 {
823         int cpu;
824
825         if (!bts_available())
826                 return;
827
828         get_online_cpus();
829
830         for_each_online_cpu(cpu)
831                 fini_debug_store_on_cpu(cpu);
832
833         for_each_possible_cpu(cpu) {
834                 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
835
836                 if (!ds)
837                         continue;
838
839                 per_cpu(cpu_hw_events, cpu).ds = NULL;
840
841                 kfree((void *)(unsigned long)ds->bts_buffer_base);
842                 kfree(ds);
843         }
844
845         put_online_cpus();
846 }
847
848 static int reserve_bts_hardware(void)
849 {
850         int cpu, err = 0;
851
852         if (!bts_available())
853                 return 0;
854
855         get_online_cpus();
856
857         for_each_possible_cpu(cpu) {
858                 struct debug_store *ds;
859                 void *buffer;
860
861                 err = -ENOMEM;
862                 buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
863                 if (unlikely(!buffer))
864                         break;
865
866                 ds = kzalloc(sizeof(*ds), GFP_KERNEL);
867                 if (unlikely(!ds)) {
868                         kfree(buffer);
869                         break;
870                 }
871
872                 ds->bts_buffer_base = (u64)(unsigned long)buffer;
873                 ds->bts_index = ds->bts_buffer_base;
874                 ds->bts_absolute_maximum =
875                         ds->bts_buffer_base + BTS_BUFFER_SIZE;
876                 ds->bts_interrupt_threshold =
877                         ds->bts_absolute_maximum - BTS_OVFL_TH;
878
879                 per_cpu(cpu_hw_events, cpu).ds = ds;
880                 err = 0;
881         }
882
883         if (err)
884                 release_bts_hardware();
885         else {
886                 for_each_online_cpu(cpu)
887                         init_debug_store_on_cpu(cpu);
888         }
889
890         put_online_cpus();
891
892         return err;
893 }
894
895 static void hw_perf_event_destroy(struct perf_event *event)
896 {
897         if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
898                 release_pmc_hardware();
899                 release_bts_hardware();
900                 mutex_unlock(&pmc_reserve_mutex);
901         }
902 }
903
904 static inline int x86_pmu_initialized(void)
905 {
906         return x86_pmu.handle_irq != NULL;
907 }
908
909 static inline int
910 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
911 {
912         unsigned int cache_type, cache_op, cache_result;
913         u64 config, val;
914
915         config = attr->config;
916
917         cache_type = (config >>  0) & 0xff;
918         if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
919                 return -EINVAL;
920
921         cache_op = (config >>  8) & 0xff;
922         if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
923                 return -EINVAL;
924
925         cache_result = (config >> 16) & 0xff;
926         if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
927                 return -EINVAL;
928
929         val = hw_cache_event_ids[cache_type][cache_op][cache_result];
930
931         if (val == 0)
932                 return -ENOENT;
933
934         if (val == -1)
935                 return -EINVAL;
936
937         hwc->config |= val;
938
939         return 0;
940 }
941
942 static void intel_pmu_enable_bts(u64 config)
943 {
944         unsigned long debugctlmsr;
945
946         debugctlmsr = get_debugctlmsr();
947
948         debugctlmsr |= X86_DEBUGCTL_TR;
949         debugctlmsr |= X86_DEBUGCTL_BTS;
950         debugctlmsr |= X86_DEBUGCTL_BTINT;
951
952         if (!(config & ARCH_PERFMON_EVENTSEL_OS))
953                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
954
955         if (!(config & ARCH_PERFMON_EVENTSEL_USR))
956                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
957
958         update_debugctlmsr(debugctlmsr);
959 }
960
961 static void intel_pmu_disable_bts(void)
962 {
963         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
964         unsigned long debugctlmsr;
965
966         if (!cpuc->ds)
967                 return;
968
969         debugctlmsr = get_debugctlmsr();
970
971         debugctlmsr &=
972                 ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
973                   X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
974
975         update_debugctlmsr(debugctlmsr);
976 }
977
978 /*
979  * Setup the hardware configuration for a given attr_type
980  */
981 static int __hw_perf_event_init(struct perf_event *event)
982 {
983         struct perf_event_attr *attr = &event->attr;
984         struct hw_perf_event *hwc = &event->hw;
985         u64 config;
986         int err;
987
988         if (!x86_pmu_initialized())
989                 return -ENODEV;
990
991         err = 0;
992         if (!atomic_inc_not_zero(&active_events)) {
993                 mutex_lock(&pmc_reserve_mutex);
994                 if (atomic_read(&active_events) == 0) {
995                         if (!reserve_pmc_hardware())
996                                 err = -EBUSY;
997                         else
998                                 err = reserve_bts_hardware();
999                 }
1000                 if (!err)
1001                         atomic_inc(&active_events);
1002                 mutex_unlock(&pmc_reserve_mutex);
1003         }
1004         if (err)
1005                 return err;
1006
1007         event->destroy = hw_perf_event_destroy;
1008
1009         /*
1010          * Generate PMC IRQs:
1011          * (keep 'enabled' bit clear for now)
1012          */
1013         hwc->config = ARCH_PERFMON_EVENTSEL_INT;
1014
1015         hwc->idx = -1;
1016
1017         /*
1018          * Count user and OS events unless requested not to.
1019          */
1020         if (!attr->exclude_user)
1021                 hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
1022         if (!attr->exclude_kernel)
1023                 hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
1024
1025         if (!hwc->sample_period) {
1026                 hwc->sample_period = x86_pmu.max_period;
1027                 hwc->last_period = hwc->sample_period;
1028                 atomic64_set(&hwc->period_left, hwc->sample_period);
1029         } else {
1030                 /*
1031                  * If we have a PMU initialized but no APIC
1032                  * interrupts, we cannot sample hardware
1033                  * events (user-space has to fall back and
1034                  * sample via a hrtimer based software event):
1035                  */
1036                 if (!x86_pmu.apic)
1037                         return -EOPNOTSUPP;
1038         }
1039
1040         /*
1041          * Raw hw_event type provides the config in the hw_event structure
1042          */
1043         if (attr->type == PERF_TYPE_RAW) {
1044                 hwc->config |= x86_pmu.raw_event(attr->config);
1045                 return 0;
1046         }
1047
1048         if (attr->type == PERF_TYPE_HW_CACHE)
1049                 return set_ext_hw_attr(hwc, attr);
1050
1051         if (attr->config >= x86_pmu.max_events)
1052                 return -EINVAL;
1053
1054         /*
1055          * The generic map:
1056          */
1057         config = x86_pmu.event_map(attr->config);
1058
1059         if (config == 0)
1060                 return -ENOENT;
1061
1062         if (config == -1LL)
1063                 return -EINVAL;
1064
1065         /*
1066          * Branch tracing:
1067          */
1068         if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
1069             (hwc->sample_period == 1)) {
1070                 /* BTS is not supported by this architecture. */
1071                 if (!bts_available())
1072                         return -EOPNOTSUPP;
1073
1074                 /* BTS is currently only allowed for user-mode. */
1075                 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1076                         return -EOPNOTSUPP;
1077         }
1078
1079         hwc->config |= config;
1080
1081         return 0;
1082 }
1083
1084 static void p6_pmu_disable_all(void)
1085 {
1086         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1087         u64 val;
1088
1089         if (!cpuc->enabled)
1090                 return;
1091
1092         cpuc->enabled = 0;
1093         barrier();
1094
1095         /* p6 only has one enable register */
1096         rdmsrl(MSR_P6_EVNTSEL0, val);
1097         val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1098         wrmsrl(MSR_P6_EVNTSEL0, val);
1099 }
1100
1101 static void intel_pmu_disable_all(void)
1102 {
1103         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1104
1105         if (!cpuc->enabled)
1106                 return;
1107
1108         cpuc->enabled = 0;
1109         barrier();
1110
1111         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1112
1113         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1114                 intel_pmu_disable_bts();
1115 }
1116
1117 static void amd_pmu_disable_all(void)
1118 {
1119         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1120         int idx;
1121
1122         if (!cpuc->enabled)
1123                 return;
1124
1125         cpuc->enabled = 0;
1126         /*
1127          * ensure we write the disable before we start disabling the
1128          * events proper, so that amd_pmu_enable_event() does the
1129          * right thing.
1130          */
1131         barrier();
1132
1133         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1134                 u64 val;
1135
1136                 if (!test_bit(idx, cpuc->active_mask))
1137                         continue;
1138                 rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
1139                 if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
1140                         continue;
1141                 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1142                 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1143         }
1144 }
1145
1146 void hw_perf_disable(void)
1147 {
1148         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1149
1150         if (!x86_pmu_initialized())
1151                 return;
1152
1153         if (cpuc->enabled)
1154                 cpuc->n_added = 0;
1155
1156         x86_pmu.disable_all();
1157 }
1158
1159 static void p6_pmu_enable_all(void)
1160 {
1161         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1162         unsigned long val;
1163
1164         if (cpuc->enabled)
1165                 return;
1166
1167         cpuc->enabled = 1;
1168         barrier();
1169
1170         /* p6 only has one enable register */
1171         rdmsrl(MSR_P6_EVNTSEL0, val);
1172         val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1173         wrmsrl(MSR_P6_EVNTSEL0, val);
1174 }
1175
1176 static void intel_pmu_enable_all(void)
1177 {
1178         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1179
1180         if (cpuc->enabled)
1181                 return;
1182
1183         cpuc->enabled = 1;
1184         barrier();
1185
1186         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
1187
1188         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1189                 struct perf_event *event =
1190                         cpuc->events[X86_PMC_IDX_FIXED_BTS];
1191
1192                 if (WARN_ON_ONCE(!event))
1193                         return;
1194
1195                 intel_pmu_enable_bts(event->hw.config);
1196         }
1197 }
1198
1199 static void amd_pmu_enable_all(void)
1200 {
1201         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1202         int idx;
1203
1204         if (cpuc->enabled)
1205                 return;
1206
1207         cpuc->enabled = 1;
1208         barrier();
1209
1210         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1211                 struct perf_event *event = cpuc->events[idx];
1212                 u64 val;
1213
1214                 if (!test_bit(idx, cpuc->active_mask))
1215                         continue;
1216
1217                 val = event->hw.config;
1218                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1219                 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1220         }
1221 }
1222
1223 static const struct pmu pmu;
1224
1225 static inline int is_x86_event(struct perf_event *event)
1226 {
1227         return event->pmu == &pmu;
1228 }
1229
1230 static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
1231 {
1232         int i, j, w, num;
1233         int weight, wmax;
1234         unsigned long *c;
1235         u64 constraints[X86_PMC_IDX_MAX][BITS_TO_LONGS(X86_PMC_IDX_MAX)];
1236         unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
1237         struct hw_perf_event *hwc;
1238
1239         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1240
1241         for (i = 0; i < n; i++) {
1242                 x86_pmu.get_event_constraints(cpuc,
1243                                               cpuc->event_list[i],
1244                                               constraints[i]);
1245         }
1246
1247         /*
1248          * weight = number of possible counters
1249          *
1250          * 1    = most constrained, only works on one counter
1251          * wmax = least constrained, works on any counter
1252          *
1253          * assign events to counters starting with most
1254          * constrained events.
1255          */
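        /*
         * Example with the Core constraints above: a MUL event (counter 1
         * only, weight 1) is placed before an INSTRUCTIONS_RETIRED event
         * (counters 0-1 or fixed counter 0, weight 3), so the flexible
         * event cannot starve the constrained one.
         */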
1256         wmax = x86_pmu.num_events;
1257
1258         /*
1259          * when fixed event counters are present,
1260          * wmax is incremented by 1 to account
1261          * for one more choice
1262          */
1263         if (x86_pmu.num_events_fixed)
1264                 wmax++;
1265
1266         num = n;
1267         for (w = 1; num && w <= wmax; w++) {
1268                 /* for each event */
1269                 for (i = 0; i < n; i++) {
1270                         c = (unsigned long *)constraints[i];
1271                         hwc = &cpuc->event_list[i]->hw;
1272
1273                         weight = bitmap_weight(c, X86_PMC_IDX_MAX);
1274                         if (weight != w)
1275                                 continue;
1276
1277                         /*
1278                          * try to reuse previous assignment
1279                          *
1280                          * This is possible despite the fact that
1281                          * events or events order may have changed.
1282                          *
1283                          * What matters is the level of constraints
1284                          * of an event and this is constant for now.
1285                          *
1286                          * This is possible also because we always
1287                          * scan from most to least constrained. Thus,
1288                          * if a counter can be reused, it means no
1289                          * more constrained event needed it. And
1290                          * next events will either compete for it
1291                          * (which cannot be solved anyway) or they
1292                          * have fewer constraints, and they can use
1293                          * another counter.
1294                          */
1295                         j = hwc->idx;
1296                         if (j != -1 && !test_bit(j, used_mask))
1297                                 goto skip;
1298
1299                         for_each_bit(j, c, X86_PMC_IDX_MAX) {
1300                                 if (!test_bit(j, used_mask))
1301                                         break;
1302                         }
1303
1304                         if (j == X86_PMC_IDX_MAX)
1305                                 break;
1306 skip:
1307                         set_bit(j, used_mask);
1308
1309 #if 0
1310                         pr_debug("CPU%d config=0x%llx idx=%d assign=%c\n",
1311                                 smp_processor_id(),
1312                                 hwc->config,
1313                                 j,
1314                                 assign ? 'y' : 'n');
1315 #endif
1316
1317                         if (assign)
1318                                 assign[i] = j;
1319                         num--;
1320                 }
1321         }
1322         /*
1323          * scheduling failed or is just a simulation,
1324          * free resources if necessary
1325          */
1326         if (!assign || num) {
1327                 for (i = 0; i < n; i++) {
1328                         if (x86_pmu.put_event_constraints)
1329                                 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
1330                 }
1331         }
1332         return num ? -ENOSPC : 0;
1333 }
1334
1335 /*
1336  * dogrp: true if we must collect sibling events (group)
1337  * returns total number of events and error code
1338  */
1339 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
1340 {
1341         struct perf_event *event;
1342         int n, max_count;
1343
1344         max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
1345
1346         /* current number of events already accepted */
1347         n = cpuc->n_events;
1348
1349         if (is_x86_event(leader)) {
1350                 if (n >= max_count)
1351                         return -ENOSPC;
1352                 cpuc->event_list[n] = leader;
1353                 n++;
1354         }
1355         if (!dogrp)
1356                 return n;
1357
1358         list_for_each_entry(event, &leader->sibling_list, group_entry) {
1359                 if (!is_x86_event(event) ||
1360                     event->state == PERF_EVENT_STATE_OFF)
1361                         continue;
1362
1363                 if (n >= max_count)
1364                         return -ENOSPC;
1365
1366                 cpuc->event_list[n] = event;
1367                 n++;
1368         }
1369         return n;
1370 }
1371
1372
1373 static inline void x86_assign_hw_event(struct perf_event *event,
1374                                 struct hw_perf_event *hwc, int idx)
1375 {
1376         hwc->idx = idx;
1377
1378         if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
1379                 hwc->config_base = 0;
1380                 hwc->event_base = 0;
1381         } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
1382                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1383                 /*
1384                  * We set it so that event_base + idx in wrmsr/rdmsr maps to
1385                  * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1386                  */
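                /*
                 * e.g. idx == X86_PMC_IDX_FIXED + 1 then resolves to
                 * MSR_ARCH_PERFMON_FIXED_CTR1.
                 */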
1387                 hwc->event_base =
1388                         MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1389         } else {
1390                 hwc->config_base = x86_pmu.eventsel;
1391                 hwc->event_base  = x86_pmu.perfctr;
1392         }
1393 }
1394
1395 void hw_perf_enable(void)
1396 {
1397         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1398         struct perf_event *event;
1399         struct hw_perf_event *hwc;
1400         int i;
1401
1402         if (!x86_pmu_initialized())
1403                 return;
1404         if (cpuc->n_added) {
1405                 /*
1406                  * apply assignment obtained either from
1407                  * hw_perf_group_sched_in() or x86_pmu_enable()
1408                  *
1409                  * step1: save events moving to new counters
1410                  * step2: reprogram moved events into new counters
1411                  */
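                /*
                 * Two passes are needed: step1 stops and saves every
                 * moving event first, so step2 never reprograms a
                 * counter that another moving event still occupies.
                 */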
1412                 for (i = 0; i < cpuc->n_events; i++) {
1413
1414                         event = cpuc->event_list[i];
1415                         hwc = &event->hw;
1416
1417                         if (hwc->idx == -1 || hwc->idx == cpuc->assign[i])
1418                                 continue;
1419
1420                         x86_pmu.disable(hwc, hwc->idx);
1421
1422                         clear_bit(hwc->idx, cpuc->active_mask);
1423                         barrier();
1424                         cpuc->events[hwc->idx] = NULL;
1425
1426                         x86_perf_event_update(event, hwc, hwc->idx);
1427
1428                         hwc->idx = -1;
1429                 }
1430
1431                 for (i = 0; i < cpuc->n_events; i++) {
1432
1433                         event = cpuc->event_list[i];
1434                         hwc = &event->hw;
1435
1436                         if (hwc->idx == -1) {
1437                                 x86_assign_hw_event(event, hwc, cpuc->assign[i]);
1438                                 x86_perf_event_set_period(event, hwc, hwc->idx);
1439                         }
1440                         /*
1441                          * need to mark as active because x86_pmu_disable()
1442                          * clears active_mask and events[] yet it preserves
1443                          * idx
1444                          */
1445                         set_bit(hwc->idx, cpuc->active_mask);
1446                         cpuc->events[hwc->idx] = event;
1447
1448                         x86_pmu.enable(hwc, hwc->idx);
1449                         perf_event_update_userpage(event);
1450                 }
1451                 cpuc->n_added = 0;
1452                 perf_events_lapic_init();
1453         }
1454         x86_pmu.enable_all();
1455 }
1456
1457 static inline u64 intel_pmu_get_status(void)
1458 {
1459         u64 status;
1460
1461         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1462
1463         return status;
1464 }
1465
1466 static inline void intel_pmu_ack_status(u64 ack)
1467 {
1468         wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1469 }
1470
1471 static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1472 {
1473         (void)checking_wrmsrl(hwc->config_base + idx,
1474                               hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
1475 }
1476
1477 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1478 {
1479         (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
1480 }
1481
1482 static inline void
1483 intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
1484 {
1485         int idx = __idx - X86_PMC_IDX_FIXED;
1486         u64 ctrl_val, mask;
1487
1488         mask = 0xfULL << (idx * 4);
1489
1490         rdmsrl(hwc->config_base, ctrl_val);
1491         ctrl_val &= ~mask;
1492         (void)checking_wrmsrl(hwc->config_base, ctrl_val);
1493 }
1494
1495 static inline void
1496 p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1497 {
1498         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1499         u64 val = P6_NOP_EVENT;
1500
1501         if (cpuc->enabled)
1502                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1503
1504         (void)checking_wrmsrl(hwc->config_base + idx, val);
1505 }
1506
1507 static inline void
1508 intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1509 {
1510         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1511                 intel_pmu_disable_bts();
1512                 return;
1513         }
1514
1515         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1516                 intel_pmu_disable_fixed(hwc, idx);
1517                 return;
1518         }
1519
1520         x86_pmu_disable_event(hwc, idx);
1521 }
1522
1523 static inline void
1524 amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1525 {
1526         x86_pmu_disable_event(hwc, idx);
1527 }
1528
1529 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1530
1531 /*
1532  * Set the next IRQ period, based on the hwc->period_left value.
1533  * To be called with the event disabled in hw:
1534  */
1535 static int
1536 x86_perf_event_set_period(struct perf_event *event,
1537                              struct hw_perf_event *hwc, int idx)
1538 {
1539         s64 left = atomic64_read(&hwc->period_left);
1540         s64 period = hwc->sample_period;
1541         int err, ret = 0;
1542
1543         if (idx == X86_PMC_IDX_FIXED_BTS)
1544                 return 0;
1545
1546         /*
1547          * If we are way outside a reasonable range then just skip forward:
1548          */
1549         if (unlikely(left <= -period)) {
1550                 left = period;
1551                 atomic64_set(&hwc->period_left, left);
1552                 hwc->last_period = period;
1553                 ret = 1;
1554         }
1555
1556         if (unlikely(left <= 0)) {
1557                 left += period;
1558                 atomic64_set(&hwc->period_left, left);
1559                 hwc->last_period = period;
1560                 ret = 1;
1561         }
1562         /*
1563          * Quirk: certain CPUs don't like it if just 1 hw_event is left:
1564          */
1565         if (unlikely(left < 2))
1566                 left = 2;
1567
1568         if (left > x86_pmu.max_period)
1569                 left = x86_pmu.max_period;
1570
1571         per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1572
1573         /*
1574          * The hw event starts counting from this event offset,
1575          * mark it to be able to extract future deltas:
1576          */
1577         atomic64_set(&hwc->prev_count, (u64)-left);
1578
1579         err = checking_wrmsrl(hwc->event_base + idx,
1580                              (u64)(-left) & x86_pmu.event_mask);
1581
1582         perf_event_update_userpage(event);
1583
1584         return ret;
1585 }
1586
1587 static inline void
1588 intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
1589 {
1590         int idx = __idx - X86_PMC_IDX_FIXED;
1591         u64 ctrl_val, bits, mask;
1592         int err;
1593
1594         /*
1595          * Enable IRQ generation (0x8),
1596          * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1597          * if requested:
1598          */
1599         bits = 0x8ULL;
1600         if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1601                 bits |= 0x2;
1602         if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1603                 bits |= 0x1;
1604         bits <<= (idx * 4);
1605         mask = 0xfULL << (idx * 4);
1606
1607         rdmsrl(hwc->config_base, ctrl_val);
1608         ctrl_val &= ~mask;
1609         ctrl_val |= bits;
1610         err = checking_wrmsrl(hwc->config_base, ctrl_val);
1611 }
1612
1613 static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1614 {
1615         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1616         u64 val;
1617
1618         val = hwc->config;
1619         if (cpuc->enabled)
1620                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1621
1622         (void)checking_wrmsrl(hwc->config_base + idx, val);
1623 }
1624
1625
1626 static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1627 {
1628         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1629                 if (!__get_cpu_var(cpu_hw_events).enabled)
1630                         return;
1631
1632                 intel_pmu_enable_bts(hwc->config);
1633                 return;
1634         }
1635
1636         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1637                 intel_pmu_enable_fixed(hwc, idx);
1638                 return;
1639         }
1640
1641         x86_pmu_enable_event(hwc, idx);
1642 }
1643
1644 static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1645 {
1646         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1647
1648         if (cpuc->enabled)
1649                 x86_pmu_enable_event(hwc, idx);
1650 }
1651
1652 /*
1653  * activate a single event
1654  *
1655  * The event is added to the group of enabled events
1656  * but only if it can be scheduled with existing events.
1657  *
1658  * Called with the PMU disabled. On success (return value 0) the caller
1659  * is then guaranteed to call perf_enable() and hw_perf_enable().
1660  */
1661 static int x86_pmu_enable(struct perf_event *event)
1662 {
1663         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1664         struct hw_perf_event *hwc;
1665         int assign[X86_PMC_IDX_MAX];
1666         int n, n0, ret;
1667
1668         hwc = &event->hw;
1669
1670         n0 = cpuc->n_events;
1671         n = collect_events(cpuc, event, false);
1672         if (n < 0)
1673                 return n;
1674
1675         ret = x86_schedule_events(cpuc, n, assign);
1676         if (ret)
1677                 return ret;
1678         /*
1679          * copy the new assignment now that we know it is schedulable;
1680          * it will be used by hw_perf_enable()
1681          */
1682         memcpy(cpuc->assign, assign, n*sizeof(int));
1683
1684         cpuc->n_events = n;
1685         cpuc->n_added  = n - n0;
1686
1687         if (hwc->idx != -1)
1688                 x86_perf_event_set_period(event, hwc, hwc->idx);
1689
1690         return 0;
1691 }
1692
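/*
 * Re-enable a counter that the generic code throttled after too many
 * interrupts; the event keeps its existing counter assignment.
 */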
1693 static void x86_pmu_unthrottle(struct perf_event *event)
1694 {
1695         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1696         struct hw_perf_event *hwc = &event->hw;
1697
1698         if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
1699                                 cpuc->events[hwc->idx] != event))
1700                 return;
1701
1702         x86_pmu.enable(hwc, hwc->idx);
1703 }
1704
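/*
 * Dump the PMU state of the current CPU: the global control/status
 * MSRs (on architectural perfmon v2+) plus every generic and fixed
 * counter.
 */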
1705 void perf_event_print_debug(void)
1706 {
1707         u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1708         struct cpu_hw_events *cpuc;
1709         unsigned long flags;
1710         int cpu, idx;
1711
1712         if (!x86_pmu.num_events)
1713                 return;
1714
1715         local_irq_save(flags);
1716
1717         cpu = smp_processor_id();
1718         cpuc = &per_cpu(cpu_hw_events, cpu);
1719
1720         if (x86_pmu.version >= 2) {
1721                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1722                 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1723                 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1724                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1725
1726                 pr_info("\n");
1727                 pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
1728                 pr_info("CPU#%d: status:     %016llx\n", cpu, status);
1729                 pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
1730                 pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
1731         }
1732         pr_info("CPU#%d: active:       %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1733
1734         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1735                 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1736                 rdmsrl(x86_pmu.perfctr  + idx, pmc_count);
1737
1738                 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1739
1740                 pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
1741                         cpu, idx, pmc_ctrl);
1742                 pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
1743                         cpu, idx, pmc_count);
1744                 pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
1745                         cpu, idx, prev_left);
1746         }
1747         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1748                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1749
1750                 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1751                         cpu, idx, pmc_count);
1752         }
1753         local_irq_restore(flags);
1754 }
1755
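/*
 * Drain the Branch Trace Store buffer: turn every hardware from/to
 * branch record into a perf sample for the BTS event and reset the
 * buffer index.
 */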
1756 static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
1757 {
1758         struct debug_store *ds = cpuc->ds;
1759         struct bts_record {
1760                 u64     from;
1761                 u64     to;
1762                 u64     flags;
1763         };
1764         struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
1765         struct bts_record *at, *top;
1766         struct perf_output_handle handle;
1767         struct perf_event_header header;
1768         struct perf_sample_data data;
1769         struct pt_regs regs;
1770
1771         if (!event)
1772                 return;
1773
1774         if (!ds)
1775                 return;
1776
1777         at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1778         top = (struct bts_record *)(unsigned long)ds->bts_index;
1779
1780         if (top <= at)
1781                 return;
1782
1783         ds->bts_index = ds->bts_buffer_base;
1784
1785
1786         data.period     = event->hw.last_period;
1787         data.addr       = 0;
1788         data.raw        = NULL;
1789         regs.ip         = 0;
1790
1791         /*
1792          * Prepare a generic sample, i.e. fill in the invariant fields.
1793          * We will overwrite the from and to address before we output
1794          * the sample.
1795          */
1796         perf_prepare_sample(&header, &data, event, &regs);
1797
1798         if (perf_output_begin(&handle, event,
1799                               header.size * (top - at), 1, 1))
1800                 return;
1801
1802         for (; at < top; at++) {
1803                 data.ip         = at->from;
1804                 data.addr       = at->to;
1805
1806                 perf_output_sample(&handle, &header, &data, event);
1807         }
1808
1809         perf_output_end(&handle);
1810
1811         /* There's new data available. */
1812         event->hw.interrupts++;
1813         event->pending_kill = POLL_IN;
1814 }
1815
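/*
 * Remove an event from the PMU: stop its counter, fold the remaining
 * count into the event and drop it from the assignment bookkeeping.
 */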
1816 static void x86_pmu_disable(struct perf_event *event)
1817 {
1818         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1819         struct hw_perf_event *hwc = &event->hw;
1820         int i, idx = hwc->idx;
1821
1822         /*
1823          * Must be done before we disable, otherwise the nmi handler
1824          * could reenable again:
1825          */
1826         clear_bit(idx, cpuc->active_mask);
1827         x86_pmu.disable(hwc, idx);
1828
1829         /*
1830          * Make sure the cleared pointer becomes visible before we
1831          * (potentially) free the event:
1832          */
1833         barrier();
1834
1835         /*
1836          * Drain the remaining delta count out of an event
1837          * that we are disabling:
1838          */
1839         x86_perf_event_update(event, hwc, idx);
1840
1841         /* Drain the remaining BTS records. */
1842         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1843                 intel_pmu_drain_bts_buffer(cpuc);
1844
1845         cpuc->events[idx] = NULL;
1846
1847         for (i = 0; i < cpuc->n_events; i++) {
1848                 if (event == cpuc->event_list[i]) {
1849
1850                         if (x86_pmu.put_event_constraints)
1851                                 x86_pmu.put_event_constraints(cpuc, event);
1852
1853                         while (++i < cpuc->n_events)
1854                                 cpuc->event_list[i-1] = cpuc->event_list[i];
1855
1856                         --cpuc->n_events;
1857                 }
1858         }
1859         perf_event_update_userpage(event);
1860 }
1861
1862 /*
1863  * Save and restart an expired event. Called by NMI contexts,
1864  * so it has to be careful about preempting normal event ops:
1865  */
1866 static int intel_pmu_save_and_restart(struct perf_event *event)
1867 {
1868         struct hw_perf_event *hwc = &event->hw;
1869         int idx = hwc->idx;
1870         int ret;
1871
1872         x86_perf_event_update(event, hwc, idx);
1873         ret = x86_perf_event_set_period(event, hwc, idx);
1874
1875         if (event->state == PERF_EVENT_STATE_ACTIVE)
1876                 intel_pmu_enable_event(hwc, idx);
1877
1878         return ret;
1879 }
1880
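/*
 * Last-resort recovery: zero every generic and fixed counter and reset
 * the BTS buffer index. Used when the PMI handler appears stuck.
 */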
1881 static void intel_pmu_reset(void)
1882 {
1883         struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
1884         unsigned long flags;
1885         int idx;
1886
1887         if (!x86_pmu.num_events)
1888                 return;
1889
1890         local_irq_save(flags);
1891
1892         printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1893
1894         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1895                 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1896                 checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
1897         }
1898         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1899                 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1900         }
1901         if (ds)
1902                 ds->bts_index = ds->bts_buffer_base;
1903
1904         local_irq_restore(flags);
1905 }
1906
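/*
 * P6-style PMUs have no global overflow status register, so scan every
 * active counter: counters are preloaded with a negative value, hence
 * a cleared sign bit after the update indicates an overflow.
 */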
1907 static int p6_pmu_handle_irq(struct pt_regs *regs)
1908 {
1909         struct perf_sample_data data;
1910         struct cpu_hw_events *cpuc;
1911         struct perf_event *event;
1912         struct hw_perf_event *hwc;
1913         int idx, handled = 0;
1914         u64 val;
1915
1916         data.addr = 0;
1917         data.raw = NULL;
1918
1919         cpuc = &__get_cpu_var(cpu_hw_events);
1920
1921         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1922                 if (!test_bit(idx, cpuc->active_mask))
1923                         continue;
1924
1925                 event = cpuc->events[idx];
1926                 hwc = &event->hw;
1927
1928                 val = x86_perf_event_update(event, hwc, idx);
1929                 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1930                         continue;
1931
1932                 /*
1933                  * event overflow
1934                  */
1935                 handled         = 1;
1936                 data.period     = event->hw.last_period;
1937
1938                 if (!x86_perf_event_set_period(event, hwc, idx))
1939                         continue;
1940
1941                 if (perf_event_overflow(event, 1, &data, regs))
1942                         p6_pmu_disable_event(hwc, idx);
1943         }
1944
1945         if (handled)
1946                 inc_irq_stat(apic_perf_irqs);
1947
1948         return handled;
1949 }
1950
1951 /*
1952  * This handler is triggered by the local APIC, so the APIC IRQ handling
1953  * rules apply:
1954  */
1955 static int intel_pmu_handle_irq(struct pt_regs *regs)
1956 {
1957         struct perf_sample_data data;
1958         struct cpu_hw_events *cpuc;
1959         int bit, loops;
1960         u64 ack, status;
1961
1962         data.addr = 0;
1963         data.raw = NULL;
1964
1965         cpuc = &__get_cpu_var(cpu_hw_events);
1966
1967         perf_disable();
1968         intel_pmu_drain_bts_buffer(cpuc);
1969         status = intel_pmu_get_status();
1970         if (!status) {
1971                 perf_enable();
1972                 return 0;
1973         }
1974
1975         loops = 0;
1976 again:
1977         if (++loops > 100) {
1978                 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1979                 perf_event_print_debug();
1980                 intel_pmu_reset();
1981                 perf_enable();
1982                 return 1;
1983         }
1984
1985         inc_irq_stat(apic_perf_irqs);
1986         ack = status;
1987         for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
1988                 struct perf_event *event = cpuc->events[bit];
1989
1990                 clear_bit(bit, (unsigned long *) &status);
1991                 if (!test_bit(bit, cpuc->active_mask))
1992                         continue;
1993
1994                 if (!intel_pmu_save_and_restart(event))
1995                         continue;
1996
1997                 data.period = event->hw.last_period;
1998
1999                 if (perf_event_overflow(event, 1, &data, regs))
2000                         intel_pmu_disable_event(&event->hw, bit);
2001         }
2002
2003         intel_pmu_ack_status(ack);
2004
2005         /*
2006          * Repeat if there is more work to be done:
2007          */
2008         status = intel_pmu_get_status();
2009         if (status)
2010                 goto again;
2011
2012         perf_enable();
2013
2014         return 1;
2015 }
2016
2017 static int amd_pmu_handle_irq(struct pt_regs *regs)
2018 {
2019         struct perf_sample_data data;
2020         struct cpu_hw_events *cpuc;
2021         struct perf_event *event;
2022         struct hw_perf_event *hwc;
2023         int idx, handled = 0;
2024         u64 val;
2025
2026         data.addr = 0;
2027         data.raw = NULL;
2028
2029         cpuc = &__get_cpu_var(cpu_hw_events);
2030
2031         for (idx = 0; idx < x86_pmu.num_events; idx++) {
2032                 if (!test_bit(idx, cpuc->active_mask))
2033                         continue;
2034
2035                 event = cpuc->events[idx];
2036                 hwc = &event->hw;
2037
2038                 val = x86_perf_event_update(event, hwc, idx);
2039                 if (val & (1ULL << (x86_pmu.event_bits - 1)))
2040                         continue;
2041
2042                 /*
2043                  * event overflow
2044                  */
2045                 handled         = 1;
2046                 data.period     = event->hw.last_period;
2047
2048                 if (!x86_perf_event_set_period(event, hwc, idx))
2049                         continue;
2050
2051                 if (perf_event_overflow(event, 1, &data, regs))
2052                         amd_pmu_disable_event(hwc, idx);
2053         }
2054
2055         if (handled)
2056                 inc_irq_stat(apic_perf_irqs);
2057
2058         return handled;
2059 }
2060
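/*
 * Entry point for the self-IPI raised by set_perf_event_pending():
 * runs the deferred perf work (wakeups, signals) via
 * perf_event_do_pending() outside of NMI context.
 */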
2061 void smp_perf_pending_interrupt(struct pt_regs *regs)
2062 {
2063         irq_enter();
2064         ack_APIC_irq();
2065         inc_irq_stat(apic_pending_irqs);
2066         perf_event_do_pending();
2067         irq_exit();
2068 }
2069
2070 void set_perf_event_pending(void)
2071 {
2072 #ifdef CONFIG_X86_LOCAL_APIC
2073         if (!x86_pmu.apic || !x86_pmu_initialized())
2074                 return;
2075
2076         apic->send_IPI_self(LOCAL_PENDING_VECTOR);
2077 #endif
2078 }
2079
2080 void perf_events_lapic_init(void)
2081 {
2082 #ifdef CONFIG_X86_LOCAL_APIC
2083         if (!x86_pmu.apic || !x86_pmu_initialized())
2084                 return;
2085
2086         /*
2087          * Always use NMI for PMU
2088          */
2089         apic_write(APIC_LVTPC, APIC_DM_NMI);
2090 #endif
2091 }
2092
2093 static int __kprobes
2094 perf_event_nmi_handler(struct notifier_block *self,
2095                          unsigned long cmd, void *__args)
2096 {
2097         struct die_args *args = __args;
2098         struct pt_regs *regs;
2099
2100         if (!atomic_read(&active_events))
2101                 return NOTIFY_DONE;
2102
2103         switch (cmd) {
2104         case DIE_NMI:
2105         case DIE_NMI_IPI:
2106                 break;
2107
2108         default:
2109                 return NOTIFY_DONE;
2110         }
2111
2112         regs = args->regs;
2113
2114 #ifdef CONFIG_X86_LOCAL_APIC
2115         apic_write(APIC_LVTPC, APIC_DM_NMI);
2116 #endif
2117         /*
2118          * Can't rely on the handled return value to say it was our NMI, two
2119          * events could trigger 'simultaneously' raising two back-to-back NMIs.
2120          *
2121          * If the first NMI handles both, the latter will be empty and daze
2122          * the CPU.
2123          */
2124         x86_pmu.handle_irq(regs);
2125
2126         return NOTIFY_STOP;
2127 }
2128
2129 static struct event_constraint bts_constraint = {
2130         .code = 0,
2131         .cmask = 0,
2132         .idxmsk[0] = 1ULL << X86_PMC_IDX_FIXED_BTS
2133 };
2134
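/*
 * Branch-instruction events with a sample period of 1 are handled by
 * the BTS mechanism, so constrain them to the BTS pseudo-counter.
 */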
2135 static int intel_special_constraints(struct perf_event *event,
2136                                      u64 *idxmsk)
2137 {
2138         unsigned int hw_event;
2139
2140         hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
2141
2142         if (unlikely((hw_event ==
2143                       x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
2144                      (event->hw.sample_period == 1))) {
2145
2146                 bitmap_copy((unsigned long *)idxmsk,
2147                             (unsigned long *)bts_constraint.idxmsk,
2148                             X86_PMC_IDX_MAX);
2149                 return 1;
2150         }
2151         return 0;
2152 }
2153
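/*
 * Compute the bitmask of counters this event may be scheduled on:
 * the BTS pseudo-counter, a model-specific constraint from the table,
 * or, if unconstrained, any generic counter.
 */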
2154 static void intel_get_event_constraints(struct cpu_hw_events *cpuc,
2155                                         struct perf_event *event,
2156                                         u64 *idxmsk)
2157 {
2158         const struct event_constraint *c;
2159
2160         /*
2161          * start from a clean bitmask
2162          */
2163         bitmap_zero((unsigned long *)idxmsk, X86_PMC_IDX_MAX);
2164
2165         if (intel_special_constraints(event, idxmsk))
2166                 return;
2167
2168         if (x86_pmu.event_constraints) {
2169                 for_each_event_constraint(c, x86_pmu.event_constraints) {
2170                         if ((event->hw.config & c->cmask) == c->code) {
2171
2172                                 bitmap_copy((unsigned long *)idxmsk,
2173                                             (unsigned long *)c->idxmsk,
2174                                             X86_PMC_IDX_MAX);
2175                                 return;
2176                         }
2177                 }
2178         }
2179         /* no constraints: the event may use any generic counter */
2180         bitmap_fill((unsigned long *)idxmsk, x86_pmu.num_events);
2181 }
2182
2183 static void amd_get_event_constraints(struct cpu_hw_events *cpuc,
2184                                       struct perf_event *event,
2185                                       u64 *idxmsk)
2186 {
2187 }
2188
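/*
 * Mark an event as scheduled in on this CPU and update its timestamps.
 * Software events are enabled immediately through their pmu callback;
 * x86 events are programmed later, by hw_perf_enable().
 */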
2189 static int x86_event_sched_in(struct perf_event *event,
2190                           struct perf_cpu_context *cpuctx, int cpu)
2191 {
2192         int ret = 0;
2193
2194         event->state = PERF_EVENT_STATE_ACTIVE;
2195         event->oncpu = cpu;
2196         event->tstamp_running += event->ctx->time - event->tstamp_stopped;
2197
2198         if (!is_x86_event(event))
2199                 ret = event->pmu->enable(event);
2200
2201         if (!ret && !is_software_event(event))
2202                 cpuctx->active_oncpu++;
2203
2204         if (!ret && event->attr.exclusive)
2205                 cpuctx->exclusive = 1;
2206
2207         return ret;
2208 }
2209
2210 static void x86_event_sched_out(struct perf_event *event,
2211                             struct perf_cpu_context *cpuctx, int cpu)
2212 {
2213         event->state = PERF_EVENT_STATE_INACTIVE;
2214         event->oncpu = -1;
2215
2216         if (!is_x86_event(event))
2217                 event->pmu->disable(event);
2218
2219         event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
2220
2221         if (!is_software_event(event))
2222                 cpuctx->active_oncpu--;
2223
2224         if (event->attr.exclusive || !cpuctx->active_oncpu)
2225                 cpuctx->exclusive = 0;
2226 }
2227
2228 /*
2229  * Called to enable a whole group of events.
2230  * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
2231  * Assumes the caller has disabled interrupts and has
2232  * frozen the PMU with hw_perf_save_disable.
2233  *
2234  * Called with the PMU disabled. If successful (return value 1), the
2235  * caller is then guaranteed to call perf_enable() and hw_perf_enable().
2236  */
2237 int hw_perf_group_sched_in(struct perf_event *leader,
2238                struct perf_cpu_context *cpuctx,
2239                struct perf_event_context *ctx, int cpu)
2240 {
2241         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2242         struct perf_event *sub;
2243         int assign[X86_PMC_IDX_MAX];
2244         int n0, n1, ret;
2245
2246         /* n0 = total number of events */
2247         n0 = collect_events(cpuc, leader, true);
2248         if (n0 < 0)
2249                 return n0;
2250
2251         ret = x86_schedule_events(cpuc, n0, assign);
2252         if (ret)
2253                 return ret;
2254
2255         ret = x86_event_sched_in(leader, cpuctx, cpu);
2256         if (ret)
2257                 return ret;
2258
2259         n1 = 1;
2260         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2261                 if (sub->state != PERF_EVENT_STATE_OFF) {
2262                         ret = x86_event_sched_in(sub, cpuctx, cpu);
2263                         if (ret)
2264                                 goto undo;
2265                         ++n1;
2266                 }
2267         }
2268         /*
2269          * copy the new assignment now that we know it is schedulable;
2270          * it will be used by hw_perf_enable()
2271          */
2272         memcpy(cpuc->assign, assign, n0*sizeof(int));
2273
2274         cpuc->n_events  = n0;
2275         cpuc->n_added   = n1;
2276         ctx->nr_active += n1;
2277
2278         /*
2279          * A return value of 1 means successful and events are
2280          * active. This is not quite true because we defer
2281          * actual activation until hw_perf_enable(), but this
2282          * way we ensure the caller won't try to enable the
2283          * individual events.
2284          */
2285         return 1;
2286 undo:
2287         x86_event_sched_out(leader, cpuctx, cpu);
2288         n0  = 1;
2289         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2290                 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
2291                         x86_event_sched_out(sub, cpuctx, cpu);
2292                         if (++n0 == n1)
2293                                 break;
2294                 }
2295         }
2296         return ret;
2297 }
2298
2299 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
2300         .notifier_call          = perf_event_nmi_handler,
2301         .next                   = NULL,
2302         .priority               = 1
2303 };
2304
2305 static __initconst struct x86_pmu p6_pmu = {
2306         .name                   = "p6",
2307         .handle_irq             = p6_pmu_handle_irq,
2308         .disable_all            = p6_pmu_disable_all,
2309         .enable_all             = p6_pmu_enable_all,
2310         .enable                 = p6_pmu_enable_event,
2311         .disable                = p6_pmu_disable_event,
2312         .eventsel               = MSR_P6_EVNTSEL0,
2313         .perfctr                = MSR_P6_PERFCTR0,
2314         .event_map              = p6_pmu_event_map,
2315         .raw_event              = p6_pmu_raw_event,
2316         .max_events             = ARRAY_SIZE(p6_perfmon_event_map),
2317         .apic                   = 1,
2318         .max_period             = (1ULL << 31) - 1,
2319         .version                = 0,
2320         .num_events             = 2,
2321         /*
2322          * Events have 40 bits implemented. However they are designed such
2323          * that bits [32-39] are sign extensions of bit 31. As such the
2324          * effective width of an event for a P6-like PMU is 32 bits only.
2325          *
2326          * See IA-32 Intel Architecture Software developer manual Vol 3B
2327          */
2328         .event_bits             = 32,
2329         .event_mask             = (1ULL << 32) - 1,
2330         .get_event_constraints  = intel_get_event_constraints,
2331         .event_constraints      = intel_p6_event_constraints
2332 };
2333
2334 static __initconst struct x86_pmu intel_pmu = {
2335         .name                   = "Intel",
2336         .handle_irq             = intel_pmu_handle_irq,
2337         .disable_all            = intel_pmu_disable_all,
2338         .enable_all             = intel_pmu_enable_all,
2339         .enable                 = intel_pmu_enable_event,
2340         .disable                = intel_pmu_disable_event,
2341         .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
2342         .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
2343         .event_map              = intel_pmu_event_map,
2344         .raw_event              = intel_pmu_raw_event,
2345         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
2346         .apic                   = 1,
2347         /*
2348          * Intel PMCs cannot be accessed sanely above 32 bit width,
2349          * so we install an artificial 1<<31 period regardless of
2350          * the generic event period:
2351          */
2352         .max_period             = (1ULL << 31) - 1,
2353         .enable_bts             = intel_pmu_enable_bts,
2354         .disable_bts            = intel_pmu_disable_bts,
2355         .get_event_constraints  = intel_get_event_constraints
2356 };
2357
2358 static __initconst struct x86_pmu amd_pmu = {
2359         .name                   = "AMD",
2360         .handle_irq             = amd_pmu_handle_irq,
2361         .disable_all            = amd_pmu_disable_all,
2362         .enable_all             = amd_pmu_enable_all,
2363         .enable                 = amd_pmu_enable_event,
2364         .disable                = amd_pmu_disable_event,
2365         .eventsel               = MSR_K7_EVNTSEL0,
2366         .perfctr                = MSR_K7_PERFCTR0,
2367         .event_map              = amd_pmu_event_map,
2368         .raw_event              = amd_pmu_raw_event,
2369         .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
2370         .num_events             = 4,
2371         .event_bits             = 48,
2372         .event_mask             = (1ULL << 48) - 1,
2373         .apic                   = 1,
2374         /* use highest bit to detect overflow */
2375         .max_period             = (1ULL << 47) - 1,
2376         .get_event_constraints  = amd_get_event_constraints
2377 };
2378
2379 static __init int p6_pmu_init(void)
2380 {
2381         switch (boot_cpu_data.x86_model) {
2382         case 1:
2383         case 3:  /* Pentium Pro */
2384         case 5:
2385         case 6:  /* Pentium II */
2386         case 7:
2387         case 8:
2388         case 11: /* Pentium III */
2389         case 9:
2390         case 13:
2391                 /* Pentium M */
2392                 break;
2393         default:
2394                 pr_cont("unsupported p6 CPU model %d ",
2395                         boot_cpu_data.x86_model);
2396                 return -ENODEV;
2397         }
2398
2399         x86_pmu = p6_pmu;
2400
2401         return 0;
2402 }
2403
2404 static __init int intel_pmu_init(void)
2405 {
2406         union cpuid10_edx edx;
2407         union cpuid10_eax eax;
2408         unsigned int unused;
2409         unsigned int ebx;
2410         int version;
2411
2412         if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2413                 /* check for P6 processor family */
2414                 if (boot_cpu_data.x86 == 6) {
2415                         return p6_pmu_init();
2416                 } else {
2417                         return -ENODEV;
2418                 }
2419         }
2420
2421         /*
2422          * Check whether the Architectural PerfMon supports
2423          * Branch Misses Retired hw_event or not.
2424          */
2425         cpuid(10, &eax.full, &ebx, &unused, &edx.full);
2426         if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
2427                 return -ENODEV;
2428
2429         version = eax.split.version_id;
2430         if (version < 2)
2431                 return -ENODEV;
2432
2433         x86_pmu                         = intel_pmu;
2434         x86_pmu.version                 = version;
2435         x86_pmu.num_events              = eax.split.num_events;
2436         x86_pmu.event_bits              = eax.split.bit_width;
2437         x86_pmu.event_mask              = (1ULL << eax.split.bit_width) - 1;
2438
2439         /*
2440          * Quirk: v2 perfmon does not report fixed-purpose events, so
2441          * assume at least 3 events:
2442          */
2443         x86_pmu.num_events_fixed        = max((int)edx.split.num_events_fixed, 3);
2444
2445         /*
2446          * Install the hw-cache-events table:
2447          */
2448         switch (boot_cpu_data.x86_model) {
2449         case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2450         case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2451         case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2452         case 29: /* six-core 45 nm xeon "Dunnington" */
2453                 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2454                        sizeof(hw_cache_event_ids));
2455
2456                 x86_pmu.event_constraints = intel_core_event_constraints;
2457                 pr_cont("Core2 events, ");
2458                 break;
2459         case 26:
2460                 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2461                        sizeof(hw_cache_event_ids));
2462
2463                 x86_pmu.event_constraints = intel_nehalem_event_constraints;
2464                 pr_cont("Nehalem/Corei7 events, ");
2465                 break;
2466         case 28:
2467                 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2468                        sizeof(hw_cache_event_ids));
2469
2470                 x86_pmu.event_constraints = intel_gen_event_constraints;
2471                 pr_cont("Atom events, ");
2472                 break;
2473         default:
2474                 /*
2475                  * default constraints for v2 and up
2476                  */
2477                 x86_pmu.event_constraints = intel_gen_event_constraints;
2478                 pr_cont("generic architected perfmon, ");
2479         }
2480         return 0;
2481 }
2482
2483 static __init int amd_pmu_init(void)
2484 {
2485         /* Performance-monitoring supported from K7 and later: */
2486         if (boot_cpu_data.x86 < 6)
2487                 return -ENODEV;
2488
2489         x86_pmu = amd_pmu;
2490
2491         /* Events are common for all AMDs */
2492         memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2493                sizeof(hw_cache_event_ids));
2494
2495         return 0;
2496 }
2497
2498 static void __init pmu_check_apic(void)
2499 {
2500         if (cpu_has_apic)
2501                 return;
2502
2503         x86_pmu.apic = 0;
2504         pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2505         pr_info("no hardware sampling interrupt available.\n");
2506 }
2507
2508 void __init init_hw_perf_events(void)
2509 {
2510         int err;
2511
2512         pr_info("Performance Events: ");
2513
2514         switch (boot_cpu_data.x86_vendor) {
2515         case X86_VENDOR_INTEL:
2516                 err = intel_pmu_init();
2517                 break;
2518         case X86_VENDOR_AMD:
2519                 err = amd_pmu_init();
2520                 break;
2521         default:
2522                 return;
2523         }
2524         if (err != 0) {
2525                 pr_cont("no PMU driver, software events only.\n");
2526                 return;
2527         }
2528
2529         pmu_check_apic();
2530
2531         pr_cont("%s PMU driver.\n", x86_pmu.name);
2532
2533         if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
2534                 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2535                      x86_pmu.num_events, X86_PMC_MAX_GENERIC);
2536                 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
2537         }
2538         perf_event_mask = (1 << x86_pmu.num_events) - 1;
2539         perf_max_events = x86_pmu.num_events;
2540
2541         if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
2542                 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2543                      x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
2544                 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
2545         }
2546
2547         perf_event_mask |=
2548                 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
2549         x86_pmu.intel_ctrl = perf_event_mask;
2550
2551         perf_events_lapic_init();
2552         register_die_notifier(&perf_event_nmi_notifier);
2553
2554         pr_info("... version:                %d\n",     x86_pmu.version);
2555         pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
2556         pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
2557         pr_info("... value mask:             %016Lx\n", x86_pmu.event_mask);
2558         pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
2559         pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
2560         pr_info("... event mask:             %016Lx\n", perf_event_mask);
2561 }
2562
2563 static inline void x86_pmu_read(struct perf_event *event)
2564 {
2565         x86_perf_event_update(event, &event->hw, event->hw.idx);
2566 }
2567
2568 static const struct pmu pmu = {
2569         .enable         = x86_pmu_enable,
2570         .disable        = x86_pmu_disable,
2571         .read           = x86_pmu_read,
2572         .unthrottle     = x86_pmu_unthrottle,
2573 };
2574
2575 /*
2576  * validate a single event group
2577  *
2578  * validation includes:
2579  *      - check events are compatible with each other
2580  *      - events do not compete for the same counter
2581  *      - number of events <= number of counters
2582  *
2583  * validation ensures the group can be loaded onto the
2584  * PMU if it was the only group available.
2585  */
2586 static int validate_group(struct perf_event *event)
2587 {
2588         struct perf_event *leader = event->group_leader;
2589         struct cpu_hw_events fake_cpuc;
2590         int n;
2591
2592         memset(&fake_cpuc, 0, sizeof(fake_cpuc));
2593
2594         /*
2595          * the event is not yet connected with its
2596          * siblings, therefore we must first collect the
2597          * existing siblings and then add the new event
2598          * before we can simulate the scheduling
2599          */
2600         n = collect_events(&fake_cpuc, leader, true);
2601         if (n < 0)
2602                 return -ENOSPC;
2603
2604         fake_cpuc.n_events = n;
2605         n = collect_events(&fake_cpuc, event, false);
2606         if (n < 0)
2607                 return -ENOSPC;
2608
2609         fake_cpuc.n_events = n;
2610
2611         return x86_schedule_events(&fake_cpuc, n, NULL);
2612 }
2613
2614 const struct pmu *hw_perf_event_init(struct perf_event *event)
2615 {
2616         int err;
2617
2618         err = __hw_perf_event_init(event);
2619         if (!err) {
2620                 if (event->group_leader != event)
2621                         err = validate_group(event);
2622         }
2623         if (err) {
2624                 if (event->destroy)
2625                         event->destroy(event);
2626                 return ERR_PTR(err);
2627         }
2628
2629         return &pmu;
2630 }
2631
2632 /*
2633  * callchain support
2634  */
2635
2636 static inline
2637 void callchain_store(struct perf_callchain_entry *entry, u64 ip)
2638 {
2639         if (entry->nr < PERF_MAX_STACK_DEPTH)
2640                 entry->ip[entry->nr++] = ip;
2641 }
2642
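/*
 * Per-cpu callchain buffers: separate entries for IRQ and NMI context
 * so that an NMI interrupting an in-progress IRQ callchain does not
 * corrupt it.
 */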
2643 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2644 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
2645
2646
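/*
 * dump_trace() callbacks for kernel callchains: ignore warnings and
 * record every reliable return address into the callchain entry.
 */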
2647 static void
2648 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
2649 {
2650         /* Ignore warnings */
2651 }
2652
2653 static void backtrace_warning(void *data, char *msg)
2654 {
2655         /* Ignore warnings */
2656 }
2657
2658 static int backtrace_stack(void *data, char *name)
2659 {
2660         return 0;
2661 }
2662
2663 static void backtrace_address(void *data, unsigned long addr, int reliable)
2664 {
2665         struct perf_callchain_entry *entry = data;
2666
2667         if (reliable)
2668                 callchain_store(entry, addr);
2669 }
2670
2671 static const struct stacktrace_ops backtrace_ops = {
2672         .warning                = backtrace_warning,
2673         .warning_symbol         = backtrace_warning_symbol,
2674         .stack                  = backtrace_stack,
2675         .address                = backtrace_address,
2676         .walk_stack             = print_context_stack_bp,
2677 };
2678
2679 #include "../dumpstack.h"
2680
2681 static void
2682 perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
2683 {
2684         callchain_store(entry, PERF_CONTEXT_KERNEL);
2685         callchain_store(entry, regs->ip);
2686
2687         dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
2688 }
2689
2690 /*
2691  * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
2692  */
2693 static unsigned long
2694 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
2695 {
2696         unsigned long offset, addr = (unsigned long)from;
2697         int type = in_nmi() ? KM_NMI : KM_IRQ0;
2698         unsigned long size, len = 0;
2699         struct page *page;
2700         void *map;
2701         int ret;
2702
2703         do {
2704                 ret = __get_user_pages_fast(addr, 1, 0, &page);
2705                 if (!ret)
2706                         break;
2707
2708                 offset = addr & (PAGE_SIZE - 1);
2709                 size = min(PAGE_SIZE - offset, n - len);
2710
2711                 map = kmap_atomic(page, type);
2712                 memcpy(to, map+offset, size);
2713                 kunmap_atomic(map, type);
2714                 put_page(page);
2715
2716                 len  += size;
2717                 to   += size;
2718                 addr += size;
2719
2720         } while (len < n);
2721
2722         return len;
2723 }
2724
2725 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
2726 {
2727         unsigned long bytes;
2728
2729         bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
2730
2731         return bytes == sizeof(*frame);
2732 }
2733
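/*
 * Walk the user stack by following saved frame pointers, copying each
 * stack_frame with the NMI-safe copier above; stop at the maximum
 * depth, on a failed copy, or when the frame pointer drops below the
 * current stack pointer.
 */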
2734 static void
2735 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
2736 {
2737         struct stack_frame frame;
2738         const void __user *fp;
2739
2740         if (!user_mode(regs))
2741                 regs = task_pt_regs(current);
2742
2743         fp = (void __user *)regs->bp;
2744
2745         callchain_store(entry, PERF_CONTEXT_USER);
2746         callchain_store(entry, regs->ip);
2747
2748         while (entry->nr < PERF_MAX_STACK_DEPTH) {
2749                 frame.next_frame     = NULL;
2750                 frame.return_address = 0;
2751
2752                 if (!copy_stack_frame(fp, &frame))
2753                         break;
2754
2755                 if ((unsigned long)fp < regs->sp)
2756                         break;
2757
2758                 callchain_store(entry, frame.return_address);
2759                 fp = frame.next_frame;
2760         }
2761 }
2762
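/*
 * Record the kernel callchain for samples taken in kernel mode and,
 * whenever the task has a user mm, the user callchain as well.
 * User-mode samples of tasks that are not running are ignored.
 */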
2763 static void
2764 perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
2765 {
2766         int is_user;
2767
2768         if (!regs)
2769                 return;
2770
2771         is_user = user_mode(regs);
2772
2773         if (is_user && current->state != TASK_RUNNING)
2774                 return;
2775
2776         if (!is_user)
2777                 perf_callchain_kernel(regs, entry);
2778
2779         if (current->mm)
2780                 perf_callchain_user(regs, entry);
2781 }
2782
2783 struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2784 {
2785         struct perf_callchain_entry *entry;
2786
2787         if (in_nmi())
2788                 entry = &__get_cpu_var(pmc_nmi_entry);
2789         else
2790                 entry = &__get_cpu_var(pmc_irq_entry);
2791
2792         entry->nr = 0;
2793
2794         perf_do_callchain(regs, entry);
2795
2796         return entry;
2797 }
2798
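/* CPU online callback: initialize the debug store (BTS) for the new CPU. */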
2799 void hw_perf_event_setup_online(int cpu)
2800 {
2801         init_debug_store_on_cpu(cpu);
2802 }