arch/x86/kernel/cpu/perf_event.c
1 /*
2  * Performance events x86 architecture code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  *  For licencing details see kernel-base/COPYING
13  */
14
15 #include <linux/perf_event.h>
16 #include <linux/capability.h>
17 #include <linux/notifier.h>
18 #include <linux/hardirq.h>
19 #include <linux/kprobes.h>
20 #include <linux/module.h>
21 #include <linux/kdebug.h>
22 #include <linux/sched.h>
23 #include <linux/uaccess.h>
24 #include <linux/slab.h>
25 #include <linux/cpu.h>
26 #include <linux/bitops.h>
27
28 #include <asm/apic.h>
29 #include <asm/stacktrace.h>
30 #include <asm/nmi.h>
31 #include <asm/compat.h>
32 #include <asm/smp.h>
33 #include <asm/alternative.h>
34
35 #if 0
36 #undef wrmsrl
37 #define wrmsrl(msr, val)                                        \
38 do {                                                            \
39         trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
40                         (unsigned long)(val));                  \
41         native_write_msr((msr), (u32)((u64)(val)),              \
42                         (u32)((u64)(val) >> 32));               \
43 } while (0)
44 #endif
45
46 /*
47  *          |   NHM/WSM    |      SNB     |
48  * register -------------------------------
49  *          |  HT  | no HT |  HT  | no HT |
50  *-----------------------------------------
51  * offcore  | core | core  | cpu  | core  |
52  * lbr_sel  | core | core  | cpu  | core  |
53  * ld_lat   | cpu  | core  | cpu  | core  |
54  *-----------------------------------------
55  *
56  * Given that there is a small number of shared regs,
57  * we can pre-allocate their slot in the per-cpu
58  * per-core reg tables.
59  */
60 enum extra_reg_type {
61         EXTRA_REG_NONE  = -1,   /* not used */
62
63         EXTRA_REG_RSP_0 = 0,    /* offcore_response_0 */
64         EXTRA_REG_RSP_1 = 1,    /* offcore_response_1 */
65
66         EXTRA_REG_MAX           /* number of entries needed */
67 };
68
69 struct event_constraint {
70         union {
71                 unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
72                 u64             idxmsk64;
73         };
74         u64     code;
75         u64     cmask;
76         int     weight;
77 };
78
79 struct amd_nb {
80         int nb_id;  /* NorthBridge id */
81         int refcnt; /* reference count */
82         struct perf_event *owners[X86_PMC_IDX_MAX];
83         struct event_constraint event_constraints[X86_PMC_IDX_MAX];
84 };
85
86 struct intel_percore;
87
88 #define MAX_LBR_ENTRIES         16
89
90 struct cpu_hw_events {
91         /*
92          * Generic x86 PMC bits
93          */
94         struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
95         unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
96         unsigned long           running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
97         int                     enabled;
98
99         int                     n_events;
100         int                     n_added;
101         int                     n_txn;
102         int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
103         u64                     tags[X86_PMC_IDX_MAX];
104         struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
105
106         unsigned int            group_flag;
107
108         /*
109          * Intel DebugStore bits
110          */
111         struct debug_store      *ds;
112         u64                     pebs_enabled;
113
114         /*
115          * Intel LBR bits
116          */
117         int                             lbr_users;
118         void                            *lbr_context;
119         struct perf_branch_stack        lbr_stack;
120         struct perf_branch_entry        lbr_entries[MAX_LBR_ENTRIES];
121
122         /*
123          * manage shared (per-core, per-cpu) registers
124          * used on Intel NHM/WSM/SNB
125          */
126         struct intel_shared_regs        *shared_regs;
127
128         /*
129          * AMD specific bits
130          */
131         struct amd_nb           *amd_nb;
132 };
133
134 #define __EVENT_CONSTRAINT(c, n, m, w) {\
135         { .idxmsk64 = (n) },            \
136         .code = (c),                    \
137         .cmask = (m),                   \
138         .weight = (w),                  \
139 }
140
141 #define EVENT_CONSTRAINT(c, n, m)       \
142         __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
143
144 /*
145  * Constraint on the Event code.
146  */
147 #define INTEL_EVENT_CONSTRAINT(c, n)    \
148         EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
149
150 /*
151  * Constraint on the Event code + UMask + fixed-mask
152  *
153  * filter mask to validate fixed counter events.
154  * the following filters disqualify for fixed counters:
155  *  - inv
156  *  - edge
157  *  - cnt-mask
158  *  The other filters are supported by fixed counters.
159  *  The any-thread option is supported starting with v3.
160  */
161 #define FIXED_EVENT_CONSTRAINT(c, n)    \
162         EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
163
164 /*
165  * Constraint on the Event code + UMask
166  */
167 #define INTEL_UEVENT_CONSTRAINT(c, n)   \
168         EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
169
170 #define EVENT_CONSTRAINT_END            \
171         EVENT_CONSTRAINT(0, 0, 0)
172
173 #define for_each_event_constraint(e, c) \
174         for ((e) = (c); (e)->weight; (e)++)
175
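/*
 * Illustrative sketch (kept inert under #if 0): how a model-specific
 * constraint table is typically built from the macros above and walked
 * with for_each_event_constraint().  The event codes and counter masks
 * below are assumed example values, not a real CPU model table.
 */
#if 0
static struct event_constraint example_event_constraints[] =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* assumed: INST_RETIRED.ANY on fixed counter 0 */
        INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* assumed: event 0x48 limited to counters 0-1 */
        EVENT_CONSTRAINT_END
};

static void example_dump_constraints(void)
{
        struct event_constraint *c;

        for_each_event_constraint(c, example_event_constraints)
                pr_info("code=%#llx cmask=%#llx weight=%d\n",
                        (unsigned long long)c->code,
                        (unsigned long long)c->cmask, c->weight);
}
#endif
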
176 /*
177  * Per register state.
178  */
179 struct er_account {
180         raw_spinlock_t          lock;   /* per-core: protect structure */
181         u64                     config; /* extra MSR config */
182         u64                     reg;    /* extra MSR number */
183         atomic_t                ref;    /* reference count */
184 };
185
186 /*
187  * Extra registers for specific events.
188  *
189  * Some events need large masks and require external MSRs.
190  * Those extra MSRs end up being shared for all events on
191  * a PMU and sometimes between PMU of sibling HT threads.
192  * In either case, the kernel needs to handle conflicting
193  * accesses to those extra, shared, regs. The data structure
194  * to manage those registers is stored in cpu_hw_event.
195  */
196 struct extra_reg {
197         unsigned int            event;
198         unsigned int            msr;
199         u64                     config_mask;
200         u64                     valid_mask;
201         int                     idx;  /* per_xxx->regs[] reg index */
202 };
203
204 #define EVENT_EXTRA_REG(e, ms, m, vm, i) {      \
205         .event = (e),           \
206         .msr = (ms),            \
207         .config_mask = (m),     \
208         .valid_mask = (vm),     \
209         .idx = EXTRA_REG_##i    \
210         }
211
212 #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)      \
213         EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
214
215 #define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
216
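/*
 * Illustrative sketch (kept inert under #if 0): a hypothetical extra_regs
 * table built from the macros above.  Event 0xb7 paired with
 * MSR_OFFCORE_RSP_0 mirrors the usual offcore_response setup, but the
 * concrete event code and valid mask are assumptions, not a real model
 * table.
 */
#if 0
static struct extra_reg example_extra_regs[] =
{
        INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffffull, RSP_0),
        EVENT_EXTRA_END
};
#endif
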
217 union perf_capabilities {
218         struct {
219                 u64     lbr_format    : 6;
220                 u64     pebs_trap     : 1;
221                 u64     pebs_arch_reg : 1;
222                 u64     pebs_format   : 4;
223                 u64     smm_freeze    : 1;
224         };
225         u64     capabilities;
226 };
227
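/*
 * Minimal sketch (kept inert under #if 0) of how this union is meant to
 * be filled: read IA32_PERF_CAPABILITIES into the raw .capabilities word
 * and the bit-fields become directly usable.  Assumes the CPU advertises
 * the capabilities MSR (PDCM); feature checking is omitted here.
 */
#if 0
static void example_read_perf_capabilities(void)
{
        union perf_capabilities cap;

        rdmsrl(MSR_IA32_PERF_CAPABILITIES, cap.capabilities);
        pr_info("LBR format %llu, PEBS format %llu\n",
                (unsigned long long)cap.lbr_format,
                (unsigned long long)cap.pebs_format);
}
#endif
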
228 /*
229  * struct x86_pmu - generic x86 pmu
230  */
231 struct x86_pmu {
232         /*
233          * Generic x86 PMC bits
234          */
235         const char      *name;
236         int             version;
237         int             (*handle_irq)(struct pt_regs *);
238         void            (*disable_all)(void);
239         void            (*enable_all)(int added);
240         void            (*enable)(struct perf_event *);
241         void            (*disable)(struct perf_event *);
242         int             (*hw_config)(struct perf_event *event);
243         int             (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
244         unsigned        eventsel;
245         unsigned        perfctr;
246         u64             (*event_map)(int);
247         int             max_events;
248         int             num_counters;
249         int             num_counters_fixed;
250         int             cntval_bits;
251         u64             cntval_mask;
252         int             apic;
253         u64             max_period;
254         struct event_constraint *
255                         (*get_event_constraints)(struct cpu_hw_events *cpuc,
256                                                  struct perf_event *event);
257
258         void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
259                                                  struct perf_event *event);
260         struct event_constraint *event_constraints;
261         void            (*quirks)(void);
262         int             perfctr_second_write;
263
264         int             (*cpu_prepare)(int cpu);
265         void            (*cpu_starting)(int cpu);
266         void            (*cpu_dying)(int cpu);
267         void            (*cpu_dead)(int cpu);
268
269         /*
270          * Intel Arch Perfmon v2+
271          */
272         u64                     intel_ctrl;
273         union perf_capabilities intel_cap;
274
275         /*
276          * Intel DebugStore bits
277          */
278         int             bts, pebs;
279         int             bts_active, pebs_active;
280         int             pebs_record_size;
281         void            (*drain_pebs)(struct pt_regs *regs);
282         struct event_constraint *pebs_constraints;
283
284         /*
285          * Intel LBR
286          */
287         unsigned long   lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
288         int             lbr_nr;                    /* hardware stack size */
289
290         /*
291          * Extra registers for events
292          */
293         struct extra_reg *extra_regs;
294         unsigned int er_flags;
295 };
296
297 #define ERF_NO_HT_SHARING       1
298 #define ERF_HAS_RSP_1           2
299
300 static struct x86_pmu x86_pmu __read_mostly;
301
302 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
303         .enabled = 1,
304 };
305
306 static int x86_perf_event_set_period(struct perf_event *event);
307
308 /*
309  * Generalized hw caching related hw_event table, filled
310  * in on a per model basis. A value of 0 means
311  * 'not supported', -1 means 'hw_event makes no sense on
312  * this CPU', any other value means the raw hw_event
313  * ID.
314  */
315
316 #define C(x) PERF_COUNT_HW_CACHE_##x
317
318 static u64 __read_mostly hw_cache_event_ids
319                                 [PERF_COUNT_HW_CACHE_MAX]
320                                 [PERF_COUNT_HW_CACHE_OP_MAX]
321                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
322 static u64 __read_mostly hw_cache_extra_regs
323                                 [PERF_COUNT_HW_CACHE_MAX]
324                                 [PERF_COUNT_HW_CACHE_OP_MAX]
325                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
326
327 /*
328  * Propagate event elapsed time into the generic event.
329  * Can only be executed on the CPU where the event is active.
330  * Returns the delta events processed.
331  */
332 static u64
333 x86_perf_event_update(struct perf_event *event)
334 {
335         struct hw_perf_event *hwc = &event->hw;
336         int shift = 64 - x86_pmu.cntval_bits;
337         u64 prev_raw_count, new_raw_count;
338         int idx = hwc->idx;
339         s64 delta;
340
341         if (idx == X86_PMC_IDX_FIXED_BTS)
342                 return 0;
343
344         /*
345          * Careful: an NMI might modify the previous event value.
346          *
347          * Our tactic to handle this is to first atomically read and
348          * exchange a new raw count - then add that new-prev delta
349          * count to the generic event atomically:
350          */
351 again:
352         prev_raw_count = local64_read(&hwc->prev_count);
353         rdmsrl(hwc->event_base, new_raw_count);
354
355         if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
356                                         new_raw_count) != prev_raw_count)
357                 goto again;
358
359         /*
360          * Now we have the new raw value and have updated the prev
361          * timestamp already. We can now calculate the elapsed delta
362          * (event-)time and add that to the generic event.
363          *
364          * Careful, not all hw sign-extends above the physical width
365          * of the count.
366          */
367         delta = (new_raw_count << shift) - (prev_raw_count << shift);
368         delta >>= shift;
369
370         local64_add(delta, &event->count);
371         local64_sub(delta, &hwc->period_left);
372
373         return new_raw_count;
374 }
375
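/*
 * Worked example for the shift trick in x86_perf_event_update()
 * (illustrative numbers): with cntval_bits = 48 the shift is 16.  If
 * prev_raw_count = 0x0000ffffffffff00 and the counter wrapped to
 * new_raw_count = 0x0000000000000010, the subtraction is done on the
 * values shifted up by 16 and the arithmetic right shift brings the
 * result back down, yielding the small positive delta 0x110 regardless
 * of whether the hardware sign-extends above bit 47.
 */
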
376 static inline int x86_pmu_addr_offset(int index)
377 {
378         int offset;
379
380         /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
381         alternative_io(ASM_NOP2,
382                        "shll $1, %%eax",
383                        X86_FEATURE_PERFCTR_CORE,
384                        "=a" (offset),
385                        "a"  (index));
386
387         return offset;
388 }
389
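/*
 * Worked example for x86_pmu_addr_offset() (illustrative): without
 * X86_FEATURE_PERFCTR_CORE the alternative leaves offset == index
 * (0, 1, 2, ...); with the feature the shll doubles it to 0, 2, 4, ...,
 * matching the interleaved eventsel/perfctr MSR layout used by those
 * CPUs.
 */
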
390 static inline unsigned int x86_pmu_config_addr(int index)
391 {
392         return x86_pmu.eventsel + x86_pmu_addr_offset(index);
393 }
394
395 static inline unsigned int x86_pmu_event_addr(int index)
396 {
397         return x86_pmu.perfctr + x86_pmu_addr_offset(index);
398 }
399
400 /*
401  * Find and validate any extra registers to set up.
402  */
403 static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
404 {
405         struct hw_perf_event_extra *reg;
406         struct extra_reg *er;
407
408         reg = &event->hw.extra_reg;
409
410         if (!x86_pmu.extra_regs)
411                 return 0;
412
413         for (er = x86_pmu.extra_regs; er->msr; er++) {
414                 if (er->event != (config & er->config_mask))
415                         continue;
416                 if (event->attr.config1 & ~er->valid_mask)
417                         return -EINVAL;
418
419                 reg->idx = er->idx;
420                 reg->config = event->attr.config1;
421                 reg->reg = er->msr;
422                 break;
423         }
424         return 0;
425 }
426
427 static atomic_t active_events;
428 static DEFINE_MUTEX(pmc_reserve_mutex);
429
430 #ifdef CONFIG_X86_LOCAL_APIC
431
432 static bool reserve_pmc_hardware(void)
433 {
434         int i;
435
436         for (i = 0; i < x86_pmu.num_counters; i++) {
437                 if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
438                         goto perfctr_fail;
439         }
440
441         for (i = 0; i < x86_pmu.num_counters; i++) {
442                 if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
443                         goto eventsel_fail;
444         }
445
446         return true;
447
448 eventsel_fail:
449         for (i--; i >= 0; i--)
450                 release_evntsel_nmi(x86_pmu_config_addr(i));
451
452         i = x86_pmu.num_counters;
453
454 perfctr_fail:
455         for (i--; i >= 0; i--)
456                 release_perfctr_nmi(x86_pmu_event_addr(i));
457
458         return false;
459 }
460
461 static void release_pmc_hardware(void)
462 {
463         int i;
464
465         for (i = 0; i < x86_pmu.num_counters; i++) {
466                 release_perfctr_nmi(x86_pmu_event_addr(i));
467                 release_evntsel_nmi(x86_pmu_config_addr(i));
468         }
469 }
470
471 #else
472
473 static bool reserve_pmc_hardware(void) { return true; }
474 static void release_pmc_hardware(void) {}
475
476 #endif
477
478 static bool check_hw_exists(void)
479 {
480         u64 val, val_new = 0;
481         int i, reg, ret = 0;
482
483         /*
484          * Check to see if the BIOS enabled any of the counters; if so,
485          * complain and bail.
486          */
487         for (i = 0; i < x86_pmu.num_counters; i++) {
488                 reg = x86_pmu_config_addr(i);
489                 ret = rdmsrl_safe(reg, &val);
490                 if (ret)
491                         goto msr_fail;
492                 if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
493                         goto bios_fail;
494         }
495
496         if (x86_pmu.num_counters_fixed) {
497                 reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
498                 ret = rdmsrl_safe(reg, &val);
499                 if (ret)
500                         goto msr_fail;
501                 for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
502                         if (val & (0x03 << i*4))
503                                 goto bios_fail;
504                 }
505         }
506
507         /*
508          * Now write a value and read it back to see if it matches;
509          * this is needed to detect certain hardware emulators (qemu/kvm)
510          * that don't trap on the MSR access and always return 0s.
511          */
512         val = 0xabcdUL;
513         ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
514         ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
515         if (ret || val != val_new)
516                 goto msr_fail;
517
518         return true;
519
520 bios_fail:
521         /*
522          * We still allow the PMU driver to operate:
523          */
524         printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
525         printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);
526
527         return true;
528
529 msr_fail:
530         printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
531
532         return false;
533 }
534
535 static void reserve_ds_buffers(void);
536 static void release_ds_buffers(void);
537
538 static void hw_perf_event_destroy(struct perf_event *event)
539 {
540         if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
541                 release_pmc_hardware();
542                 release_ds_buffers();
543                 mutex_unlock(&pmc_reserve_mutex);
544         }
545 }
546
547 static inline int x86_pmu_initialized(void)
548 {
549         return x86_pmu.handle_irq != NULL;
550 }
551
552 static inline int
553 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
554 {
555         struct perf_event_attr *attr = &event->attr;
556         unsigned int cache_type, cache_op, cache_result;
557         u64 config, val;
558
559         config = attr->config;
560
561         cache_type = (config >>  0) & 0xff;
562         if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
563                 return -EINVAL;
564
565         cache_op = (config >>  8) & 0xff;
566         if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
567                 return -EINVAL;
568
569         cache_result = (config >> 16) & 0xff;
570         if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
571                 return -EINVAL;
572
573         val = hw_cache_event_ids[cache_type][cache_op][cache_result];
574
575         if (val == 0)
576                 return -ENOENT;
577
578         if (val == -1)
579                 return -EINVAL;
580
581         hwc->config |= val;
582         attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
583         return x86_pmu_extra_regs(val, event);
584 }
585
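/*
 * Worked example for the decoding in set_ext_hw_attr() (values from the
 * perf ABI): a PERF_TYPE_HW_CACHE config of 0x10000 decodes as
 * cache_type 0 (PERF_COUNT_HW_CACHE_L1D), cache_op 0
 * (PERF_COUNT_HW_CACHE_OP_READ) and cache_result 1
 * (PERF_COUNT_HW_CACHE_RESULT_MISS), i.e. L1D read misses.  The sketch
 * below (kept inert under #if 0) shows how such a config is composed;
 * it is purely illustrative.
 */
#if 0
static u64 example_cache_config(void)
{
        return  PERF_COUNT_HW_CACHE_L1D |
                (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                (PERF_COUNT_HW_CACHE_RESULT_MISS << 16); /* == 0x10000 */
}
#endif
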
586 static int x86_setup_perfctr(struct perf_event *event)
587 {
588         struct perf_event_attr *attr = &event->attr;
589         struct hw_perf_event *hwc = &event->hw;
590         u64 config;
591
592         if (!is_sampling_event(event)) {
593                 hwc->sample_period = x86_pmu.max_period;
594                 hwc->last_period = hwc->sample_period;
595                 local64_set(&hwc->period_left, hwc->sample_period);
596         } else {
597                 /*
598                  * If we have a PMU initialized but no APIC
599                  * interrupts, we cannot sample hardware
600                  * events (user-space has to fall back and
601                  * sample via a hrtimer based software event):
602                  */
603                 if (!x86_pmu.apic)
604                         return -EOPNOTSUPP;
605         }
606
607         /*
608          * Do not allow config1 (extended registers) to propagate,
609          * there's no sane user-space generalization yet:
610          */
611         if (attr->type == PERF_TYPE_RAW)
612                 return 0;
613
614         if (attr->type == PERF_TYPE_HW_CACHE)
615                 return set_ext_hw_attr(hwc, event);
616
617         if (attr->config >= x86_pmu.max_events)
618                 return -EINVAL;
619
620         /*
621          * The generic map:
622          */
623         config = x86_pmu.event_map(attr->config);
624
625         if (config == 0)
626                 return -ENOENT;
627
628         if (config == -1LL)
629                 return -EINVAL;
630
631         /*
632          * Branch tracing:
633          */
634         if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
635             !attr->freq && hwc->sample_period == 1) {
636                 /* BTS is not supported by this architecture. */
637                 if (!x86_pmu.bts_active)
638                         return -EOPNOTSUPP;
639
640                 /* BTS is currently only allowed for user-mode. */
641                 if (!attr->exclude_kernel)
642                         return -EOPNOTSUPP;
643         }
644
645         hwc->config |= config;
646
647         return 0;
648 }
649
650 static int x86_pmu_hw_config(struct perf_event *event)
651 {
652         if (event->attr.precise_ip) {
653                 int precise = 0;
654
655                 /* Support for constant skid */
656                 if (x86_pmu.pebs_active) {
657                         precise++;
658
659                         /* Support for IP fixup */
660                         if (x86_pmu.lbr_nr)
661                                 precise++;
662                 }
663
664                 if (event->attr.precise_ip > precise)
665                         return -EOPNOTSUPP;
666         }
667
668         /*
669          * Generate PMC IRQs:
670          * (keep 'enabled' bit clear for now)
671          */
672         event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
673
674         /*
675          * Count user and OS events unless requested not to
676          */
677         if (!event->attr.exclude_user)
678                 event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
679         if (!event->attr.exclude_kernel)
680                 event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
681
682         if (event->attr.type == PERF_TYPE_RAW)
683                 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
684
685         return x86_setup_perfctr(event);
686 }
687
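/*
 * Note on the precise_ip check in x86_pmu_hw_config() (derived from the
 * code above): attr.precise_ip may be at most 1 when PEBS is active but
 * no LBR is available for IP fixup, at most 2 when both are available,
 * and must be 0 otherwise.
 */
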
688 /*
689  * Setup the hardware configuration for a given attr_type
690  */
691 static int __x86_pmu_event_init(struct perf_event *event)
692 {
693         int err;
694
695         if (!x86_pmu_initialized())
696                 return -ENODEV;
697
698         err = 0;
699         if (!atomic_inc_not_zero(&active_events)) {
700                 mutex_lock(&pmc_reserve_mutex);
701                 if (atomic_read(&active_events) == 0) {
702                         if (!reserve_pmc_hardware())
703                                 err = -EBUSY;
704                         else
705                                 reserve_ds_buffers();
706                 }
707                 if (!err)
708                         atomic_inc(&active_events);
709                 mutex_unlock(&pmc_reserve_mutex);
710         }
711         if (err)
712                 return err;
713
714         event->destroy = hw_perf_event_destroy;
715
716         event->hw.idx = -1;
717         event->hw.last_cpu = -1;
718         event->hw.last_tag = ~0ULL;
719
720         /* mark unused */
721         event->hw.extra_reg.idx = EXTRA_REG_NONE;
722
723         return x86_pmu.hw_config(event);
724 }
725
726 static void x86_pmu_disable_all(void)
727 {
728         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
729         int idx;
730
731         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
732                 u64 val;
733
734                 if (!test_bit(idx, cpuc->active_mask))
735                         continue;
736                 rdmsrl(x86_pmu_config_addr(idx), val);
737                 if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
738                         continue;
739                 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
740                 wrmsrl(x86_pmu_config_addr(idx), val);
741         }
742 }
743
744 static void x86_pmu_disable(struct pmu *pmu)
745 {
746         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
747
748         if (!x86_pmu_initialized())
749                 return;
750
751         if (!cpuc->enabled)
752                 return;
753
754         cpuc->n_added = 0;
755         cpuc->enabled = 0;
756         barrier();
757
758         x86_pmu.disable_all();
759 }
760
761 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
762                                           u64 enable_mask)
763 {
764         if (hwc->extra_reg.reg)
765                 wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
766         wrmsrl(hwc->config_base, hwc->config | enable_mask);
767 }
768
769 static void x86_pmu_enable_all(int added)
770 {
771         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
772         int idx;
773
774         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
775                 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
776
777                 if (!test_bit(idx, cpuc->active_mask))
778                         continue;
779
780                 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
781         }
782 }
783
784 static struct pmu pmu;
785
786 static inline int is_x86_event(struct perf_event *event)
787 {
788         return event->pmu == &pmu;
789 }
790
791 static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
792 {
793         struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
794         unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
795         int i, j, w, wmax, num = 0;
796         struct hw_perf_event *hwc;
797
798         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
799
800         for (i = 0; i < n; i++) {
801                 c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
802                 constraints[i] = c;
803         }
804
805         /*
806          * fastpath, try to reuse previous register
807          */
808         for (i = 0; i < n; i++) {
809                 hwc = &cpuc->event_list[i]->hw;
810                 c = constraints[i];
811
812                 /* never assigned */
813                 if (hwc->idx == -1)
814                         break;
815
816                 /* constraint still honored */
817                 if (!test_bit(hwc->idx, c->idxmsk))
818                         break;
819
820                 /* not already used */
821                 if (test_bit(hwc->idx, used_mask))
822                         break;
823
824                 __set_bit(hwc->idx, used_mask);
825                 if (assign)
826                         assign[i] = hwc->idx;
827         }
828         if (i == n)
829                 goto done;
830
831         /*
832          * begin slow path
833          */
834
835         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
836
837         /*
838          * weight = number of possible counters
839          *
840          * 1    = most constrained, only works on one counter
841          * wmax = least constrained, works on any counter
842          *
843          * assign events to counters starting with most
844          * constrained events.
845          */
846         wmax = x86_pmu.num_counters;
847
848         /*
849          * when fixed event counters are present,
850          * wmax is incremented by 1 to account
851          * for one more choice
852          */
853         if (x86_pmu.num_counters_fixed)
854                 wmax++;
855
856         for (w = 1, num = n; num && w <= wmax; w++) {
857                 /* for each event */
858                 for (i = 0; num && i < n; i++) {
859                         c = constraints[i];
860                         hwc = &cpuc->event_list[i]->hw;
861
862                         if (c->weight != w)
863                                 continue;
864
865                         for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
866                                 if (!test_bit(j, used_mask))
867                                         break;
868                         }
869
870                         if (j == X86_PMC_IDX_MAX)
871                                 break;
872
873                         __set_bit(j, used_mask);
874
875                         if (assign)
876                                 assign[i] = j;
877                         num--;
878                 }
879         }
880 done:
881         /*
882          * scheduling failed or is just a simulation,
883          * free resources if necessary
884          */
885         if (!assign || num) {
886                 for (i = 0; i < n; i++) {
887                         if (x86_pmu.put_event_constraints)
888                                 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
889                 }
890         }
891         return num ? -ENOSPC : 0;
892 }
893
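/*
 * Worked example for the weight-ordered pass in x86_schedule_events()
 * (illustrative): with two generic counters, an event constrained to
 * counter 0 only (weight 1) and an unconstrained event (weight 2), the
 * w == 1 pass places the constrained event on counter 0 first, so the
 * w == 2 pass can still park the unconstrained event on counter 1.
 * Assigning the unconstrained event first could have taken counter 0
 * and made the set unschedulable.
 */
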
894 /*
895  * dogrp: true if we must collect sibling events (the whole group)
896  * returns the total number of events or an error code
897  */
898 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
899 {
900         struct perf_event *event;
901         int n, max_count;
902
903         max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
904
905         /* current number of events already accepted */
906         n = cpuc->n_events;
907
908         if (is_x86_event(leader)) {
909                 if (n >= max_count)
910                         return -ENOSPC;
911                 cpuc->event_list[n] = leader;
912                 n++;
913         }
914         if (!dogrp)
915                 return n;
916
917         list_for_each_entry(event, &leader->sibling_list, group_entry) {
918                 if (!is_x86_event(event) ||
919                     event->state <= PERF_EVENT_STATE_OFF)
920                         continue;
921
922                 if (n >= max_count)
923                         return -ENOSPC;
924
925                 cpuc->event_list[n] = event;
926                 n++;
927         }
928         return n;
929 }
930
931 static inline void x86_assign_hw_event(struct perf_event *event,
932                                 struct cpu_hw_events *cpuc, int i)
933 {
934         struct hw_perf_event *hwc = &event->hw;
935
936         hwc->idx = cpuc->assign[i];
937         hwc->last_cpu = smp_processor_id();
938         hwc->last_tag = ++cpuc->tags[i];
939
940         if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
941                 hwc->config_base = 0;
942                 hwc->event_base = 0;
943         } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
944                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
945                 hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
946         } else {
947                 hwc->config_base = x86_pmu_config_addr(hwc->idx);
948                 hwc->event_base  = x86_pmu_event_addr(hwc->idx);
949         }
950 }
951
952 static inline int match_prev_assignment(struct hw_perf_event *hwc,
953                                         struct cpu_hw_events *cpuc,
954                                         int i)
955 {
956         return hwc->idx == cpuc->assign[i] &&
957                 hwc->last_cpu == smp_processor_id() &&
958                 hwc->last_tag == cpuc->tags[i];
959 }
960
961 static void x86_pmu_start(struct perf_event *event, int flags);
962 static void x86_pmu_stop(struct perf_event *event, int flags);
963
964 static void x86_pmu_enable(struct pmu *pmu)
965 {
966         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
967         struct perf_event *event;
968         struct hw_perf_event *hwc;
969         int i, added = cpuc->n_added;
970
971         if (!x86_pmu_initialized())
972                 return;
973
974         if (cpuc->enabled)
975                 return;
976
977         if (cpuc->n_added) {
978                 int n_running = cpuc->n_events - cpuc->n_added;
979                 /*
980                  * apply assignment obtained either from
981                  * hw_perf_group_sched_in() or x86_pmu_enable()
982                  *
983                  * step1: save events moving to new counters
984                  * step2: reprogram moved events into new counters
985                  */
986                 for (i = 0; i < n_running; i++) {
987                         event = cpuc->event_list[i];
988                         hwc = &event->hw;
989
990                         /*
991                          * we can avoid reprogramming counter if:
992                          * - assigned same counter as last time
993                          * - running on same CPU as last time
994                          * - no other event has used the counter since
995                          */
996                         if (hwc->idx == -1 ||
997                             match_prev_assignment(hwc, cpuc, i))
998                                 continue;
999
1000                         /*
1001                          * Ensure we don't accidentally enable a stopped
1002                          * counter simply because we rescheduled.
1003                          */
1004                         if (hwc->state & PERF_HES_STOPPED)
1005                                 hwc->state |= PERF_HES_ARCH;
1006
1007                         x86_pmu_stop(event, PERF_EF_UPDATE);
1008                 }
1009
1010                 for (i = 0; i < cpuc->n_events; i++) {
1011                         event = cpuc->event_list[i];
1012                         hwc = &event->hw;
1013
1014                         if (!match_prev_assignment(hwc, cpuc, i))
1015                                 x86_assign_hw_event(event, cpuc, i);
1016                         else if (i < n_running)
1017                                 continue;
1018
1019                         if (hwc->state & PERF_HES_ARCH)
1020                                 continue;
1021
1022                         x86_pmu_start(event, PERF_EF_RELOAD);
1023                 }
1024                 cpuc->n_added = 0;
1025                 perf_events_lapic_init();
1026         }
1027
1028         cpuc->enabled = 1;
1029         barrier();
1030
1031         x86_pmu.enable_all(added);
1032 }
1033
1034 static inline void x86_pmu_disable_event(struct perf_event *event)
1035 {
1036         struct hw_perf_event *hwc = &event->hw;
1037
1038         wrmsrl(hwc->config_base, hwc->config);
1039 }
1040
1041 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1042
1043 /*
1044  * Set the next IRQ period, based on the hwc->period_left value.
1045  * To be called with the event disabled in hw:
1046  */
1047 static int
1048 x86_perf_event_set_period(struct perf_event *event)
1049 {
1050         struct hw_perf_event *hwc = &event->hw;
1051         s64 left = local64_read(&hwc->period_left);
1052         s64 period = hwc->sample_period;
1053         int ret = 0, idx = hwc->idx;
1054
1055         if (idx == X86_PMC_IDX_FIXED_BTS)
1056                 return 0;
1057
1058         /*
1059          * If we are way outside a reasonable range then just skip forward:
1060          */
1061         if (unlikely(left <= -period)) {
1062                 left = period;
1063                 local64_set(&hwc->period_left, left);
1064                 hwc->last_period = period;
1065                 ret = 1;
1066         }
1067
1068         if (unlikely(left <= 0)) {
1069                 left += period;
1070                 local64_set(&hwc->period_left, left);
1071                 hwc->last_period = period;
1072                 ret = 1;
1073         }
1074         /*
1075          * Quirk: certain CPUs don't like it if just 1 hw_event is left:
1076          */
1077         if (unlikely(left < 2))
1078                 left = 2;
1079
1080         if (left > x86_pmu.max_period)
1081                 left = x86_pmu.max_period;
1082
1083         per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1084
1085         /*
1086          * The hw event starts counting from this event offset,
1087          * mark it to be able to extract future deltas:
1088          */
1089         local64_set(&hwc->prev_count, (u64)-left);
1090
1091         wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
1092
1093         /*
1094          * Due to an erratum on certain CPUs we need
1095          * a second write to be sure the register
1096          * is updated properly
1097          */
1098         if (x86_pmu.perfctr_second_write) {
1099                 wrmsrl(hwc->event_base,
1100                         (u64)(-left) & x86_pmu.cntval_mask);
1101         }
1102
1103         perf_event_update_userpage(event);
1104
1105         return ret;
1106 }
1107
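/*
 * Worked example for the period programming above (illustrative): with
 * 48-bit counters and left = 0x10000, the value written is
 * (u64)(-0x10000) & cntval_mask = 0xffffffff0000, so the counter
 * overflows and raises the PMI after exactly 0x10000 increments.
 * prev_count is set to the same (u64)-left so the next
 * x86_perf_event_update() sees a matching delta.
 */
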
1108 static void x86_pmu_enable_event(struct perf_event *event)
1109 {
1110         if (__this_cpu_read(cpu_hw_events.enabled))
1111                 __x86_pmu_enable_event(&event->hw,
1112                                        ARCH_PERFMON_EVENTSEL_ENABLE);
1113 }
1114
1115 /*
1116  * Add a single event to the PMU.
1117  *
1118  * The event is added to the group of enabled events
1119  * but only if it can be scheduled with existing events.
1120  */
1121 static int x86_pmu_add(struct perf_event *event, int flags)
1122 {
1123         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1124         struct hw_perf_event *hwc;
1125         int assign[X86_PMC_IDX_MAX];
1126         int n, n0, ret;
1127
1128         hwc = &event->hw;
1129
1130         perf_pmu_disable(event->pmu);
1131         n0 = cpuc->n_events;
1132         ret = n = collect_events(cpuc, event, false);
1133         if (ret < 0)
1134                 goto out;
1135
1136         hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
1137         if (!(flags & PERF_EF_START))
1138                 hwc->state |= PERF_HES_ARCH;
1139
1140         /*
1141          * If group events scheduling transaction was started,
1142          * skip the schedulability test here, it will be performed
1143          * at commit time (->commit_txn) as a whole
1144          */
1145         if (cpuc->group_flag & PERF_EVENT_TXN)
1146                 goto done_collect;
1147
1148         ret = x86_pmu.schedule_events(cpuc, n, assign);
1149         if (ret)
1150                 goto out;
1151         /*
1152          * copy the new assignment now that we know it is possible;
1153          * it will be used by hw_perf_enable()
1154          */
1155         memcpy(cpuc->assign, assign, n*sizeof(int));
1156
1157 done_collect:
1158         cpuc->n_events = n;
1159         cpuc->n_added += n - n0;
1160         cpuc->n_txn += n - n0;
1161
1162         ret = 0;
1163 out:
1164         perf_pmu_enable(event->pmu);
1165         return ret;
1166 }
1167
1168 static void x86_pmu_start(struct perf_event *event, int flags)
1169 {
1170         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1171         int idx = event->hw.idx;
1172
1173         if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
1174                 return;
1175
1176         if (WARN_ON_ONCE(idx == -1))
1177                 return;
1178
1179         if (flags & PERF_EF_RELOAD) {
1180                 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
1181                 x86_perf_event_set_period(event);
1182         }
1183
1184         event->hw.state = 0;
1185
1186         cpuc->events[idx] = event;
1187         __set_bit(idx, cpuc->active_mask);
1188         __set_bit(idx, cpuc->running);
1189         x86_pmu.enable(event);
1190         perf_event_update_userpage(event);
1191 }
1192
1193 void perf_event_print_debug(void)
1194 {
1195         u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1196         u64 pebs;
1197         struct cpu_hw_events *cpuc;
1198         unsigned long flags;
1199         int cpu, idx;
1200
1201         if (!x86_pmu.num_counters)
1202                 return;
1203
1204         local_irq_save(flags);
1205
1206         cpu = smp_processor_id();
1207         cpuc = &per_cpu(cpu_hw_events, cpu);
1208
1209         if (x86_pmu.version >= 2) {
1210                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1211                 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1212                 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1213                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1214                 rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
1215
1216                 pr_info("\n");
1217                 pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
1218                 pr_info("CPU#%d: status:     %016llx\n", cpu, status);
1219                 pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
1220                 pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
1221                 pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
1222         }
1223         pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1224
1225         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1226                 rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
1227                 rdmsrl(x86_pmu_event_addr(idx), pmc_count);
1228
1229                 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1230
1231                 pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
1232                         cpu, idx, pmc_ctrl);
1233                 pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
1234                         cpu, idx, pmc_count);
1235                 pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
1236                         cpu, idx, prev_left);
1237         }
1238         for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
1239                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1240
1241                 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1242                         cpu, idx, pmc_count);
1243         }
1244         local_irq_restore(flags);
1245 }
1246
1247 static void x86_pmu_stop(struct perf_event *event, int flags)
1248 {
1249         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1250         struct hw_perf_event *hwc = &event->hw;
1251
1252         if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
1253                 x86_pmu.disable(event);
1254                 cpuc->events[hwc->idx] = NULL;
1255                 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1256                 hwc->state |= PERF_HES_STOPPED;
1257         }
1258
1259         if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1260                 /*
1261                  * Drain the remaining delta count out of an event
1262                  * that we are disabling:
1263                  */
1264                 x86_perf_event_update(event);
1265                 hwc->state |= PERF_HES_UPTODATE;
1266         }
1267 }
1268
1269 static void x86_pmu_del(struct perf_event *event, int flags)
1270 {
1271         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1272         int i;
1273
1274         /*
1275          * If we're called during a txn, we don't need to do anything.
1276          * The events never got scheduled and ->cancel_txn will truncate
1277          * the event_list.
1278          */
1279         if (cpuc->group_flag & PERF_EVENT_TXN)
1280                 return;
1281
1282         x86_pmu_stop(event, PERF_EF_UPDATE);
1283
1284         for (i = 0; i < cpuc->n_events; i++) {
1285                 if (event == cpuc->event_list[i]) {
1286
1287                         if (x86_pmu.put_event_constraints)
1288                                 x86_pmu.put_event_constraints(cpuc, event);
1289
1290                         while (++i < cpuc->n_events)
1291                                 cpuc->event_list[i-1] = cpuc->event_list[i];
1292
1293                         --cpuc->n_events;
1294                         break;
1295                 }
1296         }
1297         perf_event_update_userpage(event);
1298 }
1299
1300 static int x86_pmu_handle_irq(struct pt_regs *regs)
1301 {
1302         struct perf_sample_data data;
1303         struct cpu_hw_events *cpuc;
1304         struct perf_event *event;
1305         int idx, handled = 0;
1306         u64 val;
1307
1308         perf_sample_data_init(&data, 0);
1309
1310         cpuc = &__get_cpu_var(cpu_hw_events);
1311
1312         /*
1313          * Some chipsets need to unmask the LVTPC in a particular spot
1314          * inside the nmi handler.  As a result, the unmasking was pushed
1315          * into all the nmi handlers.
1316          *
1317          * This generic handler doesn't seem to have any issues where the
1318          * unmasking occurs so it was left at the top.
1319          */
1320         apic_write(APIC_LVTPC, APIC_DM_NMI);
1321
1322         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1323                 if (!test_bit(idx, cpuc->active_mask)) {
1324                         /*
1325                          * Though we deactivated the counter, some CPUs
1326                          * might still deliver spurious interrupts that are
1327                          * still in flight. Catch them:
1328                          */
1329                         if (__test_and_clear_bit(idx, cpuc->running))
1330                                 handled++;
1331                         continue;
1332                 }
1333
1334                 event = cpuc->events[idx];
1335
1336                 val = x86_perf_event_update(event);
1337                 if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
1338                         continue;
1339
1340                 /*
1341                  * event overflow
1342                  */
1343                 handled++;
1344                 data.period     = event->hw.last_period;
1345
1346                 if (!x86_perf_event_set_period(event))
1347                         continue;
1348
1349                 if (perf_event_overflow(event, &data, regs))
1350                         x86_pmu_stop(event, 0);
1351         }
1352
1353         if (handled)
1354                 inc_irq_stat(apic_perf_irqs);
1355
1356         return handled;
1357 }
1358
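/*
 * Note on the overflow test in x86_pmu_handle_irq() (illustrative):
 * counters are programmed with -left, so they run with the top counter
 * bit set until they wrap.  With 48-bit counters, a raw value that still
 * has bit 47 set means this counter has not crossed zero yet and the PMI
 * belongs to another counter, so the index is skipped.
 */
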
1359 void perf_events_lapic_init(void)
1360 {
1361         if (!x86_pmu.apic || !x86_pmu_initialized())
1362                 return;
1363
1364         /*
1365          * Always use NMI for PMU
1366          */
1367         apic_write(APIC_LVTPC, APIC_DM_NMI);
1368 }
1369
1370 struct pmu_nmi_state {
1371         unsigned int    marked;
1372         int             handled;
1373 };
1374
1375 static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
1376
1377 static int __kprobes
1378 perf_event_nmi_handler(struct notifier_block *self,
1379                          unsigned long cmd, void *__args)
1380 {
1381         struct die_args *args = __args;
1382         unsigned int this_nmi;
1383         int handled;
1384
1385         if (!atomic_read(&active_events))
1386                 return NOTIFY_DONE;
1387
1388         switch (cmd) {
1389         case DIE_NMI:
1390                 break;
1391         case DIE_NMIUNKNOWN:
1392                 this_nmi = percpu_read(irq_stat.__nmi_count);
1393                 if (this_nmi != __this_cpu_read(pmu_nmi.marked))
1394                         /* let the kernel handle the unknown nmi */
1395                         return NOTIFY_DONE;
1396                 /*
1397                  * This one is a PMU back-to-back nmi. Two events
1398                  * trigger 'simultaneously' raising two back-to-back
1399                  * NMIs. If the first NMI handles both, the latter
1400                  * will be empty and daze the CPU. So, we drop it to
1401                  * avoid false-positive 'unknown nmi' messages.
1402                  */
1403                 return NOTIFY_STOP;
1404         default:
1405                 return NOTIFY_DONE;
1406         }
1407
1408         handled = x86_pmu.handle_irq(args->regs);
1409         if (!handled)
1410                 return NOTIFY_DONE;
1411
1412         this_nmi = percpu_read(irq_stat.__nmi_count);
1413         if ((handled > 1) ||
1414                 /* the next nmi could be a back-to-back nmi */
1415             ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
1416              (__this_cpu_read(pmu_nmi.handled) > 1))) {
1417                 /*
1418                  * We could have two subsequent back-to-back nmis: The
1419                  * first handles more than one counter, the 2nd
1420                  * handles only one counter and the 3rd handles no
1421                  * counter.
1422                  *
1423                  * This is the 2nd nmi because the previous was
1424                  * handling more than one counter. We will mark the
1425                  * next (3rd) and then drop it if unhandled.
1426                  */
1427                 __this_cpu_write(pmu_nmi.marked, this_nmi + 1);
1428                 __this_cpu_write(pmu_nmi.handled, handled);
1429         }
1430
1431         return NOTIFY_STOP;
1432 }
1433
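/*
 * Worked example for the back-to-back NMI logic above (illustrative):
 * two counters overflow at once and the hardware queues two NMIs.  The
 * first NMI handles both counters (handled == 2), so pmu_nmi.marked is
 * set to this_nmi + 1.  The second NMI then finds no overflowed counter,
 * falls through to the DIE_NMIUNKNOWN path, matches the marked value and
 * is silently dropped instead of being reported as an unknown NMI.
 */
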
1434 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1435         .notifier_call          = perf_event_nmi_handler,
1436         .next                   = NULL,
1437         .priority               = NMI_LOCAL_LOW_PRIOR,
1438 };
1439
1440 static struct event_constraint unconstrained;
1441 static struct event_constraint emptyconstraint;
1442
1443 static struct event_constraint *
1444 x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1445 {
1446         struct event_constraint *c;
1447
1448         if (x86_pmu.event_constraints) {
1449                 for_each_event_constraint(c, x86_pmu.event_constraints) {
1450                         if ((event->hw.config & c->cmask) == c->code)
1451                                 return c;
1452                 }
1453         }
1454
1455         return &unconstrained;
1456 }
1457
1458 #include "perf_event_amd.c"
1459 #include "perf_event_p6.c"
1460 #include "perf_event_p4.c"
1461 #include "perf_event_intel_lbr.c"
1462 #include "perf_event_intel_ds.c"
1463 #include "perf_event_intel.c"
1464
1465 static int __cpuinit
1466 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1467 {
1468         unsigned int cpu = (long)hcpu;
1469         int ret = NOTIFY_OK;
1470
1471         switch (action & ~CPU_TASKS_FROZEN) {
1472         case CPU_UP_PREPARE:
1473                 if (x86_pmu.cpu_prepare)
1474                         ret = x86_pmu.cpu_prepare(cpu);
1475                 break;
1476
1477         case CPU_STARTING:
1478                 if (x86_pmu.cpu_starting)
1479                         x86_pmu.cpu_starting(cpu);
1480                 break;
1481
1482         case CPU_DYING:
1483                 if (x86_pmu.cpu_dying)
1484                         x86_pmu.cpu_dying(cpu);
1485                 break;
1486
1487         case CPU_UP_CANCELED:
1488         case CPU_DEAD:
1489                 if (x86_pmu.cpu_dead)
1490                         x86_pmu.cpu_dead(cpu);
1491                 break;
1492
1493         default:
1494                 break;
1495         }
1496
1497         return ret;
1498 }
1499
1500 static void __init pmu_check_apic(void)
1501 {
1502         if (cpu_has_apic)
1503                 return;
1504
1505         x86_pmu.apic = 0;
1506         pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1507         pr_info("no hardware sampling interrupt available.\n");
1508 }
1509
1510 static int __init init_hw_perf_events(void)
1511 {
1512         struct event_constraint *c;
1513         int err;
1514
1515         pr_info("Performance Events: ");
1516
1517         switch (boot_cpu_data.x86_vendor) {
1518         case X86_VENDOR_INTEL:
1519                 err = intel_pmu_init();
1520                 break;
1521         case X86_VENDOR_AMD:
1522                 err = amd_pmu_init();
1523                 break;
1524         default:
1525                 return 0;
1526         }
1527         if (err != 0) {
1528                 pr_cont("no PMU driver, software events only.\n");
1529                 return 0;
1530         }
1531
1532         pmu_check_apic();
1533
1534         /* sanity check that the hardware exists or is emulated */
1535         if (!check_hw_exists())
1536                 return 0;
1537
1538         pr_cont("%s PMU driver.\n", x86_pmu.name);
1539
1540         if (x86_pmu.quirks)
1541                 x86_pmu.quirks();
1542
1543         if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
1544                 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
1545                      x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
1546                 x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
1547         }
1548         x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
1549
1550         if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
1551                 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
1552                      x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
1553                 x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
1554         }
1555
1556         x86_pmu.intel_ctrl |=
1557                 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
1558
1559         perf_events_lapic_init();
1560         register_die_notifier(&perf_event_nmi_notifier);
1561
1562         unconstrained = (struct event_constraint)
1563                 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1564                                    0, x86_pmu.num_counters);
1565
1566         if (x86_pmu.event_constraints) {
1567                 for_each_event_constraint(c, x86_pmu.event_constraints) {
1568                         if (c->cmask != X86_RAW_EVENT_MASK)
1569                                 continue;
1570
1571                         c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
1572                         c->weight += x86_pmu.num_counters;
1573                 }
1574         }
1575
1576         pr_info("... version:                %d\n",     x86_pmu.version);
1577         pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
1578         pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
1579         pr_info("... value mask:             %016Lx\n", x86_pmu.cntval_mask);
1580         pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
1581         pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
1582         pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
1583
1584         perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1585         perf_cpu_notifier(x86_pmu_notifier);
1586
1587         return 0;
1588 }
1589 early_initcall(init_hw_perf_events);
1590
1591 static inline void x86_pmu_read(struct perf_event *event)
1592 {
1593         x86_perf_event_update(event);
1594 }
1595
1596 /*
1597  * Start group events scheduling transaction
1598  * Set the flag to make pmu::enable() not perform the
1599  * schedulability test; it will be performed at commit time
1600  */
1601 static void x86_pmu_start_txn(struct pmu *pmu)
1602 {
1603         perf_pmu_disable(pmu);
1604         __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
1605         __this_cpu_write(cpu_hw_events.n_txn, 0);
1606 }
1607
1608 /*
1609  * Stop group events scheduling transaction
1610  * Clear the flag and pmu::enable() will perform the
1611  * schedulability test.
1612  */
1613 static void x86_pmu_cancel_txn(struct pmu *pmu)
1614 {
1615         __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
1616         /*
1617          * Truncate the collected events.
1618          */
1619         __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
1620         __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
1621         perf_pmu_enable(pmu);
1622 }
1623
1624 /*
1625  * Commit group events scheduling transaction
1626  * Perform the group schedulability test as a whole
1627  * Return 0 on success.
1628  */
1629 static int x86_pmu_commit_txn(struct pmu *pmu)
1630 {
1631         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1632         int assign[X86_PMC_IDX_MAX];
1633         int n, ret;
1634
1635         n = cpuc->n_events;
1636
1637         if (!x86_pmu_initialized())
1638                 return -EAGAIN;
1639
1640         ret = x86_pmu.schedule_events(cpuc, n, assign);
1641         if (ret)
1642                 return ret;
1643
1644         /*
1645          * Copy the new assignment now that we know it is possible;
1646          * it will be used by hw_perf_enable().
1647          */
1648         memcpy(cpuc->assign, assign, n*sizeof(int));
1649
1650         cpuc->group_flag &= ~PERF_EVENT_TXN;
1651         perf_pmu_enable(pmu);
1652         return 0;
1653 }
1654 /*
1655  * A fake_cpuc is used to validate event groups. Due to
1656  * the extra reg logic, we need to also allocate a fake
1657  * per_core and per_cpu structure. Otherwise, group events
1658  * using extra reg may conflict without the kernel being
1659  * able to catch this when the last event gets added to
1660  * the group.
1661  */
1662 static void free_fake_cpuc(struct cpu_hw_events *cpuc)
1663 {
1664         kfree(cpuc->shared_regs);
1665         kfree(cpuc);
1666 }
1667
1668 static struct cpu_hw_events *allocate_fake_cpuc(void)
1669 {
1670         struct cpu_hw_events *cpuc;
1671         int cpu = raw_smp_processor_id();
1672
1673         cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
1674         if (!cpuc)
1675                 return ERR_PTR(-ENOMEM);
1676
1677         /* only needed if we have extra_regs */
1678         if (x86_pmu.extra_regs) {
1679                 cpuc->shared_regs = allocate_shared_regs(cpu);
1680                 if (!cpuc->shared_regs)
1681                         goto error;
1682         }
1683         return cpuc;
1684 error:
1685         free_fake_cpuc(cpuc);
1686         return ERR_PTR(-ENOMEM);
1687 }
1688
1689 /*
1690  * validate that we can schedule this event
1691  */
1692 static int validate_event(struct perf_event *event)
1693 {
1694         struct cpu_hw_events *fake_cpuc;
1695         struct event_constraint *c;
1696         int ret = 0;
1697
1698         fake_cpuc = allocate_fake_cpuc();
1699         if (IS_ERR(fake_cpuc))
1700                 return PTR_ERR(fake_cpuc);
1701
1702         c = x86_pmu.get_event_constraints(fake_cpuc, event);
1703
1704         if (!c || !c->weight)
1705                 ret = -ENOSPC;
1706
1707         if (x86_pmu.put_event_constraints)
1708                 x86_pmu.put_event_constraints(fake_cpuc, event);
1709
1710         free_fake_cpuc(fake_cpuc);
1711
1712         return ret;
1713 }
1714
1715 /*
1716  * validate a single event group
1717  *
1718  * validation includes:
1719  *      - check events are compatible with each other
1720  *      - events do not compete for the same counter
1721  *      - number of events <= number of counters
1722  *
1723  * validation ensures the group can be loaded onto the
1724  * PMU if it was the only group available.
1725  */
1726 static int validate_group(struct perf_event *event)
1727 {
1728         struct perf_event *leader = event->group_leader;
1729         struct cpu_hw_events *fake_cpuc;
1730         int ret = -ENOSPC, n;
1731
1732         fake_cpuc = allocate_fake_cpuc();
1733         if (IS_ERR(fake_cpuc))
1734                 return PTR_ERR(fake_cpuc);
1735         /*
1736          * The event is not yet connected with its
1737          * siblings, therefore we must first collect the
1738          * existing siblings, then add the new event
1739          * before we can simulate the scheduling.
1740          */
1741         n = collect_events(fake_cpuc, leader, true);
1742         if (n < 0)
1743                 goto out;
1744
1745         fake_cpuc->n_events = n;
1746         n = collect_events(fake_cpuc, event, false);
1747         if (n < 0)
1748                 goto out;
1749
1750         fake_cpuc->n_events = n;
1751
1752         ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
1753
1754 out:
1755         free_fake_cpuc(fake_cpuc);
1756         return ret;
1757 }
1758
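/*
 * Set up a new event for this PMU: accept only the event types handled
 * here, do the hardware-specific init and then verify that the event
 * (or, for a group member, the whole group) can actually be scheduled.
 */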
1759 static int x86_pmu_event_init(struct perf_event *event)
1760 {
1761         struct pmu *tmp;
1762         int err;
1763
1764         switch (event->attr.type) {
1765         case PERF_TYPE_RAW:
1766         case PERF_TYPE_HARDWARE:
1767         case PERF_TYPE_HW_CACHE:
1768                 break;
1769
1770         default:
1771                 return -ENOENT;
1772         }
1773
1774         err = __x86_pmu_event_init(event);
1775         if (!err) {
1776                 /*
1777                  * we temporarily connect the event to its pmu
1778                  * such that validate_group() can classify
1779                  * it as an x86 event using is_x86_event()
1780                  */
1781                 tmp = event->pmu;
1782                 event->pmu = &pmu;
1783
1784                 if (event->group_leader != event)
1785                         err = validate_group(event);
1786                 else
1787                         err = validate_event(event);
1788
1789                 event->pmu = tmp;
1790         }
1791         if (err) {
1792                 if (event->destroy)
1793                         event->destroy(event);
1794         }
1795
1796         return err;
1797 }
1798
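/* The core x86 PMU, registered as "cpu" in init_hw_perf_events() above. */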
1799 static struct pmu pmu = {
1800         .pmu_enable     = x86_pmu_enable,
1801         .pmu_disable    = x86_pmu_disable,
1802
1803         .event_init     = x86_pmu_event_init,
1804
1805         .add            = x86_pmu_add,
1806         .del            = x86_pmu_del,
1807         .start          = x86_pmu_start,
1808         .stop           = x86_pmu_stop,
1809         .read           = x86_pmu_read,
1810
1811         .start_txn      = x86_pmu_start_txn,
1812         .cancel_txn     = x86_pmu_cancel_txn,
1813         .commit_txn     = x86_pmu_commit_txn,
1814 };
1815
1816 /*
1817  * callchain support
1818  */
1819
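/* Nothing to record when switching stacks; returning 0 keeps the walk going. */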
1820 static int backtrace_stack(void *data, char *name)
1821 {
1822         return 0;
1823 }
1824
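/* Store every walked kernel address, regardless of the 'reliable' hint. */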
1825 static void backtrace_address(void *data, unsigned long addr, int reliable)
1826 {
1827         struct perf_callchain_entry *entry = data;
1828
1829         perf_callchain_store(entry, addr);
1830 }
1831
1832 static const struct stacktrace_ops backtrace_ops = {
1833         .stack                  = backtrace_stack,
1834         .address                = backtrace_address,
1835         .walk_stack             = print_context_stack_bp,
1836 };
1837
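/*
 * Kernel-side callchain: record the interrupted IP and then walk the
 * kernel stack with dump_trace() using the ops above.
 */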
1838 void
1839 perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
1840 {
1841         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1842                 /* TODO: We don't support guest OS callchains yet */
1843                 return;
1844         }
1845
1846         perf_callchain_store(entry, regs->ip);
1847
1848         dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
1849 }
1850
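/*
 * Compat (32-bit task on a 64-bit kernel) user stack walk.  Returns 1
 * when the unwind was handled here, 0 to fall back to the native walk
 * in perf_callchain_user().
 */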
1851 #ifdef CONFIG_COMPAT
1852 static inline int
1853 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1854 {
1855         /* 32-bit process in 64-bit kernel. */
1856         struct stack_frame_ia32 frame;
1857         const void __user *fp;
1858
1859         if (!test_thread_flag(TIF_IA32))
1860                 return 0;
1861
1862         fp = compat_ptr(regs->bp);
1863         while (entry->nr < PERF_MAX_STACK_DEPTH) {
1864                 unsigned long bytes;
1865                 frame.next_frame     = 0;
1866                 frame.return_address = 0;
1867
1868                 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1869                 if (bytes != sizeof(frame))
1870                         break;
1871
1872                 if (fp < compat_ptr(regs->sp))
1873                         break;
1874
1875                 perf_callchain_store(entry, frame.return_address);
1876                 fp = compat_ptr(frame.next_frame);
1877         }
1878         return 1;
1879 }
1880 #else
1881 static inline int
1882 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1883 {
1884         return 0;
1885 }
1886 #endif
1887
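/*
 * User-side callchain: follow the saved frame pointers, fetching each
 * frame with copy_from_user_nmi() since this can run in NMI context.
 */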
1888 void
1889 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
1890 {
1891         struct stack_frame frame;
1892         const void __user *fp;
1893
1894         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1895                 /* TODO: We don't support guest OS callchains yet */
1896                 return;
1897         }
1898
1899         fp = (void __user *)regs->bp;
1900
1901         perf_callchain_store(entry, regs->ip);
1902
1903         if (perf_callchain_user32(regs, entry))
1904                 return;
1905
1906         while (entry->nr < PERF_MAX_STACK_DEPTH) {
1907                 unsigned long bytes;
1908                 frame.next_frame     = NULL;
1909                 frame.return_address = 0;
1910
1911                 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1912                 if (bytes != sizeof(frame))
1913                         break;
1914
1915                 if ((unsigned long)fp < regs->sp)
1916                         break;
1917
1918                 perf_callchain_store(entry, frame.return_address);
1919                 fp = frame.next_frame;
1920         }
1921 }
1922
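/* Samples that hit in a guest report the guest IP, all others the host regs->ip. */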
1923 unsigned long perf_instruction_pointer(struct pt_regs *regs)
1924 {
1925         unsigned long ip;
1926
1927         if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
1928                 ip = perf_guest_cbs->get_guest_ip();
1929         else
1930                 ip = instruction_pointer(regs);
1931
1932         return ip;
1933 }
1934
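/*
 * Classify where the sample originated (guest/host, user/kernel) and
 * mark samples whose IP is known to be exact (PERF_EFLAGS_EXACT).
 */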
1935 unsigned long perf_misc_flags(struct pt_regs *regs)
1936 {
1937         int misc = 0;
1938
1939         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1940                 if (perf_guest_cbs->is_user_mode())
1941                         misc |= PERF_RECORD_MISC_GUEST_USER;
1942                 else
1943                         misc |= PERF_RECORD_MISC_GUEST_KERNEL;
1944         } else {
1945                 if (user_mode(regs))
1946                         misc |= PERF_RECORD_MISC_USER;
1947                 else
1948                         misc |= PERF_RECORD_MISC_KERNEL;
1949         }
1950
1951         if (regs->flags & PERF_EFLAGS_EXACT)
1952                 misc |= PERF_RECORD_MISC_EXACT_IP;
1953
1954         return misc;
1955 }