arch/x86/kernel/cpu/perf_event.c
1 /*
2  * Performance events x86 architecture code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  *  For licencing details see kernel-base/COPYING
13  */
14
15 #include <linux/perf_event.h>
16 #include <linux/capability.h>
17 #include <linux/notifier.h>
18 #include <linux/hardirq.h>
19 #include <linux/kprobes.h>
20 #include <linux/module.h>
21 #include <linux/kdebug.h>
22 #include <linux/sched.h>
23 #include <linux/uaccess.h>
24 #include <linux/slab.h>
25 #include <linux/highmem.h>
26 #include <linux/cpu.h>
27 #include <linux/bitops.h>
28
29 #include <asm/apic.h>
30 #include <asm/stacktrace.h>
31 #include <asm/nmi.h>
32 #include <asm/compat.h>
33
34 #if 0
35 #undef wrmsrl
36 #define wrmsrl(msr, val)                                        \
37 do {                                                            \
38         trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
39                         (unsigned long)(val));                  \
40         native_write_msr((msr), (u32)((u64)(val)),              \
41                         (u32)((u64)(val) >> 32));               \
42 } while (0)
43 #endif
44
45 /*
46  * best-effort, GUP-based copy_from_user() that assumes IRQ or NMI context
47  */
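/*
 * Returns the number of bytes actually copied, which may be less than n
 * when a user page is not resident: we cannot take a fault from IRQ/NMI
 * context, so the loop simply stops at the first missing page.
 */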
48 static unsigned long
49 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
50 {
51         unsigned long offset, addr = (unsigned long)from;
52         unsigned long size, len = 0;
53         struct page *page;
54         void *map;
55         int ret;
56
57         do {
58                 ret = __get_user_pages_fast(addr, 1, 0, &page);
59                 if (!ret)
60                         break;
61
62                 offset = addr & (PAGE_SIZE - 1);
63                 size = min(PAGE_SIZE - offset, n - len);
64
65                 map = kmap_atomic(page);
66                 memcpy(to, map+offset, size);
67                 kunmap_atomic(map);
68                 put_page(page);
69
70                 len  += size;
71                 to   += size;
72                 addr += size;
73
74         } while (len < n);
75
76         return len;
77 }
78
79 struct event_constraint {
80         union {
81                 unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
82                 u64             idxmsk64;
83         };
84         u64     code;
85         u64     cmask;
86         int     weight;
87 };
88
89 struct amd_nb {
90         int nb_id;  /* NorthBridge id */
91         int refcnt; /* reference count */
92         struct perf_event *owners[X86_PMC_IDX_MAX];
93         struct event_constraint event_constraints[X86_PMC_IDX_MAX];
94 };
95
96 #define MAX_LBR_ENTRIES         16
97
98 struct cpu_hw_events {
99         /*
100          * Generic x86 PMC bits
101          */
102         struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
103         unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
104         unsigned long           running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
105         int                     enabled;
106
107         int                     n_events;
108         int                     n_added;
109         int                     n_txn;
110         int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
111         u64                     tags[X86_PMC_IDX_MAX];
112         struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
113
114         unsigned int            group_flag;
115
116         /*
117          * Intel DebugStore bits
118          */
119         struct debug_store      *ds;
120         u64                     pebs_enabled;
121
122         /*
123          * Intel LBR bits
124          */
125         int                             lbr_users;
126         void                            *lbr_context;
127         struct perf_branch_stack        lbr_stack;
128         struct perf_branch_entry        lbr_entries[MAX_LBR_ENTRIES];
129
130         /*
131          * AMD specific bits
132          */
133         struct amd_nb           *amd_nb;
134 };
135
136 #define __EVENT_CONSTRAINT(c, n, m, w) {\
137         { .idxmsk64 = (n) },            \
138         .code = (c),                    \
139         .cmask = (m),                   \
140         .weight = (w),                  \
141 }
142
143 #define EVENT_CONSTRAINT(c, n, m)       \
144         __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
145
146 /*
147  * Constraint on the Event code.
148  */
149 #define INTEL_EVENT_CONSTRAINT(c, n)    \
150         EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
151
152 /*
153  * Constraint on the Event code + UMask + fixed-mask
154  *
155  * filter mask to validate fixed counter events.
156  * The following filters disqualify an event from fixed counters:
157  *  - inv
158  *  - edge
159  *  - cnt-mask
160  *  The other filters are supported by fixed counters.
161  *  The any-thread option is supported starting with v3.
162  */
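/*
 * The counter mask (1ULL << (32 + n)) places fixed counter n at generic
 * index X86_PMC_IDX_FIXED + n in the scheduler's counter index space.
 */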
163 #define FIXED_EVENT_CONSTRAINT(c, n)    \
164         EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
165
166 /*
167  * Constraint on the Event code + UMask
168  */
169 #define PEBS_EVENT_CONSTRAINT(c, n)     \
170         EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
171
172 #define EVENT_CONSTRAINT_END            \
173         EVENT_CONSTRAINT(0, 0, 0)
174
175 #define for_each_event_constraint(e, c) \
176         for ((e) = (c); (e)->weight; (e)++)
177
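/*
 * Bit layout of the Intel IA32_PERF_CAPABILITIES MSR, as far as this
 * code uses it.
 */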
178 union perf_capabilities {
179         struct {
180                 u64     lbr_format    : 6;
181                 u64     pebs_trap     : 1;
182                 u64     pebs_arch_reg : 1;
183                 u64     pebs_format   : 4;
184                 u64     smm_freeze    : 1;
185         };
186         u64     capabilities;
187 };
188
189 /*
190  * struct x86_pmu - generic x86 pmu
191  */
192 struct x86_pmu {
193         /*
194          * Generic x86 PMC bits
195          */
196         const char      *name;
197         int             version;
198         int             (*handle_irq)(struct pt_regs *);
199         void            (*disable_all)(void);
200         void            (*enable_all)(int added);
201         void            (*enable)(struct perf_event *);
202         void            (*disable)(struct perf_event *);
203         int             (*hw_config)(struct perf_event *event);
204         int             (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
205         unsigned        eventsel;
206         unsigned        perfctr;
207         u64             (*event_map)(int);
208         int             max_events;
209         int             num_counters;
210         int             num_counters_fixed;
211         int             cntval_bits;
212         u64             cntval_mask;
213         int             apic;
214         u64             max_period;
215         struct event_constraint *
216                         (*get_event_constraints)(struct cpu_hw_events *cpuc,
217                                                  struct perf_event *event);
218
219         void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
220                                                  struct perf_event *event);
221         struct event_constraint *event_constraints;
222         void            (*quirks)(void);
223         int             perfctr_second_write;
224
225         int             (*cpu_prepare)(int cpu);
226         void            (*cpu_starting)(int cpu);
227         void            (*cpu_dying)(int cpu);
228         void            (*cpu_dead)(int cpu);
229
230         /*
231          * Intel Arch Perfmon v2+
232          */
233         u64                     intel_ctrl;
234         union perf_capabilities intel_cap;
235
236         /*
237          * Intel DebugStore bits
238          */
239         int             bts, pebs;
240         int             bts_active, pebs_active;
241         int             pebs_record_size;
242         void            (*drain_pebs)(struct pt_regs *regs);
243         struct event_constraint *pebs_constraints;
244
245         /*
246          * Intel LBR
247          */
248         unsigned long   lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
249         int             lbr_nr;                    /* hardware stack size */
250 };
251
252 static struct x86_pmu x86_pmu __read_mostly;
253
254 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
255         .enabled = 1,
256 };
257
258 static int x86_perf_event_set_period(struct perf_event *event);
259
260 /*
261  * Generalized hw caching related hw_event table, filled
262  * in on a per model basis. A value of 0 means
263  * 'not supported', -1 means 'hw_event makes no sense on
264  * this CPU', any other value means the raw hw_event
265  * ID.
266  */
267
268 #define C(x) PERF_COUNT_HW_CACHE_##x
269
270 static u64 __read_mostly hw_cache_event_ids
271                                 [PERF_COUNT_HW_CACHE_MAX]
272                                 [PERF_COUNT_HW_CACHE_OP_MAX]
273                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
274
275 /*
276  * Propagate event elapsed time into the generic event.
277  * Can only be executed on the CPU where the event is active.
278  * Returns the delta events processed.
279  */
280 static u64
281 x86_perf_event_update(struct perf_event *event)
282 {
283         struct hw_perf_event *hwc = &event->hw;
284         int shift = 64 - x86_pmu.cntval_bits;
285         u64 prev_raw_count, new_raw_count;
286         int idx = hwc->idx;
287         s64 delta;
288
289         if (idx == X86_PMC_IDX_FIXED_BTS)
290                 return 0;
291
292         /*
293          * Careful: an NMI might modify the previous event value.
294          *
295          * Our tactic to handle this is to first atomically read and
296          * exchange a new raw count - then add that new-prev delta
297          * count to the generic event atomically:
298          */
299 again:
300         prev_raw_count = local64_read(&hwc->prev_count);
301         rdmsrl(hwc->event_base + idx, new_raw_count);
302
303         if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
304                                         new_raw_count) != prev_raw_count)
305                 goto again;
306
307         /*
308          * Now we have the new raw value and have updated the prev
309          * timestamp already. We can now calculate the elapsed delta
310          * (event-)time and add that to the generic event.
311          *
312          * Careful, not all hw sign-extends above the physical width
313          * of the count.
314          */
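        /*
         * For example, with a 48-bit counter (shift == 16) wrapping from
         * 0xffffffffffff to 0x1:
         *   (0x1 << 16) - (0xffffffffffff << 16) = 0x20000 as s64,
         *   and 0x20000 >> 16 = 2,
         * which is the correct delta across the wrap.
         */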
315         delta = (new_raw_count << shift) - (prev_raw_count << shift);
316         delta >>= shift;
317
318         local64_add(delta, &event->count);
319         local64_sub(delta, &hwc->period_left);
320
321         return new_raw_count;
322 }
323
324 static atomic_t active_events;
325 static DEFINE_MUTEX(pmc_reserve_mutex);
326
327 #ifdef CONFIG_X86_LOCAL_APIC
328
329 static bool reserve_pmc_hardware(void)
330 {
331         int i;
332
333         if (nmi_watchdog == NMI_LOCAL_APIC)
334                 disable_lapic_nmi_watchdog();
335
336         for (i = 0; i < x86_pmu.num_counters; i++) {
337                 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
338                         goto perfctr_fail;
339         }
340
341         for (i = 0; i < x86_pmu.num_counters; i++) {
342                 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
343                         goto eventsel_fail;
344         }
345
346         return true;
347
348 eventsel_fail:
349         for (i--; i >= 0; i--)
350                 release_evntsel_nmi(x86_pmu.eventsel + i);
351
352         i = x86_pmu.num_counters;
353
354 perfctr_fail:
355         for (i--; i >= 0; i--)
356                 release_perfctr_nmi(x86_pmu.perfctr + i);
357
358         if (nmi_watchdog == NMI_LOCAL_APIC)
359                 enable_lapic_nmi_watchdog();
360
361         return false;
362 }
363
364 static void release_pmc_hardware(void)
365 {
366         int i;
367
368         for (i = 0; i < x86_pmu.num_counters; i++) {
369                 release_perfctr_nmi(x86_pmu.perfctr + i);
370                 release_evntsel_nmi(x86_pmu.eventsel + i);
371         }
372
373         if (nmi_watchdog == NMI_LOCAL_APIC)
374                 enable_lapic_nmi_watchdog();
375 }
376
377 #else
378
379 static bool reserve_pmc_hardware(void) { return true; }
380 static void release_pmc_hardware(void) {}
381
382 #endif
383
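/*
 * Smoke test: write a known value to the first counter MSR and read it
 * back. If either access faults or the value does not stick, assume the
 * PMU is absent or non-functional (e.g. not exposed by a hypervisor).
 */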
384 static bool check_hw_exists(void)
385 {
386         u64 val, val_new = 0;
387         int ret = 0;
388
389         val = 0xabcdUL;
390         ret |= checking_wrmsrl(x86_pmu.perfctr, val);
391         ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
392         if (ret || val != val_new)
393                 return false;
394
395         return true;
396 }
397
398 static void reserve_ds_buffers(void);
399 static void release_ds_buffers(void);
400
401 static void hw_perf_event_destroy(struct perf_event *event)
402 {
403         if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
404                 release_pmc_hardware();
405                 release_ds_buffers();
406                 mutex_unlock(&pmc_reserve_mutex);
407         }
408 }
409
410 static inline int x86_pmu_initialized(void)
411 {
412         return x86_pmu.handle_irq != NULL;
413 }
414
415 static inline int
416 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
417 {
418         unsigned int cache_type, cache_op, cache_result;
419         u64 config, val;
420
421         config = attr->config;
422
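        /*
         * A cache event config packs three selectors:
         *   config = type | (op << 8) | (result << 16)
         * each of which is range-checked below and then looked up in the
         * per-model hw_cache_event_ids table.
         */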
423         cache_type = (config >>  0) & 0xff;
424         if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
425                 return -EINVAL;
426
427         cache_op = (config >>  8) & 0xff;
428         if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
429                 return -EINVAL;
430
431         cache_result = (config >> 16) & 0xff;
432         if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
433                 return -EINVAL;
434
435         val = hw_cache_event_ids[cache_type][cache_op][cache_result];
436
437         if (val == 0)
438                 return -ENOENT;
439
440         if (val == -1)
441                 return -EINVAL;
442
443         hwc->config |= val;
444
445         return 0;
446 }
447
448 static int x86_setup_perfctr(struct perf_event *event)
449 {
450         struct perf_event_attr *attr = &event->attr;
451         struct hw_perf_event *hwc = &event->hw;
452         u64 config;
453
454         if (!hwc->sample_period) {
455                 hwc->sample_period = x86_pmu.max_period;
456                 hwc->last_period = hwc->sample_period;
457                 local64_set(&hwc->period_left, hwc->sample_period);
458         } else {
459                 /*
460                  * If we have a PMU initialized but no APIC
461                  * interrupts, we cannot sample hardware
462                  * events (user-space has to fall back and
463                  * sample via a hrtimer based software event):
464                  */
465                 if (!x86_pmu.apic)
466                         return -EOPNOTSUPP;
467         }
468
469         if (attr->type == PERF_TYPE_RAW)
470                 return 0;
471
472         if (attr->type == PERF_TYPE_HW_CACHE)
473                 return set_ext_hw_attr(hwc, attr);
474
475         if (attr->config >= x86_pmu.max_events)
476                 return -EINVAL;
477
478         /*
479          * The generic map:
480          */
481         config = x86_pmu.event_map(attr->config);
482
483         if (config == 0)
484                 return -ENOENT;
485
486         if (config == -1LL)
487                 return -EINVAL;
488
489         /*
490          * Branch tracing:
491          */
492         if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
493             (hwc->sample_period == 1)) {
494                 /* BTS is not supported by this architecture. */
495                 if (!x86_pmu.bts_active)
496                         return -EOPNOTSUPP;
497
498                 /* BTS is currently only allowed for user-mode. */
499                 if (!attr->exclude_kernel)
500                         return -EOPNOTSUPP;
501         }
502
503         hwc->config |= config;
504
505         return 0;
506 }
507
508 static int x86_pmu_hw_config(struct perf_event *event)
509 {
510         if (event->attr.precise_ip) {
511                 int precise = 0;
512
513                 /* Support for constant skid */
514                 if (x86_pmu.pebs_active) {
515                         precise++;
516
517                         /* Support for IP fixup */
518                         if (x86_pmu.lbr_nr)
519                                 precise++;
520                 }
521
522                 if (event->attr.precise_ip > precise)
523                         return -EOPNOTSUPP;
524         }
525
526         /*
527          * Generate PMC IRQs:
528          * (keep 'enabled' bit clear for now)
529          */
530         event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
531
532         /*
533          * Count user and OS events unless requested not to
534          */
535         if (!event->attr.exclude_user)
536                 event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
537         if (!event->attr.exclude_kernel)
538                 event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
539
540         if (event->attr.type == PERF_TYPE_RAW)
541                 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
542
543         return x86_setup_perfctr(event);
544 }
545
546 /*
547  * Setup the hardware configuration for a given attr_type
548  */
549 static int __x86_pmu_event_init(struct perf_event *event)
550 {
551         int err;
552
553         if (!x86_pmu_initialized())
554                 return -ENODEV;
555
556         err = 0;
557         if (!atomic_inc_not_zero(&active_events)) {
558                 mutex_lock(&pmc_reserve_mutex);
559                 if (atomic_read(&active_events) == 0) {
560                         if (!reserve_pmc_hardware())
561                                 err = -EBUSY;
562                         else
563                                 reserve_ds_buffers();
564                 }
565                 if (!err)
566                         atomic_inc(&active_events);
567                 mutex_unlock(&pmc_reserve_mutex);
568         }
569         if (err)
570                 return err;
571
572         event->destroy = hw_perf_event_destroy;
573
574         event->hw.idx = -1;
575         event->hw.last_cpu = -1;
576         event->hw.last_tag = ~0ULL;
577
578         return x86_pmu.hw_config(event);
579 }
580
581 static void x86_pmu_disable_all(void)
582 {
583         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
584         int idx;
585
586         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
587                 u64 val;
588
589                 if (!test_bit(idx, cpuc->active_mask))
590                         continue;
591                 rdmsrl(x86_pmu.eventsel + idx, val);
592                 if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
593                         continue;
594                 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
595                 wrmsrl(x86_pmu.eventsel + idx, val);
596         }
597 }
598
599 static void x86_pmu_disable(struct pmu *pmu)
600 {
601         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
602
603         if (!x86_pmu_initialized())
604                 return;
605
606         if (!cpuc->enabled)
607                 return;
608
609         cpuc->n_added = 0;
610         cpuc->enabled = 0;
611         barrier();
612
613         x86_pmu.disable_all();
614 }
615
616 static void x86_pmu_enable_all(int added)
617 {
618         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
619         int idx;
620
621         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
622                 struct perf_event *event = cpuc->events[idx];
623                 u64 val;
624
625                 if (!test_bit(idx, cpuc->active_mask))
626                         continue;
627
628                 val = event->hw.config;
629                 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
630                 wrmsrl(x86_pmu.eventsel + idx, val);
631         }
632 }
633
634 static struct pmu pmu;
635
636 static inline int is_x86_event(struct perf_event *event)
637 {
638         return event->pmu == &pmu;
639 }
640
641 static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
642 {
643         struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
644         unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
645         int i, j, w, wmax, num = 0;
646         struct hw_perf_event *hwc;
647
648         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
649
650         for (i = 0; i < n; i++) {
651                 c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
652                 constraints[i] = c;
653         }
654
655         /*
656          * fastpath, try to reuse previous register
657          */
658         for (i = 0; i < n; i++) {
659                 hwc = &cpuc->event_list[i]->hw;
660                 c = constraints[i];
661
662                 /* never assigned */
663                 if (hwc->idx == -1)
664                         break;
665
666                 /* constraint still honored */
667                 if (!test_bit(hwc->idx, c->idxmsk))
668                         break;
669
670                 /* not already used */
671                 if (test_bit(hwc->idx, used_mask))
672                         break;
673
674                 __set_bit(hwc->idx, used_mask);
675                 if (assign)
676                         assign[i] = hwc->idx;
677         }
678         if (i == n)
679                 goto done;
680
681         /*
682          * begin slow path
683          */
684
685         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
686
687         /*
688          * weight = number of possible counters
689          *
690          * 1    = most constrained, only works on one counter
691          * wmax = least constrained, works on any counter
692          *
693          * assign events to counters starting with most
694          * constrained events.
695          */
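        /*
         * E.g. an event that can only run on one counter (weight 1) is
         * placed before an unconstrained event (weight == num_counters),
         * so the unconstrained event cannot steal the only counter the
         * constrained one may use.
         */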
696         wmax = x86_pmu.num_counters;
697
698         /*
699          * when fixed event counters are present,
700          * wmax is incremented by 1 to account
701          * for one more choice
702          */
703         if (x86_pmu.num_counters_fixed)
704                 wmax++;
705
706         for (w = 1, num = n; num && w <= wmax; w++) {
707                 /* for each event */
708                 for (i = 0; num && i < n; i++) {
709                         c = constraints[i];
710                         hwc = &cpuc->event_list[i]->hw;
711
712                         if (c->weight != w)
713                                 continue;
714
715                         for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
716                                 if (!test_bit(j, used_mask))
717                                         break;
718                         }
719
720                         if (j == X86_PMC_IDX_MAX)
721                                 break;
722
723                         __set_bit(j, used_mask);
724
725                         if (assign)
726                                 assign[i] = j;
727                         num--;
728                 }
729         }
730 done:
731         /*
732          * scheduling failed or is just a simulation,
733          * free resources if necessary
734          */
735         if (!assign || num) {
736                 for (i = 0; i < n; i++) {
737                         if (x86_pmu.put_event_constraints)
738                                 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
739                 }
740         }
741         return num ? -ENOSPC : 0;
742 }
743
744 /*
745  * dogrp: true if we must also collect the sibling events (whole group)
746  * returns the total number of events, or a negative error code
747  */
748 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
749 {
750         struct perf_event *event;
751         int n, max_count;
752
753         max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
754
755         /* current number of events already accepted */
756         n = cpuc->n_events;
757
758         if (is_x86_event(leader)) {
759                 if (n >= max_count)
760                         return -ENOSPC;
761                 cpuc->event_list[n] = leader;
762                 n++;
763         }
764         if (!dogrp)
765                 return n;
766
767         list_for_each_entry(event, &leader->sibling_list, group_entry) {
768                 if (!is_x86_event(event) ||
769                     event->state <= PERF_EVENT_STATE_OFF)
770                         continue;
771
772                 if (n >= max_count)
773                         return -ENOSPC;
774
775                 cpuc->event_list[n] = event;
776                 n++;
777         }
778         return n;
779 }
780
781 static inline void x86_assign_hw_event(struct perf_event *event,
782                                 struct cpu_hw_events *cpuc, int i)
783 {
784         struct hw_perf_event *hwc = &event->hw;
785
786         hwc->idx = cpuc->assign[i];
787         hwc->last_cpu = smp_processor_id();
788         hwc->last_tag = ++cpuc->tags[i];
789
790         if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
791                 hwc->config_base = 0;
792                 hwc->event_base = 0;
793         } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
794                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
795                 /*
796                  * We set it so that event_base + idx in wrmsr/rdmsr maps to
797                  * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
798                  */
799                 hwc->event_base =
800                         MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
801         } else {
802                 hwc->config_base = x86_pmu.eventsel;
803                 hwc->event_base  = x86_pmu.perfctr;
804         }
805 }
806
807 static inline int match_prev_assignment(struct hw_perf_event *hwc,
808                                         struct cpu_hw_events *cpuc,
809                                         int i)
810 {
811         return hwc->idx == cpuc->assign[i] &&
812                 hwc->last_cpu == smp_processor_id() &&
813                 hwc->last_tag == cpuc->tags[i];
814 }
815
816 static void x86_pmu_start(struct perf_event *event, int flags);
817 static void x86_pmu_stop(struct perf_event *event, int flags);
818
819 static void x86_pmu_enable(struct pmu *pmu)
820 {
821         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
822         struct perf_event *event;
823         struct hw_perf_event *hwc;
824         int i, added = cpuc->n_added;
825
826         if (!x86_pmu_initialized())
827                 return;
828
829         if (cpuc->enabled)
830                 return;
831
832         if (cpuc->n_added) {
833                 int n_running = cpuc->n_events - cpuc->n_added;
834                 /*
835                  * apply assignment obtained either from
836                  * hw_perf_group_sched_in() or x86_pmu_enable()
837                  *
838                  * step1: save events moving to new counters
839                  * step2: reprogram moved events into new counters
840                  */
841                 for (i = 0; i < n_running; i++) {
842                         event = cpuc->event_list[i];
843                         hwc = &event->hw;
844
845                         /*
846                          * we can avoid reprogramming counter if:
847                          * - assigned same counter as last time
848                          * - running on same CPU as last time
849                          * - no other event has used the counter since
850                          */
851                         if (hwc->idx == -1 ||
852                             match_prev_assignment(hwc, cpuc, i))
853                                 continue;
854
855                         /*
856                          * Ensure we don't accidentally enable a stopped
857                          * counter simply because we rescheduled.
858                          */
859                         if (hwc->state & PERF_HES_STOPPED)
860                                 hwc->state |= PERF_HES_ARCH;
861
862                         x86_pmu_stop(event, PERF_EF_UPDATE);
863                 }
864
865                 for (i = 0; i < cpuc->n_events; i++) {
866                         event = cpuc->event_list[i];
867                         hwc = &event->hw;
868
869                         if (!match_prev_assignment(hwc, cpuc, i))
870                                 x86_assign_hw_event(event, cpuc, i);
871                         else if (i < n_running)
872                                 continue;
873
874                         if (hwc->state & PERF_HES_ARCH)
875                                 continue;
876
877                         x86_pmu_start(event, PERF_EF_RELOAD);
878                 }
879                 cpuc->n_added = 0;
880                 perf_events_lapic_init();
881         }
882
883         cpuc->enabled = 1;
884         barrier();
885
886         x86_pmu.enable_all(added);
887 }
888
889 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
890                                           u64 enable_mask)
891 {
892         wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
893 }
894
895 static inline void x86_pmu_disable_event(struct perf_event *event)
896 {
897         struct hw_perf_event *hwc = &event->hw;
898
899         wrmsrl(hwc->config_base + hwc->idx, hwc->config);
900 }
901
902 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
903
904 /*
905  * Set the next IRQ period, based on the hwc->period_left value.
906  * To be called with the event disabled in hw:
907  */
908 static int
909 x86_perf_event_set_period(struct perf_event *event)
910 {
911         struct hw_perf_event *hwc = &event->hw;
912         s64 left = local64_read(&hwc->period_left);
913         s64 period = hwc->sample_period;
914         int ret = 0, idx = hwc->idx;
915
916         if (idx == X86_PMC_IDX_FIXED_BTS)
917                 return 0;
918
919         /*
920          * If we are way outside a reasonable range then just skip forward:
921          */
922         if (unlikely(left <= -period)) {
923                 left = period;
924                 local64_set(&hwc->period_left, left);
925                 hwc->last_period = period;
926                 ret = 1;
927         }
928
929         if (unlikely(left <= 0)) {
930                 left += period;
931                 local64_set(&hwc->period_left, left);
932                 hwc->last_period = period;
933                 ret = 1;
934         }
935         /*
936          * Quirk: certain CPUs don't like it if just 1 hw_event is left:
937          */
938         if (unlikely(left < 2))
939                 left = 2;
940
941         if (left > x86_pmu.max_period)
942                 left = x86_pmu.max_period;
943
944         per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
945
946         /*
947          * The hw event starts counting from this event offset,
948          * mark it to be able to extract future deltas:
949          */
950         local64_set(&hwc->prev_count, (u64)-left);
951
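        /*
         * Programming the counter to -left means it overflows, and raises
         * the PMI, after exactly 'left' further increments.
         */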
952         wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);
953
954         /*
955          * Due to an erratum on certain CPUs we need
956          * a second write to be sure the register
957          * is updated properly
958          */
959         if (x86_pmu.perfctr_second_write) {
960                 wrmsrl(hwc->event_base + idx,
961                         (u64)(-left) & x86_pmu.cntval_mask);
962         }
963
964         perf_event_update_userpage(event);
965
966         return ret;
967 }
968
969 static void x86_pmu_enable_event(struct perf_event *event)
970 {
971         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
972         if (cpuc->enabled)
973                 __x86_pmu_enable_event(&event->hw,
974                                        ARCH_PERFMON_EVENTSEL_ENABLE);
975 }
976
977 /*
978  * Add a single event to the PMU.
979  *
980  * The event is added to the group of enabled events
981  * but only if it can be scheduled with existing events.
982  */
983 static int x86_pmu_add(struct perf_event *event, int flags)
984 {
985         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
986         struct hw_perf_event *hwc;
987         int assign[X86_PMC_IDX_MAX];
988         int n, n0, ret;
989
990         hwc = &event->hw;
991
992         perf_pmu_disable(event->pmu);
993         n0 = cpuc->n_events;
994         ret = n = collect_events(cpuc, event, false);
995         if (ret < 0)
996                 goto out;
997
998         hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
999         if (!(flags & PERF_EF_START))
1000                 hwc->state |= PERF_HES_ARCH;
1001
1002         /*
1003          * If a group event scheduling transaction was started,
1004          * skip the schedulability test here; it will be performed
1005          * at commit time (->commit_txn) as a whole
1006          */
1007         if (cpuc->group_flag & PERF_EVENT_TXN)
1008                 goto done_collect;
1009
1010         ret = x86_pmu.schedule_events(cpuc, n, assign);
1011         if (ret)
1012                 goto out;
1013         /*
1014          * copy the new assignment; now that we know it is possible,
1015          * it will be used by hw_perf_enable()
1016          */
1017         memcpy(cpuc->assign, assign, n*sizeof(int));
1018
1019 done_collect:
1020         cpuc->n_events = n;
1021         cpuc->n_added += n - n0;
1022         cpuc->n_txn += n - n0;
1023
1024         ret = 0;
1025 out:
1026         perf_pmu_enable(event->pmu);
1027         return ret;
1028 }
1029
1030 static void x86_pmu_start(struct perf_event *event, int flags)
1031 {
1032         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1033         int idx = event->hw.idx;
1034
1035         if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
1036                 return;
1037
1038         if (WARN_ON_ONCE(idx == -1))
1039                 return;
1040
1041         if (flags & PERF_EF_RELOAD) {
1042                 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
1043                 x86_perf_event_set_period(event);
1044         }
1045
1046         event->hw.state = 0;
1047
1048         cpuc->events[idx] = event;
1049         __set_bit(idx, cpuc->active_mask);
1050         __set_bit(idx, cpuc->running);
1051         x86_pmu.enable(event);
1052         perf_event_update_userpage(event);
1053 }
1054
1055 void perf_event_print_debug(void)
1056 {
1057         u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1058         u64 pebs;
1059         struct cpu_hw_events *cpuc;
1060         unsigned long flags;
1061         int cpu, idx;
1062
1063         if (!x86_pmu.num_counters)
1064                 return;
1065
1066         local_irq_save(flags);
1067
1068         cpu = smp_processor_id();
1069         cpuc = &per_cpu(cpu_hw_events, cpu);
1070
1071         if (x86_pmu.version >= 2) {
1072                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1073                 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1074                 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1075                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1076                 rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
1077
1078                 pr_info("\n");
1079                 pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
1080                 pr_info("CPU#%d: status:     %016llx\n", cpu, status);
1081                 pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
1082                 pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
1083                 pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
1084         }
1085         pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1086
1087         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1088                 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1089                 rdmsrl(x86_pmu.perfctr  + idx, pmc_count);
1090
1091                 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1092
1093                 pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
1094                         cpu, idx, pmc_ctrl);
1095                 pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
1096                         cpu, idx, pmc_count);
1097                 pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
1098                         cpu, idx, prev_left);
1099         }
1100         for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
1101                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1102
1103                 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1104                         cpu, idx, pmc_count);
1105         }
1106         local_irq_restore(flags);
1107 }
1108
1109 static void x86_pmu_stop(struct perf_event *event, int flags)
1110 {
1111         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1112         struct hw_perf_event *hwc = &event->hw;
1113
1114         if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
1115                 x86_pmu.disable(event);
1116                 cpuc->events[hwc->idx] = NULL;
1117                 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1118                 hwc->state |= PERF_HES_STOPPED;
1119         }
1120
1121         if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1122                 /*
1123          * Drain the remaining delta count out of an event
1124                  * that we are disabling:
1125                  */
1126                 x86_perf_event_update(event);
1127                 hwc->state |= PERF_HES_UPTODATE;
1128         }
1129 }
1130
1131 static void x86_pmu_del(struct perf_event *event, int flags)
1132 {
1133         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1134         int i;
1135
1136         /*
1137          * If we're called during a txn, we don't need to do anything.
1138          * The events never got scheduled and ->cancel_txn will truncate
1139          * the event_list.
1140          */
1141         if (cpuc->group_flag & PERF_EVENT_TXN)
1142                 return;
1143
1144         x86_pmu_stop(event, PERF_EF_UPDATE);
1145
1146         for (i = 0; i < cpuc->n_events; i++) {
1147                 if (event == cpuc->event_list[i]) {
1148
1149                         if (x86_pmu.put_event_constraints)
1150                                 x86_pmu.put_event_constraints(cpuc, event);
1151
1152                         while (++i < cpuc->n_events)
1153                                 cpuc->event_list[i-1] = cpuc->event_list[i];
1154
1155                         --cpuc->n_events;
1156                         break;
1157                 }
1158         }
1159         perf_event_update_userpage(event);
1160 }
1161
1162 static int x86_pmu_handle_irq(struct pt_regs *regs)
1163 {
1164         struct perf_sample_data data;
1165         struct cpu_hw_events *cpuc;
1166         struct perf_event *event;
1167         int idx, handled = 0;
1168         u64 val;
1169
1170         perf_sample_data_init(&data, 0);
1171
1172         cpuc = &__get_cpu_var(cpu_hw_events);
1173
1174         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1175                 if (!test_bit(idx, cpuc->active_mask)) {
1176                         /*
1177                          * Though we deactivated the counter, some CPUs
1178                          * might still deliver spurious interrupts that
1179                          * were already in flight. Catch them:
1180                          */
1181                         if (__test_and_clear_bit(idx, cpuc->running))
1182                                 handled++;
1183                         continue;
1184                 }
1185
1186                 event = cpuc->events[idx];
1187
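                /*
                 * The counter was programmed to -left; while its top bit
                 * (bit cntval_bits - 1) is still set it has not wrapped
                 * past zero, i.e. this event has not overflowed yet.
                 */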
1188                 val = x86_perf_event_update(event);
1189                 if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
1190                         continue;
1191
1192                 /*
1193                  * event overflow
1194                  */
1195                 handled++;
1196                 data.period     = event->hw.last_period;
1197
1198                 if (!x86_perf_event_set_period(event))
1199                         continue;
1200
1201                 if (perf_event_overflow(event, 1, &data, regs))
1202                         x86_pmu_stop(event, 0);
1203         }
1204
1205         if (handled)
1206                 inc_irq_stat(apic_perf_irqs);
1207
1208         return handled;
1209 }
1210
1211 void perf_events_lapic_init(void)
1212 {
1213         if (!x86_pmu.apic || !x86_pmu_initialized())
1214                 return;
1215
1216         /*
1217          * Always use NMI for PMU
1218          */
1219         apic_write(APIC_LVTPC, APIC_DM_NMI);
1220 }
1221
1222 struct pmu_nmi_state {
1223         unsigned int    marked;
1224         int             handled;
1225 };
1226
1227 static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
1228
1229 static int __kprobes
1230 perf_event_nmi_handler(struct notifier_block *self,
1231                          unsigned long cmd, void *__args)
1232 {
1233         struct die_args *args = __args;
1234         unsigned int this_nmi;
1235         int handled;
1236
1237         if (!atomic_read(&active_events))
1238                 return NOTIFY_DONE;
1239
1240         switch (cmd) {
1241         case DIE_NMI:
1242         case DIE_NMI_IPI:
1243                 break;
1244         case DIE_NMIUNKNOWN:
1245                 this_nmi = percpu_read(irq_stat.__nmi_count);
1246                 if (this_nmi != __get_cpu_var(pmu_nmi).marked)
1247                         /* let the kernel handle the unknown nmi */
1248                         return NOTIFY_DONE;
1249                 /*
1250                  * This one is a PMU back-to-back nmi. Two events
1251                  * trigger 'simultaneously' raising two back-to-back
1252                  * NMIs. If the first NMI handles both, the latter
1253                  * will be empty and daze the CPU. So, we drop it to
1254                  * avoid false-positive 'unknown nmi' messages.
1255                  */
1256                 return NOTIFY_STOP;
1257         default:
1258                 return NOTIFY_DONE;
1259         }
1260
1261         apic_write(APIC_LVTPC, APIC_DM_NMI);
1262
1263         handled = x86_pmu.handle_irq(args->regs);
1264         if (!handled)
1265                 return NOTIFY_DONE;
1266
1267         this_nmi = percpu_read(irq_stat.__nmi_count);
1268         if ((handled > 1) ||
1269                 /* the next nmi could be a back-to-back nmi */
1270             ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
1271              (__get_cpu_var(pmu_nmi).handled > 1))) {
1272                 /*
1273                  * We could have two subsequent back-to-back nmis: The
1274                  * first handles more than one counter, the 2nd
1275                  * handles only one counter and the 3rd handles no
1276                  * counter.
1277                  *
1278                  * This is the 2nd nmi because the previous was
1279                  * handling more than one counter. We will mark the
1280                  * next (3rd) and then drop it if unhandled.
1281                  */
1282                 __get_cpu_var(pmu_nmi).marked   = this_nmi + 1;
1283                 __get_cpu_var(pmu_nmi).handled  = handled;
1284         }
1285
1286         return NOTIFY_STOP;
1287 }
1288
1289 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1290         .notifier_call          = perf_event_nmi_handler,
1291         .next                   = NULL,
1292         .priority               = 1
1293 };
1294
1295 static struct event_constraint unconstrained;
1296 static struct event_constraint emptyconstraint;
1297
1298 static struct event_constraint *
1299 x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1300 {
1301         struct event_constraint *c;
1302
1303         if (x86_pmu.event_constraints) {
1304                 for_each_event_constraint(c, x86_pmu.event_constraints) {
1305                         if ((event->hw.config & c->cmask) == c->code)
1306                                 return c;
1307                 }
1308         }
1309
1310         return &unconstrained;
1311 }
1312
1313 #include "perf_event_amd.c"
1314 #include "perf_event_p6.c"
1315 #include "perf_event_p4.c"
1316 #include "perf_event_intel_lbr.c"
1317 #include "perf_event_intel_ds.c"
1318 #include "perf_event_intel.c"
1319
1320 static int __cpuinit
1321 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1322 {
1323         unsigned int cpu = (long)hcpu;
1324         int ret = NOTIFY_OK;
1325
1326         switch (action & ~CPU_TASKS_FROZEN) {
1327         case CPU_UP_PREPARE:
1328                 if (x86_pmu.cpu_prepare)
1329                         ret = x86_pmu.cpu_prepare(cpu);
1330                 break;
1331
1332         case CPU_STARTING:
1333                 if (x86_pmu.cpu_starting)
1334                         x86_pmu.cpu_starting(cpu);
1335                 break;
1336
1337         case CPU_DYING:
1338                 if (x86_pmu.cpu_dying)
1339                         x86_pmu.cpu_dying(cpu);
1340                 break;
1341
1342         case CPU_UP_CANCELED:
1343         case CPU_DEAD:
1344                 if (x86_pmu.cpu_dead)
1345                         x86_pmu.cpu_dead(cpu);
1346                 break;
1347
1348         default:
1349                 break;
1350         }
1351
1352         return ret;
1353 }
1354
1355 static void __init pmu_check_apic(void)
1356 {
1357         if (cpu_has_apic)
1358                 return;
1359
1360         x86_pmu.apic = 0;
1361         pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1362         pr_info("no hardware sampling interrupt available.\n");
1363 }
1364
1365 void __init init_hw_perf_events(void)
1366 {
1367         struct event_constraint *c;
1368         int err;
1369
1370         pr_info("Performance Events: ");
1371
1372         switch (boot_cpu_data.x86_vendor) {
1373         case X86_VENDOR_INTEL:
1374                 err = intel_pmu_init();
1375                 break;
1376         case X86_VENDOR_AMD:
1377                 err = amd_pmu_init();
1378                 break;
1379         default:
1380                 return;
1381         }
1382         if (err != 0) {
1383                 pr_cont("no PMU driver, software events only.\n");
1384                 return;
1385         }
1386
1387         pmu_check_apic();
1388
1389         /* sanity check that the hardware exists or is emulated */
1390         if (!check_hw_exists()) {
1391                 pr_cont("Broken PMU hardware detected, software events only.\n");
1392                 return;
1393         }
1394
1395         pr_cont("%s PMU driver.\n", x86_pmu.name);
1396
1397         if (x86_pmu.quirks)
1398                 x86_pmu.quirks();
1399
1400         if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
1401                 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
1402                      x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
1403                 x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
1404         }
1405         x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
1406
1407         if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
1408                 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
1409                      x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
1410                 x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
1411         }
1412
1413         x86_pmu.intel_ctrl |=
1414                 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
1415
1416         perf_events_lapic_init();
1417         register_die_notifier(&perf_event_nmi_notifier);
1418
1419         unconstrained = (struct event_constraint)
1420                 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1421                                    0, x86_pmu.num_counters);
1422
1423         if (x86_pmu.event_constraints) {
1424                 for_each_event_constraint(c, x86_pmu.event_constraints) {
1425                         if (c->cmask != X86_RAW_EVENT_MASK)
1426                                 continue;
1427
1428                         c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
1429                         c->weight += x86_pmu.num_counters;
1430                 }
1431         }
1432
1433         pr_info("... version:                %d\n",     x86_pmu.version);
1434         pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
1435         pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
1436         pr_info("... value mask:             %016Lx\n", x86_pmu.cntval_mask);
1437         pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
1438         pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
1439         pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
1440
1441         perf_pmu_register(&pmu);
1442         perf_cpu_notifier(x86_pmu_notifier);
1443 }
1444
1445 static inline void x86_pmu_read(struct perf_event *event)
1446 {
1447         x86_perf_event_update(event);
1448 }
1449
1450 /*
1451  * Start group events scheduling transaction
1452  * Set the flag to make pmu::enable() not perform the
1453  * schedulability test; it will be performed at commit time
1454  */
1455 static void x86_pmu_start_txn(struct pmu *pmu)
1456 {
1457         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1458
1459         perf_pmu_disable(pmu);
1460         cpuc->group_flag |= PERF_EVENT_TXN;
1461         cpuc->n_txn = 0;
1462 }
1463
1464 /*
1465  * Stop group events scheduling transaction
1466  * Clear the flag and pmu::enable() will perform the
1467  * schedulability test.
1468  */
1469 static void x86_pmu_cancel_txn(struct pmu *pmu)
1470 {
1471         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1472
1473         cpuc->group_flag &= ~PERF_EVENT_TXN;
1474         /*
1475          * Truncate the collected events.
1476          */
1477         cpuc->n_added -= cpuc->n_txn;
1478         cpuc->n_events -= cpuc->n_txn;
1479         perf_pmu_enable(pmu);
1480 }
1481
1482 /*
1483  * Commit group events scheduling transaction
1484  * Perform the group schedulability test as a whole
1485  * Return 0 on success
1486  */
1487 static int x86_pmu_commit_txn(struct pmu *pmu)
1488 {
1489         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1490         int assign[X86_PMC_IDX_MAX];
1491         int n, ret;
1492
1493         n = cpuc->n_events;
1494
1495         if (!x86_pmu_initialized())
1496                 return -EAGAIN;
1497
1498         ret = x86_pmu.schedule_events(cpuc, n, assign);
1499         if (ret)
1500                 return ret;
1501
1502         /*
1503          * copy the new assignment; now that we know it is possible,
1504          * it will be used by hw_perf_enable()
1505          */
1506         memcpy(cpuc->assign, assign, n*sizeof(int));
1507
1508         cpuc->group_flag &= ~PERF_EVENT_TXN;
1509         perf_pmu_enable(pmu);
1510         return 0;
1511 }
1512
1513 /*
1514  * validate that we can schedule this event
1515  */
1516 static int validate_event(struct perf_event *event)
1517 {
1518         struct cpu_hw_events *fake_cpuc;
1519         struct event_constraint *c;
1520         int ret = 0;
1521
1522         fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1523         if (!fake_cpuc)
1524                 return -ENOMEM;
1525
1526         c = x86_pmu.get_event_constraints(fake_cpuc, event);
1527
1528         if (!c || !c->weight)
1529                 ret = -ENOSPC;
1530
1531         if (x86_pmu.put_event_constraints)
1532                 x86_pmu.put_event_constraints(fake_cpuc, event);
1533
1534         kfree(fake_cpuc);
1535
1536         return ret;
1537 }
1538
1539 /*
1540  * validate a single event group
1541  *
1542  * validation includes:
1543  *      - check events are compatible with each other
1544  *      - events do not compete for the same counter
1545  *      - number of events <= number of counters
1546  *
1547  * validation ensures the group can be loaded onto the
1548  * PMU if it was the only group available.
1549  */
1550 static int validate_group(struct perf_event *event)
1551 {
1552         struct perf_event *leader = event->group_leader;
1553         struct cpu_hw_events *fake_cpuc;
1554         int ret, n;
1555
1556         ret = -ENOMEM;
1557         fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1558         if (!fake_cpuc)
1559                 goto out;
1560
1561         /*
1562          * the event is not yet connected with its
1563          * siblings; therefore we must first collect
1564          * existing siblings, then add the new event
1565          * before we can simulate the scheduling
1566          */
1567         ret = -ENOSPC;
1568         n = collect_events(fake_cpuc, leader, true);
1569         if (n < 0)
1570                 goto out_free;
1571
1572         fake_cpuc->n_events = n;
1573         n = collect_events(fake_cpuc, event, false);
1574         if (n < 0)
1575                 goto out_free;
1576
1577         fake_cpuc->n_events = n;
1578
1579         ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
1580
1581 out_free:
1582         kfree(fake_cpuc);
1583 out:
1584         return ret;
1585 }
1586
1587 int x86_pmu_event_init(struct perf_event *event)
1588 {
1589         struct pmu *tmp;
1590         int err;
1591
1592         switch (event->attr.type) {
1593         case PERF_TYPE_RAW:
1594         case PERF_TYPE_HARDWARE:
1595         case PERF_TYPE_HW_CACHE:
1596                 break;
1597
1598         default:
1599                 return -ENOENT;
1600         }
1601
1602         err = __x86_pmu_event_init(event);
1603         if (!err) {
1604                 /*
1605                  * we temporarily connect the event to its pmu
1606                  * such that validate_group() can classify
1607                  * it as an x86 event using is_x86_event()
1608                  */
1609                 tmp = event->pmu;
1610                 event->pmu = &pmu;
1611
1612                 if (event->group_leader != event)
1613                         err = validate_group(event);
1614                 else
1615                         err = validate_event(event);
1616
1617                 event->pmu = tmp;
1618         }
1619         if (err) {
1620                 if (event->destroy)
1621                         event->destroy(event);
1622         }
1623
1624         return err;
1625 }
1626
1627 static struct pmu pmu = {
1628         .pmu_enable     = x86_pmu_enable,
1629         .pmu_disable    = x86_pmu_disable,
1630
1631         .event_init     = x86_pmu_event_init,
1632
1633         .add            = x86_pmu_add,
1634         .del            = x86_pmu_del,
1635         .start          = x86_pmu_start,
1636         .stop           = x86_pmu_stop,
1637         .read           = x86_pmu_read,
1638
1639         .start_txn      = x86_pmu_start_txn,
1640         .cancel_txn     = x86_pmu_cancel_txn,
1641         .commit_txn     = x86_pmu_commit_txn,
1642 };
1643
1644 /*
1645  * callchain support
1646  */
1647
1648 static void
1649 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
1650 {
1651         /* Ignore warnings */
1652 }
1653
1654 static void backtrace_warning(void *data, char *msg)
1655 {
1656         /* Ignore warnings */
1657 }
1658
1659 static int backtrace_stack(void *data, char *name)
1660 {
1661         return 0;
1662 }
1663
1664 static void backtrace_address(void *data, unsigned long addr, int reliable)
1665 {
1666         struct perf_callchain_entry *entry = data;
1667
1668         perf_callchain_store(entry, addr);
1669 }
1670
1671 static const struct stacktrace_ops backtrace_ops = {
1672         .warning                = backtrace_warning,
1673         .warning_symbol         = backtrace_warning_symbol,
1674         .stack                  = backtrace_stack,
1675         .address                = backtrace_address,
1676         .walk_stack             = print_context_stack_bp,
1677 };
1678
1679 void
1680 perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
1681 {
1682         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1683                 /* TODO: We don't support guest os callchain now */
1684                 return;
1685         }
1686
1687         perf_callchain_store(entry, regs->ip);
1688
1689         dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
1690 }
1691
1692 #ifdef CONFIG_COMPAT
1693 static inline int
1694 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1695 {
1696         /* 32-bit process in 64-bit kernel. */
1697         struct stack_frame_ia32 frame;
1698         const void __user *fp;
1699
1700         if (!test_thread_flag(TIF_IA32))
1701                 return 0;
1702
1703         fp = compat_ptr(regs->bp);
1704         while (entry->nr < PERF_MAX_STACK_DEPTH) {
1705                 unsigned long bytes;
1706                 frame.next_frame     = 0;
1707                 frame.return_address = 0;
1708
1709                 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1710                 if (bytes != sizeof(frame))
1711                         break;
1712
1713                 if (fp < compat_ptr(regs->sp))
1714                         break;
1715
1716                 perf_callchain_store(entry, frame.return_address);
1717                 fp = compat_ptr(frame.next_frame);
1718         }
1719         return 1;
1720 }
1721 #else
1722 static inline int
1723 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1724 {
1725         return 0;
1726 }
1727 #endif
1728
1729 void
1730 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
1731 {
1732         struct stack_frame frame;
1733         const void __user *fp;
1734
1735         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1736                 /* TODO: We don't support guest os callchain now */
1737                 return;
1738         }
1739
1740         fp = (void __user *)regs->bp;
1741
1742         perf_callchain_store(entry, regs->ip);
1743
1744         if (perf_callchain_user32(regs, entry))
1745                 return;
1746
1747         while (entry->nr < PERF_MAX_STACK_DEPTH) {
1748                 unsigned long bytes;
1749                 frame.next_frame             = NULL;
1750                 frame.return_address = 0;
1751
1752                 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1753                 if (bytes != sizeof(frame))
1754                         break;
1755
1756                 if ((unsigned long)fp < regs->sp)
1757                         break;
1758
1759                 perf_callchain_store(entry, frame.return_address);
1760                 fp = frame.next_frame;
1761         }
1762 }
1763
1764 unsigned long perf_instruction_pointer(struct pt_regs *regs)
1765 {
1766         unsigned long ip;
1767
1768         if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
1769                 ip = perf_guest_cbs->get_guest_ip();
1770         else
1771                 ip = instruction_pointer(regs);
1772
1773         return ip;
1774 }
1775
1776 unsigned long perf_misc_flags(struct pt_regs *regs)
1777 {
1778         int misc = 0;
1779
1780         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1781                 if (perf_guest_cbs->is_user_mode())
1782                         misc |= PERF_RECORD_MISC_GUEST_USER;
1783                 else
1784                         misc |= PERF_RECORD_MISC_GUEST_KERNEL;
1785         } else {
1786                 if (user_mode(regs))
1787                         misc |= PERF_RECORD_MISC_USER;
1788                 else
1789                         misc |= PERF_RECORD_MISC_KERNEL;
1790         }
1791
1792         if (regs->flags & PERF_EFLAGS_EXACT)
1793                 misc |= PERF_RECORD_MISC_EXACT_IP;
1794
1795         return misc;
1796 }