/*
 * arch/x86/kernel/cpu/perf_event_intel_ds.c
 *
 * From pandora-kernel.git, as of commit "KPTI: Rename to PAGE_TABLE_ISOLATION".
 */
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>

#include <asm/kaiser.h>
#include <asm/perf_event.h>

#include "perf_event.h"

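/*
 * The USER_MAPPED per-cpu variant places cpu_debug_store in a section
 * that KAISER also maps into the user (shadow) page tables, so the
 * debug store area stays visible regardless of which CR3 is loaded.
 */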
static
DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(struct debug_store, cpu_debug_store);

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE	PAGE_SIZE

/*
 * pebs_record_32 for p4 and core not supported

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bx, cx, dx;
	u32 si, di, bp, sp;
};

 */

struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};

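/*
 * Nehalem and later append four fields to the core record: status is a
 * copy of the global status register, used by the drain routine below
 * as the mask of counters the record applies to; dla/dse/lat carry the
 * data linear address, data source encoding and latency for memory
 * profiling events.
 */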
struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};

void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

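/*
 * With PAGE_TABLE_ISOLATION the CPU can write BTS/PEBS records while
 * the user (shadow) page tables are active, so the buffers must also
 * be present in the shadow copy: kaiser_add_mapping() mirrors them
 * there, and dsfree() must undo that mapping before freeing the pages.
 */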
static void *dsalloc(size_t size, gfp_t flags, int node)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	unsigned int order = get_order(size);
	struct page *page;
	unsigned long addr;

	page = alloc_pages_node(node, flags | __GFP_ZERO, order);
	if (!page)
		return NULL;
	addr = (unsigned long)page_address(page);
	if (kaiser_add_mapping(addr, size, __PAGE_KERNEL) < 0) {
		__free_pages(page, order);
		addr = 0;
	}
	return (void *)addr;
#else
	return kmalloc_node(size, flags | __GFP_ZERO, node);
#endif
}

static void dsfree(const void *buffer, size_t size)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (!buffer)
		return;
	kaiser_remove_mapping((unsigned long)buffer, size);
	free_pages((unsigned long)buffer, get_order(size));
#else
	kfree(buffer);
#endif
}

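/*
 * The DS pointers are programmed into the hardware via MSR_IA32_DS_AREA,
 * hence the u64 casts below.  The interrupt threshold is set one record
 * past the base, so every PEBS event raises a PMI and the buffer is
 * drained immediately.
 */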
static int alloc_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh = 1; /* always use a single PEBS record */
	void *buffer;

	if (!x86_pmu.pebs)
		return 0;

	buffer = dsalloc(PEBS_BUFFER_SIZE, GFP_KERNEL, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;

	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
	ds->pebs_index = ds->pebs_buffer_base;
	ds->pebs_absolute_maximum = ds->pebs_buffer_base +
		max * x86_pmu.pebs_record_size;

	ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
		thresh * x86_pmu.pebs_record_size;

	return 0;
}

static void release_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.pebs)
		return;

	dsfree((void *)(unsigned long)ds->pebs_buffer_base, PEBS_BUFFER_SIZE);
	ds->pebs_buffer_base = 0;
}

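/*
 * Unlike PEBS, BTS fills the buffer record by record, so the interrupt
 * threshold is set a sixteenth of the buffer below the absolute
 * maximum: the PMI fires while there is still headroom for records
 * that land before tracing is stopped.
 */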
static int alloc_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh;
	void *buffer;

	if (!x86_pmu.bts)
		return 0;

	buffer = dsalloc(BTS_BUFFER_SIZE, GFP_KERNEL, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	thresh = max / 16;

	ds->bts_buffer_base = (u64)(unsigned long)buffer;
	ds->bts_index = ds->bts_buffer_base;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
		max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
		thresh * BTS_RECORD_SIZE;

	return 0;
}

static void release_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.bts)
		return;

	dsfree((void *)(unsigned long)ds->bts_buffer_base, BTS_BUFFER_SIZE);
	ds->bts_buffer_base = 0;
}

static int alloc_ds_buffer(int cpu)
{
	struct debug_store *ds = per_cpu_ptr(&cpu_debug_store, cpu);

	memset(ds, 0, sizeof(*ds));
	per_cpu(cpu_hw_events, cpu).ds = ds;

	return 0;
}

static void release_ds_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	per_cpu(cpu_hw_events, cpu).ds = NULL;
}

void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	get_online_cpus();
	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
		release_ds_buffer(cpu);
	}
	put_online_cpus();
}

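/*
 * Allocation is all-or-nothing per facility: buffers are set up for
 * every possible CPU, and a single failure disables BTS or PEBS
 * everywhere.  bts_active/pebs_active record what actually came up,
 * as opposed to what the hardware merely advertises.
 */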
void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();
}

/*
 * BTS
 */

struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);

void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

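/*
 * Convert the raw hardware records (from, to, flags) into perf samples:
 * one output handle is opened for the whole batch, and each record is
 * emitted with data.ip set to the branch source and data.addr to the
 * branch target.
 */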
int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return 0;

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;
	regs.ip     = 0;

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size * (top - at)))
		return 1;

	for (; at < top; at++) {
		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
	return 1;
}

/*
 * PEBS
 */
struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
	INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_snb_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8),    /* MEM_TRANS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
	INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
	INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
	INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
	EVENT_CONSTRAINT_END
};

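/*
 * Find the PEBS constraint matching a precise event.  Events without
 * precise_ip are none of our business; precise events with no matching
 * constraint get the empty constraint, i.e. they fail to schedule.
 */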
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &emptyconstraint;
}

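/*
 * PEBS-enabled counters must not raise ordinary overflow interrupts;
 * the PMI comes from the DS area threshold instead, so EVENTSEL_INT is
 * cleared here and restored on disable.  For precise_ip > 1 the LBR is
 * enabled as well, to feed the instruction rewind in
 * intel_pmu_pebs_fixup_ip().
 */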
void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;
	WARN_ON_ONCE(cpuc->enabled);

	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		intel_pmu_lbr_enable(event);
}

void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;

	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		intel_pmu_lbr_disable(event);
}

void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}

void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}

#include <asm/insn.h>

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}

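/*
 * A trap-like PEBS assist records the address of the instruction
 * *after* the one that caused the event.  To report an exact IP, take
 * the last LBR entry (a branch from -> to into the sampled basic
 * block) and decode forward from 'to' until the recorded IP is
 * reached; the previously decoded instruction is the real culprit.
 * Returns 1 when the IP could be fixed up exactly.
 */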
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;
	int is_64bit = 0;

	/*
	 * We don't need to fixup if the PEBS assist is fault like
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * unsigned math, either ip is before the start (impossible) or
	 * the basic block is larger than 1 page (sanity)
	 */
	if ((ip - to) > PAGE_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack
	 */
	if (ip == to) {
		regs->ip = from;
		return 1;
	}

	do {
		struct insn insn;
		u8 buf[MAX_INSN_SIZE];
		void *kaddr;

		old_to = to;
		if (!kernel_ip(ip)) {
			int bytes, size = MAX_INSN_SIZE;

			bytes = copy_from_user_nmi(buf, (void __user *)to, size);
			if (bytes != size)
				return 0;

			kaddr = buf;
		} else
			kaddr = (void *)to;

#ifdef CONFIG_X86_64
		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
#endif
		insn_init(&insn, kaddr, is_64bit);
		insn_get_length(&insn);
		to += insn.length;
	} while (to < ip);

	if (to == ip) {
		regs->ip = old_to;
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * never matched the given IP, either the TO or the IP got corrupted.
	 */
	return 0;
}

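/*
 * Expand one PEBS record into a perf sample.  PERF_EFLAGS_EXACT is set
 * only when the LBR fixup above succeeded, so consumers can tell
 * skid-free samples from approximate ones.
 */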
static void __intel_pmu_pebs_event(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs)
{
	/*
	 * We cast to pebs_record_core since that is a subset of
	 * both formats and we don't use the other fields in this
	 * routine.
	 */
	struct pebs_record_core *pebs = __pebs;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!intel_pmu_save_and_restart(event))
		return;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;

	/*
	 * We use the interrupt regs as a base because the PEBS record
	 * does not contain a full regs set, specifically it seems to
	 * lack segment descriptors, which get used by things like
	 * user_mode().
	 *
	 * In the simple case fix up only the IP and BP,SP regs, for
	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
	 */
	regs = *iregs;
	regs.ip = pebs->ip;
	regs.bp = pebs->bp;
	regs.sp = pebs->sp;

	if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
		regs.flags |= PERF_EFLAGS_EXACT;
	else
		regs.flags &= ~PERF_EFLAGS_EXACT;

	if (perf_event_overflow(event, &data, &regs))
		x86_pmu_stop(event, 0);
}

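/*
 * Format 0 (Core) supports a single PEBS counter, PMC0, and the
 * threshold is programmed at one record, so at most one entry should
 * ever be pending; only the most recent record is consumed.
 */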
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
	struct pebs_record_core *at, *top;
	int n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

	/*
	 * Whatever else happens, drain the thing
	 */
	ds->pebs_index = ds->pebs_buffer_base;

	if (!test_bit(0, cpuc->active_mask))
		return;

	WARN_ON_ONCE(!event);

	if (!event->attr.precise_ip)
		return;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ON_ONCE(n > 1);
	at += n - 1;

	__intel_pmu_pebs_event(event, iregs, at);
}

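/*
 * Format 1 (Nehalem) records carry a status bitmask of the counters
 * they apply to.  Each record is attributed to the first active,
 * precise counter in that mask that has not yet claimed a record in
 * this drain pass.
 */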
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct pebs_record_nhm *at, *top;
	struct perf_event *event = NULL;
	u64 status = 0;
	int bit, n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ON_ONCE(n > MAX_PEBS_EVENTS);

	for ( ; at < top; at++) {
		for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
			event = cpuc->events[bit];
			if (!test_bit(bit, cpuc->active_mask))
				continue;

			WARN_ON_ONCE(!event);

			if (!event->attr.precise_ip)
				continue;

			if (__test_and_set_bit(bit, (unsigned long *)&status))
				continue;

			break;
		}

		if (!event || bit >= MAX_PEBS_EVENTS)
			continue;

		__intel_pmu_pebs_event(event, iregs, at);
	}
}

/*
 * BTS, PEBS probe and setup
 */

void intel_ds_init(void)
{
	/*
	 * No support for 32bit formats
	 */
	if (!boot_cpu_has(X86_FEATURE_DTES64))
		return;

	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ?  '+' : '-';
		int format = x86_pmu.intel_cap.pebs_format;

		switch (format) {
		case 0:
			printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
			break;

		case 1:
			printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		default:
			printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
			x86_pmu.pebs = 0;
		}
	}
}

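/*
 * MSR_IA32_DS_AREA does not survive suspend; rewrite it on the resume
 * path so the hardware keeps logging into the buffers set up above.
 */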
void perf_restore_debug_store(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
}