/*
 * Intel(R) Processor Trace PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Intel PT is specified in the Intel Architecture Instruction Set Extensions
 * Programming Reference:
 * http://software.intel.com/en-us/intel-isa-extensions
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <asm/perf_event.h>

#include "perf_event.h"
static DEFINE_PER_CPU(struct pt, pt_ctx);

static struct pt_pmu pt_pmu;
/*
 * Capabilities of Intel PT hardware, such as number of address bits or
 * supported output schemes, are cached and exported to userspace as the
 * "caps" attribute group of the pt pmu device
 * (/sys/bus/event_source/devices/intel_pt/caps/) so that userspace can store
 * relevant bits together with intel_pt traces.
 *
 * These are necessary both for trace decoding (payloads_lip, for example,
 * contains the address width encoded in IP-related packets) and for event
 * configuration (bitmasks with permitted values for certain bit fields).
 */
#define PT_CAP(_n, _l, _r, _m)						\
	[PT_CAP_ ## _n] = { .name = __stringify(_n), .leaf = _l,	\
			    .reg = _r, .mask = _m }
static struct pt_cap_desc {
	const char	*name;
	u32		leaf;
	u8		reg;
	u32		mask;
} pt_caps[] = {
	PT_CAP(max_subleaf,		0, CR_EAX, 0xffffffff),
	PT_CAP(cr3_filtering,		0, CR_EBX, BIT(0)),
	PT_CAP(topa_output,		0, CR_ECX, BIT(0)),
	PT_CAP(topa_multiple_entries,	0, CR_ECX, BIT(1)),
	PT_CAP(payloads_lip,		0, CR_ECX, BIT(31)),
};
static u32 pt_cap_get(enum pt_capabilities cap)
{
	struct pt_cap_desc *cd = &pt_caps[cap];
	u32 c = pt_pmu.caps[cd->leaf * 4 + cd->reg];
	unsigned int shift = __ffs(cd->mask);

	return (c & cd->mask) >> shift;
}
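/*
 * Worked example (hypothetical CPUID values): for topa_multiple_entries,
 * cd->mask == BIT(1), so __ffs() gives a shift of 1; a cached ECX value of
 * 0x3 then yields (0x3 & 0x2) >> 1 == 1, i.e. the capability is present.
 */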
static ssize_t pt_cap_show(struct device *cdev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	enum pt_capabilities cap = (long)ea->var;

	return snprintf(buf, PAGE_SIZE, "%x\n", pt_cap_get(cap));
}
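/*
 * Userspace can read these capabilities back before decoding a trace; a
 * minimal sketch (error handling omitted, path as exported above):
 *
 *	unsigned int multi = 0;
 *	FILE *f = fopen("/sys/bus/event_source/devices/intel_pt/caps/"
 *			"topa_multiple_entries", "r");
 *	fscanf(f, "%x", &multi);
 *	fclose(f);
 */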
static struct attribute_group pt_cap_group = {
	.name	= "caps",
};
PMU_FORMAT_ATTR(tsc,		"config:10");
PMU_FORMAT_ATTR(noretcomp,	"config:11");
static struct attribute *pt_formats_attr[] = {
	&format_attr_tsc.attr,
	&format_attr_noretcomp.attr,
	NULL,
};
static struct attribute_group pt_format_group = {
	.name	= "format",
	.attrs	= pt_formats_attr,
};
static const struct attribute_group *pt_attr_groups[] = {
	&pt_cap_group,
	&pt_format_group,
	NULL,
};
static int __init pt_pmu_hw_init(void)
{
	struct dev_ext_attribute *de_attrs;
	struct attribute **attrs;
	size_t size;
	long i;

	if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_INTEL_PT))
		return -ENODEV;

	for (i = 0; i < PT_CPUID_LEAVES; i++)
		cpuid_count(20, i, /* CPUID leaf 0x14: Intel PT */
			    &pt_pmu.caps[CR_EAX + i * 4],
			    &pt_pmu.caps[CR_EBX + i * 4],
			    &pt_pmu.caps[CR_ECX + i * 4],
			    &pt_pmu.caps[CR_EDX + i * 4]);

	size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps) + 1);
	attrs = kzalloc(size, GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps) + 1);
	de_attrs = kzalloc(size, GFP_KERNEL);
	if (!de_attrs) {
		kfree(attrs);
		return -ENOMEM;
	}

	for (i = 0; i < ARRAY_SIZE(pt_caps); i++) {
		de_attrs[i].attr.attr.name = pt_caps[i].name;

		sysfs_attr_init(&de_attrs[i].attr.attr);
		de_attrs[i].attr.attr.mode = S_IRUGO;
		de_attrs[i].attr.show = pt_cap_show;
		de_attrs[i].var = (void *)i;
		attrs[i] = &de_attrs[i].attr.attr;
	}

	pt_cap_group.attrs = attrs;

	return 0;
}
#define PT_CONFIG_MASK (RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC)
static bool pt_event_valid(struct perf_event *event)
{
	u64 config = event->attr.config;

	if ((config & PT_CONFIG_MASK) != config)
		return false;

	return true;
}
/*
 * PT configuration helpers
 * These all are cpu affine and operate on a local PT
 */
static bool pt_is_running(void)
{
	u64 ctl;

	rdmsrl(MSR_IA32_RTIT_CTL, ctl);

	return !!(ctl & RTIT_CTL_TRACEEN);
}
static void pt_config(struct perf_event *event)
{
	u64 reg;

	reg = RTIT_CTL_TOPA | RTIT_CTL_BRANCH_EN | RTIT_CTL_TRACEEN;

	if (!event->attr.exclude_kernel)
		reg |= RTIT_CTL_OS;
	if (!event->attr.exclude_user)
		reg |= RTIT_CTL_USR;

	reg |= (event->attr.config & PT_CONFIG_MASK);

	wrmsrl(MSR_IA32_RTIT_CTL, reg);
}
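/*
 * For example, an event with exclude_kernel == 0, exclude_user == 0 and
 * attr.config == BIT(10) (the "tsc" format bit above) programs RTIT_CTL
 * with TRACEEN | TOPA | BRANCH_EN | OS | USR | TSC_EN.
 */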
static void pt_config_start(bool start)
{
	u64 ctl;

	rdmsrl(MSR_IA32_RTIT_CTL, ctl);
	if (start)
		ctl |= RTIT_CTL_TRACEEN;
	else
		ctl &= ~RTIT_CTL_TRACEEN;
	wrmsrl(MSR_IA32_RTIT_CTL, ctl);

	/*
	 * A wrmsr that disables trace generation serializes other PT
	 * registers and causes all data packets to be written to memory,
	 * but a fence is required for the data to become globally visible.
	 *
	 * The below WMB, separating data store and aux_head store, matches
	 * the consumer's RMB that separates aux_head load and data load.
	 */
	if (!start)
		wmb();
}
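/*
 * The matching consumer side lives in userspace; a sketch of the pairing,
 * modelled on the perf mmap protocol (names illustrative):
 *
 *	head = READ_ONCE(pc->aux_head);
 *	rmb();				<-- pairs with the wmb() above
 *	decode(aux_area + tail, aux_area + head);
 *	mb();
 *	WRITE_ONCE(pc->aux_tail, head);
 */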
static void pt_config_buffer(void *buf, unsigned int topa_idx,
			     unsigned int output_off)
{
	u64 reg;

	wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, virt_to_phys(buf));

	reg = 0x7f | ((u64)topa_idx << 7) | ((u64)output_off << 32);

	wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
}
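/*
 * The OUTPUT_MASK layout programmed above: bits 6:0 are reserved and
 * written as ones (0x7f), bits 31:7 index the current ToPA table entry and
 * bits 63:32 hold the byte offset into the current output region; see
 * pt_read_offset() below, which decodes the same register.
 */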
/*
 * Keep ToPA table-related metadata on the same page as the actual table,
 * taking up a few words from the top
 */

#define TENTS_PER_PAGE (((PAGE_SIZE - 40) / sizeof(struct topa_entry)) - 1)
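/*
 * Assuming 4k pages and 8-byte ToPA entries: (4096 - 40) / 8 == 507 entries
 * fit beside the metadata, and reserving one slot for the END link leaves
 * TENTS_PER_PAGE == 506.
 */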
/**
 * struct topa - page-sized ToPA table with metadata at the top
 * @table:	actual ToPA table entries, as understood by PT hardware
 * @list:	linkage to struct pt_buffer's list of tables
 * @phys:	physical address of this page
 * @offset:	offset of the first entry in this table in the buffer
 * @size:	total size of all entries in this table
 * @last:	index of the last initialized entry in this table
 */
struct topa {
	struct topa_entry	table[TENTS_PER_PAGE];
	struct list_head	list;
	u64			phys;
	u64			offset;
	size_t			size;
	int			last;
};
/* make -1 stand for the last table entry */
#define TOPA_ENTRY(t, i) ((i) == -1 ? &(t)->table[(t)->last] : &(t)->table[(i)])
/**
 * topa_alloc() - allocate page-sized ToPA table
 * @cpu:	CPU on which to allocate.
 * @gfp:	Allocation flags.
 *
 * Return: On success, return the pointer to ToPA table page.
 */
static struct topa *topa_alloc(int cpu, gfp_t gfp)
{
	int node = cpu_to_node(cpu);
	struct topa *topa;
	struct page *p;

	p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
	if (!p)
		return NULL;

	topa = page_address(p);
	topa->last = 0;
	topa->phys = page_to_phys(p);

	/*
	 * In case of single-entry ToPA, always put the self-referencing END
	 * link as the 2nd entry in the table
	 */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT;
		TOPA_ENTRY(topa, 1)->end = 1;
	}

	return topa;
}
/**
 * topa_free() - free a page-sized ToPA table
 * @topa:	Table to deallocate.
 */
static void topa_free(struct topa *topa)
{
	free_page((unsigned long)topa);
}
/**
 * topa_insert_table() - insert a ToPA table into a buffer
 * @buf:	PT buffer that's being extended.
 * @topa:	New topa table to be inserted.
 *
 * If it's the first table in this buffer, set up buffer's pointers
 * accordingly; otherwise, add an END=1 link entry pointing to @topa to the
 * current "last" table and adjust the last table pointer to @topa.
 */
static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
{
	struct topa *last = buf->last;

	list_add_tail(&topa->list, &buf->tables);

	if (!buf->first) {
		buf->first = buf->last = buf->cur = topa;
		return;
	}

	topa->offset = last->offset + last->size;
	buf->last = topa;

	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		return;

	BUG_ON(last->last != TENTS_PER_PAGE - 1);

	TOPA_ENTRY(last, -1)->base = topa->phys >> TOPA_SHIFT;
	TOPA_ENTRY(last, -1)->end = 1;
}
/**
 * topa_table_full() - check if a ToPA table is filled up
 * @topa:	ToPA table.
 */
static bool topa_table_full(struct topa *topa)
{
	/* single-entry ToPA is a special case */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		return !!topa->last;

	return topa->last == TENTS_PER_PAGE - 1;
}
/**
 * topa_insert_pages() - create a list of ToPA tables
 * @buf:	PT buffer being initialized.
 * @gfp:	Allocation flags.
 *
 * This initializes a list of ToPA tables with entries from
 * the data_pages provided by rb_alloc_aux().
 *
 * Return: 0 on success or error code.
 */
static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
{
	struct topa *topa = buf->last;
	int order = 0;
	struct page *p;

	p = virt_to_page(buf->data_pages[buf->nr_pages]);
	if (PagePrivate(p))
		order = page_private(p);

	if (topa_table_full(topa)) {
		topa = topa_alloc(buf->cpu, gfp);
		if (!topa)
			return -ENOMEM;

		topa_insert_table(buf, topa);
	}

	TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
	TOPA_ENTRY(topa, -1)->size = order;
	if (!buf->snapshot && !pt_cap_get(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(topa, -1)->intr = 1;
		TOPA_ENTRY(topa, -1)->stop = 1;
	}

	topa->last++;
	topa->size += sizes(order);

	buf->nr_pages += 1ul << order;

	return 0;
}
/**
 * pt_topa_dump() - print ToPA tables and their entries
 * @buf:	PT buffer.
 */
static void pt_topa_dump(struct pt_buffer *buf)
{
	struct topa *topa;

	list_for_each_entry(topa, &buf->tables, list) {
		int i;

		pr_debug("# table @%p (%016Lx), off %llx size %zx\n", topa->table,
			 topa->phys, topa->offset, topa->size);
		for (i = 0; i < TENTS_PER_PAGE; i++) {
			pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
				 &topa->table[i],
				 (unsigned long)topa->table[i].base << TOPA_SHIFT,
				 sizes(topa->table[i].size),
				 topa->table[i].end ?  'E' : ' ',
				 topa->table[i].intr ? 'I' : ' ',
				 topa->table[i].stop ? 'S' : ' ',
				 *(u64 *)&topa->table[i]);
			if ((pt_cap_get(PT_CAP_topa_multiple_entries) &&
			     topa->table[i].stop) ||
			    topa->table[i].end)
				break;
		}
	}
}
/**
 * pt_buffer_advance() - advance to the next output region
 * @buf:	PT buffer.
 *
 * Advance the current pointers in the buffer to the next ToPA entry.
 */
static void pt_buffer_advance(struct pt_buffer *buf)
{
	buf->output_off = 0;
	buf->cur_idx++;

	if (buf->cur_idx == buf->cur->last) {
		if (buf->cur == buf->last)
			buf->cur = buf->first;
		else
			buf->cur = list_entry(buf->cur->list.next, struct topa,
					      list);
		buf->cur_idx = 0;
	}
}
/**
 * pt_update_head() - calculate current offsets and sizes
 * @pt:		Per-cpu pt context.
 *
 * Update buffer's current write pointer position and data size.
 */
static void pt_update_head(struct pt *pt)
{
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	u64 topa_idx, base, old;

	/* offset of the first region in this table from the beginning of buf */
	base = buf->cur->offset + buf->output_off;

	/* offset of the current output region within this table */
	for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
		base += sizes(buf->cur->table[topa_idx].size);

	if (buf->snapshot) {
		local_set(&buf->data_size, base);
	} else {
		old = (local64_xchg(&buf->head, base) &
		       ((buf->nr_pages << PAGE_SHIFT) - 1));
		if (base < old)
			base += buf->nr_pages << PAGE_SHIFT;

		local_add(base - old, &buf->data_size);
	}
}
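/*
 * Wraparound example (hypothetical numbers): with a 64k buffer
 * (nr_pages == 16), a previous masked head of 0xf000 and a new write
 * pointer of 0x1000, base < old, so base becomes 0x11000 and data_size
 * grows by 0x2000, i.e. the write pointer wrapped past the end of the
 * buffer.
 */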
/**
 * pt_buffer_region() - obtain current output region's address
 * @buf:	PT buffer.
 */
static void *pt_buffer_region(struct pt_buffer *buf)
{
	return phys_to_virt(buf->cur->table[buf->cur_idx].base << TOPA_SHIFT);
}
/**
 * pt_buffer_region_size() - obtain current output region's size
 * @buf:	PT buffer.
 */
static size_t pt_buffer_region_size(struct pt_buffer *buf)
{
	return sizes(buf->cur->table[buf->cur_idx].size);
}
/**
 * pt_handle_status() - take care of possible status conditions
 * @pt:		Per-cpu pt context.
 */
static void pt_handle_status(struct pt *pt)
{
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	int advance = 0;
	u64 status;

	rdmsrl(MSR_IA32_RTIT_STATUS, status);

	if (status & RTIT_STATUS_ERROR) {
		pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n");
		pt_topa_dump(buf);
		status &= ~RTIT_STATUS_ERROR;
	}

	if (status & RTIT_STATUS_STOPPED) {
		status &= ~RTIT_STATUS_STOPPED;

		/*
		 * On systems that only do single-entry ToPA, hitting STOP
		 * means we are already losing data; need to let the decoder
		 * know.
		 */
		if (!pt_cap_get(PT_CAP_topa_multiple_entries) ||
		    buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
			local_inc(&buf->lost);
			advance++;
		}
	}

	/*
	 * Also on single-entry ToPA implementations, the interrupt will come
	 * before the output reaches its output region's boundary.
	 */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries) && !buf->snapshot &&
	    pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
		void *head = pt_buffer_region(buf);

		/* everything within this margin needs to be zeroed out */
		memset(head + buf->output_off, 0,
		       pt_buffer_region_size(buf) -
		       buf->output_off);
		advance++;
	}

	if (advance)
		pt_buffer_advance(buf);

	wrmsrl(MSR_IA32_RTIT_STATUS, status);
}
/**
 * pt_read_offset() - translate registers into buffer pointers
 * @buf:	PT buffer.
 *
 * Set buffer's output pointers from MSR values.
 */
static void pt_read_offset(struct pt_buffer *buf)
{
	u64 offset, base_topa;

	rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa);
	buf->cur = phys_to_virt(base_topa);

	rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset);
	/* offset within current output region */
	buf->output_off = offset >> 32;
	/* index of current output region within this table */
	buf->cur_idx = (offset & 0xffffff80) >> 7;
}
/**
 * pt_topa_next_entry() - obtain index of the first page in the next ToPA entry
 * @buf:	PT buffer.
 * @pg:		Page offset in the buffer.
 *
 * When advancing to the next output region (ToPA entry), given a page offset
 * into the buffer, we need to find the offset of the first page in the next
 * region.
 */
static unsigned int pt_topa_next_entry(struct pt_buffer *buf, unsigned int pg)
{
	struct topa_entry *te = buf->topa_index[pg];

	/* one region */
	if (buf->first == buf->last && buf->first->last == 1)
		return pg;

	do {
		pg++;
		pg &= buf->nr_pages - 1;
	} while (buf->topa_index[pg] == te);

	return pg;
}
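/*
 * E.g. in a buffer of two 4k output regions, starting from a page in the
 * first region, the walk advances one page at a time until topa_index[]
 * points at a different entry, returning the first page of the second
 * region.
 */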
/**
 * pt_buffer_reset_markers() - place interrupt and stop bits in the buffer
 * @buf:	PT buffer.
 * @handle:	Current output handle.
 *
 * Place INT and STOP marks to prevent overwriting old data that the consumer
 * hasn't yet collected.
 */
static int pt_buffer_reset_markers(struct pt_buffer *buf,
				   struct perf_output_handle *handle)
{
	unsigned long idx, npages, end;

	if (buf->snapshot)
		return 0;

	/* can't stop in the middle of an output region */
	if (buf->output_off + handle->size + 1 <
	    sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size))
		return -EINVAL;

	/* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		return 0;

	/* clear STOP and INT from current entry */
	buf->topa_index[buf->stop_pos]->stop = 0;
	buf->topa_index[buf->intr_pos]->intr = 0;

	npages = (handle->size + 1) >> PAGE_SHIFT;
	end = (local64_read(&buf->head) >> PAGE_SHIFT) + npages;
	/*if (end > handle->wakeup >> PAGE_SHIFT)
		end = handle->wakeup >> PAGE_SHIFT;*/
	idx = end & (buf->nr_pages - 1);
	buf->stop_pos = idx;
	idx = (local64_read(&buf->head) >> PAGE_SHIFT) + npages - 1;
	idx &= buf->nr_pages - 1;
	buf->intr_pos = idx;

	buf->topa_index[buf->stop_pos]->stop = 1;
	buf->topa_index[buf->intr_pos]->intr = 1;

	return 0;
}
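/*
 * Placement example (hypothetical numbers): with head on page 0 and
 * handle->size + 1 == 8 pages of free space, STOP lands on the page at
 * index 8 and INT on page 7, so the PMI fires one page before output would
 * be stopped.
 */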
/**
 * pt_buffer_setup_topa_index() - build topa_index[] table of regions
 * @buf:	PT buffer.
 *
 * topa_index[] references output regions indexed by offset into the
 * buffer for purposes of quick reverse lookup.
 */
static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
{
	struct topa *cur = buf->first, *prev = buf->last;
	struct topa_entry *te_cur = TOPA_ENTRY(cur, 0),
		*te_prev = TOPA_ENTRY(prev, prev->last - 1);
	int pg = 0, idx = 0, ntopa = 0;

	while (pg < buf->nr_pages) {
		int tidx;

		/* pages within one topa entry */
		for (tidx = 0; tidx < 1 << te_cur->size; tidx++, pg++)
			buf->topa_index[pg] = te_prev;

		te_prev = te_cur;

		if (idx == cur->last - 1) {
			/* advance to next topa table */
			idx = 0;
			cur = list_entry(cur->list.next, struct topa, list);
			ntopa++;
		} else
			idx++;

		te_cur = TOPA_ENTRY(cur, idx);
	}
}
/**
 * pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head
 * @buf:	PT buffer.
 * @head:	Write pointer (aux_head) from AUX buffer.
 *
 * Find the ToPA table and entry corresponding to given @head and set buffer's
 * "current" pointers accordingly.
 */
static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
{
	int pg;

	if (buf->snapshot)
		head &= (buf->nr_pages << PAGE_SHIFT) - 1;

	pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
	pg = pt_topa_next_entry(buf, pg);

	buf->cur = (struct topa *)((unsigned long)buf->topa_index[pg] & PAGE_MASK);
	buf->cur_idx = ((unsigned long)buf->topa_index[pg] -
			(unsigned long)buf->cur) / sizeof(struct topa_entry);
	buf->output_off = head & (sizes(buf->cur->table[buf->cur_idx].size) - 1);

	local64_set(&buf->head, head);
	local_set(&buf->data_size, 0);
}
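/*
 * Note the reverse lookup trick above: a struct topa occupies exactly one
 * page and begins with its entry array, so masking a topa_index[] pointer
 * with PAGE_MASK recovers the table, and the remaining byte offset divided
 * by sizeof(struct topa_entry) recovers the entry index.
 */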
/**
 * pt_buffer_fini_topa() - deallocate ToPA structure of a buffer
 * @buf:	PT buffer.
 */
static void pt_buffer_fini_topa(struct pt_buffer *buf)
{
	struct topa *topa, *iter;

	list_for_each_entry_safe(topa, iter, &buf->tables, list) {
		/*
		 * right now, this is in free_aux() path only, so
		 * no need to unlink this table from the list
		 */
		topa_free(topa);
	}
}
/**
 * pt_buffer_init_topa() - initialize ToPA table for pt buffer
 * @buf:	PT buffer.
 * @nr_pages:	Number of pages in the buffer.
 * @gfp:	Allocation flags.
 */
static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
			       gfp_t gfp)
{
	struct topa *topa;
	int err;

	topa = topa_alloc(buf->cpu, gfp);
	if (!topa)
		return -ENOMEM;

	topa_insert_table(buf, topa);

	while (buf->nr_pages < nr_pages) {
		err = topa_insert_pages(buf, gfp);
		if (err) {
			pt_buffer_fini_topa(buf);
			return err;
		}
	}

	pt_buffer_setup_topa_index(buf);

	/* link last table to the first one, unless we're double buffering */
	if (pt_cap_get(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT;
		TOPA_ENTRY(buf->last, -1)->end = 1;
	}

	pt_topa_dump(buf);
	return 0;
}
/**
 * pt_buffer_setup_aux() - set up topa tables for a PT buffer
 * @cpu:	Cpu on which to allocate, -1 means current.
 * @pages:	Array of pointers to buffer pages passed from perf core.
 * @nr_pages:	Number of pages in the buffer.
 * @snapshot:	If this is a snapshot/overwrite counter.
 *
 * This is a pmu::setup_aux callback that sets up ToPA tables and all the
 * bookkeeping for an AUX buffer.
 *
 * Return: Our private PT buffer structure.
 */
static void *
pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot)
{
	struct pt_buffer *buf;
	int node, ret;

	if (!nr_pages)
		return NULL;

	if (cpu == -1)
		cpu = raw_smp_processor_id();
	node = cpu_to_node(cpu);

	buf = kzalloc_node(offsetof(struct pt_buffer, topa_index[nr_pages]),
			   GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->cpu = cpu;
	buf->snapshot = snapshot;
	buf->data_pages = pages;

	INIT_LIST_HEAD(&buf->tables);

	ret = pt_buffer_init_topa(buf, nr_pages, GFP_KERNEL);
	if (ret) {
		kfree(buf);
		return NULL;
	}

	return buf;
}
/**
 * pt_buffer_free_aux() - perf AUX deallocation path callback
 * @data:	PT buffer.
 */
static void pt_buffer_free_aux(void *data)
{
	struct pt_buffer *buf = data;

	pt_buffer_fini_topa(buf);
	kfree(buf);
}
/**
 * pt_buffer_is_full() - check if the buffer is full
 * @buf:	PT buffer.
 * @pt:		Per-cpu pt handle.
 *
 * If the user hasn't read data from the output region that aux_head
 * points to, the buffer is considered full: the user needs to read at
 * least this region and update aux_tail to point past it.
 */
static bool pt_buffer_is_full(struct pt_buffer *buf, struct pt *pt)
{
	if (buf->snapshot)
		return false;

	if (local_read(&buf->data_size) >= pt->handle.size)
		return true;

	return false;
}
/**
 * intel_pt_interrupt() - PT PMI handler
 */
void intel_pt_interrupt(void)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf;
	struct perf_event *event = pt->handle.event;

	/*
	 * There may be a dangling PT bit in the interrupt status register
	 * after PT has been disabled by pt_event_stop(). Make sure we don't
	 * do anything (particularly, re-enable) for this event here.
	 */
	if (!ACCESS_ONCE(pt->handle_nmi))
		return;

	pt_config_start(false);

	if (!event)
		return;

	buf = perf_get_aux(&pt->handle);
	if (!buf)
		return;

	pt_read_offset(buf);

	pt_handle_status(pt);

	pt_update_head(pt);

	perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0),
			    local_xchg(&buf->lost, 0));

	if (!event->hw.state) {
		int ret;

		buf = perf_aux_output_begin(&pt->handle, event);
		if (!buf) {
			event->hw.state = PERF_HES_STOPPED;
			return;
		}

		pt_buffer_reset_offsets(buf, pt->handle.head);
		ret = pt_buffer_reset_markers(buf, &pt->handle);
		if (ret) {
			perf_aux_output_end(&pt->handle, 0, true);
			return;
		}

		pt_config_buffer(buf->cur->table, buf->cur_idx,
				 buf->output_off);
		wrmsrl(MSR_IA32_RTIT_STATUS, 0);
		pt_config(event);
	}
}
static void pt_event_start(struct perf_event *event, int mode)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf = perf_get_aux(&pt->handle);

	if (pt_is_running() || !buf || pt_buffer_is_full(buf, pt)) {
		event->hw.state = PERF_HES_STOPPED;
		return;
	}

	ACCESS_ONCE(pt->handle_nmi) = 1;
	event->hw.state = 0;

	pt_config_buffer(buf->cur->table, buf->cur_idx,
			 buf->output_off);
	wrmsrl(MSR_IA32_RTIT_STATUS, 0);
	pt_config(event);
}
static void pt_event_stop(struct perf_event *event, int mode)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);

	/*
	 * Protect against the PMI racing with disabling wrmsr,
	 * see comment in intel_pt_interrupt().
	 */
	ACCESS_ONCE(pt->handle_nmi) = 0;
	pt_config_start(false);

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	event->hw.state = PERF_HES_STOPPED;

	if (mode & PERF_EF_UPDATE) {
		struct pt_buffer *buf = perf_get_aux(&pt->handle);

		if (!buf)
			return;

		if (WARN_ON_ONCE(pt->handle.event != event))
			return;

		pt_read_offset(buf);

		pt_handle_status(pt);

		pt_update_head(pt);
	}
}
static void pt_event_del(struct perf_event *event, int mode)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf;

	pt_event_stop(event, PERF_EF_UPDATE);

	buf = perf_get_aux(&pt->handle);

	if (buf) {
		if (buf->snapshot)
			pt->handle.head =
				local_xchg(&buf->data_size,
					   buf->nr_pages << PAGE_SHIFT);
		perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0),
				    local_xchg(&buf->lost, 0));
	}
}
static int pt_event_add(struct perf_event *event, int mode)
{
	struct pt_buffer *buf;
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct hw_perf_event *hwc = &event->hw;
	int ret = -EBUSY;

	if (pt->handle.event)
		goto out;

	buf = perf_aux_output_begin(&pt->handle, event);
	if (!buf) {
		ret = -EINVAL;
		goto out;
	}

	pt_buffer_reset_offsets(buf, pt->handle.head);
	if (!buf->snapshot) {
		ret = pt_buffer_reset_markers(buf, &pt->handle);
		if (ret) {
			perf_aux_output_end(&pt->handle, 0, true);
			goto out;
		}
	}

	if (mode & PERF_EF_START) {
		pt_event_start(event, 0);
		ret = -EBUSY;
		if (hwc->state == PERF_HES_STOPPED) {
			pt_event_del(event, 0);
			goto out;
		}
	} else {
		hwc->state = PERF_HES_STOPPED;
	}

	ret = 0;
out:
	if (ret)
		hwc->state = PERF_HES_STOPPED;

	return ret;
}
static void pt_event_read(struct perf_event *event)
{
}
static void pt_event_destroy(struct perf_event *event)
{
	x86_del_exclusive(x86_lbr_exclusive_pt);
}
static int pt_event_init(struct perf_event *event)
{
	if (event->attr.type != pt_pmu.pmu.type)
		return -ENOENT;

	if (!pt_event_valid(event))
		return -EINVAL;

	if (x86_add_exclusive(x86_lbr_exclusive_pt))
		return -EBUSY;

	event->destroy = pt_event_destroy;

	return 0;
}
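/*
 * From userspace, an event for this PMU is opened against the dynamic type
 * exported in sysfs; a minimal sketch (error handling omitted, pt_type
 * previously read from /sys/bus/event_source/devices/intel_pt/type):
 *
 *	struct perf_event_attr attr = {
 *		.type		= pt_type,
 *		.size		= sizeof(attr),
 *		.config		= 1 << 10,	(the "tsc" format bit)
 *		.exclude_kernel	= 1,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * followed by the usual perf mmap protocol to map the AUX area.
 */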
static __init int pt_init(void)
{
	int ret, cpu, prior_warn = 0;

	BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		u64 ctl;

		ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
		if (!ret && (ctl & RTIT_CTL_TRACEEN))
			prior_warn++;
	}
	put_online_cpus();

	if (prior_warn) {
		x86_add_exclusive(x86_lbr_exclusive_pt);
		pr_warn("PT is enabled at boot time, doing nothing\n");

		return -EBUSY;
	}

	ret = pt_pmu_hw_init();
	if (ret)
		return ret;

	if (!pt_cap_get(PT_CAP_topa_output)) {
		pr_warn("ToPA output is not supported on this CPU\n");
		return -ENODEV;
	}

	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		pt_pmu.pmu.capabilities =
			PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF;

	pt_pmu.pmu.capabilities	|= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
	pt_pmu.pmu.attr_groups	= pt_attr_groups;
	pt_pmu.pmu.task_ctx_nr	= perf_sw_context;
	pt_pmu.pmu.event_init	= pt_event_init;
	pt_pmu.pmu.add		= pt_event_add;
	pt_pmu.pmu.del		= pt_event_del;
	pt_pmu.pmu.start	= pt_event_start;
	pt_pmu.pmu.stop		= pt_event_stop;
	pt_pmu.pmu.read		= pt_event_read;
	pt_pmu.pmu.setup_aux	= pt_buffer_setup_aux;
	pt_pmu.pmu.free_aux	= pt_buffer_free_aux;
	ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);

	return ret;
}
module_init(pt_init);