/*
 * athlon / K7 / K8 / Family 10h model-specific MSR operations
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#include <linux/oprofile.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/percpu.h>

#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/nmi.h>

#include "op_x86_model.h"
#include "op_counter.h"

#define NUM_COUNTERS 4
#define NUM_CONTROLS 4
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
#define NUM_VIRT_COUNTERS 32
#define NUM_VIRT_CONTROLS 32
#else
#define NUM_VIRT_COUNTERS NUM_COUNTERS
#define NUM_VIRT_CONTROLS NUM_CONTROLS
#endif

#define OP_EVENT_MASK			0x0FFF
#define OP_CTR_OVERFLOW			(1ULL<<31)

#define MSR_AMD_EVENTSEL_RESERVED	((0xFFFFFCF0ULL<<32)|(1ULL<<21))
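
/*
 * Editorial note: MSR_AMD_EVENTSEL_RESERVED is the set of event-select
 * (PERF_CTL) bits this driver must not touch; the setup and switch paths
 * do "val &= model->reserved" before or-ing in the new event encoding.
 * Going by the Family 10h PERF_CTL layout in the AMD BKDG, this leaves
 * the low dword (except reserved bit 21), the upper event-select bits
 * 35:32 and the host/guest-only bits 41:40 under driver control, while
 * preserving whatever the hardware holds in the reserved positions.
 */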

static unsigned long reset_value[NUM_VIRT_COUNTERS];

#ifdef CONFIG_OPROFILE_IBS

/* IbsFetchCtl bits/masks */
#define IBS_FETCH_RAND_EN		(1ULL<<57)
#define IBS_FETCH_VAL			(1ULL<<49)
#define IBS_FETCH_ENABLE		(1ULL<<48)
#define IBS_FETCH_CNT_MASK		0xFFFF0000ULL

/* IbsOpCtl bits */
#define IBS_OP_CNT_CTL			(1ULL<<19)
#define IBS_OP_VAL			(1ULL<<18)
#define IBS_OP_ENABLE			(1ULL<<17)

#define IBS_FETCH_SIZE			6
#define IBS_OP_SIZE			12
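
/*
 * Editorial note: IBS_FETCH_SIZE and IBS_OP_SIZE are the per-sample
 * payload sizes, in 32-bit words, reserved in the event buffer by
 * op_amd_handle_ibs() below: a fetch sample carries three 64-bit values
 * (fetch linear address, IbsFetchCtl, fetch physical address), an op
 * sample carries six (IbsOpRip plus the IBS op data/address registers).
 */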

static int has_ibs;	/* AMD Family10h and later */

struct op_ibs_config {
	unsigned long op_enabled;
	unsigned long fetch_enabled;
	unsigned long max_cnt_fetch;
	unsigned long max_cnt_op;
	unsigned long rand_en;
	unsigned long dispatched_ops;
};

static struct op_ibs_config ibs_config;

#endif

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
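
/*
 * Counter multiplexing: with CONFIG_OPROFILE_EVENT_MULTIPLEX more events
 * (NUM_VIRT_COUNTERS) can be configured than the hardware has counters
 * (NUM_COUNTERS).  The oprofile core periodically rotates the mapping and
 * calls op_mux_switch_ctrl() so that each physical control MSR is
 * reprogrammed with the event of the virtual counter currently assigned
 * to it (op_x86_phys_to_virt() gives that assignment).
 */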

static void op_mux_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < NUM_VIRT_COUNTERS; i++) {
		int hw_counter = i % NUM_COUNTERS;
		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
			msrs->multiplex[i].addr = MSR_K7_PERFCTR0 + hw_counter;
		else
			msrs->multiplex[i].addr = 0;
	}
}

static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
			       struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/* enable active counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!counter_config[virt].enabled)
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		val |= op_x86_get_ctrl(model, &counter_config[virt]);
		wrmsrl(msrs->controls[i].addr, val);
	}
}

#else

static inline void op_mux_fill_in_addresses(struct op_msrs * const msrs) { }

#endif

/* functions for op_amd_spec */

static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; i++) {
		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
			msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
		else
			msrs->counters[i].addr = 0;
	}

	for (i = 0; i < NUM_CONTROLS; i++) {
		if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
		else
			msrs->controls[i].addr = 0;
	}

	op_mux_fill_in_addresses(msrs);
}

static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
			      struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/* setup reset_value */
	for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
		if (counter_config[i].enabled) {
			reset_value[i] = counter_config[i].count;
		} else {
			reset_value[i] = 0;
		}
	}

	/* clear all counters */
	for (i = 0; i < NUM_CONTROLS; ++i) {
		if (unlikely(!msrs->controls[i].addr))
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		wrmsrl(msrs->controls[i].addr, val);
	}

	/* avoid a false detection of ctr overflows in NMI handler */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (unlikely(!msrs->counters[i].addr))
			continue;
		wrmsrl(msrs->counters[i].addr, -1LL);
	}

	/* enable active counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!counter_config[virt].enabled)
			continue;
		if (!msrs->counters[i].addr)
			continue;

		/* setup counter registers */
		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
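
		/*
		 * The counter counts upward and raises an NMI on overflow,
		 * so programming it with the two's complement
		 * -(u64)reset_value makes it fire after reset_value events.
		 */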

		/* setup control registers */
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		val |= op_x86_get_ctrl(model, &counter_config[virt]);
		wrmsrl(msrs->controls[i].addr, val);
	}
}

#ifdef CONFIG_OPROFILE_IBS
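
/*
 * Instruction-Based Sampling (Family 10h and later): the hardware
 * periodically tags one instruction fetch and/or one retired op, records
 * its details in the IBS MSRs, sets the valid bit in IbsFetchCtl/IbsOpCtl
 * and raises an interrupt (delivered as an NMI here).
 * op_amd_handle_ibs() reads those registers, emits an extended sample
 * and re-arms the respective unit.
 */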

static inline int
op_amd_handle_ibs(struct pt_regs * const regs,
		  struct op_msrs const * const msrs)
{
	u64 val, ctl;
	struct op_entry entry;

	if (!has_ibs)
		return 1;

	if (ibs_config.fetch_enabled) {
		rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		if (ctl & IBS_FETCH_VAL) {
			rdmsrl(MSR_AMD64_IBSFETCHLINAD, val);
			oprofile_write_reserve(&entry, regs, val,
					       IBS_FETCH_CODE, IBS_FETCH_SIZE);
			oprofile_add_data64(&entry, val);
			oprofile_add_data64(&entry, ctl);
			rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val);
			oprofile_add_data64(&entry, val);
			oprofile_write_commit(&entry);

			/* reenable the IRQ */
			ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK);
			ctl |= IBS_FETCH_ENABLE;
			wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		}
	}

	if (ibs_config.op_enabled) {
		rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
		if (ctl & IBS_OP_VAL) {
			rdmsrl(MSR_AMD64_IBSOPRIP, val);
			oprofile_write_reserve(&entry, regs, val,
					       IBS_OP_CODE, IBS_OP_SIZE);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA2, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA3, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCLINAD, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
			oprofile_add_data64(&entry, val);
			oprofile_write_commit(&entry);

			/* reenable the IRQ */
			ctl &= ~IBS_OP_VAL & 0xFFFFFFFF;
			ctl |= IBS_OP_ENABLE;
			wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
		}
	}

	return 1;
}
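
/*
 * Editorial note: the IbsFetchCtl/IbsOpCtl maximum-count fields are
 * specified in units of 16 events, hence the ">> 4" below when the
 * user-supplied max_cnt_fetch/max_cnt_op values are programmed.  Writing
 * 0 to either control MSR clears both the maximum count and the enable
 * bit, which is how sampling is stopped.
 */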

static inline void op_amd_start_ibs(void)
{
	u64 val;
	if (has_ibs && ibs_config.fetch_enabled) {
		val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
		val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
		val |= IBS_FETCH_ENABLE;
		wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
	}

	if (has_ibs && ibs_config.op_enabled) {
		val = (ibs_config.max_cnt_op >> 4) & 0xFFFF;
		val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
		val |= IBS_OP_ENABLE;
		wrmsrl(MSR_AMD64_IBSOPCTL, val);
	}
}

static void op_amd_stop_ibs(void)
{
	if (has_ibs && ibs_config.fetch_enabled)
		/* clear max count and enable */
		wrmsrl(MSR_AMD64_IBSFETCHCTL, 0);

	if (has_ibs && ibs_config.op_enabled)
		/* clear max count and enable */
		wrmsrl(MSR_AMD64_IBSOPCTL, 0);
}

#else

static inline int op_amd_handle_ibs(struct pt_regs * const regs,
				    struct op_msrs const * const msrs)
{
	return 0;
}
static inline void op_amd_start_ibs(void) { }
static inline void op_amd_stop_ibs(void) { }

#endif
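
/*
 * NMI handler path: each active counter was programmed with
 * -(u64)reset_value and counts up, so bit 31 (OP_CTR_OVERFLOW) stays set
 * until the counter wraps past zero.  A clear bit therefore means the
 * counter overflowed; a sample is logged and the counter is rearmed.
 */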
static int op_amd_check_ctrs(struct pt_regs * const regs,
			     struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!reset_value[virt])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
		/* bit is clear if overflowed: */
		if (val & OP_CTR_OVERFLOW)
			continue;
		oprofile_add_sample(regs, virt);
		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
	}

	op_amd_handle_ibs(regs, msrs);

	/* See op_model_ppro.c */
	return 1;
}

static void op_amd_start(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!reset_value[op_x86_phys_to_virt(i)])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(msrs->controls[i].addr, val);
	}

	op_amd_start_ibs();
}

static void op_amd_stop(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/*
	 * Subtle: stop on all counters to avoid race with setting our
	 * pm callback
	 */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!reset_value[op_x86_phys_to_virt(i)])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(msrs->controls[i].addr, val);
	}

	op_amd_stop_ibs();
}

static void op_amd_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (msrs->counters[i].addr)
			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
	}
	for (i = 0; i < NUM_CONTROLS; ++i) {
		if (msrs->controls[i].addr)
			release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
}

#ifdef CONFIG_OPROFILE_IBS
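
/*
 * IBS interrupt plumbing: every CPU's local APIC gets an extended
 * interrupt LVT entry set up for NMI delivery of IBS interrupts
 * (apic_init_ibs_nmi_per_cpu), and each northbridge is then told which
 * LVT offset to use by writing the IBSCTL register in the PCI config
 * space of the Family 10h misc-control function (init_ibs_nmi).
 */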

static u8 ibs_eilvt_off;

static inline void apic_init_ibs_nmi_per_cpu(void *arg)
{
	ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
}

static inline void apic_clear_ibs_nmi_per_cpu(void *arg)
{
	setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
}

static int init_ibs_nmi(void)
{
#define IBSCTL_LVTOFFSETVAL		(1 << 8)
#define IBSCTL				0x1cc	/* IBS control register in NB config space */
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	/* per CPU setup */
	on_each_cpu(apic_init_ibs_nmi_per_cpu, NULL, 1);

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVTOFFSETVAL);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
			pci_dev_put(cpu_cfg);
			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
			       "IBSCTL = 0x%08x", value);
			return 1;
		}
	} while (1);

	if (!nodes) {
		printk(KERN_DEBUG "No CPU node configured for IBS");
		return 1;
	}

#ifdef CONFIG_NUMA
	/* Sanity check */
	/* Works only for 64bit with proper numa implementation. */
	if (nodes != num_possible_nodes()) {
		printk(KERN_DEBUG "Failed to setup CPU node(s) for IBS, "
		       "found: %d, expected %d",
		       nodes, num_possible_nodes());
		return 1;
	}
#endif

	return 0;
}

/* uninitialize the APIC for the IBS interrupts if needed */
static void clear_ibs_nmi(void)
{
	if (has_ibs)
		on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
}

/* initialize the APIC for the IBS interrupts if available */
static void ibs_init(void)
{
	has_ibs = boot_cpu_has(X86_FEATURE_IBS);

	if (!has_ibs)
		return;

	if (init_ibs_nmi()) {
		has_ibs = 0;
		return;
	}

	printk(KERN_INFO "oprofile: AMD IBS detected\n");
}

static void ibs_exit(void)
{
	if (!has_ibs)
		return;

	clear_ibs_nmi();
}
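
/*
 * Editorial note: the files created below appear under the oprofilefs
 * mount point (typically /dev/oprofile) as ibs_fetch/{enable,max_count,
 * rand_enable} and ibs_op/{enable,max_count,dispatched_ops}; the
 * user-space oprofile daemon writes them before starting a profile run.
 */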

static int (*create_arch_files)(struct super_block *sb, struct dentry *root);

static int setup_ibs_files(struct super_block *sb, struct dentry *root)
{
	struct dentry *dir;
	int ret = 0;

	/* architecture specific files */
	if (create_arch_files)
		ret = create_arch_files(sb, root);

	if (ret)
		return ret;

	if (!has_ibs)
		return ret;

	/* model specific files */

	/* setup some reasonable defaults */
	ibs_config.max_cnt_fetch = 250000;
	ibs_config.fetch_enabled = 0;
	ibs_config.max_cnt_op = 250000;
	ibs_config.op_enabled = 0;
	ibs_config.dispatched_ops = 1;

	dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
	oprofilefs_create_ulong(sb, dir, "enable",
				&ibs_config.fetch_enabled);
	oprofilefs_create_ulong(sb, dir, "max_count",
				&ibs_config.max_cnt_fetch);
	oprofilefs_create_ulong(sb, dir, "rand_enable",
				&ibs_config.rand_en);

	dir = oprofilefs_mkdir(sb, root, "ibs_op");
	oprofilefs_create_ulong(sb, dir, "enable",
				&ibs_config.op_enabled);
	oprofilefs_create_ulong(sb, dir, "max_count",
				&ibs_config.max_cnt_op);
	oprofilefs_create_ulong(sb, dir, "dispatched_ops",
				&ibs_config.dispatched_ops);

	return 0;
}

static int op_amd_init(struct oprofile_operations *ops)
{
	ibs_init();
	create_arch_files = ops->create_files;
	ops->create_files = setup_ibs_files;
	return 0;
}

static void op_amd_exit(void)
{
	ibs_exit();
}

#else

/* no IBS support */

static int op_amd_init(struct oprofile_operations *ops)
{
	return 0;
}

static void op_amd_exit(void) {}

#endif /* CONFIG_OPROFILE_IBS */
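
/*
 * Model descriptor handed to the oprofile NMI driver (nmi_int.c), which
 * selects op_amd_spec on AMD CPUs and drives the callbacks above through
 * the common setup/start/stop/shutdown paths.
 */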
struct op_x86_model_spec op_amd_spec = {
	.num_counters		= NUM_COUNTERS,
	.num_controls		= NUM_CONTROLS,
	.num_virt_counters	= NUM_VIRT_COUNTERS,
	.reserved		= MSR_AMD_EVENTSEL_RESERVED,
	.event_mask		= OP_EVENT_MASK,
	.init			= op_amd_init,
	.exit			= op_amd_exit,
	.fill_in_addresses	= &op_amd_fill_in_addresses,
	.setup_ctrs		= &op_amd_setup_ctrs,
	.check_ctrs		= &op_amd_check_ctrs,
	.start			= &op_amd_start,
	.stop			= &op_amd_stop,
	.shutdown		= &op_amd_shutdown,
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
	.switch_ctrl		= &op_mux_switch_ctrl,
#endif
};