/*
 * athlon / K7 / K8 / Family 10h model-specific MSR operations
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */
#include <linux/oprofile.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/percpu.h>

#include <asm/ptrace.h>
#include <asm/msr.h>            /* rdmsrl(), wrmsrl() */
#include <asm/nmi.h>            /* reserve_perfctr_nmi() and friends */

#include "op_x86_model.h"
#include "op_counter.h"
#define NUM_COUNTERS            4
#define NUM_CONTROLS            4
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
#define NUM_VIRT_COUNTERS       32
#define NUM_VIRT_CONTROLS       32
#else
#define NUM_VIRT_COUNTERS       NUM_COUNTERS
#define NUM_VIRT_CONTROLS       NUM_CONTROLS
#endif

#define OP_EVENT_MASK           0x0FFF
#define OP_CTR_OVERFLOW         (1ULL<<31)
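
/*
 * Event-select (PerfEvtSel) bits that are preserved when a control
 * register is reprogrammed: op_amd_setup_ctrs() and op_amd_switch_ctrl()
 * mask the current MSR value with this before or-ing in the new event.
 */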
#define MSR_AMD_EVENTSEL_RESERVED       ((0xFFFFFCF0ULL<<32)|(1ULL<<21))

static unsigned long reset_value[NUM_VIRT_COUNTERS];

#ifdef CONFIG_OPROFILE_IBS

/* IbsFetchCtl bits/masks */
#define IBS_FETCH_RAND_EN       (1ULL<<57)
#define IBS_FETCH_VAL           (1ULL<<49)
#define IBS_FETCH_ENABLE        (1ULL<<48)
#define IBS_FETCH_CNT_MASK      0xFFFF0000ULL

/* IbsOpCtl bits */
#define IBS_OP_CNT_CTL          (1ULL<<19)
#define IBS_OP_VAL              (1ULL<<18)
#define IBS_OP_ENABLE           (1ULL<<17)

#define IBS_FETCH_SIZE          6
#define IBS_OP_SIZE             12

static int has_ibs;     /* AMD Family10h and later */

struct op_ibs_config {
        unsigned long op_enabled;
        unsigned long fetch_enabled;
        unsigned long max_cnt_fetch;
        unsigned long max_cnt_op;
        unsigned long rand_en;
        unsigned long dispatched_ops;
};

static struct op_ibs_config ibs_config;

#endif

/* functions for op_amd_spec */
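
/*
 * Reserve the K7 performance-counter and event-select MSRs through the
 * perfctr/NMI-watchdog allocator and record their addresses.  A counter
 * that cannot be reserved keeps addr == 0 and is skipped by the other
 * callbacks below.
 */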
static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
{
        int i;

        for (i = 0; i < NUM_COUNTERS; i++) {
                if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
                        msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
                else
                        msrs->counters[i].addr = 0;
        }

        for (i = 0; i < NUM_CONTROLS; i++) {
                if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
                        msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
                else
                        msrs->controls[i].addr = 0;
        }

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
        for (i = 0; i < NUM_VIRT_COUNTERS; i++) {
                int hw_counter = i % NUM_COUNTERS;
                if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
                        msrs->multiplex[i].addr = MSR_K7_PERFCTR0 + hw_counter;
                else
                        msrs->multiplex[i].addr = 0;
        }
#endif
}
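
/*
 * Program reset values and control registers for all enabled (virtual)
 * counters.  Each active counter is loaded with -reset_value so that it
 * overflows, and raises an NMI, after reset_value events.
 */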
static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
                              struct op_msrs const * const msrs)
{
        u64 val;
        int i;

        /* setup reset_value */
        for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
                if (counter_config[i].enabled) {
                        reset_value[i] = counter_config[i].count;
                } else {
                        reset_value[i] = 0;
                }
        }

        /* clear all counters */
        for (i = 0; i < NUM_CONTROLS; ++i) {
                if (unlikely(!msrs->controls[i].addr))
                        continue;
                rdmsrl(msrs->controls[i].addr, val);
                val &= model->reserved;
                wrmsrl(msrs->controls[i].addr, val);
        }

        /* avoid a false detection of ctr overflows in NMI handler */
        for (i = 0; i < NUM_COUNTERS; ++i) {
                if (unlikely(!msrs->counters[i].addr))
                        continue;
                wrmsrl(msrs->counters[i].addr, -1LL);
        }

        /* enable active counters */
        for (i = 0; i < NUM_COUNTERS; ++i) {
                int virt = op_x86_phys_to_virt(i);
                if (!counter_config[virt].enabled)
                        continue;
                if (!msrs->counters[i].addr)
                        continue;

                /* setup counter registers */
                wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);

                /* setup control registers */
                rdmsrl(msrs->controls[i].addr, val);
                val &= model->reserved;
                val |= op_x86_get_ctrl(model, &counter_config[virt]);
                wrmsrl(msrs->controls[i].addr, val);
        }
}

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
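
/*
 * Multiplexing switch: reprogram the hardware control registers for the
 * virtual counters that op_x86_phys_to_virt() currently maps onto them.
 */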
static void op_amd_switch_ctrl(struct op_x86_model_spec const *model,
                               struct op_msrs const * const msrs)
{
        u64 val;
        int i;

        /* enable active counters */
        for (i = 0; i < NUM_COUNTERS; ++i) {
                int virt = op_x86_phys_to_virt(i);
                if (!counter_config[virt].enabled)
                        continue;
                rdmsrl(msrs->controls[i].addr, val);
                val &= model->reserved;
                val |= op_x86_get_ctrl(model, &counter_config[virt]);
                wrmsrl(msrs->controls[i].addr, val);
        }
}

#endif

#ifdef CONFIG_OPROFILE_IBS
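
/*
 * Collect a completed IBS fetch and/or op sample: stream the raw MSR
 * contents into the event buffer and re-arm the corresponding IBS
 * control register.  Called from op_amd_check_ctrs() in NMI context.
 */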
static inline int
op_amd_handle_ibs(struct pt_regs * const regs,
                  struct op_msrs const * const msrs)
{
        u64 val, ctl;
        struct op_entry entry;

        if (!has_ibs)
                return 0;

        if (ibs_config.fetch_enabled) {
                rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
                if (ctl & IBS_FETCH_VAL) {
                        rdmsrl(MSR_AMD64_IBSFETCHLINAD, val);
                        oprofile_write_reserve(&entry, regs, val,
                                               IBS_FETCH_CODE, IBS_FETCH_SIZE);
                        oprofile_add_data64(&entry, val);
                        oprofile_add_data64(&entry, ctl);
                        rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val);
                        oprofile_add_data64(&entry, val);
                        oprofile_write_commit(&entry);

                        /* reenable the IRQ */
                        ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK);
                        ctl |= IBS_FETCH_ENABLE;
                        wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
                }
        }

        if (ibs_config.op_enabled) {
                rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
                if (ctl & IBS_OP_VAL) {
                        rdmsrl(MSR_AMD64_IBSOPRIP, val);
                        oprofile_write_reserve(&entry, regs, val,
                                               IBS_OP_CODE, IBS_OP_SIZE);
                        oprofile_add_data64(&entry, val);
                        rdmsrl(MSR_AMD64_IBSOPDATA, val);
                        oprofile_add_data64(&entry, val);
                        rdmsrl(MSR_AMD64_IBSOPDATA2, val);
                        oprofile_add_data64(&entry, val);
                        rdmsrl(MSR_AMD64_IBSOPDATA3, val);
                        oprofile_add_data64(&entry, val);
                        rdmsrl(MSR_AMD64_IBSDCLINAD, val);
                        oprofile_add_data64(&entry, val);
                        rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
                        oprofile_add_data64(&entry, val);
                        oprofile_write_commit(&entry);

                        /* reenable the IRQ */
                        ctl &= ~IBS_OP_VAL & 0xFFFFFFFF;
                        ctl |= IBS_OP_ENABLE;
                        wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
                }
        }

        return 1;
}
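
/*
 * Enable/disable IBS sampling by programming the max-count and enable
 * bits of IBSFETCHCTL/IBSOPCTL.  The max_cnt_* values from oprofilefs
 * are event counts; the hardware MaxCnt fields hold the count divided
 * by 16, hence the ">> 4".
 */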
static inline void op_amd_start_ibs(void)
{
        u64 val;

        if (has_ibs && ibs_config.fetch_enabled) {
                val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
                val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
                val |= IBS_FETCH_ENABLE;
                wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
        }

        if (has_ibs && ibs_config.op_enabled) {
                val = (ibs_config.max_cnt_op >> 4) & 0xFFFF;
                val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
                val |= IBS_OP_ENABLE;
                wrmsrl(MSR_AMD64_IBSOPCTL, val);
        }
}

static void op_amd_stop_ibs(void)
{
        if (has_ibs && ibs_config.fetch_enabled)
                /* clear max count and enable */
                wrmsrl(MSR_AMD64_IBSFETCHCTL, 0);

        if (has_ibs && ibs_config.op_enabled)
                /* clear max count and enable */
                wrmsrl(MSR_AMD64_IBSOPCTL, 0);
}

#else

static inline int op_amd_handle_ibs(struct pt_regs * const regs,
                                    struct op_msrs const * const msrs)
{
        return 0;
}
static inline void op_amd_start_ibs(void) { }
static inline void op_amd_stop_ibs(void) { }

#endif
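
/*
 * NMI handler: for every overflowed counter log a sample and re-arm the
 * counter with its reset value, then collect any pending IBS data.
 */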
static int op_amd_check_ctrs(struct pt_regs * const regs,
                             struct op_msrs const * const msrs)
{
        u64 val;
        int i;

        for (i = 0; i < NUM_COUNTERS; ++i) {
                int virt = op_x86_phys_to_virt(i);
                if (!reset_value[virt])
                        continue;
                rdmsrl(msrs->counters[i].addr, val);
                /* bit is clear if overflowed: */
                if (val & OP_CTR_OVERFLOW)
                        continue;
                oprofile_add_sample(regs, virt);
                wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
        }

        op_amd_handle_ibs(regs, msrs);

        /* See op_model_ppro.c */
        return 1;
}
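
/*
 * Start/stop counting by toggling the enable bit of each active
 * event-select register; IBS sampling is started and stopped together
 * with the performance counters.
 */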
static void op_amd_start(struct op_msrs const * const msrs)
{
        u64 val;
        int i;

        for (i = 0; i < NUM_COUNTERS; ++i) {
                if (!reset_value[op_x86_phys_to_virt(i)])
                        continue;
                rdmsrl(msrs->controls[i].addr, val);
                val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
                wrmsrl(msrs->controls[i].addr, val);
        }

        op_amd_start_ibs();
}

static void op_amd_stop(struct op_msrs const * const msrs)
{
        u64 val;
        int i;

        /*
         * Subtle: stop on all counters to avoid race with setting our
         * pm callback
         */
        for (i = 0; i < NUM_COUNTERS; ++i) {
                if (!reset_value[op_x86_phys_to_virt(i)])
                        continue;
                rdmsrl(msrs->controls[i].addr, val);
                val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
                wrmsrl(msrs->controls[i].addr, val);
        }

        op_amd_stop_ibs();
}
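
/* Release the MSR reservations taken in op_amd_fill_in_addresses(). */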
static void op_amd_shutdown(struct op_msrs const * const msrs)
{
        int i;

        for (i = 0; i < NUM_COUNTERS; ++i) {
                if (msrs->counters[i].addr)
                        release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
        }
        for (i = 0; i < NUM_CONTROLS; ++i) {
                if (msrs->controls[i].addr)
                        release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
        }
}

#ifdef CONFIG_OPROFILE_IBS
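
/*
 * IBS interrupts are delivered through an extended LVT (EILVT) entry of
 * the local APIC.  ibs_eilvt_off records the LVT offset returned by
 * setup_APIC_eilvt_ibs() so init_ibs_nmi() can program it into each
 * northbridge's IBS control register.
 */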
static u8 ibs_eilvt_off;

static inline void apic_init_ibs_nmi_per_cpu(void *arg)
{
        ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
}

static inline void apic_clear_ibs_nmi_per_cpu(void *arg)
{
        setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
}
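
/*
 * Program the LVT offset obtained above into the IBS control register of
 * every node's northbridge and verify that the write sticks.  Returns
 * non-zero on failure, in which case ibs_init() disables IBS.
 */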
static int init_ibs_nmi(void)
{
#define IBSCTL_LVTOFFSETVAL     (1 << 8)
#define IBSCTL                  0x1cc
        struct pci_dev *cpu_cfg;
        int nodes;
        u32 value;

        /* per CPU setup */
        on_each_cpu(apic_init_ibs_nmi_per_cpu, NULL, 1);

        nodes = 0;
        cpu_cfg = NULL;
        do {
                cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
                                         PCI_DEVICE_ID_AMD_10H_NB_MISC,
                                         cpu_cfg);
                if (!cpu_cfg)
                        break;
                ++nodes;
                pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
                                       | IBSCTL_LVTOFFSETVAL);
                pci_read_config_dword(cpu_cfg, IBSCTL, &value);
                if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
                        pci_dev_put(cpu_cfg);
                        printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
                               "IBSCTL = 0x%08x\n", value);
                        return 1;
                }
        } while (1);

        if (!nodes) {
                printk(KERN_DEBUG "No CPU node configured for IBS\n");
                return 1;
        }

#ifdef CONFIG_NUMA
        /* Sanity check: works only for 64 bit with a proper NUMA implementation. */
        if (nodes != num_possible_nodes()) {
                printk(KERN_DEBUG "Failed to setup CPU node(s) for IBS, "
                       "found: %d, expected %d\n",
                       nodes, num_possible_nodes());
                return 1;
        }
#endif
        return 0;
}

/* uninitialize the APIC for the IBS interrupts if needed */
static void clear_ibs_nmi(void)
{
        if (has_ibs)
                on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
}

/* initialize the APIC for the IBS interrupts if available */
static void ibs_init(void)
{
        has_ibs = boot_cpu_has(X86_FEATURE_IBS);

        if (!has_ibs)
                return;

        if (init_ibs_nmi()) {
                has_ibs = 0;
                return;
        }

        printk(KERN_INFO "oprofile: AMD IBS detected\n");
}

static void ibs_exit(void)
{
        if (!has_ibs)
                return;

        clear_ibs_nmi();
}

static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
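
/*
 * Wrapper installed as ops->create_files: call the architecture's
 * original create_files callback first, then, if IBS is available, add
 * ibs_fetch/ and ibs_op/ directories to oprofilefs with the control
 * files that back ibs_config.
 */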
static int setup_ibs_files(struct super_block *sb, struct dentry *root)
{
        struct dentry *dir;
        int ret = 0;

        /* architecture specific files */
        if (create_arch_files)
                ret = create_arch_files(sb, root);

        if (ret)
                return ret;

        if (!has_ibs)
                return ret;

        /* model specific files */

        /* setup some reasonable defaults */
        ibs_config.max_cnt_fetch = 250000;
        ibs_config.fetch_enabled = 0;
        ibs_config.max_cnt_op = 250000;
        ibs_config.op_enabled = 0;
        ibs_config.dispatched_ops = 1;

        dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
        oprofilefs_create_ulong(sb, dir, "enable",
                                &ibs_config.fetch_enabled);
        oprofilefs_create_ulong(sb, dir, "max_count",
                                &ibs_config.max_cnt_fetch);
        oprofilefs_create_ulong(sb, dir, "rand_enable",
                                &ibs_config.rand_en);

        dir = oprofilefs_mkdir(sb, root, "ibs_op");
        oprofilefs_create_ulong(sb, dir, "enable",
                                &ibs_config.op_enabled);
        oprofilefs_create_ulong(sb, dir, "max_count",
                                &ibs_config.max_cnt_op);
        oprofilefs_create_ulong(sb, dir, "dispatched_ops",
                                &ibs_config.dispatched_ops);

        return 0;
}

static int op_amd_init(struct oprofile_operations *ops)
{
        ibs_init();
        create_arch_files = ops->create_files;
        ops->create_files = setup_ibs_files;
        return 0;
}

static void op_amd_exit(void)
{
        ibs_exit();
}

#else

/* no IBS support */

static int op_amd_init(struct oprofile_operations *ops)
{
        return 0;
}

static void op_amd_exit(void) {}

#endif /* CONFIG_OPROFILE_IBS */

struct op_x86_model_spec const op_amd_spec = {
        .num_counters           = NUM_COUNTERS,
        .num_controls           = NUM_CONTROLS,
        .num_virt_counters      = NUM_VIRT_COUNTERS,
        .num_virt_controls      = NUM_VIRT_CONTROLS,
        .reserved               = MSR_AMD_EVENTSEL_RESERVED,
        .event_mask             = OP_EVENT_MASK,
        .init                   = op_amd_init,
        .exit                   = op_amd_exit,
        .fill_in_addresses      = &op_amd_fill_in_addresses,
        .setup_ctrs             = &op_amd_setup_ctrs,
        .check_ctrs             = &op_amd_check_ctrs,
        .start                  = &op_amd_start,
        .stop                   = &op_amd_stop,
        .shutdown               = &op_amd_shutdown,
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
        .switch_ctrl            = &op_amd_switch_ctrl,
#endif
};