/*
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"

static struct op_x86_model_spec *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

/* must be protected with get_online_cpus()/put_online_cpus(): */
static int nmi_enabled;
static int ctr_running;

struct op_counter_config counter_config[OP_MAX_COUNTER];

/* common functions */

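/*
 * Build the event-select value for one counter. The layout matches the
 * architectural perfmon event-select MSR format: event code in bits 7:0
 * (its high nibble, bits 11:8, goes to bits 35:32 on extended AMD
 * controls), unit mask in bits 15:8, USR/OS enables in bits 16/17, and
 * INT (bit 20) so the counter raises an interrupt on overflow.
 */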
u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
		    struct op_counter_config *counter_config)
{
	u64 val = 0;
	u16 event = (u16)counter_config->event;

	val |= model->reserved;
	val |= ARCH_PERFMON_EVENTSEL_INT;
	val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
	val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
	val |= (counter_config->unit_mask & 0xFF) << 8;
	event &= model->event_mask ? model->event_mask : 0xFF;
	val |= event & 0xFF;
	/* cast before shifting: bits 35:32 would overflow a 32-bit int */
	val |= (u64)(event & 0x0F00) << 24;

	return val;
}

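/*
 * NMI die-notifier callback: each performance-counter NMI is passed to the
 * model so it can check for counter overflow and log a sample. NOTIFY_STOP
 * tells the NMI path that the event was ours and has been handled.
 */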
static int profile_exceptions_notify(struct notifier_block *self,
				     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;
	int cpu = smp_processor_id();

	switch (val) {
	case DIE_NMI:
		model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu));
		ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			rdmsrl(counters[i].addr, counters[i].saved);
	}

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			rdmsrl(controls[i].addr, controls[i].saved);
	}
}

static void nmi_cpu_start(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	model->start(msrs);
}

static int nmi_start(void)
{
	get_online_cpus();
	on_each_cpu(nmi_cpu_start, NULL, 1);
	ctr_running = 1;
	put_online_cpus();
	return 0;
}

static void nmi_cpu_stop(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	model->stop(msrs);
}

static void nmi_stop(void)
{
	get_online_cpus();
	on_each_cpu(nmi_cpu_stop, NULL, 1);
	ctr_running = 0;
	put_online_cpus();
}

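/*
 * Event multiplexing lets oprofile expose more "virtual" counters than the
 * hardware physically provides by rotating the active set at run time.
 * switch_index tracks, per CPU, which slice of the virtual counters is
 * currently loaded into the physical counter MSRs.
 */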
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static DEFINE_PER_CPU(int, switch_index);

static inline int has_mux(void)
{
	return !!model->switch_ctrl;
}

inline int op_x86_phys_to_virt(int phys)
{
	return __get_cpu_var(switch_index) + phys;
}

inline int op_x86_virt_to_phys(int virt)
{
	return virt % model->num_counters;
}

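/*
 * Worked example (hypothetical 4 physical / 8 virtual counter setup): on a
 * CPU whose switch_index is 4, physical counters 0..3 hold virtual counters
 * 4..7; virtual counter 5 always occupies physical counter 5 % 4 = 1
 * whenever its set is active.
 */
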
static void nmi_shutdown_mux(void)
{
	int i;

	if (!has_mux())
		return;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).multiplex);
		per_cpu(cpu_msrs, i).multiplex = NULL;
		per_cpu(switch_index, i) = 0;
	}
}

static int nmi_setup_mux(void)
{
	int i;
	size_t multiplex_size =
		sizeof(struct op_msr) * model->num_virt_counters;

	if (!has_mux())
		return 1;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).multiplex =
			kzalloc(multiplex_size, GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).multiplex)
			return 0;
	}

	return 1;
}

static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
{
	int i;
	struct op_msr *multiplex = msrs->multiplex;

	if (!has_mux())
		return;

	for (i = 0; i < model->num_virt_counters; ++i) {
		if (counter_config[i].enabled)
			multiplex[i].saved = -(u64)counter_config[i].count;
		else
			multiplex[i].saved = 0;
	}

	per_cpu(switch_index, cpu) = 0;
}

static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			rdmsrl(counters[i].addr, multiplex[virt].saved);
	}
}

static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			wrmsrl(counters[i].addr, multiplex[virt].saved);
	}
}

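/*
 * Rotate this CPU to its next set of virtual counters: park the currently
 * loaded set, advance switch_index by one physical-counter stride (wrapping
 * at the end of the configured virtual counters), reprogram the controls,
 * and reload the saved values of the incoming set.
 */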
static void nmi_cpu_switch(void *dummy)
{
	int cpu = smp_processor_id();
	int si = per_cpu(switch_index, cpu);
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_stop(NULL);
	nmi_cpu_save_mpx_registers(msrs);

	/* move to next set */
	si += model->num_counters;
	if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
		per_cpu(switch_index, cpu) = 0;
	else
		per_cpu(switch_index, cpu) = si;

	model->switch_ctrl(model, msrs);
	nmi_cpu_restore_mpx_registers(msrs);

	nmi_cpu_start(NULL);
}

/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order.
 */
static int nmi_multiplex_on(void)
{
	return counter_config[model->num_counters].count ? 0 : -EINVAL;
}

static int nmi_switch_event(void)
{
	if (!has_mux())
		return -ENOSYS;		/* not implemented */
	if (nmi_multiplex_on() < 0)
		return -EINVAL;		/* not necessary */

	on_each_cpu(nmi_cpu_switch, NULL, 1);

	return 0;
}

static inline void mux_init(struct oprofile_operations *ops)
{
	if (has_mux())
		ops->switch_events = nmi_switch_event;
}

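/*
 * Once switch_events is set, the oprofile core is expected to invoke it
 * periodically (the multiplexing interval is user-tunable via oprofilefs);
 * the rotation itself happens entirely in nmi_cpu_switch() above.
 */
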
static void mux_clone(int cpu)
{
	if (!has_mux())
		return;

	memcpy(per_cpu(cpu_msrs, cpu).multiplex,
	       per_cpu(cpu_msrs, 0).multiplex,
	       sizeof(struct op_msr) * model->num_virt_counters);
}

#else

inline int op_x86_phys_to_virt(int phys) { return phys; }
inline int op_x86_virt_to_phys(int virt) { return virt; }
static inline void nmi_shutdown_mux(void) { }
static inline int nmi_setup_mux(void) { return 1; }
static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
static inline void mux_init(struct oprofile_operations *ops) { }
static void mux_clone(int cpu) { }

#endif /* CONFIG_OPROFILE_EVENT_MULTIPLEX */

static void free_msrs(void)
{
	int i;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).counters);
		per_cpu(cpu_msrs, i).counters = NULL;
		kfree(per_cpu(cpu_msrs, i).controls);
		per_cpu(cpu_msrs, i).controls = NULL;
	}
	nmi_shutdown_mux();
}

static int allocate_msrs(void)
{
	int i;
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).counters)
			goto fail;
		per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).controls)
			goto fail;
	}

	if (!nmi_setup_mux())
		goto fail;

	return 1;

fail:
	free_msrs();
	return 0;
}

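/*
 * Per-CPU setup, run via on_each_cpu(): save the current MSR state, program
 * the counters under oprofilefs_lock, then point the local APIC LVTPC entry
 * at NMI delivery so that overflow interrupts arrive as NMIs and can sample
 * code that runs with normal interrupts disabled.
 */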
static void nmi_cpu_setup(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_save_registers(msrs);
	spin_lock(&oprofilefs_lock);
	model->setup_ctrs(model, msrs);
	nmi_cpu_setup_mux(cpu, msrs);
	spin_unlock(&oprofilefs_lock);
	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static struct notifier_block profile_exceptions_nb = {
	.notifier_call = profile_exceptions_notify,
};

static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to serialize save and setup for HT because the subsets
	 * of MSRs are distinct for save and setup operations.
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	err = register_die_notifier(&profile_exceptions_nb);
	if (err)
		goto fail;

	on_each_cpu(nmi_cpu_setup, NULL, 1);
	nmi_enabled = 1;

	return 0;
fail:
	free_msrs();
	return err;
}

static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			wrmsrl(controls[i].addr, controls[i].saved);
	}

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			wrmsrl(counters[i].addr, counters[i].saved);
	}
}

static void nmi_cpu_shutdown(void *dummy)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	/* restoring APIC_LVTPC can trigger an apic error because the delivery
	 * mode and vector nr combination can be illegal. That's by design: on
	 * power on the apic lvt contains a zero vector nr, which is legal only
	 * for NMI delivery mode. So inhibit apic errors before restoring lvtpc.
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
	apic_write(APIC_LVTERR, v);
	nmi_cpu_restore_registers(msrs);
}

static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	nmi_enabled = 0;
	unregister_die_notifier(&profile_exceptions_nb);
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}

static void nmi_cpu_up(void *dummy)
{
	if (nmi_enabled)
		nmi_cpu_setup(dummy);
	if (ctr_running)
		nmi_cpu_start(dummy);
}

static void nmi_cpu_down(void *dummy)
{
	if (ctr_running)
		nmi_cpu_stop(dummy);
	if (nmi_enabled)
		nmi_cpu_shutdown(dummy);
}

static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < model->num_virt_counters; ++i) {
		struct dentry *dir;
		char buf[4];

		/* quick little hack to _not_ expose a counter if it is not
		 * available for use. This should protect userspace apps.
		 * NOTE: assumes 1:1 mapping here (that counters are organized
		 * sequentially in their struct assignment).
		 */
		if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
			continue;

		snprintf(buf, sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(sb, root, buf);
		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
	}

	return 0;
}

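/*
 * The resulting oprofilefs layout is one numbered directory per visible
 * virtual counter, e.g. (relative to wherever oprofilefs is mounted):
 *
 *   0/{enabled,event,count,unit_mask,kernel,user}
 */
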
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
				 void *data)
{
	int cpu = (unsigned long)data;

	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block oprofile_cpu_nb = {
	.notifier_call = oprofile_cpu_notifier
};

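/*
 * Note the wait flags in the calls above: bringing a CPU up may proceed
 * asynchronously (wait = 0), but CPU_DOWN_PREPARE must wait (wait = 1) so
 * the counters are quiesced before the CPU actually goes offline.
 */
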
#ifdef CONFIG_PM

static int nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* Only one CPU left, just stop that one */
	if (nmi_enabled == 1)
		nmi_cpu_stop(NULL);
	return 0;
}

static int nmi_resume(struct sys_device *dev)
{
	if (nmi_enabled == 1)
		nmi_cpu_start(NULL);
	return 0;
}

static struct sysdev_class oprofile_sysclass = {
	.resume		= nmi_resume,
	.suspend	= nmi_suspend,
};

static struct sys_device device_oprofile = {
	.cls	= &oprofile_sysclass,
};

static int __init init_sysfs(void)
{
	int error;

	error = sysdev_class_register(&oprofile_sysclass);
	if (error)
		return error;
	error = sysdev_register(&device_oprofile);
	if (error)
		sysdev_class_unregister(&oprofile_sysclass);

	return error;
}

static void exit_sysfs(void)
{
	sysdev_unregister(&device_oprofile);
	sysdev_class_unregister(&oprofile_sysclass);
}

#else

#define init_sysfs() do { } while (0)
#define exit_sysfs() do { } while (0)

#endif /* CONFIG_PM */

static int __init p4_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (cpu_model > 6 || cpu_model == 5)
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;
	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}

static int force_arch_perfmon;
static int force_cpu_type(const char *str, struct kernel_param *kp)
{
	if (!strcmp(str, "arch_perfmon")) {
		force_arch_perfmon = 1;
		printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
	}

	return 0;
}
module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);

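/*
 * Usage example: booting with "oprofile.cpu_type=arch_perfmon" (or loading
 * the module with "cpu_type=arch_perfmon") forces the generic architectural
 * perfmon driver instead of a CPU-specific model.
 */
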
static int __init ppro_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;
	struct op_x86_model_spec *spec = &op_ppro_spec;	/* default */

	if (force_arch_perfmon && cpu_has_arch_perfmon)
		return 0;

	switch (cpu_model) {
	case 0 ... 2:
		*cpu_type = "i386/ppro";
		break;
	case 3 ... 5:
		*cpu_type = "i386/pii";
		break;
	case 6 ... 8:
	case 10 ... 11:
		*cpu_type = "i386/piii";
		break;
	case 9:
	case 13:
		*cpu_type = "i386/p6_mobile";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;
	case 0x0f:
		*cpu_type = "i386/core_2";
		break;
	case 0x1a:
	case 0x1e:
	case 0x2e:
		spec = &op_arch_perfmon_spec;
		*cpu_type = "i386/core_i7";
		break;
	case 0x1c:
		*cpu_type = "i386/atom";
		break;
	default:
		/* Unknown */
		return 0;
	}

	model = spec;
	return 1;
}

/* in order to get sysfs right */
static int using_nmi;

int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type = NULL;
	int ret = 0;

	if (!cpu_has_apic)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */
		switch (family) {
		case 6:
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			/*
			 * Actually it could be i386/hammer too, but
			 * give user space a consistent name.
			 */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			cpu_type = "x86-64/family10";
			break;
		case 0x11:
			cpu_type = "x86-64/family11h";
			break;
		default:
			return -ENODEV;
		}
		model = &op_amd_spec;
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
		/* Pentium IV */
		case 0xf:
			p4_init(&cpu_type);
			break;
		/* A P6-class processor */
		case 6:
			ppro_init(&cpu_type);
			break;
		default:
			break;
		}

		if (cpu_type)
			break;

		if (!cpu_has_arch_perfmon)
			return -ENODEV;

		/* use arch perfmon as fallback */
		cpu_type = "i386/arch_perfmon";
		model = &op_arch_perfmon_spec;
		break;

	default:
		return -ENODEV;
	}

	register_cpu_notifier(&oprofile_cpu_nb);

	/* default values, can be overwritten by model */
	ops->create_files	= nmi_create_files;
	ops->setup		= nmi_setup;
	ops->shutdown		= nmi_shutdown;
	ops->start		= nmi_start;
	ops->stop		= nmi_stop;
	ops->cpu_type		= cpu_type;

	if (model->init)
		ret = model->init(ops);
	if (ret)
		return ret;

	if (!model->num_virt_counters)
		model->num_virt_counters = model->num_counters;

	mux_init(ops);
	init_sysfs();
	using_nmi = 1;
	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}

void op_nmi_exit(void)
{
	if (using_nmi) {
		exit_sysfs();
		unregister_cpu_notifier(&oprofile_cpu_nb);
	}
}