x86: Simplify code by removing a !SMP #ifdefs from 'struct cpuinfo_x86'
[pandora-kernel.git] arch/x86/kernel/cpu/perf_event_amd.c
index aeefd45..3dbfb00 100644
@@ -1,4 +1,5 @@
 #include <linux/perf_event.h>
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -357,7 +358,9 @@ static void amd_pmu_cpu_starting(int cpu)
        struct amd_nb *nb;
        int i, nb_id;
 
-       if (boot_cpu_data.x86_max_cores < 2)
+       cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+       if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
                return;
 
        nb_id = amd_get_nb_id(cpu);
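Neither side of the new perf_ctr_virt_mask is visible in this hunk, so for context, here is a sketch of the mainline counterparts this change leans on. The names (AMD_PERFMON_EVENTSEL_GUESTONLY, __x86_pmu_enable_event) are assumed from the corresponding mainline commit, not from this diff: the bit definitions put Guest-only at bit 40 and Host-only at bit 41 of the PERF_CTL event select, and the generic counter-enable helper strips whatever sits in perf_ctr_virt_mask before the config reaches the MSR.

```c
/* Sketch of the assumed arch/x86/include/asm/perf_event.h side:
 * PERF_CTL bit 40 = GuestOnly, bit 41 = HostOnly. */
#define AMD_PERFMON_EVENTSEL_GUESTONLY	(1ULL << 40)
#define AMD_PERFMON_EVENTSEL_HOSTONLY	(1ULL << 41)

/*
 * Sketch of the assumed arch/x86/kernel/cpu/perf_event.h helper: every bit
 * present in perf_ctr_virt_mask is cleared from the event config before it
 * is written out. With the HOSTONLY default set above, host-only events
 * still count while SVM is off; guest-only events keep their bit and count
 * nothing, matching the comment in amd_pmu_disable_virt() below.
 */
static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}
```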
@@ -470,6 +473,7 @@ static __initconst const struct x86_pmu amd_pmu = {
  * 0x023       DE      PERF_CTL[2:0]
  * 0x02D       LS      PERF_CTL[3]
  * 0x02E       LS      PERF_CTL[3,0]
+ * 0x031       LS      PERF_CTL[2:0] (**)
  * 0x043       CU      PERF_CTL[2:0]
  * 0x045       CU      PERF_CTL[2:0]
  * 0x046       CU      PERF_CTL[2:0]
@@ -483,10 +487,12 @@ static __initconst const struct x86_pmu amd_pmu = {
  * 0x0DD       LS      PERF_CTL[5:0]
  * 0x0DE       LS      PERF_CTL[5:0]
  * 0x0DF       LS      PERF_CTL[5:0]
+ * 0x1C0       EX      PERF_CTL[5:3]
  * 0x1D6       EX      PERF_CTL[5:0]
  * 0x1D8       EX      PERF_CTL[5:0]
  *
- * (*) depending on the umask all FPU counters may be used
+ * (*)  depending on the umask all FPU counters may be used
+ * (**) only one unitmask enabled at a time
  */
 
 static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
@@ -536,6 +542,12 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
                        return &amd_f15_PMC3;
                case 0x02E:
                        return &amd_f15_PMC30;
+               case 0x031:
+                       if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
+                               return &amd_f15_PMC20;
+                       return &emptyconstraint;
+               case 0x1C0:
+                       return &amd_f15_PMC53;
                default:
                        return &amd_f15_PMC50;
                }
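The two new cases return constraint objects declared a few lines above this hunk; only amd_f15_PMC0 made it into the diff context. A sketch of the relevant definitions, assuming the same EVENT_CONSTRAINT(event, counter-bitmap, config-mask) pattern as the amd_f15_PMC0 line shown: the counter bitmaps encode the table directly, and the hweight_long() test implements footnote (**) by rejecting event 0x031 (via emptyconstraint) whenever more than one unit-mask bit is set.

```c
/* Assumed definitions, following the amd_f15_PMC0 pattern above. The second
 * argument is the bitmap of PERF_CTL counters the event may use; a zero
 * event code/config mask means the constraint is handed out explicitly by
 * the switch rather than found by generic matching. */
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0); /* PERF_CTL[2:0] */
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); /* PERF_CTL[5:3] */
```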
@@ -587,9 +599,9 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
        .put_event_constraints  = amd_put_event_constraints,
 
        .cpu_prepare            = amd_pmu_cpu_prepare,
-       .cpu_starting           = amd_pmu_cpu_starting,
        .cpu_dead               = amd_pmu_cpu_dead,
 #endif
+       .cpu_starting           = amd_pmu_cpu_starting,
 };
 
 __init int amd_pmu_init(void)
@@ -621,3 +633,33 @@ __init int amd_pmu_init(void)
 
        return 0;
 }
+
+void amd_pmu_enable_virt(void)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+       cpuc->perf_ctr_virt_mask = 0;
+
+       /* Reload all events */
+       x86_pmu_disable_all();
+       x86_pmu_enable_all(0);
+}
+EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
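This export (and its twin after amd_pmu_disable_virt() below) is what lets the kvm-amd module call into the PMU code. A sketch of the matching declarations, assuming they sit in arch/x86/include/asm/perf_event.h with no-op stubs when CONFIG_CPU_SUP_AMD is off, as in the corresponding mainline change:

```c
/* Assumed arch/x86/include/asm/perf_event.h companion to these exports */
#ifdef CONFIG_CPU_SUP_AMD
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif
```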
+
+void amd_pmu_disable_virt(void)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+       /*
+        * We only mask out the Host-only bit so that host-only counting works
+        * when SVM is disabled. If someone sets up a guest-only counter when
+        * SVM is disabled, the Guest-only bit still gets set and the counter
+        * will not count anything.
+        */
+       cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+       /* Reload all events */
+       x86_pmu_disable_all();
+       x86_pmu_enable_all(0);
+}
+EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
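Together the pair lets KVM toggle host-only filtering as SVM comes and goes on each CPU. An abridged sketch of where the calls are expected to land, assuming the mainline call sites in arch/x86/kvm/svm.c (svm_hardware_enable(), svm_hardware_disable() and cpu_svm_disable() are mainline names; the elided bodies are the existing per-CPU enable/disable work):

```c
/* arch/x86/kvm/svm.c -- abridged sketch of the assumed call sites */

static int svm_hardware_enable(void *garbage)
{
	/* ... existing code: set EFER.SVME, program the host save area ... */

	amd_pmu_enable_virt();	/* SVM is up: Host/Guest-only bits now work */

	return 0;
}

static void svm_hardware_disable(void *garbage)
{
	/* ... existing code ... */
	cpu_svm_disable();	/* clears EFER.SVME */

	amd_pmu_disable_virt();	/* back to masking the Host-only bit */
}
```

Note the design choice in both helpers above: rather than rewriting live counters by hand, they update perf_ctr_virt_mask and then force a full x86_pmu_disable_all()/x86_pmu_enable_all(0) cycle, so counters that are already running get reprogrammed with the new mask applied.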