Merge branch 'perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux...
[pandora-kernel.git] / arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index d360b56..6f3dc8f 100644
@@ -1,6 +1,5 @@
-
 /*
- *   (c) 2003-2006 Advanced Micro Devices, Inc.
+ *   (c) 2003-2010 Advanced Micro Devices, Inc.
  *  Your use of this code is subject to the terms and conditions of the
  *  GNU general public license version 2. See "COPYING" or
  *  http://www.gnu.org/licenses/gpl.html
@@ -46,6 +45,7 @@
 #define PFX "powernow-k8: "
 #define VERSION "version 2.20.00"
 #include "powernow-k8.h"
+#include "mperf.h"
 
 /* serialize freq changes  */
 static DEFINE_MUTEX(fidvid_mutex);
@@ -54,6 +54,12 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
 
 static int cpu_family = CPU_OPTERON;
 
+/* core performance boost */
+static bool cpb_capable, cpb_enabled;
+static struct msr __percpu *msrs;
+
+static struct cpufreq_driver cpufreq_amd64_driver;
+
 #ifndef CONFIG_SMP
 static inline const struct cpumask *cpu_core_mask(int cpu)
 {
@@ -929,7 +935,8 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
                powernow_table[i].index = index;
 
                /* Frequency may be rounded for these */
-               if (boot_cpu_data.x86 == 0x10 || boot_cpu_data.x86 == 0x11) {
+               if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
+                                || boot_cpu_data.x86 == 0x11) {
                        powernow_table[i].frequency =
                                freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
                } else
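
For reference, the FID/DID fields the hunk above extracts (lo & 0x3f and (lo >> 6) & 7) encode the core frequency of a family 0x10/0x11 hardware P-state. A minimal standalone sketch of the arithmetic, assuming the documented family 0x10 relation CoreCOF = 100 MHz * (FID + 0x10) / 2^DID; the helper name and sample MSR value below are illustrative, not taken from the patch:

#include <stdio.h>

/*
 * Illustrative helper (not from the patch): core frequency in kHz for a
 * family 0x10 hardware P-state, per CoreCOF = 100 MHz * (FID + 0x10) / 2^DID.
 */
static unsigned int khz_from_fid_did(unsigned int fid, unsigned int did)
{
	return 100000 * (fid + 0x10) >> did;
}

int main(void)
{
	unsigned long lo = 0x10;		/* hypothetical P-state MSR low word: FID=0x10, DID=0 */
	unsigned int fid = lo & 0x3f;		/* bits 5:0 */
	unsigned int did = (lo >> 6) & 7;	/* bits 8:6 */

	printf("%u kHz\n", khz_from_fid_did(fid, did));
	return 0;
}

With the sample value (FID = 0x10, DID = 0) this prints 3200000 kHz, i.e. a 3.2 GHz P-state.
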
@@ -1248,6 +1255,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
        struct powernow_k8_data *data;
        struct init_on_cpu init_on_cpu;
        int rc;
+       struct cpuinfo_x86 *c = &cpu_data(pol->cpu);
 
        if (!cpu_online(pol->cpu))
                return -ENODEV;
@@ -1322,6 +1330,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
                return -EINVAL;
        }
 
+       /* Check for APERF/MPERF support in hardware */
+       if (cpu_has(c, X86_FEATURE_APERFMPERF))
+               cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;
+
        cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
 
        if (cpu_family == CPU_HW_PSTATE)
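
The getavg hook wired up above reports average delivered performance from the APERF/MPERF ratio. As a rough illustration of that arithmetic (not the mperf.h implementation itself), here is a userspace sketch that samples both architectural MSRs through the msr character device; it assumes the msr driver is loaded, root privileges, and a placeholder base (P0) frequency:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_MPERF	0xe7	/* counts at the base (P0) clock */
#define MSR_IA32_APERF	0xe8	/* counts at the actual delivered clock */

static uint64_t rdmsr64(int fd, uint32_t msr)
{
	uint64_t val = 0;

	/* /dev/cpu/N/msr returns the 8-byte MSR value at file offset == MSR number */
	if (pread(fd, &val, sizeof(val), msr) != sizeof(val))
		perror("rdmsr");
	return val;
}

int main(void)
{
	unsigned long base_khz = 2600000;	/* placeholder P0 frequency of the machine */
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}

	uint64_t a0 = rdmsr64(fd, MSR_IA32_APERF), m0 = rdmsr64(fd, MSR_IA32_MPERF);
	sleep(1);
	uint64_t a1 = rdmsr64(fd, MSR_IA32_APERF), m1 = rdmsr64(fd, MSR_IA32_MPERF);
	close(fd);

	/* average effective frequency over the interval = base * dAPERF / dMPERF */
	printf("~%.0f kHz effective\n", base_khz * (double)(a1 - a0) / (double)(m1 - m0));
	return 0;
}

The driver-side helper does the equivalent per-CPU bookkeeping in kernel space, roughly scaling the policy's maximum frequency by the measured ratio.
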
@@ -1393,8 +1405,77 @@ out:
        return khz;
 }
 
+/* Set or clear the boost-disable bit (CpbDis, bit 25 of MSR_K7_HWCR) on all online CPUs */
+static void _cpb_toggle_msrs(bool t)
+{
+       int cpu;
+
+       get_online_cpus();
+
+       rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
+
+       for_each_cpu(cpu, cpu_online_mask) {
+               struct msr *reg = per_cpu_ptr(msrs, cpu);
+               if (t)
+                       reg->l &= ~BIT(25);
+               else
+                       reg->l |= BIT(25);
+       }
+       wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
+
+       put_online_cpus();
+}
+
+/*
+ * Switch on/off core performance boosting.
+ *
+ * 0=disable
+ * 1=enable.
+ */
+static void cpb_toggle(bool t)
+{
+       if (!cpb_capable)
+               return;
+
+       if (t && !cpb_enabled) {
+               cpb_enabled = true;
+               _cpb_toggle_msrs(t);
+               printk(KERN_INFO PFX "Core Boosting enabled.\n");
+       } else if (!t && cpb_enabled) {
+               cpb_enabled = false;
+               _cpb_toggle_msrs(t);
+               printk(KERN_INFO PFX "Core Boosting disabled.\n");
+       }
+}
+
+static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
+                                size_t count)
+{
+       int ret = -EINVAL;
+       unsigned long val = 0;
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (!ret && (val == 0 || val == 1) && cpb_capable)
+               cpb_toggle(val);
+       else
+               return -EINVAL;
+
+       return count;
+}
+
+static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
+{
+       return sprintf(buf, "%u\n", cpb_enabled);
+}
+
+#define define_one_rw(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+define_one_rw(cpb);
+
 static struct freq_attr *powernow_k8_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
+       &cpb,
        NULL,
 };
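
The cpb entry added to powernow_k8_attr above surfaces as a per-policy sysfs file, with writes handled by store_cpb() and reads by show_cpb(). A small userspace sketch, assuming the usual cpufreq sysfs layout (/sys/devices/system/cpu/cpuN/cpufreq/cpb) and root privileges:

#include <stdio.h>

/* Toggle the cpb attribute this patch adds; needs root.  Writing anything
 * other than 0 or 1 is rejected by store_cpb() with -EINVAL. */
static int set_cpb(int cpu, int enable)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/cpufreq/cpb", cpu);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", enable ? 1 : 0);
	return fclose(f);
}

int main(void)
{
	if (set_cpb(0, 0))		/* disable boosting via cpu0's policy */
		perror("cpb");
	return 0;
}
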
 
@@ -1410,10 +1491,51 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
        .attr           = powernow_k8_attr,
 };
 
+/*
+ * Clear the boost-disable flag on the CPU_DOWN path so that this cpu
+ * cannot block the remaining ones from boosting. On the CPU_UP path we
+ * simply keep the boost-disable flag in sync with the current global
+ * state.
+ */
+static int __cpuinit cpb_notify(struct notifier_block *nb, unsigned long action,
+                               void *hcpu)
+{
+       unsigned cpu = (long)hcpu;
+       u32 lo, hi;
+
+       switch (action) {
+       case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
+
+               if (!cpb_enabled) {
+                       rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
+                       lo |= BIT(25);
+                       wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
+               }
+               break;
+
+       case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
+               rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
+               lo &= ~BIT(25);
+               wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
+               break;
+
+       default:
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata cpb_nb = {
+       .notifier_call          = cpb_notify,
+};
+
 /* driver entry point for init */
 static int __cpuinit powernowk8_init(void)
 {
-       unsigned int i, supported_cpus = 0;
+       unsigned int i, supported_cpus = 0, cpu;
 
        for_each_online_cpu(i) {
                int rc;
@@ -1422,15 +1544,36 @@ static int __cpuinit powernowk8_init(void)
                        supported_cpus++;
        }
 
-       if (supported_cpus == num_online_cpus()) {
-               printk(KERN_INFO PFX "Found %d %s "
-                       "processors (%d cpu cores) (" VERSION ")\n",
-                       num_online_nodes(),
-                       boot_cpu_data.x86_model_id, supported_cpus);
-               return cpufreq_register_driver(&cpufreq_amd64_driver);
+       if (supported_cpus != num_online_cpus())
+               return -ENODEV;
+
+       printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
+               num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
+
+       if (boot_cpu_has(X86_FEATURE_CPB)) {
+
+               cpb_capable = true;
+
+               register_cpu_notifier(&cpb_nb);
+
+               msrs = msrs_alloc();
+               if (!msrs) {
+                       printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
+                       return -ENOMEM;
+               }
+
+               rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
+
+               for_each_cpu(cpu, cpu_online_mask) {
+                       struct msr *reg = per_cpu_ptr(msrs, cpu);
+                       /* boost is considered enabled if CpbDis is clear on any cpu */
+                       cpb_enabled |= !(reg->l & BIT(25));
+               }
+
+               printk(KERN_INFO PFX "Core Performance Boosting: %s.\n",
+                       (cpb_enabled ? "on" : "off"));
        }
 
-       return -ENODEV;
+       return cpufreq_register_driver(&cpufreq_amd64_driver);
 }
 
 /* driver entry point for term */
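
The initialization path above derives cpb_enabled from the boost-disable bit of MSR_K7_HWCR on every online CPU. For comparison, the same bit can be inspected from userspace via the msr device; this sketch assumes MSR_K7_HWCR is 0xc0010015 and that bit 25 is the CpbDis flag, matching the BIT(25) manipulation in the patch:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_K7_HWCR	0xc0010015ULL		/* hardware configuration register */
#define CPB_DIS		(1ULL << 25)		/* boost-disable: clear => boosting allowed */

int main(void)
{
	/* Check CPU 0 only; the driver loops over every online CPU.
	 * On 32-bit builds, compile with -D_FILE_OFFSET_BITS=64 so the
	 * MSR offset fits into off_t. */
	int fd = open("/dev/cpu/0/msr", O_RDONLY);
	uint64_t hwcr;

	if (fd < 0 || pread(fd, &hwcr, sizeof(hwcr), MSR_K7_HWCR) != sizeof(hwcr)) {
		perror("MSR_K7_HWCR");
		return 1;
	}
	close(fd);

	printf("core performance boost: %s\n", (hwcr & CPB_DIS) ? "off" : "on");
	return 0;
}
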
@@ -1438,6 +1581,13 @@ static void __exit powernowk8_exit(void)
 {
        dprintk("exit\n");
 
+       if (boot_cpu_has(X86_FEATURE_CPB)) {
+               msrs_free(msrs);
+               msrs = NULL;
+
+               unregister_cpu_notifier(&cpb_nb);
+       }
+
        cpufreq_unregister_driver(&cpufreq_amd64_driver);
 }