/*
 * (c) 2003-2010 Advanced Micro Devices, Inc.
 * Your use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Support : mark.langsdorf@amd.com
 *
 * Based on the powernow-k7.c module written by Dave Jones.
 * (C) 2003 Dave Jones on behalf of SuSE Labs
 * (C) 2004 Dominik Brodowski <linux@brodo.de>
 * (C) 2004 Pavel Machek <pavel@ucw.cz>
 * Licensed under the terms of the GNU GPL License version 2.
 * Based upon datasheets & sample CPUs kindly provided by AMD.
 *
 * Valuable input gratefully received from Dave Jones, Pavel Machek,
 * Dominik Brodowski, Jacob Shin, and others.
 * Originally developed by Paul Devriendt.
 * Processor information obtained from Chapter 9 (Power and Thermal Management)
 * of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" available for download from www.amd.com
 *
 * Tables for specific CPUs can be inferred from
 * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf
 */
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <asm/msr.h>

#include <linux/acpi.h>
#include <linux/mutex.h>
#include <acpi/processor.h>

#define PFX "powernow-k8: "
#define VERSION "version 2.20.00"
#include "powernow-k8.h"
#include "mperf.h"
/* serialize freq changes */
static DEFINE_MUTEX(fidvid_mutex);

static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);

static int cpu_family = CPU_OPTERON;

/* array to map SW pstate number to acpi state */
static u32 ps_to_as[8];

/* core performance boost */
static bool cpb_capable, cpb_enabled;
static struct msr __percpu *msrs;

static struct cpufreq_driver cpufreq_amd64_driver;
#ifndef CONFIG_SMP
static inline const struct cpumask *cpu_core_mask(int cpu)
{
	return cpumask_of(0);
}
#endif
/* Return a frequency in MHz, given an input fid */
static u32 find_freq_from_fid(u32 fid)
{
	return 800 + (fid * 100);
}

/* Return a frequency in KHz, given an input fid */
static u32 find_khz_freq_from_fid(u32 fid)
{
	return 1000 * find_freq_from_fid(fid);
}
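/*
 * Worked example of the fid -> frequency mapping above: fid 0x02 gives
 * 800 + (2 * 100) = 1000 MHz, i.e. 1000000 kHz from
 * find_khz_freq_from_fid(); fid 0x0a gives 1800 MHz.
 */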
static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
		u32 pstate)
{
	return data[ps_to_as[pstate]].frequency;
}
/* Return the vco fid for an input fid
 *
 * Each "low" fid has a corresponding "high" fid, and you can get to "low"
 * fids only from corresponding high fids. This returns the "high" fid
 * corresponding to a "low" one.
 */
static u32 convert_fid_to_vco_fid(u32 fid)
{
	if (fid < HI_FID_TABLE_BOTTOM)
		return 8 + (2 * fid);
	else
		return fid;
}
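/*
 * Worked example for the mapping above (assuming HI_FID_TABLE_BOTTOM is 8,
 * per powernow-k8.h): "low" fid 0x03 (1100 MHz) maps to VCO fid
 * 8 + 2 * 3 = 0x0e, while any fid >= HI_FID_TABLE_BOTTOM is already a VCO
 * fid and is returned unchanged.
 */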
/*
 * Return 1 if the pending bit is set. Unless we just instructed the processor
 * to transition to a new state, seeing this bit set is really bad news.
 */
static int pending_bit_stuck(void)
{
	u32 lo, hi;

	if (cpu_family == CPU_HW_PSTATE)
		return 0;

	rdmsr(MSR_FIDVID_STATUS, lo, hi);
	return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
}
/*
 * Update the global current fid / vid values from the status msr.
 * Returns 1 on error.
 */
static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
{
	u32 lo, hi;
	u32 i = 0;

	if (cpu_family == CPU_HW_PSTATE) {
		rdmsr(MSR_PSTATE_STATUS, lo, hi);
		i = lo & HW_PSTATE_MASK;
		data->currpstate = i;

		/*
		 * a workaround for family 11h erratum 311 might cause
		 * an "out-of-range" Pstate if the core is in Pstate-0
		 */
		if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps))
			data->currpstate = HW_PSTATE_0;

		return 0;
	}
	do {
		if (i++ > 10000) {
			pr_debug("detected change pending stuck\n");
			return 1;
		}
		rdmsr(MSR_FIDVID_STATUS, lo, hi);
	} while (lo & MSR_S_LO_CHANGE_PENDING);

	data->currvid = hi & MSR_S_HI_CURRENT_VID;
	data->currfid = lo & MSR_S_LO_CURRENT_FID;

	return 0;
}
/* the isochronous relief time */
static void count_off_irt(struct powernow_k8_data *data)
{
	udelay((1 << data->irt) * 10);
}

/* the voltage stabilization time */
static void count_off_vst(struct powernow_k8_data *data)
{
	udelay(data->vstable * VST_UNITS_20US);
}
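/*
 * Worked example for the two delays above (assuming VST_UNITS_20US is 20,
 * per powernow-k8.h): irt = 3 gives an isochronous relief time of
 * (1 << 3) * 10 = 80 us; vstable = 5 gives a voltage stabilization time of
 * 5 * 20 = 100 us.
 */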
/* need to init the control msr to a safe value (for each cpu) */
static void fidvid_msr_init(void)
{
	u32 lo, hi;
	u8 fid, vid;

	rdmsr(MSR_FIDVID_STATUS, lo, hi);
	vid = hi & MSR_S_HI_CURRENT_VID;
	fid = lo & MSR_S_LO_CURRENT_FID;
	lo = fid | (vid << MSR_C_LO_VID_SHIFT);
	hi = MSR_C_HI_STP_GNT_BENIGN;
	pr_debug("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi);
	wrmsr(MSR_FIDVID_CTL, lo, hi);
}
/* write the new fid value along with the other control fields to the msr */
static int write_new_fid(struct powernow_k8_data *data, u32 fid)
{
	u32 lo;
	u32 savevid = data->currvid;
	u32 i = 0;

	if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) {
		printk(KERN_ERR PFX "internal error - overflow on fid write\n");
		return 1;
	}

	lo = fid;
	lo |= (data->currvid << MSR_C_LO_VID_SHIFT);
	lo |= MSR_C_LO_INIT_FID_VID;

	pr_debug("writing fid 0x%x, lo 0x%x, hi 0x%x\n",
		fid, lo, data->plllock * PLL_LOCK_CONVERSION);

	do {
		wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);
		if (i++ > 100) {
			printk(KERN_ERR PFX
				"Hardware error - pending bit very stuck - "
				"no further pstate changes possible\n");
			return 1;
		}
	} while (query_current_values_with_pending_wait(data));

	count_off_irt(data);

	if (savevid != data->currvid) {
		printk(KERN_ERR PFX
			"vid change on fid trans, old 0x%x, new 0x%x\n",
			savevid, data->currvid);
		return 1;
	}

	if (fid != data->currfid) {
		printk(KERN_ERR PFX
			"fid trans failed, fid 0x%x, curr 0x%x\n", fid,
			data->currfid);
		return 1;
	}

	return 0;
}
/* Write a new vid to the hardware */
static int write_new_vid(struct powernow_k8_data *data, u32 vid)
{
	u32 lo;
	u32 savefid = data->currfid;
	int i = 0;

	if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) {
		printk(KERN_ERR PFX "internal error - overflow on vid write\n");
		return 1;
	}

	lo = data->currfid;
	lo |= (vid << MSR_C_LO_VID_SHIFT);
	lo |= MSR_C_LO_INIT_FID_VID;

	pr_debug("writing vid 0x%x, lo 0x%x, hi 0x%x\n",
		vid, lo, STOP_GRANT_5NS);

	do {
		wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
		if (i++ > 100) {
			printk(KERN_ERR PFX "internal error - pending bit "
					"very stuck - no further pstate "
					"changes possible\n");
			return 1;
		}
	} while (query_current_values_with_pending_wait(data));

	if (savefid != data->currfid) {
		printk(KERN_ERR PFX "fid changed on vid trans, old "
			"0x%x new 0x%x\n",
			savefid, data->currfid);
		return 1;
	}

	if (vid != data->currvid) {
		printk(KERN_ERR PFX "vid trans failed, vid 0x%x, "
				"curr 0x%x\n",
				vid, data->currvid);
		return 1;
	}

	return 0;
}
/*
 * Reduce the vid towards reqvid, by at most "step" vid codes at a time.
 * Decreasing vid codes represent increasing voltages:
 * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of VID_OFF is off.
 */
static int decrease_vid_code_by_step(struct powernow_k8_data *data,
		u32 reqvid, u32 step)
{
	if ((data->currvid - reqvid) > step)
		/* reduce transition time by taking multiple steps */
		reqvid = data->currvid - step;

	if (write_new_vid(data, reqvid))
		return 1;

	count_off_vst(data);

	return 0;
}
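/*
 * Worked example for the clamping above: with currvid 0x12, reqvid 0x0e and
 * step 2, the difference (4) exceeds the step, so this call only moves to
 * vid 0x10; two further calls reach 0x0e. Remember that smaller vid codes
 * mean higher voltages.
 */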
/* Change hardware pstate by single MSR write */
static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
{
	wrmsr(MSR_PSTATE_CTRL, pstate, 0);
	data->currpstate = pstate;
	return 0;
}
/* Change Opteron/Athlon64 fid and vid, by the 3 phases. */
static int transition_fid_vid(struct powernow_k8_data *data,
		u32 reqfid, u32 reqvid)
{
	if (core_voltage_pre_transition(data, reqvid, reqfid))
		return 1;

	if (core_frequency_transition(data, reqfid))
		return 1;

	if (core_voltage_post_transition(data, reqvid))
		return 1;

	if (query_current_values_with_pending_wait(data))
		return 1;

	if ((reqfid != data->currfid) || (reqvid != data->currvid)) {
		printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, "
				"curr 0x%x 0x%x\n",
				smp_processor_id(),
				reqfid, reqvid, data->currfid, data->currvid);
		return 1;
	}

	pr_debug("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n",
		smp_processor_id(), data->currfid, data->currvid);

	return 0;
}
/* Phase 1 - core voltage transition ... setup voltage */
static int core_voltage_pre_transition(struct powernow_k8_data *data,
		u32 reqvid, u32 reqfid)
{
	u32 rvosteps = data->rvo;
	u32 savefid = data->currfid;
	u32 maxvid, lo, rvomult = 1;

	pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, "
		"reqvid 0x%x, rvo 0x%x\n",
		smp_processor_id(),
		data->currfid, data->currvid, reqvid, data->rvo);

	if ((savefid < LO_FID_TABLE_TOP) && (reqfid < LO_FID_TABLE_TOP))
		rvomult = 2;
	rvosteps *= rvomult;
	rdmsr(MSR_FIDVID_STATUS, lo, maxvid);
	maxvid = 0x1f & (maxvid >> 16);
	pr_debug("ph1 maxvid=0x%x\n", maxvid);
	if (reqvid < maxvid) /* lower numbers are higher voltages */
		reqvid = maxvid;

	while (data->currvid > reqvid) {
		pr_debug("ph1: curr 0x%x, req vid 0x%x\n",
			data->currvid, reqvid);
		if (decrease_vid_code_by_step(data, reqvid, data->vidmvs))
			return 1;
	}

	while ((rvosteps > 0) &&
			((rvomult * data->rvo + data->currvid) > reqvid)) {
		if (data->currvid == maxvid) {
			rvosteps = 0;
		} else {
			pr_debug("ph1: changing vid for rvo, req 0x%x\n",
				data->currvid - 1);
			if (decrease_vid_code_by_step(data, data->currvid-1, 1))
				return 1;
			rvosteps--;
		}
	}

	if (query_current_values_with_pending_wait(data))
		return 1;

	if (savefid != data->currfid) {
		printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n",
				data->currfid);
		return 1;
	}

	pr_debug("ph1 complete, currfid 0x%x, currvid 0x%x\n",
		data->currfid, data->currvid);

	return 0;
}
/* Phase 2 - core frequency transition */
static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
{
	u32 vcoreqfid, vcocurrfid, vcofiddiff;
	u32 fid_interval, savevid = data->currvid;

	if (data->currfid == reqfid) {
		printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n",
				data->currfid);
		return 0;
	}

	pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, "
		"reqfid 0x%x\n",
		smp_processor_id(),
		data->currfid, data->currvid, reqfid);

	vcoreqfid = convert_fid_to_vco_fid(reqfid);
	vcocurrfid = convert_fid_to_vco_fid(data->currfid);
	vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
	    : vcoreqfid - vcocurrfid;

	if ((reqfid <= LO_FID_TABLE_TOP) && (data->currfid <= LO_FID_TABLE_TOP))
		vcofiddiff = 0;

	while (vcofiddiff > 2) {
		fid_interval = (data->currfid & 1) ? 1 : 2;

		if (reqfid > data->currfid) {
			if (data->currfid > LO_FID_TABLE_TOP) {
				if (write_new_fid(data,
						data->currfid + fid_interval))
					return 1;
			} else {
				if (write_new_fid(data, 2 +
					    convert_fid_to_vco_fid(data->currfid)))
					return 1;
			}
		} else {
			if (write_new_fid(data, data->currfid - fid_interval))
				return 1;
		}

		vcocurrfid = convert_fid_to_vco_fid(data->currfid);
		vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
		    : vcoreqfid - vcocurrfid;
	}

	if (write_new_fid(data, reqfid))
		return 1;

	if (query_current_values_with_pending_wait(data))
		return 1;

	if (data->currfid != reqfid) {
		printk(KERN_ERR PFX
			"ph2: mismatch, failed fid transition, "
			"curr 0x%x, req 0x%x\n",
			data->currfid, reqfid);
		return 1;
	}

	if (savevid != data->currvid) {
		printk(KERN_ERR PFX "ph2: vid changed, save 0x%x, curr 0x%x\n",
			savevid, data->currvid);
		return 1;
	}

	pr_debug("ph2 complete, currfid 0x%x, currvid 0x%x\n",
		data->currfid, data->currvid);

	return 0;
}
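/*
 * Worked example for the loop above: going from fid 0x0a to fid 0x02 cannot
 * be done in one write, because the PLL may only be stepped by two VCO fid
 * codes at a time. The loop therefore walks currfid in fid_interval steps
 * (1 if currfid is odd, else 2), recomputing the VCO distance after each
 * write, until the remaining difference is at most 2 and the final fid can
 * be written directly.
 */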
/* Phase 3 - core voltage transition flow ... jump to the final vid. */
static int core_voltage_post_transition(struct powernow_k8_data *data,
		u32 reqvid)
{
	u32 savefid = data->currfid;
	u32 savereqvid = reqvid;

	pr_debug("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n",
		smp_processor_id(),
		data->currfid, data->currvid);

	if (reqvid != data->currvid) {
		if (write_new_vid(data, reqvid))
			return 1;

		if (savefid != data->currfid) {
			printk(KERN_ERR PFX
			       "ph3: bad fid change, save 0x%x, curr 0x%x\n",
			       savefid, data->currfid);
			return 1;
		}

		if (data->currvid != reqvid) {
			printk(KERN_ERR PFX
			       "ph3: failed vid transition, "
			       "req 0x%x, curr 0x%x\n",
			       reqvid, data->currvid);
			return 1;
		}
	}

	if (query_current_values_with_pending_wait(data))
		return 1;

	if (savereqvid != data->currvid) {
		pr_debug("ph3 failed, currvid 0x%x\n", data->currvid);
		return 1;
	}

	if (savefid != data->currfid) {
		pr_debug("ph3 failed, currfid changed 0x%x\n",
			data->currfid);
		return 1;
	}

	pr_debug("ph3 complete, currfid 0x%x, currvid 0x%x\n",
		data->currfid, data->currvid);

	return 0;
}
static void check_supported_cpu(void *_rc)
{
	u32 eax, ebx, ecx, edx;
	int *rc = _rc;

	*rc = -ENODEV;

	if (__this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_AMD)
		return;

	eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
	if (((eax & CPUID_XFAM) != CPUID_XFAM_K8) &&
	    ((eax & CPUID_XFAM) < CPUID_XFAM_10H))
		return;

	if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
		if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
		    ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
			printk(KERN_INFO PFX
				"Processor cpuid %x not supported\n", eax);
			return;
		}

		eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES);
		if (eax < CPUID_FREQ_VOLT_CAPABILITIES) {
			printk(KERN_INFO PFX
			       "No frequency change capabilities detected\n");
			return;
		}

		cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
		if ((edx & P_STATE_TRANSITION_CAPABLE)
			!= P_STATE_TRANSITION_CAPABLE) {
			printk(KERN_INFO PFX
				"Power state transitions not supported\n");
			return;
		}
	} else { /* must be a HW Pstate capable processor */
		cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
		if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE)
			cpu_family = CPU_HW_PSTATE;
		else
			return;
	}

	*rc = 0;
}
static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
		u8 maxvid)
{
	unsigned int j;
	u8 lastfid = 0xff;

	for (j = 0; j < data->numps; j++) {
		if (pst[j].vid > LEAST_VID) {
			printk(KERN_ERR FW_BUG PFX "vid %d invalid : 0x%x\n",
			       j, pst[j].vid);
			return -EINVAL;
		}
		if (pst[j].vid < data->rvo) {
			/* vid + rvo >= 0 */
			printk(KERN_ERR FW_BUG PFX "0 vid exceeded with pstate"
			       " %d\n", j);
			return -ENODEV;
		}
		if (pst[j].vid < maxvid + data->rvo) {
			/* vid + rvo >= maxvid */
			printk(KERN_ERR FW_BUG PFX "maxvid exceeded with pstate"
			       " %d\n", j);
			return -ENODEV;
		}
		if (pst[j].fid > MAX_FID) {
			printk(KERN_ERR FW_BUG PFX "maxfid exceeded with pstate"
			       " %d\n", j);
			return -ENODEV;
		}
		if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) {
			/* Only first fid is allowed to be in "low" range */
			printk(KERN_ERR FW_BUG PFX "two low fids - %d : "
			       "0x%x\n", j, pst[j].fid);
			return -EINVAL;
		}
		if (pst[j].fid < lastfid)
			lastfid = pst[j].fid;
	}
	if (lastfid & 1) {
		printk(KERN_ERR FW_BUG PFX "lastfid invalid\n");
		return -EINVAL;
	}
	if (lastfid > LO_FID_TABLE_TOP)
		printk(KERN_INFO FW_BUG PFX
			"first fid not from lo freq table\n");

	return 0;
}
static void invalidate_entry(struct cpufreq_frequency_table *powernow_table,
		unsigned int entry)
{
	powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
}
static void print_basics(struct powernow_k8_data *data)
{
	int j;

	for (j = 0; j < data->numps; j++) {
		if (data->powernow_table[j].frequency !=
				CPUFREQ_ENTRY_INVALID) {
			if (cpu_family == CPU_HW_PSTATE) {
				printk(KERN_INFO PFX
					"   %d : pstate %d (%d MHz)\n", j,
					data->powernow_table[j].index,
					data->powernow_table[j].frequency/1000);
			} else {
				printk(KERN_INFO PFX
					"fid 0x%x (%d MHz), vid 0x%x\n",
					data->powernow_table[j].index & 0xff,
					data->powernow_table[j].frequency/1000,
					data->powernow_table[j].index >> 8);
			}
		}
	}
	if (data->batps)
		printk(KERN_INFO PFX "Only %d pstates on battery\n",
				data->batps);
}
static u32 freq_from_fid_did(u32 fid, u32 did)
{
	u32 mhz = 0;

	if (boot_cpu_data.x86 == 0x10)
		mhz = (100 * (fid + 0x10)) >> did;
	else if (boot_cpu_data.x86 == 0x11)
		mhz = (100 * (fid + 8)) >> did;
	else
		BUG();

	return mhz * 1000;
}
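/*
 * Worked example for freq_from_fid_did(): on family 10h, fid 0x0a with
 * did 0 gives 100 * (0x0a + 0x10) = 2600 MHz; the same fid with did 1 is
 * halved to 1300 MHz. On family 11h the fid offset is 8 instead of 0x10.
 */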
static int fill_powernow_table(struct powernow_k8_data *data,
		struct pst_s *pst, u8 maxvid)
{
	struct cpufreq_frequency_table *powernow_table;
	unsigned int j;

	if (data->batps) {
		/* use ACPI support to get full speed on mains power */
		printk(KERN_WARNING PFX
			"Only %d pstates usable (use ACPI driver for full "
			"range)\n", data->batps);
		data->numps = data->batps;
	}

	for (j = 1; j < data->numps; j++) {
		if (pst[j-1].fid >= pst[j].fid) {
			printk(KERN_ERR PFX "PST out of sequence\n");
			return -EINVAL;
		}
	}

	if (data->numps < 2) {
		printk(KERN_ERR PFX "no p states to transition\n");
		return -ENODEV;
	}

	if (check_pst_table(data, pst, maxvid))
		return -EINVAL;

	powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
		* (data->numps + 1)), GFP_KERNEL);
	if (!powernow_table) {
		printk(KERN_ERR PFX "powernow_table memory alloc failure\n");
		return -ENOMEM;
	}

	for (j = 0; j < data->numps; j++) {
		int freq;

		powernow_table[j].index = pst[j].fid; /* lower 8 bits */
		powernow_table[j].index |= (pst[j].vid << 8); /* upper 8 bits */
		freq = find_khz_freq_from_fid(pst[j].fid);
		powernow_table[j].frequency = freq;
	}
	powernow_table[data->numps].frequency = CPUFREQ_TABLE_END;
	powernow_table[data->numps].index = 0;

	if (query_current_values_with_pending_wait(data)) {
		kfree(powernow_table);
		return -EIO;
	}

	pr_debug("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
	data->powernow_table = powernow_table;
	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
		print_basics(data);

	for (j = 0; j < data->numps; j++)
		if ((pst[j].fid == data->currfid) &&
		    (pst[j].vid == data->currvid))
			return 0;

	pr_debug("currfid/vid do not match PST, ignoring\n");
	return 0;
}
/* Find and validate the PSB/PST table in BIOS. */
static int find_psb_table(struct powernow_k8_data *data)
{
	struct psb_s *psb;
	unsigned int i;
	u32 mvs;
	u8 maxvid;
	u32 cpst = 0;
	u32 thiscpuid;

	for (i = 0xc0000; i < 0xffff0; i += 0x10) {
		/* Scan BIOS looking for the signature. */
		/* It can not be at ffff0 - it is too big. */

		psb = phys_to_virt(i);
		if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0)
			continue;

		pr_debug("found PSB header at 0x%p\n", psb);

		pr_debug("table vers: 0x%x\n", psb->tableversion);
		if (psb->tableversion != PSB_VERSION_1_4) {
			printk(KERN_ERR FW_BUG PFX "PSB table is not v1.4\n");
			return -ENODEV;
		}

		pr_debug("flags: 0x%x\n", psb->flags1);
		if (psb->flags1) {
			printk(KERN_ERR FW_BUG PFX "unknown flags\n");
			return -ENODEV;
		}

		data->vstable = psb->vstable;
		pr_debug("voltage stabilization time: %d(*20us)\n",
				data->vstable);

		pr_debug("flags2: 0x%x\n", psb->flags2);
		data->rvo = psb->flags2 & 3;
		data->irt = ((psb->flags2) >> 2) & 3;
		mvs = ((psb->flags2) >> 4) & 3;
		data->vidmvs = 1 << mvs;
		data->batps = ((psb->flags2) >> 6) & 3;

		pr_debug("ramp voltage offset: %d\n", data->rvo);
		pr_debug("isochronous relief time: %d\n", data->irt);
		pr_debug("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs);

		pr_debug("numpst: 0x%x\n", psb->num_tables);
		cpst = psb->num_tables;
		if ((psb->cpuid == 0x00000fc0) ||
		    (psb->cpuid == 0x00000fe0)) {
			thiscpuid = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
			if ((thiscpuid == 0x00000fc0) ||
			    (thiscpuid == 0x00000fe0))
				cpst = 1;
		}
		if (cpst != 1) {
			printk(KERN_ERR FW_BUG PFX "numpst must be 1\n");
			return -ENODEV;
		}

		data->plllock = psb->plllocktime;
		pr_debug("plllocktime: 0x%x (units 1us)\n", psb->plllocktime);
		pr_debug("maxfid: 0x%x\n", psb->maxfid);
		pr_debug("maxvid: 0x%x\n", psb->maxvid);
		maxvid = psb->maxvid;

		data->numps = psb->numps;
		pr_debug("numpstates: 0x%x\n", data->numps);
		return fill_powernow_table(data,
				(struct pst_s *)(psb+1), maxvid);
	}
	/*
	 * If you see this message, complain to BIOS manufacturer. If
	 * he tells you "we do not support Linux" or some similar
	 * nonsense, remember that Windows 2000 uses the same legacy
	 * mechanism that the old Linux PSB driver uses. Tell them it
	 * is broken with Windows 2000.
	 *
	 * The reference to the AMD documentation is chapter 9 in the
	 * BIOS and Kernel Developer's Guide, which is available on
	 * www.amd.com
	 */
	printk(KERN_ERR FW_BUG PFX "No PSB or ACPI _PSS objects\n");
	printk(KERN_ERR PFX "Make sure that your BIOS is up to date"
		" and Cool'N'Quiet support is enabled in BIOS setup\n");
	return -ENODEV;
}
static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
		unsigned int index)
{
	u64 control;

	if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
		return;

	control = data->acpi_data.states[index].control;
	data->irt = (control >> IRT_SHIFT) & IRT_MASK;
	data->rvo = (control >> RVO_SHIFT) & RVO_MASK;
	data->exttype = (control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
	data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK;
	data->vidmvs = 1 << ((control >> MVS_SHIFT) & MVS_MASK);
	data->vstable = (control >> VST_SHIFT) & VST_MASK;
}
static int fill_powernow_table_pstate(struct powernow_k8_data *data,
		struct cpufreq_frequency_table *powernow_table);
static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
		struct cpufreq_frequency_table *powernow_table);

static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
{
	struct cpufreq_frequency_table *powernow_table;
	int ret_val = -ENODEV;
	u64 control, status;

	if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
		pr_debug("register performance failed: bad ACPI data\n");
		return -EIO;
	}

	/* verify the data contained in the ACPI structures */
	if (data->acpi_data.state_count <= 1) {
		pr_debug("No ACPI P-States\n");
		goto err_out;
	}

	control = data->acpi_data.control_register.space_id;
	status = data->acpi_data.status_register.space_id;

	if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
	    (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
		pr_debug("Invalid control/status registers (%llx - %llx)\n",
			control, status);
		goto err_out;
	}

	/* fill in data->powernow_table */
	powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
		* (data->acpi_data.state_count + 1)), GFP_KERNEL);
	if (!powernow_table) {
		pr_debug("powernow_table memory alloc failure\n");
		goto err_out;
	}

	/* fill in data */
	data->numps = data->acpi_data.state_count;
	powernow_k8_acpi_pst_values(data, 0);

	if (cpu_family == CPU_HW_PSTATE)
		ret_val = fill_powernow_table_pstate(data, powernow_table);
	else
		ret_val = fill_powernow_table_fidvid(data, powernow_table);
	if (ret_val)
		goto err_out_mem;

	powernow_table[data->acpi_data.state_count].frequency =
		CPUFREQ_TABLE_END;
	powernow_table[data->acpi_data.state_count].index = 0;
	data->powernow_table = powernow_table;

	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
		print_basics(data);

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
		printk(KERN_ERR PFX
				"unable to alloc powernow_k8_data cpumask\n");
		ret_val = -ENOMEM;
		goto err_out_mem;
	}

	return 0;

err_out_mem:
	kfree(powernow_table);

err_out:
	acpi_processor_unregister_performance(&data->acpi_data, data->cpu);

	/* data->acpi_data.state_count informs us at ->exit()
	 * whether ACPI was used */
	data->acpi_data.state_count = 0;

	return ret_val;
}
static int fill_powernow_table_pstate(struct powernow_k8_data *data,
		struct cpufreq_frequency_table *powernow_table)
{
	int i;
	u32 hi = 0, lo = 0;

	rdmsr(MSR_PSTATE_CUR_LIMIT, lo, hi);
	data->max_hw_pstate = (lo & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;

	for (i = 0; i < data->acpi_data.state_count; i++) {
		u32 index;

		index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
		if (index > data->max_hw_pstate) {
			printk(KERN_ERR PFX "invalid pstate %d - "
					"bad value %d.\n", i, index);
			printk(KERN_ERR PFX "Please report to BIOS "
					"manufacturer\n");
			invalidate_entry(powernow_table, i);
			continue;
		}

		ps_to_as[index] = i;

		/* Frequency may be rounded for these */
		if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
				 || boot_cpu_data.x86 == 0x11) {

			rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
			if (!(hi & HW_PSTATE_VALID_MASK)) {
				pr_debug("invalid pstate %d, ignoring\n", index);
				invalidate_entry(powernow_table, i);
				continue;
			}

			powernow_table[i].frequency =
				freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
		} else
			powernow_table[i].frequency =
				data->acpi_data.states[i].core_frequency * 1000;

		powernow_table[i].index = index;
	}
	return 0;
}
static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
		struct cpufreq_frequency_table *powernow_table)
{
	int i;

	for (i = 0; i < data->acpi_data.state_count; i++) {
		u32 fid;
		u32 vid;
		u32 freq, index;
		u64 status, control;

		if (data->exttype) {
			status = data->acpi_data.states[i].status;
			fid = status & EXT_FID_MASK;
			vid = (status >> VID_SHIFT) & EXT_VID_MASK;
		} else {
			control = data->acpi_data.states[i].control;
			fid = control & FID_MASK;
			vid = (control >> VID_SHIFT) & VID_MASK;
		}

		pr_debug("   %d : fid 0x%x, vid 0x%x\n", i, fid, vid);

		index = fid | (vid<<8);
		powernow_table[i].index = index;

		freq = find_khz_freq_from_fid(fid);
		powernow_table[i].frequency = freq;

		/* verify frequency is OK */
		if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) {
			pr_debug("invalid freq %u kHz, ignoring\n", freq);
			invalidate_entry(powernow_table, i);
			continue;
		}

		/* verify voltage is OK -
		 * BIOSs are using "off" to indicate invalid */
		if (vid == VID_OFF) {
			pr_debug("invalid vid %u, ignoring\n", vid);
			invalidate_entry(powernow_table, i);
			continue;
		}

		if (freq != (data->acpi_data.states[i].core_frequency * 1000)) {
			printk(KERN_INFO PFX "invalid freq entries "
				"%u kHz vs. %u kHz\n", freq,
				(unsigned int)
				(data->acpi_data.states[i].core_frequency
				 * 1000));
			invalidate_entry(powernow_table, i);
			continue;
		}
	}
	return 0;
}
static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
{
	if (data->acpi_data.state_count)
		acpi_processor_unregister_performance(&data->acpi_data,
				data->cpu);
	free_cpumask_var(data->acpi_data.shared_cpu_map);
}
static int get_transition_latency(struct powernow_k8_data *data)
{
	int max_latency = 0;
	int i;

	for (i = 0; i < data->acpi_data.state_count; i++) {
		int cur_latency = data->acpi_data.states[i].transition_latency
			+ data->acpi_data.states[i].bus_master_latency;
		if (cur_latency > max_latency)
			max_latency = cur_latency;
	}
	if (max_latency == 0) {
		/*
		 * Fam 11h and later may return 0 as transition latency. This
		 * is intended and means "very fast". While cpufreq core and
		 * governors currently can handle that gracefully, better set
		 * it to 1 to avoid problems in the future.
		 */
		if (boot_cpu_data.x86 < 0x11)
			printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
				"latency\n");
		max_latency = 1;
	}
	/* value in usecs, needs to be in nanoseconds */
	return 1000 * max_latency;
}
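/*
 * Worked example: ACPI states reporting transition/bus-master latency pairs
 * of (100, 20) and (80, 10) us give max_latency 120, so this returns
 * 120000 ns for cpufreq's transition_latency field.
 */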
/* Take a frequency, and issue the fid/vid transition command */
static int transition_frequency_fidvid(struct powernow_k8_data *data,
		unsigned int index)
{
	u32 fid = 0;
	u32 vid = 0;
	int res, i;
	struct cpufreq_freqs freqs;

	pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);

	/* fid/vid correctness check for k8 */
	/* fid are the lower 8 bits of the index we stored into
	 * the cpufreq frequency table in find_psb_table, vid
	 * are the upper 8 bits.
	 */
	fid = data->powernow_table[index].index & 0xFF;
	vid = (data->powernow_table[index].index & 0xFF00) >> 8;

	pr_debug("table matched fid 0x%x, giving vid 0x%x\n", fid, vid);

	if (query_current_values_with_pending_wait(data))
		return 1;

	if ((data->currvid == vid) && (data->currfid == fid)) {
		pr_debug("target matches current values (fid 0x%x, vid 0x%x)\n",
			fid, vid);
		return 0;
	}

	pr_debug("cpu %d, changing to fid 0x%x, vid 0x%x\n",
		smp_processor_id(), fid, vid);
	freqs.old = find_khz_freq_from_fid(data->currfid);
	freqs.new = find_khz_freq_from_fid(fid);

	for_each_cpu(i, data->available_cores) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	}

	res = transition_fid_vid(data, fid, vid);
	if (res)
		return res;

	freqs.new = find_khz_freq_from_fid(data->currfid);

	for_each_cpu(i, data->available_cores) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}
	return res;
}
/* Take a frequency, and issue the hardware pstate transition command */
static int transition_frequency_pstate(struct powernow_k8_data *data,
		unsigned int index)
{
	u32 pstate = 0;
	int res, i;
	struct cpufreq_freqs freqs;

	pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);

	/* get MSR index for hardware pstate transition */
	pstate = index & HW_PSTATE_MASK;
	if (pstate > data->max_hw_pstate)
		return -EINVAL;

	freqs.old = find_khz_freq_from_pstate(data->powernow_table,
			data->currpstate);
	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);

	for_each_cpu(i, data->available_cores) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	}

	res = transition_pstate(data, pstate);
	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);

	for_each_cpu(i, data->available_cores) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}
	return res;
}
struct powernowk8_target_arg {
	struct cpufreq_policy		*pol;
	unsigned			targfreq;
	unsigned			relation;
};

static long powernowk8_target_fn(void *arg)
{
	struct powernowk8_target_arg *pta = arg;
	struct cpufreq_policy *pol = pta->pol;
	unsigned targfreq = pta->targfreq;
	unsigned relation = pta->relation;
	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
	u32 checkfid;
	u32 checkvid;
	unsigned int newstate;
	int ret;

	if (!data)
		return -EINVAL;

	checkfid = data->currfid;
	checkvid = data->currvid;

	if (pending_bit_stuck()) {
		printk(KERN_ERR PFX "failing targ, change pending bit set\n");
		return -EIO;
	}

	pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
		pol->cpu, targfreq, pol->min, pol->max, relation);

	if (query_current_values_with_pending_wait(data))
		return -EIO;

	if (cpu_family != CPU_HW_PSTATE) {
		pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
			data->currfid, data->currvid);

		if ((checkvid != data->currvid) ||
		    (checkfid != data->currfid)) {
			printk(KERN_INFO PFX
				"error - out of sync, fid 0x%x 0x%x, "
				"vid 0x%x 0x%x\n",
				checkfid, data->currfid,
				checkvid, data->currvid);
		}
	}

	if (cpufreq_frequency_table_target(pol, data->powernow_table,
				targfreq, relation, &newstate))
		return -EIO;

	mutex_lock(&fidvid_mutex);

	powernow_k8_acpi_pst_values(data, newstate);

	if (cpu_family == CPU_HW_PSTATE)
		ret = transition_frequency_pstate(data,
			data->powernow_table[newstate].index);
	else
		ret = transition_frequency_fidvid(data, newstate);
	if (ret) {
		printk(KERN_ERR PFX "transition frequency failed\n");
		mutex_unlock(&fidvid_mutex);
		return 1;
	}
	mutex_unlock(&fidvid_mutex);

	if (cpu_family == CPU_HW_PSTATE)
		pol->cur = find_khz_freq_from_pstate(data->powernow_table,
				data->powernow_table[newstate].index);
	else
		pol->cur = find_khz_freq_from_fid(data->currfid);

	return 0;
}
/* Driver entry point to switch to the target frequency */
static int powernowk8_target(struct cpufreq_policy *pol,
		unsigned targfreq, unsigned relation)
{
	struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq,
					     .relation = relation };

	return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
}
/* Driver entry point to verify the policy and range of frequencies */
static int powernowk8_verify(struct cpufreq_policy *pol)
{
	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);

	if (!data)
		return -EINVAL;

	return cpufreq_frequency_table_verify(pol, data->powernow_table);
}
struct init_on_cpu {
	struct powernow_k8_data *data;
	int rc;
};

static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
{
	struct init_on_cpu *init_on_cpu = _init_on_cpu;

	if (pending_bit_stuck()) {
		printk(KERN_ERR PFX "failing init, change pending bit set\n");
		init_on_cpu->rc = -ENODEV;
		return;
	}

	if (query_current_values_with_pending_wait(init_on_cpu->data)) {
		init_on_cpu->rc = -ENODEV;
		return;
	}

	if (cpu_family == CPU_OPTERON)
		fidvid_msr_init();

	init_on_cpu->rc = 0;
}
/* per CPU init entry point to the driver */
static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
{
	static const char ACPI_PSS_BIOS_BUG_MSG[] =
		KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
		FW_BUG PFX "Try again with latest BIOS.\n";
	struct powernow_k8_data *data;
	struct init_on_cpu init_on_cpu;
	int rc;
	struct cpuinfo_x86 *c = &cpu_data(pol->cpu);

	if (!cpu_online(pol->cpu))
		return -ENODEV;

	smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
	if (rc)
		return -ENODEV;

	data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL);
	if (!data) {
		printk(KERN_ERR PFX "unable to alloc powernow_k8_data\n");
		return -ENOMEM;
	}

	data->cpu = pol->cpu;
	data->currpstate = HW_PSTATE_INVALID;

	if (powernow_k8_cpu_init_acpi(data)) {
		/*
		 * Use the PSB BIOS structure. This is only available on
		 * an UP version, and is deprecated by AMD.
		 */
		if (num_online_cpus() != 1) {
			printk_once(ACPI_PSS_BIOS_BUG_MSG);
			goto err_out;
		}
		if (pol->cpu != 0) {
			printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for "
			       "CPU other than CPU0. Complain to your BIOS "
			       "vendor.\n");
			goto err_out;
		}
		rc = find_psb_table(data);
		if (rc)
			goto err_out;

		/* Take a crude guess here.
		 * That guess was in microseconds, so multiply with 1000 */
		pol->cpuinfo.transition_latency = (
			 ((data->rvo + 8) * data->vstable * VST_UNITS_20US) +
			 ((1 << data->irt) * 30)) * 1000;
	} else /* ACPI _PSS objects available */
		pol->cpuinfo.transition_latency = get_transition_latency(data);

	/* only run on specific CPU from here on */
	init_on_cpu.data = data;
	smp_call_function_single(data->cpu, powernowk8_cpu_init_on_cpu,
				 &init_on_cpu, 1);
	rc = init_on_cpu.rc;
	if (rc != 0)
		goto err_out_exit_acpi;

	if (cpu_family == CPU_HW_PSTATE)
		cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
	else
		cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
	data->available_cores = pol->cpus;

	if (cpu_family == CPU_HW_PSTATE)
		pol->cur = find_khz_freq_from_pstate(data->powernow_table,
				data->currpstate);
	else
		pol->cur = find_khz_freq_from_fid(data->currfid);
	pr_debug("policy current frequency %d kHz\n", pol->cur);

	/* min/max the cpu is capable of */
	if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) {
		printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n");
		powernow_k8_cpu_exit_acpi(data);
		kfree(data->powernow_table);
		kfree(data);
		return -EINVAL;
	}

	/* Check for APERF/MPERF support in hardware */
	if (cpu_has(c, X86_FEATURE_APERFMPERF))
		cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;

	cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);

	if (cpu_family == CPU_HW_PSTATE)
		pr_debug("cpu_init done, current pstate 0x%x\n",
				data->currpstate);
	else
		pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
				data->currfid, data->currvid);

	per_cpu(powernow_data, pol->cpu) = data;

	return 0;

err_out_exit_acpi:
	powernow_k8_cpu_exit_acpi(data);

err_out:
	kfree(data);
	return -ENODEV;
}
static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
{
	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);

	if (!data)
		return -EINVAL;

	powernow_k8_cpu_exit_acpi(data);

	cpufreq_frequency_table_put_attr(pol->cpu);

	kfree(data->powernow_table);
	kfree(data);
	per_cpu(powernow_data, pol->cpu) = NULL;

	return 0;
}
static void query_values_on_cpu(void *_err)
{
	int *err = _err;
	struct powernow_k8_data *data = __this_cpu_read(powernow_data);

	*err = query_current_values_with_pending_wait(data);
}

static unsigned int powernowk8_get(unsigned int cpu)
{
	struct powernow_k8_data *data = per_cpu(powernow_data, cpu);
	unsigned int khz = 0;
	int err;

	if (!data)
		return 0;

	smp_call_function_single(cpu, query_values_on_cpu, &err, true);
	if (err)
		goto out;

	if (cpu_family == CPU_HW_PSTATE)
		khz = find_khz_freq_from_pstate(data->powernow_table,
				data->currpstate);
	else
		khz = find_khz_freq_from_fid(data->currfid);

out:
	return khz;
}
static void _cpb_toggle_msrs(bool t)
{
	int cpu;

	get_online_cpus();

	rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);

	for_each_cpu(cpu, cpu_online_mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		if (t)
			reg->l &= ~BIT(25);
		else
			reg->l |= BIT(25);
	}
	wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);

	put_online_cpus();
}
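/*
 * Note on the MSR bit used above: bit 25 of MSR_K7_HWCR is the
 * boost-disable bit on these processors (per the family 10h BKDG), so
 * enabling boost means clearing the bit on every online core and disabling
 * boost means setting it.
 */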
/*
 * Switch on/off core performance boosting.
 *
 * 0=disable
 * 1=enable.
 */
static void cpb_toggle(bool t)
{
	if (!cpb_capable)
		return;

	if (t && !cpb_enabled) {
		cpb_enabled = true;
		_cpb_toggle_msrs(t);
		printk(KERN_INFO PFX "Core Boosting enabled.\n");
	} else if (!t && cpb_enabled) {
		cpb_enabled = false;
		_cpb_toggle_msrs(t);
		printk(KERN_INFO PFX "Core Boosting disabled.\n");
	}
}
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
				 size_t count)
{
	int ret;
	unsigned long val = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (!ret && (val == 0 || val == 1) && cpb_capable)
		cpb_toggle(val);
	else
		return -EINVAL;

	return count;
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", cpb_enabled);
}
#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(cpb);

static struct freq_attr *powernow_k8_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpb,
	NULL,
};
static struct cpufreq_driver cpufreq_amd64_driver = {
	.verify		= powernowk8_verify,
	.target		= powernowk8_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= powernowk8_cpu_init,
	.exit		= __devexit_p(powernowk8_cpu_exit),
	.get		= powernowk8_get,
	.name		= "powernow-k8",
	.owner		= THIS_MODULE,
	.attr		= powernow_k8_attr,
};
/*
 * Clear the boost-disable flag on the CPU_DOWN path so that this cpu
 * cannot block the remaining ones from boosting. On the CPU_UP path we
 * simply keep the boost-disable flag in sync with the current global
 * state.
 */
static int cpb_notify(struct notifier_block *nb, unsigned long action,
		      void *hcpu)
{
	unsigned cpu = (long)hcpu;
	u32 lo, hi;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:

		if (!cpb_enabled) {
			rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
			lo |= BIT(25);
			wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
		}
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		lo &= ~BIT(25);
		wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block cpb_nb = {
	.notifier_call		= cpb_notify,
};
/* driver entry point for init */
static int __cpuinit powernowk8_init(void)
{
	unsigned int i, supported_cpus = 0, cpu;
	int rv;

	for_each_online_cpu(i) {
		int rc;
		smp_call_function_single(i, check_supported_cpu, &rc, 1);
		if (rc == 0)
			supported_cpus++;
	}

	if (supported_cpus != num_online_cpus())
		return -ENODEV;

	printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
		num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);

	if (boot_cpu_has(X86_FEATURE_CPB)) {

		cpb_capable = true;

		msrs = msrs_alloc();
		if (!msrs) {
			printk(KERN_ERR "%s: Error allocating msrs!\n",
			       __func__);
			return -ENOMEM;
		}

		register_cpu_notifier(&cpb_nb);

		rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);

		for_each_cpu(cpu, cpu_online_mask) {
			struct msr *reg = per_cpu_ptr(msrs, cpu);
			/* boost is enabled if the disable bit is clear anywhere */
			cpb_enabled |= !(reg->l & BIT(25));
		}

		printk(KERN_INFO PFX "Core Performance Boosting: %s.\n",
			(cpb_enabled ? "on" : "off"));
	}

	rv = cpufreq_register_driver(&cpufreq_amd64_driver);
	if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
		unregister_cpu_notifier(&cpb_nb);
		msrs_free(msrs);
		msrs = NULL;
	}
	return rv;
}
/* driver entry point for term */
static void __exit powernowk8_exit(void)
{
	pr_debug("exit\n");

	if (boot_cpu_has(X86_FEATURE_CPB)) {
		msrs_free(msrs);
		msrs = NULL;

		unregister_cpu_notifier(&cpb_nb);
	}

	cpufreq_unregister_driver(&cpufreq_amd64_driver);
}

MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and "
		"Mark Langsdorf <mark.langsdorf@amd.com>");
MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver.");
MODULE_LICENSE("GPL");

late_initcall(powernowk8_init);
module_exit(powernowk8_exit);