/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/latency.h>
/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("acpi_processor")
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
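/*
 * Worked example (illustrative, not in the original source): the ACPI PM
 * timer ticks at PM_TIMER_FREQUENCY = 3579545 Hz, i.e. ~3.579 ticks per
 * microsecond.  US_TO_PM_TIMER_TICKS(100) = (100 * 3579) / 1000 = 357
 * ticks for a 100us latency, and the fixed 4-tick C2/C3 overhead above
 * corresponds to roughly 1.1us.
 */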
static void (*pm_idle_save) (void) __read_mostly;
module_param(max_cstate, uint, 0644);

static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
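/*
 * Worked example (illustrative): at HZ=250, (1U << (250 / 25)) - 1 =
 * (1U << 10) - 1 = 0x3FF, i.e. 10 jiffies = 40ms of history.  For
 * HZ >= 800 the shift would reach or exceed 32 bits (undefined in C),
 * hence the fixed 0xFFFFFFFF mask covering all 32 history bits.
 */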
/* --------------------------------------------------------------------------
                                Power Management
   -------------------------------------------------------------------------- */
/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}
/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW")}, (void *)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};
static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}
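/*
 * Example (illustrative): with a 24-bit PM timer, t1 = 0x00FFFFF0 and
 * t2 = 0x00000010 means the counter wrapped; the middle branch yields
 * ((0x00FFFFFF - 0x00FFFFF0) + 0x10) & 0x00FFFFFF = 0x1F (31 ticks).
 */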
static void
acpi_processor_power_activate(struct acpi_processor *pr,
			      struct acpi_processor_cx *new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;

	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state. */
	if (old && old->type == ACPI_STATE_C3) {
		/* Disable bus master reload */
		if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/* Prepare to use new state. */
	if (new->type == ACPI_STATE_C3) {
		/* Enable bus master reload */
		if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
	}

	pr->power.state = new;
}
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched())
		safe_halt();
	current_thread_info()->status |= TS_POLLING;
}
static atomic_t c3_cpu_count;

/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
	if (cstate->space_id == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cstate);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cstate->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}
#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= ACPI_STATE_C2)
		pr->power.timer_broadcast_on_state = state;
}

static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
	cpumask_t mask = cpumask_of_cpu(pr->id);

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
	else
		on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
}

#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }

#endif
static void acpi_processor_idle(void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	pr = processors[smp_processor_id()];
	if (!pr)
		return;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx) {
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();
		return;
	}

	/*
	 * Check BM Activity
	 * -----------------
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 31)
			diff = 31;

		pr->power.bm_activity <<= diff;

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
		if (bm_status) {
			pr->power.bm_activity |= 0x1;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity |= 0x1;
		}

		pr->power.bm_check_timestamp = jiffies;

		/*
		 * If bus mastering is or was active this jiffy, demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fallback to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if ((pr->power.bm_activity & 0x1) &&
		    cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}
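	/*
	 * Worked example (illustrative, not in the original source): if 3
	 * jiffies passed since the last check, bm_activity is shifted left
	 * by 3 to age the old samples; a set BM_STS then ORs a 1 into bit 0.
	 * bm_activity thus keeps one bit of bus-master history per jiffy,
	 * matching the bm_history mask documented above.
	 */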
#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
	 * detection phase, to work cleanly with logical CPU hotplug.
	 */
	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		cx = &pr->power.states[ACPI_STATE_C1];
#endif

	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
		if (need_resched()) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return;
		}
	}

	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();

		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 */
		sleep_ticks = 0xFFFFFFFF;
		break;

	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C2 */
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#ifdef CONFIG_GENERIC_TIME
		/* TSC halts in C2, so notify users */
		mark_tsc_unstable();
#endif
		/* Re-enable interrupts */
		local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks =
		    ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
		break;
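	/*
	 * Example (illustrative): if ticks_elapsed(t1, t2) is 500 and this
	 * C2 state has latency_ticks = 357 (a 100us worst-case latency),
	 * the time credited as real sleep is 500 - 357 - C2_OVERHEAD(4) =
	 * 139 PM timer ticks, roughly 39us.
	 */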
	case ACPI_STATE_C3:

		if (pr->flags.bm_check) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
			}
		} else {
			/* SMP with no shared cache... Invalidate cache */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C3 */
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		if (pr->flags.bm_check) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		}

#ifdef CONFIG_GENERIC_TIME
		/* TSC halts in C3, so notify users */
		mark_tsc_unstable();
#endif
		/* Re-enable interrupts */
		local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks =
		    ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
		break;

	default:
		local_irq_enable();
		return;
	}

	cx->usage++;
	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
		cx->time += sleep_ticks;
	next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
		next_state = cx;
		goto end;
	}
#endif

	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks &&
		    cx->promotion.state->latency <= system_latency_constraint()) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!(pr->power.bm_activity & cx->
					      promotion.threshold.bm)) {
						next_state = cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}

	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}

      end:
	/*
	 * Demote if current state exceeds max_cstate
	 * or if the latency of the current state is unacceptable
	 */
	if ((pr->power.state - pr->power.states) > max_cstate ||
	    pr->power.state->latency > system_latency_constraint()) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);
}
static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;

	if (!pr)
		return -EINVAL;

	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return -ENODEV;

	/* demotion */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return 0;
}
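/*
 * Illustration of the defaults set above (not from the original source):
 * a single sleep shorter than latency_ticks demotes immediately
 * (demotion.threshold.count == 1), while promotion from C1 to C2 needs
 * 10 consecutive long sleeps and from C2 to C3 needs 4, with no
 * bus-master activity recorded in the bm_history window for C3.
 */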
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;

	return 0;
}
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count = 0;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.space_id = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
			    (pr->id, &cx, reg) == 0) {
				cx.space_id = ACPI_CSTATE_FFH;
			} else if (cx.type != ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * That is, we retain space_id of SYSTEM_IO for
				 * halt based C1.
				 * Otherwise, ignore this info and continue.
				 */
				continue;
			}
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}
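/*
 * For reference (per the ACPI specification, not shown in this file): each
 * _CST entry parsed above is a 4-element package of the form
 *   Package() { Register(<buffer>), Type(<int>), Latency(<int>), Power(<int>) }
 * preceded by a count element, which is why elements[0] is read as the
 * state count and elements[1..count] as the C-state descriptors.
 */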
static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);

	return;
}
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag;

	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		/* bus mastering control is necessary */
		if (!pr->flags.bm_control) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "C3 support requires bus mastering control\n"));
			return;
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);

	return;
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return result;

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/* Fall back to the default idle loop */
	pm_idle = pm_idle_save;
	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return result;
}
/* proc interface */

static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;

	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   system_latency_constraint());

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}
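/*
 * Illustrative output produced by the format strings above (values are
 * made up):
 *
 *   active state:            C2
 *   max_cstate:              C8
 *   bus master activity:     00000001
 *   maximum allowed latency: 2000000000 usec
 *   states:
 *      C1:                  type[C1] promotion[C2] demotion[--] latency[000] ...
 *     *C2:                  type[C2] promotion[C3] demotion[C1] latency[001] ...
 */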
static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
					 unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 0, 1);
	return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
	.notifier_call = acpi_processor_latency_notify,
};
#endif
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
					struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
#ifdef CONFIG_SMP
		register_latency_notifier(&acpi_processor_latency_notifier);
#endif
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if ((pr->flags.power) && (!boot_option_idle_override)) {
		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");

		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
	}

	/* 'power' [R] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  S_IRUGO, acpi_device_dir(device));
	if (!entry)
		return -EIO;
	else {
		entry->proc_fops = &acpi_processor_power_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	pr->flags.power_setup_done = 1;

	return 0;
}
int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle).  Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
#ifdef CONFIG_SMP
		unregister_latency_notifier(&acpi_processor_latency_notifier);
#endif
	}

	return 0;
}