/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/latency.h>
#include <linux/clockchips.h>
/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#define ACPI_PROCESSOR_COMPONENT	0x01000000
#define ACPI_PROCESSOR_CLASS		"processor"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
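/*
 * The ACPI PM timer ticks at PM_TIMER_FREQUENCY = 3579545 Hz, i.e. roughly
 * 3.58 ticks per microsecond, so for example
 * US_TO_PM_TIMER_TICKS(100) = (100 * (3579545/1000)) / 1000 = 357 ticks.
 * The 4-tick C2/C3_OVERHEAD is the ~1us entry/exit cost subtracted from
 * each measured C-state residency below.
 */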
static void (*pm_idle_save) (void) __read_mostly;
module_param(max_cstate, uint, 0644);

static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);

/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 * 100 HZ: 0x0000000F: 4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
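/*
 * Worked example: with HZ=250, the default mask is
 * (1U << (250 / 25)) - 1 = 0x3FF, i.e. ten jiffies = 40ms of history.
 * Any bus-master activity recorded within that window keeps
 * (bm_activity & threshold.bm) non-zero and holds off promotion to C3;
 * loading with a shorter bm_history mask makes C3 entry more aggressive.
 */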
/* --------------------------------------------------------------------------
                                Power Management
   -------------------------------------------------------------------------- */
/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}
/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void *)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};
static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}
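/*
 * Example of the wraparound handling above: with a 24-bit PM timer,
 * t1 = 0x00FFFFF0 and t2 = 0x00000010 give
 * ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF = 0x1F, i.e. 31 ticks across the
 * rollover (one tick short of the true 32, which is negligible at
 * ~3.58 ticks per microsecond).
 */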
static void
acpi_processor_power_activate(struct acpi_processor *pr,
			      struct acpi_processor_cx *new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;

	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state. */
	if (old) {
		switch (old->type) {
		case ACPI_STATE_C3:
			/* Disable bus master reload */
			if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
				acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
			break;
		}
	}

	/* Prepare to use new state. */
	switch (new->type) {
	case ACPI_STATE_C3:
		/* Enable bus master reload */
		if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		break;
	}

	pr->power.state = new;

	return;
}
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched())
		safe_halt();
	current_thread_info()->status |= TS_POLLING;
}
static atomic_t c3_cpu_count;

/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
	if (cstate->space_id == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cstate);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cstate->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}
#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
#ifdef CONFIG_GENERIC_CLOCKEVENTS
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
#else
	cpumask_t mask = cpumask_of_cpu(pr->id);

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
	else
		on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
#endif
}
/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
#ifdef CONFIG_GENERIC_CLOCKEVENTS
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
#endif
}
#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}

#endif
static void acpi_processor_idle(void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	pr = processors[smp_processor_id()];
	if (!pr)
		return;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx) {
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();
		return;
	}
	/*
	 * Check BM Activity
	 * -----------------
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 31)
			diff = 31;

		pr->power.bm_activity <<= diff;

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
		if (bm_status) {
			pr->power.bm_activity |= 0x1;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity |= 0x1;
		}

		pr->power.bm_check_timestamp = jiffies;

		/*
		 * If bus mastering is or was active this jiffy, demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fallback to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if ((pr->power.bm_activity & 0x1) &&
		    cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
	 * detection phase, to work cleanly with logical CPU hotplug.
	 */
	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		cx = &pr->power.states[ACPI_STATE_C1];
#endif
	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
		if (need_resched()) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return;
		}
	}

	switch (cx->type) {
	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();

		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 */
		sleep_ticks = 0xFFFFFFFF;
		break;
	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C2 */
		acpi_state_timer_broadcast(pr, cx, 1);
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#ifdef CONFIG_GENERIC_TIME
		/* TSC halts in C2, so notify users */
		mark_tsc_unstable("possible TSC halt in C2");
#endif
		/* Re-enable interrupts */
		local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks =
		    ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;
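	/*
	 * Note on the accounting above: the raw PM-timer delta is charged
	 * for the state's entry/exit latency (latency_ticks) plus the fixed
	 * C2_OVERHEAD, so sleep_ticks approximates time genuinely spent
	 * asleep.  E.g. a 500-tick delta with latency_ticks = 178 leaves
	 * 500 - 178 - 4 = 318 ticks, about 89us of residency.  The C3 case
	 * below is accounted the same way with C3_OVERHEAD.
	 */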
	case ACPI_STATE_C3:
		/*
		 * disable bus master
		 * bm_check implies we need ARB_DIS
		 * !bm_check implies we need cache flush
		 * bm_control implies whether we can do ARB_DIS
		 *
		 * That leaves a case where bm_check is set and bm_control is
		 * not set. In that case we cannot do much, we enter C3
		 * without doing anything.
		 */
		if (pr->flags.bm_check && pr->flags.bm_control) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
			}
		} else if (!pr->flags.bm_check) {
			/* SMP with no shared cache... Invalidate cache */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C3 */
		acpi_state_timer_broadcast(pr, cx, 1);
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		if (pr->flags.bm_check && pr->flags.bm_control) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		}

#ifdef CONFIG_GENERIC_TIME
		/* TSC halts in C3, so notify users */
		mark_tsc_unstable("TSC halts in C3");
#endif
		/* Re-enable interrupts */
		local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks =
		    ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	default:
		local_irq_enable();
		return;
	}
	cx->usage++;
	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
		cx->time += sleep_ticks;

	next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
		next_state = cx;
		goto end;
	}
#endif
	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks &&
		    cx->promotion.state->latency <=
		    system_latency_constraint()) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!(pr->power.bm_activity &
					      cx->promotion.threshold.bm)) {
						next_state =
						    cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}
	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}
      end:
	/*
	 * Demote if current state exceeds max_cstate
	 * or if the latency of the current state is unacceptable
	 */
	if ((pr->power.state - pr->power.states) > max_cstate ||
	    pr->power.state->latency > system_latency_constraint()) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);

	return;
}
static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;

	if (!pr)
		return -EINVAL;

	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return -ENODEV;

	/* demotion */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return 0;
}
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.space_id = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
			    (pr->id, &cx, reg) == 0) {
				cx.space_id = ACPI_CSTATE_FFH;
			} else if (cx.type != ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * That is, we retain space_id of SYSTEM_IO for
				 * halt based C1.
				 * Otherwise, ignore this info and continue.
				 */
				continue;
			}
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}
static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);

	return;
}
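/*
 * latency_ticks caches the latency in PM-timer units so the idle loop can
 * compare it directly against raw timer deltas without a division; e.g. a
 * C2 latency of 50us gives US_TO_PM_TIMER_TICKS(50) = (50 * 3579) / 1000
 * = 178 ticks.
 */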
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag;

	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		/* bus mastering control is necessary */
		if (!pr->flags.bm_control) {
			/* In this case we enter C3 without bus mastering */
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "C3 support without bus mastering control\n"));
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);

	return;
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return result;

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/* Fall back to the default idle loop */
	pm_idle = pm_idle_save;
	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return result;
}
/* proc interface */

static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;

	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   system_latency_constraint());

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}
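/*
 * For reference, the generated /proc/acpi/processor/CPU0/power file looks
 * roughly like this (values are illustrative):
 *
 *   active state:            C2
 *   max_cstate:              C8
 *   bus master activity:     00000001
 *   maximum allowed latency: 2000000000 usec
 *   states:
 *       C1:                  type[C1] promotion[C2] demotion[--] latency[000] usage[00000042] duration[00000000000000000000]
 *      *C2:                  type[C2] promotion[--] demotion[C1] latency[095] usage[00104738] duration[00000000000173029214]
 */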
static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
					 unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 0, 1);
	return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
	.notifier_call = acpi_processor_latency_notify,
};
#endif
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
					struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
#ifdef CONFIG_SMP
		register_latency_notifier(&acpi_processor_latency_notifier);
#endif
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if ((pr->flags.power) && (!boot_option_idle_override)) {
		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");

		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
	}

	/* 'power' [R] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  S_IRUGO, acpi_device_dir(device));
	if (!entry)
		return -EIO;
	else {
		entry->proc_fops = &acpi_processor_power_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	pr->flags.power_setup_done = 1;

	return 0;
}
int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle).  Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
#ifdef CONFIG_SMP
		unregister_latency_notifier(&acpi_processor_latency_notifier);
#endif
	}

	return 0;
}