2 * OMAP4 Power Management Routines
4 * Copyright (C) 2010-2011 Texas Instruments, Inc.
5 * Rajendra Nayak <rnayak@ti.com>
6 * Santosh Shilimkar <santosh.shilimkar@ti.com>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
14 #include <linux/suspend.h>
15 #include <linux/module.h>
16 #include <linux/list.h>
17 #include <linux/err.h>
18 #include <linux/slab.h>
19 #include <asm/system_misc.h>
22 #include "clockdomain.h"
23 #include "powerdomain.h"
/*
 * Per-powerdomain bookkeeping for the suspend path.
 * NOTE(review): this chunk is garbled — the "struct power_state {" opener
 * and the next_state/saved_state members referenced by omap4_pm_suspend()
 * are not visible here; verify against the full file.
 */
27 struct powerdomain
*pwrdm
;
/* Logic retention state saved on suspend entry, restored on resume. */
31 u32 saved_logic_state
;
/* Linkage into the global pwrst_list below. */
33 struct list_head node
;
/* List of tracked power domains; populated by pwrdms_setup(). */
36 static LIST_HEAD(pwrst_list
);
/*
 * omap4_pm_suspend - system-wide suspend handler for OMAP4.
 *
 * Saves each tracked powerdomain's programmed next-power and logic
 * retention states, programs the suspend target states, enters low
 * power on the calling (master) CPU, then restores the saved states
 * after wakeup.
 *
 * NOTE(review): this chunk is missing lines — braces, the declarations
 * of 'state' and 'ret', and the failure-detection logic that selects
 * between the pr_crit/pr_info messages below. Verify against the
 * complete file before relying on the exact control flow.
 */
39 static int omap4_pm_suspend(void)
41 struct power_state
*pwrst
;
/* Only the master CPU follows this path; others go via CPU hotplug. */
43 u32 cpu_id
= smp_processor_id();
45 /* Save current powerdomain state */
46 list_for_each_entry(pwrst
, &pwrst_list
, node
) {
47 pwrst
->saved_state
= pwrdm_read_next_pwrst(pwrst
->pwrdm
);
48 pwrst
->saved_logic_state
= pwrdm_read_logic_retst(pwrst
->pwrdm
);
51 /* Set targeted power domain states by suspend */
52 list_for_each_entry(pwrst
, &pwrst_list
, node
) {
53 omap_set_pwrdm_state(pwrst
->pwrdm
, pwrst
->next_state
);
/* Logic is forced OFF for the deepest retention during suspend. */
54 pwrdm_set_logic_retst(pwrst
->pwrdm
, PWRDM_POWER_OFF
);
58 * For MPUSS to hit power domain retention(CSWR or OSWR),
59 * CPU0 and CPU1 power domains need to be in OFF or DORMANT state,
60 * since CPU power domain CSWR is not supported by hardware
61 * Only master CPU follows suspend path. All other CPUs follow
62 * CPU hotplug path in system wide suspend. On OMAP4, CPU power
63 * domain CSWR is not supported by hardware.
64 * More details can be found in OMAP4430 TRM section 4.3.4.2.
66 omap4_enter_lowpower(cpu_id
, PWRDM_POWER_OFF
);
68 /* Restore next powerdomain state */
69 list_for_each_entry(pwrst
, &pwrst_list
, node
) {
/* Check the state actually achieved against the requested target. */
70 state
= pwrdm_read_prev_pwrst(pwrst
->pwrdm
);
71 if (state
> pwrst
->next_state
) {
72 pr_info("Powerdomain (%s) didn't enter "
74 pwrst
->pwrdm
->name
, pwrst
->next_state
);
/* Put the domain back to its pre-suspend programming. */
77 omap_set_pwrdm_state(pwrst
->pwrdm
, pwrst
->saved_state
);
78 pwrdm_set_logic_retst(pwrst
->pwrdm
, pwrst
->saved_logic_state
);
81 pr_crit("Could not enter target state in pm_suspend\n");
83 pr_info("Successfully put all powerdomains to target state\n");
/*
 * omap4_pm_enter - platform_suspend_ops .enter callback.
 *
 * Dispatches on the requested suspend state; the visible case funnels
 * into omap4_pm_suspend().
 * NOTE(review): the remaining cases/default and the return of 'ret'
 * are not visible in this chunk.
 */
88 static int omap4_pm_enter(suspend_state_t suspend_state
)
92 switch (suspend_state
) {
93 case PM_SUSPEND_STANDBY
:
95 ret
= omap4_pm_suspend();
/*
 * omap4_pm_begin - platform_suspend_ops .begin callback.
 * NOTE(review): body not visible in this chunk.
 */
104 static int omap4_pm_begin(suspend_state_t state
)
/*
 * omap4_pm_end - platform_suspend_ops .end callback.
 * NOTE(review): body not visible in this chunk.
 */
110 static void omap4_pm_end(void)
/* Suspend callbacks registered with the PM core via suspend_set_ops(). */
116 static const struct platform_suspend_ops omap_pm_ops
= {
117 .begin
= omap4_pm_begin
,
119 .enter
= omap4_pm_enter
,
120 .valid
= suspend_valid_only_mem
,
/* NOTE(review): the .end = omap4_pm_end assignment is not visible here. */
122 #endif /* CONFIG_SUSPEND */
125 * Enable hardware supervised mode for all clockdomains if it's
126 * supported. Initiate sleep transition for other clockdomains, if
/*
 * clkdms_setup - per-clockdomain init callback (clkdm_for_each iterator).
 * Prefers hardware-supervised (auto) idle; otherwise force-sleeps an
 * unused clockdomain.
 * NOTE(review): the force-sleep call and return are not visible here.
 */
129 static int __init
clkdms_setup(struct clockdomain
*clkdm
, void *unused
)
131 if (clkdm
->flags
& CLKDM_CAN_ENABLE_AUTO
)
132 clkdm_allow_idle(clkdm
);
/* No auto-idle support: force-sleep only if nothing is using it. */
133 else if (clkdm
->flags
& CLKDM_CAN_FORCE_SLEEP
&&
134 atomic_read(&clkdm
->usecount
) == 0)
/*
 * pwrdms_setup - per-powerdomain init callback (pwrdm_for_each iterator).
 *
 * Allocates a power_state tracking entry for the domain, targets
 * retention, and programs that state immediately.
 * NOTE(review): the early returns for the skipped domains and the
 * kmalloc NULL check are not visible in this chunk.
 */
140 static int __init
pwrdms_setup(struct powerdomain
*pwrdm
, void *unused
)
142 struct power_state
*pwrst
;
148 * Skip CPU0 and CPU1 power domains. CPU1 is programmed
149 * through hotplug path and CPU0 explicitly programmed
150 * further down in the code path
152 if (!strncmp(pwrdm
->name
, "cpu", 3))
156 * FIXME: Remove this check when core retention is supported
157 * Only MPUSS power domain is added in the list.
159 if (strcmp(pwrdm
->name
, "mpu_pwrdm"))
/* GFP_ATOMIC: this iterator may run in a non-sleepable context. */
162 pwrst
= kmalloc(sizeof(struct power_state
), GFP_ATOMIC
);
166 pwrst
->pwrdm
= pwrdm
;
/* Default suspend target: retention. */
167 pwrst
->next_state
= PWRDM_POWER_RET
;
168 list_add(&pwrst
->node
, &pwrst_list
);
/* Program the target state right away. */
170 return omap_set_pwrdm_state(pwrst
->pwrdm
, pwrst
->next_state
);
174 * omap_default_idle - OMAP4 default idle routine.
176 * Implements OMAP4 memory, IO ordering requirements which can't be addressed
177 * with default arch_idle() hook. Used by all CPUs with !CONFIG_CPUIDLE and
178 * by secondary CPU with CONFIG_CPUIDLE.
180 static void omap_default_idle(void)
192 * omap4_pm_init - Init routine for OMAP4 PM
194 * Initializes all powerdomain and clockdomain target states
195 * and all PRCM settings.
/*
 * NOTE(review): several lines of this function (returns, 'ret' checks
 * such as "if (ret)" before the error prints, and the function braces)
 * are missing from this chunk.
 */
197 static int __init
omap4_pm_init(void)
200 struct clockdomain
*emif_clkdm
, *mpuss_clkdm
, *l3_1_clkdm
;
201 struct clockdomain
*ducati_clkdm
, *l3_2_clkdm
, *l4_per_clkdm
;
/* Nothing to do on non-OMAP44xx SoCs. */
203 if (!cpu_is_omap44xx())
/* ES1.0 silicon has no PM support; warn and bail. */
206 if (omap_rev() == OMAP4430_REV_ES1_0
) {
207 WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
/* NOTE(review): informational banner printed with pr_err — looks like
 * it should be pr_info; confirm against upstream history before changing. */
211 pr_err("Power Management for TI OMAP4.\n");
213 ret
= pwrdm_for_each(pwrdms_setup
, NULL
);
215 pr_err("Failed to setup powerdomains\n");
220 * The dynamic dependency between MPUSS -> MEMIF and
221 * MPUSS -> L4_PER/L3_* and DUCATI -> L3_* doesn't work as
222 * expected. The hardware recommendation is to enable static
223 * dependencies for these to avoid system lock ups or random crashes.
225 mpuss_clkdm
= clkdm_lookup("mpuss_clkdm");
226 emif_clkdm
= clkdm_lookup("l3_emif_clkdm");
227 l3_1_clkdm
= clkdm_lookup("l3_1_clkdm");
228 l3_2_clkdm
= clkdm_lookup("l3_2_clkdm");
229 l4_per_clkdm
= clkdm_lookup("l4_per_clkdm");
230 ducati_clkdm
= clkdm_lookup("ducati_clkdm");
/* Any missing clockdomain lookup is fatal for dependency setup. */
231 if ((!mpuss_clkdm
) || (!emif_clkdm
) || (!l3_1_clkdm
) ||
232 (!l3_2_clkdm
) || (!ducati_clkdm
) || (!l4_per_clkdm
))
/* Static wakeup dependencies, per the hardware recommendation above. */
235 ret
= clkdm_add_wkdep(mpuss_clkdm
, emif_clkdm
);
236 ret
|= clkdm_add_wkdep(mpuss_clkdm
, l3_1_clkdm
);
237 ret
|= clkdm_add_wkdep(mpuss_clkdm
, l3_2_clkdm
);
238 ret
|= clkdm_add_wkdep(mpuss_clkdm
, l4_per_clkdm
);
239 ret
|= clkdm_add_wkdep(ducati_clkdm
, l3_1_clkdm
);
240 ret
|= clkdm_add_wkdep(ducati_clkdm
, l3_2_clkdm
);
242 pr_err("Failed to add MPUSS -> L3/EMIF/L4PER, DUCATI -> L3 "
243 "wakeup dependency\n");
247 ret
= omap4_mpuss_init();
249 pr_err("Failed to initialise OMAP4 MPUSS\n");
/* Result deliberately ignored: clockdomain setup is best-effort. */
253 (void) clkdm_for_each(clkdms_setup
, NULL
);
255 #ifdef CONFIG_SUSPEND
256 suspend_set_ops(&omap_pm_ops
);
257 #endif /* CONFIG_SUSPEND */
259 /* Overwrite the default arch_idle() */
260 pm_idle
= omap_default_idle
;
/* Run after core PRCM/powerdomain frameworks have initialized. */
267 late_initcall(omap4_pm_init
);