drivers/cpuidle/cpuidle-powernv.c
/*
 *  cpuidle-powernv - idle state cpuidle driver.
 *  Adapted from drivers/cpuidle/cpuidle-pseries
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/clockchips.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/opal.h>
#include <asm/runlatch.h>

#define MAX_POWERNV_IDLE_STATES 8

struct cpuidle_driver powernv_idle_driver = {
        .name  = "powernv_idle",
        .owner = THIS_MODULE,
};

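/*
 * Idle state table and snooze parameters, filled in at probe time from the
 * idle states the firmware advertises in the device tree.
 */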
static int max_idle_state;
static struct cpuidle_state *cpuidle_state_table;
static u64 snooze_timeout;
static bool snooze_timeout_en;

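/*
 * snooze_loop - polling idle loop. Spin at low SMT priority until a
 * reschedule is needed or the snooze timeout expires, at which point the
 * governor can promote the CPU to a deeper state.
 */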
static int snooze_loop(struct cpuidle_device *dev,
                       struct cpuidle_driver *drv,
                       int index)
{
        u64 snooze_exit_time;

        local_irq_enable();
        set_thread_flag(TIF_POLLING_NRFLAG);

        snooze_exit_time = get_tb() + snooze_timeout;
        ppc64_runlatch_off();
        while (!need_resched()) {
                HMT_low();
                HMT_very_low();
                if (snooze_timeout_en && get_tb() > snooze_exit_time)
                        break;
        }

        HMT_medium();
        ppc64_runlatch_on();
        clear_thread_flag(TIF_POLLING_NRFLAG);
        smp_mb();
        return index;
}

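/*
 * nap_loop - enter the Nap hardware idle state. The decrementer keeps
 * running in Nap, so the timer tick does not need to be stopped.
 */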
static int nap_loop(struct cpuidle_device *dev,
                    struct cpuidle_driver *drv,
                    int index)
{
        ppc64_runlatch_off();
        power7_idle();
        ppc64_runlatch_on();
        return index;
}

/* Register fastsleep only when the broadcast timer can run in oneshot mode */
#ifdef CONFIG_TICK_ONESHOT
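/*
 * fastsleep_loop - enter the FastSleep hardware idle state. The decrementer
 * stops in this state, so wakeup relies on the broadcast timer; decrementer
 * wakeups are masked by clearing LPCR[PECE1] for the duration of the sleep.
 */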
static int fastsleep_loop(struct cpuidle_device *dev,
                          struct cpuidle_driver *drv,
                          int index)
{
        unsigned long old_lpcr = mfspr(SPRN_LPCR);
        unsigned long new_lpcr;

        if (unlikely(system_state < SYSTEM_RUNNING))
                return index;

        new_lpcr = old_lpcr;
        /*
         * Do not exit powersave upon decrementer as we have set up the
         * timer offload.
         */
        new_lpcr &= ~LPCR_PECE1;

        mtspr(SPRN_LPCR, new_lpcr);
        power7_sleep();

        mtspr(SPRN_LPCR, old_lpcr);

        return index;
}
#endif

/*
 * Default idle state table. Additional states are discovered from the
 * device tree at probe time and appended after the snooze state.
 */
static struct cpuidle_state powernv_states[MAX_POWERNV_IDLE_STATES] = {
        { /* Snooze */
                .name = "snooze",
                .desc = "snooze",
                .exit_latency = 0,
                .target_residency = 0,
                .enter = &snooze_loop },
};

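/*
 * CPU hotplug notifier: enable the per-CPU cpuidle device when a CPU comes
 * online and disable it when the CPU goes offline.
 */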
static int powernv_cpuidle_add_cpu_notifier(struct notifier_block *n,
                                            unsigned long action, void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;
        struct cpuidle_device *dev =
                        per_cpu(cpuidle_devices, hotcpu);

        if (dev && cpuidle_get_driver()) {
                switch (action) {
                case CPU_ONLINE:
                case CPU_ONLINE_FROZEN:
                        cpuidle_pause_and_lock();
                        cpuidle_enable_device(dev);
                        cpuidle_resume_and_unlock();
                        break;

                case CPU_DEAD:
                case CPU_DEAD_FROZEN:
                        cpuidle_pause_and_lock();
                        cpuidle_disable_device(dev);
                        cpuidle_resume_and_unlock();
                        break;

                default:
                        return NOTIFY_DONE;
                }
        }
        return NOTIFY_OK;
}

static struct notifier_block setup_hotplug_notifier = {
        .notifier_call = powernv_cpuidle_add_cpu_notifier,
};

/*
 * powernv_cpuidle_driver_init - copy the enabled entries of the platform
 * state table into the cpuidle driver, skipping states without an enter
 * callback.
 */
static int powernv_cpuidle_driver_init(void)
{
        int idle_state;
        struct cpuidle_driver *drv = &powernv_idle_driver;

        drv->state_count = 0;

        for (idle_state = 0; idle_state < max_idle_state; ++idle_state) {
                /* Skip states that are not enabled (no enter callback) */
                if (cpuidle_state_table[idle_state].enter == NULL)
                        continue;

                drv->states[drv->state_count] = /* structure copy */
                        cpuidle_state_table[idle_state];

                drv->state_count += 1;
        }

        return 0;
}

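/*
 * powernv_add_idle_states - parse the ibm,cpu-idle-state-* properties under
 * /ibm,opal/power-mgt and append the advertised states (Nap, FastSleep) to
 * powernv_states[]. Returns the total number of states, including snooze.
 */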
static int powernv_add_idle_states(void)
{
        struct device_node *power_mgt;
        int nr_idle_states = 1; /* Snooze */
        int dt_idle_states;
        u32 *latency_ns, *residency_ns, *flags;
        int i, rc;

        /* Currently we have snooze statically defined */

        power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
        if (!power_mgt) {
                pr_warn("opal: PowerMgmt Node not found\n");
                goto out;
        }

        /* Read values of any property to determine the num of idle states */
        dt_idle_states = of_property_count_u32_elems(power_mgt,
                                                     "ibm,cpu-idle-state-flags");
        if (dt_idle_states < 0) {
                pr_warn("cpuidle-powernv: no idle states found in the DT\n");
                goto out;
        }

        flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
        if (!flags)
                goto out;
        if (of_property_read_u32_array(power_mgt,
                        "ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
                pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
                goto out_free_flags;
        }

        latency_ns = kzalloc(sizeof(*latency_ns) * dt_idle_states, GFP_KERNEL);
        if (!latency_ns)
                goto out_free_flags;
        rc = of_property_read_u32_array(power_mgt,
                "ibm,cpu-idle-state-latencies-ns", latency_ns, dt_idle_states);
        if (rc) {
                pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
                goto out_free_latency;
        }

        residency_ns = kzalloc(sizeof(*residency_ns) * dt_idle_states, GFP_KERNEL);
        if (!residency_ns)
                goto out_free_latency;
        rc = of_property_read_u32_array(power_mgt,
                "ibm,cpu-idle-state-residency-ns", residency_ns, dt_idle_states);

        for (i = 0; i < dt_idle_states; i++) {

                /* Do not overflow the statically sized state table */
                if (nr_idle_states >= MAX_POWERNV_IDLE_STATES)
                        break;

                /*
                 * Cpuidle accepts exit_latency and target_residency in us.
                 * Use default target_residency values if f/w does not expose it.
                 */
                if (flags[i] & OPAL_PM_NAP_ENABLED) {
                        /* Add NAP state */
                        strcpy(powernv_states[nr_idle_states].name, "Nap");
                        strcpy(powernv_states[nr_idle_states].desc, "Nap");
                        powernv_states[nr_idle_states].flags = 0;
                        powernv_states[nr_idle_states].target_residency = 100;
                        powernv_states[nr_idle_states].enter = &nap_loop;
                }

                /*
                 * All cpuidle states with CPUIDLE_FLAG_TIMER_STOP set must come
                 * within this config dependency check.
                 */
#ifdef CONFIG_TICK_ONESHOT
                if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
                    flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
                        /* Add FASTSLEEP state */
                        strcpy(powernv_states[nr_idle_states].name, "FastSleep");
                        strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
                        powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
                        powernv_states[nr_idle_states].target_residency = 300000;
                        powernv_states[nr_idle_states].enter = &fastsleep_loop;
                }
#endif
                powernv_states[nr_idle_states].exit_latency =
                                ((unsigned int)latency_ns[i]) / 1000;

                if (!rc) {
                        powernv_states[nr_idle_states].target_residency =
                                ((unsigned int)residency_ns[i]) / 1000;
                }

                nr_idle_states++;
        }

        kfree(residency_ns);
out_free_latency:
        kfree(latency_ns);
out_free_flags:
        kfree(flags);
out:
        return nr_idle_states;
}

/*
 * powernv_idle_probe()
 * Check for OPAL firmware and discover the platform idle states.
 */
static int powernv_idle_probe(void)
{
        if (cpuidle_disable != IDLE_NO_OVERRIDE)
                return -ENODEV;

        if (!firmware_has_feature(FW_FEATURE_OPAL))
                return -ENODEV;

        cpuidle_state_table = powernv_states;
        /* Device tree can indicate more idle states */
        max_idle_state = powernv_add_idle_states();
        if (max_idle_state > 1) {
                snooze_timeout_en = true;
                snooze_timeout = powernv_states[1].target_residency *
                                 tb_ticks_per_usec;
        }

        return 0;
}

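/*
 * Module init: probe for OPAL idle states, build the cpuidle driver state
 * table, then register the driver and the CPU hotplug notifier.
 */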
static int __init powernv_processor_idle_init(void)
{
        int retval;

        retval = powernv_idle_probe();
        if (retval)
                return retval;

        powernv_cpuidle_driver_init();
        retval = cpuidle_register(&powernv_idle_driver, NULL);
        if (retval) {
                printk(KERN_DEBUG "Registration of powernv driver failed.\n");
                return retval;
        }

        register_cpu_notifier(&setup_hotplug_notifier);
        printk(KERN_DEBUG "powernv_idle_driver registered\n");
        return 0;
}

device_initcall(powernv_processor_idle_init);