/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos_params.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>

#include "cpuidle.h"

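/*
 * Per-CPU pointer to the CPU's registered cpuidle device; read by
 * cpuidle_idle_call() on each idle entry.
 */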
DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);
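/*
 * pm_idle_old saves the idle handler that was active before cpuidle
 * took over pm_idle; enabled_devices counts CPUs with cpuidle enabled,
 * and the cpuidle handler is installed only while that count is non-zero.
 */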
static void (*pm_idle_old)(void);

static int enabled_devices;

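/*
 * Governor contract as exercised below: ->select() picks a state index,
 * the state's ->enter() executes it and returns the time spent idle,
 * and the optional ->reflect() lets the governor learn from the result.
 */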
/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
	struct cpuidle_state *target_state;
	int next_state;

	/* check if the device is ready */
	if (!dev || !dev->enabled) {
		if (pm_idle_old)
			pm_idle_old();
		else
			local_irq_enable();
		return;
	}

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(dev);
	if (need_resched()) {
		/* re-enable interrupts before bailing out, as on the
		 * not-ready path above */
		local_irq_enable();
		return;
	}
	target_state = &dev->states[next_state];

	/* enter the state and update stats */
	dev->last_residency = target_state->enter(dev, target_state);
	dev->last_state = target_state;
	target_state->time += dev->last_residency;
	target_state->usage++;

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev);
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		pm_idle = cpuidle_idle_call;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices && (pm_idle != pm_idle_old)) {
		pm_idle = pm_idle_old;
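		/*
		 * cpu_idle_wait() returns only once every CPU has left the
		 * old idle handler, so cpuidle_idle_call() cannot still be
		 * running anywhere when we return.
		 */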
		cpu_idle_wait();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
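
/*
 * Illustrative usage (a sketch, not part of the original file): code that
 * must keep every CPU out of cpuidle while reconfiguring idle state data
 * brackets the work with the pair above:
 *
 *	cpuidle_pause_and_lock();
 *	... reconfigure idle states ...
 *	cpuidle_resume_and_unlock();
 */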

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret, i;

	if (dev->enabled)
		return 0;
	if (!cpuidle_curr_driver || !cpuidle_curr_governor)
		return -EIO;
	if (!dev->state_count)
		return -EINVAL;

	if ((ret = cpuidle_add_state_sysfs(dev)))
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(dev)))
		goto fail_sysfs;

	for (i = 0; i < dev->state_count; i++) {
		dev->states[i].usage = 0;
		dev->states[i].time = 0;
	}
	dev->last_residency = 0;
	dev->last_state = NULL;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_state_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);
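
/*
 * Illustrative sketch (not from the original file): an external caller
 * cycling a device observes the locking rule from the kernel-doc above:
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(dev);
 *	ret = cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 */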

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	if (!dev->enabled)
		return;
	if (!cpuidle_curr_driver || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(dev);

	cpuidle_remove_state_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);

	if (!sys_dev)
		return -EINVAL;
	if (!try_module_get(cpuidle_curr_driver->owner))
		return -EINVAL;

	init_completion(&dev->kobj_unregister);

	mutex_lock(&cpuidle_lock);

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);
	if ((ret = cpuidle_add_sysfs(sys_dev))) {
		mutex_unlock(&cpuidle_lock);
		module_put(cpuidle_curr_driver->owner);
		return ret;
	}

	cpuidle_enable_device(dev);
	cpuidle_install_idle_handler();

	mutex_unlock(&cpuidle_lock);

	return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);
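
/*
 * Illustrative sketch (driver-side setup is assumed, not shown in this
 * file): a cpuidle driver fills in one struct cpuidle_device per CPU,
 * setting dev->cpu, dev->states[] and dev->state_count, and then calls
 * cpuidle_register_device(dev), which enables the device and installs
 * the idle handler as seen above.
 */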

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(sys_dev);
	list_del(&dev->device_list);
	wait_for_completion(&dev->kobj_unregister);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;

	cpuidle_resume_and_unlock();

	module_put(cpuidle_curr_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
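
/*
 * Note that cpuidle_unregister_device() takes cpuidle_lock itself via
 * cpuidle_pause_and_lock(), unlike cpuidle_enable_device() and
 * cpuidle_disable_device(); callers must not already hold the lock.
 */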

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 0, 1);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

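/*
 * On a uniprocessor build there is no other CPU to kick out of its
 * C-state, so the latency notifier hook can be a no-op.
 */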
#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	pm_idle_old = pm_idle;

	ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

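/*
 * core_initcall runs before device and driver initcalls, so the sysfs
 * class and the latency notifier are in place before any cpuidle driver
 * can register devices.
 */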
core_initcall(cpuidle_init);