/*
 * coupled.c - helper functions to enter the same idle state on multiple cpus
 *
 * Copyright (c) 2011 Google, Inc.
 *
 * Author: Colin Cross <ccross@android.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "cpuidle.h"

/**
 * DOC: Coupled cpuidle states
 *
 * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the
 * cpus cannot be independently powered down, either due to
 * sequencing restrictions (on Tegra 2, cpu 0 must be the last to
 * power down), or due to HW bugs (on OMAP4460, a cpu powering up
 * will corrupt the gic state unless the other cpu runs a work
 * around).  Each cpu has a power state that it can enter without
 * coordinating with the other cpu (usually Wait For Interrupt, or
 * WFI), and one or more "coupled" power states that affect blocks
 * shared between the cpus (L2 cache, interrupt controller, and
 * sometimes the whole SoC).  Entering a coupled power state must
 * be tightly controlled on both cpus.
 *
 * This file implements a solution, where each cpu will wait in the
 * WFI state until all cpus are ready to enter a coupled state, at
 * which point the coupled state function will be called on all
 * cpus at approximately the same time.
 *
 * Once all cpus are ready to enter idle, they are woken by an smp
 * cross call.  At this point, there is a chance that one of the
 * cpus will find work to do, and choose not to enter idle.  A
 * final pass is needed to guarantee that all cpus will call the
 * power state enter function at the same time.  During this pass,
 * each cpu will increment the ready counter, and continue once the
 * ready counter matches the number of online coupled cpus.  If any
 * cpu exits idle, the other cpus will decrement their counter and
 * retry.
 *
 * requested_state stores the deepest coupled idle state each cpu
 * is ready for.  It is assumed that the states are indexed from
 * shallowest (highest power, lowest exit latency) to deepest
 * (lowest power, highest exit latency).  The requested_state
 * variable is not locked.  It is only written from the cpu that
 * it stores (or by the on/offlining cpu if that cpu is offline),
 * and only read after all the cpus are ready for the coupled idle
 * state and are no longer updating it.
 *
 * online_count tracks the number of cpus in the coupled set that
 * are currently or soon will be online.  The waiting and ready
 * counts are packed into a single atomic, ready_waiting_counts:
 * the waiting half counts cpus that are in the waiting loop, the
 * ready loop, or the coupled idle state, and the ready half counts
 * cpus that are in the ready loop or the coupled idle state.
 *
 * To use coupled cpuidle states, a cpuidle driver must:
 *
 *    Set struct cpuidle_device.coupled_cpus to the mask of all
 *    coupled cpus, usually the same as cpu_possible_mask if all cpus
 *    are part of the same cluster.  The coupled_cpus mask must be
 *    set in the struct cpuidle_device for each cpu.
 *
 *    Set struct cpuidle_driver.safe_state_index to the index of a
 *    state that is not a coupled state.  This is usually WFI.
 *
 *    Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each
 *    state that affects multiple cpus.
 *
 *    Provide a struct cpuidle_state.enter function for each state
 *    that affects multiple cpus.  This function is guaranteed to be
 *    called on all cpus at approximately the same time.  The driver
 *    should ensure that the cpus all abort together if any cpu tries
 *    to abort once the function is called.  The function should return
 *    with interrupts still disabled.
 */

/**
 * struct cpuidle_coupled - data for set of cpus that share a coupled idle state
 * @coupled_cpus: mask of cpus that are part of the coupled set
 * @requested_state: array of requested states for cpus in the coupled set
 * @ready_waiting_counts: combined count of cpus in ready or waiting loops
 * @abort_barrier: synchronization point for aborting and retrying coupled idle
 * @online_count: count of cpus that are online
 * @refcnt: reference count of cpuidle devices that are using this struct
 * @prevent: flag to prevent coupled idle while a cpu is hotplugging
 */
struct cpuidle_coupled {
	cpumask_t coupled_cpus;
	int requested_state[NR_CPUS];
	atomic_t ready_waiting_counts;
	atomic_t abort_barrier;
	int online_count;
	int refcnt;
	int prevent;
};

#define WAITING_BITS 16
#define MAX_WAITING_CPUS (1 << WAITING_BITS)
#define WAITING_MASK (MAX_WAITING_CPUS - 1)
#define READY_MASK (~WAITING_MASK)
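
/*
 * Both counts live in one atomic_t: the low WAITING_BITS bits hold the
 * waiting count and the bits above them hold the ready count.  For
 * example, a snapshot value of 0x00020003 decodes as ready_count == 2
 * and waiting_count == 3.  The helpers below always extract the halves
 * as:
 *
 *	ready   = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
 *	waiting = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
 */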
119 | ||
120 | #define CPUIDLE_COUPLED_NOT_IDLE (-1) | |
121 | ||
122 | static DEFINE_MUTEX(cpuidle_coupled_lock); | |
123 | static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb); | |
124 | ||
/*
 * The cpuidle_coupled_poke_pending mask is used to avoid calling
 * smp_call_function_single_async with the per cpu call_single_data struct
 * already in use.  This prevents a deadlock where two cpus are waiting for
 * each other's call_single_data struct to be available.
 */
static cpumask_t cpuidle_coupled_poke_pending;

/*
 * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked
 * once to minimize entering the ready loop with a poke pending, which would
 * require aborting and retrying.
 */
static cpumask_t cpuidle_coupled_poked;

/**
 * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
 * @dev: cpuidle_device of the calling cpu
 * @a: atomic variable to hold the barrier
 *
 * No caller to this function will return from this function until all online
 * cpus in the same coupled group have called this function.  Once any caller
 * has returned from this function, the barrier is immediately available for
 * reuse.
 *
 * The atomic variable must be initialized to 0 before any cpu calls
 * this function, and will be reset to 0 before any cpu returns from it.
 *
 * Must only be called from within a coupled idle state handler
 * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
 *
 * Provides full smp barrier semantics before and after calling.
 */
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
	int n = dev->coupled->online_count;

	smp_mb__before_atomic();
	atomic_inc(a);

	while (atomic_read(a) < n)
		cpu_relax();

	if (atomic_inc_return(a) == n * 2) {
		atomic_set(a, 0);
		return;
	}

	while (atomic_read(a) > n)
		cpu_relax();
}
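
/*
 * A sketch of how a platform's coupled state handler might use the
 * barrier above to keep the cpus in lockstep around a shared power
 * sequence.  example_coupled_enter and example_cpu0_power_down are
 * hypothetical names, and the abort handling a real driver would need
 * is omitted.
 */
#if 0	/* illustrative only, not compiled */
static atomic_t example_abort_barrier;	/* must be initialized to 0 */

static int example_coupled_enter(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	/* wait until every online coupled cpu has arrived here */
	cpuidle_coupled_parallel_barrier(dev, &example_abort_barrier);

	if (dev->cpu == 0)
		example_cpu0_power_down();	/* touch shared blocks once */
	else
		cpu_do_idle();			/* ARM's WFI helper */

	/* resynchronize before anyone returns and touches shared state */
	cpuidle_coupled_parallel_barrier(dev, &example_abort_barrier);

	return index;
}
#endif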
176 | ||
4126c019 CC |
177 | /** |
178 | * cpuidle_state_is_coupled - check if a state is part of a coupled set | |
4126c019 CC |
179 | * @drv: struct cpuidle_driver for the platform |
180 | * @state: index of the target state in drv->states | |
181 | * | |
182 | * Returns true if the target state is coupled with cpus besides this one | |
183 | */ | |
4c1ed5a6 | 184 | bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state) |
4126c019 CC |
185 | { |
186 | return drv->states[state].flags & CPUIDLE_FLAG_COUPLED; | |
187 | } | |
188 | ||
/**
 * cpuidle_coupled_set_ready - mark a cpu as ready
 * @coupled: the struct coupled that contains the current cpu
 */
static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled)
{
	atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}

/**
 * cpuidle_coupled_set_not_ready - mark a cpu as not ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Decrements the ready counter, unless the ready (and thus the waiting) counter
 * is equal to the number of online cpus.  Prevents a race where one cpu
 * decrements the waiting counter and then re-increments it just before another
 * cpu has decremented its ready counter, leading to the ready counter going
 * down from the number of online cpus without going through the coupled idle
 * state.
 *
 * Returns 0 if the counter was decremented successfully, -EINVAL if the ready
 * counter was equal to the number of online cpus.
 */
static
inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
{
	int all;
	int ret;

	all = coupled->online_count | (coupled->online_count << WAITING_BITS);
	ret = atomic_add_unless(&coupled->ready_waiting_counts,
		-MAX_WAITING_CPUS, all);

	return ret ? 0 : -EINVAL;
}

/**
 * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the ready loop.
 */
static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
	return r == 0;
}

/**
 * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the ready loop.
 */
static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
	return r == coupled->online_count;
}

/**
 * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the wait loop.
 */
static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
	return w == coupled->online_count;
}

/**
 * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the waiting loop.
 */
static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
	return w == 0;
}

/**
 * cpuidle_coupled_get_state - determine the deepest idle state
 * @dev: struct cpuidle_device for this cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns the deepest idle state that all coupled cpus can enter.
 */
static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
		struct cpuidle_coupled *coupled)
{
	int i;
	int state = INT_MAX;

	/*
	 * Read barrier ensures that read of requested_state is ordered after
	 * reads of ready_count.  Matches the write barrier in
	 * cpuidle_coupled_set_waiting.
	 */
	smp_rmb();

	for_each_cpu(i, &coupled->coupled_cpus)
		if (cpu_online(i) && coupled->requested_state[i] < state)
			state = coupled->requested_state[i];

	return state;
}
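
/*
 * For example, if cpu0 requested state 2 but cpu1 only requested
 * state 1, the loop above returns 1: the set can only enter the
 * shallowest state that every online cpu has requested.
 */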
299 | ||
f983827b | 300 | static void cpuidle_coupled_handle_poke(void *info) |
4126c019 CC |
301 | { |
302 | int cpu = (unsigned long)info; | |
f983827b CC |
303 | cpumask_set_cpu(cpu, &cpuidle_coupled_poked); |
304 | cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending); | |
4126c019 CC |
305 | } |
306 | ||
/**
 * cpuidle_coupled_poke - wake up a cpu that may be waiting
 * @cpu: target cpu
 *
 * Ensures that the target cpu exits its waiting idle state (if it is in it)
 * and will see updates to waiting_count before it re-enters its waiting idle
 * state.
 *
 * If cpuidle_coupled_poke_pending is already set for the target cpu, that cpu
 * either has or will soon have a pending IPI that will wake it out of idle,
 * or it is currently processing the IPI and is not in idle.
 */
static void cpuidle_coupled_poke(int cpu)
{
	struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);

	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
		smp_call_function_single_async(cpu, csd);
}

/**
 * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
 * @this_cpu: the cpu calling this function
 * @coupled: the struct coupled that contains the current cpu
 *
 * Calls cpuidle_coupled_poke on all other online cpus.
 */
static void cpuidle_coupled_poke_others(int this_cpu,
		struct cpuidle_coupled *coupled)
{
	int cpu;

	for_each_cpu(cpu, &coupled->coupled_cpus)
		if (cpu != this_cpu && cpu_online(cpu))
			cpuidle_coupled_poke(cpu);
}

/**
 * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 * @next_state: the index in drv->states of the requested state for this cpu
 *
 * Updates the requested idle state for the specified cpuidle device.
 * Returns the number of waiting cpus.
 */
static int cpuidle_coupled_set_waiting(int cpu,
		struct cpuidle_coupled *coupled, int next_state)
{
	coupled->requested_state[cpu] = next_state;

	/*
	 * The atomic_inc_return provides a write barrier to order the write
	 * to requested_state with the later write that increments ready_count.
	 */
	return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
}

/**
 * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Removes the requested idle state for the specified cpuidle device.
 */
static void cpuidle_coupled_set_not_waiting(int cpu,
		struct cpuidle_coupled *coupled)
{
	/*
	 * Decrementing waiting count can race with incrementing it in
	 * cpuidle_coupled_set_waiting, but that's OK.  Worst case, some
	 * cpus will increment ready_count and then spin until they
	 * notice that this cpu has cleared its requested_state.
	 */
	atomic_dec(&coupled->ready_waiting_counts);

	coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
}

/**
 * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Marks this cpu as no longer in the ready and waiting loops.  Decrements
 * the waiting count first to prevent another cpu looping back in and seeing
 * this cpu as waiting just before it exits idle.
 */
static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
{
	cpuidle_coupled_set_not_waiting(cpu, coupled);
	atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}

/**
 * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
 * @cpu: this cpu
 *
 * If a poke is pending for this cpu, turns on interrupts and spins until the
 * outstanding poke interrupt has been processed and the poke bit has been
 * cleared.
 *
 * Other interrupts may also be processed while interrupts are enabled, so
 * need_resched() must be tested after this function returns to make sure
 * the interrupt didn't schedule work that should take the cpu out of idle.
 *
 * Returns 0 if no poke was pending, 1 if a poke was cleared.
 */
static int cpuidle_coupled_clear_pokes(int cpu)
{
	if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		return 0;

	local_irq_enable();
	while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		cpu_relax();
	local_irq_disable();

	return 1;
}

static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
{
	cpumask_t cpus;
	int ret;

	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
	ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);

	return ret;
}

/**
 * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
 * @dev: struct cpuidle_device for the current cpu
 * @drv: struct cpuidle_driver for the platform
 * @next_state: index of the requested state in drv->states
 *
 * Coordinate with coupled cpus to enter the target state.  This is a two
 * stage process.  In the first stage, the cpus are operating independently,
 * and may call into cpuidle_enter_state_coupled at completely different times.
 * To save as much power as possible, the first cpus to call this function will
 * go to an intermediate state (the cpuidle driver's safe state), and wait for
 * all the other cpus to call this function.  Once all coupled cpus are idle,
 * the second stage will start.  Each coupled cpu will spin until all cpus have
 * guaranteed that they will enter the target state.
 *
 * This function must be called with interrupts disabled.  It may enable
 * interrupts while preparing for idle, and it will always return with
 * interrupts enabled.
 */
int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int next_state)
{
	int entered_state = -1;
	struct cpuidle_coupled *coupled = dev->coupled;
	int w;

	if (!coupled)
		return -EINVAL;

	while (coupled->prevent) {
		cpuidle_coupled_clear_pokes(dev->cpu);
		if (need_resched()) {
			local_irq_enable();
			return entered_state;
		}
		entered_state = cpuidle_enter_state(dev, drv,
			drv->safe_state_index);
		local_irq_disable();
	}

	/* Read barrier ensures online_count is read after prevent is cleared */
	smp_rmb();

reset:
	cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);

	w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
	/*
	 * If this is the last cpu to enter the waiting state, poke
	 * all the other cpus out of their waiting state so they can
	 * enter a deeper state.  This can race with one of the cpus
	 * exiting the waiting state due to an interrupt and
	 * decrementing waiting_count, see comment below.
	 */
	if (w == coupled->online_count) {
		cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
		cpuidle_coupled_poke_others(dev->cpu, coupled);
	}

retry:
	/*
	 * Wait for all coupled cpus to be idle, using the deepest state
	 * allowed for a single cpu.  If this was not the poking cpu, wait
	 * for at least one poke before leaving to avoid a race where
	 * two cpus could arrive at the waiting loop at the same time,
	 * but the first of the two to arrive could skip the loop without
	 * processing the pokes from the last to arrive.
	 */
	while (!cpuidle_coupled_cpus_waiting(coupled) ||
			!cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
		if (cpuidle_coupled_clear_pokes(dev->cpu))
			continue;

		if (need_resched()) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		if (coupled->prevent) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		entered_state = cpuidle_enter_state(dev, drv,
			drv->safe_state_index);
		local_irq_disable();
	}

	cpuidle_coupled_clear_pokes(dev->cpu);
	if (need_resched()) {
		cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
		goto out;
	}

	/*
	 * Make sure final poke status for this cpu is visible before setting
	 * cpu as ready.
	 */
	smp_wmb();

	/*
	 * All coupled cpus are probably idle.  There is a small chance that
	 * one of the other cpus just became active.  Increment the ready count,
	 * and spin until all coupled cpus have incremented the counter.  Once a
	 * cpu has incremented the ready counter, it cannot abort idle and must
	 * spin until either all cpus have incremented the ready counter, or
	 * another cpu leaves idle and decrements the waiting counter.
	 */

	cpuidle_coupled_set_ready(coupled);
	while (!cpuidle_coupled_cpus_ready(coupled)) {
		/* Check if any other cpus bailed out of idle. */
		if (!cpuidle_coupled_cpus_waiting(coupled))
			if (!cpuidle_coupled_set_not_ready(coupled))
				goto retry;

		cpu_relax();
	}

	/*
	 * Make sure read of all cpus ready is done before reading pending pokes
	 */
	smp_rmb();

	/*
	 * There is a small chance that a cpu left and reentered idle after this
	 * cpu saw that all cpus were waiting.  The cpu that reentered idle will
	 * have sent this cpu a poke, which will still be pending after the
	 * ready loop.  The pending interrupt may be lost by the interrupt
	 * controller when entering the deep idle state.  It's not possible to
	 * clear a pending interrupt without turning interrupts on and handling
	 * it, and it's too late to turn on interrupts here, so reset the
	 * coupled idle state of all cpus and retry.
	 */
	if (cpuidle_coupled_any_pokes_pending(coupled)) {
		cpuidle_coupled_set_done(dev->cpu, coupled);
		/* Wait for all cpus to see the pending pokes */
		cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier);
		goto reset;
	}

	/* all cpus have acked the coupled state */
	next_state = cpuidle_coupled_get_state(dev, coupled);

	entered_state = cpuidle_enter_state(dev, drv, next_state);

	cpuidle_coupled_set_done(dev->cpu, coupled);

out:
	/*
	 * Normal cpuidle states are expected to return with irqs enabled.
	 * That leads to an inefficiency where a cpu receiving an interrupt
	 * that brings it out of idle will process that interrupt before
	 * exiting the idle enter function and decrementing ready_count.  All
	 * other cpus will need to spin waiting for the cpu that is processing
	 * the interrupt.  If the driver returns with interrupts disabled,
	 * all other cpus will loop back into the safe idle state instead of
	 * spinning, saving power.
	 *
	 * Calling local_irq_enable here allows coupled states to return with
	 * interrupts disabled, but won't cause problems for drivers that
	 * exit with interrupts enabled.
	 */
	local_irq_enable();

	/*
	 * Wait until all coupled cpus have exited idle.  There is no risk that
	 * a cpu exits and re-enters the ready state because this cpu has
	 * already decremented its waiting_count.
	 */
	while (!cpuidle_coupled_no_cpus_ready(coupled))
		cpu_relax();

	return entered_state;
}
613 | ||
614 | static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled) | |
615 | { | |
616 | cpumask_t cpus; | |
617 | cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus); | |
618 | coupled->online_count = cpumask_weight(&cpus); | |
619 | } | |
620 | ||
/**
 * cpuidle_coupled_register_device - register a coupled cpuidle device
 * @dev: struct cpuidle_device for the current cpu
 *
 * Called from cpuidle_register_device to handle coupled idle init.  Finds the
 * cpuidle_coupled struct for this set of coupled cpus, or creates one if none
 * exists yet.
 */
int cpuidle_coupled_register_device(struct cpuidle_device *dev)
{
	int cpu;
	struct cpuidle_device *other_dev;
	struct call_single_data *csd;
	struct cpuidle_coupled *coupled;

	if (cpumask_empty(&dev->coupled_cpus))
		return 0;

	for_each_cpu(cpu, &dev->coupled_cpus) {
		other_dev = per_cpu(cpuidle_devices, cpu);
		if (other_dev && other_dev->coupled) {
			coupled = other_dev->coupled;
			goto have_coupled;
		}
	}

	/* No existing coupled info found, create a new one */
	coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL);
	if (!coupled)
		return -ENOMEM;

	coupled->coupled_cpus = dev->coupled_cpus;

have_coupled:
	dev->coupled = coupled;
	if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus)))
		coupled->prevent++;

	cpuidle_coupled_update_online_cpus(coupled);

	coupled->refcnt++;

	csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
	csd->func = cpuidle_coupled_handle_poke;
	csd->info = (void *)(unsigned long)dev->cpu;

	return 0;
}

/**
 * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device
 * @dev: struct cpuidle_device for the current cpu
 *
 * Called from cpuidle_unregister_device to tear down coupled idle.  Removes the
 * cpu from the coupled idle set, and frees the cpuidle_coupled struct if
 * this was the last cpu in the set.
 */
void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_coupled *coupled = dev->coupled;

	if (cpumask_empty(&dev->coupled_cpus))
		return;

	/* Free the shared struct only when the last reference is dropped. */
	if (!--coupled->refcnt)
		kfree(coupled);
	dev->coupled = NULL;
}

/**
 * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state
 * @coupled: the struct coupled that contains the cpu that is changing state
 *
 * Disables coupled cpuidle on a coupled set of cpus.  Used to ensure that
 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
 */
static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled)
{
	int cpu = get_cpu();

	/* Force all cpus out of the waiting loop. */
	coupled->prevent++;
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
	while (!cpuidle_coupled_no_cpus_waiting(coupled))
		cpu_relax();
}

/**
 * cpuidle_coupled_allow_idle - allows cpus to enter a coupled state
 * @coupled: the struct coupled that contains the cpu that is changing state
 *
 * Enables coupled cpuidle on a coupled set of cpus.  Used to ensure that
 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
 */
static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
{
	int cpu = get_cpu();

	/*
	 * Write barrier ensures readers see the new online_count when they
	 * see prevent == 0.
	 */
	smp_wmb();
	coupled->prevent--;
	/* Force cpus out of the prevent loop. */
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
}

/**
 * cpuidle_coupled_cpu_notify - notifier called during hotplug transitions
 * @nb: notifier block
 * @action: hotplug transition
 * @hcpu: target cpu number
 *
 * Called when a cpu is brought on or offline using hotplug.  Updates the
 * coupled cpu set appropriately.
 */
static int cpuidle_coupled_cpu_notify(struct notifier_block *nb,
		unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct cpuidle_device *dev;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
	case CPU_DOWN_PREPARE:
	case CPU_ONLINE:
	case CPU_DEAD:
	case CPU_UP_CANCELED:
	case CPU_DOWN_FAILED:
		break;
	default:
		return NOTIFY_OK;
	}

	mutex_lock(&cpuidle_lock);

	dev = per_cpu(cpuidle_devices, cpu);
	if (!dev || !dev->coupled)
		goto out;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
	case CPU_DOWN_PREPARE:
		cpuidle_coupled_prevent_idle(dev->coupled);
		break;
	case CPU_ONLINE:
	case CPU_DEAD:
		cpuidle_coupled_update_online_cpus(dev->coupled);
		/* Fall through */
	case CPU_UP_CANCELED:
	case CPU_DOWN_FAILED:
		cpuidle_coupled_allow_idle(dev->coupled);
		break;
	}

out:
	mutex_unlock(&cpuidle_lock);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_coupled_cpu_notifier = {
	.notifier_call = cpuidle_coupled_cpu_notify,
};

static int __init cpuidle_coupled_init(void)
{
	return register_cpu_notifier(&cpuidle_coupled_cpu_notifier);
}
core_initcall(cpuidle_coupled_init);