/*
 * linux/kernel/time/tick-sched.c
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * Distribute under GPLv2.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/irq_work.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Per cpu nohz control structure
 */
DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

/*
 * The time, when the last jiffy update happened. Protected by jiffies_lock.
 */
static ktime_t last_jiffies_update;

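/* Return the per-cpu tick_sched control structure for the given CPU. */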
struct tick_sched *tick_get_tick_sched(int cpu)
{
        return &per_cpu(tick_cpu_sched, cpu);
}

/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
        unsigned long ticks = 0;
        ktime_t delta;

        /*
         * Do a quick check without holding jiffies_lock:
         */
        delta = ktime_sub(now, last_jiffies_update);
        if (delta.tv64 < tick_period.tv64)
                return;

        /* Reevaluate with jiffies_lock held */
        write_seqlock(&jiffies_lock);

        delta = ktime_sub(now, last_jiffies_update);
        if (delta.tv64 >= tick_period.tv64) {

                delta = ktime_sub(delta, tick_period);
                last_jiffies_update = ktime_add(last_jiffies_update,
                                                tick_period);

                /* Slow path for long timeouts */
                if (unlikely(delta.tv64 >= tick_period.tv64)) {
                        s64 incr = ktime_to_ns(tick_period);

                        ticks = ktime_divns(delta, incr);

                        last_jiffies_update = ktime_add_ns(last_jiffies_update,
                                                           incr * ticks);
                }
                do_timer(++ticks);

                /* Keep the tick_next_period variable up to date */
                tick_next_period = ktime_add(last_jiffies_update, tick_period);
        }
        write_sequnlock(&jiffies_lock);
}

/*
 * Initialize and return the jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
        ktime_t period;

        write_seqlock(&jiffies_lock);
        /* Did we start the jiffies update yet ? */
        if (last_jiffies_update.tv64 == 0)
                last_jiffies_update = tick_next_period;
        period = last_jiffies_update;
        write_sequnlock(&jiffies_lock);
        return period;
}

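/*
 * Take over the do_timer() duty if it was dropped by a CPU that went
 * into a long sleep, then update jiffies64 if this CPU owns the duty.
 */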
static void tick_sched_do_timer(ktime_t now)
{
        int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ
        /*
         * Check if the do_timer duty was dropped. We don't care about
         * concurrency: This happens only when the cpu in charge went
         * into a long sleep. If two cpus happen to assign themselves to
         * this duty, then the jiffies update is still serialized by
         * jiffies_lock.
         */
        if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
                tick_do_timer_cpu = cpu;
#endif

        /* Check, if the jiffies need an update */
        if (tick_do_timer_cpu == cpu)
                tick_do_update_jiffies64(now);
}

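/*
 * Per-tick housekeeping shared by the low resolution and the high
 * resolution tick handlers: watchdog, process accounting and profiling.
 */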
static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
#ifdef CONFIG_NO_HZ
        /*
         * When we are idle and the tick is stopped, we have to touch
         * the watchdog as we might not schedule for a really long
         * time. This happens on complete idle SMP systems while
         * waiting on the login prompt. We also increment the "start of
         * idle" jiffy stamp so the idle accounting adjustment we do
         * when we go busy again does not account too many ticks.
         */
        if (ts->tick_stopped) {
                touch_softlockup_watchdog();
                if (is_idle_task(current))
                        ts->idle_jiffies++;
        }
#endif
        update_process_times(user_mode(regs));
        profile_tick(CPU_PROFILING);
}

#ifdef CONFIG_NO_HZ_EXTENDED
static cpumask_var_t nohz_extended_mask;
bool have_nohz_extended_mask;

int tick_nohz_extended_cpu(int cpu)
{
        if (!have_nohz_extended_mask)
                return 0;

        return cpumask_test_cpu(cpu, nohz_extended_mask);
}

/* Parse the boot-time nohz CPU list from the kernel parameters. */
static int __init tick_nohz_extended_setup(char *str)
{
        alloc_bootmem_cpumask_var(&nohz_extended_mask);
        if (cpulist_parse(str, nohz_extended_mask) < 0)
                pr_warning("NOHZ: Incorrect nohz_extended cpumask\n");
        else
                have_nohz_extended_mask = true;
        return 1;
}
__setup("nohz_extended=", tick_nohz_extended_setup);

static int __init init_tick_nohz_extended(void)
{
        cpumask_var_t online_nohz;
        int cpu;

        if (!have_nohz_extended_mask)
                return 0;

        if (!zalloc_cpumask_var(&online_nohz, GFP_KERNEL)) {
                pr_warning("NO_HZ: Not enough memory to check extended nohz mask\n");
                return -ENOMEM;
        }

        /*
         * CPUs can probably not be concurrently offlined at initcall time.
         * But we are paranoid, aren't we?
         */
        get_online_cpus();

        /* Ensure we keep a CPU outside the dynticks range for timekeeping */
        cpumask_and(online_nohz, cpu_online_mask, nohz_extended_mask);
        if (cpumask_equal(online_nohz, cpu_online_mask)) {
                cpu = cpumask_any(cpu_online_mask);
                pr_warning("NO_HZ: Must keep at least one online CPU "
                           "out of nohz_extended range\n");
                pr_warning("NO_HZ: Clearing %d from nohz_extended range\n", cpu);
                cpumask_clear_cpu(cpu, nohz_extended_mask);
        }
        put_online_cpus();
        free_cpumask_var(online_nohz);

        return 0;
}
core_initcall(init_tick_nohz_extended);
#else
#define have_nohz_extended_mask (0)
#endif

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ
/*
 * NO HZ enabled ?
 */
int tick_nohz_enabled __read_mostly = 1;

/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
        if (!strcmp(str, "off"))
                tick_nohz_enabled = 0;
        else if (!strcmp(str, "on"))
                tick_nohz_enabled = 1;
        else
                return 0;
        return 1;
}

__setup("nohz=", setup_tick_nohz);

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any cpu, as we don't know whether the
 * cpu, which has the update task assigned, is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
        int cpu = smp_processor_id();
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        unsigned long flags;

        ts->idle_waketime = now;

        local_irq_save(flags);
        tick_do_update_jiffies64(now);
        local_irq_restore(flags);

        touch_softlockup_watchdog();
}

/*
 * Updates the per cpu time idle statistics counters
 */
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
        ktime_t delta;

        if (ts->idle_active) {
                delta = ktime_sub(now, ts->idle_entrytime);
                if (nr_iowait_cpu(cpu) > 0)
                        ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
                else
                        ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
                ts->idle_entrytime = now;
        }

        if (last_update_time)
                *last_update_time = ktime_to_us(now);
}

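/* Fold the elapsed idle time into the statistics and end the idle period. */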
static void tick_nohz_stop_idle(int cpu, ktime_t now)
{
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

        update_ts_time_stats(cpu, ts, now, NULL);
        ts->idle_active = 0;

        sched_clock_idle_wakeup_event(0);
}

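/* Record the idle entry time, so the sleep time can be accounted on wakeup. */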
static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
{
        ktime_t now = ktime_get();

        ts->idle_entrytime = now;
        ts->idle_active = 1;
        sched_clock_idle_sleep_event();
        return now;
}

/**
 * get_cpu_idle_time_us - get the total idle time of a cpu
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        ktime_t now, idle;

        if (!tick_nohz_enabled)
                return -1;

        now = ktime_get();
        if (last_update_time) {
                update_ts_time_stats(cpu, ts, now, last_update_time);
                idle = ts->idle_sleeptime;
        } else {
                if (ts->idle_active && !nr_iowait_cpu(cpu)) {
                        ktime_t delta = ktime_sub(now, ts->idle_entrytime);

                        idle = ktime_add(ts->idle_sleeptime, delta);
                } else {
                        idle = ts->idle_sleeptime;
                }
        }

        return ktime_to_us(idle);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);

/**
 * get_cpu_iowait_time_us - get the total iowait time of a cpu
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        ktime_t now, iowait;

        if (!tick_nohz_enabled)
                return -1;

        now = ktime_get();
        if (last_update_time) {
                update_ts_time_stats(cpu, ts, now, last_update_time);
                iowait = ts->iowait_sleeptime;
        } else {
                if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
                        ktime_t delta = ktime_sub(now, ts->idle_entrytime);

                        iowait = ktime_add(ts->iowait_sleeptime, delta);
                } else {
                        iowait = ts->iowait_sleeptime;
                }
        }

        return ktime_to_us(iowait);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);

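/*
 * Stop the tick for an idle CPU: work out how long the CPU may sleep and
 * program the next event accordingly. Returns the expiry time of the
 * reprogrammed event, or a zero ktime_t when no new event was programmed.
 */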
static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
                                         ktime_t now, int cpu)
{
        unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
        ktime_t last_update, expires, ret = { .tv64 = 0 };
        unsigned long rcu_delta_jiffies;
        struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
        u64 time_delta;

        /* Read jiffies and the time when jiffies were updated last */
        do {
                seq = read_seqbegin(&jiffies_lock);
                last_update = last_jiffies_update;
                last_jiffies = jiffies;
                time_delta = timekeeping_max_deferment();
        } while (read_seqretry(&jiffies_lock, seq));

        if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
            arch_needs_cpu(cpu) || irq_work_needs_cpu()) {
                next_jiffies = last_jiffies + 1;
                delta_jiffies = 1;
        } else {
                /* Get the next timer wheel timer */
                next_jiffies = get_next_timer_interrupt(last_jiffies);
                delta_jiffies = next_jiffies - last_jiffies;
                if (rcu_delta_jiffies < delta_jiffies) {
                        next_jiffies = last_jiffies + rcu_delta_jiffies;
                        delta_jiffies = rcu_delta_jiffies;
                }
        }
        /*
         * Do not stop the tick, if we are only one off
         * or if the cpu is required for rcu
         */
        if (!ts->tick_stopped && delta_jiffies == 1)
                goto out;

        /* Schedule the tick, if we are at least one jiffy off */
        if ((long)delta_jiffies >= 1) {

                /*
                 * If this cpu is the one which updates jiffies, then
                 * give up the assignment and let it be taken by the
                 * cpu which runs the tick timer next, which might be
                 * this cpu as well. If we don't drop this here the
                 * jiffies might be stale and do_timer() never
                 * invoked. Keep track of the fact that it was the one
                 * which had the do_timer() duty last. If this cpu is
                 * the one which had the do_timer() duty last, we
                 * limit the sleep time to the timekeeping
                 * max_deferment value which we retrieved
                 * above. Otherwise we can sleep as long as we want.
                 */
                if (cpu == tick_do_timer_cpu) {
                        tick_do_timer_cpu = TICK_DO_TIMER_NONE;
                        ts->do_timer_last = 1;
                } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
                        time_delta = KTIME_MAX;
                        ts->do_timer_last = 0;
                } else if (!ts->do_timer_last) {
                        time_delta = KTIME_MAX;
                }

                /*
                 * Calculate the expiry time for the next timer wheel
                 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
                 * that there is no timer pending or at least extremely
                 * far into the future (12 days for HZ=1000). In this
                 * case we set the expiry to the end of time.
                 */
                if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
                        /*
                         * Calculate the time delta for the next timer event.
                         * If the time delta exceeds the maximum time delta
                         * permitted by the current clocksource then adjust
                         * the time delta accordingly to ensure the
                         * clocksource does not wrap.
                         */
                        time_delta = min_t(u64, time_delta,
                                           tick_period.tv64 * delta_jiffies);
                }

                if (time_delta < KTIME_MAX)
                        expires = ktime_add_ns(last_update, time_delta);
                else
                        expires.tv64 = KTIME_MAX;

                /* Skip reprogram of event if it's not changed */
                if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
                        goto out;

                ret = expires;

                /*
                 * nohz_stop_sched_tick can be called several times before
                 * the nohz_restart_sched_tick is called. This happens when
                 * interrupts arrive which do not cause a reschedule. In the
                 * first call we save the current tick time, so we can restart
                 * the scheduler tick in nohz_restart_sched_tick.
                 */
                if (!ts->tick_stopped) {
                        nohz_balance_enter_idle(cpu);
                        calc_load_enter_idle();

                        ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
                        ts->tick_stopped = 1;
                }

                /*
                 * If the expiration time == KTIME_MAX, then
                 * we simply stop the tick timer.
                 */
                if (unlikely(expires.tv64 == KTIME_MAX)) {
                        if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
                                hrtimer_cancel(&ts->sched_timer);
                        goto out;
                }

                if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
                        hrtimer_start(&ts->sched_timer, expires,
                                      HRTIMER_MODE_ABS_PINNED);
                        /* Check, if the timer was already in the past */
                        if (hrtimer_active(&ts->sched_timer))
                                goto out;
                } else if (!tick_program_event(expires, 0))
                        goto out;
                /*
                 * We are past the event already. So we crossed a
                 * jiffy boundary. Update jiffies and raise the
                 * softirq.
                 */
                tick_do_update_jiffies64(ktime_get());
        }
        raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
        ts->next_jiffies = next_jiffies;
        ts->last_jiffies = last_jiffies;
        ts->sleep_length = ktime_sub(dev->next_event, now);

        return ret;
}

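/*
 * Checks that may veto stopping the tick: an offline CPU gives up the
 * do_timer() duty; inactive nohz mode, a pending reschedule or pending
 * softirqs keep the tick running.
 */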
static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
        /*
         * If this cpu is offline and it is the one which updates
         * jiffies, then give up the assignment and let it be taken by
         * the cpu which runs the tick timer next. If we don't drop
         * this here the jiffies might be stale and do_timer() never
         * invoked.
         */
        if (unlikely(!cpu_online(cpu))) {
                if (cpu == tick_do_timer_cpu)
                        tick_do_timer_cpu = TICK_DO_TIMER_NONE;
        }

        if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
                return false;

        if (need_resched())
                return false;

        if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
                static int ratelimit;

                if (ratelimit < 10 &&
                    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
                        printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
                               (unsigned int) local_softirq_pending());
                        ratelimit++;
                }
                return false;
        }

        return true;
}

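/*
 * Start idle time accounting and, when nothing vetoes it, stop the tick
 * and record the resulting sleep statistics.
 */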
static void __tick_nohz_idle_enter(struct tick_sched *ts)
{
        ktime_t now, expires;
        int cpu = smp_processor_id();

        now = tick_nohz_start_idle(cpu, ts);

        if (can_stop_idle_tick(cpu, ts)) {
                int was_stopped = ts->tick_stopped;

                ts->idle_calls++;

                expires = tick_nohz_stop_sched_tick(ts, now, cpu);
                if (expires.tv64 > 0LL) {
                        ts->idle_sleeps++;
                        ts->idle_expires = expires;
                }

                if (!was_stopped && ts->tick_stopped)
                        ts->idle_jiffies = ts->last_jiffies;
        }
}

/**
 * tick_nohz_idle_enter - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick.
 * Called when we start the idle loop.
 *
 * The arch is responsible for calling:
 *
 * - rcu_idle_enter() after its last use of RCU before the CPU is put
 *   to sleep.
 * - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
 */
void tick_nohz_idle_enter(void)
{
        struct tick_sched *ts;

        WARN_ON_ONCE(irqs_disabled());

        /*
         * Update the idle state in the scheduler domain hierarchy
         * when tick_nohz_stop_sched_tick() is called from the idle loop.
         * State will be updated to busy during the first busy tick after
         * exiting idle.
         */
        set_cpu_sd_state_idle();

        local_irq_disable();

        ts = &__get_cpu_var(tick_cpu_sched);
        /*
         * Set ts->inidle unconditionally. Even if the system did not
         * switch to nohz mode the cpu frequency governors rely on the
         * update of the idle time accounting in tick_nohz_start_idle().
         */
        ts->inidle = 1;
        __tick_nohz_idle_enter(ts);

        local_irq_enable();
}
EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);

/**
 * tick_nohz_irq_exit - update next tick event from interrupt exit
 *
 * When an interrupt fires while we are idle and it doesn't cause
 * a reschedule, it may still add, modify or delete a timer, enqueue
 * an RCU callback, etc...
 * So we need to re-calculate and reprogram the next tick event.
 */
void tick_nohz_irq_exit(void)
{
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

        if (!ts->inidle)
                return;

        /* Cancel the timer because the CPU has already woken up from C-states */
        menu_hrtimer_cancel();
        __tick_nohz_idle_enter(ts);
}

/**
 * tick_nohz_get_sleep_length - return the length of the current sleep
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(void)
{
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

        return ts->sleep_length;
}

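/*
 * Re-arm the tick hrtimer relative to the last stored tick expiry and
 * forward it until it lands in the future.
 */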
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
        hrtimer_cancel(&ts->sched_timer);
        hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

        while (1) {
                /* Forward the time to expire in the future */
                hrtimer_forward(&ts->sched_timer, now, tick_period);

                if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
                        hrtimer_start_expires(&ts->sched_timer,
                                              HRTIMER_MODE_ABS_PINNED);
                        /* Check, if the timer was already in the past */
                        if (hrtimer_active(&ts->sched_timer))
                                break;
                } else {
                        if (!tick_program_event(
                                hrtimer_get_expires(&ts->sched_timer), 0))
                                break;
                }
                /* Reread time and update jiffies */
                now = ktime_get();
                tick_do_update_jiffies64(now);
        }
}

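/*
 * Bring the periodic tick back after an idle period: update jiffies and
 * the load accounting, then restart the tick timer.
 */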
static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
        /* Update jiffies first */
        tick_do_update_jiffies64(now);
        update_cpu_load_nohz();

        calc_load_exit_idle();
        touch_softlockup_watchdog();
        /*
         * Cancel the scheduled timer and restore the tick
         */
        ts->tick_stopped = 0;
        ts->idle_exittime = now;

        tick_nohz_restart(ts, now);
}

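/*
 * Without native vtime accounting, the ticks missed while the tick was
 * stopped have to be charged to idle time here.
 */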
static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        unsigned long ticks;

        if (vtime_accounting_enabled())
                return;
        /*
         * We stopped the tick in idle. Update process times would miss the
         * time we slept as update_process_times does only a 1 tick
         * accounting. Enforce that this is accounted to idle !
         */
        ticks = jiffies - ts->idle_jiffies;
        /*
         * We might be one off. Do not randomly account a huge number of ticks!
         */
        if (ticks && ticks < LONG_MAX)
                account_idle_ticks(ticks);
#endif
}

/**
 * tick_nohz_idle_exit - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 * This also exits the RCU extended quiescent state. The CPU
 * can use RCU again after this function is called.
 */
void tick_nohz_idle_exit(void)
{
        int cpu = smp_processor_id();
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        ktime_t now;

        local_irq_disable();

        WARN_ON_ONCE(!ts->inidle);

        ts->inidle = 0;

        /* Cancel the timer because the CPU has already woken up from C-states */
        menu_hrtimer_cancel();
        if (ts->idle_active || ts->tick_stopped)
                now = ktime_get();

        if (ts->idle_active)
                tick_nohz_stop_idle(cpu, now);

        if (ts->tick_stopped) {
                tick_nohz_restart_sched_tick(ts, now);
                tick_nohz_account_idle_ticks(ts);
        }

        local_irq_enable();
}
EXPORT_SYMBOL_GPL(tick_nohz_idle_exit);

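/*
 * Forward the tick timer beyond @now and program the clock event device.
 * Returns nonzero when the new expiry is already in the past.
 */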
static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
{
        hrtimer_forward(&ts->sched_timer, now, tick_period);
        return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
        struct pt_regs *regs = get_irq_regs();
        ktime_t now = ktime_get();

        dev->next_event.tv64 = KTIME_MAX;

        tick_sched_do_timer(now);
        tick_sched_handle(ts, regs);

        while (tick_nohz_reprogram(ts, now)) {
                now = ktime_get();
                tick_do_update_jiffies64(now);
        }
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
        ktime_t next;

        if (!tick_nohz_enabled)
                return;

        local_irq_disable();
        if (tick_switch_to_oneshot(tick_nohz_handler)) {
                local_irq_enable();
                return;
        }

        ts->nohz_mode = NOHZ_MODE_LOWRES;

        /*
         * Recycle the hrtimer in ts, so we can share the
         * hrtimer_forward with the highres code.
         */
        hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        /* Get the next period */
        next = tick_init_jiffy_update();

        for (;;) {
                hrtimer_set_expires(&ts->sched_timer, next);
                if (!tick_program_event(next, 0))
                        break;
                next = ktime_add(next, tick_period);
        }
        local_irq_enable();
}

/*
 * When NOHZ is enabled and the tick is stopped, we need to kick the
 * tick timer from irq_enter() so that the jiffies update is kept
 * alive during long running softirqs. That's ugly as hell, but
 * correctness is key even if we need to fix the offending softirq in
 * the first place.
 *
 * Note, this is different to tick_nohz_restart. We just kick the
 * timer and do not touch the other magic bits which need to be done
 * when idle is left.
 */
static void tick_nohz_kick_tick(int cpu, ktime_t now)
{
#if 0
        /* Switch back to 2.6.27 behaviour */

        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        ktime_t delta;

        /*
         * Do not touch the tick device, when the next expiry is either
         * already reached or less/equal than the tick period.
         */
        delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
        if (delta.tv64 <= tick_period.tv64)
                return;

        tick_nohz_restart(ts, now);
#endif
}

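/*
 * Called from irq_enter() via tick_check_idle(): end idle time accounting
 * and keep jiffies up to date when an interrupt hits an idle CPU whose
 * tick is stopped.
 */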
static inline void tick_check_nohz(int cpu)
{
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        ktime_t now;

        if (!ts->idle_active && !ts->tick_stopped)
                return;
        now = ktime_get();
        if (ts->idle_active)
                tick_nohz_stop_idle(cpu, now);
        if (ts->tick_stopped) {
                tick_nohz_update_jiffies(now);
                tick_nohz_kick_tick(cpu, now);
        }
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_check_nohz(int cpu) { }

#endif /* NO_HZ */

/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_check_idle(int cpu)
{
        tick_check_oneshot_broadcast(cpu);
        tick_check_nohz(cpu);
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
        struct tick_sched *ts =
                container_of(timer, struct tick_sched, sched_timer);
        struct pt_regs *regs = get_irq_regs();
        ktime_t now = ktime_get();

        tick_sched_do_timer(now);

        /*
         * Do not call, when we are not in irq context and have
         * no valid regs pointer
         */
        if (regs)
                tick_sched_handle(ts, regs);

        hrtimer_forward(timer, now, tick_period);

        return HRTIMER_RESTART;
}

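/*
 * "skew_tick" boot parameter: when set, tick_setup_sched_timer() staggers
 * the tick expiry across CPUs to avert jiffies_lock contention.
 */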
static int sched_skew_tick;

static int __init skew_tick(char *str)
{
        get_option(&str, &sched_skew_tick);

        return 0;
}
early_param("skew_tick", skew_tick);

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
        ktime_t now = ktime_get();

        /*
         * Emulate tick processing via per-CPU hrtimers:
         */
        hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        ts->sched_timer.function = tick_sched_timer;

        /* Get the next period (per cpu) */
        hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

        /* Offset the tick to avert jiffies_lock contention. */
        if (sched_skew_tick) {
                u64 offset = ktime_to_ns(tick_period) >> 1;
                do_div(offset, num_possible_cpus());
                offset *= smp_processor_id();
                hrtimer_add_expires_ns(&ts->sched_timer, offset);
        }

        for (;;) {
                hrtimer_forward(&ts->sched_timer, now, tick_period);
                hrtimer_start_expires(&ts->sched_timer,
                                      HRTIMER_MODE_ABS_PINNED);
                /* Check, if the timer was already in the past */
                if (hrtimer_active(&ts->sched_timer))
                        break;
                now = ktime_get();
        }

#ifdef CONFIG_NO_HZ
        if (tick_nohz_enabled)
                ts->nohz_mode = NOHZ_MODE_HIGHRES;
#endif
}
#endif /* HIGH_RES_TIMERS */

#if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
        if (ts->sched_timer.base)
                hrtimer_cancel(&ts->sched_timer);
# endif

        ts->nohz_mode = NOHZ_MODE_INACTIVE;
}
#endif

/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

        set_bit(0, &ts->check_clocks);
}

/**
 * Check, if a change happened, which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime).
 */
int tick_check_oneshot_change(int allow_nohz)
{
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

        if (!test_and_clear_bit(0, &ts->check_clocks))
                return 0;

        if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
                return 0;

        if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
                return 0;

        if (!allow_nohz)
                return 1;

        tick_nohz_switch_to_nohz();
        return 0;
}