timer: Use hlist for the timer wheel hash buckets
[deliverable/linux.git] / kernel / time / timer.c
1 /*
2 * linux/kernel/timer.c
3 *
4 * Kernel internal timers
5 *
6 * Copyright (C) 1991, 1992 Linus Torvalds
7 *
8 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
9 *
10 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
11 * "A Kernel Model for Precision Timekeeping" by Dave Mills
12 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
13 * serialize accesses to xtime/lost_ticks).
14 * Copyright (C) 1998 Andrea Arcangeli
15 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
16 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
17 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
18 * Copyright (C) 2000, 2001, 2002 Ingo Molnar
19 * Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
20 */
21
22 #include <linux/kernel_stat.h>
23 #include <linux/export.h>
24 #include <linux/interrupt.h>
25 #include <linux/percpu.h>
26 #include <linux/init.h>
27 #include <linux/mm.h>
28 #include <linux/swap.h>
29 #include <linux/pid_namespace.h>
30 #include <linux/notifier.h>
31 #include <linux/thread_info.h>
32 #include <linux/time.h>
33 #include <linux/jiffies.h>
34 #include <linux/posix-timers.h>
35 #include <linux/cpu.h>
36 #include <linux/syscalls.h>
37 #include <linux/delay.h>
38 #include <linux/tick.h>
39 #include <linux/kallsyms.h>
40 #include <linux/irq_work.h>
41 #include <linux/sched.h>
42 #include <linux/sched/sysctl.h>
43 #include <linux/slab.h>
44 #include <linux/compat.h>
45
46 #include <asm/uaccess.h>
47 #include <asm/unistd.h>
48 #include <asm/div64.h>
49 #include <asm/timex.h>
50 #include <asm/io.h>
51
52 #include "tick-internal.h"
53
54 #define CREATE_TRACE_POINTS
55 #include <trace/events/timer.h>
56
57 __visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
58
59 EXPORT_SYMBOL(jiffies_64);
60
61 /*
62 * per-CPU timer vector definitions:
63 */
64 #define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
65 #define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
66 #define TVN_SIZE (1 << TVN_BITS)
67 #define TVR_SIZE (1 << TVR_BITS)
68 #define TVN_MASK (TVN_SIZE - 1)
69 #define TVR_MASK (TVR_SIZE - 1)
70 #define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
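/*
 * A rough picture of the wheel geometry with the default
 * (!CONFIG_BASE_SMALL) values TVR_BITS = 8 and TVN_BITS = 6, in terms
 * of the distance (expires - base->timer_jiffies):
 *
 *	tv1: [0, 2^8)      256 buckets, 1 jiffy per bucket
 *	tv2: [2^8,  2^14)   64 buckets, 2^8 jiffies per bucket
 *	tv3: [2^14, 2^20)   64 buckets, 2^14 jiffies per bucket
 *	tv4: [2^20, 2^26)   64 buckets, 2^20 jiffies per bucket
 *	tv5: [2^26, 2^32)   64 buckets, 2^26 jiffies per bucket
 *
 * Distances beyond MAX_TVAL are clamped to MAX_TVAL in
 * __internal_add_timer().
 */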
71
72 struct tvec {
73 struct hlist_head vec[TVN_SIZE];
74 };
75
76 struct tvec_root {
77 struct hlist_head vec[TVR_SIZE];
78 };
79
80 struct tvec_base {
81 spinlock_t lock;
82 struct timer_list *running_timer;
83 unsigned long timer_jiffies;
84 unsigned long next_timer;
85 unsigned long active_timers;
86 unsigned long all_timers;
87 int cpu;
88 struct tvec_root tv1;
89 struct tvec tv2;
90 struct tvec tv3;
91 struct tvec tv4;
92 struct tvec tv5;
93 } ____cacheline_aligned;
94
95 /*
96 * __TIMER_INITIALIZER() needs to set ->base to a valid pointer (because we've
97 * made NULL special, hint: lock_timer_base()) and we cannot get a compile time
98 * pointer to per-cpu entries because we don't know where we'll map the section,
99 * even for the boot cpu.
100 *
101 * And so we use boot_tvec_bases for the boot CPU and per-cpu __tvec_bases for the
102 * rest of them.
103 */
104 struct tvec_base boot_tvec_bases;
105 EXPORT_SYMBOL(boot_tvec_bases);
106
107 static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
108
109 /* Functions below help us manage 'deferrable' flag */
110 static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
111 {
112 return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
113 }
114
115 static inline unsigned int tbase_get_irqsafe(struct tvec_base *base)
116 {
117 return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE);
118 }
119
120 static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
121 {
122 return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
123 }
124
125 static inline void
126 timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
127 {
128 unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;
129
130 timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
131 }
132
133 static unsigned long round_jiffies_common(unsigned long j, int cpu,
134 bool force_up)
135 {
136 int rem;
137 unsigned long original = j;
138
139 /*
140 * We don't want all cpus firing their timers at once hitting the
141 * same lock or cachelines, so we skew each extra cpu with an extra
142 * 3 jiffies. These 3 jiffies came originally from the mm/ code which
143 * already did this.
144 * The skew is done by adding 3*cpunr, then round, then subtract this
145 * extra offset again.
146 */
147 j += cpu * 3;
148
149 rem = j % HZ;
150
151 /*
152 * If the target jiffie is just after a whole second (which can happen
153 * due to delays of the timer irq, long irq off times etc etc) then
154 * we should round down to the whole second, not up. Use 1/4th second
155 * as cutoff for this rounding as an extreme upper bound for this.
156 * But never round down if @force_up is set.
157 */
158 if (rem < HZ/4 && !force_up) /* round down */
159 j = j - rem;
160 else /* round up */
161 j = j - rem + HZ;
162
163 /* now that we have rounded, subtract the extra skew again */
164 j -= cpu * 3;
165
166 /*
167 * Make sure j is still in the future. Otherwise return the
168 * unmodified value.
169 */
170 return time_is_after_jiffies(j) ? j : original;
171 }
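/*
 * A worked example (assuming HZ = 1000 and cpu = 0, so no skew): for
 * j = 5120 the remainder is 120 < HZ/4, so the result is rounded down
 * to 5000; for j = 5600 the remainder is 600 >= HZ/4, so the result is
 * rounded up to 6000. With force_up both values round up to 6000.
 * Either way the caller gets back the original j if the rounded value
 * is no longer in the future.
 */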
172
173 /**
174 * __round_jiffies - function to round jiffies to a full second
175 * @j: the time in (absolute) jiffies that should be rounded
176 * @cpu: the processor number on which the timeout will happen
177 *
178 * __round_jiffies() rounds an absolute time in the future (in jiffies)
179 * up or down to (approximately) full seconds. This is useful for timers
180 * for which the exact time they fire does not matter too much, as long as
181 * they fire approximately every X seconds.
182 *
183 * By rounding these timers to whole seconds, all such timers will fire
184 * at the same time, rather than at various times spread out. The goal
185 * of this is to have the CPU wake up less, which saves power.
186 *
187 * The exact rounding is skewed for each processor to avoid all
188 * processors firing at the exact same time, which could lead
189 * to lock contention or spurious cache line bouncing.
190 *
191 * The return value is the rounded version of the @j parameter.
192 */
193 unsigned long __round_jiffies(unsigned long j, int cpu)
194 {
195 return round_jiffies_common(j, cpu, false);
196 }
197 EXPORT_SYMBOL_GPL(__round_jiffies);
198
199 /**
200 * __round_jiffies_relative - function to round jiffies to a full second
201 * @j: the time in (relative) jiffies that should be rounded
202 * @cpu: the processor number on which the timeout will happen
203 *
204 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
205 * up or down to (approximately) full seconds. This is useful for timers
206 * for which the exact time they fire does not matter too much, as long as
207 * they fire approximately every X seconds.
208 *
209 * By rounding these timers to whole seconds, all such timers will fire
210 * at the same time, rather than at various times spread out. The goal
211 * of this is to have the CPU wake up less, which saves power.
212 *
213 * The exact rounding is skewed for each processor to avoid all
214 * processors firing at the exact same time, which could lead
215 * to lock contention or spurious cache line bouncing.
216 *
217 * The return value is the rounded version of the @j parameter.
218 */
219 unsigned long __round_jiffies_relative(unsigned long j, int cpu)
220 {
221 unsigned long j0 = jiffies;
222
223 /* Use j0 because jiffies might change while we run */
224 return round_jiffies_common(j + j0, cpu, false) - j0;
225 }
226 EXPORT_SYMBOL_GPL(__round_jiffies_relative);
227
228 /**
229 * round_jiffies - function to round jiffies to a full second
230 * @j: the time in (absolute) jiffies that should be rounded
231 *
232 * round_jiffies() rounds an absolute time in the future (in jiffies)
233 * up or down to (approximately) full seconds. This is useful for timers
234 * for which the exact time they fire does not matter too much, as long as
235 * they fire approximately every X seconds.
236 *
237 * By rounding these timers to whole seconds, all such timers will fire
238 * at the same time, rather than at various times spread out. The goal
239 * of this is to have the CPU wake up less, which saves power.
240 *
241 * The return value is the rounded version of the @j parameter.
242 */
243 unsigned long round_jiffies(unsigned long j)
244 {
245 return round_jiffies_common(j, raw_smp_processor_id(), false);
246 }
247 EXPORT_SYMBOL_GPL(round_jiffies);
248
249 /**
250 * round_jiffies_relative - function to round jiffies to a full second
251 * @j: the time in (relative) jiffies that should be rounded
252 *
253 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
254 * up or down to (approximately) full seconds. This is useful for timers
255 * for which the exact time they fire does not matter too much, as long as
256 * they fire approximately every X seconds.
257 *
258 * By rounding these timers to whole seconds, all such timers will fire
259 * at the same time, rather than at various times spread out. The goal
260 * of this is to have the CPU wake up less, which saves power.
261 *
262 * The return value is the rounded version of the @j parameter.
263 */
264 unsigned long round_jiffies_relative(unsigned long j)
265 {
266 return __round_jiffies_relative(j, raw_smp_processor_id());
267 }
268 EXPORT_SYMBOL_GPL(round_jiffies_relative);
269
270 /**
271 * __round_jiffies_up - function to round jiffies up to a full second
272 * @j: the time in (absolute) jiffies that should be rounded
273 * @cpu: the processor number on which the timeout will happen
274 *
275 * This is the same as __round_jiffies() except that it will never
276 * round down. This is useful for timeouts for which the exact time
277 * of firing does not matter too much, as long as they don't fire too
278 * early.
279 */
280 unsigned long __round_jiffies_up(unsigned long j, int cpu)
281 {
282 return round_jiffies_common(j, cpu, true);
283 }
284 EXPORT_SYMBOL_GPL(__round_jiffies_up);
285
286 /**
287 * __round_jiffies_up_relative - function to round jiffies up to a full second
288 * @j: the time in (relative) jiffies that should be rounded
289 * @cpu: the processor number on which the timeout will happen
290 *
291 * This is the same as __round_jiffies_relative() except that it will never
292 * round down. This is useful for timeouts for which the exact time
293 * of firing does not matter too much, as long as they don't fire too
294 * early.
295 */
296 unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
297 {
298 unsigned long j0 = jiffies;
299
300 /* Use j0 because jiffies might change while we run */
301 return round_jiffies_common(j + j0, cpu, true) - j0;
302 }
303 EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
304
305 /**
306 * round_jiffies_up - function to round jiffies up to a full second
307 * @j: the time in (absolute) jiffies that should be rounded
308 *
309 * This is the same as round_jiffies() except that it will never
310 * round down. This is useful for timeouts for which the exact time
311 * of firing does not matter too much, as long as they don't fire too
312 * early.
313 */
314 unsigned long round_jiffies_up(unsigned long j)
315 {
316 return round_jiffies_common(j, raw_smp_processor_id(), true);
317 }
318 EXPORT_SYMBOL_GPL(round_jiffies_up);
319
320 /**
321 * round_jiffies_up_relative - function to round jiffies up to a full second
322 * @j: the time in (relative) jiffies that should be rounded
323 *
324 * This is the same as round_jiffies_relative() except that it will never
325 * round down. This is useful for timeouts for which the exact time
326 * of firing does not matter too much, as long as they don't fire too
327 * early.
328 */
329 unsigned long round_jiffies_up_relative(unsigned long j)
330 {
331 return __round_jiffies_up_relative(j, raw_smp_processor_id());
332 }
333 EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
334
335 /**
336 * set_timer_slack - set the allowed slack for a timer
337 * @timer: the timer to be modified
338 * @slack_hz: the amount of time (in jiffies) allowed for rounding
339 *
340 * Set the amount of time, in jiffies, that a certain timer has
341 * in terms of slack. By setting this value, the timer subsystem
342 * will schedule the actual timer somewhere between
343 * the time mod_timer() asks for, and that time plus the slack.
344 *
345 * By setting the slack to -1, a percentage of the delay is used
346 * instead.
347 */
348 void set_timer_slack(struct timer_list *timer, int slack_hz)
349 {
350 timer->slack = slack_hz;
351 }
352 EXPORT_SYMBOL_GPL(set_timer_slack);
353
354 static void
355 __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
356 {
357 unsigned long expires = timer->expires;
358 unsigned long idx = expires - base->timer_jiffies;
359 struct hlist_head *vec;
360
361 if (idx < TVR_SIZE) {
362 int i = expires & TVR_MASK;
363 vec = base->tv1.vec + i;
364 } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
365 int i = (expires >> TVR_BITS) & TVN_MASK;
366 vec = base->tv2.vec + i;
367 } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
368 int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
369 vec = base->tv3.vec + i;
370 } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
371 int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
372 vec = base->tv4.vec + i;
373 } else if ((signed long) idx < 0) {
374 /*
375 * Can happen if you add a timer with expires == jiffies,
376 * or you set a timer to go off in the past
377 */
378 vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
379 } else {
380 int i;
381 /* If the timeout is larger than MAX_TVAL (on 64-bit
382 * architectures or with CONFIG_BASE_SMALL=1) then we
383 * use the maximum timeout.
384 */
385 if (idx > MAX_TVAL) {
386 idx = MAX_TVAL;
387 expires = idx + base->timer_jiffies;
388 }
389 i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
390 vec = base->tv5.vec + i;
391 }
392
393 hlist_add_head(&timer->entry, vec);
394 }
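/*
 * A worked example (assuming TVR_BITS = 8, TVN_BITS = 6 and
 * base->timer_jiffies = 1000): a timer with expires = 1100 gives
 * idx = 100 < TVR_SIZE, so it goes into tv1.vec[1100 & TVR_MASK], i.e.
 * bucket 76 of tv1; a timer with expires = 2000 gives idx = 1000,
 * which falls into the [2^8, 2^14) range, so it goes into
 * tv2.vec[(2000 >> TVR_BITS) & TVN_MASK], i.e. bucket 7 of tv2.
 */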
395
396 static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
397 {
398	/* Advance base->timer_jiffies, if the base is empty */
399 if (!base->all_timers++)
400 base->timer_jiffies = jiffies;
401
402 __internal_add_timer(base, timer);
403 /*
404 * Update base->active_timers and base->next_timer
405 */
406 if (!tbase_get_deferrable(timer->base)) {
407 if (!base->active_timers++ ||
408 time_before(timer->expires, base->next_timer))
409 base->next_timer = timer->expires;
410 }
411
412 /*
413 * Check whether the other CPU is in dynticks mode and needs
414 * to be triggered to reevaluate the timer wheel.
415 * We are protected against the other CPU fiddling
416 * with the timer by holding the timer base lock. This also
417 * makes sure that a CPU on the way to stop its tick can not
418 * evaluate the timer wheel.
419 *
420 * Spare the IPI for deferrable timers on idle targets though.
421 * The next busy ticks will take care of it. Except full dynticks
422 * require special care against races with idle_cpu(), let's deal
423 * with that later.
424 */
425 if (!tbase_get_deferrable(timer->base) || tick_nohz_full_cpu(base->cpu))
426 wake_up_nohz_cpu(base->cpu);
427 }
428
429 #ifdef CONFIG_TIMER_STATS
430 void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
431 {
432 if (timer->start_site)
433 return;
434
435 timer->start_site = addr;
436 memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
437 timer->start_pid = current->pid;
438 }
439
440 static void timer_stats_account_timer(struct timer_list *timer)
441 {
442 unsigned int flag = 0;
443
444 if (likely(!timer->start_site))
445 return;
446 if (unlikely(tbase_get_deferrable(timer->base)))
447 flag |= TIMER_STATS_FLAG_DEFERRABLE;
448
449 timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
450 timer->function, timer->start_comm, flag);
451 }
452
453 #else
454 static void timer_stats_account_timer(struct timer_list *timer) {}
455 #endif
456
457 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
458
459 static struct debug_obj_descr timer_debug_descr;
460
461 static void *timer_debug_hint(void *addr)
462 {
463 return ((struct timer_list *) addr)->function;
464 }
465
466 /*
467 * fixup_init is called when:
468 * - an active object is initialized
469 */
470 static int timer_fixup_init(void *addr, enum debug_obj_state state)
471 {
472 struct timer_list *timer = addr;
473
474 switch (state) {
475 case ODEBUG_STATE_ACTIVE:
476 del_timer_sync(timer);
477 debug_object_init(timer, &timer_debug_descr);
478 return 1;
479 default:
480 return 0;
481 }
482 }
483
484 /* Stub timer callback for improperly used timers. */
485 static void stub_timer(unsigned long data)
486 {
487 WARN_ON(1);
488 }
489
490 /*
491 * fixup_activate is called when:
492 * - an active object is activated
493 * - an unknown object is activated (might be a statically initialized object)
494 */
495 static int timer_fixup_activate(void *addr, enum debug_obj_state state)
496 {
497 struct timer_list *timer = addr;
498
499 switch (state) {
500
501 case ODEBUG_STATE_NOTAVAILABLE:
502 /*
503 * This is not really a fixup. The timer was
504 * statically initialized. We just make sure that it
505 * is tracked in the object tracker.
506 */
507 if (timer->entry.pprev == NULL &&
508 timer->entry.next == TIMER_ENTRY_STATIC) {
509 debug_object_init(timer, &timer_debug_descr);
510 debug_object_activate(timer, &timer_debug_descr);
511 return 0;
512 } else {
513 setup_timer(timer, stub_timer, 0);
514 return 1;
515 }
516 return 0;
517
518 case ODEBUG_STATE_ACTIVE:
519 WARN_ON(1);
520
521 default:
522 return 0;
523 }
524 }
525
526 /*
527 * fixup_free is called when:
528 * - an active object is freed
529 */
530 static int timer_fixup_free(void *addr, enum debug_obj_state state)
531 {
532 struct timer_list *timer = addr;
533
534 switch (state) {
535 case ODEBUG_STATE_ACTIVE:
536 del_timer_sync(timer);
537 debug_object_free(timer, &timer_debug_descr);
538 return 1;
539 default:
540 return 0;
541 }
542 }
543
544 /*
545 * fixup_assert_init is called when:
546 * - an untracked/uninit-ed object is found
547 */
548 static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
549 {
550 struct timer_list *timer = addr;
551
552 switch (state) {
553 case ODEBUG_STATE_NOTAVAILABLE:
554 if (timer->entry.next == TIMER_ENTRY_STATIC) {
555 /*
556 * This is not really a fixup. The timer was
557 * statically initialized. We just make sure that it
558 * is tracked in the object tracker.
559 */
560 debug_object_init(timer, &timer_debug_descr);
561 return 0;
562 } else {
563 setup_timer(timer, stub_timer, 0);
564 return 1;
565 }
566 default:
567 return 0;
568 }
569 }
570
571 static struct debug_obj_descr timer_debug_descr = {
572 .name = "timer_list",
573 .debug_hint = timer_debug_hint,
574 .fixup_init = timer_fixup_init,
575 .fixup_activate = timer_fixup_activate,
576 .fixup_free = timer_fixup_free,
577 .fixup_assert_init = timer_fixup_assert_init,
578 };
579
580 static inline void debug_timer_init(struct timer_list *timer)
581 {
582 debug_object_init(timer, &timer_debug_descr);
583 }
584
585 static inline void debug_timer_activate(struct timer_list *timer)
586 {
587 debug_object_activate(timer, &timer_debug_descr);
588 }
589
590 static inline void debug_timer_deactivate(struct timer_list *timer)
591 {
592 debug_object_deactivate(timer, &timer_debug_descr);
593 }
594
595 static inline void debug_timer_free(struct timer_list *timer)
596 {
597 debug_object_free(timer, &timer_debug_descr);
598 }
599
600 static inline void debug_timer_assert_init(struct timer_list *timer)
601 {
602 debug_object_assert_init(timer, &timer_debug_descr);
603 }
604
605 static void do_init_timer(struct timer_list *timer, unsigned int flags,
606 const char *name, struct lock_class_key *key);
607
608 void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
609 const char *name, struct lock_class_key *key)
610 {
611 debug_object_init_on_stack(timer, &timer_debug_descr);
612 do_init_timer(timer, flags, name, key);
613 }
614 EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
615
616 void destroy_timer_on_stack(struct timer_list *timer)
617 {
618 debug_object_free(timer, &timer_debug_descr);
619 }
620 EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
621
622 #else
623 static inline void debug_timer_init(struct timer_list *timer) { }
624 static inline void debug_timer_activate(struct timer_list *timer) { }
625 static inline void debug_timer_deactivate(struct timer_list *timer) { }
626 static inline void debug_timer_assert_init(struct timer_list *timer) { }
627 #endif
628
629 static inline void debug_init(struct timer_list *timer)
630 {
631 debug_timer_init(timer);
632 trace_timer_init(timer);
633 }
634
635 static inline void
636 debug_activate(struct timer_list *timer, unsigned long expires)
637 {
638 debug_timer_activate(timer);
639 trace_timer_start(timer, expires, tbase_get_deferrable(timer->base));
640 }
641
642 static inline void debug_deactivate(struct timer_list *timer)
643 {
644 debug_timer_deactivate(timer);
645 trace_timer_cancel(timer);
646 }
647
648 static inline void debug_assert_init(struct timer_list *timer)
649 {
650 debug_timer_assert_init(timer);
651 }
652
653 static void do_init_timer(struct timer_list *timer, unsigned int flags,
654 const char *name, struct lock_class_key *key)
655 {
656 struct tvec_base *base = raw_cpu_read(tvec_bases);
657
658 timer->entry.pprev = NULL;
659 timer->base = (void *)((unsigned long)base | flags);
660 timer->slack = -1;
661 #ifdef CONFIG_TIMER_STATS
662 timer->start_site = NULL;
663 timer->start_pid = -1;
664 memset(timer->start_comm, 0, TASK_COMM_LEN);
665 #endif
666 lockdep_init_map(&timer->lockdep_map, name, key, 0);
667 }
668
669 /**
670 * init_timer_key - initialize a timer
671 * @timer: the timer to be initialized
672 * @flags: timer flags
673 * @name: name of the timer
674 * @key: lockdep class key of the fake lock used for tracking timer
675 * sync lock dependencies
676 *
677 * init_timer_key() must be done to a timer prior to calling *any* of the
678 * other timer functions.
679 */
680 void init_timer_key(struct timer_list *timer, unsigned int flags,
681 const char *name, struct lock_class_key *key)
682 {
683 debug_init(timer);
684 do_init_timer(timer, flags, name, key);
685 }
686 EXPORT_SYMBOL(init_timer_key);
687
688 static inline void detach_timer(struct timer_list *timer, bool clear_pending)
689 {
690 struct hlist_node *entry = &timer->entry;
691
692 debug_deactivate(timer);
693
694 __hlist_del(entry);
695 if (clear_pending)
696 entry->pprev = NULL;
697 entry->next = LIST_POISON2;
698 }
699
700 static inline void
701 detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
702 {
703 detach_timer(timer, true);
704 if (!tbase_get_deferrable(timer->base))
705 base->active_timers--;
706 base->all_timers--;
707 }
708
709 static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
710 bool clear_pending)
711 {
712 if (!timer_pending(timer))
713 return 0;
714
715 detach_timer(timer, clear_pending);
716 if (!tbase_get_deferrable(timer->base)) {
717 base->active_timers--;
718 if (timer->expires == base->next_timer)
719 base->next_timer = base->timer_jiffies;
720 }
721	/* If this was the last timer, advance base->timer_jiffies */
722 if (!--base->all_timers)
723 base->timer_jiffies = jiffies;
724 return 1;
725 }
726
727 /*
728 * We are using hashed locking: holding per_cpu(tvec_bases).lock
729 * means that all timers which are tied to this base via timer->base are
730 * locked, and the base itself is locked too.
731 *
732 * So __run_timers/migrate_timers can safely modify all timers which could
733 * be found on ->tvX lists.
734 *
735 * When the timer's base is locked, and the timer removed from list, it is
736 * possible to set timer->base = NULL and drop the lock: the timer remains
737 * locked.
738 */
739 static struct tvec_base *lock_timer_base(struct timer_list *timer,
740 unsigned long *flags)
741 __acquires(timer->base->lock)
742 {
743 struct tvec_base *base;
744
745 for (;;) {
746 struct tvec_base *prelock_base = timer->base;
747 base = tbase_get_base(prelock_base);
748 if (likely(base != NULL)) {
749 spin_lock_irqsave(&base->lock, *flags);
750 if (likely(prelock_base == timer->base))
751 return base;
752 /* The timer has migrated to another CPU */
753 spin_unlock_irqrestore(&base->lock, *flags);
754 }
755 cpu_relax();
756 }
757 }
758
759 static inline int
760 __mod_timer(struct timer_list *timer, unsigned long expires,
761 bool pending_only, int pinned)
762 {
763 struct tvec_base *base, *new_base;
764 unsigned long flags;
765	int ret = 0, cpu;
766
767 timer_stats_timer_set_start_info(timer);
768 BUG_ON(!timer->function);
769
770 base = lock_timer_base(timer, &flags);
771
772 ret = detach_if_pending(timer, base, false);
773 if (!ret && pending_only)
774 goto out_unlock;
775
776 debug_activate(timer, expires);
777
778 cpu = get_nohz_timer_target(pinned);
779 new_base = per_cpu(tvec_bases, cpu);
780
781 if (base != new_base) {
782 /*
783 * We are trying to schedule the timer on the local CPU.
784 * However we can't change timer's base while it is running,
785 * otherwise del_timer_sync() can't detect that the timer's
786 * handler has not yet finished. This also guarantees that
787 * the timer is serialized wrt itself.
788 */
789 if (likely(base->running_timer != timer)) {
790 /* See the comment in lock_timer_base() */
791 timer_set_base(timer, NULL);
792 spin_unlock(&base->lock);
793 base = new_base;
794 spin_lock(&base->lock);
795 timer_set_base(timer, base);
796 }
797 }
798
799 timer->expires = expires;
800 internal_add_timer(base, timer);
801
802 out_unlock:
803 spin_unlock_irqrestore(&base->lock, flags);
804
805 return ret;
806 }
807
808 /**
809 * mod_timer_pending - modify a pending timer's timeout
810 * @timer: the pending timer to be modified
811 * @expires: new timeout in jiffies
812 *
813 * mod_timer_pending() is the same for pending timers as mod_timer(),
814 * but will not re-activate and modify already deleted timers.
815 *
816 * It is useful for unserialized use of timers.
817 */
818 int mod_timer_pending(struct timer_list *timer, unsigned long expires)
819 {
820 return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
821 }
822 EXPORT_SYMBOL(mod_timer_pending);
823
824 /*
825 * Decide where to put the timer while taking the slack into account
826 *
827 * Algorithm:
828 * 1) calculate the maximum (absolute) time
829 * 2) calculate the highest bit where the expires and new max are different
830 * 3) use this bit to make a mask
831 * 4) use the bitmask to round down the maximum time, so that all last
832 * bits are zeros
833 */
834 static inline
835 unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
836 {
837 unsigned long expires_limit, mask;
838 int bit;
839
840 if (timer->slack >= 0) {
841 expires_limit = expires + timer->slack;
842 } else {
843 long delta = expires - jiffies;
844
845 if (delta < 256)
846 return expires;
847
848 expires_limit = expires + delta / 256;
849 }
850 mask = expires ^ expires_limit;
851 if (mask == 0)
852 return expires;
853
854 bit = find_last_bit(&mask, BITS_PER_LONG);
855
856 mask = (1UL << bit) - 1;
857
858 expires_limit = expires_limit & ~(mask);
859
860 return expires_limit;
861 }
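/*
 * A worked example (assuming jiffies = 1000 and timer->slack = -1): for
 * expires = 2000, delta = 1000 and expires_limit = 2000 + 1000/256 =
 * 2003. expires ^ expires_limit = 3, whose highest set bit is bit 1, so
 * mask = 1 and the result is 2003 & ~1 = 2002. The timer may thus fire
 * up to two jiffies late, roughly delta/256 of the requested delay.
 */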
862
863 /**
864 * mod_timer - modify a timer's timeout
865 * @timer: the timer to be modified
866 * @expires: new timeout in jiffies
867 *
868 * mod_timer() is a more efficient way to update the expire field of an
869 * active timer (if the timer is inactive it will be activated)
870 *
871 * mod_timer(timer, expires) is equivalent to:
872 *
873 * del_timer(timer); timer->expires = expires; add_timer(timer);
874 *
875 * Note that if there are multiple unserialized concurrent users of the
876 * same timer, then mod_timer() is the only safe way to modify the timeout,
877 * since add_timer() cannot modify an already running timer.
878 *
879 * The function returns whether it has modified a pending timer or not.
880 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
881 * active timer returns 1.)
882 */
883 int mod_timer(struct timer_list *timer, unsigned long expires)
884 {
885 expires = apply_slack(timer, expires);
886
887 /*
888 * This is a common optimization triggered by the
889 * networking code - if the timer is re-modified
890 * to be the same thing then just return:
891 */
892 if (timer_pending(timer) && timer->expires == expires)
893 return 1;
894
895 return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
896 }
897 EXPORT_SYMBOL(mod_timer);
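/*
 * A minimal usage sketch (assuming a hypothetical driver object "foo"
 * that embeds a timer_list and uses this kernel's unsigned long data
 * callback convention):
 *
 *	static void foo_timeout(unsigned long data)
 *	{
 *		struct foo *foo = (struct foo *)data;
 *		...	handle the timeout, optionally re-arm via mod_timer()
 *	}
 *
 *	setup_timer(&foo->timer, foo_timeout, (unsigned long)foo);
 *	mod_timer(&foo->timer, jiffies + msecs_to_jiffies(100));
 *	...
 *	del_timer_sync(&foo->timer);	(before foo is freed)
 */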
898
899 /**
900 * mod_timer_pinned - modify a timer's timeout
901 * @timer: the timer to be modified
902 * @expires: new timeout in jiffies
903 *
904 * mod_timer_pinned() is a way to update the expire field of an
905 * active timer (if the timer is inactive it will be activated)
906 * and to ensure that the timer is scheduled on the current CPU.
907 *
908 * Note that this does not prevent the timer from being migrated
909 * when the current CPU goes offline. If this is a problem for
910 * you, use CPU-hotplug notifiers to handle it correctly, for
911 * example, cancelling the timer when the corresponding CPU goes
912 * offline.
913 *
914 * mod_timer_pinned(timer, expires) is equivalent to:
915 *
916 * del_timer(timer); timer->expires = expires; add_timer(timer);
917 */
918 int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
919 {
920 if (timer->expires == expires && timer_pending(timer))
921 return 1;
922
923 return __mod_timer(timer, expires, false, TIMER_PINNED);
924 }
925 EXPORT_SYMBOL(mod_timer_pinned);
926
927 /**
928 * add_timer - start a timer
929 * @timer: the timer to be added
930 *
931 * The kernel will do a ->function(->data) callback from the
932 * timer interrupt at the ->expires point in the future. The
933 * current time is 'jiffies'.
934 *
935 * The timer's ->expires, ->function (and if the handler uses it, ->data)
936 * fields must be set prior to calling this function.
937 *
938 * Timers with an ->expires field in the past will be executed in the next
939 * timer tick.
940 */
941 void add_timer(struct timer_list *timer)
942 {
943 BUG_ON(timer_pending(timer));
944 mod_timer(timer, timer->expires);
945 }
946 EXPORT_SYMBOL(add_timer);
947
948 /**
949 * add_timer_on - start a timer on a particular CPU
950 * @timer: the timer to be added
951 * @cpu: the CPU to start it on
952 *
953 * This is not very scalable on SMP. Double adds are not possible.
954 */
955 void add_timer_on(struct timer_list *timer, int cpu)
956 {
957 struct tvec_base *base = per_cpu(tvec_bases, cpu);
958 unsigned long flags;
959
960 timer_stats_timer_set_start_info(timer);
961 BUG_ON(timer_pending(timer) || !timer->function);
962 spin_lock_irqsave(&base->lock, flags);
963 timer_set_base(timer, base);
964 debug_activate(timer, timer->expires);
965 internal_add_timer(base, timer);
966 spin_unlock_irqrestore(&base->lock, flags);
967 }
968 EXPORT_SYMBOL_GPL(add_timer_on);
969
970 /**
971 * del_timer - deactivate a timer.
972 * @timer: the timer to be deactivated
973 *
974 * del_timer() deactivates a timer - this works on both active and inactive
975 * timers.
976 *
977 * The function returns whether it has deactivated a pending timer or not.
978 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
979 * active timer returns 1.)
980 */
981 int del_timer(struct timer_list *timer)
982 {
983 struct tvec_base *base;
984 unsigned long flags;
985 int ret = 0;
986
987 debug_assert_init(timer);
988
989 timer_stats_timer_clear_start_info(timer);
990 if (timer_pending(timer)) {
991 base = lock_timer_base(timer, &flags);
992 ret = detach_if_pending(timer, base, true);
993 spin_unlock_irqrestore(&base->lock, flags);
994 }
995
996 return ret;
997 }
998 EXPORT_SYMBOL(del_timer);
999
1000 /**
1001 * try_to_del_timer_sync - Try to deactivate a timer
1002 * @timer: the timer to delete
1003 *
1004 * This function tries to deactivate a timer. Upon successful (ret >= 0)
1005 * exit the timer is not queued and the handler is not running on any CPU.
1006 */
1007 int try_to_del_timer_sync(struct timer_list *timer)
1008 {
1009 struct tvec_base *base;
1010 unsigned long flags;
1011 int ret = -1;
1012
1013 debug_assert_init(timer);
1014
1015 base = lock_timer_base(timer, &flags);
1016
1017 if (base->running_timer != timer) {
1018 timer_stats_timer_clear_start_info(timer);
1019 ret = detach_if_pending(timer, base, true);
1020 }
1021 spin_unlock_irqrestore(&base->lock, flags);
1022
1023 return ret;
1024 }
1025 EXPORT_SYMBOL(try_to_del_timer_sync);
1026
1027 #ifdef CONFIG_SMP
1028 static DEFINE_PER_CPU(struct tvec_base, __tvec_bases);
1029
1030 /**
1031 * del_timer_sync - deactivate a timer and wait for the handler to finish.
1032 * @timer: the timer to be deactivated
1033 *
1034 * This function only differs from del_timer() on SMP: besides deactivating
1035 * the timer it also makes sure the handler has finished executing on other
1036 * CPUs.
1037 *
1038 * Synchronization rules: Callers must prevent restarting of the timer,
1039 * otherwise this function is meaningless. It must not be called from
1040 * interrupt contexts unless the timer is an irqsafe one. The caller must
1041 * not hold locks which would prevent completion of the timer's
1042 * handler. The timer's handler must not call add_timer_on(). Upon exit the
1043 * timer is not queued and the handler is not running on any CPU.
1044 *
1045 * Note: For !irqsafe timers, you must not hold locks that are held in
1046 * interrupt context while calling this function. Even if the lock has
1047 * nothing to do with the timer in question. Here's why:
1048 *
1049 * CPU0 CPU1
1050 * ---- ----
1051 * <SOFTIRQ>
1052 * call_timer_fn();
1053 * base->running_timer = mytimer;
1054 * spin_lock_irq(somelock);
1055 * <IRQ>
1056 * spin_lock(somelock);
1057 * del_timer_sync(mytimer);
1058 * while (base->running_timer == mytimer);
1059 *
1060 * Now del_timer_sync() will never return and never release somelock.
1061 * The interrupt on the other CPU is waiting to grab somelock but
1062 * it has interrupted the softirq that CPU0 is waiting to finish.
1063 *
1064 * The function returns whether it has deactivated a pending timer or not.
1065 */
1066 int del_timer_sync(struct timer_list *timer)
1067 {
1068 #ifdef CONFIG_LOCKDEP
1069 unsigned long flags;
1070
1071 /*
1072 * If lockdep gives a backtrace here, please reference
1073 * the synchronization rules above.
1074 */
1075 local_irq_save(flags);
1076 lock_map_acquire(&timer->lockdep_map);
1077 lock_map_release(&timer->lockdep_map);
1078 local_irq_restore(flags);
1079 #endif
1080 /*
1081 * don't use it in hardirq context, because it
1082 * could lead to deadlock.
1083 */
1084 WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base));
1085 for (;;) {
1086 int ret = try_to_del_timer_sync(timer);
1087 if (ret >= 0)
1088 return ret;
1089 cpu_relax();
1090 }
1091 }
1092 EXPORT_SYMBOL(del_timer_sync);
1093 #endif
1094
1095 static int cascade(struct tvec_base *base, struct tvec *tv, int index)
1096 {
1097 /* cascade all the timers from tv up one level */
1098 struct timer_list *timer;
1099 struct hlist_node *tmp;
1100 struct hlist_head tv_list;
1101
1102 hlist_move_list(tv->vec + index, &tv_list);
1103
1104 /*
1105 * We are removing _all_ timers from the list, so we
1106 * don't have to detach them individually.
1107 */
1108 hlist_for_each_entry_safe(timer, tmp, &tv_list, entry) {
1109 BUG_ON(tbase_get_base(timer->base) != base);
1110 /* No accounting, while moving them */
1111 __internal_add_timer(base, timer);
1112 }
1113
1114 return index;
1115 }
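/*
 * In other words: whenever base->timer_jiffies crosses a multiple of
 * TVR_SIZE (or of a higher-level granularity), __run_timers() empties
 * the corresponding upper-level bucket through cascade() and re-hashes
 * each timer with __internal_add_timer(); timers that are now less than
 * TVR_SIZE jiffies away drop into tv1, where they expire with single
 * jiffy granularity.
 */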
1116
1117 static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
1118 unsigned long data)
1119 {
1120 int count = preempt_count();
1121
1122 #ifdef CONFIG_LOCKDEP
1123 /*
1124 * It is permissible to free the timer from inside the
1125 * function that is called from it, this we need to take into
1126 * account for lockdep too. To avoid bogus "held lock freed"
1127 * warnings as well as problems when looking into
1128 * timer->lockdep_map, make a copy and use that here.
1129 */
1130 struct lockdep_map lockdep_map;
1131
1132 lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
1133 #endif
1134 /*
1135 * Couple the lock chain with the lock chain at
1136 * del_timer_sync() by acquiring the lock_map around the fn()
1137 * call here and in del_timer_sync().
1138 */
1139 lock_map_acquire(&lockdep_map);
1140
1141 trace_timer_expire_entry(timer);
1142 fn(data);
1143 trace_timer_expire_exit(timer);
1144
1145 lock_map_release(&lockdep_map);
1146
1147 if (count != preempt_count()) {
1148 WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
1149 fn, count, preempt_count());
1150 /*
1151 * Restore the preempt count. That gives us a decent
1152 * chance to survive and extract information. If the
1153 * callback kept a lock held, bad luck, but not worse
1154 * than the BUG() we had.
1155 */
1156 preempt_count_set(count);
1157 }
1158 }
1159
1160 #define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
1161
1162 /**
1163 * __run_timers - run all expired timers (if any) on this CPU.
1164 * @base: the timer vector to be processed.
1165 *
1166 * This function cascades all vectors and executes all expired timer
1167 * vectors.
1168 */
1169 static inline void __run_timers(struct tvec_base *base)
1170 {
1171 struct timer_list *timer;
1172
1173 spin_lock_irq(&base->lock);
1174
1175 while (time_after_eq(jiffies, base->timer_jiffies)) {
1176 struct hlist_head work_list;
1177 struct hlist_head *head = &work_list;
1178 int index;
1179
1180 if (!base->all_timers) {
1181 base->timer_jiffies = jiffies;
1182 break;
1183 }
1184
1185 index = base->timer_jiffies & TVR_MASK;
1186
1187 /*
1188 * Cascade timers:
1189 */
1190 if (!index &&
1191 (!cascade(base, &base->tv2, INDEX(0))) &&
1192 (!cascade(base, &base->tv3, INDEX(1))) &&
1193 !cascade(base, &base->tv4, INDEX(2)))
1194 cascade(base, &base->tv5, INDEX(3));
1195 ++base->timer_jiffies;
1196 hlist_move_list(base->tv1.vec + index, head);
1197 while (!hlist_empty(head)) {
1198 void (*fn)(unsigned long);
1199 unsigned long data;
1200 bool irqsafe;
1201
1202 timer = hlist_entry(head->first, struct timer_list, entry);
1203 fn = timer->function;
1204 data = timer->data;
1205 irqsafe = tbase_get_irqsafe(timer->base);
1206
1207 timer_stats_account_timer(timer);
1208
1209 base->running_timer = timer;
1210 detach_expired_timer(timer, base);
1211
1212 if (irqsafe) {
1213 spin_unlock(&base->lock);
1214 call_timer_fn(timer, fn, data);
1215 spin_lock(&base->lock);
1216 } else {
1217 spin_unlock_irq(&base->lock);
1218 call_timer_fn(timer, fn, data);
1219 spin_lock_irq(&base->lock);
1220 }
1221 }
1222 }
1223 base->running_timer = NULL;
1224 spin_unlock_irq(&base->lock);
1225 }
1226
1227 #ifdef CONFIG_NO_HZ_COMMON
1228 /*
1229 * Find out when the next timer event is due to happen. This
1230 * is used on S/390 to stop all activity when a CPU is idle.
1231 * This function needs to be called with interrupts disabled.
1232 */
1233 static unsigned long __next_timer_interrupt(struct tvec_base *base)
1234 {
1235 unsigned long timer_jiffies = base->timer_jiffies;
1236 unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
1237 int index, slot, array, found = 0;
1238 struct timer_list *nte;
1239 struct tvec *varray[4];
1240
1241 /* Look for timer events in tv1. */
1242 index = slot = timer_jiffies & TVR_MASK;
1243 do {
1244 hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
1245 if (tbase_get_deferrable(nte->base))
1246 continue;
1247
1248 found = 1;
1249 expires = nte->expires;
1250 /* Look at the cascade bucket(s)? */
1251 if (!index || slot < index)
1252 goto cascade;
1253 return expires;
1254 }
1255 slot = (slot + 1) & TVR_MASK;
1256 } while (slot != index);
1257
1258 cascade:
1259 /* Calculate the next cascade event */
1260 if (index)
1261 timer_jiffies += TVR_SIZE - index;
1262 timer_jiffies >>= TVR_BITS;
1263
1264 /* Check tv2-tv5. */
1265 varray[0] = &base->tv2;
1266 varray[1] = &base->tv3;
1267 varray[2] = &base->tv4;
1268 varray[3] = &base->tv5;
1269
1270 for (array = 0; array < 4; array++) {
1271 struct tvec *varp = varray[array];
1272
1273 index = slot = timer_jiffies & TVN_MASK;
1274 do {
1275 hlist_for_each_entry(nte, varp->vec + slot, entry) {
1276 if (tbase_get_deferrable(nte->base))
1277 continue;
1278
1279 found = 1;
1280 if (time_before(nte->expires, expires))
1281 expires = nte->expires;
1282 }
1283 /*
1284 * Do we still search for the first timer or are
1285 * we looking up the cascade buckets ?
1286 */
1287 if (found) {
1288 /* Look at the cascade bucket(s)? */
1289 if (!index || slot < index)
1290 break;
1291 return expires;
1292 }
1293 slot = (slot + 1) & TVN_MASK;
1294 } while (slot != index);
1295
1296 if (index)
1297 timer_jiffies += TVN_SIZE - index;
1298 timer_jiffies >>= TVN_BITS;
1299 }
1300 return expires;
1301 }
1302
1303 /*
1304 * Check, if the next hrtimer event is before the next timer wheel
1305 * event:
1306 */
1307 static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
1308 {
1309 u64 nextevt = hrtimer_get_next_event();
1310
1311 /*
1312 * If high resolution timers are enabled
1313 * hrtimer_get_next_event() returns KTIME_MAX.
1314 */
1315 if (expires <= nextevt)
1316 return expires;
1317
1318 /*
1319 * If the next timer is already expired, return the tick base
1320 * time so the tick is fired immediately.
1321 */
1322 if (nextevt <= basem)
1323 return basem;
1324
1325 /*
1326 * Round up to the next jiffie. High resolution timers are
1327 * off, so the hrtimers are expired in the tick and we need to
1328 * make sure that this tick really expires the timer to avoid
1329 * a ping pong of the nohz stop code.
1330 *
1331 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
1332 */
1333 return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
1334 }
1335
1336 /**
1337 * get_next_timer_interrupt - return the time (clock mono) of the next timer
1338 * @basej: base time jiffies
1339 * @basem: base time clock monotonic
1340 *
1341 * Returns the tick aligned clock monotonic time of the next pending
1342 * timer or KTIME_MAX if no timer is pending.
1343 */
1344 u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
1345 {
1346 struct tvec_base *base = __this_cpu_read(tvec_bases);
1347 u64 expires = KTIME_MAX;
1348 unsigned long nextevt;
1349
1350 /*
1351 * Pretend that there is no timer pending if the cpu is offline.
1352 * Possible pending timers will be migrated later to an active cpu.
1353 */
1354 if (cpu_is_offline(smp_processor_id()))
1355 return expires;
1356
1357 spin_lock(&base->lock);
1358 if (base->active_timers) {
1359 if (time_before_eq(base->next_timer, base->timer_jiffies))
1360 base->next_timer = __next_timer_interrupt(base);
1361 nextevt = base->next_timer;
1362 if (time_before_eq(nextevt, basej))
1363 expires = basem;
1364 else
1365 expires = basem + (nextevt - basej) * TICK_NSEC;
1366 }
1367 spin_unlock(&base->lock);
1368
1369 return cmp_next_hrtimer_event(basem, expires);
1370 }
1371 #endif
1372
1373 /*
1374 * Called from the timer interrupt handler to charge one tick to the current
1375 * process. user_tick is 1 if the tick is user time, 0 for system.
1376 */
1377 void update_process_times(int user_tick)
1378 {
1379 struct task_struct *p = current;
1380
1381 /* Note: this timer irq context must be accounted for as well. */
1382 account_process_tick(p, user_tick);
1383 run_local_timers();
1384 rcu_check_callbacks(user_tick);
1385 #ifdef CONFIG_IRQ_WORK
1386 if (in_irq())
1387 irq_work_tick();
1388 #endif
1389 scheduler_tick();
1390 run_posix_cpu_timers(p);
1391 }
1392
1393 /*
1394 * This function runs timers and the timer-tq in bottom half context.
1395 */
1396 static void run_timer_softirq(struct softirq_action *h)
1397 {
1398 struct tvec_base *base = __this_cpu_read(tvec_bases);
1399
1400 if (time_after_eq(jiffies, base->timer_jiffies))
1401 __run_timers(base);
1402 }
1403
1404 /*
1405 * Called by the local, per-CPU timer interrupt on SMP.
1406 */
1407 void run_local_timers(void)
1408 {
1409 hrtimer_run_queues();
1410 raise_softirq(TIMER_SOFTIRQ);
1411 }
1412
1413 #ifdef __ARCH_WANT_SYS_ALARM
1414
1415 /*
1416 * For backwards compatibility? This can be done in libc so Alpha
1417 * and all newer ports shouldn't need it.
1418 */
1419 SYSCALL_DEFINE1(alarm, unsigned int, seconds)
1420 {
1421 return alarm_setitimer(seconds);
1422 }
1423
1424 #endif
1425
1426 static void process_timeout(unsigned long __data)
1427 {
1428 wake_up_process((struct task_struct *)__data);
1429 }
1430
1431 /**
1432 * schedule_timeout - sleep until timeout
1433 * @timeout: timeout value in jiffies
1434 *
1435 * Make the current task sleep until @timeout jiffies have
1436 * elapsed. The routine will return immediately unless
1437 * the current task state has been set (see set_current_state()).
1438 *
1439 * You can set the task state as follows -
1440 *
1441 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1442 * pass before the routine returns. The routine will return 0
1443 *
1444 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1445 * delivered to the current task. In this case the remaining time
1446 * in jiffies will be returned, or 0 if the timer expired in time
1447 *
1448 * The current task state is guaranteed to be TASK_RUNNING when this
1449 * routine returns.
1450 *
1451 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1452 * the CPU away without a bound on the timeout. In this case the return
1453 * value will be %MAX_SCHEDULE_TIMEOUT.
1454 *
1455 * In all cases the return value is guaranteed to be non-negative.
1456 */
1457 signed long __sched schedule_timeout(signed long timeout)
1458 {
1459 struct timer_list timer;
1460 unsigned long expire;
1461
1462 switch (timeout)
1463 {
1464 case MAX_SCHEDULE_TIMEOUT:
1465 /*
1466 * These two special cases are useful to make life easier for
1467 * the caller. Nothing more. We could take
1468 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
1469 * but I'd like to return a valid offset (>=0) to allow
1470 * the caller to do everything it wants with the retval.
1471 */
1472 schedule();
1473 goto out;
1474 default:
1475 /*
1476 * Another bit of PARANOID. Note that the retval will be
1477 * 0 since no piece of kernel is supposed to do a check
1478 * for a negative retval of schedule_timeout() (since it
1479 * should never happens anyway). You just have the printk()
1480 * that will tell you if something is gone wrong and where.
1481 */
1482 if (timeout < 0) {
1483 printk(KERN_ERR "schedule_timeout: wrong timeout "
1484 "value %lx\n", timeout);
1485 dump_stack();
1486 current->state = TASK_RUNNING;
1487 goto out;
1488 }
1489 }
1490
1491 expire = timeout + jiffies;
1492
1493 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
1494 __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
1495 schedule();
1496 del_singleshot_timer_sync(&timer);
1497
1498 /* Remove the timer from the object tracker */
1499 destroy_timer_on_stack(&timer);
1500
1501 timeout = expire - jiffies;
1502
1503 out:
1504 return timeout < 0 ? 0 : timeout;
1505 }
1506 EXPORT_SYMBOL(schedule_timeout);
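/*
 * A minimal usage sketch: a caller that wants to sleep for up to half a
 * second but wake early on signals could do
 *
 *	signed long remaining;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(500));
 *
 * where a non-zero 'remaining' means the sleep was cut short, e.g. by a
 * signal, with that many jiffies left of the original timeout.
 */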
1507
1508 /*
1509 * We can use __set_current_state() here because schedule_timeout() calls
1510 * schedule() unconditionally.
1511 */
1512 signed long __sched schedule_timeout_interruptible(signed long timeout)
1513 {
1514 __set_current_state(TASK_INTERRUPTIBLE);
1515 return schedule_timeout(timeout);
1516 }
1517 EXPORT_SYMBOL(schedule_timeout_interruptible);
1518
1519 signed long __sched schedule_timeout_killable(signed long timeout)
1520 {
1521 __set_current_state(TASK_KILLABLE);
1522 return schedule_timeout(timeout);
1523 }
1524 EXPORT_SYMBOL(schedule_timeout_killable);
1525
1526 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1527 {
1528 __set_current_state(TASK_UNINTERRUPTIBLE);
1529 return schedule_timeout(timeout);
1530 }
1531 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1532
1533 #ifdef CONFIG_HOTPLUG_CPU
1534 static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
1535 {
1536 struct timer_list *timer;
1537
1538 while (!hlist_empty(head)) {
1539 timer = hlist_entry(head->first, struct timer_list, entry);
1540 /* We ignore the accounting on the dying cpu */
1541 detach_timer(timer, false);
1542 timer_set_base(timer, new_base);
1543 internal_add_timer(new_base, timer);
1544 }
1545 }
1546
1547 static void migrate_timers(int cpu)
1548 {
1549 struct tvec_base *old_base;
1550 struct tvec_base *new_base;
1551 int i;
1552
1553 BUG_ON(cpu_online(cpu));
1554 old_base = per_cpu(tvec_bases, cpu);
1555 new_base = get_cpu_var(tvec_bases);
1556 /*
1557 * The caller is globally serialized and nobody else
1558 * takes two locks at once, deadlock is not possible.
1559 */
1560 spin_lock_irq(&new_base->lock);
1561 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1562
1563 BUG_ON(old_base->running_timer);
1564
1565 for (i = 0; i < TVR_SIZE; i++)
1566 migrate_timer_list(new_base, old_base->tv1.vec + i);
1567 for (i = 0; i < TVN_SIZE; i++) {
1568 migrate_timer_list(new_base, old_base->tv2.vec + i);
1569 migrate_timer_list(new_base, old_base->tv3.vec + i);
1570 migrate_timer_list(new_base, old_base->tv4.vec + i);
1571 migrate_timer_list(new_base, old_base->tv5.vec + i);
1572 }
1573
1574 old_base->active_timers = 0;
1575 old_base->all_timers = 0;
1576
1577 spin_unlock(&old_base->lock);
1578 spin_unlock_irq(&new_base->lock);
1579 put_cpu_var(tvec_bases);
1580 }
1581
1582 static int timer_cpu_notify(struct notifier_block *self,
1583 unsigned long action, void *hcpu)
1584 {
1585 switch (action) {
1586 case CPU_DEAD:
1587 case CPU_DEAD_FROZEN:
1588 migrate_timers((long)hcpu);
1589 break;
1590 default:
1591 break;
1592 }
1593
1594 return NOTIFY_OK;
1595 }
1596
1597 static inline void timer_register_cpu_notifier(void)
1598 {
1599 cpu_notifier(timer_cpu_notify, 0);
1600 }
1601 #else
1602 static inline void timer_register_cpu_notifier(void) { }
1603 #endif /* CONFIG_HOTPLUG_CPU */
1604
1605 static void __init init_timer_cpu(struct tvec_base *base, int cpu)
1606 {
1607 BUG_ON(base != tbase_get_base(base));
1608
1609 base->cpu = cpu;
1610 per_cpu(tvec_bases, cpu) = base;
1611 spin_lock_init(&base->lock);
1612
1613 base->timer_jiffies = jiffies;
1614 base->next_timer = base->timer_jiffies;
1615 }
1616
1617 static void __init init_timer_cpus(void)
1618 {
1619 struct tvec_base *base;
1620 int local_cpu = smp_processor_id();
1621 int cpu;
1622
1623 for_each_possible_cpu(cpu) {
1624 if (cpu == local_cpu)
1625 base = &boot_tvec_bases;
1626 #ifdef CONFIG_SMP
1627 else
1628 base = per_cpu_ptr(&__tvec_bases, cpu);
1629 #endif
1630
1631 init_timer_cpu(base, cpu);
1632 }
1633 }
1634
1635 void __init init_timers(void)
1636 {
1637 /* ensure there are enough low bits for flags in timer->base pointer */
1638 BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);
1639
1640 init_timer_cpus();
1641 init_timer_stats();
1642 timer_register_cpu_notifier();
1643 open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
1644 }
1645
1646 /**
1647 * msleep - sleep safely even with waitqueue interruptions
1648 * @msecs: Time in milliseconds to sleep for
1649 */
1650 void msleep(unsigned int msecs)
1651 {
1652 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1653
1654 while (timeout)
1655 timeout = schedule_timeout_uninterruptible(timeout);
1656 }
1657
1658 EXPORT_SYMBOL(msleep);
1659
1660 /**
1661 * msleep_interruptible - sleep waiting for signals
1662 * @msecs: Time in milliseconds to sleep for
1663 */
1664 unsigned long msleep_interruptible(unsigned int msecs)
1665 {
1666 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1667
1668 while (timeout && !signal_pending(current))
1669 timeout = schedule_timeout_interruptible(timeout);
1670 return jiffies_to_msecs(timeout);
1671 }
1672
1673 EXPORT_SYMBOL(msleep_interruptible);
1674
1675 static void __sched do_usleep_range(unsigned long min, unsigned long max)
1676 {
1677 ktime_t kmin;
1678 unsigned long delta;
1679
1680 kmin = ktime_set(0, min * NSEC_PER_USEC);
1681 delta = (max - min) * NSEC_PER_USEC;
1682 schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
1683 }
1684
1685 /**
1686 * usleep_range - Drop in replacement for udelay where wakeup is flexible
1687 * @min: Minimum time in usecs to sleep
1688 * @max: Maximum time in usecs to sleep
1689 */
1690 void __sched usleep_range(unsigned long min, unsigned long max)
1691 {
1692 __set_current_state(TASK_UNINTERRUPTIBLE);
1693 do_usleep_range(min, max);
1694 }
1695 EXPORT_SYMBOL(usleep_range);
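/*
 * A minimal usage sketch: in process context
 *
 *	usleep_range(100, 200);
 *
 * is preferable to udelay(100) whenever a wakeup anywhere in the
 * [100us, 200us] window is acceptable, since the hrtimer slack lets the
 * scheduler coalesce the wakeup with other pending events instead of
 * busy-waiting.
 */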