/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, kernel timekeeping, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

typedef struct tvec_s {
        struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
        struct list_head vec[TVR_SIZE];
} tvec_root_t;

struct tvec_t_base_s {
        spinlock_t lock;
        struct timer_list *running_timer;
        unsigned long timer_jiffies;
        tvec_root_t tv1;
        tvec_t tv2;
        tvec_t tv3;
        tvec_t tv4;
        tvec_t tv5;
} ____cacheline_aligned_in_smp;

typedef struct tvec_t_base_s tvec_base_t;

tvec_base_t boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
        int rem;
        unsigned long original = j;

        /*
         * We don't want all cpus firing their timers at once hitting the
         * same lock or cachelines, so we skew each extra cpu with an extra
         * 3 jiffies. This 3 jiffies came originally from the mm/ code which
         * already did this.
         * The skew is done by adding 3*cpunr, then round, then subtract this
         * extra offset again.
         */
        j += cpu * 3;

        rem = j % HZ;

        /*
         * If the target jiffie is just after a whole second (which can happen
         * due to delays of the timer irq, long irq off times etc etc) then
         * we should round down to the whole second, not up. Use 1/4th second
         * as cutoff for this rounding as an extreme upper bound for this.
         */
        if (rem < HZ/4) /* round down */
                j = j - rem;
        else /* round up */
                j = j - rem + HZ;

        /* now that we have rounded, subtract the extra skew again */
        j -= cpu * 3;

        if (j <= jiffies) /* rounding ate our timeout entirely */
                return original;
        return j;
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
        /*
         * In theory the following code can skip a jiffy in case jiffies
         * increments right between the addition and the later subtraction.
         * However since the entire point of this function is to use approximate
         * timeouts, it's entirely ok to not handle that.
         */
        return __round_jiffies(j + jiffies, cpu) - jiffies;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
        return __round_jiffies(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
        return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

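/*
 * Example (illustrative only, not part of this file): a driver whose
 * housekeeping work only needs to run "about once every 5 seconds" can
 * batch its wakeup with everyone else's by rounding the expiry:
 *
 *      mod_timer(&my_timer, round_jiffies(jiffies + 5 * HZ));
 *
 * or, with a relative delta:
 *
 *      mod_timer(&my_timer, jiffies + round_jiffies_relative(5 * HZ));
 *
 * Here my_timer is a hypothetical timer; any timer whose exact expiry
 * does not matter is a candidate.
 */
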
static inline void set_running_timer(tvec_base_t *base,
                                        struct timer_list *timer)
{
#ifdef CONFIG_SMP
        base->running_timer = timer;
#endif
}

static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
{
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
        struct list_head *vec;

        if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
                vec = base->tv1.vec + i;
        } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
                int i = (expires >> TVR_BITS) & TVN_MASK;
                vec = base->tv2.vec + i;
        } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
                vec = base->tv3.vec + i;
        } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
                vec = base->tv4.vec + i;
        } else if ((signed long) idx < 0) {
                /*
                 * Can happen if you add a timer with expires == jiffies,
                 * or you set a timer to go off in the past
                 */
                vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
        } else {
                int i;
                /* If the timeout is larger than 0xffffffff on 64-bit
                 * architectures then we use the maximum timeout:
                 */
                if (idx > 0xffffffffUL) {
                        idx = 0xffffffffUL;
                        expires = idx + base->timer_jiffies;
                }
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
                vec = base->tv5.vec + i;
        }
        /*
         * Timers are FIFO:
         */
        list_add_tail(&timer->entry, vec);
}

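/*
 * Worked example of the bucketing above (a sketch, assuming
 * CONFIG_BASE_SMALL=0 so TVR_BITS=8 and TVN_BITS=6): a timer due in
 * idx = 100 jiffies lands in tv1 slot (expires & 255) and fires from
 * there directly; one due in idx = 10000 jiffies (256 <= idx < 2^14)
 * lands in tv2 slot ((expires >> 8) & 63) and is cascaded back into
 * tv1 once the wheel reaches its 256-jiffy window. Each deeper level
 * covers a range 64 times larger, so insertion and expiry stay O(1)
 * and only the periodic cascade() calls touch more than one bucket.
 */
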
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
        if (timer->start_site)
                return;

        timer->start_site = addr;
        memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
        timer->start_pid = current->pid;
}
#endif

/**
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void fastcall init_timer(struct timer_list *timer)
{
        timer->entry.next = NULL;
        timer->base = __raw_get_cpu_var(tvec_bases);
#ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
        timer->start_pid = -1;
        memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
EXPORT_SYMBOL(init_timer);

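/*
 * Typical usage (a hypothetical sketch, not code from this file):
 *
 *      static struct timer_list my_timer;
 *
 *      static void my_timer_fn(unsigned long data)
 *      {
 *              struct my_dev *dev = (struct my_dev *)data;
 *              ...
 *      }
 *
 *      init_timer(&my_timer);
 *      my_timer.function = my_timer_fn;
 *      my_timer.data = (unsigned long)dev;
 *      mod_timer(&my_timer, jiffies + HZ);
 *
 * setup_timer() combines the first three steps into one call.
 */
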
static inline void detach_timer(struct timer_list *timer,
                                int clear_pending)
{
        struct list_head *entry = &timer->entry;

        __list_del(entry->prev, entry->next);
        if (clear_pending)
                entry->next = NULL;
        entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static tvec_base_t *lock_timer_base(struct timer_list *timer,
                                        unsigned long *flags)
        __acquires(timer->base->lock)
{
        tvec_base_t *base;

        for (;;) {
                base = timer->base;
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->lock, *flags);
                        if (likely(base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU */
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
        }
}

int __mod_timer(struct timer_list *timer, unsigned long expires)
{
        tvec_base_t *base, *new_base;
        unsigned long flags;
        int ret = 0;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(!timer->function);

        base = lock_timer_base(timer, &flags);

        if (timer_pending(timer)) {
                detach_timer(timer, 0);
                ret = 1;
        }

        new_base = __get_cpu_var(tvec_bases);

        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * otherwise del_timer_sync() can't detect that the timer's
                 * handler has not finished yet. This also guarantees that
                 * the timer is serialized wrt itself.
                 */
                if (likely(base->running_timer != timer)) {
                        /* See the comment in lock_timer_base() */
                        timer->base = NULL;
                        spin_unlock(&base->lock);
                        base = new_base;
                        spin_lock(&base->lock);
                        timer->base = base;
                }
        }

        timer->expires = expires;
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

EXPORT_SYMBOL(__mod_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
        tvec_base_t *base = per_cpu(tvec_bases, cpu);
        unsigned long flags;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(timer_pending(timer) || !timer->function);
        spin_lock_irqsave(&base->lock, flags);
        timer->base = base;
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->lock, flags);
}


/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated).
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
        BUG_ON(!timer->function);

        timer_stats_timer_set_start_info(timer);
        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer->expires == expires && timer_pending(timer))
                return 1;

        return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);

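/*
 * A common pattern (hypothetical sketch): a handler that re-arms itself,
 * building a periodic timer out of one-shot timers:
 *
 *      static void my_poll_fn(unsigned long data)
 *      {
 *              do_poll_work();
 *              mod_timer(&my_poll_timer, jiffies + HZ / 10);
 *      }
 *
 * my_poll_timer and do_poll_work() are made up for illustration.
 * mod_timer() is safe here even though the timer is concurrently
 * "running": __mod_timer() above deliberately keeps a running timer on
 * its current base so del_timer_sync() can still find the handler.
 */
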
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
        tvec_base_t *base;
        unsigned long flags;
        int ret = 0;

        timer_stats_timer_clear_start_info(timer);
        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
                if (timer_pending(timer)) {
                        detach_timer(timer, 1);
                        ret = 1;
                }
                spin_unlock_irqrestore(&base->lock, flags);
        }

        return ret;
}

EXPORT_SYMBOL(del_timer);

#ifdef CONFIG_SMP
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to be deactivated
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
        tvec_base_t *base;
        unsigned long flags;
        int ret = -1;

        base = lock_timer_base(timer, &flags);

        if (base->running_timer == timer)
                goto out;

        ret = 0;
        if (timer_pending(timer)) {
                detach_timer(timer, 1);
                ret = 1;
        }
out:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
                        return ret;
                cpu_relax();
        }
}

EXPORT_SYMBOL(del_timer_sync);
#endif

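/*
 * Teardown sketch (illustrative, with hypothetical names): before
 * freeing an object that embeds a timer, stop both the timer and any
 * handler still running on another CPU. The handler must check
 * dev->shutting_down and refrain from re-arming, since callers of
 * del_timer_sync() must prevent restarts:
 *
 *      dev->shutting_down = 1;
 *      del_timer_sync(&dev->poll_timer);
 *      kfree(dev);
 *
 * Plain del_timer() would be insufficient here: the handler could still
 * be mid-flight on another CPU when kfree() runs.
 */
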
static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
        /* cascade all the timers from tv up one level */
        struct timer_list *timer, *tmp;
        struct list_head tv_list;

        list_replace_init(tv->vec + index, &tv_list);

        /*
         * We are removing _all_ timers from the list, so we
         * don't have to detach them individually.
         */
        list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
                BUG_ON(timer->base != base);
                internal_add_timer(base, timer);
        }

        return index;
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(tvec_base_t *base)
{
        struct timer_list *timer;

        spin_lock_irq(&base->lock);
        while (time_after_eq(jiffies, base->timer_jiffies)) {
                struct list_head work_list;
                struct list_head *head = &work_list;
                int index = base->timer_jiffies & TVR_MASK;

                /*
                 * Cascade timers:
                 */
                if (!index &&
                        (!cascade(base, &base->tv2, INDEX(0))) &&
                                (!cascade(base, &base->tv3, INDEX(1))) &&
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
                list_replace_init(base->tv1.vec + index, &work_list);
                while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;

                        timer = list_entry(head->next, struct timer_list, entry);
                        fn = timer->function;
                        data = timer->data;

                        timer_stats_account_timer(timer);

                        set_running_timer(base, timer);
                        detach_timer(timer, 1);
                        spin_unlock_irq(&base->lock);
                        {
                                int preempt_count = preempt_count();
                                fn(data);
                                if (preempt_count != preempt_count()) {
                                        printk(KERN_WARNING "huh, entered %p "
                                               "with preempt_count %08x, exited"
                                               " with %08x?\n",
                                               fn, preempt_count,
                                               preempt_count());
                                        BUG();
                                }
                        }
                        spin_lock_irq(&base->lock);
                }
        }
        set_running_timer(base, NULL);
        spin_unlock_irq(&base->lock);
}

#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(tvec_base_t *base)
{
        unsigned long timer_jiffies = base->timer_jiffies;
        unsigned long expires = timer_jiffies + (LONG_MAX >> 1);
        int index, slot, array, found = 0;
        struct timer_list *nte;
        tvec_t *varray[4];

        /* Look for timer events in tv1. */
        index = slot = timer_jiffies & TVR_MASK;
        do {
                list_for_each_entry(nte, base->tv1.vec + slot, entry) {
                        found = 1;
                        expires = nte->expires;
                        /* Look at the cascade bucket(s)? */
                        if (!index || slot < index)
                                goto cascade;
                        return expires;
                }
                slot = (slot + 1) & TVR_MASK;
        } while (slot != index);

cascade:
        /* Calculate the next cascade event */
        if (index)
                timer_jiffies += TVR_SIZE - index;
        timer_jiffies >>= TVR_BITS;

        /* Check tv2-tv5. */
        varray[0] = &base->tv2;
        varray[1] = &base->tv3;
        varray[2] = &base->tv4;
        varray[3] = &base->tv5;

        for (array = 0; array < 4; array++) {
                tvec_t *varp = varray[array];

                index = slot = timer_jiffies & TVN_MASK;
                do {
                        list_for_each_entry(nte, varp->vec + slot, entry) {
                                found = 1;
                                if (time_before(nte->expires, expires))
                                        expires = nte->expires;
                        }
                        /*
                         * Do we still search for the first timer or are
                         * we looking up the cascade buckets?
                         */
                        if (found) {
                                /* Look at the cascade bucket(s)? */
                                if (!index || slot < index)
                                        break;
                                return expires;
                        }
                        slot = (slot + 1) & TVN_MASK;
                } while (slot != index);

                if (index)
                        timer_jiffies += TVN_SIZE - index;
                timer_jiffies >>= TVN_BITS;
        }
        return expires;
}

/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
                                            unsigned long expires)
{
        ktime_t hr_delta = hrtimer_get_next_event();
        struct timespec tsdelta;

        if (hr_delta.tv64 == KTIME_MAX)
                return expires;

        if (hr_delta.tv64 <= TICK_NSEC)
                return now;

        tsdelta = ktime_to_timespec(hr_delta);
        now += timespec_to_jiffies(&tsdelta);
        if (time_before(now, expires))
                return now;
        return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
        tvec_base_t *base = __get_cpu_var(tvec_bases);
        unsigned long expires;

        spin_lock(&base->lock);
        expires = __next_timer_interrupt(base);
        spin_unlock(&base->lock);

        if (time_before_eq(expires, now))
                return now;

        return cmp_next_hrtimer_event(now, expires);
}

#ifdef CONFIG_NO_IDLE_HZ
unsigned long next_timer_interrupt(void)
{
        return get_next_timer_interrupt(jiffies);
}
#endif

#endif

/******************************************************************/

/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub jiffie times) to get to monotonic time. Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative;
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));

EXPORT_SYMBOL(xtime);


/* XXX - all of this timekeeping code should be later moved to time.c */
#include <linux/clocksource.h>
static struct clocksource *clock; /* pointer to current clocksource */

#ifdef CONFIG_GENERIC_TIME
/**
 * __get_nsec_offset - Returns nanoseconds since last call to update_wall_time
 *
 * private function, must hold xtime_lock lock when being
 * called. Returns the number of nanoseconds since the
 * last call to update_wall_time() (adjusted by NTP scaling)
 */
static inline s64 __get_nsec_offset(void)
{
        cycle_t cycle_now, cycle_delta;
        s64 ns_offset;

        /* read clocksource: */
        cycle_now = clocksource_read(clock);

        /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

        /* convert to nanoseconds: */
        ns_offset = cyc2ns(clock, cycle_delta);

        return ns_offset;
}

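/*
 * The cyc2ns() conversion above is, in essence (a sketch of the generic
 * clocksource scaling, not a new definition):
 *
 *      ns = (cycle_delta * clock->mult) >> clock->shift;
 *
 * mult/shift are chosen at clocksource registration so the multiply
 * stays within 64 bits over the expected delta range while keeping
 * good precision.
 */
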
/**
 * __get_realtime_clock_ts - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec. Used by
 * do_gettimeofday() and get_realtime_clock_ts().
 */
static inline void __get_realtime_clock_ts(struct timespec *ts)
{
        unsigned long seq;
        s64 nsecs;

        do {
                seq = read_seqbegin(&xtime_lock);

                *ts = xtime;
                nsecs = __get_nsec_offset();

        } while (read_seqretry(&xtime_lock, seq));

        timespec_add_ns(ts, nsecs);
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
        __get_realtime_clock_ts(ts);
}

EXPORT_SYMBOL(getnstimeofday);

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv: pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using get_realtime_clock_ts()
 */
void do_gettimeofday(struct timeval *tv)
{
        struct timespec now;

        __get_realtime_clock_ts(&now);
        tv->tv_sec = now.tv_sec;
        tv->tv_usec = now.tv_nsec/1000;
}

EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv: pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(struct timespec *tv)
{
        unsigned long flags;
        time_t wtm_sec, sec = tv->tv_sec;
        long wtm_nsec, nsec = tv->tv_nsec;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irqsave(&xtime_lock, flags);

        nsec -= __get_nsec_offset();

        wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
        wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

        set_normalized_timespec(&xtime, sec, nsec);
        set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

        clock->error = 0;
        ntp_clear();

        update_vsyscall(&xtime, clock);

        write_sequnlock_irqrestore(&xtime_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return 0;
}

EXPORT_SYMBOL(do_settimeofday);

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static void change_clocksource(void)
{
        struct clocksource *new;
        cycle_t now;
        u64 nsec;

        new = clocksource_get_next();

        if (clock == new)
                return;

        now = clocksource_read(new);
        nsec = __get_nsec_offset();
        timespec_add_ns(&xtime, nsec);

        clock = new;
        clock->cycle_last = now;

        clock->error = 0;
        clock->xtime_nsec = 0;
        clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);

        tick_clock_notify();

        printk(KERN_INFO "Time: %s clocksource has been installed.\n",
               clock->name);
}
#else
static inline void change_clocksource(void) { }
#endif

/**
 * timekeeping_is_continuous - check to see if timekeeping is free running
 */
int timekeeping_is_continuous(void)
{
        unsigned long seq;
        int ret;

        do {
                seq = read_seqbegin(&xtime_lock);

                ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

        } while (read_seqretry(&xtime_lock, seq));

        return ret;
}

/**
 * read_persistent_clock - Return time in seconds from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Returns seconds from epoch using the battery backed persistent clock.
 * Returns zero if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
unsigned long __attribute__((weak)) read_persistent_clock(void)
{
        return 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
        unsigned long flags;
        unsigned long sec = read_persistent_clock();

        write_seqlock_irqsave(&xtime_lock, flags);

        ntp_clear();

        clock = clocksource_get_next();
        clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
        clock->cycle_last = clocksource_read(clock);

        xtime.tv_sec = sec;
        xtime.tv_nsec = 0;
        set_normalized_timespec(&wall_to_monotonic,
                -xtime.tv_sec, -xtime.tv_nsec);

        write_sequnlock_irqrestore(&xtime_lock, flags);
}

/* flag for if timekeeping is suspended */
static int timekeeping_suspended;
/* time in seconds when suspend began */
static unsigned long timekeeping_suspend_time;

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev: unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
        unsigned long flags;
        unsigned long now = read_persistent_clock();

        write_seqlock_irqsave(&xtime_lock, flags);

        if (now && (now > timekeeping_suspend_time)) {
                unsigned long sleep_length = now - timekeeping_suspend_time;

                xtime.tv_sec += sleep_length;
                wall_to_monotonic.tv_sec -= sleep_length;
        }
        /* re-base the last cycle value */
        clock->cycle_last = clocksource_read(clock);
        clock->error = 0;
        timekeeping_suspended = 0;
        write_sequnlock_irqrestore(&xtime_lock, flags);

        touch_softlockup_watchdog();

        clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

        /* Resume hrtimers */
        clock_was_set();

        return 0;
}

static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
        unsigned long flags;

        write_seqlock_irqsave(&xtime_lock, flags);
        timekeeping_suspended = 1;
        timekeeping_suspend_time = read_persistent_clock();
        write_sequnlock_irqrestore(&xtime_lock, flags);

        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);

        return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
        .resume         = timekeeping_resume,
        .suspend        = timekeeping_suspend,
        set_kset_name("timekeeping"),
};

static struct sys_device device_timer = {
        .id             = 0,
        .cls            = &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
        int error = sysdev_class_register(&timekeeping_sysclass);
        if (!error)
                error = sysdev_register(&device_timer);
        return error;
}

device_initcall(timekeeping_init_device);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
                                                 s64 *offset)
{
        s64 tick_error, i;
        u32 look_ahead, adj;
        s32 error2, mult;

        /*
         * Use the current error value to determine how much to look ahead.
         * The larger the error the slower we adjust for it to avoid problems
         * with losing too many ticks, otherwise we would overadjust and
         * produce an even larger error. The smaller the adjustment the
         * faster we try to adjust for it, as lost ticks can do less harm
         * here. This is tuned so that an error of about 1 msec is adjusted
         * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
         */
        error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
        error2 = abs(error2);
        for (look_ahead = 0; error2 > 0; look_ahead++)
                error2 >>= 2;

        /*
         * Now calculate the error in (1 << look_ahead) ticks, but first
         * remove the single look ahead already included in the error.
         */
        tick_error = current_tick_length() >>
                (TICK_LENGTH_SHIFT - clock->shift + 1);
        tick_error -= clock->xtime_interval >> 1;
        error = ((error - tick_error) >> look_ahead) + tick_error;

        /* Finally calculate the adjustment shift value. */
        i = *interval;
        mult = 1;
        if (error < 0) {
                error = -error;
                *interval = -*interval;
                *offset = -*offset;
                mult = -1;
        }
        for (adj = 0; error > i; adj++)
                error >>= 1;

        *interval <<= adj;
        *offset <<= adj;
        return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void clocksource_adjust(struct clocksource *clock, s64 offset)
{
        s64 error, interval = clock->cycle_interval;
        int adj;

        error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
        if (error > interval) {
                error >>= 2;
                if (likely(error <= interval))
                        adj = 1;
                else
                        adj = clocksource_bigadjust(error, &interval, &offset);
        } else if (error < -interval) {
                error >>= 2;
                if (likely(error >= -interval)) {
                        adj = -1;
                        interval = -interval;
                        offset = -offset;
                } else
                        adj = clocksource_bigadjust(error, &interval, &offset);
        } else
                return;

        clock->mult += adj;
        clock->xtime_interval += interval;
        clock->xtime_nsec -= offset;
        clock->error -= (interval - offset) <<
                        (TICK_LENGTH_SHIFT - clock->shift);
}

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
static void update_wall_time(void)
{
        cycle_t offset;

        /* Make sure we're fully resumed: */
        if (unlikely(timekeeping_suspended))
                return;

#ifdef CONFIG_GENERIC_TIME
        offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
#else
        offset = clock->cycle_interval;
#endif
        clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;

        /* normally this loop will run just once, however in the
         * case of lost or late ticks, it will accumulate correctly.
         */
        while (offset >= clock->cycle_interval) {
                /* accumulate one interval */
                clock->xtime_nsec += clock->xtime_interval;
                clock->cycle_last += clock->cycle_interval;
                offset -= clock->cycle_interval;

                if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
                        clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
                        xtime.tv_sec++;
                        second_overflow();
                }

                /* interpolator bits */
                time_interpolator_update(clock->xtime_interval
                                                >> clock->shift);

                /* accumulate error between NTP and clock interval */
                clock->error += current_tick_length();
                clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
        }

        /* correct the clock when NTP error is too big */
        clocksource_adjust(clock, offset);

        /* store full nanoseconds into xtime */
        xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
        clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;

        /* check to see if there is a new clocksource to use */
        change_clocksource();
        update_vsyscall(&xtime, clock);
}

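/*
 * Note on the fixed-point bookkeeping above: clock->xtime_nsec holds
 * nanoseconds shifted left by clock->shift, so one accumulated interval
 * adds clock->xtime_interval (cycle_interval scaled by clock->mult) and
 * a whole second is reached at (u64)NSEC_PER_SEC << clock->shift.
 * E.g. (a sketch, assuming shift = 22) one second is NSEC_PER_SEC * 2^22
 * in these units; the extra fractional bits are what let
 * clocksource_adjust() steer the clock by sub-nanosecond amounts per
 * interval.
 */
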
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
        struct task_struct *p = current;
        int cpu = smp_processor_id();

        /* Note: this timer irq context must be accounted for as well. */
        if (user_tick)
                account_user_time(p, jiffies_to_cputime(1));
        else
                account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
        run_local_timers();
        if (rcu_pending(cpu))
                rcu_check_callbacks(cpu, user_tick);
        scheduler_tick();
        run_posix_cpu_timers(p);
}

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
        return nr_active() * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);

/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
        unsigned long active_tasks; /* fixed-point */
        static int count = LOAD_FREQ;

        count -= ticks;
        if (unlikely(count < 0)) {
                active_tasks = count_active_tasks();
                do {
                        CALC_LOAD(avenrun[0], EXP_1, active_tasks);
                        CALC_LOAD(avenrun[1], EXP_5, active_tasks);
                        CALC_LOAD(avenrun[2], EXP_15, active_tasks);
                        count += LOAD_FREQ;
                } while (count < 0);
        }
}

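/*
 * For reference (restated here as a sketch; the macro lives in
 * <linux/sched.h>): CALC_LOAD(load, exp, n) computes an exponentially
 * damped moving average in 11-bit fixed point (FSHIFT = 11,
 * FIXED_1 = 1 << FSHIFT):
 *
 *      load = (load * exp + n * (FIXED_1 - exp)) >> FSHIFT;
 *
 * With EXP_1 = 1884 (~0.92 * FIXED_1 = e^(-5/60) in fixed point), the
 * 1-minute average decays toward the instantaneous task count a little
 * each LOAD_FREQ (5 second) interval, falling to ~37% of its value
 * after a minute with no runnable tasks.
 */
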
/*
 * This read-write spinlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
__attribute__((weak)) __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);

EXPORT_SYMBOL(xtime_lock);

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
        tvec_base_t *base = __get_cpu_var(tvec_bases);

        hrtimer_run_queues();

        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
        raise_softirq(TIMER_SOFTIRQ);
        softlockup_tick();
}

/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(unsigned long ticks)
{
        update_wall_time();
        calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(unsigned long ticks)
{
        jiffies_64 += ticks;
        update_times(ticks);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
        return alarm_setitimer(seconds);
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid. The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
        return current->tgid;
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
asmlinkage long sys_getppid(void)
{
        int pid;

        rcu_read_lock();
        pid = rcu_dereference(current->real_parent)->tgid;
        rcu_read_unlock();

        return pid;
}

asmlinkage long sys_getuid(void)
{
        /* Only we change this so SMP safe */
        return current->uid;
}

asmlinkage long sys_geteuid(void)
{
        /* Only we change this so SMP safe */
        return current->euid;
}

asmlinkage long sys_getgid(void)
{
        /* Only we change this so SMP safe */
        return current->gid;
}

asmlinkage long sys_getegid(void)
{
        /* Only we change this so SMP safe */
        return current->egid;
}

#endif

static void process_timeout(unsigned long __data)
{
        wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
        struct timer_list timer;
        unsigned long expire;

        switch (timeout)
        {
        case MAX_SCHEDULE_TIMEOUT:
                /*
                 * This special case is useful for the caller's comfort.
                 * Nothing more. We could take MAX_SCHEDULE_TIMEOUT from
                 * one of the negative values, but I'd like to return a
                 * valid offset (>=0) to allow the caller to do everything
                 * it wants with the retval.
                 */
                schedule();
                goto out;
        default:
                /*
                 * Another bit of PARANOID. Note that the retval will be
                 * 0 since no piece of kernel is supposed to do a check
                 * for a negative retval of schedule_timeout() (since it
                 * should never happen anyway). You just have the printk()
                 * that will tell you if something has gone wrong and where.
                 */
                if (timeout < 0) {
                        printk(KERN_ERR "schedule_timeout: wrong timeout "
                                "value %lx\n", timeout);
                        dump_stack();
                        current->state = TASK_RUNNING;
                        goto out;
                }
        }

        expire = timeout + jiffies;

        setup_timer(&timer, process_timeout, (unsigned long)current);
        __mod_timer(&timer, expire);
        schedule();
        del_singleshot_timer_sync(&timer);

        timeout = expire - jiffies;

out:
        return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);

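/*
 * Usage sketch (hypothetical caller): waiting up to a second, resuming
 * early on wakeup:
 *
 *      set_current_state(TASK_INTERRUPTIBLE);
 *      remaining = schedule_timeout(HZ);
 *
 * Setting the task state *before* calling is essential; with the state
 * left at TASK_RUNNING, schedule_timeout() returns almost immediately
 * after a schedule() round-trip rather than sleeping.
 */
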
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
        __set_current_state(TASK_INTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
        __set_current_state(TASK_UNINTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

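/*
 * E.g. (illustrative) a kthread that naps for 100ms but wants wakeups
 * (such as the one kthread_stop() issues) or signals to cut the nap
 * short:
 *
 *      schedule_timeout_interruptible(msecs_to_jiffies(100));
 *
 * which is exactly __set_current_state() plus schedule_timeout() as
 * defined above.
 */
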
1da177e4 LT |
1477 | /* Thread ID - the internal kernel "pid" */ |
1478 | asmlinkage long sys_gettid(void) | |
1479 | { | |
1480 | return current->pid; | |
1481 | } | |
1482 | ||
2aae4a10 | 1483 | /** |
d4d23add | 1484 | * do_sysinfo - fill in sysinfo struct |
2aae4a10 | 1485 | * @info: pointer to buffer to fill |
1da177e4 | 1486 | */ |
d4d23add | 1487 | int do_sysinfo(struct sysinfo *info) |
1da177e4 | 1488 | { |
1da177e4 LT |
1489 | unsigned long mem_total, sav_total; |
1490 | unsigned int mem_unit, bitcount; | |
1491 | unsigned long seq; | |
1492 | ||
d4d23add | 1493 | memset(info, 0, sizeof(struct sysinfo)); |
1da177e4 LT |
1494 | |
1495 | do { | |
1496 | struct timespec tp; | |
1497 | seq = read_seqbegin(&xtime_lock); | |
1498 | ||
1499 | /* | |
1500 | * This is annoying. The code below does the same thing | |
1501 | * posix_get_clock_monotonic() does, but that function wants | |
1502 | * to take the lock itself, whereas we want the lock to | |
1503 | * cover the load-average reads too. | |
1504 | */ | |
1505 | ||
1506 | getnstimeofday(&tp); | |
1507 | tp.tv_sec += wall_to_monotonic.tv_sec; | |
1508 | tp.tv_nsec += wall_to_monotonic.tv_nsec; | |
1509 | if (tp.tv_nsec - NSEC_PER_SEC >= 0) { | |
1510 | tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC; | |
1511 | tp.tv_sec++; | |
1512 | } | |
d4d23add | 1513 | info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0); |
1da177e4 | 1514 | |
d4d23add KM |
1515 | info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT); |
1516 | info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT); | |
1517 | info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT); | |
1da177e4 | 1518 | |
d4d23add | 1519 | info->procs = nr_threads; |
1da177e4 LT |
1520 | } while (read_seqretry(&xtime_lock, seq)); |
1521 | ||
d4d23add KM |
1522 | si_meminfo(info); |
1523 | si_swapinfo(info); | |
1da177e4 LT |
1524 | |
1525 | /* | |
1526 | * If the sum of all the available memory (i.e. ram + swap) | |
1527 | * is less than can be stored in a 32 bit unsigned long then | |
1528 | * we can be binary compatible with 2.2.x kernels. If not, | |
1529 | * well, in that case 2.2.x was broken anyways... | |
1530 | * | |
1531 | * -Erik Andersen <andersee@debian.org> | |
1532 | */ | |
1533 | ||
d4d23add KM |
1534 | mem_total = info->totalram + info->totalswap; |
1535 | if (mem_total < info->totalram || mem_total < info->totalswap) | |
1da177e4 LT |
1536 | goto out; |
1537 | bitcount = 0; | |
d4d23add | 1538 | mem_unit = info->mem_unit; |
1da177e4 LT |
1539 | while (mem_unit > 1) { |
1540 | bitcount++; | |
1541 | mem_unit >>= 1; | |
1542 | sav_total = mem_total; | |
1543 | mem_total <<= 1; | |
1544 | if (mem_total < sav_total) | |
1545 | goto out; | |
1546 | } | |
1547 | ||
1548 | /* | |
1549 | * If mem_total did not overflow, multiply all memory values by | |
d4d23add | 1550 | * info->mem_unit and set it to 1. This leaves things compatible |
1da177e4 LT |
1551 | * with 2.2.x, and also retains compatibility with earlier 2.4.x |
1552 | * kernels... | |
1553 | */ | |
1554 | ||
d4d23add KM |
1555 | info->mem_unit = 1; |
1556 | info->totalram <<= bitcount; | |
1557 | info->freeram <<= bitcount; | |
1558 | info->sharedram <<= bitcount; | |
1559 | info->bufferram <<= bitcount; | |
1560 | info->totalswap <<= bitcount; | |
1561 | info->freeswap <<= bitcount; | |
1562 | info->totalhigh <<= bitcount; | |
1563 | info->freehigh <<= bitcount; | |
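	/*
	 * Worked example (editorial): si_meminfo() sets mem_unit to
	 * PAGE_SIZE, so with 4096-byte pages the loop above computes
	 * bitcount == 12 and the shifts here convert page counts into
	 * byte counts, after which mem_unit becomes 1 below.
	 */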
1564 | ||
1565 | out: | |
1566 | return 0; | |
1567 | } | |
1568 | ||
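/*
 * Editorial sketch of the seqlock read pattern used in do_sysinfo()
 * above: the reader retries until the sequence count is unchanged,
 * i.e. no writer ran concurrently.  my_seqlock, my_value and
 * my_read_value() are hypothetical.
 */
static seqlock_t my_seqlock = SEQLOCK_UNLOCKED;
static unsigned long my_value;

static unsigned long my_read_value(void)
{
	unsigned long seq, val;

	do {
		seq = read_seqbegin(&my_seqlock);
		val = my_value;		/* the loads being protected */
	} while (read_seqretry(&my_seqlock, seq));
	return val;
}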
1569 | asmlinkage long sys_sysinfo(struct sysinfo __user *info) | |
1570 | { | |
1571 | struct sysinfo val; | |
1572 | ||
1573 | do_sysinfo(&val); | |
1da177e4 | 1574 | |
1da177e4 LT |
1575 | if (copy_to_user(info, &val, sizeof(struct sysinfo))) |
1576 | return -EFAULT; | |
1577 | ||
1578 | return 0; | |
1579 | } | |
1580 | ||
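/*
 * Editorial sketch of the userspace side of sys_sysinfo() above,
 * through the glibc sysinfo(2) wrapper.
 */
#include <sys/sysinfo.h>
#include <stdio.h>

int main(void)
{
	struct sysinfo si;

	if (sysinfo(&si) == 0)
		printf("up %ld s, ram %lu x %u bytes\n",
		       si.uptime, si.totalram, si.mem_unit);
	return 0;
}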
d730e882 IM |
1581 | /* |
1582 | * lockdep: we want to track each per-CPU base as a separate lock-class, | |
1583 | * but timer-bases are kmalloc()-ed, so we need to attach separate | |
1584 | * keys to them: | |
1585 | */ | |
1586 | static struct lock_class_key base_lock_keys[NR_CPUS]; | |
1587 | ||
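/*
 * Editorial sketch of the lockdep technique above: all locks
 * initialised at one source location normally share a single
 * lock-class, so each kmalloc()-ed per-CPU base lock is given its own
 * statically allocated key.  my_init_base_lock() is hypothetical; the
 * same two calls appear in init_timers_cpu() below.
 */
static void my_init_base_lock(spinlock_t *lock, int cpu)
{
	spin_lock_init(lock);
	lockdep_set_class(lock, base_lock_keys + cpu);
}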
a4a6198b | 1588 | static int __devinit init_timers_cpu(int cpu) |
1da177e4 LT |
1589 | { |
1590 | int j; | |
1591 | tvec_base_t *base; | |
ba6edfcd | 1592 | static char __devinitdata tvec_base_done[NR_CPUS]; |
55c888d6 | 1593 | |
ba6edfcd | 1594 | if (!tvec_base_done[cpu]) { |
a4a6198b JB |
1595 | static char boot_done; |
1596 | ||
a4a6198b | 1597 | if (boot_done) { |
ba6edfcd AM |
1598 | /* |
1599 | * The APs use this path later in boot | |
1600 | */ | |
a4a6198b JB |
1601 | base = kmalloc_node(sizeof(*base), GFP_KERNEL, |
1602 | cpu_to_node(cpu)); | |
1603 | if (!base) | |
1604 | return -ENOMEM; | |
1605 | memset(base, 0, sizeof(*base)); | |
ba6edfcd | 1606 | per_cpu(tvec_bases, cpu) = base; |
a4a6198b | 1607 | } else { |
ba6edfcd AM |
1608 | /* |
1609 | * This is for the boot CPU - we use compile-time | |
1610 | * static initialisation because per-cpu memory isn't | |
1611 | * ready yet and because the memory allocators are not | |
1612 | * initialised either. | |
1613 | */ | |
a4a6198b | 1614 | boot_done = 1; |
ba6edfcd | 1615 | base = &boot_tvec_bases; |
a4a6198b | 1616 | } |
ba6edfcd AM |
1617 | tvec_base_done[cpu] = 1; |
1618 | } else { | |
1619 | base = per_cpu(tvec_bases, cpu); | |
a4a6198b | 1620 | } |
ba6edfcd | 1621 | |
3691c519 | 1622 | spin_lock_init(&base->lock); |
d730e882 IM |
1623 | lockdep_set_class(&base->lock, base_lock_keys + cpu); |
1624 | ||
1da177e4 LT |
1625 | for (j = 0; j < TVN_SIZE; j++) { |
1626 | INIT_LIST_HEAD(base->tv5.vec + j); | |
1627 | INIT_LIST_HEAD(base->tv4.vec + j); | |
1628 | INIT_LIST_HEAD(base->tv3.vec + j); | |
1629 | INIT_LIST_HEAD(base->tv2.vec + j); | |
1630 | } | |
1631 | for (j = 0; j < TVR_SIZE; j++) | |
1632 | INIT_LIST_HEAD(base->tv1.vec + j); | |
1633 | ||
1634 | base->timer_jiffies = jiffies; | |
a4a6198b | 1635 | return 0; |
1da177e4 LT |
1636 | } |
1637 | ||
1638 | #ifdef CONFIG_HOTPLUG_CPU | |
55c888d6 | 1639 | static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head) |
1da177e4 LT |
1640 | { |
1641 | struct timer_list *timer; | |
1642 | ||
1643 | while (!list_empty(head)) { | |
1644 | timer = list_entry(head->next, struct timer_list, entry); | |
55c888d6 | 1645 | detach_timer(timer, 0); |
3691c519 | 1646 | timer->base = new_base; |
1da177e4 | 1647 | internal_add_timer(new_base, timer); |
1da177e4 | 1648 | } |
1da177e4 LT |
1649 | } |
1650 | ||
1651 | static void __devinit migrate_timers(int cpu) | |
1652 | { | |
1653 | tvec_base_t *old_base; | |
1654 | tvec_base_t *new_base; | |
1655 | int i; | |
1656 | ||
1657 | BUG_ON(cpu_online(cpu)); | |
a4a6198b JB |
1658 | old_base = per_cpu(tvec_bases, cpu); |
1659 | new_base = get_cpu_var(tvec_bases); | |
1da177e4 LT |
1660 | |
1661 | local_irq_disable(); | |
e81ce1f7 HC |
1662 | double_spin_lock(&new_base->lock, &old_base->lock, |
1663 | smp_processor_id() < cpu); | |
3691c519 ON |
1664 | |
1665 | BUG_ON(old_base->running_timer); | |
1da177e4 | 1666 | |
1da177e4 | 1667 | for (i = 0; i < TVR_SIZE; i++) |
55c888d6 ON |
1668 | migrate_timer_list(new_base, old_base->tv1.vec + i); |
1669 | for (i = 0; i < TVN_SIZE; i++) { | |
1670 | migrate_timer_list(new_base, old_base->tv2.vec + i); | |
1671 | migrate_timer_list(new_base, old_base->tv3.vec + i); | |
1672 | migrate_timer_list(new_base, old_base->tv4.vec + i); | |
1673 | migrate_timer_list(new_base, old_base->tv5.vec + i); | |
1674 | } | |
1675 | ||
e81ce1f7 HC |
1676 | double_spin_unlock(&new_base->lock, &old_base->lock, |
1677 | smp_processor_id() < cpu); | |
1da177e4 LT |
1678 | local_irq_enable(); |
1679 | put_cpu_var(tvec_bases); | |
1da177e4 LT |
1680 | } |
1681 | #endif /* CONFIG_HOTPLUG_CPU */ | |
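/*
 * Editorial sketch of the deadlock-avoidance rule migrate_timers()
 * relies on: when two timer bases must be held at once, the pair is
 * always taken in one global order (lower CPU number first), which is
 * what the boolean argument to double_spin_lock() selects.  Open-coded
 * the idiom looks like this (my_lock_pair() is hypothetical; the real
 * helper additionally tells lockdep about the nesting via
 * spin_lock_nested()):
 */
static void my_lock_pair(spinlock_t *a, spinlock_t *b, int a_first)
{
	if (a_first) {
		spin_lock(a);
		spin_lock(b);
	} else {
		spin_lock(b);
		spin_lock(a);
	}
}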
1682 | ||
8c78f307 | 1683 | static int __cpuinit timer_cpu_notify(struct notifier_block *self, |
1da177e4 LT |
1684 | unsigned long action, void *hcpu) |
1685 | { | |
1686 | long cpu = (long)hcpu; | |
1687 | switch(action) { | |
1688 | case CPU_UP_PREPARE: | |
a4a6198b JB |
1689 | if (init_timers_cpu(cpu) < 0) |
1690 | return NOTIFY_BAD; | |
1da177e4 LT |
1691 | break; |
1692 | #ifdef CONFIG_HOTPLUG_CPU | |
1693 | case CPU_DEAD: | |
1694 | migrate_timers(cpu); | |
1695 | break; | |
1696 | #endif | |
1697 | default: | |
1698 | break; | |
1699 | } | |
1700 | return NOTIFY_OK; | |
1701 | } | |
1702 | ||
8c78f307 | 1703 | static struct notifier_block __cpuinitdata timers_nb = { |
1da177e4 LT |
1704 | .notifier_call = timer_cpu_notify, |
1705 | }; | |
1706 | ||
1707 | ||
1708 | void __init init_timers(void) | |
1709 | { | |
07dccf33 | 1710 | int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, |
1da177e4 | 1711 | (void *)(long)smp_processor_id()); |
07dccf33 | 1712 | |
82f67cd9 IM |
1713 | init_timer_stats(); |
1714 | ||
07dccf33 | 1715 | BUG_ON(err == NOTIFY_BAD); |
1da177e4 LT |
1716 | register_cpu_notifier(&timers_nb); |
1717 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL); | |
1718 | } | |
1719 | ||
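/*
 * Editorial sketch of the CPU-notifier pattern used by init_timers()
 * above: one callback handles both boot-time setup (invoked by hand
 * for the boot CPU) and later hotplug events.  All "my_" names are
 * hypothetical.
 */
static int __cpuinit my_cpu_notify(struct notifier_block *self,
				   unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		/* allocate per-CPU state for 'cpu' here */
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		/* hand the dead CPU's pending work to a live CPU here */
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata my_nb = {
	.notifier_call = my_cpu_notify,
};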
1720 | #ifdef CONFIG_TIME_INTERPOLATION | |
1721 | ||
67890d70 CL |
1722 | struct time_interpolator *time_interpolator __read_mostly; |
1723 | static struct time_interpolator *time_interpolator_list __read_mostly; | |
1da177e4 LT |
1724 | static DEFINE_SPINLOCK(time_interpolator_lock); |
1725 | ||
3db5db4f | 1726 | static inline cycles_t time_interpolator_get_cycles(unsigned int src) |
1da177e4 LT |
1727 | { |
1728 | unsigned long (*x)(void); | |
1729 | ||
1730 | switch (src) | |
1731 | { | |
1732 | case TIME_SOURCE_FUNCTION: | |
1733 | x = time_interpolator->addr; | |
1734 | return x(); | |
1735 | ||
1736 | case TIME_SOURCE_MMIO64 : | |
685db65e | 1737 | return readq_relaxed((void __iomem *)time_interpolator->addr); |
1da177e4 LT |
1738 | |
1739 | case TIME_SOURCE_MMIO32 : | |
685db65e | 1740 | return readl_relaxed((void __iomem *)time_interpolator->addr); |
1da177e4 LT |
1741 | |
1742 | default: return get_cycles(); | |
1743 | } | |
1744 | } | |
1745 | ||
486d46ae | 1746 | static inline u64 time_interpolator_get_counter(int writelock) |
1da177e4 LT |
1747 | { |
1748 | unsigned int src = time_interpolator->source; | |
1749 | ||
1750 | if (time_interpolator->jitter) | |
1751 | { | |
3db5db4f HD |
1752 | cycles_t lcycle; |
1753 | cycles_t now; | |
1da177e4 LT |
1754 | |
1755 | do { | |
1756 | lcycle = time_interpolator->last_cycle; | |
1757 | now = time_interpolator_get_cycles(src); | |
1758 | if (lcycle && time_after(lcycle, now)) | |
1759 | return lcycle; | |
486d46ae AW |
1760 | |
1761 | /* When holding the xtime write lock, there's no need | |
1762 | * to add the overhead of the cmpxchg. Readers are | |
1763 | * forced to retry until the write lock is released. | |
1764 | */ | |
1765 | if (writelock) { | |
1766 | time_interpolator->last_cycle = now; | |
1767 | return now; | |
1768 | } | |
1da177e4 LT |
1769 | /* Keep track of the last timer value returned. The use of cmpxchg here |
1770 | * will cause contention in an SMP environment. | |
1771 | */ | |
1772 | } while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle)); | |
1773 | return now; | |
1774 | } | |
1775 | else | |
1776 | return time_interpolator_get_cycles(src); | |
1777 | } | |
1778 | ||
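/*
 * Editorial sketch of the lock-free idiom in
 * time_interpolator_get_counter() above: publish a new "latest" value
 * only if no other CPU published a later one in the meantime,
 * otherwise retry.  my_last_cycle and my_monotonic() are hypothetical.
 */
static unsigned long my_last_cycle;

static unsigned long my_monotonic(unsigned long now)
{
	unsigned long old;

	do {
		old = my_last_cycle;
		if (old && time_after(old, now))
			return old;	/* a later value already won */
	} while (cmpxchg(&my_last_cycle, old, now) != old);
	return now;
}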
1779 | void time_interpolator_reset(void) | |
1780 | { | |
1781 | time_interpolator->offset = 0; | |
486d46ae | 1782 | time_interpolator->last_counter = time_interpolator_get_counter(1); |
1da177e4 LT |
1783 | } |
1784 | ||
1785 | #define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift) | |
1786 | ||
1787 | unsigned long time_interpolator_get_offset(void) | |
1788 | { | |
1789 | /* If we do not have a time interpolator set up then just return zero */ | |
1790 | if (!time_interpolator) | |
1791 | return 0; | |
1792 | ||
1793 | return time_interpolator->offset + | |
486d46ae | 1794 | GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator); |
1da177e4 LT |
1795 | } |
1796 | ||
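/*
 * Worked example (editorial): for a 1 MHz counter, frequency == 10^6,
 * so register_time_interpolator() below computes
 * nsec_per_cyc == (NSEC_PER_SEC << shift) / 10^6 == 1000 << shift.
 * GET_TI_NSECS() then turns a delta of 5 ticks into
 * (5 * (1000 << shift)) >> shift == 5000 ns.
 */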
1797 | #define INTERPOLATOR_ADJUST 65536 | |
1798 | #define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST | |
1799 | ||
4c7ee8de | 1800 | void time_interpolator_update(long delta_nsec) |
1da177e4 LT |
1801 | { |
1802 | u64 counter; | |
1803 | unsigned long offset; | |
1804 | ||
1805 | /* If there is no time interpolator set up then do nothing */ | |
1806 | if (!time_interpolator) | |
1807 | return; | |
1808 | ||
a5a0d52c AM |
1809 | /* |
1810 | * The interpolator compensates for late ticks by accumulating the late | |
1811 | * time in time_interpolator->offset. A tick earlier than expected will | |
1812 | * lead to a reset of the offset and a corresponding jump of the clock | |
1813 | * forward. Again this only works if the interpolator clock is running | |
1814 | * slightly slower than the regular clock and the tuning logic insures | |
1815 | * that. | |
1816 | */ | |
1da177e4 | 1817 | |
486d46ae | 1818 | counter = time_interpolator_get_counter(1); |
a5a0d52c AM |
1819 | offset = time_interpolator->offset + |
1820 | GET_TI_NSECS(counter, time_interpolator); | |
1da177e4 LT |
1821 | |
1822 | if (delta_nsec < 0 || (unsigned long) delta_nsec < offset) | |
1823 | time_interpolator->offset = offset - delta_nsec; | |
1824 | else { | |
1825 | time_interpolator->skips++; | |
1826 | time_interpolator->ns_skipped += delta_nsec - offset; | |
1827 | time_interpolator->offset = 0; | |
1828 | } | |
1829 | time_interpolator->last_counter = counter; | |
1830 | ||
1831 | /* Tuning logic for the time interpolator, invoked every minute or so. | |
1832 | * Decrease the interpolator clock speed if no skips occurred and an offset is carried. | |
1833 | * Increase the interpolator clock speed if we skip too much time. | |
1834 | */ | |
1835 | if (jiffies % INTERPOLATOR_ADJUST == 0) | |
1836 | { | |
b20367a6 | 1837 | if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec) |
1da177e4 LT |
1838 | time_interpolator->nsec_per_cyc--; |
1839 | if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0) | |
1840 | time_interpolator->nsec_per_cyc++; | |
1841 | time_interpolator->skips = 0; | |
1842 | time_interpolator->ns_skipped = 0; | |
1843 | } | |
1844 | } | |
1845 | ||
1846 | static inline int | |
1847 | is_better_time_interpolator(struct time_interpolator *new) | |
1848 | { | |
1849 | if (!time_interpolator) | |
1850 | return 1; | |
1851 | return new->frequency > 2*time_interpolator->frequency || | |
1852 | (unsigned long)new->drift < (unsigned long)time_interpolator->drift; | |
1853 | } | |
1854 | ||
1855 | void | |
1856 | register_time_interpolator(struct time_interpolator *ti) | |
1857 | { | |
1858 | unsigned long flags; | |
1859 | ||
1860 | /* Sanity check */ | |
9f31252c | 1861 | BUG_ON(ti->frequency == 0 || ti->mask == 0); |
1da177e4 LT |
1862 | |
1863 | ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency; | |
1864 | spin_lock(&time_interpolator_lock); | |
1865 | write_seqlock_irqsave(&xtime_lock, flags); | |
1866 | if (is_better_time_interpolator(ti)) { | |
1867 | time_interpolator = ti; | |
1868 | time_interpolator_reset(); | |
1869 | } | |
1870 | write_sequnlock_irqrestore(&xtime_lock, flags); | |
1871 | ||
1872 | ti->next = time_interpolator_list; | |
1873 | time_interpolator_list = ti; | |
1874 | spin_unlock(&time_interpolator_lock); | |
1875 | } | |
1876 | ||
1877 | void | |
1878 | unregister_time_interpolator(struct time_interpolator *ti) | |
1879 | { | |
1880 | struct time_interpolator *curr, **prev; | |
1881 | unsigned long flags; | |
1882 | ||
1883 | spin_lock(&time_interpolator_lock); | |
1884 | prev = &time_interpolator_list; | |
1885 | for (curr = *prev; curr; curr = curr->next) { | |
1886 | if (curr == ti) { | |
1887 | *prev = curr->next; | |
1888 | break; | |
1889 | } | |
1890 | prev = &curr->next; | |
1891 | } | |
1892 | ||
1893 | write_seqlock_irqsave(&xtime_lock, flags); | |
1894 | if (ti == time_interpolator) { | |
1895 | /* we lost the best time-interpolator: */ | |
1896 | time_interpolator = NULL; | |
1897 | /* find the next-best interpolator */ | |
1898 | for (curr = time_interpolator_list; curr; curr = curr->next) | |
1899 | if (is_better_time_interpolator(curr)) | |
1900 | time_interpolator = curr; | |
1901 | time_interpolator_reset(); | |
1902 | } | |
1903 | write_sequnlock_irqrestore(&xtime_lock, flags); | |
1904 | spin_unlock(&time_interpolator_lock); | |
1905 | } | |
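/*
 * Editorial sketch of the list-walk idiom in
 * unregister_time_interpolator() above: carrying a pointer to the
 * previous "next" field lets head and interior removals share one
 * code path.  struct my_node and my_list_del() are hypothetical.
 */
struct my_node {
	struct my_node *next;
};

static void my_list_del(struct my_node **head, struct my_node *victim)
{
	struct my_node **prev = head;
	struct my_node *curr;

	for (curr = *prev; curr; curr = curr->next) {
		if (curr == victim) {
			*prev = curr->next;	/* unlink */
			break;
		}
		prev = &curr->next;
	}
}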
1906 | #endif /* CONFIG_TIME_INTERPOLATION */ | |
1907 | ||
1908 | /** | |
1909 | * msleep - sleep safely even with waitqueue interruptions | |
1910 | * @msecs: Time in milliseconds to sleep for | |
1911 | */ | |
1912 | void msleep(unsigned int msecs) | |
1913 | { | |
1914 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
1915 | ||
75bcc8c5 NA |
1916 | while (timeout) |
1917 | timeout = schedule_timeout_uninterruptible(timeout); | |
1da177e4 LT |
1918 | } |
1919 | ||
1920 | EXPORT_SYMBOL(msleep); | |
1921 | ||
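/*
 * Editorial note: the "+ 1" in msleep() compensates for the partially
 * elapsed current tick, so the sleep lasts at least the requested
 * time.  Typical use (my_hw_settle() is hypothetical):
 */
static void my_hw_settle(void)
{
	msleep(20);	/* at least 20 ms, possibly longer */
}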
1922 | /** | |
96ec3efd | 1923 | * msleep_interruptible - sleep waiting for signals |
1da177e4 LT |
1924 | * @msecs: Time in milliseconds to sleep for |
1925 | */ | |
1926 | unsigned long msleep_interruptible(unsigned int msecs) | |
1927 | { | |
1928 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
1929 | ||
75bcc8c5 NA |
1930 | while (timeout && !signal_pending(current)) |
1931 | timeout = schedule_timeout_interruptible(timeout); | |
1da177e4 LT |
1932 | return jiffies_to_msecs(timeout); |
1933 | } | |
1934 | ||
1935 | EXPORT_SYMBOL(msleep_interruptible); |
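/*
 * Editorial sketch: the remaining time returned above lets a caller
 * distinguish a full sleep from a signal wakeup.  my_pause() is
 * hypothetical.
 */
static int my_pause(unsigned int msecs)
{
	return msleep_interruptible(msecs) ? -EINTR : 0;
}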