timekeeping: Try to catch clocksource delta underflows
kernel/time/timekeeping.c
8524070b 1/*
2 * linux/kernel/time/timekeeping.c
3 *
4 * Kernel timekeeping code and accessor functions
5 *
6 * This code was moved from linux/kernel/timer.c.
7 * Please see that file for copyright and history logs.
8 *
9 */
10
d7b4202e 11#include <linux/timekeeper_internal.h>
8524070b 12#include <linux/module.h>
13#include <linux/interrupt.h>
14#include <linux/percpu.h>
15#include <linux/init.h>
16#include <linux/mm.h>
d43c36dc 17#include <linux/sched.h>
e1a85b2c 18#include <linux/syscore_ops.h>
8524070b 19#include <linux/clocksource.h>
20#include <linux/jiffies.h>
21#include <linux/time.h>
22#include <linux/tick.h>
75c5158f 23#include <linux/stop_machine.h>
e0b306fe 24#include <linux/pvclock_gtod.h>
52f5684c 25#include <linux/compiler.h>
8524070b 26
eb93e4d9 27#include "tick-internal.h"
aa6f9c59 28#include "ntp_internal.h"
5c83545f 29#include "timekeeping_internal.h"
155ec602 30
04397fe9
DV
31#define TK_CLEAR_NTP (1 << 0)
32#define TK_MIRROR (1 << 1)
780427f0 33#define TK_CLOCK_WAS_SET (1 << 2)
04397fe9 34
3fdb14fd
TG
35/*
36 * The most important data for readout fits into a single 64 byte
37 * cache line.
38 */
39static struct {
40 seqcount_t seq;
41 struct timekeeper timekeeper;
42} tk_core ____cacheline_aligned;
43
9a7a71b1 44static DEFINE_RAW_SPINLOCK(timekeeper_lock);
48cdc135 45static struct timekeeper shadow_timekeeper;
155ec602 46
4396e058
TG
47/**
48 * struct tk_fast - NMI safe timekeeper
49 * @seq: Sequence counter for protecting updates. The lowest bit
50 * is the index for the tk_read_base array
51 * @base: tk_read_base array. Access is indexed by the lowest bit of
52 * @seq.
53 *
54 * See @update_fast_timekeeper() below.
55 */
56struct tk_fast {
57 seqcount_t seq;
58 struct tk_read_base base[2];
59};
60
61static struct tk_fast tk_fast_mono ____cacheline_aligned;
62
8fcce546
JS
63/* flag for if timekeeping is suspended */
64int __read_mostly timekeeping_suspended;
65
31ade306
FT
66/* Flag for if there is a persistent clock on this platform */
67bool __read_mostly persistent_clock_exist = false;
68
1e75fa8b
JS
69static inline void tk_normalize_xtime(struct timekeeper *tk)
70{
d28ede83
TG
71 while (tk->tkr.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr.shift)) {
72 tk->tkr.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr.shift;
1e75fa8b
JS
73 tk->xtime_sec++;
74 }
75}
76
c905fae4
TG
77static inline struct timespec64 tk_xtime(struct timekeeper *tk)
78{
79 struct timespec64 ts;
80
81 ts.tv_sec = tk->xtime_sec;
d28ede83 82 ts.tv_nsec = (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
c905fae4
TG
83 return ts;
84}
85
7d489d15 86static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
1e75fa8b
JS
87{
88 tk->xtime_sec = ts->tv_sec;
d28ede83 89 tk->tkr.xtime_nsec = (u64)ts->tv_nsec << tk->tkr.shift;
1e75fa8b
JS
90}
91
7d489d15 92static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
1e75fa8b
JS
93{
94 tk->xtime_sec += ts->tv_sec;
d28ede83 95 tk->tkr.xtime_nsec += (u64)ts->tv_nsec << tk->tkr.shift;
784ffcbb 96 tk_normalize_xtime(tk);
1e75fa8b 97}
8fcce546 98
7d489d15 99static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
6d0ef903 100{
7d489d15 101 struct timespec64 tmp;
6d0ef903
JS
102
103 /*
104 * Verify consistency of: offset_real = -wall_to_monotonic
105 * before modifying anything
106 */
7d489d15 107 set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
6d0ef903 108 -tk->wall_to_monotonic.tv_nsec);
7d489d15 109 WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64);
6d0ef903 110 tk->wall_to_monotonic = wtm;
7d489d15
JS
111 set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
112 tk->offs_real = timespec64_to_ktime(tmp);
04005f60 113 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
6d0ef903
JS
114}
115
47da70d3 116static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
6d0ef903 117{
47da70d3 118 tk->offs_boot = ktime_add(tk->offs_boot, delta);
6d0ef903
JS
119}
120
3c17ad19
JS
121#ifdef CONFIG_DEBUG_TIMEKEEPING
122static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
123{
124
125 cycle_t max_cycles = tk->tkr.clock->max_cycles;
126 const char *name = tk->tkr.clock->name;
127
128 if (offset > max_cycles) {
a558cd02 129 printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
3c17ad19 130 offset, name, max_cycles);
a558cd02 131 printk_deferred(" timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
3c17ad19
JS
132 } else {
133 if (offset > (max_cycles >> 1)) {
134 printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the the '%s' clock's 50%% safety margin (%lld)\n",
135 offset, name, max_cycles >> 1);
136 printk_deferred(" timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
137 }
138 }
139}
a558cd02
JS
140
141static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
142{
143 cycle_t cycle_now, delta;
144
145 /* read clocksource */
146 cycle_now = tkr->read(tkr->clock);
147
148 /* calculate the delta since the last update_wall_time */
149 delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
150
057b87e3
JS
151 /*
152 * Try to catch underflows by checking if we are seeing small
153 * mask-relative negative values.
154 */
155 if (unlikely((~delta & tkr->mask) < (tkr->mask >> 3)))
156 delta = 0;
157
a558cd02
JS
158 /* Cap delta value to the max_cycles values to avoid mult overflows */
159 if (unlikely(delta > tkr->clock->max_cycles))
160 delta = tkr->clock->max_cycles;
161
162 return delta;
163}
3c17ad19
JS
164#else
165static inline void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
166{
167}
a558cd02
JS
168static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
169{
170 cycle_t cycle_now, delta;
171
172 /* read clocksource */
173 cycle_now = tkr->read(tkr->clock);
174
175 /* calculate the delta since the last update_wall_time */
176 delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
177
178 return delta;
179}
3c17ad19
JS
180#endif
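/*
 * Illustrative user-space sketch (not kernel code) of the underflow check in
 * the CONFIG_DEBUG_TIMEKEEPING variant of timekeeping_get_delta() above: a
 * read that races slightly behind cycle_last yields a "negative" delta that
 * wraps to a value just below the mask, which the
 * (~delta & mask) < (mask >> 3) test maps back to zero. All values below are
 * made up for demonstration.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t cycle_t;

static cycle_t sketch_delta(cycle_t now, cycle_t last, cycle_t mask)
{
	cycle_t delta = (now - last) & mask;	/* like clocksource_delta() */

	/* Small mask-relative negative values indicate an underflow */
	if ((~delta & mask) < (mask >> 3))
		delta = 0;
	return delta;
}

int main(void)
{
	cycle_t mask = (1ULL << 32) - 1;	/* hypothetical 32 bit counter */

	/* now is 5 cycles behind cycle_last: caught and clamped to 0 */
	printf("%llu\n", (unsigned long long)sketch_delta(100, 105, mask));
	/* normal forward progress of 7 cycles */
	printf("%llu\n", (unsigned long long)sketch_delta(112, 105, mask));
	return 0;
}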
181
155ec602 182/**
d26e4fe0 183 * tk_setup_internals - Set up internals to use clocksource clock.
155ec602 184 *
d26e4fe0 185 * @tk: The target timekeeper to setup.
155ec602
MS
186 * @clock: Pointer to clocksource.
187 *
188 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
189 * pair and interval request.
190 *
191 * Unless you're the timekeeping code, you should not be using this!
192 */
f726a697 193static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
155ec602
MS
194{
195 cycle_t interval;
a386b5af 196 u64 tmp, ntpinterval;
1e75fa8b 197 struct clocksource *old_clock;
155ec602 198
d28ede83
TG
199 old_clock = tk->tkr.clock;
200 tk->tkr.clock = clock;
201 tk->tkr.read = clock->read;
202 tk->tkr.mask = clock->mask;
203 tk->tkr.cycle_last = tk->tkr.read(clock);
155ec602
MS
204
205 /* Do the ns -> cycle conversion first, using original mult */
206 tmp = NTP_INTERVAL_LENGTH;
207 tmp <<= clock->shift;
a386b5af 208 ntpinterval = tmp;
0a544198
MS
209 tmp += clock->mult/2;
210 do_div(tmp, clock->mult);
155ec602
MS
211 if (tmp == 0)
212 tmp = 1;
213
214 interval = (cycle_t) tmp;
f726a697 215 tk->cycle_interval = interval;
155ec602
MS
216
217 /* Go back from cycles -> shifted ns */
f726a697
JS
218 tk->xtime_interval = (u64) interval * clock->mult;
219 tk->xtime_remainder = ntpinterval - tk->xtime_interval;
220 tk->raw_interval =
0a544198 221 ((u64) interval * clock->mult) >> clock->shift;
155ec602 222
1e75fa8b
JS
223 /* if changing clocks, convert xtime_nsec shift units */
224 if (old_clock) {
225 int shift_change = clock->shift - old_clock->shift;
226 if (shift_change < 0)
d28ede83 227 tk->tkr.xtime_nsec >>= -shift_change;
1e75fa8b 228 else
d28ede83 229 tk->tkr.xtime_nsec <<= shift_change;
1e75fa8b 230 }
d28ede83 231 tk->tkr.shift = clock->shift;
155ec602 232
f726a697
JS
233 tk->ntp_error = 0;
234 tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
375f45b5 235 tk->ntp_tick = ntpinterval << tk->ntp_error_shift;
0a544198
MS
236
237 /*
238 * The timekeeper keeps its own mult values for the currently
239 * active clocksource. These values will be adjusted via NTP
240 * to counteract clock drifting.
241 */
d28ede83 242 tk->tkr.mult = clock->mult;
dc491596 243 tk->ntp_err_mult = 0;
155ec602 244}
8524070b 245
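/*
 * User-space sketch of the ns -> cycles conversion performed in
 * tk_setup_internals() above: the tick interval length (in shifted
 * nanoseconds) is divided by the clocksource mult with round-to-nearest to
 * get cycle_interval, then converted back to xtime_interval and the leftover
 * xtime_remainder. The interval length, mult and shift below are
 * hypothetical, purely for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t interval_ns = 10000000;	/* assume a 10 ms tick */
	uint32_t shift = 24;
	uint32_t mult = 699050666;		/* ~41.67 ns/cycle, shifted by 24 */

	uint64_t ntpinterval = interval_ns << shift;	/* shifted ns */
	uint64_t tmp = ntpinterval + mult / 2;		/* round to nearest */

	tmp /= mult;					/* like do_div(tmp, mult) */

	printf("cycle_interval  = %llu cycles\n", (unsigned long long)tmp);
	printf("xtime_interval  = %llu shifted ns\n",
	       (unsigned long long)(tmp * mult));
	printf("xtime_remainder = %lld shifted ns\n",
	       (long long)(ntpinterval - tmp * mult));
	return 0;
}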
2ba2a305 246/* Timekeeper helper functions. */
7b1f6207
SW
247
248#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
e06fde37
TG
249static u32 default_arch_gettimeoffset(void) { return 0; }
250u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
7b1f6207 251#else
e06fde37 252static inline u32 arch_gettimeoffset(void) { return 0; }
7b1f6207
SW
253#endif
254
0e5ac3a8 255static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
2ba2a305 256{
a558cd02 257 cycle_t delta;
1e75fa8b 258 s64 nsec;
2ba2a305 259
a558cd02 260 delta = timekeeping_get_delta(tkr);
2ba2a305 261
0e5ac3a8
TG
262 nsec = delta * tkr->mult + tkr->xtime_nsec;
263 nsec >>= tkr->shift;
f2a5a085 264
7b1f6207 265 /* If arch requires, add in get_arch_timeoffset() */
e06fde37 266 return nsec + arch_gettimeoffset();
2ba2a305
MS
267}
268
f726a697 269static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
2ba2a305 270{
d28ede83 271 struct clocksource *clock = tk->tkr.clock;
a558cd02 272 cycle_t delta;
f2a5a085 273 s64 nsec;
2ba2a305 274
a558cd02 275 delta = timekeeping_get_delta(&tk->tkr);
2ba2a305 276
f2a5a085 277 /* convert delta to nanoseconds. */
3a978377 278 nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
f2a5a085 279
7b1f6207 280 /* If arch requires, add in get_arch_timeoffset() */
e06fde37 281 return nsec + arch_gettimeoffset();
2ba2a305
MS
282}
283
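/*
 * User-space sketch of the readout math in timekeeping_get_ns() and
 * timekeeping_get_ns_raw() above: nanoseconds are derived from the cycle
 * delta as (delta * mult) >> shift, with the monotonic path additionally
 * folding in the shifted xtime_nsec. The 24 MHz counter and its mult/shift
 * below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	uint32_t shift = 24;
	/* ~41.666 ns per cycle for an assumed 24 MHz clocksource */
	uint32_t mult = (uint32_t)((1000ULL << shift) / 24);

	/* one second worth of cycles converts to (almost) 1e9 ns */
	printf("%llu ns\n", (unsigned long long)cyc2ns(24000000, mult, shift));
	return 0;
}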
4396e058
TG
284/**
285 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
affe3e85 286 * @tkr: Timekeeping readout base from which we take the update
4396e058
TG
287 *
288 * We want to use this from any context including NMI and tracing /
289 * instrumenting the timekeeping code itself.
290 *
291 * So we handle this differently than the other timekeeping accessor
292 * functions which retry when the sequence count has changed. The
293 * update side does:
294 *
295 * smp_wmb(); <- Ensure that the last base[1] update is visible
296 * tkf->seq++;
297 * smp_wmb(); <- Ensure that the seqcount update is visible
affe3e85 298 * update(tkf->base[0], tkr);
4396e058
TG
299 * smp_wmb(); <- Ensure that the base[0] update is visible
300 * tkf->seq++;
301 * smp_wmb(); <- Ensure that the seqcount update is visible
affe3e85 302 * update(tkf->base[1], tkr);
4396e058
TG
303 *
304 * The reader side does:
305 *
306 * do {
307 * seq = tkf->seq;
308 * smp_rmb();
309 * idx = seq & 0x01;
310 * now = now(tkf->base[idx]);
311 * smp_rmb();
312 * } while (seq != tkf->seq)
313 *
314 * As long as we update base[0] readers are forced off to
315 * base[1]. Once base[0] is updated readers are redirected to base[0]
316 * and the base[1] update takes place.
317 *
318 * So if a NMI hits the update of base[0] then it will use base[1]
319 which is still consistent. In the worst case this can result in a
320 * slightly wrong timestamp (a few nanoseconds). See
321 * @ktime_get_mono_fast_ns.
322 */
affe3e85 323static void update_fast_timekeeper(struct tk_read_base *tkr)
4396e058
TG
324{
325 struct tk_read_base *base = tk_fast_mono.base;
326
327 /* Force readers off to base[1] */
328 raw_write_seqcount_latch(&tk_fast_mono.seq);
329
330 /* Update base[0] */
affe3e85 331 memcpy(base, tkr, sizeof(*base));
4396e058
TG
332
333 /* Force readers back to base[0] */
334 raw_write_seqcount_latch(&tk_fast_mono.seq);
335
336 /* Update base[1] */
337 memcpy(base + 1, base, sizeof(*base));
338}
339
340/**
341 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
342 *
343 * This timestamp is not guaranteed to be monotonic across an update.
344 * The timestamp is calculated by:
345 *
346 * now = base_mono + clock_delta * slope
347 *
348 * So if the update lowers the slope, readers who are forced to the
349 * not yet updated second array are still using the old steeper slope.
350 *
351 * tmono
352 * ^
353 * | o n
354 * | o n
355 * | u
356 * | o
357 * |o
358 * |12345678---> reader order
359 *
360 * o = old slope
361 * u = update
362 * n = new slope
363 *
364 * So reader 6 will observe time going backwards versus reader 5.
365 *
366 * While other CPUs are likely to be able to observe that, the only way
367 * for a CPU local observation is when an NMI hits in the middle of
368 * the update. Timestamps taken from that NMI context might be ahead
369 * of the following timestamps. Callers need to be aware of that and
370 * deal with it.
371 */
372u64 notrace ktime_get_mono_fast_ns(void)
373{
374 struct tk_read_base *tkr;
375 unsigned int seq;
376 u64 now;
377
378 do {
379 seq = raw_read_seqcount(&tk_fast_mono.seq);
380 tkr = tk_fast_mono.base + (seq & 0x01);
381 now = ktime_to_ns(tkr->base_mono) + timekeeping_get_ns(tkr);
382
383 } while (read_seqcount_retry(&tk_fast_mono.seq, seq));
384 return now;
385}
386EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
387
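/*
 * Self-contained user-space sketch of the two-copy latch pattern documented
 * above, using C11 atomics with default (seq_cst) ordering instead of the
 * kernel's seqcount_latch/smp_*mb() primitives. It only illustrates the
 * indexing scheme: while the writer updates slot seq & 1, readers are steered
 * to the other, stable slot. Not the kernel implementation.
 */
#include <stdatomic.h>
#include <stdint.h>

struct latch_u64 {
	atomic_uint seq;
	_Atomic uint64_t data[2];
};

static void latch_update(struct latch_u64 *l, uint64_t val)
{
	atomic_fetch_add(&l->seq, 1);	/* force readers off to data[1] */
	atomic_store(&l->data[0], val);
	atomic_fetch_add(&l->seq, 1);	/* force readers back to data[0] */
	atomic_store(&l->data[1], val);
}

static uint64_t latch_read(struct latch_u64 *l)
{
	unsigned int seq;
	uint64_t val;

	do {
		seq = atomic_load(&l->seq);
		val = atomic_load(&l->data[seq & 1]);
	} while (seq != atomic_load(&l->seq));

	return val;
}

int main(void)
{
	static struct latch_u64 l;

	latch_update(&l, 42);
	return latch_read(&l) == 42 ? 0 : 1;
}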
060407ae
RW
388/* Suspend-time cycles value for halted fast timekeeper. */
389static cycle_t cycles_at_suspend;
390
391static cycle_t dummy_clock_read(struct clocksource *cs)
392{
393 return cycles_at_suspend;
394}
395
396/**
397 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
398 * @tk: Timekeeper to snapshot.
399 *
400 * It generally is unsafe to access the clocksource after timekeeping has been
401 * suspended, so take a snapshot of the readout base of @tk and use it as the
402 * fast timekeeper's readout base while suspended. It will return the same
403 * number of cycles every time until timekeeping is resumed at which time the
404 * proper readout base for the fast timekeeper will be restored automatically.
405 */
406static void halt_fast_timekeeper(struct timekeeper *tk)
407{
408 static struct tk_read_base tkr_dummy;
409 struct tk_read_base *tkr = &tk->tkr;
410
411 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
412 cycles_at_suspend = tkr->read(tkr->clock);
413 tkr_dummy.read = dummy_clock_read;
414 update_fast_timekeeper(&tkr_dummy);
415}
416
c905fae4
TG
417#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
418
419static inline void update_vsyscall(struct timekeeper *tk)
420{
0680eb1f 421 struct timespec xt, wm;
c905fae4 422
e2dff1ec 423 xt = timespec64_to_timespec(tk_xtime(tk));
0680eb1f
JS
424 wm = timespec64_to_timespec(tk->wall_to_monotonic);
425 update_vsyscall_old(&xt, &wm, tk->tkr.clock, tk->tkr.mult,
d28ede83 426 tk->tkr.cycle_last);
c905fae4
TG
427}
428
429static inline void old_vsyscall_fixup(struct timekeeper *tk)
430{
431 s64 remainder;
432
433 /*
434 * Store only full nanoseconds into xtime_nsec after rounding
435 * it up and add the remainder to the error difference.
436 * XXX - This is necessary to avoid small 1ns inconsistencies caused
437 * by truncating the remainder in vsyscalls. However, it causes
438 * additional work to be done in timekeeping_adjust(). Once
439 * the vsyscall implementations are converted to use xtime_nsec
440 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
441 * users are removed, this can be killed.
442 */
d28ede83
TG
443 remainder = tk->tkr.xtime_nsec & ((1ULL << tk->tkr.shift) - 1);
444 tk->tkr.xtime_nsec -= remainder;
445 tk->tkr.xtime_nsec += 1ULL << tk->tkr.shift;
c905fae4 446 tk->ntp_error += remainder << tk->ntp_error_shift;
d28ede83 447 tk->ntp_error -= (1ULL << tk->tkr.shift) << tk->ntp_error_shift;
c905fae4
TG
448}
449#else
450#define old_vsyscall_fixup(tk)
451#endif
452
e0b306fe
MT
453static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
454
780427f0 455static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
e0b306fe 456{
780427f0 457 raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
e0b306fe
MT
458}
459
460/**
461 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
e0b306fe
MT
462 */
463int pvclock_gtod_register_notifier(struct notifier_block *nb)
464{
3fdb14fd 465 struct timekeeper *tk = &tk_core.timekeeper;
e0b306fe
MT
466 unsigned long flags;
467 int ret;
468
9a7a71b1 469 raw_spin_lock_irqsave(&timekeeper_lock, flags);
e0b306fe 470 ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
780427f0 471 update_pvclock_gtod(tk, true);
9a7a71b1 472 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
e0b306fe
MT
473
474 return ret;
475}
476EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
477
478/**
479 * pvclock_gtod_unregister_notifier - unregister a pvclock
480 * timedata update listener
e0b306fe
MT
481 */
482int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
483{
e0b306fe
MT
484 unsigned long flags;
485 int ret;
486
9a7a71b1 487 raw_spin_lock_irqsave(&timekeeper_lock, flags);
e0b306fe 488 ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
9a7a71b1 489 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
e0b306fe
MT
490
491 return ret;
492}
493EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
494
7c032df5
TG
495/*
496 * Update the ktime_t based scalar nsec members of the timekeeper
497 */
498static inline void tk_update_ktime_data(struct timekeeper *tk)
499{
9e3680b1
HS
500 u64 seconds;
501 u32 nsec;
7c032df5
TG
502
503 /*
504 * The xtime based monotonic readout is:
505 * nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
506 * The ktime based monotonic readout is:
507 * nsec = base_mono + now();
508 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
509 */
9e3680b1
HS
510 seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
511 nsec = (u32) tk->wall_to_monotonic.tv_nsec;
512 tk->tkr.base_mono = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
f519b1a2
TG
513
514 /* Update the monotonic raw base */
515 tk->base_raw = timespec64_to_ktime(tk->raw_time);
9e3680b1
HS
516
517 /*
518 * The sum of the nanoseconds portions of xtime and
519 * wall_to_monotonic can be greater than or equal to one second. Take
520 * this into account before updating tk->ktime_sec.
521 */
522 nsec += (u32)(tk->tkr.xtime_nsec >> tk->tkr.shift);
523 if (nsec >= NSEC_PER_SEC)
524 seconds++;
525 tk->ktime_sec = seconds;
7c032df5
TG
526}
527
9a7a71b1 528/* must hold timekeeper_lock */
04397fe9 529static void timekeeping_update(struct timekeeper *tk, unsigned int action)
cc06268c 530{
04397fe9 531 if (action & TK_CLEAR_NTP) {
f726a697 532 tk->ntp_error = 0;
cc06268c
TG
533 ntp_clear();
534 }
48cdc135 535
7c032df5
TG
536 tk_update_ktime_data(tk);
537
9bf2419f
TG
538 update_vsyscall(tk);
539 update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
540
04397fe9 541 if (action & TK_MIRROR)
3fdb14fd
TG
542 memcpy(&shadow_timekeeper, &tk_core.timekeeper,
543 sizeof(tk_core.timekeeper));
4396e058 544
affe3e85 545 update_fast_timekeeper(&tk->tkr);
cc06268c
TG
546}
547
8524070b 548/**
155ec602 549 * timekeeping_forward_now - update clock to the current time
8524070b 550 *
9a055117
RZ
551 * Forward the current clock to update its state since the last call to
552 * update_wall_time(). This is useful before significant clock changes,
553 * as it avoids having to deal with this time offset explicitly.
8524070b 554 */
f726a697 555static void timekeeping_forward_now(struct timekeeper *tk)
8524070b 556{
d28ede83 557 struct clocksource *clock = tk->tkr.clock;
3a978377 558 cycle_t cycle_now, delta;
9a055117 559 s64 nsec;
8524070b 560
d28ede83
TG
561 cycle_now = tk->tkr.read(clock);
562 delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
563 tk->tkr.cycle_last = cycle_now;
8524070b 564
d28ede83 565 tk->tkr.xtime_nsec += delta * tk->tkr.mult;
7d27558c 566
7b1f6207 567 /* If arch requires, add in get_arch_timeoffset() */
d28ede83 568 tk->tkr.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr.shift;
7d27558c 569
f726a697 570 tk_normalize_xtime(tk);
2d42244a 571
3a978377 572 nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
7d489d15 573 timespec64_add_ns(&tk->raw_time, nsec);
8524070b 574}
575
576/**
d6d29896 577 * __getnstimeofday64 - Returns the time of day in a timespec64.
8524070b 578 * @ts: pointer to the timespec to be set
579 *
1e817fb6
KC
580 * Updates the time of day in the timespec.
581 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
8524070b 582 */
d6d29896 583int __getnstimeofday64(struct timespec64 *ts)
8524070b 584{
3fdb14fd 585 struct timekeeper *tk = &tk_core.timekeeper;
8524070b 586 unsigned long seq;
1e75fa8b 587 s64 nsecs = 0;
8524070b 588
589 do {
3fdb14fd 590 seq = read_seqcount_begin(&tk_core.seq);
8524070b 591
4e250fdd 592 ts->tv_sec = tk->xtime_sec;
0e5ac3a8 593 nsecs = timekeeping_get_ns(&tk->tkr);
8524070b 594
3fdb14fd 595 } while (read_seqcount_retry(&tk_core.seq, seq));
8524070b 596
ec145bab 597 ts->tv_nsec = 0;
d6d29896 598 timespec64_add_ns(ts, nsecs);
1e817fb6
KC
599
600 /*
601 * Do not bail out early, in case there were callers still using
602 * the value, even in the face of the WARN_ON.
603 */
604 if (unlikely(timekeeping_suspended))
605 return -EAGAIN;
606 return 0;
607}
d6d29896 608EXPORT_SYMBOL(__getnstimeofday64);
1e817fb6
KC
609
610/**
d6d29896 611 * getnstimeofday64 - Returns the time of day in a timespec64.
5322e4c2 612 * @ts: pointer to the timespec64 to be set
1e817fb6 613 *
5322e4c2 614 * Returns the time of day in a timespec64 (WARN if suspended).
1e817fb6 615 */
d6d29896 616void getnstimeofday64(struct timespec64 *ts)
1e817fb6 617{
d6d29896 618 WARN_ON(__getnstimeofday64(ts));
8524070b 619}
d6d29896 620EXPORT_SYMBOL(getnstimeofday64);
8524070b 621
951ed4d3
MS
622ktime_t ktime_get(void)
623{
3fdb14fd 624 struct timekeeper *tk = &tk_core.timekeeper;
951ed4d3 625 unsigned int seq;
a016a5bd
TG
626 ktime_t base;
627 s64 nsecs;
951ed4d3
MS
628
629 WARN_ON(timekeeping_suspended);
630
631 do {
3fdb14fd 632 seq = read_seqcount_begin(&tk_core.seq);
d28ede83 633 base = tk->tkr.base_mono;
0e5ac3a8 634 nsecs = timekeeping_get_ns(&tk->tkr);
951ed4d3 635
3fdb14fd 636 } while (read_seqcount_retry(&tk_core.seq, seq));
24e4a8c3 637
a016a5bd 638 return ktime_add_ns(base, nsecs);
951ed4d3
MS
639}
640EXPORT_SYMBOL_GPL(ktime_get);
641
0077dc60
TG
642static ktime_t *offsets[TK_OFFS_MAX] = {
643 [TK_OFFS_REAL] = &tk_core.timekeeper.offs_real,
644 [TK_OFFS_BOOT] = &tk_core.timekeeper.offs_boot,
645 [TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai,
646};
647
648ktime_t ktime_get_with_offset(enum tk_offsets offs)
649{
650 struct timekeeper *tk = &tk_core.timekeeper;
651 unsigned int seq;
652 ktime_t base, *offset = offsets[offs];
653 s64 nsecs;
654
655 WARN_ON(timekeeping_suspended);
656
657 do {
658 seq = read_seqcount_begin(&tk_core.seq);
d28ede83 659 base = ktime_add(tk->tkr.base_mono, *offset);
0e5ac3a8 660 nsecs = timekeeping_get_ns(&tk->tkr);
0077dc60
TG
661
662 } while (read_seqcount_retry(&tk_core.seq, seq));
663
664 return ktime_add_ns(base, nsecs);
665
666}
667EXPORT_SYMBOL_GPL(ktime_get_with_offset);
668
9a6b5197
TG
669/**
670 * ktime_mono_to_any() - convert monotonic time to any other time
671 * @tmono: time to convert.
672 * @offs: which offset to use
673 */
674ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
675{
676 ktime_t *offset = offsets[offs];
677 unsigned long seq;
678 ktime_t tconv;
679
680 do {
681 seq = read_seqcount_begin(&tk_core.seq);
682 tconv = ktime_add(tmono, *offset);
683 } while (read_seqcount_retry(&tk_core.seq, seq));
684
685 return tconv;
686}
687EXPORT_SYMBOL_GPL(ktime_mono_to_any);
688
f519b1a2
TG
689/**
690 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
691 */
692ktime_t ktime_get_raw(void)
693{
694 struct timekeeper *tk = &tk_core.timekeeper;
695 unsigned int seq;
696 ktime_t base;
697 s64 nsecs;
698
699 do {
700 seq = read_seqcount_begin(&tk_core.seq);
701 base = tk->base_raw;
702 nsecs = timekeeping_get_ns_raw(tk);
703
704 } while (read_seqcount_retry(&tk_core.seq, seq));
705
706 return ktime_add_ns(base, nsecs);
707}
708EXPORT_SYMBOL_GPL(ktime_get_raw);
709
951ed4d3 710/**
d6d29896 711 * ktime_get_ts64 - get the monotonic clock in timespec64 format
951ed4d3
MS
712 * @ts: pointer to timespec variable
713 *
714 * The function calculates the monotonic clock from the realtime
715 * clock and the wall_to_monotonic offset and stores the result
5322e4c2 716 * in normalized timespec64 format in the variable pointed to by @ts.
951ed4d3 717 */
d6d29896 718void ktime_get_ts64(struct timespec64 *ts)
951ed4d3 719{
3fdb14fd 720 struct timekeeper *tk = &tk_core.timekeeper;
d6d29896 721 struct timespec64 tomono;
ec145bab 722 s64 nsec;
951ed4d3 723 unsigned int seq;
951ed4d3
MS
724
725 WARN_ON(timekeeping_suspended);
726
727 do {
3fdb14fd 728 seq = read_seqcount_begin(&tk_core.seq);
d6d29896 729 ts->tv_sec = tk->xtime_sec;
0e5ac3a8 730 nsec = timekeeping_get_ns(&tk->tkr);
4e250fdd 731 tomono = tk->wall_to_monotonic;
951ed4d3 732
3fdb14fd 733 } while (read_seqcount_retry(&tk_core.seq, seq));
951ed4d3 734
d6d29896
TG
735 ts->tv_sec += tomono.tv_sec;
736 ts->tv_nsec = 0;
737 timespec64_add_ns(ts, nsec + tomono.tv_nsec);
951ed4d3 738}
d6d29896 739EXPORT_SYMBOL_GPL(ktime_get_ts64);
951ed4d3 740
9e3680b1
HS
741/**
742 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
743 *
744 * Returns the seconds portion of CLOCK_MONOTONIC with a single non
745 * serialized read. tk->ktime_sec is of type 'unsigned long' so this
746 * works on both 32 and 64 bit systems. On 32 bit systems the readout
747 * covers ~136 years of uptime which should be enough to prevent
748 * premature wrap arounds.
749 */
750time64_t ktime_get_seconds(void)
751{
752 struct timekeeper *tk = &tk_core.timekeeper;
753
754 WARN_ON(timekeeping_suspended);
755 return tk->ktime_sec;
756}
757EXPORT_SYMBOL_GPL(ktime_get_seconds);
758
dbe7aa62
HS
759/**
760 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
761 *
762 * Returns the wall clock seconds since 1970. This replaces the
763 * get_seconds() interface which is not y2038 safe on 32bit systems.
764 *
765 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
766 * 32bit systems the access must be protected with the sequence
767 * counter to provide "atomic" access to the 64bit tk->xtime_sec
768 * value.
769 */
770time64_t ktime_get_real_seconds(void)
771{
772 struct timekeeper *tk = &tk_core.timekeeper;
773 time64_t seconds;
774 unsigned int seq;
775
776 if (IS_ENABLED(CONFIG_64BIT))
777 return tk->xtime_sec;
778
779 do {
780 seq = read_seqcount_begin(&tk_core.seq);
781 seconds = tk->xtime_sec;
782
783 } while (read_seqcount_retry(&tk_core.seq, seq));
784
785 return seconds;
786}
787EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
788
e2c18e49
AG
789#ifdef CONFIG_NTP_PPS
790
791/**
792 * getnstime_raw_and_real - get the time of day and raw monotonic time in timespec format
793 * @ts_raw: pointer to the timespec to be set to raw monotonic time
794 * @ts_real: pointer to the timespec to be set to the time of day
795 *
796 * This function reads both the time of day and raw monotonic time at the
797 * same time atomically and stores the resulting timestamps in timespec
798 * format.
799 */
800void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
801{
3fdb14fd 802 struct timekeeper *tk = &tk_core.timekeeper;
e2c18e49
AG
803 unsigned long seq;
804 s64 nsecs_raw, nsecs_real;
805
806 WARN_ON_ONCE(timekeeping_suspended);
807
808 do {
3fdb14fd 809 seq = read_seqcount_begin(&tk_core.seq);
e2c18e49 810
7d489d15 811 *ts_raw = timespec64_to_timespec(tk->raw_time);
4e250fdd 812 ts_real->tv_sec = tk->xtime_sec;
1e75fa8b 813 ts_real->tv_nsec = 0;
e2c18e49 814
4e250fdd 815 nsecs_raw = timekeeping_get_ns_raw(tk);
0e5ac3a8 816 nsecs_real = timekeeping_get_ns(&tk->tkr);
e2c18e49 817
3fdb14fd 818 } while (read_seqcount_retry(&tk_core.seq, seq));
e2c18e49
AG
819
820 timespec_add_ns(ts_raw, nsecs_raw);
821 timespec_add_ns(ts_real, nsecs_real);
822}
823EXPORT_SYMBOL(getnstime_raw_and_real);
824
825#endif /* CONFIG_NTP_PPS */
826
8524070b 827/**
828 * do_gettimeofday - Returns the time of day in a timeval
829 * @tv: pointer to the timeval to be set
830 *
efd9ac86 831 * NOTE: Users should be converted to using getnstimeofday()
8524070b 832 */
833void do_gettimeofday(struct timeval *tv)
834{
d6d29896 835 struct timespec64 now;
8524070b 836
d6d29896 837 getnstimeofday64(&now);
8524070b 838 tv->tv_sec = now.tv_sec;
839 tv->tv_usec = now.tv_nsec/1000;
840}
8524070b 841EXPORT_SYMBOL(do_gettimeofday);
d239f49d 842
8524070b 843/**
21f7eca5 844 * do_settimeofday64 - Sets the time of day.
845 * @ts: pointer to the timespec64 variable containing the new time
8524070b 846 *
847 * Sets the time of day to the new time and update NTP and notify hrtimers
848 */
21f7eca5 849int do_settimeofday64(const struct timespec64 *ts)
8524070b 850{
3fdb14fd 851 struct timekeeper *tk = &tk_core.timekeeper;
21f7eca5 852 struct timespec64 ts_delta, xt;
92c1d3ed 853 unsigned long flags;
8524070b 854
21f7eca5 855 if (!timespec64_valid_strict(ts))
8524070b 856 return -EINVAL;
857
9a7a71b1 858 raw_spin_lock_irqsave(&timekeeper_lock, flags);
3fdb14fd 859 write_seqcount_begin(&tk_core.seq);
8524070b 860
4e250fdd 861 timekeeping_forward_now(tk);
9a055117 862
4e250fdd 863 xt = tk_xtime(tk);
21f7eca5 864 ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
865 ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;
1e75fa8b 866
7d489d15 867 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
8524070b 868
21f7eca5 869 tk_set_xtime(tk, ts);
1e75fa8b 870
780427f0 871 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
8524070b 872
3fdb14fd 873 write_seqcount_end(&tk_core.seq);
9a7a71b1 874 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
8524070b 875
876 /* signal hrtimers about time change */
877 clock_was_set();
878
879 return 0;
880}
21f7eca5 881EXPORT_SYMBOL(do_settimeofday64);
8524070b 882
c528f7c6
JS
883/**
884 * timekeeping_inject_offset - Adds or subtracts from the current time.
885 * @ts: pointer to the timespec variable containing the offset
886 *
887 * Adds or subtracts an offset value from the current time.
888 */
889int timekeeping_inject_offset(struct timespec *ts)
890{
3fdb14fd 891 struct timekeeper *tk = &tk_core.timekeeper;
92c1d3ed 892 unsigned long flags;
7d489d15 893 struct timespec64 ts64, tmp;
4e8b1452 894 int ret = 0;
c528f7c6
JS
895
896 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
897 return -EINVAL;
898
7d489d15
JS
899 ts64 = timespec_to_timespec64(*ts);
900
9a7a71b1 901 raw_spin_lock_irqsave(&timekeeper_lock, flags);
3fdb14fd 902 write_seqcount_begin(&tk_core.seq);
c528f7c6 903
4e250fdd 904 timekeeping_forward_now(tk);
c528f7c6 905
4e8b1452 906 /* Make sure the proposed value is valid */
7d489d15
JS
907 tmp = timespec64_add(tk_xtime(tk), ts64);
908 if (!timespec64_valid_strict(&tmp)) {
4e8b1452
JS
909 ret = -EINVAL;
910 goto error;
911 }
1e75fa8b 912
7d489d15
JS
913 tk_xtime_add(tk, &ts64);
914 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));
c528f7c6 915
4e8b1452 916error: /* even if we error out, we forwarded the time, so call update */
780427f0 917 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
c528f7c6 918
3fdb14fd 919 write_seqcount_end(&tk_core.seq);
9a7a71b1 920 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
c528f7c6
JS
921
922 /* signal hrtimers about time change */
923 clock_was_set();
924
4e8b1452 925 return ret;
c528f7c6
JS
926}
927EXPORT_SYMBOL(timekeeping_inject_offset);
928
cc244dda
JS
929
930/**
931 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
932 *
933 */
934s32 timekeeping_get_tai_offset(void)
935{
3fdb14fd 936 struct timekeeper *tk = &tk_core.timekeeper;
cc244dda
JS
937 unsigned int seq;
938 s32 ret;
939
940 do {
3fdb14fd 941 seq = read_seqcount_begin(&tk_core.seq);
cc244dda 942 ret = tk->tai_offset;
3fdb14fd 943 } while (read_seqcount_retry(&tk_core.seq, seq));
cc244dda
JS
944
945 return ret;
946}
947
948/**
949 * __timekeeping_set_tai_offset - Lock free worker function
950 *
951 */
dd5d70e8 952static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
cc244dda
JS
953{
954 tk->tai_offset = tai_offset;
04005f60 955 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
cc244dda
JS
956}
957
958/**
959 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
960 *
961 */
962void timekeeping_set_tai_offset(s32 tai_offset)
963{
3fdb14fd 964 struct timekeeper *tk = &tk_core.timekeeper;
cc244dda
JS
965 unsigned long flags;
966
9a7a71b1 967 raw_spin_lock_irqsave(&timekeeper_lock, flags);
3fdb14fd 968 write_seqcount_begin(&tk_core.seq);
cc244dda 969 __timekeeping_set_tai_offset(tk, tai_offset);
f55c0760 970 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
3fdb14fd 971 write_seqcount_end(&tk_core.seq);
9a7a71b1 972 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
4e8f8b34 973 clock_was_set();
cc244dda
JS
974}
975
8524070b 976/**
977 * change_clocksource - Swaps clocksources if a new one is available
978 *
979 * Accumulates current time interval and initializes new clocksource
980 */
75c5158f 981static int change_clocksource(void *data)
8524070b 982{
3fdb14fd 983 struct timekeeper *tk = &tk_core.timekeeper;
4614e6ad 984 struct clocksource *new, *old;
f695cf94 985 unsigned long flags;
8524070b 986
75c5158f 987 new = (struct clocksource *) data;
8524070b 988
9a7a71b1 989 raw_spin_lock_irqsave(&timekeeper_lock, flags);
3fdb14fd 990 write_seqcount_begin(&tk_core.seq);
f695cf94 991
4e250fdd 992 timekeeping_forward_now(tk);
09ac369c
TG
993 /*
994 * If the cs is in module, get a module reference. Succeeds
995 * for built-in code (owner == NULL) as well.
996 */
997 if (try_module_get(new->owner)) {
998 if (!new->enable || new->enable(new) == 0) {
d28ede83 999 old = tk->tkr.clock;
09ac369c
TG
1000 tk_setup_internals(tk, new);
1001 if (old->disable)
1002 old->disable(old);
1003 module_put(old->owner);
1004 } else {
1005 module_put(new->owner);
1006 }
75c5158f 1007 }
780427f0 1008 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
f695cf94 1009
3fdb14fd 1010 write_seqcount_end(&tk_core.seq);
9a7a71b1 1011 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
f695cf94 1012
75c5158f
MS
1013 return 0;
1014}
8524070b 1015
75c5158f
MS
1016/**
1017 * timekeeping_notify - Install a new clock source
1018 * @clock: pointer to the clock source
1019 *
1020 * This function is called from clocksource.c after a new, better clock
1021 * source has been registered. The caller holds the clocksource_mutex.
1022 */
ba919d1c 1023int timekeeping_notify(struct clocksource *clock)
75c5158f 1024{
3fdb14fd 1025 struct timekeeper *tk = &tk_core.timekeeper;
4e250fdd 1026
d28ede83 1027 if (tk->tkr.clock == clock)
ba919d1c 1028 return 0;
75c5158f 1029 stop_machine(change_clocksource, clock, NULL);
8524070b 1030 tick_clock_notify();
d28ede83 1031 return tk->tkr.clock == clock ? 0 : -1;
8524070b 1032}
75c5158f 1033
2d42244a 1034/**
cdba2ec5
JS
1035 * getrawmonotonic64 - Returns the raw monotonic time in a timespec64
1036 * @ts: pointer to the timespec64 to be set
2d42244a
JS
1037 *
1038 * Returns the raw monotonic time (completely un-modified by ntp)
1039 */
cdba2ec5 1040void getrawmonotonic64(struct timespec64 *ts)
2d42244a 1041{
3fdb14fd 1042 struct timekeeper *tk = &tk_core.timekeeper;
7d489d15 1043 struct timespec64 ts64;
2d42244a
JS
1044 unsigned long seq;
1045 s64 nsecs;
2d42244a
JS
1046
1047 do {
3fdb14fd 1048 seq = read_seqcount_begin(&tk_core.seq);
4e250fdd 1049 nsecs = timekeeping_get_ns_raw(tk);
7d489d15 1050 ts64 = tk->raw_time;
2d42244a 1051
3fdb14fd 1052 } while (read_seqcount_retry(&tk_core.seq, seq));
2d42244a 1053
7d489d15 1054 timespec64_add_ns(&ts64, nsecs);
cdba2ec5 1055 *ts = ts64;
2d42244a 1056}
cdba2ec5
JS
1057EXPORT_SYMBOL(getrawmonotonic64);
1058
2d42244a 1059
8524070b 1060/**
cf4fc6cb 1061 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
8524070b 1062 */
cf4fc6cb 1063int timekeeping_valid_for_hres(void)
8524070b 1064{
3fdb14fd 1065 struct timekeeper *tk = &tk_core.timekeeper;
8524070b 1066 unsigned long seq;
1067 int ret;
1068
1069 do {
3fdb14fd 1070 seq = read_seqcount_begin(&tk_core.seq);
8524070b 1071
d28ede83 1072 ret = tk->tkr.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
8524070b 1073
3fdb14fd 1074 } while (read_seqcount_retry(&tk_core.seq, seq));
8524070b 1075
1076 return ret;
1077}
1078
98962465
JH
1079/**
1080 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
98962465
JH
1081 */
1082u64 timekeeping_max_deferment(void)
1083{
3fdb14fd 1084 struct timekeeper *tk = &tk_core.timekeeper;
70471f2f
JS
1085 unsigned long seq;
1086 u64 ret;
42e71e81 1087
70471f2f 1088 do {
3fdb14fd 1089 seq = read_seqcount_begin(&tk_core.seq);
70471f2f 1090
d28ede83 1091 ret = tk->tkr.clock->max_idle_ns;
70471f2f 1092
3fdb14fd 1093 } while (read_seqcount_retry(&tk_core.seq, seq));
70471f2f
JS
1094
1095 return ret;
98962465
JH
1096}
1097
8524070b 1098/**
d4f587c6 1099 * read_persistent_clock - Return time from the persistent clock.
8524070b 1100 *
1101 * Weak dummy function for arches that do not yet support it.
d4f587c6
MS
1102 * Reads the time from the battery backed persistent clock.
1103 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
8524070b 1104 *
1105 * XXX - Do be sure to remove it once all arches implement it.
1106 */
52f5684c 1107void __weak read_persistent_clock(struct timespec *ts)
8524070b 1108{
d4f587c6
MS
1109 ts->tv_sec = 0;
1110 ts->tv_nsec = 0;
8524070b 1111}
1112
23970e38
MS
1113/**
1114 * read_boot_clock - Return time of the system start.
1115 *
1116 * Weak dummy function for arches that do not yet support it.
1117 * Function to read the exact time the system has been started.
1118 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
1119 *
1120 * XXX - Do be sure to remove it once all arches implement it.
1121 */
52f5684c 1122void __weak read_boot_clock(struct timespec *ts)
23970e38
MS
1123{
1124 ts->tv_sec = 0;
1125 ts->tv_nsec = 0;
1126}
1127
8524070b 1128/*
1129 * timekeeping_init - Initializes the clocksource and common timekeeping values
1130 */
1131void __init timekeeping_init(void)
1132{
3fdb14fd 1133 struct timekeeper *tk = &tk_core.timekeeper;
155ec602 1134 struct clocksource *clock;
8524070b 1135 unsigned long flags;
7d489d15
JS
1136 struct timespec64 now, boot, tmp;
1137 struct timespec ts;
31ade306 1138
7d489d15
JS
1139 read_persistent_clock(&ts);
1140 now = timespec_to_timespec64(ts);
1141 if (!timespec64_valid_strict(&now)) {
4e8b1452
JS
1142 pr_warn("WARNING: Persistent clock returned invalid value!\n"
1143 " Check your CMOS/BIOS settings.\n");
1144 now.tv_sec = 0;
1145 now.tv_nsec = 0;
31ade306
FT
1146 } else if (now.tv_sec || now.tv_nsec)
1147 persistent_clock_exist = true;
4e8b1452 1148
7d489d15
JS
1149 read_boot_clock(&ts);
1150 boot = timespec_to_timespec64(ts);
1151 if (!timespec64_valid_strict(&boot)) {
4e8b1452
JS
1152 pr_warn("WARNING: Boot clock returned invalid value!\n"
1153 " Check your CMOS/BIOS settings.\n");
1154 boot.tv_sec = 0;
1155 boot.tv_nsec = 0;
1156 }
8524070b 1157
9a7a71b1 1158 raw_spin_lock_irqsave(&timekeeper_lock, flags);
3fdb14fd 1159 write_seqcount_begin(&tk_core.seq);
06c017fd
JS
1160 ntp_init();
1161
f1b82746 1162 clock = clocksource_default_clock();
a0f7d48b
MS
1163 if (clock->enable)
1164 clock->enable(clock);
4e250fdd 1165 tk_setup_internals(tk, clock);
8524070b 1166
4e250fdd
JS
1167 tk_set_xtime(tk, &now);
1168 tk->raw_time.tv_sec = 0;
1169 tk->raw_time.tv_nsec = 0;
f519b1a2 1170 tk->base_raw.tv64 = 0;
1e75fa8b 1171 if (boot.tv_sec == 0 && boot.tv_nsec == 0)
4e250fdd 1172 boot = tk_xtime(tk);
1e75fa8b 1173
7d489d15 1174 set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
4e250fdd 1175 tk_set_wall_to_mono(tk, tmp);
6d0ef903 1176
f111adfd 1177 timekeeping_update(tk, TK_MIRROR);
48cdc135 1178
3fdb14fd 1179 write_seqcount_end(&tk_core.seq);
9a7a71b1 1180 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
8524070b 1181}
1182
8524070b 1183/* time in seconds when suspend began */
7d489d15 1184static struct timespec64 timekeeping_suspend_time;
8524070b 1185
304529b1
JS
1186/**
1187 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
1188 * @delta: pointer to a timespec delta value
1189 *
1190 * Takes a timespec offset measuring a suspend interval and properly
1191 * adds the sleep offset to the timekeeping variables.
1192 */
f726a697 1193static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
7d489d15 1194 struct timespec64 *delta)
304529b1 1195{
7d489d15 1196 if (!timespec64_valid_strict(delta)) {
6d9bcb62
JS
1197 printk_deferred(KERN_WARNING
1198 "__timekeeping_inject_sleeptime: Invalid "
1199 "sleep delta value!\n");
cb5de2f8
JS
1200 return;
1201 }
f726a697 1202 tk_xtime_add(tk, delta);
7d489d15 1203 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
47da70d3 1204 tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
5c83545f 1205 tk_debug_account_sleep_time(delta);
304529b1
JS
1206}
1207
304529b1 1208/**
04d90890 1209 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
1210 * @delta: pointer to a timespec64 delta value
304529b1
JS
1211 *
1212 * This hook is for architectures that cannot support read_persistent_clock
1213 * because their RTC/persistent clock is only accessible when irqs are enabled.
1214 *
1215 * This function should only be called by rtc_resume(), and allows
1216 * a suspend offset to be injected into the timekeeping values.
1217 */
04d90890 1218void timekeeping_inject_sleeptime64(struct timespec64 *delta)
304529b1 1219{
3fdb14fd 1220 struct timekeeper *tk = &tk_core.timekeeper;
92c1d3ed 1221 unsigned long flags;
304529b1 1222
31ade306
FT
1223 /*
1224 * Make sure we don't set the clock twice, as timekeeping_resume()
1225 * already did it
1226 */
1227 if (has_persistent_clock())
304529b1
JS
1228 return;
1229
9a7a71b1 1230 raw_spin_lock_irqsave(&timekeeper_lock, flags);
3fdb14fd 1231 write_seqcount_begin(&tk_core.seq);
70471f2f 1232
4e250fdd 1233 timekeeping_forward_now(tk);
304529b1 1234
04d90890 1235 __timekeeping_inject_sleeptime(tk, delta);
304529b1 1236
780427f0 1237 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
304529b1 1238
3fdb14fd 1239 write_seqcount_end(&tk_core.seq);
9a7a71b1 1240 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
304529b1
JS
1241
1242 /* signal hrtimers about time change */
1243 clock_was_set();
1244}
1245
8524070b 1246/**
1247 * timekeeping_resume - Resumes the generic timekeeping subsystem.
8524070b 1248 *
1249 * This is for the generic clocksource timekeeping.
1250 * xtime/wall_to_monotonic/jiffies/etc are
1251 * still managed by arch specific suspend/resume code.
1252 */
124cf911 1253void timekeeping_resume(void)
8524070b 1254{
3fdb14fd 1255 struct timekeeper *tk = &tk_core.timekeeper;
d28ede83 1256 struct clocksource *clock = tk->tkr.clock;
92c1d3ed 1257 unsigned long flags;
7d489d15
JS
1258 struct timespec64 ts_new, ts_delta;
1259 struct timespec tmp;
e445cf1c
FT
1260 cycle_t cycle_now, cycle_delta;
1261 bool suspendtime_found = false;
d4f587c6 1262
7d489d15
JS
1263 read_persistent_clock(&tmp);
1264 ts_new = timespec_to_timespec64(tmp);
8524070b 1265
adc78e6b 1266 clockevents_resume();
d10ff3fb
TG
1267 clocksource_resume();
1268
9a7a71b1 1269 raw_spin_lock_irqsave(&timekeeper_lock, flags);
3fdb14fd 1270 write_seqcount_begin(&tk_core.seq);
8524070b 1271
e445cf1c
FT
1272 /*
1273 * After system resumes, we need to calculate the suspended time and
1274 * compensate it for the OS time. There are 3 sources that could be
1275 * used: Nonstop clocksource during suspend, persistent clock and rtc
1276 * device.
1277 *
1278 * One specific platform may have 1 or 2 or all of them, and the
1279 * preference will be:
1280 * suspend-nonstop clocksource -> persistent clock -> rtc
1281 * The less preferred source will only be tried if there is no better
1282 * usable source. The rtc part is handled separately in rtc core code.
1283 */
d28ede83 1284 cycle_now = tk->tkr.read(clock);
e445cf1c 1285 if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
d28ede83 1286 cycle_now > tk->tkr.cycle_last) {
e445cf1c
FT
1287 u64 num, max = ULLONG_MAX;
1288 u32 mult = clock->mult;
1289 u32 shift = clock->shift;
1290 s64 nsec = 0;
1291
d28ede83
TG
1292 cycle_delta = clocksource_delta(cycle_now, tk->tkr.cycle_last,
1293 tk->tkr.mask);
e445cf1c
FT
1294
1295 /*
1296 * "cycle_delta * mutl" may cause 64 bits overflow, if the
1297 * suspended time is too long. In that case we need do the
1298 * 64 bits math carefully
1299 */
1300 do_div(max, mult);
1301 if (cycle_delta > max) {
1302 num = div64_u64(cycle_delta, max);
1303 nsec = (((u64) max * mult) >> shift) * num;
1304 cycle_delta -= num * max;
1305 }
1306 nsec += ((u64) cycle_delta * mult) >> shift;
1307
7d489d15 1308 ts_delta = ns_to_timespec64(nsec);
e445cf1c 1309 suspendtime_found = true;
7d489d15
JS
1310 } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
1311 ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
e445cf1c 1312 suspendtime_found = true;
8524070b 1313 }
e445cf1c
FT
1314
1315 if (suspendtime_found)
1316 __timekeeping_inject_sleeptime(tk, &ts_delta);
1317
1318 /* Re-base the last cycle value */
d28ede83 1319 tk->tkr.cycle_last = cycle_now;
4e250fdd 1320 tk->ntp_error = 0;
8524070b 1321 timekeeping_suspended = 0;
780427f0 1322 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
3fdb14fd 1323 write_seqcount_end(&tk_core.seq);
9a7a71b1 1324 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
8524070b 1325
1326 touch_softlockup_watchdog();
1327
1328 clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
1329
1330 /* Resume hrtimers */
b12a03ce 1331 hrtimers_resume();
8524070b 1332}
1333
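/*
 * User-space sketch of the overflow-aware conversion used above in
 * timekeeping_resume() for very long suspend intervals: when
 * cycle_delta * mult would no longer fit in 64 bits, the delta is consumed in
 * chunks of at most ULLONG_MAX / mult cycles. The counter parameters and
 * delta below are made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t safe_cyc2ns(uint64_t cycle_delta, uint32_t mult, uint32_t shift)
{
	uint64_t max = UINT64_MAX / mult;	/* largest safe chunk */
	uint64_t nsec = 0;

	if (cycle_delta > max) {
		uint64_t num = cycle_delta / max;

		nsec = ((max * mult) >> shift) * num;
		cycle_delta -= num * max;
	}
	nsec += (cycle_delta * mult) >> shift;
	return nsec;
}

int main(void)
{
	/* assume ~0.5 ns per cycle: mult = 1 << 23, shift = 24 */
	uint64_t ns = safe_cyc2ns(1ULL << 62, 1U << 23, 24);

	printf("%llu ns\n", (unsigned long long)ns);
	return 0;
}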
124cf911 1334int timekeeping_suspend(void)
8524070b 1335{
3fdb14fd 1336 struct timekeeper *tk = &tk_core.timekeeper;
92c1d3ed 1337 unsigned long flags;
7d489d15
JS
1338 struct timespec64 delta, delta_delta;
1339 static struct timespec64 old_delta;
1340 struct timespec tmp;
8524070b 1341
7d489d15
JS
1342 read_persistent_clock(&tmp);
1343 timekeeping_suspend_time = timespec_to_timespec64(tmp);
3be90950 1344
0d6bd995
ZM
1345 /*
1346 * On some systems the persistent_clock cannot be detected at
1347 * timekeeping_init by its return value, so if we see a valid
1348 * value returned, update the persistent_clock_exist flag.
1349 */
1350 if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
1351 persistent_clock_exist = true;
1352
9a7a71b1 1353 raw_spin_lock_irqsave(&timekeeper_lock, flags);
3fdb14fd 1354 write_seqcount_begin(&tk_core.seq);
4e250fdd 1355 timekeeping_forward_now(tk);
8524070b 1356 timekeeping_suspended = 1;
cb33217b
JS
1357
1358 /*
1359 * To avoid drift caused by repeated suspend/resumes,
1360 * which each can add ~1 second drift error,
1361 * try to compensate so the difference in system time
1362 * and persistent_clock time stays close to constant.
1363 */
7d489d15
JS
1364 delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
1365 delta_delta = timespec64_sub(delta, old_delta);
cb33217b
JS
1366 if (abs(delta_delta.tv_sec) >= 2) {
1367 /*
1368 * if delta_delta is too large, assume time correction
1369 * has occurred and set old_delta to the current delta.
1370 */
1371 old_delta = delta;
1372 } else {
1373 /* Otherwise try to adjust old_system to compensate */
1374 timekeeping_suspend_time =
7d489d15 1375 timespec64_add(timekeeping_suspend_time, delta_delta);
cb33217b 1376 }
330a1617
JS
1377
1378 timekeeping_update(tk, TK_MIRROR);
060407ae 1379 halt_fast_timekeeper(tk);
3fdb14fd 1380 write_seqcount_end(&tk_core.seq);
9a7a71b1 1381 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
8524070b 1382
1383 clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
c54a42b1 1384 clocksource_suspend();
adc78e6b 1385 clockevents_suspend();
8524070b 1386
1387 return 0;
1388}
1389
1390/* sysfs resume/suspend bits for timekeeping */
e1a85b2c 1391static struct syscore_ops timekeeping_syscore_ops = {
8524070b 1392 .resume = timekeeping_resume,
1393 .suspend = timekeeping_suspend,
8524070b 1394};
1395
e1a85b2c 1396static int __init timekeeping_init_ops(void)
8524070b 1397{
e1a85b2c
RW
1398 register_syscore_ops(&timekeeping_syscore_ops);
1399 return 0;
8524070b 1400}
e1a85b2c 1401device_initcall(timekeeping_init_ops);
8524070b 1402
1403/*
dc491596 1404 * Apply a multiplier adjustment to the timekeeper
8524070b 1405 */
dc491596
JS
1406static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
1407 s64 offset,
1408 bool negative,
1409 int adj_scale)
8524070b 1410{
dc491596
JS
1411 s64 interval = tk->cycle_interval;
1412 s32 mult_adj = 1;
8524070b 1413
dc491596
JS
1414 if (negative) {
1415 mult_adj = -mult_adj;
1416 interval = -interval;
1417 offset = -offset;
1d17d174 1418 }
dc491596
JS
1419 mult_adj <<= adj_scale;
1420 interval <<= adj_scale;
1421 offset <<= adj_scale;
8524070b 1422
c2bc1111
JS
1423 /*
1424 * So the following can be confusing.
1425 *
dc491596 1426 * To keep things simple, let's assume mult_adj == 1 for now.
c2bc1111 1427 *
dc491596 1428 * When mult_adj != 1, remember that the interval and offset values
c2bc1111
JS
1429 * have been appropriately scaled so the math is the same.
1430 *
1431 * The basic idea here is that we're increasing the multiplier
1432 * by one; this causes the xtime_interval to be incremented by
1433 * one cycle_interval. This is because:
1434 * xtime_interval = cycle_interval * mult
1435 * So if mult is being incremented by one:
1436 * xtime_interval = cycle_interval * (mult + 1)
1437 * It's the same as:
1438 * xtime_interval = (cycle_interval * mult) + cycle_interval
1439 * Which can be shortened to:
1440 * xtime_interval += cycle_interval
1441 *
1442 * So offset stores the non-accumulated cycles. Thus the current
1443 * time (in shifted nanoseconds) is:
1444 * now = (offset * adj) + xtime_nsec
1445 * Now, even though we're adjusting the clock frequency, we have
1446 * to keep time consistent. In other words, we can't jump back
1447 * in time, and we also want to avoid jumping forward in time.
1448 *
1449 * So given the same offset value, we need the time to be the same
1450 * both before and after the freq adjustment.
1451 * now = (offset * adj_1) + xtime_nsec_1
1452 * now = (offset * adj_2) + xtime_nsec_2
1453 * So:
1454 * (offset * adj_1) + xtime_nsec_1 =
1455 * (offset * adj_2) + xtime_nsec_2
1456 * And we know:
1457 * adj_2 = adj_1 + 1
1458 * So:
1459 * (offset * adj_1) + xtime_nsec_1 =
1460 * (offset * (adj_1+1)) + xtime_nsec_2
1461 * (offset * adj_1) + xtime_nsec_1 =
1462 * (offset * adj_1) + offset + xtime_nsec_2
1463 * Canceling the sides:
1464 * xtime_nsec_1 = offset + xtime_nsec_2
1465 * Which gives us:
1466 * xtime_nsec_2 = xtime_nsec_1 - offset
1467 * Which simplifies to:
1468 * xtime_nsec -= offset
1469 *
1470 * XXX - TODO: Doc ntp_error calculation.
1471 */
cb2aa634 1472 if ((mult_adj > 0) && (tk->tkr.mult + mult_adj < mult_adj)) {
6067dc5a 1473 /* NTP adjustment caused clocksource mult overflow */
1474 WARN_ON_ONCE(1);
1475 return;
1476 }
1477
dc491596 1478 tk->tkr.mult += mult_adj;
f726a697 1479 tk->xtime_interval += interval;
d28ede83 1480 tk->tkr.xtime_nsec -= offset;
f726a697 1481 tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
dc491596
JS
1482}
1483
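/*
 * Tiny user-space check of the invariant derived in the comment above:
 * bumping mult by mult_adj while subtracting the outstanding offset (scaled
 * the same way) from xtime_nsec leaves the readout
 * offset * mult + xtime_nsec unchanged. Arbitrary example numbers, shown here
 * for the mult_adj == 1 case.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t offset = 12345;		/* non-accumulated cycles */
	uint64_t mult = 1000;
	uint64_t xtime_nsec = 987654321;	/* shifted nanoseconds */

	uint64_t before = offset * mult + xtime_nsec;

	mult += 1;				/* mult_adj == 1 */
	xtime_nsec -= offset;			/* tk->tkr.xtime_nsec -= offset */

	assert(before == offset * mult + xtime_nsec);
	return 0;
}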
1484/*
1485 * Calculate the multiplier adjustment needed to match the frequency
1486 * specified by NTP
1487 */
1488static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
1489 s64 offset)
1490{
1491 s64 interval = tk->cycle_interval;
1492 s64 xinterval = tk->xtime_interval;
1493 s64 tick_error;
1494 bool negative;
1495 u32 adj;
1496
1497 /* Remove any current error adj from freq calculation */
1498 if (tk->ntp_err_mult)
1499 xinterval -= tk->cycle_interval;
1500
375f45b5
JS
1501 tk->ntp_tick = ntp_tick_length();
1502
dc491596
JS
1503 /* Calculate current error per tick */
1504 tick_error = ntp_tick_length() >> tk->ntp_error_shift;
1505 tick_error -= (xinterval + tk->xtime_remainder);
1506
1507 /* Don't worry about correcting it if it's small */
1508 if (likely((tick_error >= 0) && (tick_error <= interval)))
1509 return;
1510
1511 /* preserve the direction of correction */
1512 negative = (tick_error < 0);
1513
1514 /* Sort out the magnitude of the correction */
1515 tick_error = abs(tick_error);
1516 for (adj = 0; tick_error > interval; adj++)
1517 tick_error >>= 1;
1518
1519 /* scale the corrections */
1520 timekeeping_apply_adjustment(tk, offset, negative, adj);
1521}
1522
1523/*
1524 * Adjust the timekeeper's multiplier to the correct frequency
1525 * and also to reduce the accumulated error value.
1526 */
1527static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
1528{
1529 /* Correct for the current frequency error */
1530 timekeeping_freqadjust(tk, offset);
1531
1532 /* Next make a small adjustment to fix any cumulative error */
1533 if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
1534 tk->ntp_err_mult = 1;
1535 timekeeping_apply_adjustment(tk, offset, 0, 0);
1536 } else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
1537 /* Undo any existing error adjustment */
1538 timekeeping_apply_adjustment(tk, offset, 1, 0);
1539 tk->ntp_err_mult = 0;
1540 }
1541
1542 if (unlikely(tk->tkr.clock->maxadj &&
659bc17b 1543 (abs(tk->tkr.mult - tk->tkr.clock->mult)
1544 > tk->tkr.clock->maxadj))) {
dc491596
JS
1545 printk_once(KERN_WARNING
1546 "Adjusting %s more than 11%% (%ld vs %ld)\n",
1547 tk->tkr.clock->name, (long)tk->tkr.mult,
1548 (long)tk->tkr.clock->mult + tk->tkr.clock->maxadj);
1549 }
2a8c0883
JS
1550
1551 /*
1552 * It may be possible that when we entered this function, xtime_nsec
1553 * was very small. Further, if we're slightly speeding the clocksource
1554 * in the code above, it's possible the required corrective factor to
1555 * xtime_nsec could cause it to underflow.
1556 *
1557 * Now, since we already accumulated the second, we cannot simply roll
1558 * the accumulated second back, since the NTP subsystem has been
1559 * notified via second_overflow. So instead we push xtime_nsec forward
1560 * by the amount we underflowed, and add that amount into the error.
1561 *
1562 * We'll correct this error next time through this function, when
1563 * xtime_nsec is not as small.
1564 */
d28ede83
TG
1565 if (unlikely((s64)tk->tkr.xtime_nsec < 0)) {
1566 s64 neg = -(s64)tk->tkr.xtime_nsec;
1567 tk->tkr.xtime_nsec = 0;
f726a697 1568 tk->ntp_error += neg << tk->ntp_error_shift;
2a8c0883 1569 }
8524070b 1570}
1571
1f4f9487
JS
1572/**
1573 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
1574 *
1575 * Helper function that accumulates the nsecs greater than a second
1576 * from the xtime_nsec field to the xtime_sec field.
1577 * It also calls into the NTP code to handle leapsecond processing.
1578 *
1579 */
780427f0 1580static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
1f4f9487 1581{
d28ede83 1582 u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr.shift;
5258d3f2 1583 unsigned int clock_set = 0;
1f4f9487 1584
d28ede83 1585 while (tk->tkr.xtime_nsec >= nsecps) {
1f4f9487
JS
1586 int leap;
1587
d28ede83 1588 tk->tkr.xtime_nsec -= nsecps;
1f4f9487
JS
1589 tk->xtime_sec++;
1590
1591 /* Figure out if it's a leap sec and apply if needed */
1592 leap = second_overflow(tk->xtime_sec);
6d0ef903 1593 if (unlikely(leap)) {
7d489d15 1594 struct timespec64 ts;
6d0ef903
JS
1595
1596 tk->xtime_sec += leap;
1f4f9487 1597
6d0ef903
JS
1598 ts.tv_sec = leap;
1599 ts.tv_nsec = 0;
1600 tk_set_wall_to_mono(tk,
7d489d15 1601 timespec64_sub(tk->wall_to_monotonic, ts));
6d0ef903 1602
cc244dda
JS
1603 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
1604
5258d3f2 1605 clock_set = TK_CLOCK_WAS_SET;
6d0ef903 1606 }
1f4f9487 1607 }
5258d3f2 1608 return clock_set;
1f4f9487
JS
1609}
1610
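/*
 * User-space sketch of the shifted-nanosecond carry performed in
 * accumulate_nsecs_to_secs() above (leap second handling omitted):
 * xtime_nsec is stored in units of ns << shift, so one full second is
 * NSEC_PER_SEC << shift. The shift and starting values are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint32_t shift = 8;
	uint64_t nsecps = SKETCH_NSEC_PER_SEC << shift;
	uint64_t xtime_sec = 100;
	uint64_t xtime_nsec = (2 * SKETCH_NSEC_PER_SEC + 250) << shift;

	while (xtime_nsec >= nsecps) {
		xtime_nsec -= nsecps;
		xtime_sec++;
	}
	printf("sec=%llu nsec=%llu\n", (unsigned long long)xtime_sec,
	       (unsigned long long)(xtime_nsec >> shift));
	return 0;
}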
a092ff0f 1611/**
1612 * logarithmic_accumulation - shifted accumulation of cycles
1613 *
1614 * This function accumulates a shifted interval of cycles into
1615 * a shifted interval of nanoseconds. Allows for an O(log) accumulation
1616 * loop.
1617 *
1618 * Returns the unconsumed cycles.
1619 */
f726a697 1620static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
5258d3f2
JS
1621 u32 shift,
1622 unsigned int *clock_set)
a092ff0f 1623{
23a9537a 1624 cycle_t interval = tk->cycle_interval << shift;
deda2e81 1625 u64 raw_nsecs;
a092ff0f 1626
f726a697 1627 /* If the offset is smaller than a shifted interval, do nothing */
23a9537a 1628 if (offset < interval)
a092ff0f 1629 return offset;
1630
1631 /* Accumulate one shifted interval */
23a9537a 1632 offset -= interval;
d28ede83 1633 tk->tkr.cycle_last += interval;
a092ff0f 1634
d28ede83 1635 tk->tkr.xtime_nsec += tk->xtime_interval << shift;
5258d3f2 1636 *clock_set |= accumulate_nsecs_to_secs(tk);
a092ff0f 1637
deda2e81 1638 /* Accumulate raw time */
5b3900cd 1639 raw_nsecs = (u64)tk->raw_interval << shift;
f726a697 1640 raw_nsecs += tk->raw_time.tv_nsec;
c7dcf87a
JS
1641 if (raw_nsecs >= NSEC_PER_SEC) {
1642 u64 raw_secs = raw_nsecs;
1643 raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
f726a697 1644 tk->raw_time.tv_sec += raw_secs;
a092ff0f 1645 }
f726a697 1646 tk->raw_time.tv_nsec = raw_nsecs;
a092ff0f 1647
1648 /* Accumulate error between NTP and clock interval */
375f45b5 1649 tk->ntp_error += tk->ntp_tick << shift;
f726a697
JS
1650 tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
1651 (tk->ntp_error_shift + shift);
a092ff0f 1652
1653 return offset;
1654}
1655
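/*
 * Illustrative sketch (not part of timekeeping.c): one call with shift = 3
 * consumes cycle_interval << 3, i.e. eight tick intervals of cycles, in a
 * single step:
 *
 *	offset             -= tk->cycle_interval << 3;
 *	tk->tkr.cycle_last += tk->cycle_interval << 3;
 *	tk->tkr.xtime_nsec += tk->xtime_interval << 3;
 *
 * plus eight intervals' worth of raw time and NTP error, so accumulating
 * N pending ticks costs O(log N) calls rather than N.
 */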
8524070b 1656/**
1657 * update_wall_time - Uses the current clocksource to increment the wall time
1658 *
8524070b 1659 */
47a1b796 1660void update_wall_time(void)
8524070b 1661{
3fdb14fd 1662 struct timekeeper *real_tk = &tk_core.timekeeper;
48cdc135 1663 struct timekeeper *tk = &shadow_timekeeper;
8524070b 1664 cycle_t offset;
a092ff0f 1665 int shift = 0, maxshift;
5258d3f2 1666 unsigned int clock_set = 0;
70471f2f
JS
1667 unsigned long flags;
1668
9a7a71b1 1669 raw_spin_lock_irqsave(&timekeeper_lock, flags);
8524070b 1670
1671 /* Make sure we're fully resumed: */
1672 if (unlikely(timekeeping_suspended))
70471f2f 1673 goto out;
8524070b 1674
592913ec 1675#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
48cdc135 1676 offset = real_tk->cycle_interval;
592913ec 1677#else
d28ede83
TG
1678 offset = clocksource_delta(tk->tkr.read(tk->tkr.clock),
1679 tk->tkr.cycle_last, tk->tkr.mask);
8524070b 1680#endif
8524070b 1681
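/*
 * Illustrative sketch (assumed form, not part of timekeeping.c): the
 * clocksource_delta() helper used above lives in timekeeping_internal.h
 * and conceptually reduces to a masked subtraction, roughly
 *
 *	static inline cycle_t clocksource_delta(cycle_t now, cycle_t last,
 *						cycle_t mask)
 *	{
 *		return (now - last) & mask;
 *	}
 *
 * so counters narrower than 64 bits wrap correctly; a debug variant may
 * additionally clamp an apparently negative delta (an underflow) to 0,
 * which is the condition the sanity checks in this file try to catch.
 */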
bf2ac312 1682 /* Check if there's really nothing to do */
48cdc135 1683 if (offset < real_tk->cycle_interval)
bf2ac312
JS
1684 goto out;
1685
3c17ad19
JS
1686 /* Do some additional sanity checking */
1687 timekeeping_check_update(real_tk, offset);
1688
a092ff0f 1689 /*
1690 * With NO_HZ we may have to accumulate many cycle_intervals
1691 * (think "ticks") worth of time at once. To do this efficiently,
 1692 * we calculate the largest power-of-two multiple of cycle_interval
88b28adf 1693 * that is smaller than the offset. We then accumulate that
a092ff0f 1694 * chunk in one go, and then try to consume the next smaller
 1695 * power-of-two multiple.
8524070b 1696 */
4e250fdd 1697 shift = ilog2(offset) - ilog2(tk->cycle_interval);
a092ff0f 1698 shift = max(0, shift);
88b28adf 1699 /* Bound shift to one less than what overflows tick_length */
ea7cf49a 1700 maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
a092ff0f 1701 shift = min(shift, maxshift);
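	/*
	 * Worked example with made-up numbers (not part of timekeeping.c):
	 * if offset is about 300 cycle_intervals, the ilog2() difference
	 * gives shift = 8, so the loop below consumes 256 intervals in one
	 * call, then 32, then 8, then 4, stopping once less than one
	 * interval remains.  maxshift only caps shift so that
	 * ntp_tick_length() << shift cannot overflow 64 bits.
	 */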
4e250fdd 1702 while (offset >= tk->cycle_interval) {
5258d3f2
JS
1703 offset = logarithmic_accumulation(tk, offset, shift,
1704 &clock_set);
4e250fdd 1705 if (offset < tk->cycle_interval<<shift)
830ec045 1706 shift--;
8524070b 1707 }
1708
1709 /* correct the clock when NTP error is too big */
4e250fdd 1710 timekeeping_adjust(tk, offset);
8524070b 1711
6a867a39 1712 /*
92bb1fcf
JS
1713 * XXX This can be killed once everyone converts
1714 * to the new update_vsyscall.
1715 */
1716 old_vsyscall_fixup(tk);
8524070b 1717
6a867a39
JS
1718 /*
1719 * Finally, make sure that after the rounding
1e75fa8b 1720 * xtime_nsec isn't larger than NSEC_PER_SEC
6a867a39 1721 */
5258d3f2 1722 clock_set |= accumulate_nsecs_to_secs(tk);
83f57a11 1723
3fdb14fd 1724 write_seqcount_begin(&tk_core.seq);
48cdc135
TG
1725 /*
1726 * Update the real timekeeper.
1727 *
1728 * We could avoid this memcpy by switching pointers, but that
1729 * requires changes to all other timekeeper usage sites as
1730 * well, i.e. move the timekeeper pointer getter into the
1731 * spinlocked/seqcount protected sections. And we trade this
3fdb14fd 1732 * memcpy under the tk_core.seq against one before we start
48cdc135
TG
1733 * updating.
1734 */
1735 memcpy(real_tk, tk, sizeof(*tk));
5258d3f2 1736 timekeeping_update(real_tk, clock_set);
3fdb14fd 1737 write_seqcount_end(&tk_core.seq);
ca4523cd 1738out:
9a7a71b1 1739 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
47a1b796 1740 if (clock_set)
cab5e127
JS
 1741 /* Have to call the _delayed version, since we are in irq context */
1742 clock_was_set_delayed();
8524070b 1743}
7c3f1a57
TJ
1744
1745/**
d08c0cdd
JS
1746 * getboottime64 - Return the real time of system boot.
1747 * @ts: pointer to the timespec64 to be set
7c3f1a57 1748 *
d08c0cdd 1749 * Returns the wall-time of boot in a timespec64.
7c3f1a57
TJ
1750 *
1751 * This is based on the wall_to_monotonic offset and the total suspend
1752 * time. Calls to settimeofday will affect the value returned (which
1753 * basically means that however wrong your real time clock is at boot time,
1754 * you get the right time here).
1755 */
d08c0cdd 1756void getboottime64(struct timespec64 *ts)
7c3f1a57 1757{
3fdb14fd 1758 struct timekeeper *tk = &tk_core.timekeeper;
02cba159
TG
1759 ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
1760
d08c0cdd 1761 *ts = ktime_to_timespec64(t);
7c3f1a57 1762}
d08c0cdd 1763EXPORT_SYMBOL_GPL(getboottime64);
7c3f1a57 1764
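/*
 * Hypothetical usage sketch (this caller is not part of timekeeping.c):
 * read the boot time and print it as a wall-clock value.
 */
static void __maybe_unused example_print_boottime(void)
{
	struct timespec64 boot;

	getboottime64(&boot);
	pr_info("system booted at %lld.%09ld (wall time)\n",
		(long long)boot.tv_sec, boot.tv_nsec);
}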
17c38b74 1765unsigned long get_seconds(void)
1766{
3fdb14fd 1767 struct timekeeper *tk = &tk_core.timekeeper;
4e250fdd
JS
1768
1769 return tk->xtime_sec;
17c38b74 1770}
1771EXPORT_SYMBOL(get_seconds);
1772
da15cfda 1773struct timespec __current_kernel_time(void)
1774{
3fdb14fd 1775 struct timekeeper *tk = &tk_core.timekeeper;
4e250fdd 1776
7d489d15 1777 return timespec64_to_timespec(tk_xtime(tk));
da15cfda 1778}
17c38b74 1779
2c6b47de 1780struct timespec current_kernel_time(void)
1781{
3fdb14fd 1782 struct timekeeper *tk = &tk_core.timekeeper;
7d489d15 1783 struct timespec64 now;
2c6b47de 1784 unsigned long seq;
1785
1786 do {
3fdb14fd 1787 seq = read_seqcount_begin(&tk_core.seq);
83f57a11 1788
4e250fdd 1789 now = tk_xtime(tk);
3fdb14fd 1790 } while (read_seqcount_retry(&tk_core.seq, seq));
2c6b47de 1791
7d489d15 1792 return timespec64_to_timespec(now);
2c6b47de 1793}
2c6b47de 1794EXPORT_SYMBOL(current_kernel_time);
da15cfda 1795
334334b5 1796struct timespec64 get_monotonic_coarse64(void)
da15cfda 1797{
3fdb14fd 1798 struct timekeeper *tk = &tk_core.timekeeper;
7d489d15 1799 struct timespec64 now, mono;
da15cfda 1800 unsigned long seq;
1801
1802 do {
3fdb14fd 1803 seq = read_seqcount_begin(&tk_core.seq);
83f57a11 1804
4e250fdd
JS
1805 now = tk_xtime(tk);
1806 mono = tk->wall_to_monotonic;
3fdb14fd 1807 } while (read_seqcount_retry(&tk_core.seq, seq));
da15cfda 1808
7d489d15 1809 set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
da15cfda 1810 now.tv_nsec + mono.tv_nsec);
7d489d15 1811
334334b5 1812 return now;
da15cfda 1813}
871cf1e5
TH
1814
1815/*
d6ad4187 1816 * Must hold jiffies_lock
871cf1e5
TH
1817 */
1818void do_timer(unsigned long ticks)
1819{
1820 jiffies_64 += ticks;
871cf1e5
TH
1821 calc_global_load(ticks);
1822}
48cf76f7
TH
1823
1824/**
76f41088
JS
1825 * ktime_get_update_offsets_tick - hrtimer helper
1826 * @offs_real: pointer to storage for monotonic -> realtime offset
1827 * @offs_boot: pointer to storage for monotonic -> boottime offset
1828 * @offs_tai: pointer to storage for monotonic -> clock tai offset
1829 *
1830 * Returns monotonic time at last tick and various offsets
48cf76f7 1831 */
76f41088
JS
1832ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot,
1833 ktime_t *offs_tai)
48cf76f7 1834{
3fdb14fd 1835 struct timekeeper *tk = &tk_core.timekeeper;
76f41088 1836 unsigned int seq;
48064f5f
TG
1837 ktime_t base;
1838 u64 nsecs;
48cf76f7
TH
1839
1840 do {
3fdb14fd 1841 seq = read_seqcount_begin(&tk_core.seq);
76f41088 1842
d28ede83
TG
1843 base = tk->tkr.base_mono;
1844 nsecs = tk->tkr.xtime_nsec >> tk->tkr.shift;
48064f5f 1845
76f41088
JS
1846 *offs_real = tk->offs_real;
1847 *offs_boot = tk->offs_boot;
1848 *offs_tai = tk->offs_tai;
3fdb14fd 1849 } while (read_seqcount_retry(&tk_core.seq, seq));
76f41088 1850
48064f5f 1851 return ktime_add_ns(base, nsecs);
48cf76f7 1852}
f0af911a 1853
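/*
 * Hypothetical caller sketch (not part of timekeeping.c): recover another
 * clock base from the monotonic value plus the returned offsets, roughly
 * the way the hrtimer code uses this helper.
 */
static ktime_t __maybe_unused example_tai_at_last_tick(void)
{
	ktime_t off_real, off_boot, off_tai, mono;

	mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai);

	/* CLOCK_REALTIME would be mono + off_real, CLOCK_BOOTTIME mono + off_boot */
	return ktime_add(mono, off_tai);	/* CLOCK_TAI at the last tick */
}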
f6c06abf
TG
1854#ifdef CONFIG_HIGH_RES_TIMERS
1855/**
76f41088 1856 * ktime_get_update_offsets_now - hrtimer helper
f6c06abf
TG
1857 * @offs_real: pointer to storage for monotonic -> realtime offset
1858 * @offs_boot: pointer to storage for monotonic -> boottime offset
b7bc50e4 1859 * @offs_tai: pointer to storage for monotonic -> clock tai offset
f6c06abf
TG
1860 *
1861 * Returns current monotonic time and updates the offsets
b7bc50e4 1862 * Called from hrtimer_interrupt() or retrigger_next_event()
f6c06abf 1863 */
76f41088 1864ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
90adda98 1865 ktime_t *offs_tai)
f6c06abf 1866{
3fdb14fd 1867 struct timekeeper *tk = &tk_core.timekeeper;
f6c06abf 1868 unsigned int seq;
a37c0aad
TG
1869 ktime_t base;
1870 u64 nsecs;
f6c06abf
TG
1871
1872 do {
3fdb14fd 1873 seq = read_seqcount_begin(&tk_core.seq);
f6c06abf 1874
d28ede83 1875 base = tk->tkr.base_mono;
0e5ac3a8 1876 nsecs = timekeeping_get_ns(&tk->tkr);
f6c06abf 1877
4e250fdd
JS
1878 *offs_real = tk->offs_real;
1879 *offs_boot = tk->offs_boot;
90adda98 1880 *offs_tai = tk->offs_tai;
3fdb14fd 1881 } while (read_seqcount_retry(&tk_core.seq, seq));
f6c06abf 1882
a37c0aad 1883 return ktime_add_ns(base, nsecs);
f6c06abf
TG
1884}
1885#endif
1886
aa6f9c59
JS
1887/**
1888 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
1889 */
1890int do_adjtimex(struct timex *txc)
1891{
3fdb14fd 1892 struct timekeeper *tk = &tk_core.timekeeper;
06c017fd 1893 unsigned long flags;
7d489d15 1894 struct timespec64 ts;
4e8f8b34 1895 s32 orig_tai, tai;
e4085693
JS
1896 int ret;
1897
1898 /* Validate the data before disabling interrupts */
1899 ret = ntp_validate_timex(txc);
1900 if (ret)
1901 return ret;
1902
cef90377
JS
1903 if (txc->modes & ADJ_SETOFFSET) {
1904 struct timespec delta;
1905 delta.tv_sec = txc->time.tv_sec;
1906 delta.tv_nsec = txc->time.tv_usec;
1907 if (!(txc->modes & ADJ_NANO))
1908 delta.tv_nsec *= 1000;
1909 ret = timekeeping_inject_offset(&delta);
1910 if (ret)
1911 return ret;
1912 }
1913
d6d29896 1914 getnstimeofday64(&ts);
87ace39b 1915
06c017fd 1916 raw_spin_lock_irqsave(&timekeeper_lock, flags);
3fdb14fd 1917 write_seqcount_begin(&tk_core.seq);
06c017fd 1918
4e8f8b34 1919 orig_tai = tai = tk->tai_offset;
87ace39b 1920 ret = __do_adjtimex(txc, &ts, &tai);
aa6f9c59 1921
4e8f8b34
JS
1922 if (tai != orig_tai) {
1923 __timekeeping_set_tai_offset(tk, tai);
f55c0760 1924 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
4e8f8b34 1925 }
3fdb14fd 1926 write_seqcount_end(&tk_core.seq);
06c017fd
JS
1927 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1928
6fdda9a9
JS
1929 if (tai != orig_tai)
1930 clock_was_set();
1931
7bd36014
JS
1932 ntp_notify_cmos_timer();
1933
87ace39b
JS
1934 return ret;
1935}
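/*
 * Hypothetical in-kernel caller sketch (not part of timekeeping.c): step
 * the clock forward by 1.5 ms through the ADJ_SETOFFSET path handled
 * above.  With ADJ_NANO set, time.tv_usec is interpreted as nanoseconds.
 */
static int __maybe_unused example_step_clock(void)
{
	struct timex txc = {
		.modes		= ADJ_SETOFFSET | ADJ_NANO,
		.time.tv_sec	= 0,
		.time.tv_usec	= 1500000,	/* 1.5 ms, in nanoseconds */
	};

	return do_adjtimex(&txc);
}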
aa6f9c59
JS
1936
1937#ifdef CONFIG_NTP_PPS
1938/**
1939 * hardpps() - Accessor function to NTP __hardpps function
1940 */
1941void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
1942{
06c017fd
JS
1943 unsigned long flags;
1944
1945 raw_spin_lock_irqsave(&timekeeper_lock, flags);
3fdb14fd 1946 write_seqcount_begin(&tk_core.seq);
06c017fd 1947
aa6f9c59 1948 __hardpps(phase_ts, raw_ts);
06c017fd 1949
3fdb14fd 1950 write_seqcount_end(&tk_core.seq);
06c017fd 1951 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
aa6f9c59
JS
1952}
1953EXPORT_SYMBOL(hardpps);
1954#endif
1955
f0af911a
TH
1956/**
1957 * xtime_update() - advances the timekeeping infrastructure
 1958 * @ticks: number of ticks that have elapsed since the last call.
1959 *
1960 * Must be called with interrupts disabled.
1961 */
1962void xtime_update(unsigned long ticks)
1963{
d6ad4187 1964 write_seqlock(&jiffies_lock);
f0af911a 1965 do_timer(ticks);
d6ad4187 1966 write_sequnlock(&jiffies_lock);
47a1b796 1967 update_wall_time();
f0af911a 1968}