1 /*
2 * linux/kernel/time/timekeeping.c
3 *
4 * Kernel timekeeping code and accessor functions
5 *
6 * This code was moved from linux/kernel/timer.c.
7 * Please see that file for copyright and history logs.
8 *
9 */
10
11 #include <linux/timekeeper_internal.h>
12 #include <linux/module.h>
13 #include <linux/interrupt.h>
14 #include <linux/percpu.h>
15 #include <linux/init.h>
16 #include <linux/mm.h>
17 #include <linux/sched.h>
18 #include <linux/syscore_ops.h>
19 #include <linux/clocksource.h>
20 #include <linux/jiffies.h>
21 #include <linux/time.h>
22 #include <linux/tick.h>
23 #include <linux/stop_machine.h>
24 #include <linux/pvclock_gtod.h>
25 #include <linux/compiler.h>
26
27 #include "tick-internal.h"
28 #include "ntp_internal.h"
29 #include "timekeeping_internal.h"
30
31 #define TK_CLEAR_NTP (1 << 0)
32 #define TK_MIRROR (1 << 1)
33 #define TK_CLOCK_WAS_SET (1 << 2)
34
35 /*
36 * The most important data for readout fits into a single 64 byte
37 * cache line.
38 */
39 static struct {
40 seqcount_t seq;
41 struct timekeeper timekeeper;
42 } tk_core ____cacheline_aligned;
43
44 static DEFINE_RAW_SPINLOCK(timekeeper_lock);
45 static struct timekeeper shadow_timekeeper;
46
47 /**
48 * struct tk_fast - NMI safe timekeeper
49 * @seq: Sequence counter for protecting updates. The lowest bit
50 * is the index for the tk_read_base array
51 * @base: tk_read_base array. Access is indexed by the lowest bit of
52 * @seq.
53 *
54 * See update_fast_timekeeper() below.
55 */
56 struct tk_fast {
57 seqcount_t seq;
58 struct tk_read_base base[2];
59 };
60
61 static struct tk_fast tk_fast_mono ____cacheline_aligned;
62
63 /* flag for if timekeeping is suspended */
64 int __read_mostly timekeeping_suspended;
65
66 /* Flag for if there is a persistent clock on this platform */
67 bool __read_mostly persistent_clock_exist = false;
68
69 static inline void tk_normalize_xtime(struct timekeeper *tk)
70 {
71 while (tk->tkr.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr.shift)) {
72 tk->tkr.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr.shift;
73 tk->xtime_sec++;
74 }
75 }
76
77 static inline struct timespec64 tk_xtime(struct timekeeper *tk)
78 {
79 struct timespec64 ts;
80
81 ts.tv_sec = tk->xtime_sec;
82 ts.tv_nsec = (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
83 return ts;
84 }
85
86 static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
87 {
88 tk->xtime_sec = ts->tv_sec;
89 tk->tkr.xtime_nsec = (u64)ts->tv_nsec << tk->tkr.shift;
90 }
91
92 static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
93 {
94 tk->xtime_sec += ts->tv_sec;
95 tk->tkr.xtime_nsec += (u64)ts->tv_nsec << tk->tkr.shift;
96 tk_normalize_xtime(tk);
97 }
98
99 static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
100 {
101 struct timespec64 tmp;
102
103 /*
104 * Verify consistency of: offset_real = -wall_to_monotonic
105 * before modifying anything
106 */
107 set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
108 -tk->wall_to_monotonic.tv_nsec);
109 WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64);
110 tk->wall_to_monotonic = wtm;
111 set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
112 tk->offs_real = timespec64_to_ktime(tmp);
113 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
114 }
115
116 static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
117 {
118 tk->offs_boot = ktime_add(tk->offs_boot, delta);
119 }
120
121 /**
122 * tk_setup_internals - Set up internals to use clocksource clock.
123 *
124 * @tk: The target timekeeper to set up.
125 * @clock: Pointer to clocksource.
126 *
127 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
128 * pair and interval request.
129 *
130 * Unless you're the timekeeping code, you should not be using this!
131 */
132 static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
133 {
134 cycle_t interval;
135 u64 tmp, ntpinterval;
136 struct clocksource *old_clock;
137
138 old_clock = tk->tkr.clock;
139 tk->tkr.clock = clock;
140 tk->tkr.read = clock->read;
141 tk->tkr.mask = clock->mask;
142 tk->tkr.cycle_last = tk->tkr.read(clock);
143
144 /* Do the ns -> cycle conversion first, using original mult */
145 tmp = NTP_INTERVAL_LENGTH;
146 tmp <<= clock->shift;
147 ntpinterval = tmp;
148 tmp += clock->mult/2;
149 do_div(tmp, clock->mult);
150 if (tmp == 0)
151 tmp = 1;
152
153 interval = (cycle_t) tmp;
154 tk->cycle_interval = interval;
155
156 /* Go back from cycles -> shifted ns */
157 tk->xtime_interval = (u64) interval * clock->mult;
158 tk->xtime_remainder = ntpinterval - tk->xtime_interval;
159 tk->raw_interval =
160 ((u64) interval * clock->mult) >> clock->shift;
161
162 /* if changing clocks, convert xtime_nsec shift units */
163 if (old_clock) {
164 int shift_change = clock->shift - old_clock->shift;
165 if (shift_change < 0)
166 tk->tkr.xtime_nsec >>= -shift_change;
167 else
168 tk->tkr.xtime_nsec <<= shift_change;
169 }
170 tk->tkr.shift = clock->shift;
171
172 tk->ntp_error = 0;
173 tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
174 tk->ntp_tick = ntpinterval << tk->ntp_error_shift;
175
176 /*
177 * The timekeeper keeps its own mult values for the currently
178 * active clocksource. These values will be adjusted via NTP
179 * to counteract clock drifting.
180 */
181 tk->tkr.mult = clock->mult;
182 tk->ntp_err_mult = 0;
183 }
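/*
 * Editor's note: an illustrative sketch (not part of the original file)
 * of the fixed-point cycle -> nanosecond conversion these fields feed,
 * in the same form clocksource_cyc2ns() uses. The values are made up:
 * a 1 GHz clocksource at shift 22 has mult = 1 << 22, i.e. 1 ns/cycle.
 */
#if 0
static u64 example_cyc2ns(u64 cycles)
{
	u32 mult = 1 << 22;	/* hypothetical: 1 ns/cycle at shift 22 */
	u32 shift = 22;

	/* ns = (cycles * mult) >> shift */
	return (cycles * mult) >> shift;
}
#endif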
184
185 /* Timekeeper helper functions. */
186
187 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
188 static u32 default_arch_gettimeoffset(void) { return 0; }
189 u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
190 #else
191 static inline u32 arch_gettimeoffset(void) { return 0; }
192 #endif
193
194 static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
195 {
196 cycle_t cycle_now, delta;
197 s64 nsec;
198
199 /* read clocksource: */
200 cycle_now = tkr->read(tkr->clock);
201
202 /* calculate the delta since the last update_wall_time: */
203 delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
204
205 nsec = delta * tkr->mult + tkr->xtime_nsec;
206 nsec >>= tkr->shift;
207
208 /* If arch requires, add in arch_gettimeoffset() */
209 return nsec + arch_gettimeoffset();
210 }
211
212 static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
213 {
214 struct clocksource *clock = tk->tkr.clock;
215 cycle_t cycle_now, delta;
216 s64 nsec;
217
218 /* read clocksource: */
219 cycle_now = tk->tkr.read(clock);
220
221 /* calculate the delta since the last update_wall_time: */
222 delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
223
224 /* convert delta to nanoseconds. */
225 nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
226
227 /* If arch requires, add in arch_gettimeoffset() */
228 return nsec + arch_gettimeoffset();
229 }
230
231 /**
232 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
233 * @tk: The timekeeper from which we take the update
234 * @tkf: The fast timekeeper to update
235 * @tbase: The time base for the fast timekeeper (mono/raw)
236 *
237 * We want to use this from any context including NMI and tracing /
238 * instrumenting the timekeeping code itself.
239 *
240 * So we handle this differently than the other timekeeping accessor
241 * functions which retry when the sequence count has changed. The
242 * update side does:
243 *
244 * smp_wmb(); <- Ensure that the last base[1] update is visible
245 * tkf->seq++;
246 * smp_wmb(); <- Ensure that the seqcount update is visible
247 * update(tkf->base[0], tk);
248 * smp_wmb(); <- Ensure that the base[0] update is visible
249 * tkf->seq++;
250 * smp_wmb(); <- Ensure that the seqcount update is visible
251 * update(tkf->base[1], tk);
252 *
253 * The reader side does:
254 *
255 * do {
256 * seq = tkf->seq;
257 * smp_rmb();
258 * idx = seq & 0x01;
259 * now = now(tkf->base[idx]);
260 * smp_rmb();
261 * } while (seq != tkf->seq)
262 *
263 * As long as we update base[0] readers are forced off to
264 * base[1]. Once base[0] is updated readers are redirected to base[0]
265 * and the base[1] update takes place.
266 *
267 * So if an NMI hits the update of base[0] then it will use base[1]
268 * which is still consistent. In the worst case this can result in a
269 * slightly wrong timestamp (a few nanoseconds). See
270 * ktime_get_mono_fast_ns().
271 */
272 static void update_fast_timekeeper(struct timekeeper *tk)
273 {
274 struct tk_read_base *base = tk_fast_mono.base;
275
276 /* Force readers off to base[1] */
277 raw_write_seqcount_latch(&tk_fast_mono.seq);
278
279 /* Update base[0] */
280 memcpy(base, &tk->tkr, sizeof(*base));
281
282 /* Force readers back to base[0] */
283 raw_write_seqcount_latch(&tk_fast_mono.seq);
284
285 /* Update base[1] */
286 memcpy(base + 1, base, sizeof(*base));
287 }
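/*
 * Editor's note: the reader pattern from the comment above, spelled out
 * as a simplified, self-contained sketch (illustrative only; the real
 * reader is ktime_get_mono_fast_ns() below, built on the seqcount
 * helpers rather than on open-coded barriers).
 */
#if 0
struct latch_u64 {
	unsigned int seq;
	u64 data[2];
};

static u64 latch_read(struct latch_u64 *l)
{
	unsigned int seq;
	u64 val;

	do {
		seq = ACCESS_ONCE(l->seq);
		smp_rmb();
		val = l->data[seq & 0x01];	/* lowest bit picks the copy */
		smp_rmb();
	} while (seq != ACCESS_ONCE(l->seq));

	return val;
}
#endif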
288
289 /**
290 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
291 *
292 * This timestamp is not guaranteed to be monotonic across an update.
293 * The timestamp is calculated by:
294 *
295 * now = base_mono + clock_delta * slope
296 *
297 * So if the update lowers the slope, readers who are forced to the
298 * not yet updated second array are still using the old steeper slope.
299 *
300 * tmono
301 * ^
302 * | o n
303 * | o n
304 * | u
305 * | o
306 * |o
307 * |12345678---> reader order
308 *
309 * o = old slope
310 * u = update
311 * n = new slope
312 *
313 * So reader 6 will observe time going backwards versus reader 5.
314 *
315 * While other CPUs are likely to be able to observe that, the only way
316 * for a CPU local observation is when an NMI hits in the middle of
317 * the update. Timestamps taken from that NMI context might be ahead
318 * of the following timestamps. Callers need to be aware of that and
319 * deal with it.
320 */
321 u64 notrace ktime_get_mono_fast_ns(void)
322 {
323 struct tk_read_base *tkr;
324 unsigned int seq;
325 u64 now;
326
327 do {
328 seq = raw_read_seqcount(&tk_fast_mono.seq);
329 tkr = tk_fast_mono.base + (seq & 0x01);
330 now = ktime_to_ns(tkr->base_mono) + timekeeping_get_ns(tkr);
331
332 } while (read_seqcount_retry(&tk_fast_mono.seq, seq));
333 return now;
334 }
335 EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
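/*
 * Editor's note: hypothetical usage sketch. Unlike ktime_get(), the fast
 * accessor may be called from NMI context or from code that traces the
 * timekeeping core itself:
 */
#if 0
static void example_nmi_handler(void)
{
	u64 t0 = ktime_get_mono_fast_ns();

	/* ... NMI work ... */

	trace_printk("nmi section took %llu ns\n",
		     ktime_get_mono_fast_ns() - t0);
}
#endif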
336
337 #ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
338
339 static inline void update_vsyscall(struct timekeeper *tk)
340 {
341 struct timespec xt, wm;
342
343 xt = timespec64_to_timespec(tk_xtime(tk));
344 wm = timespec64_to_timespec(tk->wall_to_monotonic);
345 update_vsyscall_old(&xt, &wm, tk->tkr.clock, tk->tkr.mult,
346 tk->tkr.cycle_last);
347 }
348
349 static inline void old_vsyscall_fixup(struct timekeeper *tk)
350 {
351 s64 remainder;
352
353 /*
354 * Store only full nanoseconds into xtime_nsec after rounding
355 * it up and add the remainder to the error difference.
356 * XXX - This is necessary to avoid small 1ns inconsistencies caused
357 * by truncating the remainder in vsyscalls. However, it causes
358 * additional work to be done in timekeeping_adjust(). Once
359 * the vsyscall implementations are converted to use xtime_nsec
360 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
361 * users are removed, this can be killed.
362 */
363 remainder = tk->tkr.xtime_nsec & ((1ULL << tk->tkr.shift) - 1);
364 tk->tkr.xtime_nsec -= remainder;
365 tk->tkr.xtime_nsec += 1ULL << tk->tkr.shift;
366 tk->ntp_error += remainder << tk->ntp_error_shift;
367 tk->ntp_error -= (1ULL << tk->tkr.shift) << tk->ntp_error_shift;
368 }
369 #else
370 #define old_vsyscall_fixup(tk)
371 #endif
372
373 static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
374
375 static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
376 {
377 raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
378 }
379
380 /**
381 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
382 */
383 int pvclock_gtod_register_notifier(struct notifier_block *nb)
384 {
385 struct timekeeper *tk = &tk_core.timekeeper;
386 unsigned long flags;
387 int ret;
388
389 raw_spin_lock_irqsave(&timekeeper_lock, flags);
390 ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
391 update_pvclock_gtod(tk, true);
392 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
393
394 return ret;
395 }
396 EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
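/*
 * Editor's note: hedged sketch of a listener, e.g. a hypervisor backend
 * mirroring timekeeping data into a guest-visible clock page. All names
 * here are hypothetical.
 */
#if 0
static int example_gtod_update(struct notifier_block *nb,
			       unsigned long was_set, void *priv)
{
	struct timekeeper *tk = priv;

	/* publish tk-derived fields to the paravirt clock page here */
	return NOTIFY_OK;
}

static struct notifier_block example_gtod_nb = {
	.notifier_call = example_gtod_update,
};

/* pvclock_gtod_register_notifier(&example_gtod_nb); */
#endif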
397
398 /**
399 * pvclock_gtod_unregister_notifier - unregister a pvclock
400 * timedata update listener
401 */
402 int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
403 {
404 unsigned long flags;
405 int ret;
406
407 raw_spin_lock_irqsave(&timekeeper_lock, flags);
408 ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
409 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
410
411 return ret;
412 }
413 EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
414
415 /*
416 * Update the ktime_t based scalar nsec members of the timekeeper
417 */
418 static inline void tk_update_ktime_data(struct timekeeper *tk)
419 {
420 s64 nsec;
421
422 /*
423 * The xtime based monotonic readout is:
424 * nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
425 * The ktime based monotonic readout is:
426 * nsec = base_mono + now();
427 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
428 */
429 nsec = (s64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
430 nsec *= NSEC_PER_SEC;
431 nsec += tk->wall_to_monotonic.tv_nsec;
432 tk->tkr.base_mono = ns_to_ktime(nsec);
433
434 /* Update the monotonic raw base */
435 tk->base_raw = timespec64_to_ktime(tk->raw_time);
436 }
437
438 /* must hold timekeeper_lock */
439 static void timekeeping_update(struct timekeeper *tk, unsigned int action)
440 {
441 if (action & TK_CLEAR_NTP) {
442 tk->ntp_error = 0;
443 ntp_clear();
444 }
445
446 tk_update_ktime_data(tk);
447
448 update_vsyscall(tk);
449 update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
450
451 if (action & TK_MIRROR)
452 memcpy(&shadow_timekeeper, &tk_core.timekeeper,
453 sizeof(tk_core.timekeeper));
454
455 update_fast_timekeeper(tk);
456 }
457
458 /**
459 * timekeeping_forward_now - update clock to the current time
460 *
461 * Forward the current clock to update its state since the last call to
462 * update_wall_time(). This is useful before significant clock changes,
463 * as it avoids having to deal with this time offset explicitly.
464 */
465 static void timekeeping_forward_now(struct timekeeper *tk)
466 {
467 struct clocksource *clock = tk->tkr.clock;
468 cycle_t cycle_now, delta;
469 s64 nsec;
470
471 cycle_now = tk->tkr.read(clock);
472 delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
473 tk->tkr.cycle_last = cycle_now;
474
475 tk->tkr.xtime_nsec += delta * tk->tkr.mult;
476
477 /* If arch requires, add in arch_gettimeoffset() */
478 tk->tkr.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr.shift;
479
480 tk_normalize_xtime(tk);
481
482 nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
483 timespec64_add_ns(&tk->raw_time, nsec);
484 }
485
486 /**
487 * __getnstimeofday64 - Returns the time of day in a timespec64.
488 * @ts: pointer to the timespec to be set
489 *
490 * Updates the time of day in the timespec.
491 * Returns 0 on success, or -EAGAIN when suspended (timespec will be undefined).
492 */
493 int __getnstimeofday64(struct timespec64 *ts)
494 {
495 struct timekeeper *tk = &tk_core.timekeeper;
496 unsigned long seq;
497 s64 nsecs = 0;
498
499 do {
500 seq = read_seqcount_begin(&tk_core.seq);
501
502 ts->tv_sec = tk->xtime_sec;
503 nsecs = timekeeping_get_ns(&tk->tkr);
504
505 } while (read_seqcount_retry(&tk_core.seq, seq));
506
507 ts->tv_nsec = 0;
508 timespec64_add_ns(ts, nsecs);
509
510 /*
511 * Do not bail out early, in case there were callers still using
512 * the value, even in the face of the WARN_ON.
513 */
514 if (unlikely(timekeeping_suspended))
515 return -EAGAIN;
516 return 0;
517 }
518 EXPORT_SYMBOL(__getnstimeofday64);
519
520 /**
521 * getnstimeofday64 - Returns the time of day in a timespec64.
522 * @ts: pointer to the timespec to be set
523 *
524 * Returns the time of day in a timespec (WARN if suspended).
525 */
526 void getnstimeofday64(struct timespec64 *ts)
527 {
528 WARN_ON(__getnstimeofday64(ts));
529 }
530 EXPORT_SYMBOL(getnstimeofday64);
531
532 ktime_t ktime_get(void)
533 {
534 struct timekeeper *tk = &tk_core.timekeeper;
535 unsigned int seq;
536 ktime_t base;
537 s64 nsecs;
538
539 WARN_ON(timekeeping_suspended);
540
541 do {
542 seq = read_seqcount_begin(&tk_core.seq);
543 base = tk->tkr.base_mono;
544 nsecs = timekeeping_get_ns(&tk->tkr);
545
546 } while (read_seqcount_retry(&tk_core.seq, seq));
547
548 return ktime_add_ns(base, nsecs);
549 }
550 EXPORT_SYMBOL_GPL(ktime_get);
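/*
 * Editor's note: illustrative only. The typical pattern for timing an
 * interval on CLOCK_MONOTONIC with ktime_get():
 */
#if 0
static void example_measure(void)
{
	ktime_t start = ktime_get();

	do_some_work();		/* hypothetical workload */

	pr_info("work took %lld ns\n",
		ktime_to_ns(ktime_sub(ktime_get(), start)));
}
#endif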
551
552 static ktime_t *offsets[TK_OFFS_MAX] = {
553 [TK_OFFS_REAL] = &tk_core.timekeeper.offs_real,
554 [TK_OFFS_BOOT] = &tk_core.timekeeper.offs_boot,
555 [TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai,
556 };
557
558 ktime_t ktime_get_with_offset(enum tk_offsets offs)
559 {
560 struct timekeeper *tk = &tk_core.timekeeper;
561 unsigned int seq;
562 ktime_t base, *offset = offsets[offs];
563 s64 nsecs;
564
565 WARN_ON(timekeeping_suspended);
566
567 do {
568 seq = read_seqcount_begin(&tk_core.seq);
569 base = ktime_add(tk->tkr.base_mono, *offset);
570 nsecs = timekeeping_get_ns(&tk->tkr);
571
572 } while (read_seqcount_retry(&tk_core.seq, seq));
573
574 return ktime_add_ns(base, nsecs);
575
576 }
577 EXPORT_SYMBOL_GPL(ktime_get_with_offset);
578
579 /**
580 * ktime_mono_to_any() - convert monotonic time to any other time
581 * @tmono: time to convert.
582 * @offs: which offset to use
583 */
584 ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
585 {
586 ktime_t *offset = offsets[offs];
587 unsigned long seq;
588 ktime_t tconv;
589
590 do {
591 seq = read_seqcount_begin(&tk_core.seq);
592 tconv = ktime_add(tmono, *offset);
593 } while (read_seqcount_retry(&tk_core.seq, seq));
594
595 return tconv;
596 }
597 EXPORT_SYMBOL_GPL(ktime_mono_to_any);
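/*
 * Editor's note: minimal sketch, e.g. converting a monotonic stamp to
 * wall-clock (realtime) ktime:
 */
#if 0
static ktime_t example_mono_to_real(void)
{
	return ktime_mono_to_any(ktime_get(), TK_OFFS_REAL);
}
#endif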
598
599 /**
600 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
601 */
602 ktime_t ktime_get_raw(void)
603 {
604 struct timekeeper *tk = &tk_core.timekeeper;
605 unsigned int seq;
606 ktime_t base;
607 s64 nsecs;
608
609 do {
610 seq = read_seqcount_begin(&tk_core.seq);
611 base = tk->base_raw;
612 nsecs = timekeeping_get_ns_raw(tk);
613
614 } while (read_seqcount_retry(&tk_core.seq, seq));
615
616 return ktime_add_ns(base, nsecs);
617 }
618 EXPORT_SYMBOL_GPL(ktime_get_raw);
619
620 /**
621 * ktime_get_ts64 - get the monotonic clock in timespec64 format
622 * @ts: pointer to timespec variable
623 *
624 * The function calculates the monotonic clock from the realtime
625 * clock and the wall_to_monotonic offset and stores the result
626 * in normalized timespec format in the variable pointed to by @ts.
627 */
628 void ktime_get_ts64(struct timespec64 *ts)
629 {
630 struct timekeeper *tk = &tk_core.timekeeper;
631 struct timespec64 tomono;
632 s64 nsec;
633 unsigned int seq;
634
635 WARN_ON(timekeeping_suspended);
636
637 do {
638 seq = read_seqcount_begin(&tk_core.seq);
639 ts->tv_sec = tk->xtime_sec;
640 nsec = timekeeping_get_ns(&tk->tkr);
641 tomono = tk->wall_to_monotonic;
642
643 } while (read_seqcount_retry(&tk_core.seq, seq));
644
645 ts->tv_sec += tomono.tv_sec;
646 ts->tv_nsec = 0;
647 timespec64_add_ns(ts, nsec + tomono.tv_nsec);
648 }
649 EXPORT_SYMBOL_GPL(ktime_get_ts64);
650
651 #ifdef CONFIG_NTP_PPS
652
653 /**
654 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
655 * @ts_raw: pointer to the timespec to be set to raw monotonic time
656 * @ts_real: pointer to the timespec to be set to the time of day
657 *
658 * This function reads both the time of day and raw monotonic time at the
659 * same time atomically and stores the resulting timestamps in timespec
660 * format.
661 */
662 void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
663 {
664 struct timekeeper *tk = &tk_core.timekeeper;
665 unsigned long seq;
666 s64 nsecs_raw, nsecs_real;
667
668 WARN_ON_ONCE(timekeeping_suspended);
669
670 do {
671 seq = read_seqcount_begin(&tk_core.seq);
672
673 *ts_raw = timespec64_to_timespec(tk->raw_time);
674 ts_real->tv_sec = tk->xtime_sec;
675 ts_real->tv_nsec = 0;
676
677 nsecs_raw = timekeeping_get_ns_raw(tk);
678 nsecs_real = timekeeping_get_ns(&tk->tkr);
679
680 } while (read_seqcount_retry(&tk_core.seq, seq));
681
682 timespec_add_ns(ts_raw, nsecs_raw);
683 timespec_add_ns(ts_real, nsecs_real);
684 }
685 EXPORT_SYMBOL(getnstime_raw_and_real);
686
687 #endif /* CONFIG_NTP_PPS */
688
689 /**
690 * do_gettimeofday - Returns the time of day in a timeval
691 * @tv: pointer to the timeval to be set
692 *
693 * NOTE: Users should be converted to using getnstimeofday()
694 */
695 void do_gettimeofday(struct timeval *tv)
696 {
697 struct timespec64 now;
698
699 getnstimeofday64(&now);
700 tv->tv_sec = now.tv_sec;
701 tv->tv_usec = now.tv_nsec/1000;
702 }
703 EXPORT_SYMBOL(do_gettimeofday);
704
705 /**
706 * do_settimeofday64 - Sets the time of day.
707 * @ts: pointer to the timespec64 variable containing the new time
708 *
709 * Sets the time of day to the new time, updates NTP and notifies hrtimers
710 */
711 int do_settimeofday64(const struct timespec64 *ts)
712 {
713 struct timekeeper *tk = &tk_core.timekeeper;
714 struct timespec64 ts_delta, xt;
715 unsigned long flags;
716
717 if (!timespec64_valid_strict(ts))
718 return -EINVAL;
719
720 raw_spin_lock_irqsave(&timekeeper_lock, flags);
721 write_seqcount_begin(&tk_core.seq);
722
723 timekeeping_forward_now(tk);
724
725 xt = tk_xtime(tk);
726 ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
727 ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;
728
729 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
730
731 tk_set_xtime(tk, ts);
732
733 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
734
735 write_seqcount_end(&tk_core.seq);
736 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
737
738 /* signal hrtimers about time change */
739 clock_was_set();
740
741 return 0;
742 }
743 EXPORT_SYMBOL(do_settimeofday64);
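/*
 * Editor's note: minimal caller sketch (hypothetical), e.g. a driver
 * restoring wall time from some external source:
 */
#if 0
static void example_set_wall_clock(time64_t secs)
{
	struct timespec64 ts = { .tv_sec = secs, .tv_nsec = 0 };

	if (do_settimeofday64(&ts))
		pr_warn("example: rejected invalid time\n");
}
#endif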
744
745 /**
746 * timekeeping_inject_offset - Adds or subtracts from the current time.
747 * @ts: pointer to the timespec variable containing the offset
748 *
749 * Adds or subtracts an offset value from the current time.
750 */
751 int timekeeping_inject_offset(struct timespec *ts)
752 {
753 struct timekeeper *tk = &tk_core.timekeeper;
754 unsigned long flags;
755 struct timespec64 ts64, tmp;
756 int ret = 0;
757
758 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
759 return -EINVAL;
760
761 ts64 = timespec_to_timespec64(*ts);
762
763 raw_spin_lock_irqsave(&timekeeper_lock, flags);
764 write_seqcount_begin(&tk_core.seq);
765
766 timekeeping_forward_now(tk);
767
768 /* Make sure the proposed value is valid */
769 tmp = timespec64_add(tk_xtime(tk), ts64);
770 if (!timespec64_valid_strict(&tmp)) {
771 ret = -EINVAL;
772 goto error;
773 }
774
775 tk_xtime_add(tk, &ts64);
776 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));
777
778 error: /* even if we error out, we forwarded the time, so call update */
779 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
780
781 write_seqcount_end(&tk_core.seq);
782 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
783
784 /* signal hrtimers about time change */
785 clock_was_set();
786
787 return ret;
788 }
789 EXPORT_SYMBOL(timekeeping_inject_offset);
790
791
792 /**
793 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
794 *
795 */
796 s32 timekeeping_get_tai_offset(void)
797 {
798 struct timekeeper *tk = &tk_core.timekeeper;
799 unsigned int seq;
800 s32 ret;
801
802 do {
803 seq = read_seqcount_begin(&tk_core.seq);
804 ret = tk->tai_offset;
805 } while (read_seqcount_retry(&tk_core.seq, seq));
806
807 return ret;
808 }
809
810 /**
811 * __timekeeping_set_tai_offset - Lock free worker function
812 *
813 */
814 static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
815 {
816 tk->tai_offset = tai_offset;
817 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
818 }
819
820 /**
821 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
822 *
823 */
824 void timekeeping_set_tai_offset(s32 tai_offset)
825 {
826 struct timekeeper *tk = &tk_core.timekeeper;
827 unsigned long flags;
828
829 raw_spin_lock_irqsave(&timekeeper_lock, flags);
830 write_seqcount_begin(&tk_core.seq);
831 __timekeeping_set_tai_offset(tk, tai_offset);
832 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
833 write_seqcount_end(&tk_core.seq);
834 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
835 clock_was_set();
836 }
837
838 /**
839 * change_clocksource - Swaps clocksources if a new one is available
840 *
841 * Accumulates current time interval and initializes new clocksource
842 */
843 static int change_clocksource(void *data)
844 {
845 struct timekeeper *tk = &tk_core.timekeeper;
846 struct clocksource *new, *old;
847 unsigned long flags;
848
849 new = (struct clocksource *) data;
850
851 raw_spin_lock_irqsave(&timekeeper_lock, flags);
852 write_seqcount_begin(&tk_core.seq);
853
854 timekeeping_forward_now(tk);
855 /*
856 * If the cs is in module, get a module reference. Succeeds
857 * for built-in code (owner == NULL) as well.
858 */
859 if (try_module_get(new->owner)) {
860 if (!new->enable || new->enable(new) == 0) {
861 old = tk->tkr.clock;
862 tk_setup_internals(tk, new);
863 if (old->disable)
864 old->disable(old);
865 module_put(old->owner);
866 } else {
867 module_put(new->owner);
868 }
869 }
870 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
871
872 write_seqcount_end(&tk_core.seq);
873 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
874
875 return 0;
876 }
877
878 /**
879 * timekeeping_notify - Install a new clock source
880 * @clock: pointer to the clock source
881 *
882 * This function is called from clocksource.c after a new, better clock
883 * source has been registered. The caller holds the clocksource_mutex.
884 */
885 int timekeeping_notify(struct clocksource *clock)
886 {
887 struct timekeeper *tk = &tk_core.timekeeper;
888
889 if (tk->tkr.clock == clock)
890 return 0;
891 stop_machine(change_clocksource, clock, NULL);
892 tick_clock_notify();
893 return tk->tkr.clock == clock ? 0 : -1;
894 }
895
896 /**
897 * getrawmonotonic - Returns the raw monotonic time in a timespec
898 * @ts: pointer to the timespec to be set
899 *
900 * Returns the raw monotonic time (completely un-modified by ntp)
901 */
902 void getrawmonotonic(struct timespec *ts)
903 {
904 struct timekeeper *tk = &tk_core.timekeeper;
905 struct timespec64 ts64;
906 unsigned long seq;
907 s64 nsecs;
908
909 do {
910 seq = read_seqcount_begin(&tk_core.seq);
911 nsecs = timekeeping_get_ns_raw(tk);
912 ts64 = tk->raw_time;
913
914 } while (read_seqcount_retry(&tk_core.seq, seq));
915
916 timespec64_add_ns(&ts64, nsecs);
917 *ts = timespec64_to_timespec(ts64);
918 }
919 EXPORT_SYMBOL(getrawmonotonic);
920
921 /**
922 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
923 */
924 int timekeeping_valid_for_hres(void)
925 {
926 struct timekeeper *tk = &tk_core.timekeeper;
927 unsigned long seq;
928 int ret;
929
930 do {
931 seq = read_seqcount_begin(&tk_core.seq);
932
933 ret = tk->tkr.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
934
935 } while (read_seqcount_retry(&tk_core.seq, seq));
936
937 return ret;
938 }
939
940 /**
941 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
942 */
943 u64 timekeeping_max_deferment(void)
944 {
945 struct timekeeper *tk = &tk_core.timekeeper;
946 unsigned long seq;
947 u64 ret;
948
949 do {
950 seq = read_seqcount_begin(&tk_core.seq);
951
952 ret = tk->tkr.clock->max_idle_ns;
953
954 } while (read_seqcount_retry(&tk_core.seq, seq));
955
956 return ret;
957 }
958
959 /**
960 * read_persistent_clock - Return time from the persistent clock.
961 *
962 * Weak dummy function for arches that do not yet support it.
963 * Reads the time from the battery backed persistent clock.
964 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
965 *
966 * XXX - Do be sure to remove it once all arches implement it.
967 */
968 void __weak read_persistent_clock(struct timespec *ts)
969 {
970 ts->tv_sec = 0;
971 ts->tv_nsec = 0;
972 }
973
974 /**
975 * read_boot_clock - Return time of the system start.
976 *
977 * Weak dummy function for arches that do not yet support it.
978 * Function to read the exact time the system has been started.
979 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
980 *
981 * XXX - Do be sure to remove it once all arches implement it.
982 */
983 void __weak read_boot_clock(struct timespec *ts)
984 {
985 ts->tv_sec = 0;
986 ts->tv_nsec = 0;
987 }
988
989 /*
990 * timekeeping_init - Initializes the clocksource and common timekeeping values
991 */
992 void __init timekeeping_init(void)
993 {
994 struct timekeeper *tk = &tk_core.timekeeper;
995 struct clocksource *clock;
996 unsigned long flags;
997 struct timespec64 now, boot, tmp;
998 struct timespec ts;
999
1000 read_persistent_clock(&ts);
1001 now = timespec_to_timespec64(ts);
1002 if (!timespec64_valid_strict(&now)) {
1003 pr_warn("WARNING: Persistent clock returned invalid value!\n"
1004 " Check your CMOS/BIOS settings.\n");
1005 now.tv_sec = 0;
1006 now.tv_nsec = 0;
1007 } else if (now.tv_sec || now.tv_nsec)
1008 persistent_clock_exist = true;
1009
1010 read_boot_clock(&ts);
1011 boot = timespec_to_timespec64(ts);
1012 if (!timespec64_valid_strict(&boot)) {
1013 pr_warn("WARNING: Boot clock returned invalid value!\n"
1014 " Check your CMOS/BIOS settings.\n");
1015 boot.tv_sec = 0;
1016 boot.tv_nsec = 0;
1017 }
1018
1019 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1020 write_seqcount_begin(&tk_core.seq);
1021 ntp_init();
1022
1023 clock = clocksource_default_clock();
1024 if (clock->enable)
1025 clock->enable(clock);
1026 tk_setup_internals(tk, clock);
1027
1028 tk_set_xtime(tk, &now);
1029 tk->raw_time.tv_sec = 0;
1030 tk->raw_time.tv_nsec = 0;
1031 tk->base_raw.tv64 = 0;
1032 if (boot.tv_sec == 0 && boot.tv_nsec == 0)
1033 boot = tk_xtime(tk);
1034
1035 set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
1036 tk_set_wall_to_mono(tk, tmp);
1037
1038 timekeeping_update(tk, TK_MIRROR);
1039
1040 write_seqcount_end(&tk_core.seq);
1041 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1042 }
1043
1044 /* time when suspend began */
1045 static struct timespec64 timekeeping_suspend_time;
1046
1047 /**
1048 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
1049 * @delta: pointer to a timespec delta value
1050 *
1051 * Takes a timespec offset measuring a suspend interval and properly
1052 * adds the sleep offset to the timekeeping variables.
1053 */
1054 static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
1055 struct timespec64 *delta)
1056 {
1057 if (!timespec64_valid_strict(delta)) {
1058 printk_deferred(KERN_WARNING
1059 "__timekeeping_inject_sleeptime: Invalid "
1060 "sleep delta value!\n");
1061 return;
1062 }
1063 tk_xtime_add(tk, delta);
1064 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
1065 tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
1066 tk_debug_account_sleep_time(delta);
1067 }
1068
1069 /**
1070 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
1071 * @delta: pointer to a timespec64 delta value
1072 *
1073 * This hook is for architectures that cannot support read_persistent_clock
1074 * because their RTC/persistent clock is only accessible when irqs are enabled.
1075 *
1076 * This function should only be called by rtc_resume(), and allows
1077 * a suspend offset to be injected into the timekeeping values.
1078 */
1079 void timekeeping_inject_sleeptime64(struct timespec64 *delta)
1080 {
1081 struct timekeeper *tk = &tk_core.timekeeper;
1082 unsigned long flags;
1083
1084 /*
1085 * Make sure we don't set the clock twice, as timekeeping_resume()
1086 * already did it
1087 */
1088 if (has_persistent_clock())
1089 return;
1090
1091 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1092 write_seqcount_begin(&tk_core.seq);
1093
1094 timekeeping_forward_now(tk);
1095
1096 __timekeeping_inject_sleeptime(tk, delta);
1097
1098 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1099
1100 write_seqcount_end(&tk_core.seq);
1101 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1102
1103 /* signal hrtimers about time change */
1104 clock_was_set();
1105 }
1106
1107 /**
1108 * timekeeping_resume - Resumes the generic timekeeping subsystem.
1109 *
1110 * This is for the generic clocksource timekeeping.
1111 * xtime/wall_to_monotonic/jiffies/etc are
1112 * still managed by arch specific suspend/resume code.
1113 */
1114 static void timekeeping_resume(void)
1115 {
1116 struct timekeeper *tk = &tk_core.timekeeper;
1117 struct clocksource *clock = tk->tkr.clock;
1118 unsigned long flags;
1119 struct timespec64 ts_new, ts_delta;
1120 struct timespec tmp;
1121 cycle_t cycle_now, cycle_delta;
1122 bool suspendtime_found = false;
1123
1124 read_persistent_clock(&tmp);
1125 ts_new = timespec_to_timespec64(tmp);
1126
1127 clockevents_resume();
1128 clocksource_resume();
1129
1130 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1131 write_seqcount_begin(&tk_core.seq);
1132
1133 /*
1134 * After system resumes, we need to calculate the suspended time and
1135 * compensate it for the OS time. There are 3 sources that could be
1136 * used: Nonstop clocksource during suspend, persistent clock and rtc
1137 * device.
1138 *
1139 * One specific platform may have 1 or 2 or all of them, and the
1140 * preference will be:
1141 * suspend-nonstop clocksource -> persistent clock -> rtc
1142 * The less preferred source will only be tried if there is no better
1143 * usable source. The rtc part is handled separately in rtc core code.
1144 */
1145 cycle_now = tk->tkr.read(clock);
1146 if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
1147 cycle_now > tk->tkr.cycle_last) {
1148 u64 num, max = ULLONG_MAX;
1149 u32 mult = clock->mult;
1150 u32 shift = clock->shift;
1151 s64 nsec = 0;
1152
1153 cycle_delta = clocksource_delta(cycle_now, tk->tkr.cycle_last,
1154 tk->tkr.mask);
1155
1156 /*
1157 * "cycle_delta * mutl" may cause 64 bits overflow, if the
1158 * suspended time is too long. In that case we need do the
1159 * 64 bits math carefully
1160 */
1161 do_div(max, mult);
1162 if (cycle_delta > max) {
1163 num = div64_u64(cycle_delta, max);
1164 nsec = (((u64) max * mult) >> shift) * num;
1165 cycle_delta -= num * max;
1166 }
1167 nsec += ((u64) cycle_delta * mult) >> shift;
1168
1169 ts_delta = ns_to_timespec64(nsec);
1170 suspendtime_found = true;
1171 } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
1172 ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
1173 suspendtime_found = true;
1174 }
1175
1176 if (suspendtime_found)
1177 __timekeeping_inject_sleeptime(tk, &ts_delta);
1178
1179 /* Re-base the last cycle value */
1180 tk->tkr.cycle_last = cycle_now;
1181 tk->ntp_error = 0;
1182 timekeeping_suspended = 0;
1183 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1184 write_seqcount_end(&tk_core.seq);
1185 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1186
1187 touch_softlockup_watchdog();
1188
1189 clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
1190
1191 /* Resume hrtimers */
1192 hrtimers_resume();
1193 }
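/*
 * Editor's note: a worked example of the overflow guard above, with
 * made-up numbers. With mult = 1 << 22 the u64 product overflows once
 * cycle_delta exceeds max = ULLONG_MAX / mult = 2^42 cycles (roughly an
 * hour at 1 GHz). Longer suspend deltas are therefore converted in
 * chunks of max cycles, with only the remainder going through the plain
 * (cycle_delta * mult) >> shift path.
 */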
1194
1195 static int timekeeping_suspend(void)
1196 {
1197 struct timekeeper *tk = &tk_core.timekeeper;
1198 unsigned long flags;
1199 struct timespec64 delta, delta_delta;
1200 static struct timespec64 old_delta;
1201 struct timespec tmp;
1202
1203 read_persistent_clock(&tmp);
1204 timekeeping_suspend_time = timespec_to_timespec64(tmp);
1205
1206 /*
1207 * On some systems the persistent clock cannot be detected at
1208 * timekeeping_init by its return value, so if we see a valid
1209 * value returned, update the persistent_clock_exist flag.
1210 */
1211 if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
1212 persistent_clock_exist = true;
1213
1214 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1215 write_seqcount_begin(&tk_core.seq);
1216 timekeeping_forward_now(tk);
1217 timekeeping_suspended = 1;
1218
1219 /*
1220 * To avoid drift caused by repeated suspend/resumes,
1221 * which each can add ~1 second drift error,
1222 * try to compensate so the difference in system time
1223 * and persistent_clock time stays close to constant.
1224 */
1225 delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
1226 delta_delta = timespec64_sub(delta, old_delta);
1227 if (abs(delta_delta.tv_sec) >= 2) {
1228 /*
1229 * if delta_delta is too large, assume time correction
1230 * has occurred and set old_delta to the current delta.
1231 */
1232 old_delta = delta;
1233 } else {
1234 /* Otherwise try to adjust the recorded suspend time to compensate */
1235 timekeeping_suspend_time =
1236 timespec64_add(timekeeping_suspend_time, delta_delta);
1237 }
1238
1239 timekeeping_update(tk, TK_MIRROR);
1240 write_seqcount_end(&tk_core.seq);
1241 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1242
1243 clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
1244 clocksource_suspend();
1245 clockevents_suspend();
1246
1247 return 0;
1248 }
1249
1250 /* sysfs resume/suspend bits for timekeeping */
1251 static struct syscore_ops timekeeping_syscore_ops = {
1252 .resume = timekeeping_resume,
1253 .suspend = timekeeping_suspend,
1254 };
1255
1256 static int __init timekeeping_init_ops(void)
1257 {
1258 register_syscore_ops(&timekeeping_syscore_ops);
1259 return 0;
1260 }
1261 device_initcall(timekeeping_init_ops);
1262
1263 /*
1264 * Apply a multiplier adjustment to the timekeeper
1265 */
1266 static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
1267 s64 offset,
1268 bool negative,
1269 int adj_scale)
1270 {
1271 s64 interval = tk->cycle_interval;
1272 s32 mult_adj = 1;
1273
1274 if (negative) {
1275 mult_adj = -mult_adj;
1276 interval = -interval;
1277 offset = -offset;
1278 }
1279 mult_adj <<= adj_scale;
1280 interval <<= adj_scale;
1281 offset <<= adj_scale;
1282
1283 /*
1284 * So the following can be confusing.
1285 *
1286 * To keep things simple, let's assume mult_adj == 1 for now.
1287 *
1288 * When mult_adj != 1, remember that the interval and offset values
1289 * have been appropriately scaled so the math is the same.
1290 *
1291 * The basic idea here is that we're increasing the multiplier
1292 * by one, this causes the xtime_interval to be incremented by
1293 * one cycle_interval. This is because:
1294 * xtime_interval = cycle_interval * mult
1295 * So if mult is being incremented by one:
1296 * xtime_interval = cycle_interval * (mult + 1)
1297 * It's the same as:
1298 * xtime_interval = (cycle_interval * mult) + cycle_interval
1299 * Which can be shortened to:
1300 * xtime_interval += cycle_interval
1301 *
1302 * So offset stores the non-accumulated cycles. Thus the current
1303 * time (in shifted nanoseconds) is:
1304 * now = (offset * adj) + xtime_nsec
1305 * Now, even though we're adjusting the clock frequency, we have
1306 * to keep time consistent. In other words, we can't jump back
1307 * in time, and we also want to avoid jumping forward in time.
1308 *
1309 * So given the same offset value, we need the time to be the same
1310 * both before and after the freq adjustment.
1311 * now = (offset * adj_1) + xtime_nsec_1
1312 * now = (offset * adj_2) + xtime_nsec_2
1313 * So:
1314 * (offset * adj_1) + xtime_nsec_1 =
1315 * (offset * adj_2) + xtime_nsec_2
1316 * And we know:
1317 * adj_2 = adj_1 + 1
1318 * So:
1319 * (offset * adj_1) + xtime_nsec_1 =
1320 * (offset * (adj_1+1)) + xtime_nsec_2
1321 * (offset * adj_1) + xtime_nsec_1 =
1322 * (offset * adj_1) + offset + xtime_nsec_2
1323 * Canceling the sides:
1324 * xtime_nsec_1 = offset + xtime_nsec_2
1325 * Which gives us:
1326 * xtime_nsec_2 = xtime_nsec_1 - offset
1327 * Which simplifies to:
1328 * xtime_nsec -= offset
1329 *
1330 * XXX - TODO: Doc ntp_error calculation.
1331 */
1332 if (tk->tkr.mult + mult_adj < mult_adj) {
1333 /* NTP adjustment caused clocksource mult overflow */
1334 WARN_ON_ONCE(1);
1335 return;
1336 }
1337
1338 tk->tkr.mult += mult_adj;
1339 tk->xtime_interval += interval;
1340 tk->tkr.xtime_nsec -= offset;
1341 tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
1342 }
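/*
 * Editor's note: the derivation above with made-up numbers. Take
 * offset = 1000 cycles, xtime_nsec_1 = 5000 (shifted ns), adj_1 = 3:
 *
 *	now = (1000 * 3) + 5000 = 8000
 *
 * Bumping the multiplier to adj_2 = 4 while keeping now fixed requires
 * xtime_nsec_2 = xtime_nsec_1 - offset = 4000:
 *
 *	now = (1000 * 4) + 4000 = 8000
 *
 * which is exactly the "tk->tkr.xtime_nsec -= offset" performed above.
 */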
1343
1344 /*
1345 * Calculate the multiplier adjustment needed to match the frequency
1346 * specified by NTP
1347 */
1348 static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
1349 s64 offset)
1350 {
1351 s64 interval = tk->cycle_interval;
1352 s64 xinterval = tk->xtime_interval;
1353 s64 tick_error;
1354 bool negative;
1355 u32 adj;
1356
1357 /* Remove any current error adj from freq calculation */
1358 if (tk->ntp_err_mult)
1359 xinterval -= tk->cycle_interval;
1360
1361 tk->ntp_tick = ntp_tick_length();
1362
1363 /* Calculate current error per tick */
1364 tick_error = ntp_tick_length() >> tk->ntp_error_shift;
1365 tick_error -= (xinterval + tk->xtime_remainder);
1366
1367 /* Don't worry about correcting it if it's small */
1368 if (likely((tick_error >= 0) && (tick_error <= interval)))
1369 return;
1370
1371 /* preserve the direction of correction */
1372 negative = (tick_error < 0);
1373
1374 /* Sort out the magnitude of the correction */
1375 tick_error = abs(tick_error);
1376 for (adj = 0; tick_error > interval; adj++)
1377 tick_error >>= 1;
1378
1379 /* scale the corrections */
1380 timekeeping_apply_adjustment(tk, offset, negative, adj);
1381 }
1382
1383 /*
1384 * Adjust the timekeeper's multiplier to the correct frequency
1385 * and also to reduce the accumulated error value.
1386 */
1387 static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
1388 {
1389 /* Correct for the current frequency error */
1390 timekeeping_freqadjust(tk, offset);
1391
1392 /* Next make a small adjustment to fix any cumulative error */
1393 if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
1394 tk->ntp_err_mult = 1;
1395 timekeeping_apply_adjustment(tk, offset, 0, 0);
1396 } else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
1397 /* Undo any existing error adjustment */
1398 timekeeping_apply_adjustment(tk, offset, 1, 0);
1399 tk->ntp_err_mult = 0;
1400 }
1401
1402 if (unlikely(tk->tkr.clock->maxadj &&
1403 (abs(tk->tkr.mult - tk->tkr.clock->mult)
1404 > tk->tkr.clock->maxadj))) {
1405 printk_once(KERN_WARNING
1406 "Adjusting %s more than 11%% (%ld vs %ld)\n",
1407 tk->tkr.clock->name, (long)tk->tkr.mult,
1408 (long)tk->tkr.clock->mult + tk->tkr.clock->maxadj);
1409 }
1410
1411 /*
1412 * It may be possible that when we entered this function, xtime_nsec
1413 * was very small. Further, if we're slightly speeding the clocksource
1414 * in the code above, it's possible the required corrective factor to
1415 * xtime_nsec could cause it to underflow.
1416 *
1417 * Now, since we already accumulated the second, we cannot simply roll
1418 * the accumulated second back, since the NTP subsystem has been
1419 * notified via second_overflow. So instead we push xtime_nsec forward
1420 * by the amount we underflowed, and add that amount into the error.
1421 *
1422 * We'll correct this error next time through this function, when
1423 * xtime_nsec is not as small.
1424 */
1425 if (unlikely((s64)tk->tkr.xtime_nsec < 0)) {
1426 s64 neg = -(s64)tk->tkr.xtime_nsec;
1427 tk->tkr.xtime_nsec = 0;
1428 tk->ntp_error += neg << tk->ntp_error_shift;
1429 }
1430 }
1431
1432 /**
1433 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
1434 *
1435 * Helper function that accumulates the nsecs greater than a second
1436 * from the xtime_nsec field to the xtime_sec field.
1437 * It also calls into the NTP code to handle leapsecond processing.
1438 *
1439 */
1440 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
1441 {
1442 u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr.shift;
1443 unsigned int clock_set = 0;
1444
1445 while (tk->tkr.xtime_nsec >= nsecps) {
1446 int leap;
1447
1448 tk->tkr.xtime_nsec -= nsecps;
1449 tk->xtime_sec++;
1450
1451 /* Figure out if it's a leap sec and apply if needed */
1452 leap = second_overflow(tk->xtime_sec);
1453 if (unlikely(leap)) {
1454 struct timespec64 ts;
1455
1456 tk->xtime_sec += leap;
1457
1458 ts.tv_sec = leap;
1459 ts.tv_nsec = 0;
1460 tk_set_wall_to_mono(tk,
1461 timespec64_sub(tk->wall_to_monotonic, ts));
1462
1463 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
1464
1465 clock_set = TK_CLOCK_WAS_SET;
1466 }
1467 }
1468 return clock_set;
1469 }
1470
1471 /**
1472 * logarithmic_accumulation - shifted accumulation of cycles
1473 *
1474 * This function accumulates a shifted interval of cycles into
1475 * a shifted interval of nanoseconds. Allows for an O(log)
1476 * accumulation loop.
1477 *
1478 * Returns the unconsumed cycles.
1479 */
1480 static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
1481 u32 shift,
1482 unsigned int *clock_set)
1483 {
1484 cycle_t interval = tk->cycle_interval << shift;
1485 u64 raw_nsecs;
1486
1487 /* If the offset is smaller than a shifted interval, do nothing */
1488 if (offset < interval)
1489 return offset;
1490
1491 /* Accumulate one shifted interval */
1492 offset -= interval;
1493 tk->tkr.cycle_last += interval;
1494
1495 tk->tkr.xtime_nsec += tk->xtime_interval << shift;
1496 *clock_set |= accumulate_nsecs_to_secs(tk);
1497
1498 /* Accumulate raw time */
1499 raw_nsecs = (u64)tk->raw_interval << shift;
1500 raw_nsecs += tk->raw_time.tv_nsec;
1501 if (raw_nsecs >= NSEC_PER_SEC) {
1502 u64 raw_secs = raw_nsecs;
1503 raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
1504 tk->raw_time.tv_sec += raw_secs;
1505 }
1506 tk->raw_time.tv_nsec = raw_nsecs;
1507
1508 /* Accumulate error between NTP and clock interval */
1509 tk->ntp_error += tk->ntp_tick << shift;
1510 tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
1511 (tk->ntp_error_shift + shift);
1512
1513 return offset;
1514 }
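/*
 * Editor's note: worked example with made-up numbers. With
 * cycle_interval = 1000 and offset = 37000, update_wall_time() starts at
 * shift = ilog2(37000) - ilog2(1000) = 15 - 9 = 6, and the accumulation
 * loop consumes:
 *
 *	shift 5: 32 intervals at once, offset 37000 -> 5000
 *	shift 2:  4 intervals at once, offset  5000 -> 1000
 *	shift 0:  1 interval,          offset  1000 -> 0
 *
 * i.e. a handful of iterations instead of 37 single-tick passes.
 */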
1515
1516 /**
1517 * update_wall_time - Uses the current clocksource to increment the wall time
1518 *
1519 */
1520 void update_wall_time(void)
1521 {
1522 struct timekeeper *real_tk = &tk_core.timekeeper;
1523 struct timekeeper *tk = &shadow_timekeeper;
1524 cycle_t offset;
1525 int shift = 0, maxshift;
1526 unsigned int clock_set = 0;
1527 unsigned long flags;
1528
1529 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1530
1531 /* Make sure we're fully resumed: */
1532 if (unlikely(timekeeping_suspended))
1533 goto out;
1534
1535 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
1536 offset = real_tk->cycle_interval;
1537 #else
1538 offset = clocksource_delta(tk->tkr.read(tk->tkr.clock),
1539 tk->tkr.cycle_last, tk->tkr.mask);
1540 #endif
1541
1542 /* Check if there's really nothing to do */
1543 if (offset < real_tk->cycle_interval)
1544 goto out;
1545
1546 /*
1547 * With NO_HZ we may have to accumulate many cycle_intervals
1548 * (think "ticks") worth of time at once. To do this efficiently,
1549 * we calculate the largest doubling multiple of cycle_intervals
1550 * that is smaller than the offset. We then accumulate that
1551 * chunk in one go, and then try to consume the next smaller
1552 * doubled multiple.
1553 */
1554 shift = ilog2(offset) - ilog2(tk->cycle_interval);
1555 shift = max(0, shift);
1556 /* Bound shift to one less than what overflows tick_length */
1557 maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
1558 shift = min(shift, maxshift);
1559 while (offset >= tk->cycle_interval) {
1560 offset = logarithmic_accumulation(tk, offset, shift,
1561 &clock_set);
1562 if (offset < tk->cycle_interval<<shift)
1563 shift--;
1564 }
1565
1566 /* correct the clock when NTP error is too big */
1567 timekeeping_adjust(tk, offset);
1568
1569 /*
1570 * XXX This can be killed once everyone converts
1571 * to the new update_vsyscall.
1572 */
1573 old_vsyscall_fixup(tk);
1574
1575 /*
1576 * Finally, make sure that after the rounding
1577 * xtime_nsec isn't larger than NSEC_PER_SEC
1578 */
1579 clock_set |= accumulate_nsecs_to_secs(tk);
1580
1581 write_seqcount_begin(&tk_core.seq);
1582 /*
1583 * Update the real timekeeper.
1584 *
1585 * We could avoid this memcpy by switching pointers, but that
1586 * requires changes to all other timekeeper usage sites as
1587 * well, i.e. move the timekeeper pointer getter into the
1588 * spinlocked/seqcount protected sections. And we trade this
1589 * memcpy under the tk_core.seq against one before we start
1590 * updating.
1591 */
1592 memcpy(real_tk, tk, sizeof(*tk));
1593 timekeeping_update(real_tk, clock_set);
1594 write_seqcount_end(&tk_core.seq);
1595 out:
1596 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1597 if (clock_set)
1598 /* Have to call the _delayed version, since we're in irq context */
1599 clock_was_set_delayed();
1600 }
1601
1602 /**
1603 * getboottime - Return the real time of system boot.
1604 * @ts: pointer to the timespec to be set
1605 *
1606 * Returns the wall-time of boot in a timespec.
1607 *
1608 * This is based on the wall_to_monotonic offset and the total suspend
1609 * time. Calls to settimeofday will affect the value returned (which
1610 * basically means that however wrong your real time clock is at boot time,
1611 * you get the right time here).
1612 */
1613 void getboottime(struct timespec *ts)
1614 {
1615 struct timekeeper *tk = &tk_core.timekeeper;
1616 ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
1617
1618 *ts = ktime_to_timespec(t);
1619 }
1620 EXPORT_SYMBOL_GPL(getboottime);
1621
1622 unsigned long get_seconds(void)
1623 {
1624 struct timekeeper *tk = &tk_core.timekeeper;
1625
1626 return tk->xtime_sec;
1627 }
1628 EXPORT_SYMBOL(get_seconds);
1629
1630 struct timespec __current_kernel_time(void)
1631 {
1632 struct timekeeper *tk = &tk_core.timekeeper;
1633
1634 return timespec64_to_timespec(tk_xtime(tk));
1635 }
1636
1637 struct timespec current_kernel_time(void)
1638 {
1639 struct timekeeper *tk = &tk_core.timekeeper;
1640 struct timespec64 now;
1641 unsigned long seq;
1642
1643 do {
1644 seq = read_seqcount_begin(&tk_core.seq);
1645
1646 now = tk_xtime(tk);
1647 } while (read_seqcount_retry(&tk_core.seq, seq));
1648
1649 return timespec64_to_timespec(now);
1650 }
1651 EXPORT_SYMBOL(current_kernel_time);
1652
1653 struct timespec get_monotonic_coarse(void)
1654 {
1655 struct timekeeper *tk = &tk_core.timekeeper;
1656 struct timespec64 now, mono;
1657 unsigned long seq;
1658
1659 do {
1660 seq = read_seqcount_begin(&tk_core.seq);
1661
1662 now = tk_xtime(tk);
1663 mono = tk->wall_to_monotonic;
1664 } while (read_seqcount_retry(&tk_core.seq, seq));
1665
1666 set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
1667 now.tv_nsec + mono.tv_nsec);
1668
1669 return timespec64_to_timespec(now);
1670 }
1671
1672 /*
1673 * Must hold jiffies_lock
1674 */
1675 void do_timer(unsigned long ticks)
1676 {
1677 jiffies_64 += ticks;
1678 calc_global_load(ticks);
1679 }
1680
1681 /**
1682 * ktime_get_update_offsets_tick - hrtimer helper
1683 * @offs_real: pointer to storage for monotonic -> realtime offset
1684 * @offs_boot: pointer to storage for monotonic -> boottime offset
1685 * @offs_tai: pointer to storage for monotonic -> clock tai offset
1686 *
1687 * Returns monotonic time at last tick and various offsets
1688 */
1689 ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot,
1690 ktime_t *offs_tai)
1691 {
1692 struct timekeeper *tk = &tk_core.timekeeper;
1693 unsigned int seq;
1694 ktime_t base;
1695 u64 nsecs;
1696
1697 do {
1698 seq = read_seqcount_begin(&tk_core.seq);
1699
1700 base = tk->tkr.base_mono;
1701 nsecs = tk->tkr.xtime_nsec >> tk->tkr.shift;
1702
1703 *offs_real = tk->offs_real;
1704 *offs_boot = tk->offs_boot;
1705 *offs_tai = tk->offs_tai;
1706 } while (read_seqcount_retry(&tk_core.seq, seq));
1707
1708 return ktime_add_ns(base, nsecs);
1709 }
1710
1711 #ifdef CONFIG_HIGH_RES_TIMERS
1712 /**
1713 * ktime_get_update_offsets_now - hrtimer helper
1714 * @offs_real: pointer to storage for monotonic -> realtime offset
1715 * @offs_boot: pointer to storage for monotonic -> boottime offset
1716 * @offs_tai: pointer to storage for monotonic -> clock tai offset
1717 *
1718 * Returns current monotonic time and updates the offsets
1719 * Called from hrtimer_interrupt() or retrigger_next_event()
1720 */
1721 ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
1722 ktime_t *offs_tai)
1723 {
1724 struct timekeeper *tk = &tk_core.timekeeper;
1725 unsigned int seq;
1726 ktime_t base;
1727 u64 nsecs;
1728
1729 do {
1730 seq = read_seqcount_begin(&tk_core.seq);
1731
1732 base = tk->tkr.base_mono;
1733 nsecs = timekeeping_get_ns(&tk->tkr);
1734
1735 *offs_real = tk->offs_real;
1736 *offs_boot = tk->offs_boot;
1737 *offs_tai = tk->offs_tai;
1738 } while (read_seqcount_retry(&tk_core.seq, seq));
1739
1740 return ktime_add_ns(base, nsecs);
1741 }
1742 #endif
1743
1744 /**
1745 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
1746 */
1747 int do_adjtimex(struct timex *txc)
1748 {
1749 struct timekeeper *tk = &tk_core.timekeeper;
1750 unsigned long flags;
1751 struct timespec64 ts;
1752 s32 orig_tai, tai;
1753 int ret;
1754
1755 /* Validate the data before disabling interrupts */
1756 ret = ntp_validate_timex(txc);
1757 if (ret)
1758 return ret;
1759
1760 if (txc->modes & ADJ_SETOFFSET) {
1761 struct timespec delta;
1762 delta.tv_sec = txc->time.tv_sec;
1763 delta.tv_nsec = txc->time.tv_usec;
1764 if (!(txc->modes & ADJ_NANO))
1765 delta.tv_nsec *= 1000;
1766 ret = timekeeping_inject_offset(&delta);
1767 if (ret)
1768 return ret;
1769 }
1770
1771 getnstimeofday64(&ts);
1772
1773 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1774 write_seqcount_begin(&tk_core.seq);
1775
1776 orig_tai = tai = tk->tai_offset;
1777 ret = __do_adjtimex(txc, &ts, &tai);
1778
1779 if (tai != orig_tai) {
1780 __timekeeping_set_tai_offset(tk, tai);
1781 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1782 }
1783 write_seqcount_end(&tk_core.seq);
1784 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1785
1786 if (tai != orig_tai)
1787 clock_was_set();
1788
1789 ntp_notify_cmos_timer();
1790
1791 return ret;
1792 }
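/*
 * Editor's note: the hypothetical user-space counterpart of the
 * ADJ_SETOFFSET path handled above; shown for context only. Stepping
 * the clock by +1.5 s requires CAP_SYS_TIME:
 */
#if 0
#include <sys/timex.h>

static int example_step_clock(void)
{
	struct timex txc = {
		.modes = ADJ_SETOFFSET,
		.time  = { .tv_sec = 1, .tv_usec = 500000 },
	};

	return adjtimex(&txc);
}
#endif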
1793
1794 #ifdef CONFIG_NTP_PPS
1795 /**
1796 * hardpps() - Accessor function to NTP __hardpps function
1797 */
1798 void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
1799 {
1800 unsigned long flags;
1801
1802 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1803 write_seqcount_begin(&tk_core.seq);
1804
1805 __hardpps(phase_ts, raw_ts);
1806
1807 write_seqcount_end(&tk_core.seq);
1808 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1809 }
1810 EXPORT_SYMBOL(hardpps);
1811 #endif
1812
1813 /**
1814 * xtime_update() - advances the timekeeping infrastructure
1815 * @ticks: number of ticks that have elapsed since the last call.
1816 *
1817 * Must be called with interrupts disabled.
1818 */
1819 void xtime_update(unsigned long ticks)
1820 {
1821 write_seqlock(&jiffies_lock);
1822 do_timer(ticks);
1823 write_sequnlock(&jiffies_lock);
1824 update_wall_time();
1825 }