Commit | Line | Data |
---|---|---|
112f38a4 RK |
/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
8 | #include <linux/clocksource.h> | |
9 | #include <linux/init.h> | |
10 | #include <linux/jiffies.h> | |
a08ca5d1 | 11 | #include <linux/ktime.h> |
112f38a4 | 12 | #include <linux/kernel.h> |
a42c3629 | 13 | #include <linux/moduleparam.h> |
112f38a4 | 14 | #include <linux/sched.h> |
f153d017 | 15 | #include <linux/syscore_ops.h> |
a08ca5d1 | 16 | #include <linux/hrtimer.h> |
38ff87f7 | 17 | #include <linux/sched_clock.h> |
85c3d2dd | 18 | #include <linux/seqlock.h> |
112f38a4 | 19 | |
2f0778af | 20 | struct clock_data { |
a08ca5d1 | 21 | ktime_t wrap_kt; |
2f0778af MZ |
22 | u64 epoch_ns; |
23 | u32 epoch_cyc; | |
85c3d2dd | 24 | seqcount_t seq; |
c115739d | 25 | unsigned long rate; |
2f0778af MZ |
26 | u32 mult; |
27 | u32 shift; | |
237ec6f2 | 28 | bool suspended; |
2f0778af MZ |
29 | }; |
30 | ||
a08ca5d1 | 31 | static struct hrtimer sched_clock_timer; |
a42c3629 RK |
32 | static int irqtime = -1; |
33 | ||
34 | core_param(irqtime, irqtime, int, 0400); | |
2f0778af MZ |
35 | |
36 | static struct clock_data cd = { | |
37 | .mult = NSEC_PER_SEC / HZ, | |
38 | }; | |
39 | ||
40 | static u32 __read_mostly sched_clock_mask = 0xffffffff; | |
41 | ||
42 | static u32 notrace jiffy_sched_clock_read(void) | |
43 | { | |
44 | return (u32)(jiffies - INITIAL_JIFFIES); | |
45 | } | |
46 | ||
47 | static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read; | |
48 | ||
cea15092 | 49 | static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift) |
2f0778af MZ |
50 | { |
51 | return (cyc * mult) >> shift; | |
52 | } | |
53 | ||
336ae118 | 54 | static unsigned long long notrace sched_clock_32(void) |
2f0778af MZ |
55 | { |
56 | u64 epoch_ns; | |
57 | u32 epoch_cyc; | |
336ae118 | 58 | u32 cyc; |
85c3d2dd | 59 | unsigned long seq; |
336ae118 SB |
60 | |
61 | if (cd.suspended) | |
62 | return cd.epoch_ns; | |
2f0778af | 63 | |
2f0778af | 64 | do { |
85c3d2dd | 65 | seq = read_seqcount_begin(&cd.seq); |
2f0778af | 66 | epoch_cyc = cd.epoch_cyc; |
2f0778af | 67 | epoch_ns = cd.epoch_ns; |
85c3d2dd | 68 | } while (read_seqcount_retry(&cd.seq, seq)); |
2f0778af | 69 | |
336ae118 SB |
70 | cyc = read_sched_clock(); |
71 | cyc = (cyc - epoch_cyc) & sched_clock_mask; | |
72 | return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift); | |
2f0778af MZ |
73 | } |
74 | ||
75 | /* | |
76 | * Atomically update the sched_clock epoch. | |
77 | */ | |
78 | static void notrace update_sched_clock(void) | |
79 | { | |
80 | unsigned long flags; | |
81 | u32 cyc; | |
82 | u64 ns; | |
83 | ||
84 | cyc = read_sched_clock(); | |
85 | ns = cd.epoch_ns + | |
86 | cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask, | |
87 | cd.mult, cd.shift); | |
85c3d2dd | 88 | |
2f0778af | 89 | raw_local_irq_save(flags); |
85c3d2dd | 90 | write_seqcount_begin(&cd.seq); |
2f0778af | 91 | cd.epoch_ns = ns; |
7c4e9ced | 92 | cd.epoch_cyc = cyc; |
85c3d2dd | 93 | write_seqcount_end(&cd.seq); |
2f0778af MZ |
94 | raw_local_irq_restore(flags); |
95 | } | |
112f38a4 | 96 | |
a08ca5d1 | 97 | static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt) |
112f38a4 | 98 | { |
2f0778af | 99 | update_sched_clock(); |
a08ca5d1 SB |
100 | hrtimer_forward_now(hrt, cd.wrap_kt); |
101 | return HRTIMER_RESTART; | |
112f38a4 RK |
102 | } |
103 | ||
2f0778af | 104 | void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate) |
112f38a4 | 105 | { |
a08ca5d1 | 106 | unsigned long r; |
112f38a4 RK |
107 | u64 res, wrap; |
108 | char r_unit; | |
109 | ||
c115739d RH |
110 | if (cd.rate > rate) |
111 | return; | |
112 | ||
2f0778af MZ |
113 | BUG_ON(bits > 32); |
114 | WARN_ON(!irqs_disabled()); | |
2f0778af MZ |
115 | read_sched_clock = read; |
116 | sched_clock_mask = (1 << bits) - 1; | |
c115739d | 117 | cd.rate = rate; |
112f38a4 RK |
118 | |
119 | /* calculate the mult/shift to convert counter ticks to ns. */ | |
2f0778af | 120 | clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0); |
112f38a4 RK |
121 | |
122 | r = rate; | |
123 | if (r >= 4000000) { | |
124 | r /= 1000000; | |
125 | r_unit = 'M'; | |
2f0778af | 126 | } else if (r >= 1000) { |
112f38a4 RK |
127 | r /= 1000; |
128 | r_unit = 'k'; | |
2f0778af MZ |
129 | } else |
130 | r_unit = ' '; | |
112f38a4 RK |
131 | |
132 | /* calculate how many ns until we wrap */ | |
2f0778af | 133 | wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift); |
a08ca5d1 | 134 | cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3)); |
112f38a4 RK |
135 | |
136 | /* calculate the ns resolution of this counter */ | |
2f0778af | 137 | res = cyc_to_ns(1ULL, cd.mult, cd.shift); |
a08ca5d1 SB |
138 | pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n", |
139 | bits, r, r_unit, res, wrap); | |
112f38a4 | 140 | |
2f0778af | 141 | update_sched_clock(); |
112f38a4 RK |
142 | |
143 | /* | |
144 | * Ensure that sched_clock() starts off at 0ns | |
145 | */ | |
2f0778af MZ |
146 | cd.epoch_ns = 0; |
147 | ||
a42c3629 RK |
148 | /* Enable IRQ time accounting if we have a fast enough sched_clock */ |
149 | if (irqtime > 0 || (irqtime == -1 && rate >= 1000000)) | |
150 | enable_sched_clock_irqtime(); | |
151 | ||
2f0778af MZ |
152 | pr_debug("Registered %pF as sched_clock source\n", read); |
153 | } | |
154 | ||
7e48c0b9 RH |
155 | unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32; |
156 | ||
157 | unsigned long long notrace sched_clock(void) | |
158 | { | |
159 | return sched_clock_func(); | |
160 | } | |
161 | ||
211baa70 RK |
162 | void __init sched_clock_postinit(void) |
163 | { | |
2f0778af MZ |
164 | /* |
165 | * If no sched_clock function has been provided at that point, | |
166 | * make it the final one one. | |
167 | */ | |
168 | if (read_sched_clock == jiffy_sched_clock_read) | |
169 | setup_sched_clock(jiffy_sched_clock_read, 32, HZ); | |
170 | ||
a08ca5d1 SB |
171 | update_sched_clock(); |
172 | ||
173 | /* | |
174 | * Start the timer to keep sched_clock() properly updated and | |
175 | * sets the initial epoch. | |
176 | */ | |
177 | hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | |
178 | sched_clock_timer.function = sched_clock_poll; | |
179 | hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL); | |
211baa70 | 180 | } |
f153d017 RK |
181 | |
182 | static int sched_clock_suspend(void) | |
183 | { | |
a08ca5d1 | 184 | sched_clock_poll(&sched_clock_timer); |
6a4dae5e | 185 | cd.suspended = true; |
f153d017 RK |
186 | return 0; |
187 | } | |
188 | ||
237ec6f2 CC |
189 | static void sched_clock_resume(void) |
190 | { | |
6a4dae5e | 191 | cd.epoch_cyc = read_sched_clock(); |
6a4dae5e | 192 | cd.suspended = false; |
237ec6f2 CC |
193 | } |
194 | ||
f153d017 RK |
195 | static struct syscore_ops sched_clock_ops = { |
196 | .suspend = sched_clock_suspend, | |
237ec6f2 | 197 | .resume = sched_clock_resume, |
f153d017 RK |
198 | }; |
199 | ||
200 | static int __init sched_clock_syscore_init(void) | |
201 | { | |
202 | register_syscore_ops(&sched_clock_ops); | |
203 | return 0; | |
204 | } | |
205 | device_initcall(sched_clock_syscore_init); |