/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  Updates and enhancements:
 *  Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 * Create a semi-stable clock from a mixture of other events, including:
 *  - gtod
 *  - jiffies
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use gtod as the base and add the unstable sched_clock() deltas on top.
 * The deltas are filtered to keep the clock monotonic and within an
 * expected window, which is set up using jiffies.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (the TSC gets stopped).
 *
 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than one jiffy of difference).
 */
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
#include <linux/module.h>

/*
 * Scheduler clock - returns current time in nanoseconds.
 * This is the default implementation; architectures and
 * sub-architectures can override it.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
}
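
/*
 * Illustrative sketch (not part of this file): an architecture with a
 * constant-rate cycle counter could override the weak default along
 * these lines.  read_cycle_counter(), cyc2ns_mul and cyc2ns_shift are
 * hypothetical names, standing in for whatever the architecture provides:
 *
 *	unsigned long long sched_clock(void)
 *	{
 *		return (read_cycle_counter() * cyc2ns_mul) >> cyc2ns_shift;
 *	}
 */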

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK

#define MULTI_SHIFT 15
/* Max is double, Min is 1/2 */
#define MAX_MULTI (2LL << MULTI_SHIFT)
#define MIN_MULTI (1LL << (MULTI_SHIFT-1))
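
/*
 * Worked example of the fixed-point format used here (assuming
 * MULTI_SHIFT == 15): a multiplier of 1.0 is represented as
 * 1 << 15 == 32768, so MAX_MULTI is 2.0 and MIN_MULTI is 0.5.
 * Scaling a delta by 1.25 would look like:
 *
 *	s64 multi = 5 << (MULTI_SHIFT - 2);	// 1.25 in fixed point
 *	delta = (delta * multi) >> MULTI_SHIFT;	// delta * 1.25
 */
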
struct sched_clock_data {
	/*
	 * Raw spinlock - this is a special case: this might be called
	 * from within instrumentation code so we don't want to do any
	 * instrumentation ourselves.
	 */
	raw_spinlock_t lock;

	unsigned long tick_jiffies;
	u64 prev_raw;
	u64 tick_raw;
	u64 tick_gtod;
	u64 clock;
	s64 multi;
#ifdef CONFIG_NO_HZ
	int check_max;
#endif
};
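
/*
 * Field roles, for reference: tick_raw, tick_gtod and tick_jiffies are
 * the sched_clock(), gtod and jiffies values sampled at the last tick;
 * prev_raw is the raw value seen by the previous update; clock is the
 * filtered output; multi is the fixed-point estimate of the gtod/raw
 * rate ratio computed at each tick.
 */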

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

static __read_mostly int sched_clock_running;

void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	unsigned long now_jiffies = jiffies;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
		scd->tick_jiffies = now_jiffies;
		scd->prev_raw = 0;
		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
		scd->multi = 1 << MULTI_SHIFT;
#ifdef CONFIG_NO_HZ
		scd->check_max = 1;
#endif
	}

	sched_clock_running = 1;
}

#ifdef CONFIG_NO_HZ
/*
 * Dynamic ticks make the jiffies delta inaccurate, which
 * prevents us from checking the maximum time update.
 * Disable the maximum check while ticks are stopped.
 */
void sched_clock_tick_stop(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);

	scd->check_max = 0;
}

void sched_clock_tick_start(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);

	scd->check_max = 1;
}

static int check_max(struct sched_clock_data *scd)
{
	return scd->check_max;
}
#else
static int check_max(struct sched_clock_data *scd)
{
	return 1;
}
#endif /* CONFIG_NO_HZ */

/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use jiffies to generate a min/max window to clip the raw values
 */
static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time)
{
	unsigned long now_jiffies = jiffies;
	long delta_jiffies = now_jiffies - scd->tick_jiffies;
	u64 clock = scd->clock;
	u64 min_clock, max_clock;
	s64 delta = now - scd->prev_raw;

	WARN_ON_ONCE(!irqs_disabled());

	/*
	 * At the scheduler tick the clock can be just under the gtod.
	 * We don't want to push it forward prematurely.
	 */
	min_clock = scd->tick_gtod + (delta_jiffies * TICK_NSEC);
	if (min_clock > TICK_NSEC)
		min_clock -= TICK_NSEC / 2;

	if (unlikely(delta < 0)) {
		clock++;
		goto out;
	}

	/*
	 * The clock must stay within a jiffy of the gtod.
	 * But since we may be at the start of a jiffy or the end of one
	 * we add another jiffy buffer.
	 */
	max_clock = scd->tick_gtod + (2 + delta_jiffies) * TICK_NSEC;

	delta *= scd->multi;
	delta >>= MULTI_SHIFT;

	if (unlikely(clock + delta > max_clock) && check_max(scd)) {
		if (clock < max_clock)
			clock = max_clock;
		else
			clock++;
	} else {
		clock += delta;
	}

out:
	if (unlikely(clock < min_clock))
		clock = min_clock;

	if (time) {
		*time = clock;
	} else {
		scd->prev_raw = now;
		scd->clock = clock;
	}
}

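/*
 * Worked example of the clipping window above (illustrative numbers,
 * assuming HZ == 1000, so TICK_NSEC is roughly 1000000): with
 * tick_gtod == 5000000 and delta_jiffies == 1,
 *
 *	min_clock = 5000000 + 1 * 1000000 - 1000000 / 2 = 5500000
 *	max_clock = 5000000 + (2 + 1) * 1000000        = 8000000
 *
 * so a scaled delta that would push the clock past 8000000 is clipped
 * to the window, and a clock lagging below 5500000 is pulled up to it.
 */
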
/*
 * Take both per-cpu locks in address order, so that two cpus locking
 * the same pair concurrently cannot deadlock on each other (AB-BA).
 */
static void lock_double_clock(struct sched_clock_data *data1,
			      struct sched_clock_data *data2)
{
	if (data1 < data2) {
		__raw_spin_lock(&data1->lock);
		__raw_spin_lock(&data2->lock);
	} else {
		__raw_spin_lock(&data2->lock);
		__raw_spin_lock(&data1->lock);
	}
}

u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);
	u64 now, clock;

	if (unlikely(!sched_clock_running))
		return 0ull;

	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	if (cpu != raw_smp_processor_id()) {
		/*
		 * In order to update a remote cpu's clock based on our
		 * unstable raw time, rebase it against:
		 *	tick_raw	(offset between raw counters)
		 *	tick_gtod	(tick offset between cpus)
		 */
		struct sched_clock_data *my_scd = this_scd();

		lock_double_clock(scd, my_scd);

		now -= my_scd->tick_raw;
		now += scd->tick_raw;

		now += my_scd->tick_gtod;
		now -= scd->tick_gtod;

		__raw_spin_unlock(&my_scd->lock);

		__update_sched_clock(scd, now, &clock);

		__raw_spin_unlock(&scd->lock);
	} else {
		__raw_spin_lock(&scd->lock);
		__update_sched_clock(scd, now, NULL);
		clock = scd->clock;
		__raw_spin_unlock(&scd->lock);
	}

	return clock;
}

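/*
 * Worked example of the remote rebase in sched_clock_cpu() above
 * (illustrative numbers): if our cpu sampled a raw value of 100 at its
 * last tick and reads 150 now, while the remote cpu sampled 400 at its
 * last tick, the rebased value is 150 - 100 + 400 = 450, plus the
 * difference between the two cpus' tick_gtod stamps, which corrects
 * for the cpus having ticked at different gtod times.
 */
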
void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	unsigned long now_jiffies = jiffies;
	s64 mult, delta_gtod, delta_raw;
	u64 now, now_gtod;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	__raw_spin_lock(&scd->lock);
	__update_sched_clock(scd, now, NULL);
	/*
	 * update tick_gtod after __update_sched_clock() because that will
	 * already observe 1 new jiffy; adding a new tick_gtod to that would
	 * increase the clock by 2 jiffies.
	 */
	delta_gtod = now_gtod - scd->tick_gtod;
	delta_raw = now - scd->tick_raw;

	if ((long)delta_raw > 0) {
		mult = delta_gtod << MULTI_SHIFT;
		do_div(mult, delta_raw);
		scd->multi = mult;
		if (scd->multi > MAX_MULTI)
			scd->multi = MAX_MULTI;
		else if (scd->multi < MIN_MULTI)
			scd->multi = MIN_MULTI;
	} else
		scd->multi = 1 << MULTI_SHIFT;

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	scd->tick_jiffies = now_jiffies;
	__raw_spin_unlock(&scd->lock);
}

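/*
 * Worked example of the multiplier update above (illustrative numbers):
 * if gtod advanced 1000000 ns since the last tick while the raw clock
 * advanced 2000000 ns, then
 *
 *	mult = (1000000 << MULTI_SHIFT) / 2000000 = 16384
 *
 * which is 0.5 in fixed point, so subsequent raw deltas are halved to
 * track gtod; the result is clamped to [MIN_MULTI, MAX_MULTI].
 */
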
/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	struct sched_clock_data *scd = this_scd();
	u64 now = sched_clock();

	/*
	 * Override the previous timestamp and ignore all
	 * sched_clock() deltas that occurred while we idled,
	 * and use the PM-provided delta_ns to advance the
	 * rq clock:
	 */
	__raw_spin_lock(&scd->lock);
	scd->prev_raw = now;
	scd->clock += delta_ns;
	scd->multi = 1 << MULTI_SHIFT;
	__raw_spin_unlock(&scd->lock);

	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

unsigned long long cpu_clock(int cpu)
{
	unsigned long long clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}
EXPORT_SYMBOL_GPL(cpu_clock);
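
/*
 * Illustrative usage sketch (not part of this file): callers that may
 * run with irqs enabled use cpu_clock() rather than sched_clock_cpu()
 * directly, e.g. to time an interval:
 *
 *	u64 t0 = cpu_clock(raw_smp_processor_id());
 *	do_work();	// hypothetical workload
 *	u64 t1 = cpu_clock(raw_smp_processor_id());
 *
 * t1 - t0 is then a nanosecond interval, monotonic per cpu.
 */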