Commit | Line | Data
---|---|---
3e51f33f PZ | 1 | /*
 | 2 |  * sched_clock for unstable cpu clocks
 | 3 |  *
 | 4 |  * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 | 5 |  *
c300ba25 SR | 6 |  * Updates and enhancements:
 | 7 |  *   Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 | 8 |  *
3e51f33f PZ | 9 |  * Based on code by:
 | 10 |  *   Ingo Molnar <mingo@redhat.com>
 | 11 |  *   Guillaume Chazarain <guichaz@gmail.com>
 | 12 |  *
 | 13 |  * Create a semi-stable clock from a mixture of other events, including:
 | 14 |  *  - gtod
 | 15 |  *  - jiffies
 | 16 |  *  - sched_clock()
 | 17 |  *  - explicit idle events
 | 18 |  *
 | 19 |  * We use gtod as a base and add the unstable clock deltas on top. The
 | 20 |  * deltas are filtered to keep the clock monotonic and within an expected
 | 21 |  * window; this window is set up using jiffies.
 | 22 |  *
 | 23 |  * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 | 24 |  * that is otherwise invisible (the TSC gets stopped).
 | 25 |  *
 | 26 |  * The resulting clock, sched_clock_cpu(), is monotonic per cpu and should be
 | 27 |  * somewhat consistent between cpus (never more than 1 jiffy of difference).
 | 28 |  */
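Before the code, it may help to see the filter from the header comment in isolation: raw sched_clock() deltas are only trusted while the resulting clock stays inside a one-tick window anchored at GTOD. The sketch below is illustrative only, with made-up names; the real implementation is `__update_sched_clock()` further down.

```c
/*
 * Illustrative sketch of the window filter; all names here are
 * hypothetical. See __update_sched_clock() below for the real code.
 */
static u64 filter_clock(u64 clock, s64 raw_delta,
			u64 tick_gtod, long delta_jiffies)
{
	u64 min_clock = tick_gtod + delta_jiffies * TICK_NSEC;
	u64 max_clock = min_clock + TICK_NSEC;

	if (raw_delta < 0) {			/* backward motion: ignore */
		clock++;
	} else if (clock + raw_delta > max_clock) {
		/* too fast: clip to the window's upper edge */
		clock = clock < max_clock ? max_clock : clock + 1;
	} else {
		clock += raw_delta;		/* inside the window: trust it */
	}

	if (clock < min_clock)			/* too slow: pull up to gtod */
		clock = min_clock;

	return clock;
}
```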
Commit | Line | Data
---|---|---
3e51f33f PZ | 29 | #include <linux/sched.h>
 | 30 | #include <linux/percpu.h>
 | 31 | #include <linux/spinlock.h>
 | 32 | #include <linux/ktime.h>
 | 33 | #include <linux/module.h>
 | 34 |
2c3d103b HD | 35 | /*
 | 36 |  * Scheduler clock - returns current time in nanosecond units.
 | 37 |  * This is the default implementation.
 | 38 |  * Architectures and sub-architectures can override this.
 | 39 |  */
 | 40 | unsigned long long __attribute__((weak)) sched_clock(void)
 | 41 | {
 | 42 |         return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
 | 43 | }
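Note that this default only advances in jiffy-sized steps of NSEC_PER_SEC / HZ nanoseconds. Because the symbol is weak, the linker silently prefers any architecture-specific definition; a hypothetical override might look like the sketch below, where `read_cycle_counter()` and `cycles_to_ns()` are illustrative stand-ins, not real kernel helpers.

```c
/*
 * Hypothetical architecture override of the weak default above.
 * read_cycle_counter() and cycles_to_ns() are made-up names for
 * whatever fast hardware counter the architecture provides.
 */
unsigned long long sched_clock(void)
{
	return cycles_to_ns(read_cycle_counter());
}
```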
Commit | Line | Data
---|---|---
3e51f33f PZ | 44 |
c1955a3d PZ | 45 | static __read_mostly int sched_clock_running;
 | 46 |
3e51f33f PZ | 47 | #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 | 48 |
 | 49 | struct sched_clock_data {
 | 50 |         /*
 | 51 |          * Raw spinlock - this is a special case: this might be called
 | 52 |          * from within instrumentation code, so we don't want to do any
 | 53 |          * instrumentation ourselves.
 | 54 |          */
 | 55 |         raw_spinlock_t lock;
 | 56 |
62c43dd9 | 57 |         unsigned long tick_jiffies;
3e51f33f PZ | 58 |         u64 tick_raw;
 | 59 |         u64 tick_gtod;
 | 60 |         u64 clock;
 | 61 | };
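The reason for raw_spinlock_t deserves a second look: sched_clock_cpu() can be reached from tracing and lockdep paths, and an instrumented lock in here could call right back into the clock. Schematically (an illustrative call chain, not a trace from a real system):

```c
/*
 * Why not an ordinary spinlock_t (schematic, illustrative):
 *
 *   tracer/lockdep hook needs a timestamp
 *     -> sched_clock_cpu()
 *          -> spin_lock(&scd->lock)     (instrumented lock)
 *               -> lockdep/tracer hook  (recursion!)
 *
 * __raw_spin_lock() carries no instrumentation, which breaks the loop.
 */
```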
Commit | Line | Data
---|---|---
3e51f33f PZ | 62 |
 | 63 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
 | 64 |
 | 65 | static inline struct sched_clock_data *this_scd(void)
 | 66 | {
 | 67 |         return &__get_cpu_var(sched_clock_data);
 | 68 | }
 | 69 |
 | 70 | static inline struct sched_clock_data *cpu_sdc(int cpu)
 | 71 | {
 | 72 |         return &per_cpu(sched_clock_data, cpu);
 | 73 | }
 | 74 |
 | 75 | void sched_clock_init(void)
 | 76 | {
 | 77 |         u64 ktime_now = ktime_to_ns(ktime_get());
a381759d PZ | 78 |         unsigned long now_jiffies = jiffies;
3e51f33f PZ | 79 |         int cpu;
 | 80 |
 | 81 |         for_each_possible_cpu(cpu) {
 | 82 |                 struct sched_clock_data *scd = cpu_sdc(cpu);
 | 83 |
 | 84 |                 scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
62c43dd9 | 85 |                 scd->tick_jiffies = now_jiffies;
a381759d PZ | 86 |                 scd->tick_raw = 0;
3e51f33f PZ | 87 |                 scd->tick_gtod = ktime_now;
 | 88 |                 scd->clock = ktime_now;
 | 89 |         }
a381759d PZ | 90 |
 | 91 |         sched_clock_running = 1;
3e51f33f PZ | 92 | }
 | 93 |
 | 94 | /*
 | 95 |  * update the percpu scd from the raw @now value
 | 96 |  *
 | 97 |  *  - filter out backward motion
 | 98 |  *  - use jiffies to generate a min/max window to clip the raw values
 | 99 |  */
56b90612 IM | 100 | static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
3e51f33f PZ | 101 | {
 | 102 |         unsigned long now_jiffies = jiffies;
62c43dd9 | 103 |         long delta_jiffies = now_jiffies - scd->tick_jiffies;
3e51f33f PZ | 104 |         u64 clock = scd->clock;
 | 105 |         u64 min_clock, max_clock;
18e4e36c | 106 |         s64 delta = now - scd->tick_raw;
3e51f33f PZ | 107 |
 | 108 |         WARN_ON_ONCE(!irqs_disabled());
e4e4e534 IM | 109 |         min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC;
3e51f33f PZ | 110 |
 | 111 |         if (unlikely(delta < 0)) {
 | 112 |                 clock++;
 | 113 |                 goto out;
 | 114 |         }
 | 115 |
e4e4e534 IM | 116 |         max_clock = min_clock + TICK_NSEC;
3e51f33f PZ | 117 |
e4e4e534 IM | 118 |         if (unlikely(clock + delta > max_clock)) {
3e51f33f PZ | 119 |                 if (clock < max_clock)
 | 120 |                         clock = max_clock;
 | 121 |                 else
 | 122 |                         clock++;
 | 123 |         } else {
 | 124 |                 clock += delta;
 | 125 |         }
 | 126 |
 | 127 | out:
 | 128 |         if (unlikely(clock < min_clock))
 | 129 |                 clock = min_clock;
 | 130 |
e4e4e534 IM | 131 |         scd->tick_jiffies = now_jiffies;
 | 132 |         scd->clock = clock;
56b90612 IM | 133 |
 | 134 |         return clock;
3e51f33f PZ | 135 | }
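To make the window concrete, assume HZ=1000, so TICK_NSEC is 1,000,000. If two jiffies have elapsed since the last tick, the window is [tick_gtod + 2 ms, tick_gtod + 3 ms]: a raw delta that overshoots is clipped to the upper bound, and a clock that lags is pulled up to the lower bound. The user-space walk-through below uses made-up numbers to show one clipped update:

```c
/*
 * User-space walk-through of one __update_sched_clock() step,
 * assuming HZ = 1000 so TICK_NSEC == 1000000. All values made up.
 */
#include <stdio.h>
#include <stdint.h>

#define TICK_NSEC 1000000ULL

int main(void)
{
	uint64_t tick_gtod = 5000000000ULL;	/* gtod at the last tick */
	long delta_jiffies = 2;			/* two ticks have elapsed */
	uint64_t clock = 5001500000ULL;		/* current filtered clock */
	int64_t delta = 4000000;		/* raw delta: 4 ms, too big */

	uint64_t min_clock = tick_gtod + delta_jiffies * TICK_NSEC; /* +2 ms */
	uint64_t max_clock = min_clock + TICK_NSEC;		    /* +3 ms */

	if (delta < 0)
		clock++;			/* backward motion: ignore */
	else if (clock + delta > max_clock)
		clock = clock < max_clock ? max_clock : clock + 1;
	else
		clock += delta;

	if (clock < min_clock)
		clock = min_clock;

	/* prints 5003000000: the 5.5 ms overshoot was clipped to max_clock */
	printf("filtered clock = %llu\n", (unsigned long long)clock);
	return 0;
}
```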
Commit | Line | Data
---|---|---
3e51f33f PZ | 136 |
 | 137 | static void lock_double_clock(struct sched_clock_data *data1,
 | 138 |                               struct sched_clock_data *data2)
 | 139 | {
 | 140 |         if (data1 < data2) {
 | 141 |                 __raw_spin_lock(&data1->lock);
 | 142 |                 __raw_spin_lock(&data2->lock);
 | 143 |         } else {
 | 144 |                 __raw_spin_lock(&data2->lock);
 | 145 |                 __raw_spin_lock(&data1->lock);
 | 146 |         }
 | 147 | }
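Taking the pair in a fixed (address) order is the classic defence against ABBA deadlock: two CPUs sampling each other's clock concurrently would otherwise each grab their own lock and spin forever on the other's. Schematically (illustrative, not a real trace):

```c
/*
 * Without a global lock order (schematic):
 *
 *   CPU0: sched_clock_cpu(1)        CPU1: sched_clock_cpu(0)
 *     holds scd0, spins on scd1       holds scd1, spins on scd0  -> deadlock
 *
 * With address ordering, both CPUs attempt the lower-addressed lock
 * first, so one of them blocks before it holds anything the other needs.
 */
```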
Commit | Line | Data
---|---|---
3e51f33f PZ | 148 |
 | 149 | u64 sched_clock_cpu(int cpu)
 | 150 | {
 | 151 |         struct sched_clock_data *scd = cpu_sdc(cpu);
4a273f20 IM | 152 |         u64 now, clock, this_clock, remote_clock;
3e51f33f PZ | 153 |
a381759d PZ | 154 |         if (unlikely(!sched_clock_running))
 | 155 |                 return 0ull;
 | 156 |
3e51f33f PZ | 157 |         WARN_ON_ONCE(!irqs_disabled());
 | 158 |         now = sched_clock();
 | 159 |
 | 160 |         if (cpu != raw_smp_processor_id()) {
 | 161 |                 struct sched_clock_data *my_scd = this_scd();
 | 162 |
 | 163 |                 lock_double_clock(scd, my_scd);
 | 164 |
4a273f20 IM | 165 |                 this_clock = __update_sched_clock(my_scd, now);
 | 166 |                 remote_clock = scd->clock;
 | 167 |
 | 168 |                 /*
 | 169 |                  * Use the opportunity that we have both locks
 | 170 |                  * taken to couple the two clocks: we take the
 | 171 |                  * larger time as the latest time for both
 | 172 |                  * runqueues. (this creates monotonic movement)
 | 173 |                  */
 | 174 |                 if (likely(remote_clock < this_clock)) {
 | 175 |                         clock = this_clock;
 | 176 |                         scd->clock = clock;
 | 177 |                 } else {
 | 178 |                         /*
 | 179 |                          * Should be rare, but possible:
 | 180 |                          */
 | 181 |                         clock = remote_clock;
 | 182 |                         my_scd->clock = remote_clock;
 | 183 |                 }
3e51f33f PZ | 184 |
 | 185 |                 __raw_spin_unlock(&my_scd->lock);
 | 186 |         } else {
 | 187 |                 __raw_spin_lock(&scd->lock);
4a273f20 IM | 188 |                 clock = __update_sched_clock(scd, now);
3e51f33f PZ | 189 |         }
 | 190 |
e4e4e534 IM | 191 |         __raw_spin_unlock(&scd->lock);
 | 192 |
3e51f33f PZ | 193 |         return clock;
 | 194 | }
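The remote-read path thus reduces to a max() over the two per-CPU clocks, which is exactly what keeps cross-CPU observations from going backwards. A minimal rendering of that coupling step (illustrative only; the in-tree version above also publishes the result under both locks):

```c
/* The coupling step of sched_clock_cpu(), reduced to its essence. */
static u64 couple_clocks(u64 *this_clock, u64 *remote_clock)
{
	if (*remote_clock < *this_clock)
		*remote_clock = *this_clock;	/* pull the remote clock forward */
	else
		*this_clock = *remote_clock;	/* or catch up to it ourselves */

	return *this_clock;			/* both now hold the larger value */
}
```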
Commit | Line | Data
---|---|---
3e51f33f PZ | 195 |
 | 196 | void sched_clock_tick(void)
 | 197 | {
 | 198 |         struct sched_clock_data *scd = this_scd();
 | 199 |         u64 now, now_gtod;
 | 200 |
a381759d PZ | 201 |         if (unlikely(!sched_clock_running))
 | 202 |                 return;
 | 203 |
3e51f33f PZ | 204 |         WARN_ON_ONCE(!irqs_disabled());
 | 205 |
 | 206 |         now_gtod = ktime_to_ns(ktime_get());
a83bc47c | 207 |         now = sched_clock();
3e51f33f PZ | 208 |
 | 209 |         __raw_spin_lock(&scd->lock);
e4e4e534 IM | 210 |         __update_sched_clock(scd, now);
3e51f33f PZ | 211 |         /*
 | 212 |          * Update tick_gtod after __update_sched_clock() because that call
 | 213 |          * already observes the new jiffy; storing a fresh tick_gtod first
 | 214 |          * would advance the clock by two jiffies.
 | 215 |          */
 | 216 |         scd->tick_raw = now;
 | 217 |         scd->tick_gtod = now_gtod;
 | 218 |         __raw_spin_unlock(&scd->lock);
 | 219 | }
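The ordering inside sched_clock_tick() is deliberate: __update_sched_clock() clips against a window built from the old tick_gtod plus the jiffies that have elapsed since it was stored, so writing the fresh tick_gtod first would let the same jiffy be counted twice. With made-up numbers and HZ=1000:

```c
/*
 * The double-count hazard, schematically (HZ = 1000, values made up):
 *
 *   old tick_gtod            = T
 *   jiffies since last tick  = 1   =>  window base = T + 1ms
 *
 * __update_sched_clock() may legitimately advance the clock to T + 1ms.
 * Had we stored tick_gtod = T + 1ms *before* calling it, the window
 * base would have been T + 2ms: the same jiffy counted twice, letting
 * the clock jump two jiffies in a single tick.
 */
```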
Commit | Line | Data
---|---|---
3e51f33f PZ | 220 |
 | 221 | /*
 | 222 |  * We are going deep-idle (irqs are disabled):
 | 223 |  */
 | 224 | void sched_clock_idle_sleep_event(void)
 | 225 | {
 | 226 |         sched_clock_cpu(smp_processor_id());
 | 227 | }
 | 228 | EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
 | 229 |
 | 230 | /*
 | 231 |  * We just idled for delta nanoseconds (called with irqs disabled):
 | 232 |  */
 | 233 | void sched_clock_idle_wakeup_event(u64 delta_ns)
 | 234 | {
 | 235 |         struct sched_clock_data *scd = this_scd();
 | 236 |
 | 237 |         /*
 | 238 |          * Override the previous timestamp and ignore all
 | 239 |          * sched_clock() deltas that occurred while we idled,
 | 240 |          * and use the PM-provided delta_ns to advance the
 | 241 |          * rq clock:
 | 242 |          */
 | 243 |         __raw_spin_lock(&scd->lock);
 | 244 |         scd->clock += delta_ns;
 | 245 |         __raw_spin_unlock(&scd->lock);
 | 246 |
 | 247 |         touch_softlockup_watchdog();
 | 248 | }
 | 249 | EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
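The two idle hooks are meant to be used as a pair by the platform's idle code: note the entry time with a clock that keeps running across deep idle (e.g. ktime_get()) and feed the measured sleep back in as delta_ns. A hypothetical caller is sketched below; enter_deep_idle() and do_deep_idle() are made-up names, not real kernel functions.

```c
/*
 * Hypothetical idle-driver usage of the hooks above; do_deep_idle()
 * is a stand-in for the platform's low-power entry, during which the
 * TSC may stop.
 */
static void enter_deep_idle(void)
{
	ktime_t start = ktime_get();	/* keeps running across idle */

	sched_clock_idle_sleep_event();	/* irqs are already disabled */
	do_deep_idle();
	sched_clock_idle_wakeup_event(ktime_to_ns(ktime_sub(ktime_get(),
							    start)));
}
```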
Commit | Line | Data
---|---|---
3e51f33f PZ | 250 |
c1955a3d PZ | 251 | #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 | 252 |
 | 253 | void sched_clock_init(void)
 | 254 | {
 | 255 |         sched_clock_running = 1;
 | 256 | }
 | 257 |
 | 258 | u64 sched_clock_cpu(int cpu)
 | 259 | {
 | 260 |         if (unlikely(!sched_clock_running))
 | 261 |                 return 0;
 | 262 |
 | 263 |         return sched_clock();
 | 264 | }
 | 265 |
3e51f33f PZ | 266 | #endif
 | 267 |
76a2a6ee PZ | 268 | unsigned long long cpu_clock(int cpu)
 | 269 | {
 | 270 |         unsigned long long clock;
 | 271 |         unsigned long flags;
 | 272 |
2d452c9b | 273 |         local_irq_save(flags);
76a2a6ee PZ | 274 |         clock = sched_clock_cpu(cpu);
2d452c9b | 275 |         local_irq_restore(flags);
76a2a6ee PZ | 276 |
 | 277 |         return clock;
 | 278 | }
4c9fe8ad | 279 | EXPORT_SYMBOL_GPL(cpu_clock);
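cpu_clock() is the entry point for contexts that cannot guarantee interrupts are disabled; it simply brackets sched_clock_cpu() with local_irq_save()/local_irq_restore(). A hypothetical helper built on top of it (measure_ns() is a made-up name):

```c
/*
 * Hypothetical helper using cpu_clock(); measure_ns() is a made-up
 * name. Pinning to one CPU keeps both reads on the same per-cpu clock.
 */
static u64 measure_ns(void (*fn)(void))
{
	u64 t0, t1;
	int cpu = get_cpu();	/* disable preemption, pin to this CPU */

	t0 = cpu_clock(cpu);
	fn();
	t1 = cpu_clock(cpu);
	put_cpu();

	return t1 - t0;
}
```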