Commit | Line | Data |
---|---|---|
1da177e4 LT | 1 | /* calibrate.c: default delay calibration
2 | * | |
3 | * Excised from init/main.c | |
4 | * Copyright (C) 1991, 1992 Linus Torvalds | |
5 | */ | |
6 | ||
cd354f1a | 7 | #include <linux/jiffies.h> |
1da177e4 LT | 8 | #include <linux/delay.h>
9 | #include <linux/init.h> | |
941e492b | 10 | #include <linux/timex.h> |
3da757da | 11 | #include <linux/smp.h> |
7afe1845 | 12 | #include <linux/percpu.h> |
8a9e1b0f | 13 | |
f3f3149f | 14 | unsigned long lpj_fine; |
bfe8df3d | 15 | unsigned long preset_lpj; |
1da177e4 LT | 16 | static int __init lpj_setup(char *str)
17 | { | |
18 | preset_lpj = simple_strtoul(str,NULL,0); | |
19 | return 1; | |
20 | } | |
21 | ||
22 | __setup("lpj=", lpj_setup); | |
23 | ||
8a9e1b0f VP | 24 | #ifdef ARCH_HAS_READ_CURRENT_TIMER
25 | ||
26 | /* This routine uses the read_current_timer() routine and gets the | |
27 | * loops per jiffy directly, instead of guessing it using delay(). | |
28 | * Also, this code tries to handle non-maskable asynchronous events | |
29 | * (like SMIs) | |
30 | */ | |
31 | #define DELAY_CALIBRATION_TICKS ((HZ < 100) ? 1 : (HZ/100)) | |
32 | #define MAX_DIRECT_CALIBRATION_RETRIES 5 | |
33 | ||
6c81c32f | 34 | static unsigned long __cpuinit calibrate_delay_direct(void) |
8a9e1b0f VP | 35 | {
36 | unsigned long pre_start, start, post_start; | |
37 | unsigned long pre_end, end, post_end; | |
38 | unsigned long start_jiffies; | |
f3f3149f AK | 39 | unsigned long timer_rate_min, timer_rate_max;
40 | unsigned long good_timer_sum = 0; | |
41 | unsigned long good_timer_count = 0; | |
d2b46313 AW | 42 | unsigned long measured_times[MAX_DIRECT_CALIBRATION_RETRIES];
43 | int max = -1; /* index of measured_times with max/min values or not set */ | |
44 | int min = -1; | |
8a9e1b0f VP | 45 | int i;
46 | ||
47 | if (read_current_timer(&pre_start) < 0 ) | |
48 | return 0; | |
49 | ||
50 | /* | |
51 | * A simple loop like | |
52 | * while ( jiffies < start_jiffies+1) | |
53 | * start = read_current_timer(); | |
54 | * will not do. As we don't really know whether jiffy switch | |
55 | * happened first or timer_value was read first. And some asynchronous | |
56 | * event can happen between these two events introducing errors in lpj. | |
57 | * | |
58 | * So, we do | |
59 | * 1. pre_start <- When we are sure that jiffy switch hasn't happened | |
60 | * 2. check jiffy switch | |
61 | * 3. start <- timer value before or after jiffy switch | |
62 | * 4. post_start <- When we are sure that jiffy switch has happened | |
63 | * | |
64 | * Note, we don't know anything about order of 2 and 3. | |
65 | * Now, by looking at post_start and pre_start difference, we can | |
66 | * check whether any asynchronous event happened or not | |
67 | */ | |
68 | ||
69 | for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) { | |
70 | pre_start = 0; | |
71 | read_current_timer(&start); | |
72 | start_jiffies = jiffies; | |
70a06228 | 73 | while (time_before_eq(jiffies, start_jiffies + 1)) { |
8a9e1b0f VP | 74 | pre_start = start;
75 | read_current_timer(&start); | |
76 | } | |
77 | read_current_timer(&post_start); | |
78 | ||
79 | pre_end = 0; | |
80 | end = post_start; | |
70a06228 TD | 81 | while (time_before_eq(jiffies, start_jiffies + 1 +
82 | DELAY_CALIBRATION_TICKS)) { | |
8a9e1b0f VP | 83 | pre_end = end;
84 | read_current_timer(&end); | |
85 | } | |
86 | read_current_timer(&post_end); | |
87 | ||
f3f3149f AK | 88 | timer_rate_max = (post_end - pre_start) /
89 | DELAY_CALIBRATION_TICKS; | |
90 | timer_rate_min = (pre_end - post_start) / | |
91 | DELAY_CALIBRATION_TICKS; | |
8a9e1b0f VP | 92 |
93 | /* | |
f3f3149f | 94 | * If the upper limit and lower limit of the timer_rate are
8a9e1b0f VP | 95 | * >= 12.5% apart, redo calibration.
96 | */ | |
d2b46313 AW | 97 | if (start >= post_end)
98 | printk(KERN_NOTICE "calibrate_delay_direct() ignoring " | |
99 | "timer_rate as we had a TSC wrap around" | |
100 | " start=%lu >=post_end=%lu\n", | |
101 | start, post_end); | |
102 | if (start < post_end && pre_start != 0 && pre_end != 0 && | |
f3f3149f AK | 103 | (timer_rate_max - timer_rate_min) < (timer_rate_max >> 3)) {
104 | good_timer_count++; | |
105 | good_timer_sum += timer_rate_max; | |
d2b46313 AW | 106 | measured_times[i] = timer_rate_max;
107 | if (max < 0 || timer_rate_max > measured_times[max]) | |
108 | max = i; | |
109 | if (min < 0 || timer_rate_max < measured_times[min]) | |
110 | min = i; | |
111 | } else | |
112 | measured_times[i] = 0; | |
113 | ||
8a9e1b0f VP | 114 | }
115 | ||
d2b46313 AW | 116 | /*
117 | * Find the maximum & minimum - if they differ too much throw out the | |
118 | * one with the largest difference from the mean and try again... | |
119 | */ | |
120 | while (good_timer_count > 1) { | |
121 | unsigned long estimate; | |
122 | unsigned long maxdiff; | |
123 | ||
124 | /* compute the estimate */ | |
125 | estimate = (good_timer_sum/good_timer_count); | |
126 | maxdiff = estimate >> 3; | |
127 | ||
128 | /* if range is within 12% let's take it */ | |
129 | if ((measured_times[max] - measured_times[min]) < maxdiff) | |
130 | return estimate; | |
131 | ||
132 | /* ok - drop the worse value and try again... */ | |
133 | good_timer_sum = 0; | |
134 | good_timer_count = 0; | |
135 | if ((measured_times[max] - estimate) < | |
136 | (estimate - measured_times[min])) { | |
137 | printk(KERN_NOTICE "calibrate_delay_direct() dropping " | |
138 | "min bogoMips estimate %d = %lu\n", | |
139 | min, measured_times[min]); | |
140 | measured_times[min] = 0; | |
141 | min = max; | |
142 | } else { | |
143 | printk(KERN_NOTICE "calibrate_delay_direct() dropping " | |
144 | "max bogoMips estimate %d = %lu\n", | |
145 | max, measured_times[max]); | |
146 | measured_times[max] = 0; | |
147 | max = min; | |
148 | } | |
149 | ||
150 | for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) { | |
151 | if (measured_times[i] == 0) | |
152 | continue; | |
153 | good_timer_count++; | |
154 | good_timer_sum += measured_times[i]; | |
155 | if (measured_times[i] < measured_times[min]) | |
156 | min = i; | |
157 | if (measured_times[i] > measured_times[max]) | |
158 | max = i; | |
159 | } | |
160 | ||
161 | } | |
8a9e1b0f | 162 | |
d2b46313 AW | 163 | printk(KERN_NOTICE "calibrate_delay_direct() failed to get a good "
164 | "estimate for loops_per_jiffy.\nProbably due to long platform " | |
165 | "interrupts. Consider using \"lpj=\" boot option.\n"); | |
8a9e1b0f VP | 166 | return 0;
167 | } | |
168 | #else | |
6c81c32f | 169 | static unsigned long __cpuinit calibrate_delay_direct(void) {return 0;} |
8a9e1b0f VP | 170 | #endif
171 | ||
1da177e4 LT | 172 | /*
173 | * This is the number of bits of precision for the loops_per_jiffy. Each | |
191e5688 PC | 174 | * time we refine our estimate after the first takes 1.5/HZ seconds, so try
175 | * to start with a good estimate. | |
3da757da | 176 | * For the boot cpu we can skip the delay calibration and assign it a value
f3f3149f AK | 177 | * calculated based on the timer frequency.
178 | * For the rest of the CPUs we cannot assume that the timer frequency is the same as | |
3da757da | 179 | * the cpu frequency, hence do the calibration for those.
1da177e4 LT | 180 | */
181 | #define LPS_PREC 8 | |
182 | ||
71c696b1 | 183 | static unsigned long __cpuinit calibrate_delay_converge(void) |
1da177e4 | 184 | { |
191e5688 | 185 | /* First stage - slowly accelerate to find initial bounds */ |
b1b5f65e | 186 | unsigned long lpj, lpj_base, ticks, loopadd, loopadd_base, chop_limit; |
191e5688 | 187 | int trials = 0, band = 0, trial_in_band = 0; |
71c696b1 PC | 188 |
189 | lpj = (1<<12); | |
191e5688 PC | 190 |
191 | /* wait for "start of" clock tick */ | |
192 | ticks = jiffies; | |
193 | while (ticks == jiffies) | |
194 | ; /* nothing */ | |
195 | /* Go .. */ | |
196 | ticks = jiffies; | |
197 | do { | |
198 | if (++trial_in_band == (1<<band)) { | |
199 | ++band; | |
200 | trial_in_band = 0; | |
201 | } | |
202 | __delay(lpj * band); | |
203 | trials += band; | |
204 | } while (ticks == jiffies); | |
205 | /* | |
206 | * We overshot, so retreat to a clear underestimate. Then estimate | |
207 | * the largest likely undershoot. This defines our chop bounds. | |
208 | */ | |
209 | trials -= band; | |
b1b5f65e PC | 210 | loopadd_base = lpj * band;
211 | lpj_base = lpj * trials; | |
212 | ||
213 | recalibrate: | |
214 | lpj = lpj_base; | |
215 | loopadd = loopadd_base; | |
71c696b1 PC | 216 |
217 | /* | |
218 | * Do a binary approximation to get lpj set to | |
191e5688 | 219 | * equal one clock (up to LPS_PREC bits) |
71c696b1 | 220 | */ |
b1b5f65e | 221 | chop_limit = lpj >> LPS_PREC; |
191e5688 PC | 222 | while (loopadd > chop_limit) {
223 | lpj += loopadd; | |
71c696b1 PC | 224 | ticks = jiffies;
225 | while (ticks == jiffies) | |
191e5688 | 226 | ; /* nothing */ |
71c696b1 PC | 227 | ticks = jiffies;
228 | __delay(lpj); | |
229 | if (jiffies != ticks) /* longer than 1 tick */ | |
191e5688 PC | 230 | lpj -= loopadd;
231 | loopadd >>= 1; | |
71c696b1 | 232 | } |
b1b5f65e PC | 233 | /*
234 | * If we incremented every single time possible, presume we've | |
235 | * massively underestimated initially, and retry with a higher | |
236 | * start, and larger range. (Only seen on x86_64, due to SMIs) | |
237 | */ | |
238 | if (lpj + loopadd * 2 == lpj_base + loopadd_base * 2) { | |
239 | lpj_base = lpj; | |
240 | loopadd_base <<= 2; | |
241 | goto recalibrate; | |
242 | } | |
71c696b1 PC | 243 |
244 | return lpj; | |
245 | } | |
246 | ||
7afe1845 SN | 247 | static DEFINE_PER_CPU(unsigned long, cpu_loops_per_jiffy) = { 0 };
248 | ||
b565201c JS | 249 | /*
250 | * Check if cpu calibration delay is already known. For example, | |
251 | * some processors with multi-core sockets may have all cores | |
252 | * with the same calibration delay. | |
253 | * | |
254 | * Architectures should override this function if a faster calibration | |
255 | * method is available. | |
256 | */ | |
257 | unsigned long __attribute__((weak)) __cpuinit calibrate_delay_is_known(void) | |
258 | { | |
259 | return 0; | |
260 | } | |
261 | ||
71c696b1 PC | 262 | void __cpuinit calibrate_delay(void)
263 | { | |
1b19ca9f | 264 | unsigned long lpj; |
feae3203 | 265 | static bool printed; |
7afe1845 | 266 | int this_cpu = smp_processor_id(); |
1da177e4 | 267 | |
7afe1845 SN | 268 | if (per_cpu(cpu_loops_per_jiffy, this_cpu)) {
269 | lpj = per_cpu(cpu_loops_per_jiffy, this_cpu); | |
8595c539 DT | 270 | if (!printed)
271 | pr_info("Calibrating delay loop (skipped) " | |
7afe1845 SN | 272 | "already calibrated this CPU");
273 | } else if (preset_lpj) { | |
1b19ca9f | 274 | lpj = preset_lpj; |
feae3203 MT | 275 | if (!printed)
276 | pr_info("Calibrating delay loop (skipped) " | |
277 | "preset value.. "); | |
278 | } else if ((!printed) && lpj_fine) { | |
1b19ca9f | 279 | lpj = lpj_fine; |
feae3203 | 280 | pr_info("Calibrating delay loop (skipped), " |
f3f3149f | 281 | "value calculated using timer frequency.. "); |
b565201c JS | 282 | } else if ((lpj = calibrate_delay_is_known())) {
283 | ; | |
1b19ca9f | 284 | } else if ((lpj = calibrate_delay_direct()) != 0) { |
feae3203 MT | 285 | if (!printed)
286 | pr_info("Calibrating delay using timer " | |
287 | "specific routine.. "); | |
1da177e4 | 288 | } else { |
feae3203 MT | 289 | if (!printed)
290 | pr_info("Calibrating delay loop... "); | |
1b19ca9f | 291 | lpj = calibrate_delay_converge(); |
1da177e4 | 292 | } |
7afe1845 | 293 | per_cpu(cpu_loops_per_jiffy, this_cpu) = lpj; |
feae3203 MT | 294 | if (!printed)
295 | pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n", | |
1b19ca9f RK | 296 | lpj/(500000/HZ),
297 | (lpj/(5000/HZ)) % 100, lpj); | |
feae3203 | 298 | |
1b19ca9f | 299 | loops_per_jiffy = lpj; |
feae3203 | 300 | printed = true; |
1da177e4 | 301 | } |
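
A note on the bracketing trick used at file lines 69-91 of the listing: `pre_start` is the last timer value read while the jiffy was known not to have advanced, and `post_start` is read once the advance is certain, so the timer value at the opening tick edge lies somewhere in [`pre_start`, `post_start`]; likewise the closing edge lies in [`pre_end`, `post_end`]. The counts that really elapsed over `DELAY_CALIBRATION_TICKS` ticks therefore satisfy `pre_end - post_start <= elapsed <= post_end - pre_start`, which is exactly what `timer_rate_min` and `timer_rate_max` divide down to. A sample is kept only when the two bounds agree to within `timer_rate_max >> 3` (roughly 12.5%), the intermediate reads are non-zero, and no wrap occurred (`start < post_end`); a wider gap is taken as evidence that a long asynchronous event such as an SMI landed inside the window.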
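
The retry filter at file lines 116-161 can be exercised on its own. The following is a minimal userspace sketch, not kernel code: `reject_outliers()`, `NSAMPLES` and the sample values are invented for illustration, and the mean and extremes are recomputed from scratch on every pass instead of being reseeded as in the listing, but the acceptance test (spread under `estimate >> 3`) and the rule of dropping whichever extreme lies farther from the mean follow the code above.

```c
#include <stdio.h>

#define NSAMPLES 5

/* Returns 0 when no stable estimate survives, as the kernel path does. */
static unsigned long reject_outliers(unsigned long s[NSAMPLES])
{
	for (;;) {
		unsigned long sum = 0, estimate;
		int i, count = 0, min = -1, max = -1;

		/* recompute the mean and the surviving extremes */
		for (i = 0; i < NSAMPLES; i++) {
			if (!s[i])
				continue;
			sum += s[i];
			count++;
			if (min < 0 || s[i] < s[min])
				min = i;
			if (max < 0 || s[i] > s[max])
				max = i;
		}
		if (count <= 1)
			return 0;

		estimate = sum / count;
		/* accept once the spread is below ~12.5% of the mean */
		if (s[max] - s[min] < (estimate >> 3))
			return estimate;

		/* otherwise drop whichever extreme sits farther from the mean */
		if (s[max] - estimate < estimate - s[min])
			s[min] = 0;
		else
			s[max] = 0;
	}
}

int main(void)
{
	unsigned long rates[NSAMPLES] = { 24000, 24010, 23990, 31000, 24005 };

	printf("estimate: %lu\n", reject_outliers(rates));
	return 0;
}
```

Run as an ordinary C program, the 31000 outlier is dropped on the first pass and the remaining four samples average to 24001, which the filter then accepts.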
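
calibrate_delay_converge() (file lines 183-245) can likewise be tried outside the kernel. This is a hedged sketch under userspace assumptions: `HZ`, `LPS_PREC`, `busy_delay()` and `jiffies()` here are local stand-ins (a volatile busy loop and CLOCK_MONOTONIC scaled to ticks, not the kernel symbols), and the `recalibrate` retry for massive underestimates is omitted. The two phases, growing the trial delay geometrically until it overshoots one tick and then binary-chopping `loopadd` down to `lpj >> LPS_PREC`, mirror the listing.

```c
#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

#define HZ		100	/* ticks per second for this sketch */
#define LPS_PREC	8

static volatile unsigned long sink;

/* Stand-in for the kernel's __delay(): a busy loop the compiler keeps. */
static void busy_delay(unsigned long loops)
{
	while (loops--)
		sink++;
}

/* Stand-in for the kernel's jiffies: CLOCK_MONOTONIC scaled to HZ ticks. */
static unsigned long jiffies(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long)ts.tv_sec * HZ + ts.tv_nsec / (1000000000 / HZ);
}

static unsigned long calibrate_converge(void)
{
	unsigned long lpj = 1 << 12, lpj_base, loopadd, loopadd_base;
	unsigned long ticks, chop_limit;
	int trials = 0, band = 0, trial_in_band = 0;

	/* Phase 1: accelerate until a whole tick elapses inside busy_delay(). */
	ticks = jiffies();
	while (ticks == jiffies())
		;			/* align to a tick edge */
	ticks = jiffies();
	do {
		if (++trial_in_band == (1 << band)) {
			++band;
			trial_in_band = 0;
		}
		busy_delay(lpj * band);
		trials += band;
	} while (ticks == jiffies());

	trials -= band;			/* retreat to a clear underestimate */
	loopadd_base = lpj * band;
	lpj_base = lpj * trials;

	/* Phase 2: binary approximation down to LPS_PREC bits. */
	lpj = lpj_base;
	loopadd = loopadd_base;
	chop_limit = lpj >> LPS_PREC;
	while (loopadd > chop_limit) {
		lpj += loopadd;
		ticks = jiffies();
		while (ticks == jiffies())
			;		/* wait for the next tick edge */
		ticks = jiffies();
		busy_delay(lpj);
		if (jiffies() != ticks)	/* overshot: took longer than one tick */
			lpj -= loopadd;
		loopadd >>= 1;
	}
	return lpj;
}

int main(void)
{
	unsigned long lpj = calibrate_converge();

	printf("lpj=%lu (~%lu.%02lu BogoMIPS at HZ=%d)\n",
	       lpj, lpj / (500000 / HZ), (lpj / (5000 / HZ)) % 100, HZ);
	return 0;
}
```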
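
The final `pr_cont()` (file lines 294-297) reports BogoMIPS with integer arithmetic only: `lpj/(500000/HZ)` gives the integer part of `lpj * HZ / 500000` (loops per second divided by 500000) and `(lpj/(5000/HZ)) % 100` the two decimals. As an arbitrary worked example (these values are not taken from the file): with HZ = 250 and lpj = 4980736, 500000/HZ = 2000, so the integer part is 4980736/2000 = 2490, and (4980736/20) % 100 = 249036 % 100 = 36, so the printed line would read "2490.36 BogoMIPS (lpj=4980736)".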