/* arch/i386/kernel/cpu/mtrr/generic.c */
/* This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86 CPUs). */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include "mtrr.h"

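/* Cached copy of this CPU's MTRR configuration: the variable-range
   base/mask pairs, the fixed-range type bytes, and the default type
   plus the enable bits taken from the MTRRdefType MSR. */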
struct mtrr_state {
        struct mtrr_var_range *var_ranges;
        mtrr_type fixed_ranges[NUM_FIXED_RANGES];
        unsigned char enabled;
        unsigned char have_fixed;
        mtrr_type def_type;
};

static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};

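/* Override the parameter prefix so the "show" flag below appears as
   "mtrr.show" on the kernel command line (mtrr_show is __initdata, so it
   is only meaningful as a boot parameter). */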
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "mtrr."

static __initdata int mtrr_show;
module_param_named(show, mtrr_show, bool, 0);

/* Get the MSR pair relating to a var range */
static void __init
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
        rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
        rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

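/* Read all fixed-range MTRRs into the flat array behind frs.  The layout
   mirrors the hardware: one 64K MSR covers 0x00000-0x7FFFF, two 16K MSRs
   cover 0x80000-0xBFFFF and eight 4K MSRs cover 0xC0000-0xFFFFF; each MSR
   packs eight one-byte memory types, read here as two 32-bit halves. */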
static void __init
get_fixed_ranges(mtrr_type *frs)
{
        unsigned int *p = (unsigned int *) frs;
        int i;

        rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

        for (i = 0; i < 2; i++)
                rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
        for (i = 0; i < 8; i++)
                rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}

static void __init print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
        unsigned i;

        for (i = 0; i < 8; ++i, ++types, base += step)
                printk(KERN_INFO "MTRR %05X-%05X %s\n",
                       base, base + step - 1, mtrr_attrib_to_str(*types));
}

/* Grab all of the MTRR state for this CPU into mtrr_state */
void __init get_mtrr_state(void)
{
        unsigned int i;
        struct mtrr_var_range *vrs;
        unsigned lo, dummy;

        if (!mtrr_state.var_ranges) {
                mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range),
                                                GFP_KERNEL);
                if (!mtrr_state.var_ranges)
                        return;
        }
        vrs = mtrr_state.var_ranges;

        rdmsr(MTRRcap_MSR, lo, dummy);
        mtrr_state.have_fixed = (lo >> 8) & 1;

        for (i = 0; i < num_var_ranges; i++)
                get_mtrr_var_range(i, &vrs[i]);
        if (mtrr_state.have_fixed)
                get_fixed_ranges(mtrr_state.fixed_ranges);

        rdmsr(MTRRdefType_MSR, lo, dummy);
        mtrr_state.def_type = (lo & 0xff);
        mtrr_state.enabled = (lo & 0xc00) >> 10;

        if (mtrr_show) {
                int high_width;

                printk(KERN_INFO "MTRR default type: %s\n",
                       mtrr_attrib_to_str(mtrr_state.def_type));
                if (mtrr_state.have_fixed) {
                        printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
                               mtrr_state.enabled & 1 ? "en" : "dis");
                        print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
                        for (i = 0; i < 2; ++i)
                                print_fixed(0x80000 + i * 0x20000, 0x04000,
                                            mtrr_state.fixed_ranges + (i + 1) * 8);
                        for (i = 0; i < 8; ++i)
                                print_fixed(0xC0000 + i * 0x08000, 0x01000,
                                            mtrr_state.fixed_ranges + (i + 3) * 8);
                }
                printk(KERN_INFO "MTRR variable ranges %sabled:\n",
                       mtrr_state.enabled & 2 ? "en" : "dis");
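                /* Number of hex digits needed for the physical address bits
                   above bit 31 on this CPU, derived from size_or_mask. */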
                high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
                for (i = 0; i < num_var_ranges; ++i) {
                        if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
                                printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
                                       i,
                                       high_width,
                                       mtrr_state.var_ranges[i].base_hi,
                                       mtrr_state.var_ranges[i].base_lo >> 12,
                                       high_width,
                                       mtrr_state.var_ranges[i].mask_hi,
                                       mtrr_state.var_ranges[i].mask_lo >> 12,
                                       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
                        else
                                printk(KERN_INFO "MTRR %u disabled\n", i);
                }
        }
}

/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
        unsigned long mask = smp_changes_mask;

        if (!mask)
                return;
        if (mask & MTRR_CHANGE_MASK_FIXED)
                printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
        if (mask & MTRR_CHANGE_MASK_VARIABLE)
                printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
        if (mask & MTRR_CHANGE_MASK_DEFTYPE)
                printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
        printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
        printk(KERN_INFO "mtrr: corrected configuration.\n");
}

/* Doesn't attempt to pass an error out to MTRR users
   because it's quite complicated in some cases and probably not
   worth it; the best error handling here is to ignore the failure. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
        if (wrmsr_safe(msr, a, b) < 0)
                printk(KERN_ERR
                        "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
                        smp_processor_id(), msr, a, b);
}

int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
/*  [SUMMARY] Get a free MTRR.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    <replace_reg> An existing register index to reuse, or a negative value
    to allocate a fresh one.
    [RETURNS] The index of the region on success, else -ENOSPC if no
    register is free.
*/
{
        int i, max;
        mtrr_type ltype;
        unsigned long lbase, lsize;

        max = num_var_ranges;
        if (replace_reg >= 0 && replace_reg < max)
                return replace_reg;
        for (i = 0; i < max; ++i) {
                mtrr_if->get(i, &lbase, &lsize, &ltype);
                if (lsize == 0)
                        return i;
        }
        return -ENOSPC;
}

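/* Read variable-range MTRR <reg> back from the hardware.  base and size are
   returned in units of pages; a clear valid bit (bit 11 of PHYSMASK) means
   the register is free, and everything is reported as zero. */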
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
                             unsigned long *size, mtrr_type *type)
{
        unsigned int mask_lo, mask_hi, base_lo, base_hi;

        rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
        if ((mask_lo & 0x800) == 0) {
                /* Invalid (i.e. free) range */
                *base = 0;
                *size = 0;
                *type = 0;
                return;
        }

        rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

        /* Work out the shifted address mask. */
        mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
                  | mask_lo >> PAGE_SHIFT;

        /* This works correctly if size is a power of two, i.e. a
           contiguous range. */
        *size = -mask_lo;
        *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
        *type = base_lo & 0xff;
}

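/* Write the fixed-range MTRRs back from *frs, touching only the MSRs whose
   current contents differ.  Returns TRUE if any register was rewritten. */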
static int set_fixed_ranges(mtrr_type *frs)
{
        unsigned int *p = (unsigned int *) frs;
        int changed = FALSE;
        int i;
        unsigned int lo, hi;

        rdmsr(MTRRfix64K_00000_MSR, lo, hi);
        if (p[0] != lo || p[1] != hi) {
                mtrr_wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
                changed = TRUE;
        }

        for (i = 0; i < 2; i++) {
                rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
                if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
                        mtrr_wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
                                   p[3 + i * 2]);
                        changed = TRUE;
                }
        }

        for (i = 0; i < 8; i++) {
                rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
                if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
                        mtrr_wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
                                   p[7 + i * 2]);
                        changed = TRUE;
                }
        }
        return changed;
}

/* Set the MSR pair relating to a var range. Returns TRUE if
   changes are made */
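/* Only the architecturally meaningful bits are compared: 0xfffff0ff keeps
   the type field and the address bits of PHYSBASE (skipping reserved bits
   8-11), 0xfffff800 keeps the valid bit and the address bits of PHYSMASK,
   and the high halves are limited to the CPU's physical address width via
   size_and_mask. */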
static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
        unsigned int lo, hi;
        int changed = FALSE;

        rdmsr(MTRRphysBase_MSR(index), lo, hi);
        if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
            || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
                (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
                mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
                changed = TRUE;
        }

        rdmsr(MTRRphysMask_MSR(index), lo, hi);

        if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
            || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
                (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
                mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
                changed = TRUE;
        }
        return changed;
}

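/* Saved contents of the MTRRdefType MSR, captured in prepare_set() and
   written back (possibly modified by set_mtrr_state()) in post_set(). */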
static u32 deftype_lo, deftype_hi;

static unsigned long set_mtrr_state(void)
/*  [SUMMARY] Set the MTRR state for this CPU from the saved mtrr_state.
    [NOTE] The CPU must already be in a safe state for MTRR changes.
    [RETURNS] 0 if no changes were made, else a mask indicating what was
    changed.
*/
{
        unsigned int i;
        unsigned long change_mask = 0;

        for (i = 0; i < num_var_ranges; i++)
                if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
                        change_mask |= MTRR_CHANGE_MASK_VARIABLE;

        if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
                change_mask |= MTRR_CHANGE_MASK_FIXED;

        /* Set_mtrr_restore restores the old value of MTRRdefType,
           so to set it we fiddle with the saved value */
        if ((deftype_lo & 0xff) != mtrr_state.def_type
            || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
                deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
                change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
        }

        return change_mask;
}


static unsigned long cr4 = 0;
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache, don't allow any interrupts - they
 * would run extremely slowly and would only increase the pain.  The caller
 * must ensure that local interrupts are disabled and are re-enabled after
 * post_set() has been called.
 */

static void prepare_set(void) __acquires(set_atomicity_lock)
{
        unsigned long cr0;

        /* Note that this is not ideal, since the cache is only flushed/disabled
           for this CPU while the MTRRs are changed, but changing this requires
           more invasive changes to the way the kernel boots */

        spin_lock(&set_atomicity_lock);

        /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
        cr0 = read_cr0() | 0x40000000;  /* set CD flag */
        write_cr0(cr0);
        wbinvd();

        /* Save value of CR4 and clear Page Global Enable (bit 7) */
        if (cpu_has_pge) {
                cr4 = read_cr4();
                write_cr4(cr4 & ~X86_CR4_PGE);
        }

        /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
        __flush_tlb();

        /* Save MTRR state */
        rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

        /* Disable MTRRs, and set the default type to uncached */
        mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}

static void post_set(void) __releases(set_atomicity_lock)
{
        /* Flush TLBs (no need to flush caches - they are disabled) */
        __flush_tlb();

        /* Intel (P6) standard MTRRs */
        mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

        /* Enable caches */
        write_cr0(read_cr0() & 0xbfffffff);

        /* Restore value of CR4 */
        if (cpu_has_pge)
                write_cr4(cr4);
        spin_unlock(&set_atomicity_lock);
}

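/* Reprogram this CPU's MTRRs from the saved mtrr_state and record which
   register classes actually had to be changed in smp_changes_mask, which
   mtrr_state_warn() reports later. */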
static void generic_set_all(void)
{
        unsigned long mask, count;
        unsigned long flags;

        local_irq_save(flags);
        prepare_set();

        /* Actually set the state */
        mask = set_mtrr_state();

        post_set();
        local_irq_restore(flags);

        /* Use the atomic bitops to update the global mask */
        for (count = 0; count < sizeof mask * 8; ++count) {
                if (mask & 0x01)
                        set_bit(count, &smp_changes_mask);
                mask >>= 1;
        }

}

static void generic_set_mtrr(unsigned int reg, unsigned long base,
                             unsigned long size, mtrr_type type)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    [RETURNS] Nothing.
*/
{
        unsigned long flags;
        struct mtrr_var_range *vr;

        vr = &mtrr_state.var_ranges[reg];

        local_irq_save(flags);
        prepare_set();

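        /* base and size are in pages.  The memory type lives in the low byte
           of PHYSBASE and bit 11 of PHYSMASK is the valid bit; for a
           power-of-two size, -size << PAGE_SHIFT yields the required run of
           mask bits.  The cached copy in mtrr_state is kept in sync with the
           hardware. */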
        if (size == 0) {
                /* The invalid bit is kept in the mask, so we simply clear the
                   relevant mask register to disable a range. */
                mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
                memset(vr, 0, sizeof(struct mtrr_var_range));
        } else {
                vr->base_lo = base << PAGE_SHIFT | type;
                vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
                vr->mask_lo = -size << PAGE_SHIFT | 0x800;
                vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

                mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
                mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
        }

        post_set();
        local_irq_restore(flags);
}

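/* Sanity-check a requested region before it is added.  base and size are in
   units of pages (hence the "000" appended to the hex values in the messages
   below); returns 0 if the request is acceptable. */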
int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
        unsigned long lbase, last;

        /* For Intel PPro stepping <= 7, must be 4 MiB aligned
           and not touch 0x70000000->0x7003FFFF */
        if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 1 &&
            boot_cpu_data.x86_mask <= 7) {
                if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
                        printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
                        return -EINVAL;
                }
                if (!(base + size < 0x70000 || base > 0x7003F) &&
                    (type == MTRR_TYPE_WRCOMB
                     || type == MTRR_TYPE_WRBACK)) {
                        printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
                        return -EINVAL;
                }
        }

        if (base + size < 0x100) {
                printk(KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
                       base, size);
                return -EINVAL;
        }
        /* Check upper bits of base and last are equal and lower bits are 0
           for base and 1 for last */
        last = base + size - 1;
        for (lbase = base; !(lbase & 1) && (last & 1);
             lbase = lbase >> 1, last = last >> 1)
                ;
        if (lbase != last) {
                printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
                       base, size);
                return -EINVAL;
        }
        return 0;
}


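/* MTRRcap bit 10 (WC) reports whether the write-combining memory type is
   supported by this CPU. */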
static int generic_have_wrcomb(void)
{
        unsigned long config, dummy;
        rdmsr(MTRRcap_MSR, config, dummy);
        return (config & (1 << 10));
}

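/* Helper for MTRR implementations that always support write-combining; it is
   deliberately non-static so other MTRR drivers can use it as their
   have_wrcomb hook. */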
int positive_have_wrcomb(void)
{
        return 1;
}

/* Operations for the generic (Intel-style) MTRR implementation. */
struct mtrr_ops generic_mtrr_ops = {
        .use_intel_if      = 1,
        .set_all           = generic_set_all,
        .get               = generic_get_mtrr,
        .get_free_region   = generic_get_free_region,
        .set               = generic_set_mtrr,
        .validate_add_page = generic_validate_add_page,
        .have_wrcomb       = generic_have_wrcomb,
};