x86: print physical addresses consistently with other parts of kernel
arch/x86/mm/pat.c (deliverable/linux.git)

/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
	(void)reason;
}
#endif

int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};
#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
	u64 pat;
	bool boot_cpu = !boot_pat_state;

	if (!pat_enabled)
		return;

	if (!cpu_has_pat) {
		if (!boot_pat_state) {
			pat_disable("PAT not supported by CPU.");
			return;
		} else {
			/*
			 * If this happens we are on a secondary CPU, but
			 * switched to PAT on the boot CPU. We have no way to
			 * undo PAT.
			 */
			printk(KERN_ERR "PAT enabled, "
			       "but not supported by secondary CPU\n");
			BUG();
		}
	}

	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);

	if (boot_cpu)
		printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
		       smp_processor_id(), boot_pat_state, pat);
}
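
/*
 * Worked example (editor's illustration, not part of the original source):
 * PAT(x, y) shifts the 3-bit type value into byte x of the 64-bit MSR, so
 * the assignment in pat_init() above packs slots 0..7 as WB(6), WC(1),
 * UC-(7), UC(0), WB(6), WC(1), UC-(7), UC(0), giving:
 *
 *	pat == 0x0007010600070106ULL
 *
 * i.e. byte n of MSR_IA32_CR_PAT holds the memory type of PAT entry n.
 */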

#undef PAT

static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */

/*
 * Intersects the PAT memory type with the MTRR memory type and returns
 * the effective memory type as PAT understands it.
 * (The PAT and MTRR type encodings do not use the same values.)
 * The intersection is based on the "Effective Memory Type" tables in
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
	/*
	 * Look for an MTRR hint to get the effective type when the PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_UC_MINUS;

		return _PAGE_CACHE_WB;
	}

	return req_type;
}
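
/*
 * Example (editor's illustration): a WB request over a range whose MTRR
 * type is MTRR_TYPE_UNCACHABLE comes back downgraded:
 *
 *	pat_x_mtrr_type(start, end, _PAGE_CACHE_WB);
 *						// -> _PAGE_CACHE_UC_MINUS
 *
 * Non-WB requests (WC, UC-, UC) are passed through unchanged.
 */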

static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		/*
		 * For legacy reasons, the physical address range in the
		 * legacy ISA region is tracked as non-RAM. This allows
		 * users of /dev/mem to map portions of the legacy ISA
		 * region, even when some of those portions are listed
		 * (or not even listed) with different e820 types
		 * (RAM/reserved/..).
		 */
		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
		    page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}
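
/*
 * Editor's note on the return value: 1 means every page in the range is
 * RAM, 0 means none is, and -1 flags a mixed range, which callers such
 * as reserve_memtype() treat as an error. The legacy ISA region below
 * ISA_END_ADDRESS always counts as non-RAM here.
 */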

/*
 * For RAM pages, we use page flags to mark the pages with the appropriate
 * type. Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts
 * - In case of no conflicts, set the new memtype for pages in the range
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		unsigned long type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != -1) {
			printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
				start, end - 1, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, -1);
	}
	return 0;
}
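
/*
 * Conflict example (editor's illustration): two reservations of the same
 * RAM page with different types; the second call fails:
 *
 *	reserve_ram_pages_type(p, p + PAGE_SIZE, _PAGE_CACHE_WC, &t);
 *					// returns 0, t = _PAGE_CACHE_WC
 *	reserve_ram_pages_type(p, p + PAGE_SIZE, _PAGE_CACHE_UC_MINUS, &t);
 *					// returns -EBUSY, t = _PAGE_CACHE_WC
 */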

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * If new_type is NULL, the function returns an error if it cannot reserve
 * the region with req_type. If new_type is non-NULL, the function returns
 * the available type in new_type when there is no error. On any error it
 * returns a negative value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *new_type)
{
	struct memtype *new;
	unsigned long actual_type;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == _PAGE_CACHE_WC)
				*new_type = _PAGE_CACHE_UC_MINUS;
			else
				*new_type = req_type & _PAGE_CACHE_MASK;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = reserve_ram_pages_type(start, end, req_type, new_type);
		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start = start;
	new->end = end;
	new->type = actual_type;

	spin_lock(&memtype_lock);

	err = rbt_memtype_check_insert(new, new_type);
	if (err) {
		printk(KERN_INFO "reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
		       start, end - 1,
		       cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
		start, end - 1, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}
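
/*
 * Usage sketch (editor's illustration, not from the original file): a
 * caller reserving an MMIO range as write-combining and accepting any
 * compatible type the tracker hands back:
 *
 *	unsigned long new_type;
 *	int err = reserve_memtype(start, start + size,
 *				  _PAGE_CACHE_WC, &new_type);
 *	if (err)
 *		return err;
 *	...
 *	free_memtype(start, start + size);	// on teardown
 */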

int free_memtype(u64 start, u64 end)
{
	int err = -EINVAL;
	int is_range_ram;
	struct memtype *entry;

	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = free_ram_pages_type(start, end);
		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);
	entry = rbt_memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (!entry) {
		printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
		       current->comm, current->pid, start, end - 1);
		return -EINVAL;
	}

	kfree(entry);

	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);

	return 0;
}

/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address whose memory type needs to be looked up
 *
 * Only to be called when PAT is enabled.
 *
 * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
 * _PAGE_CACHE_UC
 */
static unsigned long lookup_memtype(u64 paddr)
{
	int rettype = _PAGE_CACHE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;
		page = pfn_to_page(paddr >> PAGE_SHIFT);
		rettype = get_page_memtype(page);
		/*
		 * -1 from get_page_memtype() implies RAM page is in its
		 * default state and not reserved, and hence of type WB
		 */
		if (rettype == -1)
			rettype = _PAGE_CACHE_WB;

		return rettype;
	}

	spin_lock(&memtype_lock);

	entry = rbt_memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with the requested type. On success, the
 * requested type or any other compatible type available for the region is
 * returned.
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			unsigned long *type)
{
	resource_size_t size = end - start;
	unsigned long req_type = *type;
	unsigned long new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;
	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}
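
/*
 * Usage sketch (editor's illustration): ioremap-style callers bracket
 * the lifetime of a mapping with this pair:
 *
 *	unsigned long type = _PAGE_CACHE_UC_MINUS;
 *	if (io_reserve_memtype(start, start + size, &type))
 *		return NULL;		// busy or conflicting type
 *	...				// use the mapping with 'type'
 *	io_free_memtype(start, start + size);
 */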

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in the case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled)
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
				current->comm, from, to - 1);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	unsigned long flags = _PAGE_CACHE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		flags = _PAGE_CACHE_UC_MINUS;

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}

/*
 * Change the memory type for the physical address range in the kernel
 * identity mapping space if that range is part of the identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
{
	unsigned long id_sz;

	if (base >= __pa(high_memory))
		return 0;

	id_sz = (__pa(high_memory) < base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
		printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
			"for [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid,
			cattr_name(flags),
			base, (unsigned long long)(base + size - 1));
		return -EINVAL;
	}
	return 0;
}
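
/*
 * Worked example (editor's illustration): with 1 GB of lowmem, i.e.
 * __pa(high_memory) == 0x40000000, a call with base = 0x3fff0000 and
 * size = 0x20000 straddles the boundary, so only the identity-mapped
 * part is changed:
 *
 *	id_sz = 0x40000000 - 0x3fff0000 = 0x10000
 *
 * A base at or above __pa(high_memory) is a no-op that returns 0.
 */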

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype, it
 * also keeps the identity mapping (if any) in sync with the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
	unsigned long flags = want_flags;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages: we do not refcount to keep
	 * track of the number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of the first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled)
			return 0;

		flags = lookup_memtype(paddr);
		if (want_flags != flags) {
			printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(flags));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					     (~_PAGE_CACHE_MASK)) |
					     flags);
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
	if (ret)
		return ret;

	if (flags != want_flags) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
				" for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(flags));
			return -EINVAL;
		}
		/*
		 * We allow returning a different type than the one requested
		 * in the non-strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				      flags);
	}

	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}
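
/*
 * Editor's note: strict_prot == 1 (the fork/copy path) insists on exactly
 * the requested type, while strict_prot == 0 (the mmap path) accepts any
 * compatible type from reserve_memtype() and rewrites *vma_prot to match,
 * e.g. a WB request over UC- MMIO space is mapped UC- instead of failing.
 */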

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_vma_copy is called when a vma covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the
 * prot from the pte and reserve the entire vma range with a single
 * reserve_pfn_range call.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (is_linear_pfn_mapping(vma)) {
		/*
		 * reserve the whole chunk covered by the vma. We need the
		 * starting address and protection from the pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for the physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, reserve the entire vma range
 * with a single reserve_pfn_range call.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
			unsigned long pfn, unsigned long size)
{
	unsigned long flags;
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (is_linear_pfn_mapping(vma)) {
		/* reserve the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		return reserve_pfn_range(paddr, vma_size, prot, 0);
	}

	if (!pat_enabled)
		return 0;

	/* for vm_insert_pfn and friends, we set prot based on lookup */
	flags = lookup_memtype(pfn << PAGE_SHIFT);
	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 flags);

	return 0;
}

/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size,
 * or for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size)
{
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (is_linear_pfn_mapping(vma)) {
		/* free the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		free_pfn_range(paddr, vma_size);
		return;
	}
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
	else
		return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
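
/*
 * Usage sketch (editor's illustration): a framebuffer driver would
 * typically do
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *
 * before remap_pfn_range(); with PAT disabled this quietly falls back
 * to an uncached mapping via pgprot_noncached().
 */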

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *print_entry;
	int ret;

	print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = rbt_memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);

	if (!ret) {
		return print_entry;
	} else {
		kfree(print_entry);
		return NULL;
	}
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_printf(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}
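
/*
 * Sample output (editor's illustration; the addresses are made up):
 *
 *	# cat /sys/kernel/debug/x86/pat_memtype_list
 *	PAT memtype list:
 *	write-combining @ 0xd0000000-0xd1000000
 *	uncached-minus @ 0xfed00000-0xfed01000
 */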

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next = memtype_seq_next,
	.stop = memtype_seq_stop,
	.show = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open = memtype_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	if (pat_enabled) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */