/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"
#include "mm_internal.h"

#undef pr_fmt
#define pr_fmt(fmt) "" fmt
static bool boot_cpu_done;

static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);

static inline void pat_disable(const char *reason)
{
        __pat_enabled = 0;
        pr_info("x86/PAT: %s\n", reason);
}

static int __init nopat(char *str)
{
        pat_disable("PAT support disabled.");
        return 0;
}
early_param("nopat", nopat);

bool pat_enabled(void)
{
        return !!__pat_enabled;
}
EXPORT_SYMBOL_GPL(pat_enabled);

int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
        pat_debug_enable = 1;
        return 0;
}
__setup("debugpat", pat_debug_setup);
#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses the page flags WC and Uncached together to keep track of
 * the memory type of pages that have a backing page struct. X86 PAT supports
 * three different memory types, _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC and
 * _PAGE_CACHE_MODE_UC_MINUS, plus a fourth state where the page's memory type
 * has not been changed from its default (a value of -1 is used to denote
 * this).
 * Note we do not support _PAGE_CACHE_MODE_UC here.
 */

#define _PGMT_DEFAULT		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WB		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)
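/*
 * The two page flags above encode the four tracking states:
 *
 *	PG_uncached  PG_arch_1	tracked memtype
 *	     0           0	default (not tracked, reported as -1)
 *	     0           1	_PAGE_CACHE_MODE_WC
 *	     1           0	_PAGE_CACHE_MODE_UC_MINUS
 *	     1           1	_PAGE_CACHE_MODE_WB
 */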
static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
        unsigned long pg_flags = pg->flags & _PGMT_MASK;

        if (pg_flags == _PGMT_DEFAULT)
                return -1;
        else if (pg_flags == _PGMT_WC)
                return _PAGE_CACHE_MODE_WC;
        else if (pg_flags == _PGMT_UC_MINUS)
                return _PAGE_CACHE_MODE_UC_MINUS;
        else
                return _PAGE_CACHE_MODE_WB;
}
static inline void set_page_memtype(struct page *pg,
                                    enum page_cache_mode memtype)
{
        unsigned long memtype_flags;
        unsigned long old_flags;
        unsigned long new_flags;

        switch (memtype) {
        case _PAGE_CACHE_MODE_WC:
                memtype_flags = _PGMT_WC;
                break;
        case _PAGE_CACHE_MODE_UC_MINUS:
                memtype_flags = _PGMT_UC_MINUS;
                break;
        case _PAGE_CACHE_MODE_WB:
                memtype_flags = _PGMT_WB;
                break;
        default:
                memtype_flags = _PGMT_DEFAULT;
                break;
        }

        do {
                old_flags = pg->flags;
                new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
        } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else

static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
        return -1;
}
static inline void set_page_memtype(struct page *pg,
                                    enum page_cache_mode memtype)
{
}

#endif
enum {
        PAT_UC = 0,             /* uncached */
        PAT_WC = 1,             /* Write combining */
        PAT_WT = 4,             /* Write Through */
        PAT_WP = 5,             /* Write Protected */
        PAT_WB = 6,             /* Write Back (default) */
        PAT_UC_MINUS = 7,       /* UC, but can be overridden by MTRR */
};

#define CM(c) (_PAGE_CACHE_MODE_ ## c)
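/*
 * The PAT_* values above are the hardware memory-type encodings programmed
 * into the eight byte-sized slots of the IA32_PAT MSR (only the low three
 * bits of each slot are used; see pat_init() and pat_init_cache_modes()
 * below).  CM() simply maps a slot's symbolic name to the corresponding
 * Linux _PAGE_CACHE_MODE_* constant.
 */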
static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
{
        enum page_cache_mode cache;
        char *cache_mode;

        switch (pat_val) {
        case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
        case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
        case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
        case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
        case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
        case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
        default:           cache = CM(WB);       cache_mode = "WB  "; break;
        }

        memcpy(msg, cache_mode, 4);

        return cache;
}
#undef CM
/*
 * Update the cache mode to pgprot translation tables according to PAT
 * configuration.
 * Using lower indices is preferred, so we start with the highest index.
 */
void pat_init_cache_modes(u64 pat)
{
        enum page_cache_mode cache;
        char pat_msg[33];
        int i;

        pat_msg[32] = 0;
        for (i = 7; i >= 0; i--) {
                cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
                                           pat_msg + 4 * i);
                update_cache_mode_entry(i, cache);
        }
        pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
}
#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))

static void pat_bsp_init(u64 pat)
{
        u64 tmp_pat;

        if (!cpu_has_pat) {
                pat_disable("PAT not supported by CPU.");
                return;
        }

        if (!pat_enabled())
                goto done;

        rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
        if (!tmp_pat) {
                pat_disable("PAT MSR is 0, disabled.");
                return;
        }

        wrmsrl(MSR_IA32_CR_PAT, pat);

done:
        pat_init_cache_modes(pat);
}

static void pat_ap_init(u64 pat)
{
        if (!pat_enabled())
                return;

        if (!cpu_has_pat) {
                /*
                 * If this happens we are on a secondary CPU, but switched to
                 * PAT on the boot CPU. We have no way to undo PAT.
                 */
                panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
        }

        wrmsrl(MSR_IA32_CR_PAT, pat);
}
void pat_init(void)
{
        u64 pat;

        if (!pat_enabled()) {
                /*
                 * No PAT. Emulate the PAT table that corresponds to the two
                 * cache bits, PWT (Write Through) and PCD (Cache Disable). This
                 * setup is the same as the BIOS default setup when the system
                 * has PAT but the "nopat" boot option has been specified. This
                 * emulated PAT table is used when MSR_IA32_CR_PAT returns 0.
                 *
                 * PTE encoding:
                 *
                 *       PCD
                 *       |PWT  PAT
                 *       ||    slot
                 *       00    0    WB : _PAGE_CACHE_MODE_WB
                 *       01    1    WT : _PAGE_CACHE_MODE_WT
                 *       10    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
                 *       11    3    UC : _PAGE_CACHE_MODE_UC
                 *
                 * NOTE: When WC or WP is used, it is redirected to UC- per
                 * the default setup in __cachemode2pte_tbl[].
                 */
                pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
                      PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
        } else {
                /*
                 * PTE encoding used in Linux:
                 *      PAT
                 *      |PCD
                 *      ||PWT  PAT
                 *      |||    slot
                 *      000    0    WB : _PAGE_CACHE_MODE_WB
                 *      001    1    WC : _PAGE_CACHE_MODE_WC
                 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
                 *      011    3    UC : _PAGE_CACHE_MODE_UC
                 * PAT bit unused
                 */
                pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
                      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
        }

        if (!boot_cpu_done) {
                pat_bsp_init(pat);
                boot_cpu_done = true;
        } else {
                pat_ap_init(pat);
        }
}

#undef PAT
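/*
 * For reference, the full-PAT table built above works out to
 * MSR_IA32_CR_PAT = 0x0007010600070106 (slots 0-3 = WB, WC, UC-, UC,
 * mirrored in slots 4-7), while the no-PAT emulation table works out to
 * 0x0007040600070406 (WB, WT, UC-, UC, mirrored).
 */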
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */

/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (Type in pat and mtrr will not have the same value.)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end,
                                     enum page_cache_mode req_type)
{
        /*
         * Look for MTRR hint to get the effective type in case where PAT
         * request is for WB.
         */
        if (req_type == _PAGE_CACHE_MODE_WB) {
                u8 mtrr_type, uniform;

                mtrr_type = mtrr_type_lookup(start, end, &uniform);
                if (mtrr_type != MTRR_TYPE_WRBACK)
                        return _PAGE_CACHE_MODE_UC_MINUS;

                return _PAGE_CACHE_MODE_WB;
        }

        return req_type;
}
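/*
 * Example: a WB request over a range that an MTRR marks as UC or WC comes
 * back from pat_x_mtrr_type() as UC-, so the effective type does not end up
 * more cacheable than what the MTRRs allow.
 */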
struct pagerange_state {
        unsigned long		cur_pfn;
        int			ram;
        int			not_ram;
};
static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
{
        struct pagerange_state *state = arg;

        state->not_ram	|= initial_pfn > state->cur_pfn;
        state->ram	|= total_nr_pages > 0;
        state->cur_pfn	 = initial_pfn + total_nr_pages;

        return state->ram && state->not_ram;
}
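/*
 * pat_pagerange_is_ram() below returns 1 when the range is backed by RAM
 * (its memtype is tracked via struct page flags), 0 when it contains no RAM
 * (tracked in the memtype rbtree), and a negative value when RAM and non-RAM
 * pages are mixed, which the callers treat as an error.
 */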
static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
        int ret = 0;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct pagerange_state state = {start_pfn, 0, 0};

        /*
         * For legacy reasons, physical address range in the legacy ISA
         * region is tracked as non-RAM. This will allow users of
         * /dev/mem to map portions of legacy ISA region, even when
         * some of those portions are listed (or not even listed) with
         * different e820 types (RAM/reserved/..)
         */

        if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
                start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

        if (start_pfn < end_pfn) {
                ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
                                &state, pagerange_is_ram_callback);
        }

        return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}
/*
 * For RAM pages, we use page flags to mark the pages with the appropriate
 * type. Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts.
 * - In case of no conflicts, set the new memtype for the pages in the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end,
                                  enum page_cache_mode req_type,
                                  enum page_cache_mode *new_type)
{
        struct page *page;
        u64 pfn;

        if (req_type == _PAGE_CACHE_MODE_UC) {
                /* We do not support strong UC */
                WARN_ON_ONCE(1);
                req_type = _PAGE_CACHE_MODE_UC_MINUS;
        }

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                enum page_cache_mode type;

                page = pfn_to_page(pfn);
                type = get_page_memtype(page);
                if (type != -1) {
                        pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
                                start, end - 1, type, req_type);
                        if (new_type)
                                *new_type = type;

                        return -EBUSY;
                }
        }

        if (new_type)
                *new_type = req_type;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                set_page_memtype(page, req_type);
        }
        return 0;
}
static int free_ram_pages_type(u64 start, u64 end)
{
        struct page *page;
        u64 pfn;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                set_page_memtype(page, -1);
        }
        return 0;
}
/*
 * req_type typically has one of the following:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_UC
 *
 * If new_type is NULL, the function will return an error if it cannot
 * reserve the region with req_type. If new_type is non-NULL, the function
 * will return the available type in new_type in case of no error. In case
 * of any error it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
                    enum page_cache_mode *new_type)
{
        struct memtype *new;
        enum page_cache_mode actual_type;
        int is_range_ram;
        int err = 0;

        BUG_ON(start >= end); /* end is exclusive */

        if (!pat_enabled()) {
                /* This is identical to page table setting without PAT */
                if (new_type) {
                        if (req_type == _PAGE_CACHE_MODE_WC)
                                *new_type = _PAGE_CACHE_MODE_UC_MINUS;
                        else
                                *new_type = req_type;
                }
                return 0;
        }

        /* Low ISA region is always mapped WB in page table. No need to track */
        if (x86_platform.is_untracked_pat_range(start, end)) {
                if (new_type)
                        *new_type = _PAGE_CACHE_MODE_WB;
                return 0;
        }

        /*
         * Call mtrr_lookup to get the type hint. This is an
         * optimization for /dev/mem mmap'ers into WB memory (BIOS
         * tools and ACPI tools). Use WB request for WB memory and use
         * UC_MINUS otherwise.
         */
        actual_type = pat_x_mtrr_type(start, end, req_type);

        if (new_type)
                *new_type = actual_type;

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1) {

                err = reserve_ram_pages_type(start, end, req_type, new_type);

                return err;
        } else if (is_range_ram < 0) {
                return -EINVAL;
        }

        new  = kzalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->start	= start;
        new->end	= end;
        new->type	= actual_type;

        spin_lock(&memtype_lock);

        err = rbt_memtype_check_insert(new, new_type);
        if (err) {
                pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
                        start, end - 1,
                        cattr_name(new->type), cattr_name(req_type));
                kfree(new);
                spin_unlock(&memtype_lock);

                return err;
        }

        spin_unlock(&memtype_lock);

        dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
                start, end - 1, cattr_name(new->type), cattr_name(req_type),
                new_type ? cattr_name(*new_type) : "-");

        return err;
}
int free_memtype(u64 start, u64 end)
{
        int err = -EINVAL;
        int is_range_ram;
        struct memtype *entry;

        if (!pat_enabled())
                return 0;

        /* Low ISA region is always mapped WB. No need to track */
        if (x86_platform.is_untracked_pat_range(start, end))
                return 0;

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1) {

                err = free_ram_pages_type(start, end);

                return err;
        } else if (is_range_ram < 0) {
                return -EINVAL;
        }

        spin_lock(&memtype_lock);
        entry = rbt_memtype_erase(start, end);
        spin_unlock(&memtype_lock);

        if (!entry) {
                pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
                        current->comm, current->pid, start, end - 1);
                return -EINVAL;
        }

        kfree(entry);

        dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);

        return 0;
}
/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
 * or _PAGE_CACHE_MODE_UC
 */
static enum page_cache_mode lookup_memtype(u64 paddr)
{
        enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
        struct memtype *entry;

        if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
                return rettype;

        if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
                struct page *page;

                page = pfn_to_page(paddr >> PAGE_SHIFT);
                rettype = get_page_memtype(page);
                /*
                 * -1 from get_page_memtype() implies RAM page is in its
                 * default state and not reserved, and hence of type WB
                 */
                if (rettype == -1)
                        rettype = _PAGE_CACHE_MODE_WB;

                return rettype;
        }

        spin_lock(&memtype_lock);

        entry = rbt_memtype_lookup(paddr);
        if (entry != NULL)
                rettype = entry->type;
        else
                rettype = _PAGE_CACHE_MODE_UC_MINUS;

        spin_unlock(&memtype_lock);
        return rettype;
}
/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
                        enum page_cache_mode *type)
{
        resource_size_t size = end - start;
        enum page_cache_mode req_type = *type;
        enum page_cache_mode new_type;
        int ret;

        WARN_ON_ONCE(iomem_map_sanity_check(start, size));

        ret = reserve_memtype(start, end, req_type, &new_type);
        if (ret)
                goto out_err;

        if (!is_new_memtype_allowed(start, size, req_type, new_type))
                goto out_free;

        if (kernel_map_sync_memtype(start, size, new_type) < 0)
                goto out_free;

        *type = new_type;

        return 0;

out_free:
        free_memtype(start, end);
        ret = -EBUSY;
out_err:
        return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
        free_memtype(start, end);
}
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t vma_prot)
{
        return vma_prot;
}
#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        if (!pat_enabled())
                return 1;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        pr_info("x86/PAT: Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n",
                                current->comm, from, to - 1);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t *vma_prot)
{
        enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;

        if (!range_is_allowed(pfn, size))
                return 0;

        if (file->f_flags & O_DSYNC)
                pcm = _PAGE_CACHE_MODE_UC_MINUS;

#ifdef CONFIG_X86_32
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting UC or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (!pat_enabled() &&
            !(boot_cpu_has(X86_FEATURE_MTRR) ||
              boot_cpu_has(X86_FEATURE_K6_MTRR) ||
              boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
              boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
            (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
                pcm = _PAGE_CACHE_MODE_UC;
        }
#endif

        *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
                             cachemode2protval(pcm));
        return 1;
}
/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of the identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size,
                            enum page_cache_mode pcm)
{
        unsigned long id_sz;

        if (base > __pa(high_memory-1))
                return 0;

        /*
         * Some areas in the middle of the kernel identity range
         * are not mapped, like the PCI space.
         */
        if (!page_is_ram(base >> PAGE_SHIFT))
                return 0;

        id_sz = (__pa(high_memory-1) <= base + size) ?
                                __pa(high_memory) - base :
                                size;

        if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
                pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
                        current->comm, current->pid,
                        cattr_name(pcm),
                        base, (unsigned long long)(base + size-1));
                return -EINVAL;
        }
        return 0;
}
/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only, and after a successful reserve_memtype,
 * this func also keeps the identity mapping (if any) in sync with this
 * new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                                int strict_prot)
{
        int is_ram = 0;
        int ret;
        enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
        enum page_cache_mode pcm = want_pcm;

        is_ram = pat_pagerange_is_ram(paddr, paddr + size);

        /*
         * reserve_pfn_range() for RAM pages. We do not refcount to keep
         * track of number of mappings of RAM pages. We can assert that
         * the type requested matches the type of first page in the range.
         */
        if (is_ram) {
                if (!pat_enabled())
                        return 0;

                pcm = lookup_memtype(paddr);
                if (want_pcm != pcm) {
                        pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
                                current->comm, current->pid,
                                cattr_name(want_pcm),
                                (unsigned long long)paddr,
                                (unsigned long long)(paddr + size - 1),
                                cattr_name(pcm));
                        *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                             (~_PAGE_CACHE_MASK)) |
                                             cachemode2protval(pcm));
                }
                return 0;
        }

        ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
        if (ret)
                return ret;

        if (pcm != want_pcm) {
                if (strict_prot ||
                    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
                        free_memtype(paddr, paddr + size);
                        pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
                               current->comm, current->pid,
                               cattr_name(want_pcm),
                               (unsigned long long)paddr,
                               (unsigned long long)(paddr + size - 1),
                               cattr_name(pcm));
                        return -EINVAL;
                }
                /*
                 * We allow returning different type than the one requested in
                 * non strict case.
                 */
                *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                      (~_PAGE_CACHE_MASK)) |
                                     cachemode2protval(pcm));
        }

        if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
                free_memtype(paddr, paddr + size);
                return -EINVAL;
        }
        return 0;
}
/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
        int is_ram;

        is_ram = pat_pagerange_is_ram(paddr, paddr + size);
        if (is_ram == 0)
                free_memtype(paddr, paddr + size);
}
/*
 * track_pfn_copy is called when the vma that is covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range call.
 */
int track_pfn_copy(struct vm_area_struct *vma)
{
        resource_size_t paddr;
        unsigned long prot;
        unsigned long vma_size = vma->vm_end - vma->vm_start;
        pgprot_t pgprot;

        if (vma->vm_flags & VM_PAT) {
                /*
                 * reserve the whole chunk covered by vma. We need the
                 * starting address and protection from pte.
                 */
                if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
                        WARN_ON_ONCE(1);
                        return -EINVAL;
                }
                pgprot = __pgprot(prot);
                return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
        }

        return 0;
}
/*
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, reserve the entire vma range with
 * a single reserve_pfn_range call.
 */
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
                    unsigned long pfn, unsigned long addr, unsigned long size)
{
        resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
        enum page_cache_mode pcm;

        /* reserve the whole chunk starting from paddr */
        if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
                int ret;

                ret = reserve_pfn_range(paddr, size, prot, 0);
                if (!ret)
                        vma->vm_flags |= VM_PAT;
                return ret;
        }

        if (!pat_enabled())
                return 0;

        /*
         * For anything smaller than the vma size we set prot based on the
         * lookup.
         */
        pcm = lookup_memtype(paddr);

        /* Check memtype for the remaining pages */
        while (size > PAGE_SIZE) {
                size -= PAGE_SIZE;
                paddr += PAGE_SIZE;
                if (pcm != lookup_memtype(paddr))
                        return -EINVAL;
        }

        *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
                         cachemode2protval(pcm));

        return 0;
}
int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
                     unsigned long pfn)
{
        enum page_cache_mode pcm;

        if (!pat_enabled())
                return 0;

        /* Set prot based on lookup */
        pcm = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
        *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
                         cachemode2protval(pcm));

        return 0;
}
/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn, size are zero).
 */
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
                 unsigned long size)
{
        resource_size_t paddr;
        unsigned long prot;

        if (!(vma->vm_flags & VM_PAT))
                return;

        /* free the chunk starting from pfn or the whole chunk */
        paddr = (resource_size_t)pfn << PAGE_SHIFT;
        if (!paddr && !size) {
                if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
                        WARN_ON_ONCE(1);
                        return;
                }

                size = vma->vm_end - vma->vm_start;
        }
        free_pfn_range(paddr, size);
        vma->vm_flags &= ~VM_PAT;
}
pgprot_t pgprot_writecombine(pgprot_t prot)
{
        if (pat_enabled())
                return __pgprot(pgprot_val(prot) |
                                cachemode2protval(_PAGE_CACHE_MODE_WC));
        else
                return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
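/*
 * Without PAT there is no page-table encoding for write-combining, so
 * pgprot_writecombine() above degrades to the uncached mapping returned by
 * pgprot_noncached().
 */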
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* Copy the pos'th memtype entry out of the rbtree under the memtype lock. */
static struct memtype *memtype_get_idx(loff_t pos)
{
        struct memtype *print_entry;
        int ret;

        print_entry  = kzalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!print_entry)
                return NULL;

        spin_lock(&memtype_lock);
        ret = rbt_memtype_copy_nth_element(print_entry, pos);
        spin_unlock(&memtype_lock);

        if (!ret) {
                return print_entry;
        } else {
                kfree(print_entry);
                return NULL;
        }
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos == 0) {
                ++*pos;
                seq_puts(seq, "PAT memtype list:\n");
        }

        return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
        struct memtype *print_entry = (struct memtype *)v;

        seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
                        print_entry->start, print_entry->end);
        kfree(print_entry);

        return 0;
}

static const struct seq_operations memtype_seq_ops = {
        .start = memtype_seq_start,
        .next  = memtype_seq_next,
        .stop  = memtype_seq_stop,
        .show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
        .open    = memtype_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
        if (pat_enabled()) {
                debugfs_create_file("pat_memtype_list", S_IRUSR,
                                    arch_debugfs_dir, NULL, &memtype_fops);
        }
        return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */