powerpc: Add hugepage support to 64-bit tablewalk code for FSL_BOOK3E
[deliverable/linux.git] arch/powerpc/include/asm/hugetlb.h
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>

extern struct kmem_cache *hugepte_cache;
extern void __init reserve_hugetlb_gpages(void);

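/*
 * Layout note (added commentary, inferred from the two accessors below
 * rather than stated in the original source): a hugepd_t packs the
 * hugepte-table pointer and the hugepage shift into a single word.  The
 * low HUGEPD_SHIFT_MASK bits carry the shift; masking them off and
 * setting PD_HUGE recovers the table address.
 */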
static inline pte_t *hugepd_page(hugepd_t hpd)
{
        BUG_ON(!hugepd_ok(hpd));
        return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
        return hpd.pd & HUGEPD_SHIFT_MASK;
}

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
                                    unsigned pdshift)
{
        /*
         * On FSL BookE, we have multiple higher-level table entries that
         * point to the same hugepte.  Just use the first one since they're
         * all identical.  So for that case, idx = 0.
         */
        unsigned long idx = 0;

        pte_t *dir = hugepd_page(*hpdp);
#ifndef CONFIG_PPC_FSL_BOOK3E
        idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
#endif

        return dir + idx;
}
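
/*
 * Worked example (illustrative numbers, not from the original source):
 * with 4M hugepages (shift 22) hanging off a table entry that covers
 * 1 << 30 bytes (pdshift 30), the hugepte table holds
 * 1 << (30 - 22) = 256 entries, and idx selects the one for addr's
 * 4M slot within that 1G region.
 */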

pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
                                 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                           unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                         unsigned long addr,
                                         unsigned long len)
{
        return 0;
}
#endif
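/*
 * Note (added commentary): with MM slices or subpage protection, parts
 * of the address space can be dedicated to a single page size, so the
 * real check lives in arch/powerpc/mm behind the prototype above.
 * Everywhere else a range is never hugepage-only and the stub returns 0.
 */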

void book3e_hugetlb_preload(struct mm_struct *mm, unsigned long ea, pte_t pte);
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                            unsigned long end, unsigned long floor,
                            unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
                        unsigned long addr, unsigned long len)
{
        struct hstate *h = hstate_file(file);
        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (addr & ~huge_page_mask(h))
                return -EINVAL;
        return 0;
}
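
/*
 * Example (illustrative, not from the original source): for a 16M
 * hstate, huge_page_mask(h) is ~(16M - 1), so both addr and len must
 * be 16M-aligned or the mapping is rejected with -EINVAL.
 */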

static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        set_pte_at(mm, addr, ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
        return __pte(pte_update(mm, addr, ptep, ~0UL, 1));
#else
        return __pte(pte_update(ptep, ~0UL, 0));
#endif
}
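
/*
 * Note (added commentary): both pte_update() variants clear every PTE
 * bit (the ~0UL mask) and hand back the old contents.  On PPC64 the
 * trailing 1 flags a hugepage update; the 32-bit variant instead takes
 * (ptep, clear_mask, set_mask).
 */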

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep)
{
        pte_t pte;
        pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
        flush_tlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
        return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
        return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                             unsigned long addr, pte_t *ptep,
                                             pte_t pte, int dirty)
{
#if defined(CONFIG_PPC_MMU_NOHASH) && \
        !(defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_PPC32))
        /*
         * The "return 1" forces a call of update_mmu_cache, which will write
         * a TLB entry.  Without this, platforms that don't do a write of the
         * TLB entry in the TLB miss handler asm will fault ad infinitum.
         */
        ptep_set_access_flags(vma, addr, ptep, pte, dirty);
        return 1;
#else
        return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
        return *ptep;
}

static inline int arch_prepare_hugepage(struct page *page)
{
        return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void reserve_hugetlb_gpages(void)
{
        pr_err("Cannot reserve gpages without hugetlb enabled\n");
}
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
                                      unsigned long vmaddr)
{
}
#endif

#endif /* _ASM_POWERPC_HUGETLB_H */
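
For orientation, here is a minimal caller-side sketch of how the walk
interface above fits together.  It is not part of the header: the
find_huge_size() wrapper name is invented for illustration, and it
assumes the usual convention that the walk returns NULL when no
hugepage mapping exists at the address.

/* Hypothetical usage sketch built on the declarations above. */
#include <linux/hugetlb.h>

static unsigned long find_huge_size(struct mm_struct *mm, unsigned long addr)
{
        unsigned shift;
        pte_t *ptep;

        /* Walk the page tables; NULL means no hugepage PTE for addr. */
        ptep = huge_pte_offset_and_shift(mm, addr, &shift);
        if (!ptep || huge_pte_none(huge_ptep_get(ptep)))
                return 0;

        /* shift encodes the hugepage size, e.g. 22 -> 4M, 24 -> 16M. */
        return 1UL << shift;
}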