arch/sparc/mm/tlb.c
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code. */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

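/* Drain this CPU's batch of pending virtual addresses.  The TSB
 * entries are always cleared; the TLB flush itself is skipped when
 * the mm's context is no longer valid, since its stale entries can
 * never be used again.  A single pending address takes the per-page
 * cross-call path, larger batches go through the SMP pending-flush
 * path (or the local one on UP builds).
 */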
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

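/* Lazy MMU mode brackets a region in which PTE updates may be
 * batched.  While tb->active is set, tlb_batch_add_one() queues
 * addresses instead of flushing them one at a time; leaving the
 * mode drains whatever is still pending.
 */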
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

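/* Queue one virtual address for a deferred TLB flush.  The low bit
 * of the page-aligned address records whether the mapping was
 * executable, so the eventual flush can invalidate the I-TLB as
 * well.  The batch is drained early when the mm changes, when the
 * mix of huge and small pages changes, or when it fills up; outside
 * of lazy MMU mode the address is flushed immediately instead.
 */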
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, bool huge)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr, huge);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0) {
		tb->mm = mm;
		tb->huge = huge;
	}

	if (tb->huge != huge) {
		flush_tlb_pending();
		tb->huge = huge;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}

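/* Called when an established user mapping is torn down or replaced.
 * On pre-hypervisor chips the D-cache is virtually indexed, so a
 * dirty file-backed page whose kernel and user mappings land in
 * different cache colors (bit 13, the first bit above the 8K page
 * offset) must have its D-cache aliases flushed before the
 * translation goes away.  With fullmm set the whole context is
 * about to be flushed, so no per-page flush is queued.
 */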
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	bool huge = is_hugetlb_pte(orig);

	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
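/* The pmd being replaced pointed to a page table of small pages;
 * walk those PTEs across the hugepage-sized range and queue a TLB
 * flush for every entry that was valid.
 */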
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec, false);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

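/* Install a pmd and keep the per-mm THP accounting and TLB state
 * consistent.  An 8MB hugepage is backed by two 4MB hardware TLB
 * entries on sparc64, which is why tearing down a huge pmd queues
 * two flushes, one for each REAL_HPAGE_SIZE half.
 */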
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		if (pmd_val(pmd) & _PAGE_PMD_HUGE)
			mm->context.thp_pte_count++;
		else
			mm->context.thp_pte_count--;

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec, true);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  true);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}

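/* Clear _PAGE_VALID in the pmd and flush the range so that no CPU
 * keeps using the old translation while the kernel operates on the
 * hugepage, e.g. while splitting it.
 */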
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;

	pmd_val(entry) &= ~_PAGE_VALID;

	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}

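/* Stash a preallocated page table for a pmd that has been turned
 * into a hugepage mapping.  The list_head is threaded through the
 * page table's own memory, so no extra allocation is needed; the
 * list is kept in FIFO order under mm->page_table_lock.
 */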
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

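/* Take back the oldest deposited page table, e.g. when a hugepage
 * is split.  The first two PTE slots were overlaid by the list_head
 * while the table sat on the deposit list, so they are zeroed before
 * the table is handed back.
 */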
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */