arch/s390/mm/pageattr.c
/*
 * Copyright IBM Corp. 2011
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
#include <asm/pgtable.h>
#include <asm/page.h>

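/*
 * Set the storage key of the 1MB frame at @addr to @skey using the SSKE
 * instruction, encoded via .insn. The M3 field value 9 selects the
 * nonquiescing and multiple-block controls, so one execution keys all
 * 4K blocks of the frame and returns the address of the next frame.
 */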
static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
        asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
                     : [addr] "+a" (addr) : [skey] "d" (skey));
        return addr;
}

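/*
 * Initialize the storage keys of the range [start, end) to
 * PAGE_DEFAULT_KEY. With EDAT1 whole 1MB frames are keyed with a single
 * SSKE per frame; leftover pages are keyed one by one.
 */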
void __storage_key_init_range(unsigned long start, unsigned long end)
{
        unsigned long boundary, size;

        if (!PAGE_DEFAULT_KEY)
                return;
        while (start < end) {
                if (MACHINE_HAS_EDAT1) {
                        /* set storage keys for a 1MB frame */
                        size = 1UL << 20;
                        boundary = (start + size) & ~(size - 1);
                        if (boundary <= end) {
                                do {
                                        start = sske_frame(start, PAGE_DEFAULT_KEY);
                                } while (start < boundary);
                                continue;
                        }
                }
                page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
                start += PAGE_SIZE;
        }
}

#ifdef CONFIG_PROC_FS
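/*
 * Number of 4K, 1M and 2G entries backing the kernel direct mapping,
 * reported through /proc/meminfo. The shifts below convert entry counts
 * to kB: a 4K page is 2^2 kB, a 1M segment 2^10 kB, a 2G region 2^21 kB.
 */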
atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

void arch_report_meminfo(struct seq_file *m)
{
        seq_printf(m, "DirectMap4k:    %8lu kB\n",
                   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
        seq_printf(m, "DirectMap1M:    %8lu kB\n",
                   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_1M]) << 10);
        seq_printf(m, "DirectMap2G:    %8lu kB\n",
                   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_2G]) << 21);
}
#endif /* CONFIG_PROC_FS */

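/*
 * Exchange the DAT table entry at @old with @new. With EDAT2 the CRDTE
 * instruction replaces the entry directly; the mask computes the start
 * address of the table containing the entry, as the instruction
 * requires. Without EDAT2 the entry is swapped with compare-and-swap-
 * and-purge (cspg, or csp on the low word for pre-IDTE machines).
 */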
static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
                    unsigned long dtt)
{
        unsigned long table, mask;

        mask = 0;
        if (MACHINE_HAS_EDAT2) {
                switch (dtt) {
                case CRDTE_DTT_REGION3:
                        mask = ~(PTRS_PER_PUD * sizeof(pud_t) - 1);
                        break;
                case CRDTE_DTT_SEGMENT:
                        mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
                        break;
                case CRDTE_DTT_PAGE:
                        mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
                        break;
                }
                table = (unsigned long)old & mask;
                crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
        } else if (MACHINE_HAS_IDTE) {
                cspg(old, *old, new);
        } else {
                csp((unsigned int *)old + 1, *old, new);
        }
}

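/* Requested attribute change: make read-only, or make writable again. */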
struct cpa {
        unsigned int set_ro : 1;
        unsigned int clear_ro : 1;
};

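/*
 * Change the protection of every 4K pte in [addr, end). Returns -EINVAL
 * if any pte in the range is not populated.
 */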
static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
                          struct cpa cpa)
{
        pte_t *ptep, new;

        ptep = pte_offset(pmdp, addr);
        do {
                if (pte_none(*ptep))
                        return -EINVAL;
                if (cpa.set_ro)
                        new = pte_wrprotect(*ptep);
                else if (cpa.clear_ro)
                        new = pte_mkwrite(pte_mkdirty(*ptep));
                pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
                ptep++;
                addr += PAGE_SIZE;
                cond_resched();
        } while (addr < end);
        return 0;
}

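/*
 * Split a large (1MB) segment entry into a page table of PTRS_PER_PTE
 * 4K ptes carrying the same protection, so that a sub-range of the
 * segment can be changed independently. The DirectMap counters are
 * updated to match.
 */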
static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
{
        unsigned long pte_addr, prot;
        pte_t *pt_dir, *ptep;
        pmd_t new;
        int i, ro;

        pt_dir = vmem_pte_alloc();
        if (!pt_dir)
                return -ENOMEM;
        pte_addr = pmd_pfn(*pmdp) << PAGE_SHIFT;
        ro = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT);
        prot = pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
        ptep = pt_dir;
        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte_val(*ptep) = pte_addr | prot;
                pte_addr += PAGE_SIZE;
                ptep++;
        }
        pmd_val(new) = __pa(pt_dir) | _SEGMENT_ENTRY;
        pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
        update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE);
        update_page_count(PG_DIRECT_MAP_1M, -1);
        return 0;
}

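/* Change the protection of a single large (1MB) segment entry. */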
static void modify_pmd_page(pmd_t *pmdp, unsigned long addr, struct cpa cpa)
{
        pmd_t new;

        if (cpa.set_ro)
                new = pmd_wrprotect(*pmdp);
        else if (cpa.clear_ro)
                new = pmd_mkwrite(pmd_mkdirty(*pmdp));
        pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
}

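/*
 * Walk the segment (pmd) level for [addr, end). Large entries that the
 * range only partially covers are split into page tables first; fully
 * covered large entries are modified in place.
 */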
static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
                          struct cpa cpa)
{
        unsigned long next;
        pmd_t *pmdp;
        int rc = 0;

        pmdp = pmd_offset(pudp, addr);
        do {
                if (pmd_none(*pmdp))
                        return -EINVAL;
                next = pmd_addr_end(addr, end);
                if (pmd_large(*pmdp)) {
                        if (addr & ~PMD_MASK || addr + PMD_SIZE > next) {
                                rc = split_pmd_page(pmdp, addr);
                                if (rc)
                                        return rc;
                                continue;
                        }
                        modify_pmd_page(pmdp, addr, cpa);
                } else {
                        rc = walk_pte_level(pmdp, addr, next, cpa);
                        if (rc)
                                return rc;
                }
                pmdp++;
                addr = next;
                cond_resched();
        } while (addr < end);
        return rc;
}

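/*
 * Split a large (2GB) region third entry into a segment table of
 * PTRS_PER_PMD 1MB entries carrying the same protection, updating the
 * DirectMap counters to match.
 */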
static int split_pud_page(pud_t *pudp, unsigned long addr)
{
        unsigned long pmd_addr, prot;
        pmd_t *pm_dir, *pmdp;
        pud_t new;
        int i, ro;

        pm_dir = vmem_pmd_alloc();
        if (!pm_dir)
                return -ENOMEM;
        pmd_addr = pud_pfn(*pudp) << PAGE_SHIFT;
        ro = !!(pud_val(*pudp) & _REGION_ENTRY_PROTECT);
        prot = pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
        pmdp = pm_dir;
        for (i = 0; i < PTRS_PER_PMD; i++) {
                pmd_val(*pmdp) = pmd_addr | prot;
                pmd_addr += PMD_SIZE;
                pmdp++;
        }
        pud_val(new) = __pa(pm_dir) | _REGION3_ENTRY;
        pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
        update_page_count(PG_DIRECT_MAP_1M, PTRS_PER_PMD);
        update_page_count(PG_DIRECT_MAP_2G, -1);
        return 0;
}

static void modify_pud_page(pud_t *pudp, unsigned long addr, struct cpa cpa)
{
        pud_t new;

        if (cpa.set_ro)
                new = pud_wrprotect(*pudp);
        else if (cpa.clear_ro)
                new = pud_mkwrite(pud_mkdirty(*pudp));
        pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
}

static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end,
                          struct cpa cpa)
{
        unsigned long next;
        pud_t *pudp;
        int rc = 0;

        pudp = pud_offset(pgd, addr);
        do {
                if (pud_none(*pudp))
                        return -EINVAL;
                next = pud_addr_end(addr, end);
                if (pud_large(*pudp)) {
                        if (addr & ~PUD_MASK || addr + PUD_SIZE > next) {
                                rc = split_pud_page(pudp, addr);
                                if (rc)
                                        break;
                                continue;
                        }
                        modify_pud_page(pudp, addr, cpa);
                } else {
                        rc = walk_pmd_level(pudp, addr, next, cpa);
                }
                pudp++;
                addr = next;
                cond_resched();
        } while (addr < end && !rc);
        return rc;
}

static DEFINE_MUTEX(cpa_mutex);

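/*
 * Walk the kernel page table for [addr, end) and apply the attribute
 * change described by @cpa. cpa_mutex serializes all callers; ranges
 * extending to MODULES_END or beyond are rejected.
 */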
static int change_page_attr(unsigned long addr, unsigned long end,
                            struct cpa cpa)
{
        unsigned long next;
        int rc = -EINVAL;
        pgd_t *pgdp;

        if (end >= MODULES_END)
                return -EINVAL;
        mutex_lock(&cpa_mutex);
        pgdp = pgd_offset_k(addr);
        do {
                if (pgd_none(*pgdp))
                        break;
                next = pgd_addr_end(addr, end);
                rc = walk_pud_level(pgdp, addr, next, cpa);
                if (rc)
                        break;
                cond_resched();
        } while (pgdp++, addr = next, addr < end && !rc);
        mutex_unlock(&cpa_mutex);
        return rc;
}

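/*
 * Make @numpages pages starting at @addr read-only (set_memory_ro) or
 * writable again (set_memory_rw).
 */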
int set_memory_ro(unsigned long addr, int numpages)
{
        struct cpa cpa = {
                .set_ro = 1,
        };

        addr &= PAGE_MASK;
        return change_page_attr(addr, addr + numpages * PAGE_SIZE, cpa);
}

int set_memory_rw(unsigned long addr, int numpages)
{
        struct cpa cpa = {
                .clear_ro = 1,
        };

        addr &= PAGE_MASK;
        return change_page_attr(addr, addr + numpages * PAGE_SIZE, cpa);
}

/* not possible */
int set_memory_nx(unsigned long addr, int numpages)
{
        return 0;
}

int set_memory_x(unsigned long addr, int numpages)
{
        return 0;
}

#ifdef CONFIG_DEBUG_PAGEALLOC

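/*
 * Invalidate @nr consecutive ptes. With the IPTE-range facility
 * (facility bit 13) a single instruction invalidates the whole range;
 * otherwise each pte is invalidated individually.
 */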
static void ipte_range(pte_t *pte, unsigned long address, int nr)
{
        int i;

        if (test_facility(13)) {
                __ptep_ipte_range(address, nr - 1, pte);
                return;
        }
        for (i = 0; i < nr; i++) {
                __ptep_ipte(address, pte);
                address += PAGE_SIZE;
                pte++;
        }
}

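/*
 * Map (@enable != 0) or unmap kernel pages in the identity mapping by
 * rewriting or invalidating their ptes. The pointer arithmetic on pte
 * computes how many entries remain in the current page table, so that
 * invalidations can be batched per table.
 */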
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
        unsigned long address;
        int nr, i, j;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        for (i = 0; i < numpages;) {
                address = page_to_phys(page + i);
                pgd = pgd_offset_k(address);
                pud = pud_offset(pgd, address);
                pmd = pmd_offset(pud, address);
                pte = pte_offset_kernel(pmd, address);
                nr = (unsigned long)pte >> ilog2(sizeof(long));
                nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
                nr = min(numpages - i, nr);
                if (enable) {
                        for (j = 0; j < nr; j++) {
                                pte_val(*pte) = address | pgprot_val(PAGE_KERNEL);
                                address += PAGE_SIZE;
                                pte++;
                        }
                } else {
                        ipte_range(pte, address, nr);
                }
                i += nr;
        }
}

#ifdef CONFIG_HIBERNATION
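/*
 * Used by hibernation to test whether a page is currently mapped. LRA
 * (load real address) sets condition code 0 only if translation of the
 * address succeeds.
 */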
bool kernel_page_present(struct page *page)
{
        unsigned long addr;
        int cc;

        addr = page_to_phys(page);
        asm volatile(
                "       lra     %1,0(%1)\n"
                "       ipm     %0\n"
                "       srl     %0,28"
                : "=d" (cc), "+a" (addr) : : "cc");
        return cc == 0;
}
#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */