arch/x86/mm/pageattr.c

/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long	vaddr;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	int		numpages;
	int		flushtlb;
	unsigned long	pfn;
	unsigned	force_split : 1;
};

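/*
 * Illustrative sketch (not part of the original file): a request to
 * make 16 pages at 'addr' non-executable would be described as
 *
 *	struct cpa_data cpa = {
 *		.vaddr		= addr,
 *		.numpages	= 16,
 *		.mask_set	= __pgprot(_PAGE_NX),
 *		.mask_clr	= __pgprot(0),
 *	};
 *
 * The masks are applied per pte as: new = (old & ~mask_clr) | mask_set.
 */
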
#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
	return __pa(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
	return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
}

#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	void *vend = vaddr + size - 1;

	mb();

	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
		clflush(vaddr);
	/*
	 * Flush any possible final partial cacheline:
	 */
	clflush(vend);

	mb();
}

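/*
 * Caller-side sketch (assumed usage, not from this file): write back
 * and invalidate the cachelines of a single page, e.g. before its
 * mapping is switched to uncached:
 *
 *	clflush_cache_range(page_address(page), PAGE_SIZE);
 *
 * The mb() pair around the loop is what makes the unordered clflushes
 * safe against reordering.
 */
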
static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush everything to work around an erratum in early Athlons
	 * regarding large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86_model >= 4)
		wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1);
}

static void __cpa_flush_range(void *arg)
{
	/*
	 * We could optimize that further and do individual per page
	 * tlb invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64bit as well.
	 */
	__flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long addr;

	BUG_ON(irqs_disabled());
	WARN_ON(PAGE_ALIGN(start) != start);

	on_each_cpu(__cpa_flush_range, NULL, 1, 1);

	if (!cache)
		return;

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
					  unsigned long pfn)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640K and 1MB needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * This does not cover __inittext, since that is gone after boot.
	 * On 64bit we do not enforce !NX on the low mapping.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The .rodata section needs to be read-only. Using the pfn
	 * catches all aliases.
	 */
	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_RW;

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}

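/*
 * Example (hypothetical values): if a caller tries to make a .rodata
 * page writable, the _PAGE_RW bit is filtered back out here:
 *
 *	pgprot_t prot = __pgprot(_PAGE_PRESENT | _PAGE_RW);
 *
 *	prot = static_protections(prot, addr, rodata_pfn);
 *
 * afterwards pgprot_val(prot) == _PAGE_PRESENT, with _PAGE_RW removed.
 */
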
/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexistent mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;

	*level = PG_LEVEL_1G;
	if (pud_large(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;

	return pte_offset_kernel(pmd, address);
}

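/*
 * Usage sketch (illustrative, 'vaddr' is a hypothetical variable):
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(vaddr, &level);
 *
 *	if (pte && pte_present(*pte) && level != PG_LEVEL_4K)
 *		...;
 *
 * catches addresses backed by a 2M (or, on 64bit, 1G) mapping; the
 * pte_present() check matters because, per the note above, pud/pmd
 * pointers are also returned for non-present entries.
 */
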
/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
			struct cpa_data *cpa)
{
	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
	pte_t new_pte, old_pte, *tmp;
	pgprot_t old_prot, new_prot;
	int i, do_split = 1;
	unsigned int level;

	if (cpa->force_split)
		return 1;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	switch (level) {
	case PG_LEVEL_2M:
		psize = PMD_PAGE_SIZE;
		pmask = PMD_PAGE_MASK;
		break;
#ifdef CONFIG_X86_64
	case PG_LEVEL_1G:
		psize = PUD_PAGE_SIZE;
		pmask = PUD_PAGE_MASK;
		break;
#endif
	default:
		do_split = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Calculate the number of pages, which fit into this large
	 * page starting at address:
	 */
	nextpage_addr = (address + psize) & pmask;
	numpages = (nextpage_addr - address) >> PAGE_SHIFT;
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;

	/*
	 * We are safe now. Check whether the new pgprot is the same:
	 */
	old_pte = *kpte;
	old_prot = new_prot = pte_pgprot(old_pte);

	pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

	/*
	 * old_pte points to the large page base address. So we need
	 * to add the offset of the virtual address:
	 */
	pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
	cpa->pfn = pfn;

	new_prot = static_protections(new_prot, address, pfn);

	/*
	 * We need to check the full range, whether
	 * static_protections() requires a different pgprot for one of
	 * the pages in the range we try to preserve:
	 */
	addr = address + PAGE_SIZE;
	pfn++;
	for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
		pgprot_t chk_prot = static_protections(new_prot, addr, pfn);

		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
			goto out_unlock;
	}

	/*
	 * If there are no changes, return. cpa->numpages has been
	 * updated above:
	 */
	if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
		do_split = 0;
		goto out_unlock;
	}

	/*
	 * We need to change the attributes. Check, whether we can
	 * change the large page in one go. We request a split, when
	 * the address is not aligned and the number of pages is
	 * smaller than the number of pages in the large page. Note
	 * that we limited the number of possible pages already to
	 * the number of pages in the large page.
	 */
	if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
		/*
		 * The address is aligned and the number of pages
		 * covers the full page.
		 */
		new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
		__set_pmd_pte(kpte, address, new_pte);
		cpa->flushtlb = 1;
		do_split = 0;
	}

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	return do_split;
}

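/*
 * Worked example for the calculation above (illustrative numbers):
 * for a 2M page, psize = 0x200000 and pmask = ~0x1fffff. With
 * address = 0xffffffff80201000:
 *
 *	nextpage_addr = (0xffffffff80201000 + 0x200000) & pmask
 *		      = 0xffffffff80400000
 *	numpages      = (0xffffffff80400000 - 0xffffffff80201000) >> 12
 *		      = 0x1ff = 511 pages
 *
 * so at most 511 of the requested pages fall inside this large page;
 * the caller loops over the remainder.
 */
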
static LIST_HEAD(page_pool);
static unsigned long pool_size, pool_pages, pool_low;
static unsigned long pool_used, pool_failed;

static void cpa_fill_pool(struct page **ret)
{
	gfp_t gfp = GFP_KERNEL;
	unsigned long flags;
	struct page *p;

	/*
	 * Avoid recursion (on debug-pagealloc) and also signal
	 * our priority to get to these pagetables:
	 */
	if (current->flags & PF_MEMALLOC)
		return;
	current->flags |= PF_MEMALLOC;

	/*
	 * Allocate atomically from atomic contexts:
	 */
	if (in_atomic() || irqs_disabled() || debug_pagealloc)
		gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;

	while (pool_pages < pool_size || (ret && !*ret)) {
		p = alloc_pages(gfp, 0);
		if (!p) {
			pool_failed++;
			break;
		}
		/*
		 * If the call site needs a page right now, provide it:
		 */
		if (ret && !*ret) {
			*ret = p;
			continue;
		}
		spin_lock_irqsave(&pgd_lock, flags);
		list_add(&p->lru, &page_pool);
		pool_pages++;
		spin_unlock_irqrestore(&pgd_lock, flags);
	}

	current->flags &= ~PF_MEMALLOC;
}

#define SHIFT_MB		(20 - PAGE_SHIFT)
#define ROUND_MB_GB		((1 << 10) - 1)
#define SHIFT_MB_GB		10
#define POOL_PAGES_PER_GB	16

void __init cpa_init(void)
{
	struct sysinfo si;
	unsigned long gb;

	si_meminfo(&si);
	/*
	 * Calculate the number of pool pages:
	 *
	 * Convert totalram (nr of pages) to MiB and round to the next
	 * GiB. Shift MiB to GiB and multiply the result by
	 * POOL_PAGES_PER_GB:
	 */
	if (debug_pagealloc) {
		gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
		pool_size = POOL_PAGES_PER_GB * gb;
	} else {
		pool_size = 1;
	}
	pool_low = pool_size;

	cpa_fill_pool(NULL);
	printk(KERN_DEBUG
	       "CPA: page pool initialized %lu of %lu pages preallocated\n",
	       pool_pages, pool_size);
}

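/*
 * Worked example for the sizing above (illustrative): with 4GiB of RAM
 * and 4k pages, SHIFT_MB = 20 - 12 = 8, so
 *
 *	gb        = ((0x100000 >> 8) + 1023) >> 10 = 4
 *	pool_size = 16 * 4 = 64 pages
 *
 * i.e. 16 pre-allocated pagetable pages per GiB of memory when
 * debug_pagealloc is active, and a single page otherwise.
 */
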
static int split_large_page(pte_t *kpte, unsigned long address)
{
	unsigned long flags, pfn, pfninc = 1;
	unsigned int i, level;
	pte_t *pbase, *tmp;
	pgprot_t ref_prot;
	struct page *base;

	/*
	 * Get a page from the pool. The pool list is protected by the
	 * pgd_lock, which we have to take anyway for the split
	 * operation:
	 */
	spin_lock_irqsave(&pgd_lock, flags);
	if (list_empty(&page_pool)) {
		spin_unlock_irqrestore(&pgd_lock, flags);
		base = NULL;
		cpa_fill_pool(&base);
		if (!base)
			return -ENOMEM;
		spin_lock_irqsave(&pgd_lock, flags);
	} else {
		base = list_first_entry(&page_pool, struct page, lru);
		list_del(&base->lru);
		pool_pages--;

		if (pool_pages < pool_low)
			pool_low = pool_pages;
	}

	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif
	ref_prot = pte_pgprot(pte_clrhuge(*kpte));

#ifdef CONFIG_X86_64
	if (level == PG_LEVEL_1G) {
		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
		pgprot_val(ref_prot) |= _PAGE_PSE;
	}
#endif

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = pte_pfn(*kpte);
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
		set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

	/*
	 * Install the new, split up pagetable. Important details here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable. See section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual.
	 *
	 * Mark the entry present. The current mapping might be
	 * set to not present, which we preserved above.
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	pgprot_val(ref_prot) |= _PAGE_PRESENT;
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	/*
	 * If we dropped out via the lookup_address check under
	 * pgd_lock then stick the page back into the pool:
	 */
	if (base) {
		list_add(&base->lru, &page_pool);
		pool_pages++;
	} else
		pool_used++;
	spin_unlock_irqrestore(&pgd_lock, flags);

	return 0;
}

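/*
 * Note on the fill loop above (summary, not new behaviour): splitting
 * a 2M page writes PTRS_PER_PTE == 512 4k entries with pfninc == 1;
 * splitting a 1G page instead writes 512 2M entries, so pfninc is
 * PMD_PAGE_SIZE >> PAGE_SHIFT == 512 and _PAGE_PSE stays set in
 * ref_prot.
 */
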
static int __change_page_attr(struct cpa_data *cpa, int primary)
{
	unsigned long address = cpa->vaddr;
	int do_split, err;
	unsigned int level;
	pte_t *kpte, old_pte;

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return primary ? -EINVAL : 0;

	old_pte = *kpte;
	if (!pte_val(old_pte)) {
		if (!primary)
			return 0;
		printk(KERN_WARNING "CPA: called for zero pte. "
		       "vaddr = %lx cpa->vaddr = %lx\n", address,
		       cpa->vaddr);
		WARN_ON(1);
		return -EINVAL;
	}

	if (level == PG_LEVEL_4K) {
		pte_t new_pte;
		pgprot_t new_prot = pte_pgprot(old_pte);
		unsigned long pfn = pte_pfn(old_pte);

		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

		new_prot = static_protections(new_prot, address, pfn);

		/*
		 * We need to keep the pfn from the existing PTE,
		 * after all we're only going to change its attributes,
		 * not the memory it points to.
		 */
		new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
		cpa->pfn = pfn;
		/*
		 * Do we really change anything ?
		 */
		if (pte_val(old_pte) != pte_val(new_pte)) {
			set_pte_atomic(kpte, new_pte);
			cpa->flushtlb = 1;
		}
		cpa->numpages = 1;
		return 0;
	}

	/*
	 * Check, whether we can keep the large page intact
	 * and just change the pte:
	 */
	do_split = try_preserve_large_page(kpte, address, cpa);
	/*
	 * When the range fits into the existing large page,
	 * return. cpa->numpages and cpa->flushtlb have been updated in
	 * try_preserve_large_page:
	 */
	if (do_split <= 0)
		return do_split;

	/*
	 * We have to split the large page:
	 */
	err = split_large_page(kpte, address);
	if (!err) {
		cpa->flushtlb = 1;
		goto repeat;
	}

	return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

static int cpa_process_alias(struct cpa_data *cpa)
{
	struct cpa_data alias_cpa;
	int ret = 0;

	if (cpa->pfn > max_pfn_mapped)
		return 0;

	/*
	 * No need to redo, when the primary call touched the direct
	 * mapping already:
	 */
	if (!within(cpa->vaddr, PAGE_OFFSET,
		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {

		alias_cpa = *cpa;
		alias_cpa.vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);

		ret = __change_page_attr_set_clr(&alias_cpa, 0);
	}

#ifdef CONFIG_X86_64
	if (ret)
		return ret;
	/*
	 * No need to redo, when the primary call touched the high
	 * mapping already:
	 */
	if (within(cpa->vaddr, (unsigned long) _text, (unsigned long) _end))
		return 0;

	/*
	 * If the physical address is inside the kernel map, we need
	 * to touch the high mapped kernel as well:
	 */
	if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
		return 0;

	alias_cpa = *cpa;
	alias_cpa.vaddr =
		(cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;

	/*
	 * The high mapping range is imprecise, so ignore the return value.
	 */
	__change_page_attr_set_clr(&alias_cpa, 0);
#endif
	return ret;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
	int ret, numpages = cpa->numpages;

	while (numpages) {
		/*
		 * Store the remaining nr of pages for the large page
		 * preservation check.
		 */
		cpa->numpages = numpages;

		ret = __change_page_attr(cpa, checkalias);
		if (ret)
			return ret;

		if (checkalias) {
			ret = cpa_process_alias(cpa);
			if (ret)
				return ret;
		}

		/*
		 * Adjust the number of pages with the result of the
		 * CPA operation. Either a large page has been
		 * preserved or a single page update happened.
		 */
		BUG_ON(cpa->numpages > numpages);
		numpages -= cpa->numpages;
		cpa->vaddr += cpa->numpages * PAGE_SIZE;
	}
	return 0;
}

static inline int cache_attr(pgprot_t attr)
{
	return pgprot_val(attr) &
		(_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}

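/*
 * Example: a set_memory_uc() request carries _PAGE_CACHE_UC, i.e.
 * _PAGE_PCD | _PAGE_PWT, so cache_attr() is non-zero and the range
 * gets its cachelines flushed after the pte change; pure _PAGE_NX or
 * _PAGE_RW changes only need the TLB flush.
 */
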
static int change_page_attr_set_clr(unsigned long addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr,
				    int force_split)
{
	struct cpa_data cpa;
	int ret, cache, checkalias;

	/*
	 * Check, if we are requested to change a not supported
	 * feature:
	 */
	mask_set = canon_pgprot(mask_set);
	mask_clr = canon_pgprot(mask_clr);
	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
		return 0;

	/* Ensure we are PAGE_SIZE aligned */
	if (addr & ~PAGE_MASK) {
		addr &= PAGE_MASK;
		/*
		 * People should not be passing in unaligned addresses:
		 */
		WARN_ON_ONCE(1);
	}

	cpa.vaddr = addr;
	cpa.numpages = numpages;
	cpa.mask_set = mask_set;
	cpa.mask_clr = mask_clr;
	cpa.flushtlb = 0;
	cpa.force_split = force_split;

	/* No alias checking for _NX bit modifications */
	checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

	ret = __change_page_attr_set_clr(&cpa, checkalias);

	/*
	 * Check whether we really changed something:
	 */
	if (!cpa.flushtlb)
		goto out;

	/*
	 * No need to flush, when we did not set any of the caching
	 * attributes:
	 */
	cache = cache_attr(mask_set);

	/*
	 * On success we use clflush, when the CPU supports it, to
	 * avoid the wbinvd. If the CPU does not support it and in the
	 * error case we fall back to cpa_flush_all (which uses
	 * wbinvd):
	 */
	if (!ret && cpu_has_clflush)
		cpa_flush_range(addr, numpages, cache);
	else
		cpa_flush_all(cache);

out:
	cpa_fill_pool(NULL);

	return ret;
}

static inline int change_page_attr_set(unsigned long addr, int numpages,
				       pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0);
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0);
}

int _set_memory_uc(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_CACHE_UC));
}

int set_memory_uc(unsigned long addr, int numpages)
{
	if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
			    _PAGE_CACHE_UC, NULL))
		return -EINVAL;

	return _set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_memory_uc);

int _set_memory_wc(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_CACHE_WC));
}

int set_memory_wc(unsigned long addr, int numpages)
{
	if (!pat_wc_enabled)
		return set_memory_uc(addr, numpages);

	if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
			    _PAGE_CACHE_WC, NULL))
		return -EINVAL;

	return _set_memory_wc(addr, numpages);
}
EXPORT_SYMBOL(set_memory_wc);

int _set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages,
				      __pgprot(_PAGE_CACHE_MASK));
}

int set_memory_wb(unsigned long addr, int numpages)
{
	free_memtype(addr, addr + numpages * PAGE_SIZE);

	return _set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

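/*
 * Caller-side sketch (assumed driver code, not part of this file):
 * make a kernel buffer uncached for device sharing, then restore the
 * default write-back type when done:
 *
 *	if (set_memory_uc((unsigned long)buf, nrpages))
 *		goto err;
 *	...
 *	set_memory_wb((unsigned long)buf, nrpages);
 *
 * set_memory_uc() also reserves the memtype so aliasing conflicts are
 * caught; set_memory_wb() releases it again.
 */
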
int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

int set_memory_4k(unsigned long addr, int numpages)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0),
					__pgprot(0), 1);
}

int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
	struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
				.numpages = numpages,
				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.mask_clr = __pgprot(0)};

	return __change_page_attr_set_clr(&cpa, 1);
}

static int __set_pages_np(struct page *page, int numpages)
{
	struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
				.numpages = numpages,
				.mask_set = __pgprot(0),
				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)};

	return __change_page_attr_set_clr(&cpa, 1);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If the page allocator is not up yet, do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored as the calls cannot fail.
	 * Large pages are kept enabled at boot time, and are
	 * split up quickly with DEBUG_PAGEALLOC. If a splitup
	 * fails here (due to temporary memory shortage) no damage
	 * is done because we just keep the largepage intact up
	 * to the next attempt when it will likely be split up:
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all tlbs,
	 * but that can deadlock -> flush only the current cpu:
	 */
	__flush_tlb_all();

	/*
	 * Try to refill the page pool here. We can do this only after
	 * the tlb flush.
	 */
	cpa_fill_pool(NULL);
}

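/*
 * Illustrative flow (assumed allocator-side usage): with
 * CONFIG_DEBUG_PAGEALLOC the page allocator calls
 *
 *	kernel_map_pages(page, 1 << order, 0);
 *
 * when pages are freed, so a later use-after-free faults immediately
 * on the now not-present mapping instead of silently corrupting data.
 */
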
#ifdef CONFIG_DEBUG_FS
static int dpa_show(struct seq_file *m, void *v)
{
	seq_puts(m, "DEBUG_PAGEALLOC\n");
	seq_printf(m, "pool_size     : %lu\n", pool_size);
	seq_printf(m, "pool_pages    : %lu\n", pool_pages);
	seq_printf(m, "pool_low      : %lu\n", pool_low);
	seq_printf(m, "pool_used     : %lu\n", pool_used);
	seq_printf(m, "pool_failed   : %lu\n", pool_failed);

	return 0;
}

static int dpa_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, dpa_show, NULL);
}

static const struct file_operations dpa_fops = {
	.open		= dpa_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

int __init debug_pagealloc_proc_init(void)
{
	struct dentry *de;

	de = debugfs_create_file("debug_pagealloc", 0600, NULL, NULL,
				 &dpa_fops);
	if (!de)
		return -ENOMEM;

	return 0;
}
__initcall(debug_pagealloc_proc_init);
#endif

#ifdef CONFIG_HIBERNATION

bool kernel_page_present(struct page *page)
{
	unsigned int level;
	pte_t *pte;

	if (PageHighMem(page))
		return false;

	pte = lookup_address((unsigned long)page_address(page), &level);
	return (pte_val(*pte) & _PAGE_PRESENT);
}

#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif