/*P:700 The pagetable code, on the other hand, still shows the scars of
 * previous encounters.  It's functional, and as neat as it can be in the
 * circumstances, but be wary, for these things are subtle and break easily.
 * The Guest provides a virtual to physical mapping, but we can neither trust
 * it nor use it: we verify and convert it here then point the CPU to the
 * converted Guest pages when running the Guest. :*/

/* Copyright (C) Rusty Russell IBM Corporation 2006.
 * GPL v2 and any later version */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include "lg.h"

/*M:008 We hold a reference to pages, which prevents them from being swapped.
 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
 * to swap out.  If we had this, and a shrinker callback to trim PTE pages, we
 * could probably consider launching Guests as non-root. :*/

/*H:300
 * The Page Table Code
 *
 * We use two-level page tables for the Guest.  If you're not entirely
 * comfortable with virtual addresses, physical addresses and page tables then
 * I recommend you review arch/x86/lguest/boot.c's "Page Table Handling" (with
 * diagrams!).
 *
 * The Guest keeps page tables, but we maintain the actual ones here: these are
 * called "shadow" page tables.  Which is a very Guest-centric name: these are
 * the real page tables the CPU uses, although we keep them up to date to
 * reflect the Guest's.  (See what I mean about weird naming?  Since when do
 * shadows reflect anything?)
 *
 * Anyway, this is the most complicated part of the Host code.  There are seven
 * parts to this:
 *  (i) Looking up a page table entry when the Guest faults,
 *  (ii) Making sure the Guest stack is mapped,
 *  (iii) Setting up a page table entry when the Guest tells us one has changed,
 *  (iv) Switching page tables,
 *  (v) Flushing (throwing away) page tables,
 *  (vi) Mapping the Switcher when the Guest is about to run,
 *  (vii) Setting up the page tables initially.
:*/

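/* As a mental model, the two-level lookup the CPU does through these tables
 * is roughly this (a sketch of the hardware's walk, not code that runs here):
 *
 *      pgd = pgdir[vaddr >> 22];                       top 10 bits
 *      pte = pte_page(pgd)[(vaddr >> 12) & 1023];      next 10 bits
 *      paddr = page_base(pte) | (vaddr & 4095);        bottom 12 bits
 */
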
/* 1024 entries in a page table page maps 1024 pages: 4MB.  The Switcher is
 * conveniently placed at the top 4MB, so it uses a separate, complete PTE
 * page. */
#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
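/* (The arithmetic: 1024 entries * 4096 bytes per page = 4MB per PTE page, and
 * with PTRS_PER_PGD == 1024 slots covering 4GB, the top 4MB is exactly the
 * last slot, index 1023.) */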

/* We actually need a separate PTE page for each CPU.  Remember that after the
 * Switcher code itself comes two pages for each CPU, and we don't want this
 * CPU's guest to see the pages of any other CPU. */
static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
#define switcher_pte_page(cpu)  per_cpu(switcher_pte_pages, cpu)

/*H:320 The page table code is curly enough to need helper functions to keep it
 * clear and clean.
 *
 * There are two functions which return pointers to the shadow (aka "real")
 * page tables.
 *
 * spgd_addr() takes the virtual address and returns a pointer to the top-level
 * page directory entry (PGD) for that address.  Since we keep track of several
 * page tables, the "i" argument tells us which one we're interested in (it's
 * usually the current one). */
static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
{
        unsigned int index = pgd_index(vaddr);

        /* We kill any Guest trying to touch the Switcher addresses. */
        if (index >= SWITCHER_PGD_INDEX) {
                kill_guest(cpu, "attempt to access switcher pages");
                index = 0;
        }
        /* Return a pointer to the index'th pgd entry for the i'th page table. */
        return &cpu->lg->pgdirs[i].pgdir[index];
}

/* This routine then takes the page directory entry returned above, which
 * contains the address of the page table entry (PTE) page.  It then returns a
 * pointer to the PTE entry for the given address. */
static pte_t *spte_addr(pgd_t spgd, unsigned long vaddr)
{
        pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
        /* You should never call this if the PGD entry wasn't valid */
        BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
        return &page[(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE];
}

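/* A worked example of the lookup these helpers do: for vaddr 0xC0101234 the
 * PGD index is the top 10 bits (0xC0101234 >> 22 == 768), the PTE index is
 * the next 10 bits ((0xC0101234 >> PAGE_SHIFT) % PTRS_PER_PTE == 257), and
 * the bottom 12 bits (0x234) are the offset within the page. */
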
/* These two functions are just like the above two, except they access the
 * Guest page tables.  Hence they return a Guest address. */
static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
{
        unsigned int index = vaddr >> (PGDIR_SHIFT);
        return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
}

static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
{
        unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
        BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
        return gpage + ((vaddr>>PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t);
}
/*:*/

/*M:014 get_pfn is slow: we could probably try to grab batches of pages here as
 * an optimization (ie. pre-faulting). :*/

/*H:350 This routine takes a page number given by the Guest and converts it to
 * an actual, physical page number.  It can fail for several reasons: the
 * virtual address might not be mapped by the Launcher, the write flag might be
 * set on a read-only page, or the page might be shared and need copying, but
 * we're out of memory.
 *
 * This holds a reference to the page, so release_pte() is careful to put that
 * back. */
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
        struct page *page;

        /* gup me one page at this address please! */
        if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
                return page_to_pfn(page);

        /* This value indicates failure. */
        return -1UL;
}

/*H:340 Converting a Guest page table entry to a shadow (ie. real) page table
 * entry can be a little tricky.  The flags are (almost) the same, but the
 * Guest PTE contains a virtual page number: the CPU needs the real page
 * number. */
static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
{
        unsigned long pfn, base, flags;

        /* The Guest sets the global flag, because it thinks that it is using
         * PGE.  We only told it to use PGE so it would tell us whether it was
         * flushing a kernel mapping or a userspace mapping.  We don't actually
         * use the global bit, so throw it away. */
        flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);

        /* The Guest's pages are offset inside the Launcher. */
        base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;

        /* We need a temporary "unsigned long" variable to hold the answer from
         * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
         * fit in spte.pfn.  get_pfn() finds the real physical number of the
         * page, given the virtual number. */
        pfn = get_pfn(base + pte_pfn(gpte), write);
        if (pfn == -1UL) {
                kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
                /* When we destroy the Guest, we'll go through the shadow page
                 * tables and release_pte() them.  Make sure we don't think
                 * this one is valid! */
                flags = 0;
        }
        /* Now we assemble our shadow PTE from the page number and flags. */
        return pfn_pte(pfn, __pgprot(flags));
}

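/* To make that offset concrete: if the Launcher had mapped the Guest's memory
 * at virtual address 0x10000000 (a made-up mem_base), Guest page number 5
 * would really be the Launcher's page (0x10000000 / PAGE_SIZE) + 5, and that's
 * the page get_pfn() pins and converts to a physical frame number. */
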
/*H:460 And to complete the chain, release_pte() looks like this: */
static void release_pte(pte_t pte)
{
        /* Remember that get_user_pages_fast() took a reference to the page, in
         * get_pfn()?  We have to put it back now. */
        if (pte_flags(pte) & _PAGE_PRESENT)
                put_page(pfn_to_page(pte_pfn(pte)));
}
/*:*/

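/* These next two check a Guest page table entry (or top-level entry) before
 * we mirror it into the shadow: the flags must be ones we allow (_PAGE_PSE,
 * a 4MB "huge" page, is rejected in the PTE, and anything beyond _PAGE_TABLE
 * in the PGD), and the page number must be below pfn_limit, so the Guest
 * can't reach the Launcher's own memory above its allotted region. */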
static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
{
        if ((pte_flags(gpte) & _PAGE_PSE) ||
            pte_pfn(gpte) >= cpu->lg->pfn_limit)
                kill_guest(cpu, "bad page table entry");
}

static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
{
        if ((pgd_flags(gpgd) & ~_PAGE_TABLE) ||
            (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
                kill_guest(cpu, "bad page directory entry");
}

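/* A note on "errcode" below: it's the error code the CPU pushes on a page
 * fault.  Bit 0 (value 1) set means the fault was on a present page, bit 1
 * (value 2) means the fault was a write, and bit 2 (value 4) means it came
 * from userspace; demand_page() only examines the write and user bits. */
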
/*H:330
 * (i) Looking up a page table entry when the Guest faults.
 *
 * We saw this call in run_guest(): when we see a page fault in the Guest, we
 * come here.  That's because we only set up the shadow page tables lazily as
 * they're needed, so we get page faults all the time and quietly fix them up
 * and return to the Guest without it knowing.
 *
 * If we fixed up the fault (ie. we mapped the address), this routine returns
 * true.  Otherwise, it was a real fault and we need to tell the Guest. */
int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
{
        pgd_t gpgd;
        pgd_t *spgd;
        unsigned long gpte_ptr;
        pte_t gpte;
        pte_t *spte;

        /* First step: get the top-level Guest page table entry. */
        gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
        /* Toplevel not present?  We can't map it in. */
        if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
                return 0;

        /* Now look at the matching shadow entry. */
        spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
        if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
                /* No shadow entry: allocate a new shadow PTE page. */
                unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
                /* This is not really the Guest's fault, but killing it is
                 * simple for this corner case. */
                if (!ptepage) {
                        kill_guest(cpu, "out of memory allocating pte page");
                        return 0;
                }
                /* We check that the Guest pgd is OK. */
                check_gpgd(cpu, gpgd);
                /* And we copy the flags to the shadow PGD entry.  The page
                 * number in the shadow PGD is the page we just allocated. */
                *spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd));
        }

        /* OK, now we look at the lower level in the Guest page table: keep its
         * address, because we might update it later. */
        gpte_ptr = gpte_addr(gpgd, vaddr);
        gpte = lgread(cpu, gpte_ptr, pte_t);

        /* If this page isn't in the Guest page tables, we can't page it in. */
        if (!(pte_flags(gpte) & _PAGE_PRESENT))
                return 0;

        /* Check they're not trying to write to a page the Guest wants
         * read-only (bit 1 of the fault code, value 2, means "write"). */
        if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
                return 0;

        /* User access to a kernel-only page? (bit 2, value 4, means "user") */
        if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
                return 0;

        /* Check that the Guest PTE flags are OK, and the page number is below
         * the pfn_limit (ie. not mapping the Launcher binary). */
        check_gpte(cpu, gpte);

        /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
        gpte = pte_mkyoung(gpte);
        if (errcode & 2)
                gpte = pte_mkdirty(gpte);

        /* Get the pointer to the shadow PTE entry we're going to set. */
        spte = spte_addr(*spgd, vaddr);
        /* If there was a valid shadow PTE entry here before, we release it.
         * This can happen with a write to a previously read-only entry. */
        release_pte(*spte);

        /* If this is a write, we insist that the Guest page is writable (the
         * final arg to gpte_to_spte()). */
        if (pte_dirty(gpte))
                *spte = gpte_to_spte(cpu, gpte, 1);
        else
                /* If this is a read, don't set the "writable" bit in the page
                 * table entry, even if the Guest says it's writable.  That way
                 * we will come back here when a write does actually occur, so
                 * we can update the Guest's _PAGE_DIRTY flag. */
                *spte = gpte_to_spte(cpu, pte_wrprotect(gpte), 0);

        /* Finally, we write the Guest PTE entry back: we've set the
         * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
        lgwrite(cpu, gpte_ptr, pte_t, gpte);

        /* The fault is fixed, the page table is populated, the mapping
         * manipulated, the result returned and the code complete.  A small
         * delay and a trace of alliteration are the only indications the Guest
         * has that a page fault occurred at all. */
        return 1;
}

/*H:360
 * (ii) Making sure the Guest stack is mapped.
 *
 * Remember that direct traps into the Guest need a mapped Guest kernel stack.
 * pin_stack_pages() calls us here: we could simply call demand_page(), but as
 * we've seen that logic is quite long, and usually the stack pages are already
 * mapped, so it's overkill.
 *
 * This is a quick version which answers the question: is this virtual address
 * mapped by the shadow page tables, and is it writable? */
static int page_writable(struct lg_cpu *cpu, unsigned long vaddr)
{
        pgd_t *spgd;
        unsigned long flags;

        /* Look at the current top level entry: is it present? */
        spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
        if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
                return 0;

        /* Check the flags on the pte entry itself: it must be present and
         * writable. */
        flags = pte_flags(*(spte_addr(*spgd, vaddr)));

        return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}

/* So, when pin_stack_pages() asks us to pin a page, we check if it's already
 * in the page tables, and if not, we call demand_page() with error code 2
 * (meaning "write"). */
void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
{
        if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
                kill_guest(cpu, "bad stack page %#lx", vaddr);
}

/*H:450 If we chase down the release_pgd() code, it looks like this: */
static void release_pgd(struct lguest *lg, pgd_t *spgd)
{
        /* If the entry's not present, there's nothing to release. */
        if (pgd_flags(*spgd) & _PAGE_PRESENT) {
                unsigned int i;
                /* Converting the pfn to find the actual PTE page is easy: turn
                 * the page number into a physical address, then convert to a
                 * virtual address (easy for kernel pages like this one). */
                pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
                /* For each entry in the page, we might need to release it. */
                for (i = 0; i < PTRS_PER_PTE; i++)
                        release_pte(ptepage[i]);
                /* Now we can free the page of PTEs */
                free_page((long)ptepage);
                /* And zero out the PGD entry so we never release it twice. */
                *spgd = __pgd(0);
        }
}

/*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings()
 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
 * It simply releases every PTE page from 0 up to the Guest's kernel address. */
static void flush_user_mappings(struct lguest *lg, int idx)
{
        unsigned int i;
        /* Release every pgd entry up to the kernel's address. */
        for (i = 0; i < pgd_index(lg->kernel_address); i++)
                release_pgd(lg, lg->pgdirs[idx].pgdir + i);
}

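/* Concretely, with the default PAGE_OFFSET of 0xC0000000 the kernel address
 * has pgd_index() 0xC0000000 >> 22 == 768, so this releases user entries 0
 * through 767 and leaves the kernel's (and the Switcher's) entries alone. */
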
/*H:440 (v) Flushing (throwing away) page tables,
 *
 * The Guest has a hypercall to throw away the page tables: it's used when a
 * large number of mappings have been changed. */
void guest_pagetable_flush_user(struct lg_cpu *cpu)
{
        /* Drop the userspace part of the current page table. */
        flush_user_mappings(cpu->lg, cpu->cpu_pgd);
}
/*:*/

/* We walk down the Guest page tables to get a guest-physical address */
unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
{
        pgd_t gpgd;
        pte_t gpte;

        /* First step: get the top-level Guest page table entry. */
        gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
        /* Toplevel not present?  Then the Guest gave us a bad address. */
        if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
                kill_guest(cpu, "Bad address %#lx", vaddr);

        gpte = lgread(cpu, gpte_addr(gpgd, vaddr), pte_t);
        if (!(pte_flags(gpte) & _PAGE_PRESENT))
                kill_guest(cpu, "Bad address %#lx", vaddr);

        return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
}

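/* For example, if the Guest PTE says page frame 0x123 and the low bits of
 * vaddr are 0x456, guest_pa() returns 0x123 * 4096 | 0x456 == 0x123456. */
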
/* We keep several page tables.  This is a simple routine to find the page
 * table (if any) corresponding to this top-level address the Guest has given
 * us. */
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
        unsigned int i;
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
                        break;
        return i;
}

/*H:435 And this is us, creating the new page directory.  If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir. */
static unsigned int new_pgdir(struct lg_cpu *cpu,
                              unsigned long gpgdir,
                              int *blank_pgdir)
{
        unsigned int next;

        /* We pick one entry at random to throw out.  Choosing the Least
         * Recently Used might be better, but this is easy. */
        next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
        /* If it's never been allocated at all before, try now. */
        if (!cpu->lg->pgdirs[next].pgdir) {
                cpu->lg->pgdirs[next].pgdir =
                                        (pgd_t *)get_zeroed_page(GFP_KERNEL);
                /* If the allocation fails, just keep using the one we have */
                if (!cpu->lg->pgdirs[next].pgdir)
                        next = cpu->cpu_pgd;
                else
                        /* This is a blank page, so there are no kernel
                         * mappings: caller must map the stack! */
                        *blank_pgdir = 1;
        }
        /* Record which Guest toplevel this shadows. */
        cpu->lg->pgdirs[next].gpgdir = gpgdir;
        /* Release all the non-kernel mappings. */
        flush_user_mappings(cpu->lg, next);

        return next;
}

/*H:430 (iv) Switching page tables
 *
 * Now we've seen all the page table setting and manipulation, let's see what
 * happens when the Guest changes page tables (ie. changes the top-level
 * pgdir).  This occurs on almost every context switch. */
void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
{
        int newpgdir, repin = 0;

        /* Look to see if we have this one already. */
        newpgdir = find_pgdir(cpu->lg, pgtable);
        /* If not, we allocate or mug an existing one: if it's a fresh one,
         * repin gets set to 1. */
        if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
                newpgdir = new_pgdir(cpu, pgtable, &repin);
        /* Change the current pgd index to the new one. */
        cpu->cpu_pgd = newpgdir;
        /* If it was completely blank, we map in the Guest kernel stack */
        if (repin)
                pin_stack_pages(cpu);
}

/*H:470 Finally, a routine which throws away everything: all PGD entries in all
 * the shadow page tables, including the Guest's kernel mappings.  This is used
 * when we destroy the Guest. */
static void release_all_pagetables(struct lguest *lg)
{
        unsigned int i, j;

        /* Every shadow pagetable this Guest has */
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                if (lg->pgdirs[i].pgdir)
                        /* Every PGD entry except the Switcher at the top */
                        for (j = 0; j < SWITCHER_PGD_INDEX; j++)
                                release_pgd(lg, lg->pgdirs[i].pgdir + j);
}

/* We also throw away everything when a Guest tells us it's changed a kernel
 * mapping.  Since kernel mappings are in every page table, it's easiest to
 * throw them all away.  This traps the Guest in amber for a while as
 * everything faults back in, but it's rare. */
void guest_pagetable_clear_all(struct lg_cpu *cpu)
{
        release_all_pagetables(cpu->lg);
        /* We need the Guest kernel stack mapped again. */
        pin_stack_pages(cpu);
}
/*:*/
/*M:009 Since we throw away all mappings when a kernel mapping changes, our
 * performance sucks for guests using highmem.  In fact, a guest with
 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
 * usually slower than a Guest with less memory.
 *
 * This, of course, cannot be fixed.  It would take some kind of... well, I
 * don't know, but the term "puissant code-fu" comes to mind. :*/

/*H:420 This is the routine which actually sets the page table entry for the
 * "idx"'th shadow page table.
 *
 * Normally, we can just throw out the old entry and replace it with 0: if they
 * use it demand_page() will put the new entry in.  We need to do this anyway:
 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 * is read from, and _PAGE_DIRTY when it's written to.
 *
 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 * these bits on PTEs immediately anyway.  This is done to save the CPU from
 * having to update them, but it helps us the same way: if they set
 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
 */
static void do_set_pte(struct lg_cpu *cpu, int idx,
                       unsigned long vaddr, pte_t gpte)
{
        /* Look up the matching shadow page directory entry. */
        pgd_t *spgd = spgd_addr(cpu, idx, vaddr);

        /* If the top level isn't present, there's no entry to update. */
        if (pgd_flags(*spgd) & _PAGE_PRESENT) {
                /* Otherwise, we start by releasing the existing entry. */
                pte_t *spte = spte_addr(*spgd, vaddr);
                release_pte(*spte);

                /* If they're setting this entry as dirty or accessed, we might
                 * as well put that entry they've given us in now.  This shaves
                 * 10% off a copy-on-write micro-benchmark. */
                if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
                        check_gpte(cpu, gpte);
                        *spte = gpte_to_spte(cpu, gpte,
                                             pte_flags(gpte) & _PAGE_DIRTY);
                } else
                        /* Otherwise kill it and we can demand_page() it in
                         * later. */
                        *spte = __pte(0);
        }
}

/*H:410 Updating a PTE entry is a little trickier.
 *
 * We keep track of several different page tables (the Guest uses one for each
 * process, so it makes sense to cache at least a few).  Each of these has
 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 * all processes.  So when the page table above that address changes, we update
 * all the page tables, not just the current one.  This is rare.
 *
 * The benefit is that when we have to track a new page table, we can keep all
 * the kernel mappings.  This speeds up context switch immensely. */
void guest_set_pte(struct lg_cpu *cpu,
                   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
{
        /* Kernel mappings must be changed on all top levels.  Slow, but doesn't
         * happen often. */
        if (vaddr >= cpu->lg->kernel_address) {
                unsigned int i;
                for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
                        if (cpu->lg->pgdirs[i].pgdir)
                                do_set_pte(cpu, i, vaddr, gpte);
        } else {
                /* Is this page table one we have a shadow for? */
                int pgdir = find_pgdir(cpu->lg, gpgdir);
                if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
                        /* If so, do the update. */
                        do_set_pte(cpu, pgdir, vaddr, gpte);
        }
}

/*H:400
 * (iii) Setting up a page table entry when the Guest tells us one has changed.
 *
 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
 * with the other side of page tables while we're here: what happens when the
 * Guest asks for a page table to be updated?
 *
 * We already saw that demand_page() will fill in the shadow page tables when
 * needed, so we can simply remove shadow page table entries whenever the Guest
 * tells us they've changed.  When the Guest tries to use the new entry it will
 * fault and demand_page() will fix it up.
 *
 * So with that in mind here's our code to update a (top-level) PGD entry:
 */
void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
{
        int pgdir;

        /* The kernel seems to try to initialize this early on: we ignore its
         * attempts to map over the Switcher. */
        if (idx >= SWITCHER_PGD_INDEX)
                return;

        /* If they're talking about a page table we have a shadow for... */
        pgdir = find_pgdir(lg, gpgdir);
        if (pgdir < ARRAY_SIZE(lg->pgdirs))
                /* ... throw it away. */
                release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
}

/*H:500 (vii) Setting up the page tables initially.
 *
 * When a Guest is first created, the Launcher tells us where the toplevel of
 * its first page table is.  We set some things up here: */
int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
{
        /* We start on the first shadow page table, and give it a blank PGD
         * page. */
        lg->pgdirs[0].gpgdir = pgtable;
        lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
        if (!lg->pgdirs[0].pgdir)
                return -ENOMEM;
        lg->cpus[0].cpu_pgd = 0;
        return 0;
}

/* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
void page_table_guest_data_init(struct lg_cpu *cpu)
{
        /* We get the kernel address: above this is all kernel memory. */
        if (get_user(cpu->lg->kernel_address,
                     &cpu->lg->lguest_data->kernel_address)
            /* We tell the Guest that it can't use the top 4MB of virtual
             * addresses used by the Switcher. */
            || put_user(4U*1024*1024, &cpu->lg->lguest_data->reserve_mem)
            || put_user(cpu->lg->pgdirs[0].gpgdir, &cpu->lg->lguest_data->pgdir))
                kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);

        /* In flush_user_mappings() we loop from 0 to
         * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
         * Switcher mappings, so check that now. */
        if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
                kill_guest(cpu, "bad kernel address %#lx",
                           cpu->lg->kernel_address);
}

/* When a Guest dies, our cleanup is fairly simple. */
void free_guest_pagetable(struct lguest *lg)
{
        unsigned int i;

        /* Throw away all page table pages. */
        release_all_pagetables(lg);
        /* Now free the top levels: free_page() can handle 0 just fine. */
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                free_page((long)lg->pgdirs[i].pgdir);
}

/*H:480 (vi) Mapping the Switcher when the Guest is about to run.
 *
 * The Switcher and the two pages for this CPU need to be visible in the
 * Guest (and not the pages for other CPUs).  We have the appropriate PTE pages
 * for each CPU already set up, we just need to hook them in now that we know
 * which Guest is about to run on this CPU. */
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
        pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
        pgd_t switcher_pgd;
        pte_t regs_pte;
        unsigned long pfn;

        /* Make the last PGD entry for this Guest point to the Switcher's PTE
         * page for this CPU (with appropriate flags). */
        switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL);

        cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;

        /* We also change the Switcher PTE page.  When we're running the Guest,
         * we want the Guest's "regs" page to appear where the first Switcher
         * page for this CPU is.  This is an optimization: when the Switcher
         * saves the Guest registers, it saves them into the first page of this
         * CPU's "struct lguest_pages": if we make sure the Guest's register
         * page is already mapped there, we don't have to copy them out
         * again. */
        pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
        regs_pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL));
        switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte;
}
/*:*/
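/* (Why that index works: "pages" lies in the top 4MB, which the Switcher's
 * single PTE page covers, so dividing its address by PAGE_SIZE and taking the
 * remainder mod PTRS_PER_PTE picks out its slot in that page.) */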

static void free_switcher_pte_pages(void)
{
        unsigned int i;

        for_each_possible_cpu(i)
                free_page((long)switcher_pte_page(i));
}

/*H:520 Setting up the Switcher PTE page for a given CPU is fairly easy, given
 * the CPU number and the "struct page"s for the Switcher code itself.
 *
 * Currently the Switcher is less than a page long, so "pages" is always 1. */
static __init void populate_switcher_pte_page(unsigned int cpu,
                                              struct page *switcher_page[],
                                              unsigned int pages)
{
        unsigned int i;
        pte_t *pte = switcher_pte_page(cpu);

        /* The first entries are easy: they map the Switcher code. */
        for (i = 0; i < pages; i++) {
                pte[i] = mk_pte(switcher_page[i],
                                __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
        }

        /* The only other thing we map is this CPU's pair of pages. */
        i = pages + cpu*2;

        /* First page (Guest registers) is writable from the Guest */
        pte[i] = pfn_pte(page_to_pfn(switcher_page[i]),
                         __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW));

        /* The second page contains the "struct lguest_ro_state", and is
         * read-only. */
        pte[i+1] = pfn_pte(page_to_pfn(switcher_page[i+1]),
                           __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
}

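/* For example, with one page of Switcher code ("pages" == 1), CPU 2's pair
 * lands at i = 1 + 2*2 = 5: entry 5 is its writable registers page and entry
 * 6 its read-only "struct lguest_ro_state" page. */
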
/* We've made it through the page table code.  Perhaps our tired brains are
 * still processing the details, or perhaps we're simply glad it's over.
 *
 * If nothing else, note that all this complexity in keeping shadow page tables
 * in sync with the Guest's page tables is there for one reason: for most
 * Guests this page table dance determines how bad performance will be.  This
 * is why Xen uses exotic direct Guest pagetable manipulation, and why both
 * Intel and AMD have implemented shadow page table support directly into
 * hardware.
 *
 * There is just one file remaining in the Host. */

/*H:510 At boot or module load time, init_pagetables() allocates and populates
 * the Switcher PTE page for each CPU. */
__init int init_pagetables(struct page **switcher_page, unsigned int pages)
{
        unsigned int i;

        for_each_possible_cpu(i) {
                switcher_pte_page(i) = (pte_t *)get_zeroed_page(GFP_KERNEL);
                if (!switcher_pte_page(i)) {
                        free_switcher_pte_pages();
                        return -ENOMEM;
                }
                populate_switcher_pte_page(i, switcher_page, pages);
        }
        return 0;
}
/*:*/

/* Cleaning up simply involves freeing the PTE page for each CPU. */
void free_pagetables(void)
{
        free_switcher_pte_pages();
}