Diffstat (limited to 'drivers/lguest/page_tables.c')
-rw-r--r--	drivers/lguest/page_tables.c	329
1 file changed, 299 insertions(+), 30 deletions(-)
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 1b0ba09b1269..b7a924ace684 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -1,5 +1,11 @@
-/* Shadow page table operations.
- * Copyright (C) Rusty Russell IBM Corporation 2006.
+/*P:700 The pagetable code, on the other hand, still shows the scars of
+ * previous encounters.  It's functional, and as neat as it can be in the
+ * circumstances, but be wary, for these things are subtle and break easily.
+ * The Guest provides a virtual to physical mapping, but we can neither trust
+ * it nor use it: we verify and convert it here to point the hardware to the
+ * actual Guest pages when running the Guest. :*/
+
+/* Copyright (C) Rusty Russell IBM Corporation 2006.
  * GPL v2 and any later version */
 #include <linux/mm.h>
 #include <linux/types.h>
@@ -9,38 +15,96 @@
 #include <asm/tlbflush.h>
 #include "lg.h"
 
+/*M:008 We hold reference to pages, which prevents them from being swapped.
+ * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
+ * to swap out.  If we had this, and a shrinker callback to trim PTE pages, we
+ * could probably consider launching Guests as non-root. :*/
+
+/*H:300
+ * The Page Table Code
+ *
+ * We use two-level page tables for the Guest.  If you're not entirely
+ * comfortable with virtual addresses, physical addresses and page tables then
+ * I recommend you review lguest.c's "Page Table Handling" (with diagrams!).
+ *
+ * The Guest keeps page tables, but we maintain the actual ones here: these are
+ * called "shadow" page tables.  Which is a very Guest-centric name: these are
+ * the real page tables the CPU uses, although we keep them up to date to
+ * reflect the Guest's.  (See what I mean about weird naming?  Since when do
+ * shadows reflect anything?)
+ *
+ * Anyway, this is the most complicated part of the Host code.  There are seven
+ * parts to this:
+ *  (i) Setting up a page table entry for the Guest when it faults,
+ *  (ii) Setting up the page table entry for the Guest stack,
+ *  (iii) Setting up a page table entry when the Guest tells us it has changed,
+ *  (iv) Switching page tables,
+ *  (v) Flushing (throwing away) page tables,
+ *  (vi) Mapping the Switcher when the Guest is about to run,
+ *  (vii) Setting up the page tables initially.
+ :*/
+
+/* Pages are 4k long, and each page table entry is 4 bytes long, giving us 1024
+ * (or 2^10) entries per page. */
 #define PTES_PER_PAGE_SHIFT 10
 #define PTES_PER_PAGE (1 << PTES_PER_PAGE_SHIFT)
+
+/* 1024 entries in a page table page maps 1024 pages: 4MB.  The Switcher is
+ * conveniently placed at the top 4MB, so it uses a separate, complete PTE
+ * page. */
 #define SWITCHER_PGD_INDEX (PTES_PER_PAGE - 1)
 
+/* We actually need a separate PTE page for each CPU.  Remember that after the
+ * Switcher code itself comes two pages for each CPU, and we don't want this
+ * CPU's guest to see the pages of any other CPU. */
 static DEFINE_PER_CPU(spte_t *, switcher_pte_pages);
 #define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)
 
+/*H:320 With our shadow and Guest types established, we need to deal with
+ * them: the page table code is curly enough to need helper functions to keep
+ * it clear and clean.
+ *
+ * The first helper takes a virtual address, and says which entry in the top
+ * level page table deals with that address.  Since each top level entry deals
+ * with 4M, this effectively divides by 4M. */
 static unsigned vaddr_to_pgd_index(unsigned long vaddr)
 {
 	return vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT);
 }
 
-/* These access the shadow versions (ie. the ones used by the CPU). */
+/* There are two functions which return pointers to the shadow (aka "real")
+ * page tables.
+ *
+ * spgd_addr() takes the virtual address and returns a pointer to the top-level
+ * page directory entry for that address.  Since we keep track of several page
+ * tables, the "i" argument tells us which one we're interested in (it's
+ * usually the current one). */
 static spgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr)
 {
 	unsigned int index = vaddr_to_pgd_index(vaddr);
 
+	/* We kill any Guest trying to touch the Switcher addresses. */
 	if (index >= SWITCHER_PGD_INDEX) {
 		kill_guest(lg, "attempt to access switcher pages");
 		index = 0;
 	}
+	/* Return a pointer to the index'th pgd entry for the i'th page table. */
 	return &lg->pgdirs[i].pgdir[index];
 }
 
+/* This routine then takes the PGD entry given above, which contains the
+ * address of the PTE page.  It then returns a pointer to the PTE entry for the
+ * given address. */
 static spte_t *spte_addr(struct lguest *lg, spgd_t spgd, unsigned long vaddr)
 {
 	spte_t *page = __va(spgd.pfn << PAGE_SHIFT);
+	/* You should never call this if the PGD entry wasn't valid */
 	BUG_ON(!(spgd.flags & _PAGE_PRESENT));
 	return &page[(vaddr >> PAGE_SHIFT) % PTES_PER_PAGE];
 }
 
-/* These access the guest versions. */
+/* These two functions are just like the above two, except they access the
+ * Guest page tables.  Hence they return a Guest address. */
 static unsigned long gpgd_addr(struct lguest *lg, unsigned long vaddr)
 {
 	unsigned int index = vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT);
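
The two shift constants above do all the address arithmetic: the top ten bits of a virtual address pick the PGD entry, the next ten pick the PTE within that entry's page. A standalone userspace sketch of that split (assuming 4k pages, i.e. a PAGE_SHIFT of 12, as on the 32-bit non-PAE x86 this code targets):

#include <stdio.h>

#define PAGE_SHIFT          12
#define PTES_PER_PAGE_SHIFT 10
#define PTES_PER_PAGE       (1 << PTES_PER_PAGE_SHIFT)

int main(void)
{
	/* A typical Guest kernel virtual address. */
	unsigned long vaddr = 0xC0101234UL;

	/* Top-level (PGD) index: each entry covers 4MB, so divide by 4MB. */
	unsigned long pgd_index = vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT);

	/* Second-level (PTE) index within that 4MB: page number mod 1024. */
	unsigned long pte_index = (vaddr >> PAGE_SHIFT) % PTES_PER_PAGE;

	/* Prints pgd index 768 (0x300) and pte index 257 (0x101). */
	printf("pgd index %lu, pte index %lu\n", pgd_index, pte_index);
	return 0;
}
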
@@ -55,12 +119,24 @@ static unsigned long gpte_addr(struct lguest *lg,
 	return gpage + ((vaddr>>PAGE_SHIFT) % PTES_PER_PAGE) * sizeof(gpte_t);
 }
 
-/* Do a virtual -> physical mapping on a user page. */
+/*H:350 This routine takes a page number given by the Guest and converts it to
+ * an actual, physical page number.  It can fail for several reasons: the
+ * virtual address might not be mapped by the Launcher, the write flag is set
+ * and the page is read-only, or the write flag was set and the page was
+ * shared so had to be copied, but we ran out of memory.
+ *
+ * This holds a reference to the page, so release_pte() is careful to
+ * put that back. */
 static unsigned long get_pfn(unsigned long virtpfn, int write)
 {
 	struct page *page;
+	/* This value indicates failure. */
 	unsigned long ret = -1UL;
 
+	/* get_user_pages() is a complex interface: it gets the "struct
+	 * vm_area_struct" and "struct page" associated with a range of pages.
+	 * It also needs the task's mmap_sem held, and is not very quick.
+	 * It returns the number of pages it got. */
 	down_read(&current->mm->mmap_sem);
 	if (get_user_pages(current, current->mm, virtpfn << PAGE_SHIFT,
 			   1, write, 1, &page, NULL) == 1)
@@ -69,28 +145,47 @@ static unsigned long get_pfn(unsigned long virtpfn, int write)
 	return ret;
 }
 
+/*H:340 Converting a Guest page table entry to a shadow (ie. real) page table
+ * entry can be a little tricky.  The flags are (almost) the same, but the
+ * Guest PTE contains a virtual page number: the CPU needs the real page
+ * number. */
 static spte_t gpte_to_spte(struct lguest *lg, gpte_t gpte, int write)
 {
 	spte_t spte;
 	unsigned long pfn;
 
-	/* We ignore the global flag. */
+	/* The Guest sets the global flag, because it thinks that it is using
+	 * PGE.  We only told it to use PGE so it would tell us whether it was
+	 * flushing a kernel mapping or a userspace mapping.  We don't actually
+	 * use the global bit, so throw it away. */
 	spte.flags = (gpte.flags & ~_PAGE_GLOBAL);
+
+	/* We need a temporary "unsigned long" variable to hold the answer from
+	 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
+	 * fit in spte.pfn.  get_pfn() finds the real physical number of the
+	 * page, given the virtual number. */
 	pfn = get_pfn(gpte.pfn, write);
 	if (pfn == -1UL) {
 		kill_guest(lg, "failed to get page %u", gpte.pfn);
-		/* Must not put_page() bogus page on cleanup. */
+		/* When we destroy the Guest, we'll go through the shadow page
+		 * tables and release_pte() them.  Make sure we don't think
+		 * this one is valid! */
 		spte.flags = 0;
 	}
+	/* Now we assign the page number, and our shadow PTE is complete. */
 	spte.pfn = pfn;
 	return spte;
 }
 
+/*H:460 And to complete the chain, release_pte() looks like this: */
 static void release_pte(spte_t pte)
 {
+	/* Remember that get_user_pages() took a reference to the page, in
+	 * get_pfn()?  We have to put it back now. */
 	if (pte.flags & _PAGE_PRESENT)
 		put_page(pfn_to_page(pte.pfn));
 }
+/*:*/
 
 static void check_gpte(struct lguest *lg, gpte_t gpte)
 {
@@ -104,11 +199,16 @@ static void check_gpgd(struct lguest *lg, gpgd_t gpgd)
 		kill_guest(lg, "bad page directory entry");
 }
 
-/* FIXME: We hold reference to pages, which prevents them from being
-   swapped.  It'd be nice to have a callback when Linux wants to swap out. */
-
-/* We fault pages in, which allows us to update accessed/dirty bits.
- * Return true if we got page. */
+/*H:330
+ * (i) Setting up a page table entry for the Guest when it faults
+ *
+ * We saw this call in run_guest(): when we see a page fault in the Guest, we
+ * come here.  That's because we only set up the shadow page tables lazily as
+ * they're needed, so we get page faults all the time and quietly fix them up
+ * and return to the Guest without it knowing.
+ *
+ * If we fixed up the fault (ie. we mapped the address), this routine returns
+ * true. */
 int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
 {
 	gpgd_t gpgd;
@@ -117,106 +217,161 @@ int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
 	gpte_t gpte;
 	spte_t *spte;
 
+	/* First step: get the top-level Guest page table entry. */
 	gpgd = mkgpgd(lgread_u32(lg, gpgd_addr(lg, vaddr)));
+	/* Toplevel not present?  We can't map it in. */
 	if (!(gpgd.flags & _PAGE_PRESENT))
 		return 0;
 
+	/* Now look at the matching shadow entry. */
 	spgd = spgd_addr(lg, lg->pgdidx, vaddr);
 	if (!(spgd->flags & _PAGE_PRESENT)) {
-		/* Get a page of PTEs for them. */
+		/* No shadow entry: allocate a new shadow PTE page. */
 		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
-		/* FIXME: Steal from self in this case? */
+		/* This is not really the Guest's fault, but killing it is
+		 * simple for this corner case. */
 		if (!ptepage) {
 			kill_guest(lg, "out of memory allocating pte page");
 			return 0;
 		}
+		/* We check that the Guest pgd is OK. */
 		check_gpgd(lg, gpgd);
+		/* And we copy the flags to the shadow PGD entry.  The page
+		 * number in the shadow PGD is the page we just allocated. */
 		spgd->raw.val = (__pa(ptepage) | gpgd.flags);
 	}
 
+	/* OK, now we look at the lower level in the Guest page table: keep its
+	 * address, because we might update it later. */
 	gpte_ptr = gpte_addr(lg, gpgd, vaddr);
 	gpte = mkgpte(lgread_u32(lg, gpte_ptr));
 
-	/* No page? */
+	/* If this page isn't in the Guest page tables, we can't page it in. */
 	if (!(gpte.flags & _PAGE_PRESENT))
 		return 0;
 
-	/* Write to read-only page? */
+	/* Check they're not trying to write to a page the Guest wants
+	 * read-only (bit 2 of errcode == write). */
 	if ((errcode & 2) && !(gpte.flags & _PAGE_RW))
 		return 0;
 
-	/* User access to a non-user page? */
+	/* User access to a kernel page? (bit 3 == user access) */
 	if ((errcode & 4) && !(gpte.flags & _PAGE_USER))
 		return 0;
 
+	/* Check that the Guest PTE flags are OK, and the page number is below
+	 * the pfn_limit (ie. not mapping the Launcher binary). */
 	check_gpte(lg, gpte);
+	/* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
 	gpte.flags |= _PAGE_ACCESSED;
 	if (errcode & 2)
 		gpte.flags |= _PAGE_DIRTY;
 
-	/* We're done with the old pte. */
+	/* Get the pointer to the shadow PTE entry we're going to set. */
 	spte = spte_addr(lg, *spgd, vaddr);
+	/* If there was a valid shadow PTE entry here before, we release it.
+	 * This can happen with a write to a previously read-only entry. */
 	release_pte(*spte);
 
-	/* We don't make it writable if this isn't a write: later
-	 * write will fault so we can set dirty bit in guest. */
+	/* If this is a write, we insist that the Guest page is writable (the
+	 * final arg to gpte_to_spte()). */
 	if (gpte.flags & _PAGE_DIRTY)
 		*spte = gpte_to_spte(lg, gpte, 1);
 	else {
+		/* If this is a read, don't set the "writable" bit in the page
+		 * table entry, even if the Guest says it's writable.  That way
+		 * we come back here when a write does actually occur, so we
+		 * can update the Guest's _PAGE_DIRTY flag. */
 		gpte_t ro_gpte = gpte;
 		ro_gpte.flags &= ~_PAGE_RW;
 		*spte = gpte_to_spte(lg, ro_gpte, 0);
 	}
 
-	/* Now we update dirty/accessed on guest. */
+	/* Finally, we write the Guest PTE entry back: we've set the
+	 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
 	lgwrite_u32(lg, gpte_ptr, gpte.raw.val);
+
+	/* We succeeded in mapping the page! */
 	return 1;
 }
 
-/* This is much faster than the full demand_page logic. */
+/*H:360 (ii) Setting up the page table entry for the Guest stack.
+ *
+ * Remember pin_stack_pages() which makes sure the stack is mapped?  It could
+ * simply call demand_page(), but as we've seen that logic is quite long, and
+ * usually the stack pages are already mapped anyway, so it's not required.
+ *
+ * This is a quick version which answers the question: is this virtual address
+ * mapped by the shadow page tables, and is it writable? */
 static int page_writable(struct lguest *lg, unsigned long vaddr)
 {
 	spgd_t *spgd;
 	unsigned long flags;
 
+	/* Look at the top level entry: is it present? */
 	spgd = spgd_addr(lg, lg->pgdidx, vaddr);
 	if (!(spgd->flags & _PAGE_PRESENT))
 		return 0;
 
+	/* Check the flags on the pte entry itself: it must be present and
+	 * writable. */
 	flags = spte_addr(lg, *spgd, vaddr)->flags;
 	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
 }
 
+/* So, when pin_stack_pages() asks us to pin a page, we check if it's already
+ * in the page tables, and if not, we call demand_page() with error code 2
+ * (meaning "write"). */
 void pin_page(struct lguest *lg, unsigned long vaddr)
 {
 	if (!page_writable(lg, vaddr) && !demand_page(lg, vaddr, 2))
 		kill_guest(lg, "bad stack page %#lx", vaddr);
 }
 
+/*H:450 If we chase down the release_pgd() code, it looks like this: */
 static void release_pgd(struct lguest *lg, spgd_t *spgd)
 {
+	/* If the entry's not present, there's nothing to release. */
 	if (spgd->flags & _PAGE_PRESENT) {
 		unsigned int i;
+		/* Converting the pfn to find the actual PTE page is easy: turn
+		 * the page number into a physical address, then convert to a
+		 * virtual address (easy for kernel pages like this one). */
 		spte_t *ptepage = __va(spgd->pfn << PAGE_SHIFT);
+		/* For each entry in the page, we might need to release it. */
 		for (i = 0; i < PTES_PER_PAGE; i++)
 			release_pte(ptepage[i]);
+		/* Now we can free the page of PTEs */
 		free_page((long)ptepage);
+		/* And zero out the PGD entry so we never release it twice. */
 		spgd->raw.val = 0;
 	}
 }
 
+/*H:440 (v) Flushing (throwing away) page tables.
+ *
+ * We saw flush_user_mappings() called when we re-used a top-level pgdir page.
+ * It simply releases every PTE page from 0 up to the kernel address. */
 static void flush_user_mappings(struct lguest *lg, int idx)
 {
 	unsigned int i;
+	/* Release every pgd entry up to the kernel's address. */
 	for (i = 0; i < vaddr_to_pgd_index(lg->page_offset); i++)
 		release_pgd(lg, lg->pgdirs[idx].pgdir + i);
 }
 
+/* The Guest also has a hypercall to do this manually: it's used when a large
+ * number of mappings have been changed. */
 void guest_pagetable_flush_user(struct lguest *lg)
 {
+	/* Drop the userspace part of the current page table. */
 	flush_user_mappings(lg, lg->pgdidx);
 }
+/*:*/
 
+/* We keep several page tables.  This is a simple routine to find the page
+ * table (if any) corresponding to this top-level address the Guest has given
+ * us. */
 static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
 {
 	unsigned int i;
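
demand_page() above keys off two values in the hardware page fault error code: 2 means the faulting access was a write, 4 means it came from userspace (and pin_page() passes a hand-made code of 2 to force a writable mapping). A small standalone decoder for those bits; the FAULT_* names are invented for this sketch, not part of the patch:

#include <stdio.h>

/* x86 page fault error code bits (the values demand_page() tests). */
#define FAULT_PROT  1   /* 0: page not present, 1: protection violation */
#define FAULT_WRITE 2   /* 0: read access,      1: write access */
#define FAULT_USER  4   /* 0: kernel mode,      1: user mode */

static void decode(int errcode)
{
	printf("errcode %d: %s access, %s mode, %s\n", errcode,
	       (errcode & FAULT_WRITE) ? "write" : "read",
	       (errcode & FAULT_USER)  ? "user"  : "kernel",
	       (errcode & FAULT_PROT)  ? "protection fault" : "page not present");
}

int main(void)
{
	decode(2);  /* what pin_page() asks for: a kernel-mode write */
	decode(6);  /* a userspace write to a missing or read-only page */
	return 0;
}
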
@@ -226,21 +381,30 @@ static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
 	return i;
 }
 
+/*H:435 And this is us, creating the new page directory.  If we really do
+ * allocate a new one (and so the kernel parts are not there), we set
+ * blank_pgdir. */
 static unsigned int new_pgdir(struct lguest *lg,
 			      unsigned long cr3,
 			      int *blank_pgdir)
 {
 	unsigned int next;
 
+	/* We pick one entry at random to throw out.  Choosing the Least
+	 * Recently Used might be better, but this is easy. */
 	next = random32() % ARRAY_SIZE(lg->pgdirs);
+	/* If it's never been allocated at all before, try now. */
 	if (!lg->pgdirs[next].pgdir) {
 		lg->pgdirs[next].pgdir = (spgd_t *)get_zeroed_page(GFP_KERNEL);
+		/* If the allocation fails, just keep using the one we have */
 		if (!lg->pgdirs[next].pgdir)
 			next = lg->pgdidx;
 		else
-			/* There are no mappings: you'll need to re-pin */
+			/* This is a blank page, so there are no kernel
+			 * mappings: caller must map the stack! */
 			*blank_pgdir = 1;
 	}
+	/* Record which Guest toplevel this shadows. */
 	lg->pgdirs[next].cr3 = cr3;
 	/* Release all the non-kernel mappings. */
 	flush_user_mappings(lg, next);
@@ -248,82 +412,161 @@ static unsigned int new_pgdir(struct lguest *lg,
 	return next;
 }
 
+/*H:430 (iv) Switching page tables
+ *
+ * This is what happens when the Guest changes page tables (ie. changes the
+ * top-level pgdir).  This happens on almost every context switch. */
 void guest_new_pagetable(struct lguest *lg, unsigned long pgtable)
 {
 	int newpgdir, repin = 0;
 
+	/* Look to see if we have this one already. */
 	newpgdir = find_pgdir(lg, pgtable);
+	/* If not, we allocate or mug an existing one: if it's a fresh one,
+	 * repin gets set to 1. */
 	if (newpgdir == ARRAY_SIZE(lg->pgdirs))
 		newpgdir = new_pgdir(lg, pgtable, &repin);
+	/* Change the current pgd index to the new one. */
 	lg->pgdidx = newpgdir;
+	/* If it was completely blank, we map in the Guest kernel stack */
 	if (repin)
 		pin_stack_pages(lg);
 }
 
+/*H:470 Finally, a routine which throws away everything: all PGD entries in all
+ * the shadow page tables.  This is used when we destroy the Guest. */
 static void release_all_pagetables(struct lguest *lg)
 {
 	unsigned int i, j;
 
+	/* Every shadow pagetable this Guest has */
 	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
 		if (lg->pgdirs[i].pgdir)
+			/* Every PGD entry except the Switcher at the top */
 			for (j = 0; j < SWITCHER_PGD_INDEX; j++)
 				release_pgd(lg, lg->pgdirs[i].pgdir + j);
 }
 
+/* We also throw away everything when a Guest tells us it's changed a kernel
+ * mapping.  Since kernel mappings are in every page table, it's easiest to
+ * throw them all away.  This is amazingly slow, but thankfully rare. */
 void guest_pagetable_clear_all(struct lguest *lg)
 {
 	release_all_pagetables(lg);
+	/* We need the Guest kernel stack mapped again. */
 	pin_stack_pages(lg);
 }
 
+/*H:420 This is the routine which actually sets the page table entry for the
+ * "idx"'th shadow page table.
+ *
+ * Normally, we can just throw out the old entry and replace it with 0: if they
+ * use it demand_page() will put the new entry in.  We need to do this anyway:
+ * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
+ * is read from, and _PAGE_DIRTY when it's written to.
+ *
+ * But Avi Kivity pointed out that most Operating Systems (Linux included) set
+ * these bits on PTEs immediately anyway.  This is done to save the CPU from
+ * having to update them, but it helps us the same way: if they set
+ * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
+ * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
+ */
 static void do_set_pte(struct lguest *lg, int idx,
 		       unsigned long vaddr, gpte_t gpte)
 {
+	/* Look up the matching shadow page directory entry. */
 	spgd_t *spgd = spgd_addr(lg, idx, vaddr);
+
+	/* If the top level isn't present, there's no entry to update. */
 	if (spgd->flags & _PAGE_PRESENT) {
+		/* Otherwise, we start by releasing the existing entry. */
 		spte_t *spte = spte_addr(lg, *spgd, vaddr);
 		release_pte(*spte);
+
+		/* If they're setting this entry as dirty or accessed, we might
+		 * as well put that entry they've given us in now.  This shaves
+		 * 10% off a copy-on-write micro-benchmark. */
 		if (gpte.flags & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
 			check_gpte(lg, gpte);
 			*spte = gpte_to_spte(lg, gpte, gpte.flags&_PAGE_DIRTY);
 		} else
+			/* Otherwise we can demand_page() it in later. */
 			spte->raw.val = 0;
 	}
 }
 
+/*H:410 Updating a PTE entry is a little trickier.
+ *
+ * We keep track of several different page tables (the Guest uses one for each
+ * process, so it makes sense to cache at least a few).  Each of these has
+ * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
+ * all processes.  So when the page table above that address changes, we update
+ * all the page tables, not just the current one.  This is rare.
+ *
+ * The benefit is that when we have to track a new page table, we can copy
+ * all the kernel mappings.  This speeds up context switch immensely. */
 void guest_set_pte(struct lguest *lg,
 		   unsigned long cr3, unsigned long vaddr, gpte_t gpte)
 {
-	/* Kernel mappings must be changed on all top levels. */
+	/* Kernel mappings must be changed on all top levels.  Slow, but
+	 * doesn't happen often. */
 	if (vaddr >= lg->page_offset) {
 		unsigned int i;
 		for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
 			if (lg->pgdirs[i].pgdir)
 				do_set_pte(lg, i, vaddr, gpte);
 	} else {
+		/* Is this page table one we have a shadow for? */
 		int pgdir = find_pgdir(lg, cr3);
 		if (pgdir != ARRAY_SIZE(lg->pgdirs))
+			/* If so, do the update. */
 			do_set_pte(lg, pgdir, vaddr, gpte);
 	}
 }
 
+/*H:400
+ * (iii) Setting up a page table entry when the Guest tells us it has changed.
+ *
+ * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
+ * with the other side of page tables while we're here: what happens when the
+ * Guest asks for a page table to be updated?
+ *
+ * We already saw that demand_page() will fill in the shadow page tables when
+ * needed, so we can simply remove shadow page table entries whenever the Guest
+ * tells us they've changed.  When the Guest tries to use the new entry it will
+ * fault and demand_page() will fix it up.
+ *
+ * So with that in mind here's our code to update a (top-level) PGD entry:
+ */
 void guest_set_pmd(struct lguest *lg, unsigned long cr3, u32 idx)
 {
 	int pgdir;
 
+	/* The kernel seems to try to initialize this early on: we ignore its
+	 * attempts to map over the Switcher. */
 	if (idx >= SWITCHER_PGD_INDEX)
 		return;
 
+	/* If they're talking about a page table we have a shadow for... */
 	pgdir = find_pgdir(lg, cr3);
 	if (pgdir < ARRAY_SIZE(lg->pgdirs))
+		/* ... throw it away. */
 		release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
 }
 
+/*H:500 (vii) Setting up the page tables initially.
+ *
+ * When a Guest is first created, the Launcher tells us where the toplevel of
+ * its first page table is.  We set some things up here: */
 int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
 {
-	/* We assume this in flush_user_mappings, so check now */
+	/* In flush_user_mappings() we loop from 0 to
+	 * "vaddr_to_pgd_index(lg->page_offset)".  This assumes it won't hit
+	 * the Switcher mappings, so check that now. */
 	if (vaddr_to_pgd_index(lg->page_offset) >= SWITCHER_PGD_INDEX)
 		return -EINVAL;
+	/* We start on the first shadow page table, and give it a blank PGD
+	 * page. */
 	lg->pgdidx = 0;
 	lg->pgdirs[lg->pgdidx].cr3 = pgtable;
 	lg->pgdirs[lg->pgdidx].pgdir = (spgd_t*)get_zeroed_page(GFP_KERNEL);
@@ -332,33 +575,48 @@ int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
 	return 0;
 }
 
+/* When a Guest dies, our cleanup is fairly simple. */
 void free_guest_pagetable(struct lguest *lg)
 {
 	unsigned int i;
 
+	/* Throw away all page table pages. */
 	release_all_pagetables(lg);
+	/* Now free the top levels: free_page() can handle 0 just fine. */
 	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
 		free_page((long)lg->pgdirs[i].pgdir);
 }
 
-/* Caller must be preempt-safe */
+/*H:480 (vi) Mapping the Switcher when the Guest is about to run.
+ *
+ * The Switcher and the two pages for this CPU need to be available to the
+ * Guest (and not the pages for other CPUs).  We have the appropriate PTE pages
+ * for each CPU already set up, we just need to hook them in. */
 void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages)
 {
 	spte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
 	spgd_t switcher_pgd;
 	spte_t regs_pte;
 
-	/* Since switcher less that 4MB, we simply mug top pte page. */
+	/* Make the last PGD entry for this Guest point to the Switcher's PTE
+	 * page for this CPU (with appropriate flags). */
 	switcher_pgd.pfn = __pa(switcher_pte_page) >> PAGE_SHIFT;
 	switcher_pgd.flags = _PAGE_KERNEL;
 	lg->pgdirs[lg->pgdidx].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
 
-	/* Map our regs page over stack page. */
+	/* We also change the Switcher PTE page.  When we're running the Guest,
+	 * we want the Guest's "regs" page to appear where the first Switcher
+	 * page for this CPU is.  This is an optimization: when the Switcher
+	 * saves the Guest registers, it saves them into the first page of this
+	 * CPU's "struct lguest_pages": if we make sure the Guest's register
+	 * page is already mapped there, we don't have to copy them out
+	 * again. */
 	regs_pte.pfn = __pa(lg->regs_page) >> PAGE_SHIFT;
 	regs_pte.flags = _PAGE_KERNEL;
 	switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTES_PER_PAGE]
 		= regs_pte;
 }
+/*:*/
 
 static void free_switcher_pte_pages(void)
 {
@@ -368,6 +626,10 @@ static void free_switcher_pte_pages(void)
 		free_page((long)switcher_pte_page(i));
 }
 
+/*H:520 Setting up the Switcher PTE page for a given CPU is fairly easy, given
+ * the CPU number and the "struct page"s for the Switcher code itself.
+ *
+ * Currently the Switcher is less than a page long, so "pages" is always 1. */
 static __init void populate_switcher_pte_page(unsigned int cpu,
 					      struct page *switcher_page[],
 					      unsigned int pages)
@@ -375,21 +637,26 @@ static __init void populate_switcher_pte_page(unsigned int cpu,
 	unsigned int i;
 	spte_t *pte = switcher_pte_page(cpu);
 
+	/* The first entries are easy: they map the Switcher code. */
 	for (i = 0; i < pages; i++) {
 		pte[i].pfn = page_to_pfn(switcher_page[i]);
 		pte[i].flags = _PAGE_PRESENT|_PAGE_ACCESSED;
 	}
 
-	/* We only map this CPU's pages, so guest can't see others. */
+	/* The only other thing we map is this CPU's pair of pages. */
 	i = pages + cpu*2;
 
-	/* First page (regs) is rw, second (state) is ro. */
+	/* First page (Guest registers) is writable from the Guest */
 	pte[i].pfn = page_to_pfn(switcher_page[i]);
 	pte[i].flags = _PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW;
+	/* The second page contains the "struct lguest_ro_state", and is
+	 * read-only. */
 	pte[i+1].pfn = page_to_pfn(switcher_page[i+1]);
 	pte[i+1].flags = _PAGE_PRESENT|_PAGE_ACCESSED;
 }
 
+/*H:510 At boot or module load time, init_pagetables() allocates and populates
+ * the Switcher PTE page for each CPU. */
 __init int init_pagetables(struct page **switcher_page, unsigned int pages)
 {
 	unsigned int i;
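
populate_switcher_pte_page() above lays each CPU's PTE page out as the Switcher code pages followed by that CPU's own pair of pages. A tiny sketch of where the pairs land when there is one page of Switcher code:

#include <stdio.h>

int main(void)
{
	/* With one page of Switcher code ("pages" == 1), each CPU's pair of
	 * pages lands at slots 1+2, 3+4, 5+6, ... of its own PTE page. */
	unsigned int pages = 1, cpu;

	for (cpu = 0; cpu < 4; cpu++) {
		unsigned int i = pages + cpu * 2;
		printf("cpu %u: regs page at slot %u, ro_state at slot %u\n",
		       cpu, i, i + 1);
	}
	return 0;
}
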
@@ -404,7 +671,9 @@ __init int init_pagetables(struct page **switcher_page, unsigned int pages)
 	}
 	return 0;
 }
+/*:*/
 
+/* Cleaning up simply involves freeing the PTE page for each CPU. */
 void free_pagetables(void)
 {
 	free_switcher_pte_pages();