author     Rusty Russell <rusty@rustcorp.com.au>   2007-07-19 04:49:23 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-07-19 13:04:52 -0400
commit     d7e28ffe6c74416b54345d6004fd0964c115b12c (patch)
tree       844beb4f400d5400098538e0c1e5f12d20a9504a /drivers/lguest/page_tables.c
parent     07ad157f6e5d228be78acd5cea0291e5d0360398 (diff)
lguest: the host code
This is the code for the "lg.ko" module, which allows lguest guests to
be launched.
[akpm@linux-foundation.org: update for futex-new-private-futexes]
[akpm@linux-foundation.org: build fix]
[jmorris@namei.org: lguest: use hrtimers]
[akpm@linux-foundation.org: x86_64 build fix]
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Andi Kleen <ak@suse.de>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/lguest/page_tables.c')
-rw-r--r--   drivers/lguest/page_tables.c   411
1 file changed, 411 insertions, 0 deletions
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
new file mode 100644
index 000000000000..1b0ba09b1269
--- /dev/null
+++ b/drivers/lguest/page_tables.c
@@ -0,0 +1,411 @@
/* Shadow page table operations.
 * Copyright (C) Rusty Russell IBM Corporation 2006.
 * GPL v2 and any later version */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include "lg.h"

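/* A 32-bit two-level page table: a page of PTEs holds 1024 entries
 * (PTES_PER_PAGE), so one top-level (PGD) entry covers 4MB.  The very last
 * top-level slot (SWITCHER_PGD_INDEX) is reserved for mapping the switcher,
 * and each CPU gets its own page of switcher PTEs (switcher_pte_pages). */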
#define PTES_PER_PAGE_SHIFT 10
#define PTES_PER_PAGE (1 << PTES_PER_PAGE_SHIFT)
#define SWITCHER_PGD_INDEX (PTES_PER_PAGE - 1)

static DEFINE_PER_CPU(spte_t *, switcher_pte_pages);
#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)

static unsigned vaddr_to_pgd_index(unsigned long vaddr)
{
	return vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT);
}

/* These access the shadow versions (ie. the ones used by the CPU). */
static spgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr)
{
	unsigned int index = vaddr_to_pgd_index(vaddr);

	if (index >= SWITCHER_PGD_INDEX) {
		kill_guest(lg, "attempt to access switcher pages");
		index = 0;
	}
	return &lg->pgdirs[i].pgdir[index];
}

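/* Given a present shadow PGD entry, find the shadow PTE for "vaddr" inside
 * the page of PTEs that the entry points to. */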
static spte_t *spte_addr(struct lguest *lg, spgd_t spgd, unsigned long vaddr)
{
	spte_t *page = __va(spgd.pfn << PAGE_SHIFT);
	BUG_ON(!(spgd.flags & _PAGE_PRESENT));
	return &page[(vaddr >> PAGE_SHIFT) % PTES_PER_PAGE];
}

/* These access the guest versions. */
static unsigned long gpgd_addr(struct lguest *lg, unsigned long vaddr)
{
	unsigned int index = vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT);
	return lg->pgdirs[lg->pgdidx].cr3 + index * sizeof(gpgd_t);
}

static unsigned long gpte_addr(struct lguest *lg,
			       gpgd_t gpgd, unsigned long vaddr)
{
	unsigned long gpage = gpgd.pfn << PAGE_SHIFT;
	BUG_ON(!(gpgd.flags & _PAGE_PRESENT));
	return gpage + ((vaddr>>PAGE_SHIFT) % PTES_PER_PAGE) * sizeof(gpte_t);
}

/* Do a virtual -> physical mapping on a user page. */
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
	struct page *page;
	unsigned long ret = -1UL;

	down_read(&current->mm->mmap_sem);
	if (get_user_pages(current, current->mm, virtpfn << PAGE_SHIFT,
			   1, write, 1, &page, NULL) == 1)
		ret = page_to_pfn(page);
	up_read(&current->mm->mmap_sem);
	return ret;
}

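/* Turn a guest PTE into a shadow PTE.  The guest's "pfn" is treated as a page
 * number in the address space of the current process (the userspace launcher
 * which holds the guest's memory): get_pfn() pins that page and gives us the
 * real frame to put in the shadow entry.  If pinning fails, the guest is
 * killed and the flags are cleared so cleanup won't put_page() a bogus page. */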
static spte_t gpte_to_spte(struct lguest *lg, gpte_t gpte, int write)
{
	spte_t spte;
	unsigned long pfn;

	/* We ignore the global flag. */
	spte.flags = (gpte.flags & ~_PAGE_GLOBAL);
	pfn = get_pfn(gpte.pfn, write);
	if (pfn == -1UL) {
		kill_guest(lg, "failed to get page %u", gpte.pfn);
		/* Must not put_page() bogus page on cleanup. */
		spte.flags = 0;
	}
	spte.pfn = pfn;
	return spte;
}

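/* Drop the page reference get_pfn() took when this shadow PTE was created. */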
static void release_pte(spte_t pte)
{
	if (pte.flags & _PAGE_PRESENT)
		put_page(pfn_to_page(pte.pfn));
}

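/* Sanity checks on guest entries: refuse flag combinations we don't handle
 * (write-through caching, 4MB superpages) and page numbers beyond the
 * guest's memory limit. */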
static void check_gpte(struct lguest *lg, gpte_t gpte)
{
	if ((gpte.flags & (_PAGE_PWT|_PAGE_PSE)) || gpte.pfn >= lg->pfn_limit)
		kill_guest(lg, "bad page table entry");
}

static void check_gpgd(struct lguest *lg, gpgd_t gpgd)
{
	if ((gpgd.flags & ~_PAGE_TABLE) || gpgd.pfn >= lg->pfn_limit)
		kill_guest(lg, "bad page directory entry");
}

/* FIXME: We hold references to pages, which prevents them from being
   swapped.  It'd be nice to have a callback when Linux wants to swap out. */

/* We fault pages in, which allows us to update accessed/dirty bits.
 * Return true if we got the page. */
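/* "errcode" is the page fault error code the processor gave us: bit 1 is set
 * for a write access and bit 2 for a userspace access, hence the
 * "errcode & 2" and "errcode & 4" tests below. */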
int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
{
	gpgd_t gpgd;
	spgd_t *spgd;
	unsigned long gpte_ptr;
	gpte_t gpte;
	spte_t *spte;

	gpgd = mkgpgd(lgread_u32(lg, gpgd_addr(lg, vaddr)));
	if (!(gpgd.flags & _PAGE_PRESENT))
		return 0;

	spgd = spgd_addr(lg, lg->pgdidx, vaddr);
	if (!(spgd->flags & _PAGE_PRESENT)) {
		/* Get a page of PTEs for them. */
		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
		/* FIXME: Steal from self in this case? */
		if (!ptepage) {
			kill_guest(lg, "out of memory allocating pte page");
			return 0;
		}
		check_gpgd(lg, gpgd);
		spgd->raw.val = (__pa(ptepage) | gpgd.flags);
	}

	gpte_ptr = gpte_addr(lg, gpgd, vaddr);
	gpte = mkgpte(lgread_u32(lg, gpte_ptr));

	/* No page? */
	if (!(gpte.flags & _PAGE_PRESENT))
		return 0;

	/* Write to read-only page? */
	if ((errcode & 2) && !(gpte.flags & _PAGE_RW))
		return 0;

	/* User access to a non-user page? */
	if ((errcode & 4) && !(gpte.flags & _PAGE_USER))
		return 0;

	check_gpte(lg, gpte);
	gpte.flags |= _PAGE_ACCESSED;
	if (errcode & 2)
		gpte.flags |= _PAGE_DIRTY;

	/* We're done with the old pte. */
	spte = spte_addr(lg, *spgd, vaddr);
	release_pte(*spte);

	/* We don't make it writable if this isn't a write: a later
	 * write will fault so we can set the dirty bit in the guest. */
	if (gpte.flags & _PAGE_DIRTY)
		*spte = gpte_to_spte(lg, gpte, 1);
	else {
		gpte_t ro_gpte = gpte;
		ro_gpte.flags &= ~_PAGE_RW;
		*spte = gpte_to_spte(lg, ro_gpte, 0);
	}

	/* Now we update the dirty/accessed bits in the guest's pte. */
	lgwrite_u32(lg, gpte_ptr, gpte.raw.val);
	return 1;
}

/* This is much faster than the full demand_page logic. */
static int page_writable(struct lguest *lg, unsigned long vaddr)
{
	spgd_t *spgd;
	unsigned long flags;

	spgd = spgd_addr(lg, lg->pgdidx, vaddr);
	if (!(spgd->flags & _PAGE_PRESENT))
		return 0;

	flags = spte_addr(lg, *spgd, vaddr)->flags;
	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}

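/* Make sure the page at "vaddr" is mapped writable in the shadow tables,
 * demand-faulting it in if necessary; used to pin the guest's stack pages. */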
void pin_page(struct lguest *lg, unsigned long vaddr)
{
	if (!page_writable(lg, vaddr) && !demand_page(lg, vaddr, 2))
		kill_guest(lg, "bad stack page %#lx", vaddr);
}

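/* If this shadow PGD entry is present, release every page its PTE page maps,
 * free the PTE page itself and clear the entry. */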
static void release_pgd(struct lguest *lg, spgd_t *spgd)
{
	if (spgd->flags & _PAGE_PRESENT) {
		unsigned int i;
		spte_t *ptepage = __va(spgd->pfn << PAGE_SHIFT);
		for (i = 0; i < PTES_PER_PAGE; i++)
			release_pte(ptepage[i]);
		free_page((long)ptepage);
		spgd->raw.val = 0;
	}
}

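/* Throw away all shadow PGD entries below the guest's page_offset, i.e. every
 * userspace mapping, leaving the kernel mappings alone. */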
static void flush_user_mappings(struct lguest *lg, int idx)
{
	unsigned int i;
	for (i = 0; i < vaddr_to_pgd_index(lg->page_offset); i++)
		release_pgd(lg, lg->pgdirs[idx].pgdir + i);
}

void guest_pagetable_flush_user(struct lguest *lg)
{
	flush_user_mappings(lg, lg->pgdidx);
}

static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
	unsigned int i;
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].cr3 == pgtable)
			break;
	return i;
}

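/* We keep only a small array of shadow page tables: pick a slot at random to
 * recycle, allocating its top-level page the first time the slot is used
 * (falling back to the current pgdir if that allocation fails).  "blank_pgdir"
 * tells the caller the new shadow is empty, so stack pages must be re-pinned. */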
static unsigned int new_pgdir(struct lguest *lg,
			      unsigned long cr3,
			      int *blank_pgdir)
{
	unsigned int next;

	next = random32() % ARRAY_SIZE(lg->pgdirs);
	if (!lg->pgdirs[next].pgdir) {
		lg->pgdirs[next].pgdir = (spgd_t *)get_zeroed_page(GFP_KERNEL);
		if (!lg->pgdirs[next].pgdir)
			next = lg->pgdidx;
		else
			/* There are no mappings: you'll need to re-pin */
			*blank_pgdir = 1;
	}
	lg->pgdirs[next].cr3 = cr3;
	/* Release all the non-kernel mappings. */
	flush_user_mappings(lg, next);

	return next;
}

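/* The guest has loaded a new value into cr3: find the shadow page table for
 * it (or evict one to make room) and switch to it. */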
void guest_new_pagetable(struct lguest *lg, unsigned long pgtable)
{
	int newpgdir, repin = 0;

	newpgdir = find_pgdir(lg, pgtable);
	if (newpgdir == ARRAY_SIZE(lg->pgdirs))
		newpgdir = new_pgdir(lg, pgtable, &repin);
	lg->pgdidx = newpgdir;
	if (repin)
		pin_stack_pages(lg);
}

static void release_all_pagetables(struct lguest *lg)
{
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].pgdir)
			for (j = 0; j < SWITCHER_PGD_INDEX; j++)
				release_pgd(lg, lg->pgdirs[i].pgdir + j);
}

void guest_pagetable_clear_all(struct lguest *lg)
{
	release_all_pagetables(lg);
	pin_stack_pages(lg);
}

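/* Mirror a guest PTE write into one shadow pgdir.  We only install the entry
 * if the guest has already set the accessed/dirty bits; otherwise we clear
 * the shadow slot and let demand_page() fault it in later so those bits get
 * updated. */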
static void do_set_pte(struct lguest *lg, int idx,
		       unsigned long vaddr, gpte_t gpte)
{
	spgd_t *spgd = spgd_addr(lg, idx, vaddr);
	if (spgd->flags & _PAGE_PRESENT) {
		spte_t *spte = spte_addr(lg, *spgd, vaddr);
		release_pte(*spte);
		if (gpte.flags & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
			check_gpte(lg, gpte);
			*spte = gpte_to_spte(lg, gpte, gpte.flags&_PAGE_DIRTY);
		} else
			spte->raw.val = 0;
	}
}

void guest_set_pte(struct lguest *lg,
		   unsigned long cr3, unsigned long vaddr, gpte_t gpte)
{
	/* Kernel mappings must be changed on all top levels. */
	if (vaddr >= lg->page_offset) {
		unsigned int i;
		for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
			if (lg->pgdirs[i].pgdir)
				do_set_pte(lg, i, vaddr, gpte);
	} else {
		int pgdir = find_pgdir(lg, cr3);
		if (pgdir != ARRAY_SIZE(lg->pgdirs))
			do_set_pte(lg, pgdir, vaddr, gpte);
	}
}

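/* The guest changed a top-level entry: throw away the shadow PTE page for
 * that slot so it gets rebuilt on demand. */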
void guest_set_pmd(struct lguest *lg, unsigned long cr3, u32 idx)
{
	int pgdir;

	if (idx >= SWITCHER_PGD_INDEX)
		return;

	pgdir = find_pgdir(lg, cr3);
	if (pgdir < ARRAY_SIZE(lg->pgdirs))
		release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
}

int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
{
	/* We assume this in flush_user_mappings, so check now */
	if (vaddr_to_pgd_index(lg->page_offset) >= SWITCHER_PGD_INDEX)
		return -EINVAL;
	lg->pgdidx = 0;
	lg->pgdirs[lg->pgdidx].cr3 = pgtable;
	lg->pgdirs[lg->pgdidx].pgdir = (spgd_t*)get_zeroed_page(GFP_KERNEL);
	if (!lg->pgdirs[lg->pgdidx].pgdir)
		return -ENOMEM;
	return 0;
}

void free_guest_pagetable(struct lguest *lg)
{
	unsigned int i;

	release_all_pagetables(lg);
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		free_page((long)lg->pgdirs[i].pgdir);
}

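/* Point the top shadow PGD slot at this CPU's switcher PTE page, and map this
 * guest's register page over the per-CPU stack page slot, so the switcher
 * finds the right registers when we run this guest. */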
/* Caller must be preempt-safe */
void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages)
{
	spte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
	spgd_t switcher_pgd;
	spte_t regs_pte;

	/* Since the switcher is less than 4MB, we simply mug the top pte page. */
	switcher_pgd.pfn = __pa(switcher_pte_page) >> PAGE_SHIFT;
	switcher_pgd.flags = _PAGE_KERNEL;
	lg->pgdirs[lg->pgdidx].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;

	/* Map our regs page over the stack page. */
	regs_pte.pfn = __pa(lg->regs_page) >> PAGE_SHIFT;
	regs_pte.flags = _PAGE_KERNEL;
	switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTES_PER_PAGE]
		= regs_pte;
}

static void free_switcher_pte_pages(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		free_page((long)switcher_pte_page(i));
}

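/* Fill in one CPU's switcher PTE page: the shared switcher pages are mapped
 * read-only, then this CPU's pair of per-CPU pages at index "pages + cpu*2"
 * (regs read-write, state read-only).  Other CPUs' pages stay unmapped. */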
static __init void populate_switcher_pte_page(unsigned int cpu,
					      struct page *switcher_page[],
					      unsigned int pages)
{
	unsigned int i;
	spte_t *pte = switcher_pte_page(cpu);

	for (i = 0; i < pages; i++) {
		pte[i].pfn = page_to_pfn(switcher_page[i]);
		pte[i].flags = _PAGE_PRESENT|_PAGE_ACCESSED;
	}

	/* We only map this CPU's pages, so guest can't see others. */
	i = pages + cpu*2;

	/* First page (regs) is rw, second (state) is ro. */
	pte[i].pfn = page_to_pfn(switcher_page[i]);
	pte[i].flags = _PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW;
	pte[i+1].pfn = page_to_pfn(switcher_page[i+1]);
	pte[i+1].flags = _PAGE_PRESENT|_PAGE_ACCESSED;
}

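/* At module load we allocate (and populate) one switcher PTE page for every
 * possible CPU; free_pagetables() undoes this at unload. */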
__init int init_pagetables(struct page **switcher_page, unsigned int pages)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		switcher_pte_page(i) = (spte_t *)get_zeroed_page(GFP_KERNEL);
		if (!switcher_pte_page(i)) {
			free_switcher_pte_pages();
			return -ENOMEM;
		}
		populate_switcher_pte_page(i, switcher_page, pages);
	}
	return 0;
}

void free_pagetables(void)
{
	free_switcher_pte_pages();
}