Diffstat (limited to 'drivers/lguest/page_tables.c')
-rw-r--r--	drivers/lguest/page_tables.c	47
1 file changed, 40 insertions, 7 deletions
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index bfe3650b28d6..fe3c7575647b 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -13,6 +13,7 @@
 #include <linux/random.h>
 #include <linux/percpu.h>
 #include <asm/tlbflush.h>
+#include <asm/uaccess.h>
 #include "lg.h"
 
 /*M:008 We hold reference to pages, which prevents them from being swapped.
@@ -345,7 +346,7 @@ static void flush_user_mappings(struct lguest *lg, int idx)
 {
         unsigned int i;
         /* Release every pgd entry up to the kernel's address. */
-        for (i = 0; i < pgd_index(lg->page_offset); i++)
+        for (i = 0; i < pgd_index(lg->kernel_address); i++)
                 release_pgd(lg, lg->pgdirs[idx].pgdir + i);
 }
 
@@ -358,6 +359,25 @@ void guest_pagetable_flush_user(struct lguest *lg)
 }
 /*:*/
 
+/* We walk down the guest page tables to get a guest-physical address */
+unsigned long guest_pa(struct lguest *lg, unsigned long vaddr)
+{
+        pgd_t gpgd;
+        pte_t gpte;
+
+        /* First step: get the top-level Guest page table entry. */
+        gpgd = __pgd(lgread_u32(lg, gpgd_addr(lg, vaddr)));
+        /* Toplevel not present?  We can't map it in. */
+        if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
+                kill_guest(lg, "Bad address %#lx", vaddr);
+
+        gpte = __pte(lgread_u32(lg, gpte_addr(lg, gpgd, vaddr)));
+        if (!(pte_flags(gpte) & _PAGE_PRESENT))
+                kill_guest(lg, "Bad address %#lx", vaddr);
+
+        return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
+}
+
 /* We keep several page tables.  This is a simple routine to find the page
  * table (if any) corresponding to this top-level address the Guest has given
  * us. */
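The new guest_pa() turns a Guest virtual address into a Guest-physical address by walking the Guest's own page tables (pgd, then pte), killing the Guest if either level is not present, and combining the page frame (pte_pfn() * PAGE_SIZE) with the offset within the page (vaddr & ~PAGE_MASK). As a rough illustration only (example_read_guest_vaddr is hypothetical and not part of this patch), a caller that holds a virtual address could translate it first and then use lgread_u32(), which reads from a Guest-physical address:

/* Hypothetical helper, for illustration only: read a 32-bit value from a
 * Guest *virtual* address by translating it with guest_pa() first. */
static u32 example_read_guest_vaddr(struct lguest *lg, unsigned long vaddr)
{
        /* guest_pa() kills the Guest on an unmapped address, so no error
         * path is needed here. */
        unsigned long paddr = guest_pa(lg, vaddr);

        return lgread_u32(lg, paddr);
}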
@@ -500,7 +520,7 @@ void guest_set_pte(struct lguest *lg,
 {
         /* Kernel mappings must be changed on all top levels.  Slow, but
          * doesn't happen often. */
-        if (vaddr >= lg->page_offset) {
+        if (vaddr >= lg->kernel_address) {
                 unsigned int i;
                 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                         if (lg->pgdirs[i].pgdir)
@@ -550,11 +570,6 @@ void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
  * its first page table is.  We set some things up here: */
 int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
 {
-        /* In flush_user_mappings() we loop from 0 to
-         * "pgd_index(lg->page_offset)".  This assumes it won't hit
-         * the Switcher mappings, so check that now. */
-        if (pgd_index(lg->page_offset) >= SWITCHER_PGD_INDEX)
-                return -EINVAL;
         /* We start on the first shadow page table, and give it a blank PGD
          * page. */
         lg->pgdidx = 0;
@@ -565,6 +580,24 @@ int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
         return 0;
 }
 
+/* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
+void page_table_guest_data_init(struct lguest *lg)
+{
+        /* We get the kernel address: above this is all kernel memory. */
+        if (get_user(lg->kernel_address, &lg->lguest_data->kernel_address)
+            /* We tell the Guest that it can't use the top 4MB of virtual
+             * addresses used by the Switcher. */
+            || put_user(4U*1024*1024, &lg->lguest_data->reserve_mem)
+            || put_user(lg->pgdirs[lg->pgdidx].gpgdir,&lg->lguest_data->pgdir))
+                kill_guest(lg, "bad guest page %p", lg->lguest_data);
+
+        /* In flush_user_mappings() we loop from 0 to
+         * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
+         * Switcher mappings, so check that now. */
+        if (pgd_index(lg->kernel_address) >= SWITCHER_PGD_INDEX)
+                kill_guest(lg, "bad kernel address %#lx", lg->kernel_address);
+}
+
 /* When a Guest dies, our cleanup is fairly simple. */
 void free_guest_pagetable(struct lguest *lg)
 {
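The new page_table_guest_data_init() reads lg->kernel_address out of the Guest's lguest_data page, publishes reserve_mem and the top-level pgdir back to the Guest, and re-checks the Switcher assumption that init_guest_pagetable() can no longer check, since the kernel address is only known once the Guest reports it. A minimal sketch of how it might be wired up, assuming a hypothetical hypercall handler (the real LHCALL_LGUEST_INIT plumbing lives outside this file and is not part of this patch):

/* Hypothetical LHCALL_LGUEST_INIT handler sketch; the name and arguments
 * are assumptions, not part of this patch. */
static void example_lguest_init(struct lguest *lg, unsigned long guest_data)
{
        /* Remember where the Guest placed its "struct lguest_data" page. */
        lg->lguest_data = (struct lguest_data __user *)guest_data;

        /* Exchange kernel_address, reserve_mem and pgdir with the Guest,
         * as implemented in page_table_guest_data_init() above. */
        page_table_guest_data_init(lg);
}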