author     Martin Schwidefsky <schwidefsky@de.ibm.com>  2011-07-24 04:48:20 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>  2011-07-24 04:48:21 -0400
commit     e5992f2e6c3829cd43dbc4438ee13dcd6506f7f3
tree       b2d5d9fbfc610bd788532eafcd4b56e9ef7dbdd3  /arch/s390/include/asm/pgalloc.h
parent     144d634a21caff1d54cb4bb0d073774e88130045
[S390] kvm guest address space mapping
Add code that allows KVM to control the virtual memory layout that
is seen by a guest. The guest address space uses a second page table
that shares the last-level pte tables with the process page table.
If a page is unmapped from the process page table, it is automatically
unmapped from the guest page table as well.
The guest address space mapping starts out empty. KVM can map any
individual 1MB segment of the process virtual memory to any
1MB-aligned location in the guest virtual memory. If a target segment
in the process virtual memory does not exist or is unmapped while a
guest mapping exists, the desired target address is stored as an
invalid segment-table entry in the guest page table.
The population of the guest page table is fault driven.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
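For orientation, the sketch below shows how a hypervisor could drive the guest
address space mapping described above. The gmap_* names and signatures are
assumptions drawn from the wider patch series, not from this header, and the
addresses are invented; treat it purely as an illustration.

	/*
	 * Illustrative sketch only: gmap_alloc()/gmap_map_segment()/gmap_fault()
	 * and their signatures are assumed from the wider patch series and are
	 * not defined in pgalloc.h.
	 */
	#include <linux/errno.h>
	#include <asm/pgtable.h>	/* assumed home of struct gmap and gmap_*() */

	static int example_setup_guest(struct mm_struct *mm)
	{
		struct gmap *gmap;
		int rc;

		/* Create an initially empty guest address space for this mm. */
		gmap = gmap_alloc(mm);
		if (!gmap)
			return -ENOMEM;

		/*
		 * Map one 1MB segment of process virtual memory (from) to a
		 * 1MB-aligned guest address (to).  If the process side is not
		 * populated, the target is kept as an invalid segment-table
		 * entry and resolved later, fault driven.
		 */
		rc = gmap_map_segment(gmap, 0x10000000UL /* from */,
				      0x00000000UL /* to */, 0x100000UL /* 1MB */);
		if (!rc)
			/* Resolve a guest address on demand, as a fault handler would. */
			gmap_fault(0x00000000UL, gmap);

		gmap_free(gmap);
		return rc;
	}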
Diffstat (limited to 'arch/s390/include/asm/pgalloc.h')
 -rw-r--r--  arch/s390/include/asm/pgalloc.h | 7
 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 38e71ebcd3c2..8eef9b5b3cf4 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -20,7 +20,7 @@
 unsigned long *crst_table_alloc(struct mm_struct *);
 void crst_table_free(struct mm_struct *, unsigned long *);
 
-unsigned long *page_table_alloc(struct mm_struct *);
+unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
 void page_table_free(struct mm_struct *, unsigned long *);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 void page_table_free_rcu(struct mmu_gather *, unsigned long *);
@@ -115,6 +115,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	spin_lock_init(&mm->context.list_lock);
 	INIT_LIST_HEAD(&mm->context.pgtable_list);
+	INIT_LIST_HEAD(&mm->context.gmap_list);
 	return (pgd_t *) crst_table_alloc(mm);
 }
 #define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
@@ -133,8 +134,8 @@ static inline void pmd_populate(struct mm_struct *mm,
 /*
  * page table entry allocation/free routines.
  */
-#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
-#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
+#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))
+#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))
 
 #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
 #define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
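The only interface change visible in this header is the extra address
argument: pte_alloc_one() and pte_alloc_one_kernel() now forward the virtual
address they already receive from the generic mm code down to
page_table_alloc(), presumably so the allocator can take the guest mapping
state added elsewhere in this patch into account. A minimal caller-side
sketch, assuming the 3.0-era prototypes; the helper name is hypothetical:

	/* Sketch only: shows the macro expansion from the hunk above. */
	static pte_t *example_alloc_pte(struct mm_struct *mm, unsigned long vmaddr)
	{
		/* On s390 this now expands to (pte_t *) page_table_alloc(mm, vmaddr). */
		return pte_alloc_one(mm, vmaddr);
	}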