aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390/include
diff options
context:
space:
mode:
authorMartin Schwidefsky <schwidefsky@de.ibm.com>2011-07-24 04:48:20 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2011-07-24 04:48:21 -0400
commite5992f2e6c3829cd43dbc4438ee13dcd6506f7f3 (patch)
treeb2d5d9fbfc610bd788532eafcd4b56e9ef7dbdd3 /arch/s390/include
parent144d634a21caff1d54cb4bb0d073774e88130045 (diff)
[S390] kvm guest address space mapping
Add code that allows KVM to control the virtual memory layout that is seen by a guest. The guest address space uses a second page table that shares the last level pte-tables with the process page table. If a page is unmapped from the process page table it is automatically unmapped from the guest page table as well. The guest address space mapping starts out empty, KVM can map any individual 1MB segments from the process virtual memory to any 1MB aligned location in the guest virtual memory. If a target segment in the process virtual memory does not exist or is unmapped while a guest mapping exists the desired target address is stored as an invalid segment table entry in the guest page table. The population of the guest page table is fault driven. Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/include')
-rw-r--r--arch/s390/include/asm/lowcore.h2
-rw-r--r--arch/s390/include/asm/mmu.h4
-rw-r--r--arch/s390/include/asm/pgalloc.h7
-rw-r--r--arch/s390/include/asm/pgtable.h42
-rw-r--r--arch/s390/include/asm/processor.h1
-rw-r--r--arch/s390/include/asm/tlbflush.h2
6 files changed, 52 insertions, 6 deletions
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 228cf0b295db..f26280d9e88d 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -268,7 +268,7 @@ struct _lowcore {
268 __u64 vdso_per_cpu_data; /* 0x0358 */ 268 __u64 vdso_per_cpu_data; /* 0x0358 */
269 __u64 machine_flags; /* 0x0360 */ 269 __u64 machine_flags; /* 0x0360 */
270 __u64 ftrace_func; /* 0x0368 */ 270 __u64 ftrace_func; /* 0x0368 */
271 __u64 sie_hook; /* 0x0370 */ 271 __u64 gmap; /* 0x0370 */
272 __u64 cmf_hpp; /* 0x0378 */ 272 __u64 cmf_hpp; /* 0x0378 */
273 273
274 /* Interrupt response block. */ 274 /* Interrupt response block. */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 82d0847896a0..4506791adcd5 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -6,6 +6,7 @@ typedef struct {
6 unsigned int flush_mm; 6 unsigned int flush_mm;
7 spinlock_t list_lock; 7 spinlock_t list_lock;
8 struct list_head pgtable_list; 8 struct list_head pgtable_list;
9 struct list_head gmap_list;
9 unsigned long asce_bits; 10 unsigned long asce_bits;
10 unsigned long asce_limit; 11 unsigned long asce_limit;
11 unsigned long vdso_base; 12 unsigned long vdso_base;
@@ -17,6 +18,7 @@ typedef struct {
17 18
18#define INIT_MM_CONTEXT(name) \ 19#define INIT_MM_CONTEXT(name) \
19 .context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \ 20 .context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
20 .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), 21 .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
22 .context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
21 23
22#endif 24#endif
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 38e71ebcd3c2..8eef9b5b3cf4 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -20,7 +20,7 @@
20unsigned long *crst_table_alloc(struct mm_struct *); 20unsigned long *crst_table_alloc(struct mm_struct *);
21void crst_table_free(struct mm_struct *, unsigned long *); 21void crst_table_free(struct mm_struct *, unsigned long *);
22 22
23unsigned long *page_table_alloc(struct mm_struct *); 23unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
24void page_table_free(struct mm_struct *, unsigned long *); 24void page_table_free(struct mm_struct *, unsigned long *);
25#ifdef CONFIG_HAVE_RCU_TABLE_FREE 25#ifdef CONFIG_HAVE_RCU_TABLE_FREE
26void page_table_free_rcu(struct mmu_gather *, unsigned long *); 26void page_table_free_rcu(struct mmu_gather *, unsigned long *);
@@ -115,6 +115,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
115{ 115{
116 spin_lock_init(&mm->context.list_lock); 116 spin_lock_init(&mm->context.list_lock);
117 INIT_LIST_HEAD(&mm->context.pgtable_list); 117 INIT_LIST_HEAD(&mm->context.pgtable_list);
118 INIT_LIST_HEAD(&mm->context.gmap_list);
118 return (pgd_t *) crst_table_alloc(mm); 119 return (pgd_t *) crst_table_alloc(mm);
119} 120}
120#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd) 121#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
@@ -133,8 +134,8 @@ static inline void pmd_populate(struct mm_struct *mm,
133/* 134/*
134 * page table entry allocation/free routines. 135 * page table entry allocation/free routines.
135 */ 136 */
136#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm)) 137#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))
137#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm)) 138#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))
138 139
139#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte) 140#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
140#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte) 141#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 801fbe1d837d..519eb5f187ef 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -654,6 +654,48 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
654#endif 654#endif
655} 655}
656 656
657/**
658 * struct gmap - guest address space
659 * @mm: pointer to the parent mm_struct
660 * @table: pointer to the page directory
661 * @crst_list: list of all crst tables used in the guest address space
662 */
663struct gmap {
664 struct list_head list;
665 struct mm_struct *mm;
666 unsigned long *table;
667 struct list_head crst_list;
668};
669
670/**
671 * struct gmap_rmap - reverse mapping for segment table entries
672 * @list: list head linking this structure into a list of reverse mappings
673 * @entry: pointer to a segment table entry
674 */
675struct gmap_rmap {
676 struct list_head list;
677 unsigned long *entry;
678};
679
680/**
681 * struct gmap_pgtable - gmap information attached to a page table
682 * @vmaddr: address of the 1MB segment in the process virtual memory
683 * @mapper: list of segment table entries mapping a page table
684 */
685struct gmap_pgtable {
686 unsigned long vmaddr;
687 struct list_head mapper;
688};
689
690struct gmap *gmap_alloc(struct mm_struct *mm);
691void gmap_free(struct gmap *gmap);
692void gmap_enable(struct gmap *gmap);
693void gmap_disable(struct gmap *gmap);
694int gmap_map_segment(struct gmap *gmap, unsigned long from,
695 unsigned long to, unsigned long length);
696int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
697unsigned long gmap_fault(unsigned long address, struct gmap *);
698
657/* 699/*
658 * Certain architectures need to do special things when PTEs 700 * Certain architectures need to do special things when PTEs
659 * within a page table are directly modified. Thus, the following 701 * within a page table are directly modified. Thus, the following
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 1300c3025334..55dfcc8bdc0d 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -80,6 +80,7 @@ struct thread_struct {
80 mm_segment_t mm_segment; 80 mm_segment_t mm_segment;
81 unsigned long prot_addr; /* address of protection-excep. */ 81 unsigned long prot_addr; /* address of protection-excep. */
82 unsigned int trap_no; 82 unsigned int trap_no;
83 unsigned long gmap_addr; /* address of last gmap fault. */
83 struct per_regs per_user; /* User specified PER registers */ 84 struct per_regs per_user; /* User specified PER registers */
84 struct per_event per_event; /* Cause of the last PER trap */ 85 struct per_event per_event; /* Cause of the last PER trap */
85 /* pfault_wait is used to block the process on a pfault event */ 86 /* pfault_wait is used to block the process on a pfault event */
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index b7a4f2eb0057..304445382382 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -80,7 +80,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
80 * on all cpus instead of doing a local flush if the mm 80 * on all cpus instead of doing a local flush if the mm
81 * only ran on the local cpu. 81 * only ran on the local cpu.
82 */ 82 */
83 if (MACHINE_HAS_IDTE) 83 if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
84 __tlb_flush_idte((unsigned long) mm->pgd | 84 __tlb_flush_idte((unsigned long) mm->pgd |
85 mm->context.asce_bits); 85 mm->context.asce_bits);
86 else 86 else