Diffstat (limited to 'arch/x86/kernel/kvm.c')
 -rw-r--r--  arch/x86/kernel/kvm.c | 138
 1 file changed, 138 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index a8e36da60120..cbadc730496a 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -25,6 +25,7 @@
 #include <linux/kvm_para.h>
 #include <linux/cpu.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
 /*
  * No need for any "IO delay" on KVM
@@ -33,6 +34,122 @@ static void kvm_io_delay(void)
 {
 }
 
+static void kvm_mmu_op(void *buffer, unsigned len)
+{
+	int r;
+	unsigned long a1, a2;
+
+	do {
+		a1 = __pa(buffer);
+		a2 = 0;   /* on i386 __pa() always returns <4G */
+		r = kvm_hypercall3(KVM_HC_MMU_OP, len, a1, a2);
+		buffer += r;
+		len -= r;
+	} while (len);
+}
+
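The loop above implements a resumable batch protocol: the guest passes the guest-physical address and length of a request buffer, and each hypercall returns the number of bytes consumed, so any unprocessed tail is simply resubmitted. The request layout it marshals can be inferred from the field accesses further down; as a sketch (field order and the pad member are assumptions here, the authoritative definitions live in the KVM paravirt headers):

	struct kvm_mmu_op_header {
		__u32 op;	/* KVM_MMU_OP_WRITE_PTE, _FLUSH_TLB, _RELEASE_PT */
		__u32 pad;	/* assumed: aligns the 64-bit payload fields */
	};

	struct kvm_mmu_op_write_pte {
		struct kvm_mmu_op_header header;
		__u64 pte_phys;	/* guest-physical address of the pte slot */
		__u64 pte_val;	/* value to install there */
	};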
+static void kvm_mmu_write(void *dest, u64 val)
+{
+	__u64 pte_phys;
+	struct kvm_mmu_op_write_pte wpte;
+
+#ifdef CONFIG_HIGHPTE
+	struct page *page;
+	unsigned long dst = (unsigned long) dest;
+
+	page = kmap_atomic_to_page(dest);
+	pte_phys = page_to_pfn(page);
+	pte_phys <<= PAGE_SHIFT;
+	pte_phys += (dst & ~(PAGE_MASK));
+#else
+	pte_phys = (unsigned long)__pa(dest);
+#endif
+	wpte.header.op = KVM_MMU_OP_WRITE_PTE;
+	wpte.pte_val = val;
+	wpte.pte_phys = pte_phys;
+
+	kvm_mmu_op(&wpte, sizeof wpte);
+}
+
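Under CONFIG_HIGHPTE, pte pages may live in highmem, so dest is only a temporary kmap_atomic() mapping and __pa() would be meaningless on it; the #ifdef branch therefore rebuilds the guest-physical address from the backing page's pfn plus the offset within the page. A typical caller path, sketched with the 32-bit page-table helpers of this era (illustrative, not part of this patch):

	pte_t *ptep = pte_offset_map(pmd, addr);	/* kmap_atomic() under HIGHPTE */
	set_pte_at(mm, addr, ptep, mk_pte(page, prot));	/* lands in kvm_set_pte_at() */
	pte_unmap(ptep);				/* kunmap_atomic() */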
+/*
+ * We only need to hook operations that are MMU writes. We hook these so that
+ * we can use lazy MMU mode to batch these operations. We could probably
+ * improve the performance of the host code if we used some of the information
+ * here to simplify processing of batched writes.
+ */
+static void kvm_set_pte(pte_t *ptep, pte_t pte)
+{
+	kvm_mmu_write(ptep, pte_val(pte));
+}
+
+static void kvm_set_pte_at(struct mm_struct *mm, unsigned long addr,
+			   pte_t *ptep, pte_t pte)
+{
+	kvm_mmu_write(ptep, pte_val(pte));
+}
+
+static void kvm_set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+	kvm_mmu_write(pmdp, pmd_val(pmd));
+}
+
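As written, every hooked write is a synchronous hypercall; the comment above points at the intended optimization: generic MM code already brackets bulk page-table updates in lazy MMU mode, so a backend can queue requests inside the bracket and flush them in one batch. The generic pattern looks roughly like this (see include/asm-generic/pgtable.h; the queueing is the anticipated extension, not what this patch does):

	arch_enter_lazy_mmu_mode();
	for (; addr < end; addr += PAGE_SIZE, ptep++)
		set_pte_at(mm, addr, ptep, pte);	/* could be queued */
	arch_leave_lazy_mmu_mode();			/* single batched flush */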
+#if PAGETABLE_LEVELS >= 3
+#ifdef CONFIG_X86_PAE
+static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte)
+{
+	kvm_mmu_write(ptep, pte_val(pte));
+}
+
+static void kvm_set_pte_present(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, pte_t pte)
+{
+	kvm_mmu_write(ptep, pte_val(pte));
+}
+
+static void kvm_pte_clear(struct mm_struct *mm,
+			  unsigned long addr, pte_t *ptep)
+{
+	kvm_mmu_write(ptep, 0);
+}
+
+static void kvm_pmd_clear(pmd_t *pmdp)
+{
+	kvm_mmu_write(pmdp, 0);
+}
+#endif
+
+static void kvm_set_pud(pud_t *pudp, pud_t pud)
+{
+	kvm_mmu_write(pudp, pud_val(pud));
+}
+
+#if PAGETABLE_LEVELS == 4
+static void kvm_set_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+	kvm_mmu_write(pgdp, pgd_val(pgd));
+}
+#endif
+#endif /* PAGETABLE_LEVELS >= 3 */
+
+static void kvm_flush_tlb(void)
+{
+	struct kvm_mmu_op_flush_tlb ftlb = {
+		.header.op = KVM_MMU_OP_FLUSH_TLB,
+	};
+
+	kvm_mmu_op(&ftlb, sizeof ftlb);
+}
+
+static void kvm_release_pt(u32 pfn)
+{
+	struct kvm_mmu_op_release_pt rpt = {
+		.header.op = KVM_MMU_OP_RELEASE_PT,
+		.pt_phys = (u64)pfn << PAGE_SHIFT,
+	};
+
+	kvm_mmu_op(&rpt, sizeof rpt);
+}
+
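kvm_flush_tlb() and kvm_release_pt() reuse the same transport: the flush request carries nothing beyond its opcode, while the release request identifies a freed page-table page by its guest-physical base address ((u64)pfn << PAGE_SHIFT, e.g. pfn 0x12345 becomes 0x12345000 with 4 KiB pages) so the host can drop any shadow state for it. Their formats, again inferred from the initializers above (layout is an assumption, as before):

	struct kvm_mmu_op_flush_tlb {
		struct kvm_mmu_op_header header;	/* op = KVM_MMU_OP_FLUSH_TLB */
	};

	struct kvm_mmu_op_release_pt {
		struct kvm_mmu_op_header header;	/* op = KVM_MMU_OP_RELEASE_PT */
		__u64 pt_phys;	/* guest-physical base of the freed table */
	};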
 static void paravirt_ops_setup(void)
 {
 	pv_info.name = "KVM";
@@ -41,6 +158,27 @@ static void paravirt_ops_setup(void)
 	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
 		pv_cpu_ops.io_delay = kvm_io_delay;
 
+	if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
+		pv_mmu_ops.set_pte = kvm_set_pte;
+		pv_mmu_ops.set_pte_at = kvm_set_pte_at;
+		pv_mmu_ops.set_pmd = kvm_set_pmd;
+#if PAGETABLE_LEVELS >= 3
+#ifdef CONFIG_X86_PAE
+		pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
+		pv_mmu_ops.set_pte_present = kvm_set_pte_present;
+		pv_mmu_ops.pte_clear = kvm_pte_clear;
+		pv_mmu_ops.pmd_clear = kvm_pmd_clear;
+#endif
+		pv_mmu_ops.set_pud = kvm_set_pud;
+#if PAGETABLE_LEVELS == 4
+		pv_mmu_ops.set_pgd = kvm_set_pgd;
+#endif
+#endif
+		pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
+		pv_mmu_ops.release_pte = kvm_release_pt;
+		pv_mmu_ops.release_pmd = kvm_release_pt;
+		pv_mmu_ops.release_pud = kvm_release_pt;
+	}
 }
 
 void __init kvm_guest_init(void)
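The hooks are installed only when the host advertises KVM_FEATURE_MMU_OP, so the same kernel image keeps native pte writes on hosts without the feature; note also that one kvm_release_pt() handler serves release_pte, release_pmd and release_pud, since all three just report a freed pfn. How kvm_para_has_feature() answers is outside this diff; a sketch of the era's mechanism, under the assumption that feature bits are reported in the KVM features CPUID leaf (see include/asm-x86/kvm_para.h):

	static inline int kvm_para_has_feature(unsigned int feature)
	{
		/* assumed mechanics: each feature is one bit in EAX of the
		 * KVM_CPUID_FEATURES leaf exposed by the hypervisor */
		if (cpuid_eax(KVM_CPUID_FEATURES) & (1UL << feature))
			return 1;
		return 0;
	}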