-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h |  7
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h   |  3
-rw-r--r--  arch/powerpc/kvm/book3s.c             | 15
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu.c      | 32
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c | 14
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c      |  9
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c | 20
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c   |  2
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c          | 29
9 files changed, 91 insertions, 40 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 603fba494a0b..a07bd7e7d4a4 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -128,7 +128,9 @@ extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
 extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
-extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
+extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
+                               bool iswrite);
+extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
@@ -157,7 +159,8 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
                            bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
-extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+                        bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
                         unsigned long *rmap, long pte_index, int realmode);
 extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 404dbc81434d..b6881917cd84 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -348,7 +348,8 @@ struct kvmppc_mmu {
         /* book3s */
         void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
         u32  (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
-        int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
+        int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
+                      struct kvmppc_pte *pte, bool data, bool iswrite);
         void (*reset_msr)(struct kvm_vcpu *vcpu);
         void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
         int  (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
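Taken together, the two header changes widen the MMU translation contract: every xlate implementation now learns whether the faulting access was a store. Below is a minimal, self-contained C model of the widened callback; the pte_model/mmu_model types, demo_xlate, and the identity translation are illustrative stand-ins, not kernel API.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long gva_t;

/* Simplified stand-in for struct kvmppc_pte. */
struct pte_model {
        unsigned long raddr;
        bool may_read;
        bool may_write;
};

/* Mirrors the widened callback: the translator now knows whether
 * the faulting access was a store ('iswrite'). */
struct mmu_model {
        int (*xlate)(gva_t eaddr, struct pte_model *pte,
                     bool data, bool iswrite);
};

/* Demo translator: identity-map everything read-only. */
static int demo_xlate(gva_t eaddr, struct pte_model *pte,
                      bool data, bool iswrite)
{
        (void)data;
        pte->raddr = eaddr;
        pte->may_read = true;
        pte->may_write = false;
        /* Stand-in for -EPERM: a store to a read-only page fails. */
        return (iswrite && !pte->may_write) ? -1 : 0;
}

int main(void)
{
        struct mmu_model mmu = { .xlate = demo_xlate };
        struct pte_model pte;

        printf("load : %d\n", mmu.xlate(0x1000, &pte, true, false));
        printf("store: %d\n", mmu.xlate(0x1000, &pte, true, true));
        return 0;
}

Passing the access type down lets the translator itself refuse stores to read-only translations, instead of each caller re-deriving that from the permission bits.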
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index f97369dc457c..807103ad2628 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -286,7 +286,8 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
         return 0;
 }
 
-pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+                        bool *writable)
 {
         ulong mp_pa = vcpu->arch.magic_page_pa;
 
@@ -302,20 +303,22 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 
                 pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
                 get_page(pfn_to_page(pfn));
+                if (writable)
+                        *writable = true;
                 return pfn;
         }
 
-        return gfn_to_pfn(vcpu->kvm, gfn);
+        return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
 }
 
 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
-                        struct kvmppc_pte *pte)
+                        bool iswrite, struct kvmppc_pte *pte)
 {
         int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
         int r;
 
         if (relocated) {
-                r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
+                r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
         } else {
                 pte->eaddr = eaddr;
                 pte->raddr = eaddr & KVM_PAM;
@@ -361,7 +364,7 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 
         vcpu->stat.st++;
 
-        if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+        if (kvmppc_xlate(vcpu, *eaddr, data, true, &pte))
                 return -ENOENT;
 
         *eaddr = pte.raddr;
@@ -383,7 +386,7 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 
         vcpu->stat.ld++;
 
-        if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+        if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte))
                 goto nopte;
 
         *eaddr = pte.raddr;
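gfn_to_pfn_prot(), unlike the plain gfn_to_pfn() it replaces here, reports through its writable out-parameter whether the host mapping may actually be made writable; the magic-page path sets *writable = true by hand because that page is always writable. A small userspace sketch of the same contract follows; struct host_page and lookup_pfn are invented for illustration, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for a host-side page mapping. */
struct host_page {
        unsigned long pfn;
        bool host_writable;
};

/* Model of the gfn_to_pfn_prot() contract: return a frame for a read
 * or write ('writing') access and report through *writable whether
 * the caller is allowed to create a writable mapping for it. */
static unsigned long lookup_pfn(const struct host_page *pg, bool writing,
                                bool *writable)
{
        (void)writing; /* a real host would break COW here for writes */
        if (writable)
                *writable = pg->host_writable;
        return pg->pfn;
}

int main(void)
{
        struct host_page ro_page = { .pfn = 0x1234, .host_writable = false };
        bool writable;
        unsigned long pfn = lookup_pfn(&ro_page, true, &writable);

        /* The caller must honour 'writable' when building its PTE,
         * as kvmppc_mmu_map_page() does later in this patch. */
        printf("pfn %#lx -> map %s\n", pfn, writable ? "rw" : "ro");
        return 0;
}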
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index b14af6d09347..76a64ce6a5b6 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -84,7 +84,8 @@ static inline bool sr_nx(u32 sr_raw)
 }
 
 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
-                                          struct kvmppc_pte *pte, bool data);
+                                          struct kvmppc_pte *pte, bool data,
+                                          bool iswrite);
 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                                              u64 *vsid);
 
@@ -99,7 +100,7 @@ static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
         u64 vsid;
         struct kvmppc_pte pte;
 
-        if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
+        if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false))
                 return pte.vpage;
 
         kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
@@ -146,7 +147,8 @@ static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary)
 }
 
 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
-                                          struct kvmppc_pte *pte, bool data)
+                                          struct kvmppc_pte *pte, bool data,
+                                          bool iswrite)
 {
         struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
         struct kvmppc_bat *bat;
@@ -187,8 +189,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
                                 printk(KERN_INFO "BAT is not readable!\n");
                                 continue;
                         }
-                        if (!pte->may_write) {
-                                /* let's treat r/o BATs as not-readable for now */
+                        if (iswrite && !pte->may_write) {
                                 dprintk_pte("BAT is read-only!\n");
                                 continue;
                         }
@@ -202,7 +203,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
 
 static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
                                           struct kvmppc_pte *pte, bool data,
-                                          bool primary)
+                                          bool iswrite, bool primary)
 {
         u32 sre;
         hva_t ptegp;
@@ -258,9 +259,6 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
                                         break;
                         }
 
-                        if ( !pte->may_read )
-                                continue;
-
                         dprintk_pte("MMU: Found PTE -> %x %x - %x\n",
                                     pteg[i], pteg[i+1], pp);
                         found = 1;
@@ -282,11 +280,12 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
                         pte_r |= PTEG_FLAG_ACCESSED;
                         put_user(pte_r >> 8, addr + 2);
                 }
-                if (pte->may_write && !(pte_r & PTEG_FLAG_DIRTY)) {
-                        /* XXX should only set this for stores */
+                if (iswrite && pte->may_write && !(pte_r & PTEG_FLAG_DIRTY)) {
                         pte_r |= PTEG_FLAG_DIRTY;
                         put_user(pte_r, addr + 3);
                 }
+                if (!pte->may_read || (iswrite && !pte->may_write))
+                        return -EPERM;
                 return 0;
         }
 
@@ -305,7 +304,8 @@ no_page_found:
 }
 
 static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
-                                      struct kvmppc_pte *pte, bool data)
+                                      struct kvmppc_pte *pte, bool data,
+                                      bool iswrite)
 {
         int r;
         ulong mp_ea = vcpu->arch.magic_page_ea;
@@ -327,11 +327,13 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                 return 0;
         }
 
-        r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data);
+        r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data, iswrite);
         if (r < 0)
-                r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true);
+                r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
+                                                   data, iswrite, true);
         if (r < 0)
-                r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, false);
+                r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
+                                                   data, iswrite, false);
 
         return r;
 }
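The new check at the end of kvmppc_mmu_book3s_32_xlate_pte() replaces the old behaviour of skipping any PTE without read permission: translation now succeeds or fails based on the access type, so a read-only page still translates for loads. A tiny standalone program exercising that predicate (function and parameter names are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* The permission test the xlate functions share after this patch:
 * fault if the page is unreadable, or if a store hits a page that
 * may not be written. */
static bool xlate_faults(bool may_read, bool may_write, bool iswrite)
{
        return !may_read || (iswrite && !may_write);
}

int main(void)
{
        printf("load from r/o page faults: %d\n",
               xlate_faults(true, false, false)); /* 0: loads go through */
        printf("store to r/o page faults:  %d\n",
               xlate_faults(true, false, true));  /* 1: -EPERM */
        printf("store to r/w page faults:  %d\n",
               xlate_faults(true, true, true));   /* 0 */
        return 0;
}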
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 00e619bf608e..673322329238 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -138,7 +138,8 @@ static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
 
 extern char etext[];
 
-int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
+int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
+                        bool iswrite)
 {
         pfn_t hpaddr;
         u64 vpn;
@@ -152,9 +153,11 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
         bool evict = false;
         struct hpte_cache *pte;
         int r = 0;
+        bool writable;
 
         /* Get host physical address for gpa */
-        hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
+        hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT,
+                                   iswrite, &writable);
         if (is_error_noslot_pfn(hpaddr)) {
                 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
                                  orig_pte->eaddr);
@@ -204,7 +207,7 @@ next_pteg:
                 (primary ? 0 : PTE_SEC);
         pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;
 
-        if (orig_pte->may_write) {
+        if (orig_pte->may_write && writable) {
                 pteg1 |= PP_RWRW;
                 mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
         } else {
@@ -259,6 +262,11 @@ out:
         return r;
 }
 
+void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
+{
+        kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
+}
+
 static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 {
         struct kvmppc_sid_map *map;
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index c110145522e6..83da1f868fd5 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -206,7 +206,8 @@ static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
 }
 
 static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
-                                      struct kvmppc_pte *gpte, bool data)
+                                      struct kvmppc_pte *gpte, bool data,
+                                      bool iswrite)
 {
         struct kvmppc_slb *slbe;
         hva_t ptegp;
@@ -345,8 +346,8 @@ do_second:
                 r |= HPTE_R_R;
                 put_user(r >> 8, addr + 6);
         }
-        if (data && gpte->may_write && !(r & HPTE_R_C)) {
-                /* Set the dirty flag -- XXX even if not writing */
+        if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
+                /* Set the dirty flag */
                 /* Use a single byte write */
                 char __user *addr = (char __user *) &pteg[i+1];
                 r |= HPTE_R_C;
@@ -355,7 +356,7 @@ do_second:
 
         mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
 
-        if (!gpte->may_read)
+        if (!gpte->may_read || (iswrite && !gpte->may_write))
                 return -EPERM;
         return 0;
 
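The 64-bit translator now updates the referenced and changed bits the way hardware would: R on any access, C only for a permitted store (the old code set C for any data access, as its XXX comment admitted). A simplified model follows; the bit values match the 64-bit HPTE low word but are shown here purely for illustration.

#include <stdbool.h>
#include <stdio.h>

#define HPTE_R_R 0x100UL /* referenced */
#define HPTE_R_C 0x080UL /* changed */

/* Model of the update after this patch: R is set on any successful
 * translation, C only when a permitted store touches the page. */
static unsigned long update_rc(unsigned long r, bool may_write, bool iswrite)
{
        r |= HPTE_R_R;
        if (iswrite && may_write)
                r |= HPTE_R_C;
        return r;
}

int main(void)
{
        printf("after load:  %#lx\n", update_rc(0, true, false)); /* R only */
        printf("after store: %#lx\n", update_rc(0, true, true));  /* R and C */
        return 0;
}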
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 6bda504ceda7..cc9fb89b8884 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -78,7 +78,8 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
         return NULL;
 }
 
-int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
+int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
+                        bool iswrite)
 {
         unsigned long vpn;
         pfn_t hpaddr;
@@ -91,9 +92,11 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
         struct kvmppc_sid_map *map;
         int r = 0;
         int hpsize = MMU_PAGE_4K;
+        bool writable;
 
         /* Get host physical address for gpa */
-        hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
+        hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT,
+                                   iswrite, &writable);
         if (is_error_noslot_pfn(hpaddr)) {
                 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
                 r = -EINVAL;
@@ -119,7 +122,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 
         vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);
 
-        if (!orig_pte->may_write)
+        if (!orig_pte->may_write || !writable)
                 rflags |= HPTE_R_PP;
         else
                 mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
@@ -186,6 +189,17 @@ out:
         return r;
 }
 
+void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
+{
+        u64 mask = 0xfffffffffULL;
+        u64 vsid;
+
+        vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
+        if (vsid & VSID_64K)
+                mask = 0xffffffff0ULL;
+        kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
+}
+
 static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 {
         struct kvmppc_sid_map *map;
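kvmppc_mmu_unmap_page() flushes shadow PTEs by virtual page number; the mask drops the low four bits when the segment uses 64k pages, presumably because one 64k guest page spans sixteen 4k virtual page numbers. A sketch of just the mask selection; the VSID_64K bit value here is an illustrative placeholder, not the kernel's definition.

#include <stdio.h>

#define VSID_64K 0x1ULL /* illustrative flag bit */

/* Model of the mask choice in kvmppc_mmu_unmap_page(): with 64k
 * pages the flush mask ignores the low four bits of the vpage. */
static unsigned long long flush_mask(unsigned long long vsid)
{
        return (vsid & VSID_64K) ? 0xffffffff0ULL : 0xfffffffffULL;
}

int main(void)
{
        printf("4k segment flush mask:  %#llx\n", flush_mask(0));
        printf("64k segment flush mask: %#llx\n", flush_mask(VSID_64K));
        return 0;
}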
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index ccb89a048bf8..394fef820f0c 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -451,7 +451,7 @@ static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
 }
 
 static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
-                        struct kvmppc_pte *gpte, bool data)
+                        struct kvmppc_pte *gpte, bool data, bool iswrite)
 {
         struct kvm *kvm = vcpu->kvm;
         struct kvmppc_slb *slbe;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 677d7e33b1ff..2f84ed807184 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -401,6 +401,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                             ulong eaddr, int vec)
 {
         bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
+        bool iswrite = false;
         int r = RESUME_GUEST;
         int relocated;
         int page_found = 0;
@@ -411,10 +412,12 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
         u64 vsid;
 
         relocated = data ? dr : ir;
+        if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
+                iswrite = true;
 
         /* Resolve real address if translation turned on */
         if (relocated) {
-                page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
+                page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
         } else {
                 pte.may_execute = true;
                 pte.may_read = true;
@@ -475,12 +478,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
         } else if (!is_mmio &&
                    kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
+                if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
+                        /*
+                         * There is already a host HPTE there, presumably
+                         * a read-only one for a page the guest thinks
+                         * is writable, so get rid of it first.
+                         */
+                        kvmppc_mmu_unmap_page(vcpu, &pte);
+                }
                 /* The guest's PTE is not mapped yet. Map on the host */
-                kvmppc_mmu_map_page(vcpu, &pte);
+                kvmppc_mmu_map_page(vcpu, &pte, iswrite);
                 if (data)
                         vcpu->stat.sp_storage++;
                 else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                          (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
                         kvmppc_patch_dcbz(vcpu, &pte);
         } else {
                 /* MMIO */
@@ -732,7 +743,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
                 /* only care about PTEG not found errors, but leave NX alone */
                 if (shadow_srr1 & 0x40000000) {
+                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                         r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
+                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                         vcpu->stat.sp_instruc++;
                 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                           (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
@@ -774,9 +787,15 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 }
 #endif
 
-                /* The only case we need to handle is missing shadow PTEs */
-                if (fault_dsisr & DSISR_NOHPTE) {
+                /*
+                 * We need to handle missing shadow PTEs, and
+                 * protection faults due to us mapping a page read-only
+                 * when the guest thinks it is writable.
+                 */
+                if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
+                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                         r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
+                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                 } else {
                         vcpu->arch.shared->dar = dar;
                         vcpu->arch.shared->dsisr = fault_dsisr;
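The exit-handler change is what makes the rest of the patch reachable: data-storage interrupts with DSISR_PROTFAULT set (a guest store hitting a host PTE that this code mapped read-only) now go through kvmppc_handle_pagefault() instead of being reflected straight into the guest. A compact model of the dispatch test; the DSISR bit values match the Book3S architecture but are shown here only for illustration.

#include <stdbool.h>
#include <stdio.h>

#define DSISR_NOHPTE    0x40000000UL /* no HPTE translates the address */
#define DSISR_PROTFAULT 0x08000000UL /* access denied by protection */
#define DSISR_ISSTORE   0x02000000UL /* the access was a store */

/* Model of the dispatch test after this patch: both missing shadow
 * PTEs and protection faults take the page-fault path, where the
 * store can be retried with a writable host mapping. */
static bool take_pagefault_path(unsigned long dsisr)
{
        return (dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) != 0;
}

int main(void)
{
        printf("missing HPTE:         %d\n",
               take_pagefault_path(DSISR_NOHPTE));
        printf("store to r/o mapping: %d\n",
               take_pagefault_path(DSISR_PROTFAULT | DSISR_ISSTORE));
        printf("other DSI:            %d\n",
               take_pagefault_path(0));
        return 0;
}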