author		Paul Mackerras <paulus@samba.org>	2013-09-20 00:52:51 -0400
committer	Alexander Graf <agraf@suse.de>	2013-10-17 08:49:35 -0400
commit		93b159b466bdc9753bba5c3c51b40d7ddbbcc07c (patch)
tree		723a5f54132c2f44e25cbc8ea8b365c3940a5e10 /arch/powerpc/kvm
parent		4f6c11db10159e362b0100d41b35bf6d731eb4e2 (diff)
KVM: PPC: Book3S PR: Better handling of host-side read-only pages
Currently we request write access to all pages that get mapped into the
guest, even if the guest is only loading from the page.  This reduces
the effectiveness of KSM because it means that we unshare every page we
access.  Also, we always set the changed (C) bit in the guest HPTE if
it allows writing, even for a guest load.

This fixes both these problems.  We pass an 'iswrite' flag to the
mmu.xlate() functions and to kvmppc_mmu_map_page() to indicate whether
the access is a load or a store.  The mmu.xlate() functions now only
set C for stores.  kvmppc_gfn_to_pfn() now calls gfn_to_pfn_prot()
instead of gfn_to_pfn() so that it can indicate whether we need write
access to the page, and get back a 'writable' flag to indicate whether
the page is writable or not.  If that 'writable' flag is clear, we then
make the host HPTE read-only even if the guest HPTE allowed writing.

This means that we can get a protection fault when the guest writes to
a page that it has mapped read-write but which is read-only on the
host side (perhaps due to KSM having merged the page).  Thus we now
call kvmppc_handle_pagefault() for protection faults as well as HPTE
not found faults.  In kvmppc_handle_pagefault(), if the access was
allowed by the guest HPTE and we thus need to install a new host HPTE,
we then need to remove the old host HPTE if there is one.  This is
done with a new function, kvmppc_mmu_unmap_page(), which uses
kvmppc_mmu_pte_vflush() to find and remove the old host HPTE.

Since the memslot-related functions require the KVM SRCU read lock to
be held, this adds srcu_read_lock/unlock pairs around the calls to
kvmppc_handle_pagefault().

Finally, this changes kvmppc_mmu_book3s_32_xlate_pte() to not ignore
guest HPTEs that don't permit access, and to return -EPERM for
accesses that are not permitted by the page protections.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
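In outline, the host-side mapping decision introduced here works as
follows (a condensed sketch drawn from the book3s_64_mmu_host.c hunk
below, not literal kernel code; 'gfn' stands in for
orig_pte->raddr >> PAGE_SHIFT):

	bool writable;

	/* Request write access only when the guest access is a store */
	hpaddr = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable);

	/*
	 * Make the host HPTE read-only unless both the guest HPTE and
	 * the host page (e.g. not KSM-merged) permit writing.
	 */
	if (!orig_pte->may_write || !writable)
		rflags |= HPTE_R_PP;
	else
		mark_page_dirty(vcpu->kvm, gfn);

A store through such a read-only host HPTE then traps as a protection
fault (DSISR_PROTFAULT) and is resolved by kvmppc_handle_pagefault()
as described above.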
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--	arch/powerpc/kvm/book3s.c	15
-rw-r--r--	arch/powerpc/kvm/book3s_32_mmu.c	32
-rw-r--r--	arch/powerpc/kvm/book3s_32_mmu_host.c	14
-rw-r--r--	arch/powerpc/kvm/book3s_64_mmu.c	9
-rw-r--r--	arch/powerpc/kvm/book3s_64_mmu_host.c	20
-rw-r--r--	arch/powerpc/kvm/book3s_64_mmu_hv.c	2
-rw-r--r--	arch/powerpc/kvm/book3s_pr.c	29
7 files changed, 84 insertions, 37 deletions
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index f97369dc457c..807103ad2628 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -286,7 +286,8 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+			bool *writable)
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa;
 
@@ -302,20 +303,22 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 
 		pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
 		get_page(pfn_to_page(pfn));
+		if (writable)
+			*writable = true;
 		return pfn;
 	}
 
-	return gfn_to_pfn(vcpu->kvm, gfn);
+	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
 }
 
 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
-			struct kvmppc_pte *pte)
+			bool iswrite, struct kvmppc_pte *pte)
 {
 	int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
 	int r;
 
 	if (relocated) {
-		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
+		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
 	} else {
 		pte->eaddr = eaddr;
 		pte->raddr = eaddr & KVM_PAM;
@@ -361,7 +364,7 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 
 	vcpu->stat.st++;
 
-	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+	if (kvmppc_xlate(vcpu, *eaddr, data, true, &pte))
 		return -ENOENT;
 
 	*eaddr = pte.raddr;
@@ -383,7 +386,7 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 
 	vcpu->stat.ld++;
 
-	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+	if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte))
 		goto nopte;
 
 	*eaddr = pte.raddr;
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index b14af6d09347..76a64ce6a5b6 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -84,7 +84,8 @@ static inline bool sr_nx(u32 sr_raw)
 }
 
 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
-					  struct kvmppc_pte *pte, bool data);
+					  struct kvmppc_pte *pte, bool data,
+					  bool iswrite);
 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 					     u64 *vsid);
 
@@ -99,7 +100,7 @@ static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
 	u64 vsid;
 	struct kvmppc_pte pte;
 
-	if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
+	if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false))
 		return pte.vpage;
 
 	kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
@@ -146,7 +147,8 @@ static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary)
 }
 
 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
-					  struct kvmppc_pte *pte, bool data)
+					  struct kvmppc_pte *pte, bool data,
+					  bool iswrite)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	struct kvmppc_bat *bat;
@@ -187,8 +189,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
 				printk(KERN_INFO "BAT is not readable!\n");
 				continue;
 			}
-			if (!pte->may_write) {
-				/* let's treat r/o BATs as not-readable for now */
+			if (iswrite && !pte->may_write) {
 				dprintk_pte("BAT is read-only!\n");
 				continue;
 			}
@@ -202,7 +203,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
 
 static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 					  struct kvmppc_pte *pte, bool data,
-					  bool primary)
+					  bool iswrite, bool primary)
 {
 	u32 sre;
 	hva_t ptegp;
@@ -258,9 +259,6 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 				break;
 			}
 
-			if ( !pte->may_read )
-				continue;
-
 			dprintk_pte("MMU: Found PTE -> %x %x - %x\n",
 				    pteg[i], pteg[i+1], pp);
 			found = 1;
@@ -282,11 +280,12 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 			pte_r |= PTEG_FLAG_ACCESSED;
 			put_user(pte_r >> 8, addr + 2);
 		}
-		if (pte->may_write && !(pte_r & PTEG_FLAG_DIRTY)) {
-			/* XXX should only set this for stores */
+		if (iswrite && pte->may_write && !(pte_r & PTEG_FLAG_DIRTY)) {
 			pte_r |= PTEG_FLAG_DIRTY;
 			put_user(pte_r, addr + 3);
 		}
+		if (!pte->may_read || (iswrite && !pte->may_write))
+			return -EPERM;
 		return 0;
 	}
 
@@ -305,7 +304,8 @@ no_page_found:
 }
 
 static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
-				      struct kvmppc_pte *pte, bool data)
+				      struct kvmppc_pte *pte, bool data,
+				      bool iswrite)
 {
 	int r;
 	ulong mp_ea = vcpu->arch.magic_page_ea;
@@ -327,11 +327,13 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		return 0;
 	}
 
-	r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data);
+	r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data, iswrite);
 	if (r < 0)
-		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true);
+		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
+						   data, iswrite, true);
 	if (r < 0)
-		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, false);
+		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
+						   data, iswrite, false);
 
 	return r;
 }
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 00e619bf608e..673322329238 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -138,7 +138,8 @@ static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
 
 extern char etext[];
 
-int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
+int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
+			bool iswrite)
 {
 	pfn_t hpaddr;
 	u64 vpn;
@@ -152,9 +153,11 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	bool evict = false;
 	struct hpte_cache *pte;
 	int r = 0;
+	bool writable;
 
 	/* Get host physical address for gpa */
-	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
+	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT,
+				   iswrite, &writable);
 	if (is_error_noslot_pfn(hpaddr)) {
 		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
 				 orig_pte->eaddr);
@@ -204,7 +207,7 @@ next_pteg:
 		(primary ? 0 : PTE_SEC);
 	pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;
 
-	if (orig_pte->may_write) {
+	if (orig_pte->may_write && writable) {
 		pteg1 |= PP_RWRW;
 		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
 	} else {
@@ -259,6 +262,11 @@ out:
 	return r;
 }
 
+void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
+{
+	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
+}
+
 static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 {
 	struct kvmppc_sid_map *map;
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index c110145522e6..83da1f868fd5 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -206,7 +206,8 @@ static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
 }
 
 static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
-				      struct kvmppc_pte *gpte, bool data)
+				      struct kvmppc_pte *gpte, bool data,
+				      bool iswrite)
 {
 	struct kvmppc_slb *slbe;
 	hva_t ptegp;
@@ -345,8 +346,8 @@ do_second:
 		r |= HPTE_R_R;
 		put_user(r >> 8, addr + 6);
 	}
-	if (data && gpte->may_write && !(r & HPTE_R_C)) {
-		/* Set the dirty flag -- XXX even if not writing */
+	if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
+		/* Set the dirty flag */
 		/* Use a single byte write */
 		char __user *addr = (char __user *) &pteg[i+1];
 		r |= HPTE_R_C;
@@ -355,7 +356,7 @@ do_second:
 
 	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
 
-	if (!gpte->may_read)
+	if (!gpte->may_read || (iswrite && !gpte->may_write))
 		return -EPERM;
 	return 0;
 
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 6bda504ceda7..cc9fb89b8884 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -78,7 +78,8 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
 	return NULL;
 }
 
-int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
+int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
+			bool iswrite)
 {
 	unsigned long vpn;
 	pfn_t hpaddr;
@@ -91,9 +92,11 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	struct kvmppc_sid_map *map;
 	int r = 0;
 	int hpsize = MMU_PAGE_4K;
+	bool writable;
 
 	/* Get host physical address for gpa */
-	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
+	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT,
+				   iswrite, &writable);
 	if (is_error_noslot_pfn(hpaddr)) {
 		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
 		r = -EINVAL;
@@ -119,7 +122,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 
 	vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);
 
-	if (!orig_pte->may_write)
+	if (!orig_pte->may_write || !writable)
 		rflags |= HPTE_R_PP;
 	else
 		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
@@ -186,6 +189,17 @@ out:
 	return r;
 }
 
+void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
+{
+	u64 mask = 0xfffffffffULL;
+	u64 vsid;
+
+	vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
+	if (vsid & VSID_64K)
+		mask = 0xffffffff0ULL;
+	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
+}
+
 static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 {
 	struct kvmppc_sid_map *map;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index ccb89a048bf8..394fef820f0c 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -451,7 +451,7 @@ static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
 }
 
 static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
-			struct kvmppc_pte *gpte, bool data)
+			struct kvmppc_pte *gpte, bool data, bool iswrite)
 {
 	struct kvm *kvm = vcpu->kvm;
 	struct kvmppc_slb *slbe;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 677d7e33b1ff..2f84ed807184 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -401,6 +401,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			    ulong eaddr, int vec)
 {
 	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
+	bool iswrite = false;
 	int r = RESUME_GUEST;
 	int relocated;
 	int page_found = 0;
@@ -411,10 +412,12 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	u64 vsid;
 
 	relocated = data ? dr : ir;
+	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
+		iswrite = true;
 
 	/* Resolve real address if translation turned on */
 	if (relocated) {
-		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
+		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
 	} else {
 		pte.may_execute = true;
 		pte.may_read = true;
@@ -475,12 +478,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
 	} else if (!is_mmio &&
 		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
+		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
+			/*
+			 * There is already a host HPTE there, presumably
+			 * a read-only one for a page the guest thinks
+			 * is writable, so get rid of it first.
+			 */
+			kvmppc_mmu_unmap_page(vcpu, &pte);
+		}
 		/* The guest's PTE is not mapped yet. Map on the host */
-		kvmppc_mmu_map_page(vcpu, &pte);
+		kvmppc_mmu_map_page(vcpu, &pte, iswrite);
 		if (data)
 			vcpu->stat.sp_storage++;
 		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
 			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
 			kvmppc_patch_dcbz(vcpu, &pte);
 	} else {
 		/* MMIO */
@@ -732,7 +743,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 		/* only care about PTEG not found errors, but leave NX alone */
 		if (shadow_srr1 & 0x40000000) {
+			int idx = srcu_read_lock(&vcpu->kvm->srcu);
 			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
+			srcu_read_unlock(&vcpu->kvm->srcu, idx);
 			vcpu->stat.sp_instruc++;
 		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
 			   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
@@ -774,9 +787,15 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		}
 #endif
 
-		/* The only case we need to handle is missing shadow PTEs */
-		if (fault_dsisr & DSISR_NOHPTE) {
+		/*
+		 * We need to handle missing shadow PTEs, and
+		 * protection faults due to us mapping a page read-only
+		 * when the guest thinks it is writable.
+		 */
+		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
+			int idx = srcu_read_lock(&vcpu->kvm->srcu);
 			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
+			srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		} else {
 			vcpu->arch.shared->dar = dar;
 			vcpu->arch.shared->dsisr = fault_dsisr;