author     Paul Mackerras <paulus@samba.org>  2011-12-12 07:38:51 -0500
committer  Avi Kivity <avi@redhat.com>        2012-03-05 07:52:38 -0500
commit     4cf302bc106566c5bad523337296ea8b72df63f5 (patch)
tree       65c633e045baf08b5700301725093c3b43330e1f /arch/powerpc/kvm/book3s_hv_rm_mmu.c
parent     a355aa54f1d25dff83c0feef8863d83a76988fdb (diff)
KVM: PPC: Allow for read-only pages backing a Book3S HV guest
With this, if a guest does an H_ENTER with a read/write HPTE on a page which is currently read-only, we make the actual HPTE inserted be a read-only version of the HPTE. We now intercept protection faults as well as HPTE not found faults, and for a protection fault we work out whether it should be reflected to the guest (e.g. because the guest HPTE didn't allow write access to usermode) or handled by switching to kernel context and calling kvmppc_book3s_hv_page_fault, which will then request write access to the page and update the actual HPTE.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_rm_mmu.c')
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 32
1 file changed, 22 insertions(+), 10 deletions(-)
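To situate the change before reading the diff: the hypervisor now has to tell two kinds of guest storage faults apart, because a protection fault may be an artifact of the host-side read-only downgrade rather than a guest error. The snippet below is a minimal, hypothetical sketch of that classification, not code from this patch; DSISR_NOHPTE and DSISR_PROTFAULT are the standard powerpc DSISR bits from asm/reg.h, everything else is illustrative.

/*
 * Hypothetical sketch: distinguishing the faults this patch handles.
 * Bit values are from arch/powerpc/include/asm/reg.h; the function
 * itself is illustrative and does not appear in the patch.
 */
#define DSISR_NOHPTE	0x40000000	/* no translation found in the HPT */
#define DSISR_PROTFAULT	0x08000000	/* HPTE found, but it denies the access */

enum fault_kind { FAULT_NO_HPTE, FAULT_PROTECTION, FAULT_OTHER };

static enum fault_kind classify_storage_fault(unsigned long dsisr)
{
	if (dsisr & DSISR_NOHPTE)
		return FAULT_NO_HPTE;	/* paged out, MMIO, or truly unmapped */
	if (dsisr & DSISR_PROTFAULT)
		return FAULT_PROTECTION; /* possibly our read-only downgrade */
	return FAULT_OTHER;		/* reflect to the guest unchanged */
}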
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 81d16ed9767d..d3e36fc77e2b 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -120,7 +120,7 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
 }
 
 static pte_t lookup_linux_pte(struct kvm_vcpu *vcpu, unsigned long hva,
-			      unsigned long *pte_sizep)
+			      int writing, unsigned long *pte_sizep)
 {
 	pte_t *ptep;
 	unsigned long ps = *pte_sizep;
@@ -137,7 +137,7 @@ static pte_t lookup_linux_pte(struct kvm_vcpu *vcpu, unsigned long hva,
 		return __pte(0);
 	if (!pte_present(*ptep))
 		return __pte(0);
-	return kvmppc_read_update_linux_pte(ptep);
+	return kvmppc_read_update_linux_pte(ptep, writing);
 }
 
 long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
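The new writing flag is passed through to kvmppc_read_update_linux_pte(), whose signature was extended by the parent commit (a355aa54). Below is a simplified, non-atomic sketch of what such a helper does with the flag, assuming only generic Linux PTE accessors; the real version updates the PTE atomically under the powerpc _PAGE_BUSY protocol.

/*
 * Simplified sketch, not the kernel's atomic implementation: read the
 * Linux PTE and update its referenced/changed state.  'writing' says
 * the caller intends a store, so the dirty bit is set only when the
 * PTE actually grants write permission.
 */
static inline pte_t sketch_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t pte = *ptep;

	if (pte_present(pte)) {
		pte = pte_mkyoung(pte);		/* mark referenced */
		if (writing && pte_write(pte))
			pte = pte_mkdirty(pte);	/* mark changed on store */
		*ptep = pte;	/* real code does this with ldarx/stdcx. */
	}
	return pte;
}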
@@ -154,12 +154,14 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 	unsigned long is_io;
 	unsigned long *rmap;
 	pte_t pte;
+	unsigned int writing;
 	unsigned long mmu_seq;
 	bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;
 
 	psize = hpte_page_size(pteh, ptel);
 	if (!psize)
 		return H_PARAMETER;
+	writing = hpte_is_writable(ptel);
 	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
 
 	/* used later to detect if we might have been invalidated */
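hpte_is_writable(), used above, is a helper from asm/kvm_book3s_64.h introduced with this series; it inspects the PP (page protection) field of the guest-supplied HPTE low word. A hedged sketch of its likely shape, assuming the architected HPTE_R_PP/HPTE_R_PP0 bits and PP_* encodings rather than quoting the helper verbatim:

/*
 * Sketch: does the proposed HPTE ask for store permission?  Under the
 * Book3S PP encodings, PP_RXRX and PP_RXXX are the read-only variants;
 * treat the exact comparison as an assumption about the helper.
 */
static inline int sketch_hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP | HPTE_R_PP0);

	return pp != PP_RXRX && pp != PP_RXXX;
}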
@@ -208,8 +210,11 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 
 	/* Look up the Linux PTE for the backing page */
 	pte_size = psize;
-	pte = lookup_linux_pte(vcpu, hva, &pte_size);
+	pte = lookup_linux_pte(vcpu, hva, writing, &pte_size);
 	if (pte_present(pte)) {
+		if (writing && !pte_write(pte))
+			/* make the actual HPTE be read-only */
+			ptel = hpte_make_readonly(ptel);
 		is_io = hpte_cache_bits(pte_val(pte));
 		pa = pte_pfn(pte) << PAGE_SHIFT;
 	}
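hpte_make_readonly() is the counterpart helper: instead of failing the H_ENTER, KVM rewrites the PP field so the HPTE it actually installs denies stores while reads keep working. A sketch under the same PP-encoding assumptions, not a verbatim quote of the helper:

/*
 * Sketch: downgrade a guest-supplied HPTE to read-only.  The PP0 branch
 * reflects that the privileged-write encoding has its own read-only
 * flavour; the exact encodings here are assumptions, not a quote.
 */
static inline unsigned long sketch_hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) && (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;	/* read-only for both privilege levels */
	return ptel;
}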
@@ -678,7 +683,9 @@ EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);
 
 /*
  * Called in real mode to check whether an HPTE not found fault
- * is due to accessing a paged-out page or an emulated MMIO page.
+ * is due to accessing a paged-out page or an emulated MMIO page,
+ * or if a protection fault is due to accessing a page that the
+ * guest wanted read/write access to but which we made read-only.
  * Returns a possibly modified status (DSISR) value if not
  * (i.e. pass the interrupt to the guest),
  * -1 to pass the fault up to host kernel mode code, -2 to do that
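The comment defines a compact return-value protocol. Purely to make that contract concrete, a hypothetical caller-side rendering follows; the real consumer is the real-mode interrupt path, and this function is illustrative and appears nowhere in the kernel.

/* Illustrative only: acting on kvmppc_hpte_hv_fault()'s return value. */
static const char *sketch_hv_fault_action(long ret)
{
	if (ret == 0)
		return "resolved: retry the faulting access";
	if (ret > 0)
		return "reflect to guest; ret is the (possibly modified) DSISR";
	return "pass up to host kernel mode code (-1, or -2 with extra work)";
}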
@@ -696,12 +703,17 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
 	struct revmap_entry *rev;
 	unsigned long pp, key;
 
-	valid = HPTE_V_VALID | HPTE_V_ABSENT;
+	/* For protection fault, expect to find a valid HPTE */
+	valid = HPTE_V_VALID;
+	if (status & DSISR_NOHPTE)
+		valid |= HPTE_V_ABSENT;
 
 	index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
-	if (index < 0)
-		return status;	/* there really was no HPTE */
-
+	if (index < 0) {
+		if (status & DSISR_NOHPTE)
+			return status;	/* there really was no HPTE */
+		return 0;	/* for prot fault, HPTE disappeared */
+	}
 	hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
 	v = hpte[0] & ~HPTE_V_HVLOCK;
 	r = hpte[1];
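The valid mask handed to kvmppc_hv_find_lock_hpte() now depends on the fault type: a not-found fault may legitimately match an HPTE_V_ABSENT entry (KVM's software marker for a guest HPTE whose backing page is paged out), while a protection fault should only match a genuinely valid entry. A tiny illustrative helper, with the matching rule stated as an assumption:

/*
 * Illustrative: an HPT entry is considered a match when its state bits
 * intersect the caller's valid mask.  HPTE_V_VALID = present in the
 * hardware hash table; HPTE_V_ABSENT = software-only "paged out" state.
 */
static int sketch_hpte_state_matches(unsigned long v, unsigned long valid)
{
	return (v & valid) != 0;
}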
@@ -712,8 +724,8 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
 	asm volatile("lwsync" : : : "memory");
 	hpte[0] = v;
 
-	/* If the HPTE is valid by now, retry the instruction */
-	if (v & HPTE_V_VALID)
+	/* For not found, if the HPTE is valid by now, retry the instruction */
+	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
 		return 0;
 
 	/* Check access permissions to the page */
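Past the end of this hunk, the permission check decides whether the protection fault belongs to the guest. A hypothetical sketch of that decision, with the predicate name invented for illustration: if the guest's own HPTE forbids the access, reflect the fault; if the guest allowed it and only the host-side read-only downgrade blocked the store, exit to the host so kvmppc_book3s_hv_page_fault() can obtain write access and update the real HPTE.

/*
 * Illustrative only: the shape of the decision after the permission
 * check.  'guest_allows_access' stands in for the real pp/key test.
 */
static long sketch_prot_fault_decision(int guest_allows_access,
				       unsigned long status)
{
	if (!guest_allows_access)
		return status;	/* guest HPTE denies it: reflect DSISR */
	return -1;		/* host downgrade: go get write access */
}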