author      Avi Kivity <avi@redhat.com>    2010-07-06 09:20:43 -0400
committer   Avi Kivity <avi@redhat.com>    2010-08-01 23:40:30 -0400
commit      f59c1d2ded54e4bd7a9126f4a32c9eca8b336457 (patch)
tree        0528e6c6d301fbda9d897f1d6cf3704a0eb2bfea /arch/x86/kvm/paging_tmpl.h
parent      b0eeec29fe7a5b114000f769bd68ffa02652bfb7 (diff)
KVM: MMU: Keep going on permission error
Real hardware disregards permission errors when computing page fault error
code bit 0 (page present). Do the same.
Reviewed-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
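For context (not part of the commit itself): the x86 page-fault error code reports in bit 0 ("P") whether the faulting translation was present, so a permission violation on a present page still delivers P=1; only a walk that hit a non-present entry reports P=0. A minimal standalone sketch of that assembly, using the same bit values as KVM's PFERR_* masks; the function name and the standalone form are illustrative only, not KVM code:

#include <stdbool.h>
#include <stdint.h>

/* x86 page-fault error code bits (same values as KVM's PFERR_* masks). */
#define PFERR_PRESENT_MASK (1U << 0)   /* P:   translation was present      */
#define PFERR_WRITE_MASK   (1U << 1)   /* W/R: fault was caused by a write  */
#define PFERR_USER_MASK    (1U << 2)   /* U/S: fault occurred in user mode  */

/* Illustrative helper: bit 0 depends only on presence, never on whether
 * the access was additionally forbidden by write/user/NX permissions. */
static uint32_t fault_error_code(bool present, bool write_fault, bool user_fault)
{
        uint32_t ec = 0;

        if (present)                    /* set even for pure permission faults */
                ec |= PFERR_PRESENT_MASK;
        if (write_fault)
                ec |= PFERR_WRITE_MASK;
        if (user_fault)
                ec |= PFERR_USER_MASK;
        return ec;
}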
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--   arch/x86/kvm/paging_tmpl.h | 52
1 file changed, 30 insertions(+), 22 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 3a3f6d784d79..1cea41cad069 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -119,21 +119,25 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 {
         pt_element_t pte;
         gfn_t table_gfn;
-        unsigned index, pt_access, pte_access;
+        unsigned index, pt_access, uninitialized_var(pte_access);
         gpa_t pte_gpa;
-        int rsvd_fault = 0;
+        bool eperm, present, rsvd_fault;
 
         trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
                                      fetch_fault);
 walk:
+        present = true;
+        eperm = rsvd_fault = false;
         walker->level = vcpu->arch.mmu.root_level;
         pte = vcpu->arch.cr3;
 #if PTTYPE == 64
         if (!is_long_mode(vcpu)) {
                 pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
                 trace_kvm_mmu_paging_element(pte, walker->level);
-                if (!is_present_gpte(pte))
-                        goto not_present;
+                if (!is_present_gpte(pte)) {
+                        present = false;
+                        goto error;
+                }
                 --walker->level;
         }
 #endif
@@ -151,31 +155,36 @@ walk:
                 walker->table_gfn[walker->level - 1] = table_gfn;
                 walker->pte_gpa[walker->level - 1] = pte_gpa;
 
-                if (kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte)))
-                        goto not_present;
+                if (kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte))) {
+                        present = false;
+                        break;
+                }
 
                 trace_kvm_mmu_paging_element(pte, walker->level);
 
-                if (!is_present_gpte(pte))
-                        goto not_present;
+                if (!is_present_gpte(pte)) {
+                        present = false;
+                        break;
+                }
 
-                rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level);
-                if (rsvd_fault)
-                        goto access_error;
+                if (is_rsvd_bits_set(vcpu, pte, walker->level)) {
+                        rsvd_fault = true;
+                        break;
+                }
 
                 if (write_fault && !is_writable_pte(pte))
                         if (user_fault || is_write_protection(vcpu))
-                                goto access_error;
+                                eperm = true;
 
                 if (user_fault && !(pte & PT_USER_MASK))
-                        goto access_error;
+                        eperm = true;
 
 #if PTTYPE == 64
                 if (fetch_fault && (pte & PT64_NX_MASK))
-                        goto access_error;
+                        eperm = true;
 #endif
 
-                if (!(pte & PT_ACCESSED_MASK)) {
+                if (!eperm && !rsvd_fault && !(pte & PT_ACCESSED_MASK)) {
                         trace_kvm_mmu_set_accessed_bit(table_gfn, index,
                                                        sizeof(pte));
                         if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
@@ -214,6 +223,9 @@ walk:
                 --walker->level;
         }
 
+        if (!present || eperm || rsvd_fault)
+                goto error;
+
         if (write_fault && !is_dirty_gpte(pte)) {
                 bool ret;
 
@@ -233,14 +245,10 @@ walk:
                  __func__, (u64)pte, pte_access, pt_access);
         return 1;
 
-not_present:
+error:
         walker->error_code = 0;
-        goto err;
-
-access_error:
-        walker->error_code = PFERR_PRESENT_MASK;
-
-err:
+        if (present)
+                walker->error_code |= PFERR_PRESENT_MASK;
         if (write_fault)
                 walker->error_code |= PFERR_WRITE_MASK;
         if (user_fault)
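Taken together, the hunks replace the not_present/access_error gotos with three flags (present, eperm, rsvd_fault) that are accumulated during the walk and checked once afterwards, so a permission problem no longer aborts the walk and no longer clears the present bit. The toy program below models only that control flow; the TOY_* flag bits, the levels[] array and the function names are invented for illustration (they are not the real PTE layout or any KVM API), and reserved-bit checking is omitted:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy PTE flag bits for the sketch (not the real PTE layout). */
#define TOY_PRESENT  (1U << 0)
#define TOY_WRITABLE (1U << 1)
#define TOY_USER     (1U << 2)

/*
 * Condensed model of the reworked walk: a permission problem only sets
 * eperm and lets the walk continue, while a non-present entry stops it
 * with present = false.  Everything funnels into one final check, as in
 * the patched walk_addr().  Error-code assembly is shown in the earlier
 * sketch and omitted here.
 */
static bool toy_walk(const uint32_t *levels, int nlevels,
                     bool write_fault, bool user_fault)
{
        bool present = true, eperm = false;

        for (int i = 0; i < nlevels; i++) {
                uint32_t pte = levels[i];

                if (!(pte & TOY_PRESENT)) {
                        present = false;
                        break;                  /* nothing below this level */
                }
                if (write_fault && !(pte & TOY_WRITABLE))
                        eperm = true;           /* note: keep walking */
                if (user_fault && !(pte & TOY_USER))
                        eperm = true;
        }

        return present && !eperm;               /* true = access allowed */
}

int main(void)
{
        /* Present but read-only leaf: a write is a permission error, yet
         * the walk still saw a present translation, so the P bit stays set. */
        const uint32_t levels[] = { TOY_PRESENT | TOY_WRITABLE | TOY_USER,
                                    TOY_PRESENT | TOY_USER };
        bool ok = toy_walk(levels, 2, true, false);

        printf("write allowed: %s (translation present, P bit would be set)\n",
               ok ? "yes" : "no");
        return 0;
}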