author    Avi Kivity <avi@qumranet.com>  2007-11-25 07:12:03 -0500
committer Avi Kivity <avi@qumranet.com>  2008-01-30 10:53:18 -0500
commit    c1a5d4f990ce034bcb19aebbb910c07019e60f6b (patch)
tree      23aeb993f99c0b9523486c0dcbedb61247352a45 /drivers/kvm/x86.c
parent    c3c91fee5195ba5176a6da5ddc2a2822243eb79f (diff)
KVM: Replace #GP injection by the generalized exception queue
Signed-off-by: Avi Kivity <avi@qumranet.com>
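
The new kvm_inject_gp() helper is not visible here, since the diffstat is
limited to drivers/kvm/x86.c; it lands alongside the exception-queue entry
points elsewhere in the series. A minimal sketch of what it plausibly looks
like, assuming a kvm_queue_exception_e() variant that carries an error code
and a GP_VECTOR constant (vector 13):

    /* Sketch only -- the real helper is added outside this diff.
     * Assumes kvm_queue_exception_e() and GP_VECTOR (vector 13). */
    static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
    {
            kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
    }

Every call site then becomes a one-line change: inject_gp(vcpu) turns into
kvm_inject_gp(vcpu, 0), with the (currently always zero) error code passed
explicitly rather than hardcoded in a file-local static helper.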
Diffstat (limited to 'drivers/kvm/x86.c')
-rw-r--r--   drivers/kvm/x86.c   43
1 file changed, 19 insertions(+), 24 deletions(-)
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index dc007a32a883..6deb052b5f93 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -128,11 +128,6 @@ void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);

-static void inject_gp(struct kvm_vcpu *vcpu)
-{
-        kvm_x86_ops->inject_gp(vcpu, 0);
-}
-
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 {
         WARN_ON(vcpu->exception.pending);
@@ -232,20 +227,20 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
         if (cr0 & CR0_RESERVED_BITS) {
                 printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
                        cr0, vcpu->cr0);
-                inject_gp(vcpu);
+                kvm_inject_gp(vcpu, 0);
                 return;
         }

         if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
                 printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
-                inject_gp(vcpu);
+                kvm_inject_gp(vcpu, 0);
                 return;
         }

         if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
                 printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
                        "and a clear PE flag\n");
-                inject_gp(vcpu);
+                kvm_inject_gp(vcpu, 0);
                 return;
         }

@@ -257,14 +252,14 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                         if (!is_pae(vcpu)) {
                                 printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                        "in long mode while PAE is disabled\n");
-                                inject_gp(vcpu);
+                                kvm_inject_gp(vcpu, 0);
                                 return;
                         }
                         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                         if (cs_l) {
                                 printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                        "in long mode while CS.L == 1\n");
-                                inject_gp(vcpu);
+                                kvm_inject_gp(vcpu, 0);
                                 return;

                         }
@@ -273,7 +268,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
                         printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
                                "reserved bits\n");
-                        inject_gp(vcpu);
+                        kvm_inject_gp(vcpu, 0);
                         return;
                 }

@@ -299,7 +294,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
         if (cr4 & CR4_RESERVED_BITS) {
                 printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
-                inject_gp(vcpu);
+                kvm_inject_gp(vcpu, 0);
                 return;
         }

@@ -307,19 +302,19 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                 if (!(cr4 & X86_CR4_PAE)) {
                         printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
                                "in long mode\n");
-                        inject_gp(vcpu);
+                        kvm_inject_gp(vcpu, 0);
                         return;
                 }
         } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
                    && !load_pdptrs(vcpu, vcpu->cr3)) {
                 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
-                inject_gp(vcpu);
+                kvm_inject_gp(vcpu, 0);
                 return;
         }

         if (cr4 & X86_CR4_VMXE) {
                 printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
-                inject_gp(vcpu);
+                kvm_inject_gp(vcpu, 0);
                 return;
         }
         kvm_x86_ops->set_cr4(vcpu, cr4);
@@ -340,7 +335,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
         if (is_long_mode(vcpu)) {
                 if (cr3 & CR3_L_MODE_RESERVED_BITS) {
                         printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
-                        inject_gp(vcpu);
+                        kvm_inject_gp(vcpu, 0);
                         return;
                 }
         } else {
@@ -348,13 +343,13 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                         if (cr3 & CR3_PAE_RESERVED_BITS) {
                                 printk(KERN_DEBUG
                                        "set_cr3: #GP, reserved bits\n");
-                                inject_gp(vcpu);
+                                kvm_inject_gp(vcpu, 0);
                                 return;
                         }
                         if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
                                 printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
                                        "reserved bits\n");
-                                inject_gp(vcpu);
+                                kvm_inject_gp(vcpu, 0);
                                 return;
                         }
                 }
@@ -375,7 +370,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
          * to debug) behavior on the guest side.
          */
         if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
-                inject_gp(vcpu);
+                kvm_inject_gp(vcpu, 0);
         else {
                 vcpu->cr3 = cr3;
                 vcpu->mmu.new_cr3(vcpu);
@@ -388,7 +383,7 @@ void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
         if (cr8 & CR8_RESERVED_BITS) {
                 printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
-                inject_gp(vcpu);
+                kvm_inject_gp(vcpu, 0);
                 return;
         }
         if (irqchip_in_kernel(vcpu->kvm))
@@ -436,14 +431,14 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
         if (efer & EFER_RESERVED_BITS) {
                 printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
                        efer);
-                inject_gp(vcpu);
+                kvm_inject_gp(vcpu, 0);
                 return;
         }

         if (is_paging(vcpu)
             && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
                 printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
-                inject_gp(vcpu);
+                kvm_inject_gp(vcpu, 0);
                 return;
         }

@@ -2047,7 +2042,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
                  * String I/O in reverse.  Yuck.  Kill the guest, fix later.
                  */
                 pr_unimpl(vcpu, "guest string pio down\n");
-                inject_gp(vcpu);
+                kvm_inject_gp(vcpu, 0);
                 return 1;
         }
         vcpu->run->io.count = now;
@@ -2062,7 +2057,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
                 vcpu->pio.guest_pages[i] = page;
                 mutex_unlock(&vcpu->kvm->lock);
                 if (!page) {
-                        inject_gp(vcpu);
+                        kvm_inject_gp(vcpu, 0);
                         free_pio_guest_pages(vcpu);
                         return 1;
                 }
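
For context: the generalized exception queue records a pending exception on
the vcpu instead of injecting it into the guest immediately, and delivery
happens on the next guest entry. A rough sketch of the error-code-carrying
variant that kvm_inject_gp() presumably wraps, mirroring the
kvm_queue_exception() visible in the first hunk (field names assumed from
that context):

    /* Sketch, not part of this diff: queue an exception that carries
     * an error code (such as #GP) for injection at the next guest entry. */
    void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
    {
            WARN_ON(vcpu->exception.pending);
            vcpu->exception.pending = true;
            vcpu->exception.has_error_code = true;
            vcpu->exception.nr = nr;
            vcpu->exception.error_code = error_code;
    }

Decoupling detection from delivery this way gives KVM a single injection path
that can prioritize exceptions, interrupts and NMIs at entry time, instead of
every caller invoking the vendor-specific inject_gp hook directly.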