author     Carsten Otte <cotte@de.ibm.com>        2007-10-29 11:09:35 -0400
committer  Avi Kivity <avi@qumranet.com>          2008-01-30 10:52:58 -0500
commit     a03490ed29d2771c675d4d9c0ffe22e19a1757f3 (patch)
tree       d4ed168cbb506282d4d6dddf922a3bdea0ae2ca7 /drivers/kvm/kvm_main.c
parent     6866b83ed7240bf4a7c50836ee10f61c8534503f (diff)
KVM: Portability: Move control register helper functions to x86.c
This patch moves the definitions of CR0_RESERVED_BITS,
CR4_RESERVED_BITS, and CR8_RESERVED_BITS along with the following
functions from kvm_main.c to x86.c:
set_cr0(), set_cr3(), set_cr4(), set_cr8(), get_cr8(), lmsw(),
load_pdptrs()
The static function wrapper inject_gp() is duplicated in kvm_main.c and
x86.c for now; the version in kvm_main.c should disappear once the last
user of it is gone.
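
For reference, the wrapper is a one-line helper around the vendor-specific
callback, as visible in the hunk context below. A sketch of the duplicated
static function, assuming the x86.c copy is identical to the one in
kvm_main.c:

	static void inject_gp(struct kvm_vcpu *vcpu)
	{
		/* raise #GP(0) in the guest via the vendor backend (VMX/SVM) */
		kvm_x86_ops->inject_gp(vcpu, 0);
	}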
The function load_pdptrs() is no longer static; for the time being it is
declared in x86.h, until the last user of it is gone from kvm_main.c.
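
A minimal sketch of what this implies for x86.h, assuming a plain prototype
is all that is added there (the exact header contents are not part of this
diff):

	/* Temporarily exposed so kvm_main.c can keep calling it; can become
	 * a local helper again once its last user in kvm_main.c is gone. */
	int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);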
Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r--  drivers/kvm/kvm_main.c | 219
1 file changed, 0 insertions, 219 deletions
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index e8972a82544b..8f7125710d02 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -90,17 +90,6 @@ static struct kvm_stats_debugfs_item {
 
 static struct dentry *debugfs_dir;
 
-#define CR0_RESERVED_BITS \
-	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
-			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
-			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
-#define CR4_RESERVED_BITS \
-	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
-			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
-			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
-			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
-
-#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
 #define EFER_RESERVED_BITS 0xfffffffffffff2fe
 
 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
@@ -348,214 +337,6 @@ static void inject_gp(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->inject_gp(vcpu, 0);
 }
 
-/*
- * Load the pae pdptrs.  Return true is they are all valid.
- */
-static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
-{
-	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
-	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
-	int i;
-	int ret;
-	u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];
-
-	mutex_lock(&vcpu->kvm->lock);
-	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
-				  offset * sizeof(u64), sizeof(pdpte));
-	if (ret < 0) {
-		ret = 0;
-		goto out;
-	}
-	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
-		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
-			ret = 0;
-			goto out;
-		}
-	}
-	ret = 1;
-
-	memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
-out:
-	mutex_unlock(&vcpu->kvm->lock);
-
-	return ret;
-}
-
-void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
-{
-	if (cr0 & CR0_RESERVED_BITS) {
-		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
-		       cr0, vcpu->cr0);
-		inject_gp(vcpu);
-		return;
-	}
-
-	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
-		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
-		inject_gp(vcpu);
-		return;
-	}
-
-	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
-		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
-		       "and a clear PE flag\n");
-		inject_gp(vcpu);
-		return;
-	}
-
-	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
-#ifdef CONFIG_X86_64
-		if ((vcpu->shadow_efer & EFER_LME)) {
-			int cs_db, cs_l;
-
-			if (!is_pae(vcpu)) {
-				printk(KERN_DEBUG "set_cr0: #GP, start paging "
-				       "in long mode while PAE is disabled\n");
-				inject_gp(vcpu);
-				return;
-			}
-			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
-			if (cs_l) {
-				printk(KERN_DEBUG "set_cr0: #GP, start paging "
-				       "in long mode while CS.L == 1\n");
-				inject_gp(vcpu);
-				return;
-
-			}
-		} else
-#endif
-		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
-			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
-			       "reserved bits\n");
-			inject_gp(vcpu);
-			return;
-		}
-
-	}
-
-	kvm_x86_ops->set_cr0(vcpu, cr0);
-	vcpu->cr0 = cr0;
-
-	mutex_lock(&vcpu->kvm->lock);
-	kvm_mmu_reset_context(vcpu);
-	mutex_unlock(&vcpu->kvm->lock);
-	return;
-}
-EXPORT_SYMBOL_GPL(set_cr0);
-
-void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
-{
-	set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
-}
-EXPORT_SYMBOL_GPL(lmsw);
-
-void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
-{
-	if (cr4 & CR4_RESERVED_BITS) {
-		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
-		inject_gp(vcpu);
-		return;
-	}
-
-	if (is_long_mode(vcpu)) {
-		if (!(cr4 & X86_CR4_PAE)) {
-			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
-			       "in long mode\n");
-			inject_gp(vcpu);
-			return;
-		}
-	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
-		   && !load_pdptrs(vcpu, vcpu->cr3)) {
-		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
-		inject_gp(vcpu);
-		return;
-	}
-
-	if (cr4 & X86_CR4_VMXE) {
-		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
-		inject_gp(vcpu);
-		return;
-	}
-	kvm_x86_ops->set_cr4(vcpu, cr4);
-	vcpu->cr4 = cr4;
-	mutex_lock(&vcpu->kvm->lock);
-	kvm_mmu_reset_context(vcpu);
-	mutex_unlock(&vcpu->kvm->lock);
-}
-EXPORT_SYMBOL_GPL(set_cr4);
-
-void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
-{
-	if (is_long_mode(vcpu)) {
-		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
-			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
-			inject_gp(vcpu);
-			return;
-		}
-	} else {
-		if (is_pae(vcpu)) {
-			if (cr3 & CR3_PAE_RESERVED_BITS) {
-				printk(KERN_DEBUG
-				       "set_cr3: #GP, reserved bits\n");
-				inject_gp(vcpu);
-				return;
-			}
-			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
-				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
-				       "reserved bits\n");
-				inject_gp(vcpu);
-				return;
-			}
-		}
-		/*
-		 * We don't check reserved bits in nonpae mode, because
-		 * this isn't enforced, and VMware depends on this.
-		 */
-	}
-
-	mutex_lock(&vcpu->kvm->lock);
-	/*
-	 * Does the new cr3 value map to physical memory? (Note, we
-	 * catch an invalid cr3 even in real-mode, because it would
-	 * cause trouble later on when we turn on paging anyway.)
-	 *
-	 * A real CPU would silently accept an invalid cr3 and would
-	 * attempt to use it - with largely undefined (and often hard
-	 * to debug) behavior on the guest side.
-	 */
-	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
-		inject_gp(vcpu);
-	else {
-		vcpu->cr3 = cr3;
-		vcpu->mmu.new_cr3(vcpu);
-	}
-	mutex_unlock(&vcpu->kvm->lock);
-}
-EXPORT_SYMBOL_GPL(set_cr3);
-
-void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
-{
-	if (cr8 & CR8_RESERVED_BITS) {
-		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
-		inject_gp(vcpu);
-		return;
-	}
-	if (irqchip_in_kernel(vcpu->kvm))
-		kvm_lapic_set_tpr(vcpu, cr8);
-	else
-		vcpu->cr8 = cr8;
-}
-EXPORT_SYMBOL_GPL(set_cr8);
-
-unsigned long get_cr8(struct kvm_vcpu *vcpu)
-{
-	if (irqchip_in_kernel(vcpu->kvm))
-		return kvm_lapic_get_cr8(vcpu);
-	else
-		return vcpu->cr8;
-}
-EXPORT_SYMBOL_GPL(get_cr8);
-
 void fx_init(struct kvm_vcpu *vcpu)
 {
 	unsigned after_mxcsr_mask;