Diffstat (limited to 'arch/arm/kvm/arm.c')
-rw-r--r--  arch/arm/kvm/arm.c  194
1 file changed, 4 insertions(+), 190 deletions(-)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 842098d78f58..a0dfc2a53f91 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -30,11 +30,9 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
-#include <asm/unified.h>
 #include <asm/uaccess.h>
 #include <asm/ptrace.h>
 #include <asm/mman.h>
-#include <asm/cputype.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
 #include <asm/virt.h>
@@ -44,14 +42,13 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
 #include <asm/kvm_psci.h>
-#include <asm/opcodes.h>
 
 #ifdef REQUIRES_VIRT
 __asm__(".arch_extension	virt");
 #endif
 
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
+static kvm_kernel_vfp_t __percpu *kvm_host_vfp_state;
 static unsigned long hyp_default_vectors;
 
 /* Per-CPU variable containing the currently running vcpu. */
@@ -304,22 +301,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-int __attribute_const__ kvm_target_cpu(void)
-{
-	unsigned long implementor = read_cpuid_implementor();
-	unsigned long part_number = read_cpuid_part_number();
-
-	if (implementor != ARM_CPU_IMP_ARM)
-		return -EINVAL;
-
-	switch (part_number) {
-	case ARM_CPU_PART_CORTEX_A15:
-		return KVM_ARM_TARGET_CORTEX_A15;
-	default:
-		return -EINVAL;
-	}
-}
-
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	int ret;
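
Note: kvm_target_cpu() is only removed from arm.c here; this view is limited to arch/arm/kvm/arm.c and the diff adds just 4 lines, so the probe has presumably been relocated rather than dropped. For reference, the two <asm/cputype.h> accessors it used decode the MIDR register; a minimal illustration (plain macros written out for this note, not the kernel's actual definitions):

	/* MIDR bit layout per the ARMv7 ARM; illustrative only. */
	#define MIDR_IMPLEMENTOR(midr)	(((midr) >> 24) & 0xff)	/* MIDR[31:24], 0x41 ('A') for ARM Ltd. */
	#define MIDR_PARTNUM(midr)	(((midr) >> 4) & 0xfff)	/* MIDR[15:4], 0xc0f for Cortex-A15 */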
@@ -482,163 +463,6 @@ static void update_vttbr(struct kvm *kvm)
 	spin_unlock(&kvm_vmid_lock);
 }
 
-static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* SVC called from Hyp mode should never get here */
-	kvm_debug("SVC called from Hyp mode shouldn't go here\n");
-	BUG();
-	return -EINVAL; /* Squash warning */
-}
-
-static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
-		      vcpu->arch.hsr & HSR_HVC_IMM_MASK);
-
-	if (kvm_psci_call(vcpu))
-		return 1;
-
-	kvm_inject_undefined(vcpu);
-	return 1;
-}
-
-static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	if (kvm_psci_call(vcpu))
-		return 1;
-
-	kvm_inject_undefined(vcpu);
-	return 1;
-}
-
-static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* The hypervisor should never cause aborts */
-	kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
-		vcpu->arch.hxfar, vcpu->arch.hsr);
-	return -EFAULT;
-}
-
-static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* This is either an error in the ws. code or an external abort */
-	kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
-		vcpu->arch.hxfar, vcpu->arch.hsr);
-	return -EFAULT;
-}
-
-typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
-static exit_handle_fn arm_exit_handlers[] = {
-	[HSR_EC_WFI]		= kvm_handle_wfi,
-	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
-	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
-	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
-	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
-	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
-	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
-	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
-	[HSR_EC_SVC_HYP]	= handle_svc_hyp,
-	[HSR_EC_HVC]		= handle_hvc,
-	[HSR_EC_SMC]		= handle_smc,
-	[HSR_EC_IABT]		= kvm_handle_guest_abort,
-	[HSR_EC_IABT_HYP]	= handle_pabt_hyp,
-	[HSR_EC_DABT]		= kvm_handle_guest_abort,
-	[HSR_EC_DABT_HYP]	= handle_dabt_hyp,
-};
-
-/*
- * A conditional instruction is allowed to trap, even though it
- * wouldn't be executed. So let's re-implement the hardware, in
- * software!
- */
-static bool kvm_condition_valid(struct kvm_vcpu *vcpu)
-{
-	unsigned long cpsr, cond, insn;
-
-	/*
-	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
-	 * catch undefined instructions, and then we won't get past
-	 * the arm_exit_handlers test anyway.
-	 */
-	BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0);
-
-	/* Top two bits non-zero? Unconditional. */
-	if (vcpu->arch.hsr >> 30)
-		return true;
-
-	cpsr = *vcpu_cpsr(vcpu);
-
-	/* Is condition field valid? */
-	if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT)
-		cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT;
-	else {
-		/* This can happen in Thumb mode: examine IT state. */
-		unsigned long it;
-
-		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
-
-		/* it == 0 => unconditional. */
-		if (it == 0)
-			return true;
-
-		/* The cond for this insn works out as the top 4 bits. */
-		cond = (it >> 4);
-	}
-
-	/* Shift makes it look like an ARM-mode instruction */
-	insn = cond << 28;
-	return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
-}
-
-/*
- * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
- * proper exit to QEMU.
- */
-static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		       int exception_index)
-{
-	unsigned long hsr_ec;
-
-	switch (exception_index) {
-	case ARM_EXCEPTION_IRQ:
-		return 1;
-	case ARM_EXCEPTION_UNDEFINED:
-		kvm_err("Undefined exception in Hyp mode at: %#08x\n",
-			vcpu->arch.hyp_pc);
-		BUG();
-		panic("KVM: Hypervisor undefined exception!\n");
-	case ARM_EXCEPTION_DATA_ABORT:
-	case ARM_EXCEPTION_PREF_ABORT:
-	case ARM_EXCEPTION_HVC:
-		hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;
-
-		if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers)
-		    || !arm_exit_handlers[hsr_ec]) {
-			kvm_err("Unknown exception class: %#08lx, "
-				"hsr: %#08x\n", hsr_ec,
-				(unsigned int)vcpu->arch.hsr);
-			BUG();
-		}
-
-		/*
-		 * See ARM ARM B1.14.1: "Hyp traps on instructions
-		 * that fail their condition code check"
-		 */
-		if (!kvm_condition_valid(vcpu)) {
-			bool is_wide = vcpu->arch.hsr & HSR_IL;
-			kvm_skip_instr(vcpu, is_wide);
-			return 1;
-		}
-
-		return arm_exit_handlers[hsr_ec](vcpu, run);
-	default:
-		kvm_pr_unimpl("Unsupported exception type: %d",
-			      exception_index);
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-		return 0;
-	}
-}
-
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 {
 	if (likely(vcpu->arch.has_run_once))
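
Note: together with the hyp-mode abort handlers, the HVC/SMC handlers, the exit-handler table, and handle_exit() itself, the condition-code emulation above leaves arm.c; given the 4-line insertion count, all of this has presumably moved to a dedicated exit-handling file rather than been deleted. The densest part is the ITSTATE reconstruction in kvm_condition_valid(): the CPSR keeps IT[1:0] in bits 26:25 and IT[7:2] in bits 15:10, and the top four bits of the reassembled field encode the condition of the current instruction. A standalone sketch of that extraction (illustrative only, not a kernel API):

	static unsigned long cpsr_get_itstate(unsigned long cpsr)
	{
		unsigned long it_hi = (cpsr >> 8) & 0xfc;	/* IT[7:2] from CPSR[15:10] */
		unsigned long it_lo = (cpsr >> 25) & 0x03;	/* IT[1:0] from CPSR[26:25] */

		return it_hi | it_lo;	/* 0 means no IT block: unconditional */
	}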
@@ -973,7 +797,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
 static void cpu_init_hyp_mode(void *vector)
 {
 	unsigned long long pgd_ptr;
-	unsigned long pgd_low, pgd_high;
 	unsigned long hyp_stack_ptr;
 	unsigned long stack_page;
 	unsigned long vector_ptr;
@@ -982,20 +805,11 @@ static void cpu_init_hyp_mode(void *vector)
 	__hyp_set_vectors((unsigned long)vector);
 
 	pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
-	pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
-	pgd_high = (pgd_ptr >> 32ULL);
 	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
 	hyp_stack_ptr = stack_page + PAGE_SIZE;
 	vector_ptr = (unsigned long)__kvm_hyp_vector;
 
-	/*
-	 * Call initialization code, and switch to the full blown
-	 * HYP code. The init code doesn't need to preserve these registers as
-	 * r1-r3 and r12 are already callee save according to the AAPCS.
-	 * Note that we slightly misuse the prototype by casing the pgd_low to
-	 * a void *.
-	 */
-	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
+	__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
 }
 
 /**
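
Note: the removed register split existed because kvm_call_hyp() passes arguments in 32-bit registers (r0-r3 per the AAPCS), while the HTTBR value is 64 bits wide. __cpu_init_hyp_mode() is not defined in this file; presumably it hides that same split behind a single call. A plausible sketch under that assumption:

	static inline void __cpu_init_hyp_mode(unsigned long long pgd_ptr,
					       unsigned long hyp_stack_ptr,
					       unsigned long vector_ptr)
	{
		/* Split the 64-bit pgd pointer across two 32-bit arguments. */
		unsigned long pgd_low  = pgd_ptr & ((1ULL << 32) - 1);
		unsigned long pgd_high = pgd_ptr >> 32;

		kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
	}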
@@ -1078,7 +892,7 @@ static int init_hyp_mode(void)
 	/*
 	 * Map the host VFP structures
 	 */
-	kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct);
+	kvm_host_vfp_state = alloc_percpu(kvm_kernel_vfp_t);
 	if (!kvm_host_vfp_state) {
 		err = -ENOMEM;
 		kvm_err("Cannot allocate host VFP state\n");
@@ -1086,7 +900,7 @@ static int init_hyp_mode(void)
 	}
 
 	for_each_possible_cpu(cpu) {
-		struct vfp_hard_struct *vfp;
+		kvm_kernel_vfp_t *vfp;
 
 		vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
 		err = create_hyp_mappings(vfp, vfp + 1);
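
Note: kvm_kernel_vfp_t, which replaces struct vfp_hard_struct in both hunks above, is defined outside this diff. It is presumably a thin abstraction so that arm.c no longer names the concrete VFP container type directly, along these lines:

	/* Assumed definition, e.g. in <asm/kvm_host.h>: */
	typedef struct vfp_hard_struct kvm_kernel_vfp_t;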