author		Linus Torvalds <torvalds@linux-foundation.org>	2013-05-03 12:13:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-03 12:13:19 -0400
commit		8546dc1d4b671480961c3eaf4c0c102ae6848340 (patch)
tree		c646079fb48811b22b742deb6bd2e907f9e6c3d4 /arch/arm/kvm
parent		9992ba72327fa0d8bdc9fb624e80f5cce338a711 (diff)
parent		33b9f582c5c1db515412cc7efff28f7d1779321f (diff)
Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM updates from Russell King:
 "The major items included in here are:

   - MCPM, multi-cluster power management, part of the infrastructure
     required for ARMs big.LITTLE support.

   - A rework of the ARM KVM code to allow re-use by ARM64.

   - Error handling cleanups of the IS_ERR_OR_NULL() madness and fixes
     of that stuff for arch/arm

   - Preparatory patches for Cortex-M3 support from Uwe Kleine-König.

  There is also a set of three patches in here from Hugh/Catalin to
  address freeing of inappropriate page tables on LPAE.  You already
  have these from akpm, but they were already part of my tree at the
  time he sent them, so unfortunately they'll end up with duplicate
  commits"

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (77 commits)
  ARM: EXYNOS: remove unnecessary use of IS_ERR_VALUE()
  ARM: IMX: remove unnecessary use of IS_ERR_VALUE()
  ARM: OMAP: use consistent error checking
  ARM: cleanup: OMAP hwmod error checking
  ARM: 7709/1: mcpm: Add explicit AFLAGS to support v6/v7 multiplatform kernels
  ARM: 7700/2: Make cpu_init() notrace
  ARM: 7702/1: Set the page table freeing ceiling to TASK_SIZE
  ARM: 7701/1: mm: Allow arch code to control the user page table ceiling
  ARM: 7703/1: Disable preemption in broadcast_tlb*_a15_erratum()
  ARM: mcpm: provide an interface to set the SMP ops at run time
  ARM: mcpm: generic SMP secondary bringup and hotplug support
  ARM: mcpm_head.S: vlock-based first man election
  ARM: mcpm: Add baremetal voting mutexes
  ARM: mcpm: introduce helpers for platform coherency exit/setup
  ARM: mcpm: introduce the CPU/cluster power API
  ARM: multi-cluster PM: secondary kernel entry code
  ARM: cacheflush: add synchronization helpers for mixed cache state accesses
  ARM: cpu hotplug: remove majority of cache flushing from platforms
  ARM: smp: flush L1 cache in cpu_die()
  ARM: tegra: remove tegra specific cpu_disable()
  ...
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--	arch/arm/kvm/Makefile		|   2
-rw-r--r--	arch/arm/kvm/arm.c		| 194
-rw-r--r--	arch/arm/kvm/coproc.c		|  28
-rw-r--r--	arch/arm/kvm/coproc.h		|   4
-rw-r--r--	arch/arm/kvm/emulate.c		|  75
-rw-r--r--	arch/arm/kvm/guest.c		|  17
-rw-r--r--	arch/arm/kvm/handle_exit.c	| 164
-rw-r--r--	arch/arm/kvm/interrupts.S	|  13
-rw-r--r--	arch/arm/kvm/mmio.c		|  46
-rw-r--r--	arch/arm/kvm/mmu.c		| 184
-rw-r--r--	arch/arm/kvm/vgic.c		|   2
11 files changed, 369 insertions(+), 360 deletions(-)
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index fc96ce6f2357..8dc5e76cb789 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -17,7 +17,7 @@ AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
 kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
 
 obj-y += kvm-arm.o init.o interrupts.o
-obj-y += arm.o guest.o mmu.o emulate.o reset.o
+obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o mmio.o psci.o
 obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o
 obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 842098d78f58..a0dfc2a53f91 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -30,11 +30,9 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
-#include <asm/unified.h>
 #include <asm/uaccess.h>
 #include <asm/ptrace.h>
 #include <asm/mman.h>
-#include <asm/cputype.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
 #include <asm/virt.h>
@@ -44,14 +42,13 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
 #include <asm/kvm_psci.h>
-#include <asm/opcodes.h>
 
 #ifdef REQUIRES_VIRT
 __asm__(".arch_extension virt");
 #endif
 
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
+static kvm_kernel_vfp_t __percpu *kvm_host_vfp_state;
 static unsigned long hyp_default_vectors;
 
 /* Per-CPU variable containing the currently running vcpu. */
@@ -304,22 +301,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-int __attribute_const__ kvm_target_cpu(void)
-{
-	unsigned long implementor = read_cpuid_implementor();
-	unsigned long part_number = read_cpuid_part_number();
-
-	if (implementor != ARM_CPU_IMP_ARM)
-		return -EINVAL;
-
-	switch (part_number) {
-	case ARM_CPU_PART_CORTEX_A15:
-		return KVM_ARM_TARGET_CORTEX_A15;
-	default:
-		return -EINVAL;
-	}
-}
-
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	int ret;
@@ -482,163 +463,6 @@ static void update_vttbr(struct kvm *kvm)
 	spin_unlock(&kvm_vmid_lock);
 }
 
-static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* SVC called from Hyp mode should never get here */
-	kvm_debug("SVC called from Hyp mode shouldn't go here\n");
-	BUG();
-	return -EINVAL; /* Squash warning */
-}
-
-static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
-		      vcpu->arch.hsr & HSR_HVC_IMM_MASK);
-
-	if (kvm_psci_call(vcpu))
-		return 1;
-
-	kvm_inject_undefined(vcpu);
-	return 1;
-}
-
-static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	if (kvm_psci_call(vcpu))
-		return 1;
-
-	kvm_inject_undefined(vcpu);
-	return 1;
-}
-
-static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* The hypervisor should never cause aborts */
-	kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
-		vcpu->arch.hxfar, vcpu->arch.hsr);
-	return -EFAULT;
-}
-
-static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* This is either an error in the ws. code or an external abort */
-	kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
-		vcpu->arch.hxfar, vcpu->arch.hsr);
-	return -EFAULT;
-}
-
-typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
-static exit_handle_fn arm_exit_handlers[] = {
-	[HSR_EC_WFI]		= kvm_handle_wfi,
-	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
-	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
-	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
-	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
-	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
-	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
-	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
-	[HSR_EC_SVC_HYP]	= handle_svc_hyp,
-	[HSR_EC_HVC]		= handle_hvc,
-	[HSR_EC_SMC]		= handle_smc,
-	[HSR_EC_IABT]		= kvm_handle_guest_abort,
-	[HSR_EC_IABT_HYP]	= handle_pabt_hyp,
-	[HSR_EC_DABT]		= kvm_handle_guest_abort,
-	[HSR_EC_DABT_HYP]	= handle_dabt_hyp,
-};
-
-/*
- * A conditional instruction is allowed to trap, even though it
- * wouldn't be executed. So let's re-implement the hardware, in
- * software!
- */
-static bool kvm_condition_valid(struct kvm_vcpu *vcpu)
-{
-	unsigned long cpsr, cond, insn;
-
-	/*
-	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
-	 * catch undefined instructions, and then we won't get past
-	 * the arm_exit_handlers test anyway.
-	 */
-	BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0);
-
-	/* Top two bits non-zero? Unconditional. */
-	if (vcpu->arch.hsr >> 30)
-		return true;
-
-	cpsr = *vcpu_cpsr(vcpu);
-
-	/* Is condition field valid? */
-	if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT)
-		cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT;
-	else {
-		/* This can happen in Thumb mode: examine IT state. */
-		unsigned long it;
-
-		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
-
-		/* it == 0 => unconditional. */
-		if (it == 0)
-			return true;
-
-		/* The cond for this insn works out as the top 4 bits. */
-		cond = (it >> 4);
-	}
-
-	/* Shift makes it look like an ARM-mode instruction */
-	insn = cond << 28;
-	return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
-}
-
-/*
- * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
- * proper exit to QEMU.
- */
-static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		       int exception_index)
-{
-	unsigned long hsr_ec;
-
-	switch (exception_index) {
-	case ARM_EXCEPTION_IRQ:
-		return 1;
-	case ARM_EXCEPTION_UNDEFINED:
-		kvm_err("Undefined exception in Hyp mode at: %#08x\n",
-			vcpu->arch.hyp_pc);
-		BUG();
-		panic("KVM: Hypervisor undefined exception!\n");
-	case ARM_EXCEPTION_DATA_ABORT:
-	case ARM_EXCEPTION_PREF_ABORT:
-	case ARM_EXCEPTION_HVC:
-		hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;
-
-		if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers)
-		    || !arm_exit_handlers[hsr_ec]) {
-			kvm_err("Unknown exception class: %#08lx, "
-				"hsr: %#08x\n", hsr_ec,
-				(unsigned int)vcpu->arch.hsr);
-			BUG();
-		}
-
-		/*
-		 * See ARM ARM B1.14.1: "Hyp traps on instructions
-		 * that fail their condition code check"
-		 */
-		if (!kvm_condition_valid(vcpu)) {
-			bool is_wide = vcpu->arch.hsr & HSR_IL;
-			kvm_skip_instr(vcpu, is_wide);
-			return 1;
-		}
-
-		return arm_exit_handlers[hsr_ec](vcpu, run);
-	default:
-		kvm_pr_unimpl("Unsupported exception type: %d",
-			      exception_index);
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-		return 0;
-	}
-}
-
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 {
 	if (likely(vcpu->arch.has_run_once))
@@ -973,7 +797,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
 static void cpu_init_hyp_mode(void *vector)
 {
 	unsigned long long pgd_ptr;
-	unsigned long pgd_low, pgd_high;
 	unsigned long hyp_stack_ptr;
 	unsigned long stack_page;
 	unsigned long vector_ptr;
@@ -982,20 +805,11 @@ static void cpu_init_hyp_mode(void *vector)
 	__hyp_set_vectors((unsigned long)vector);
 
 	pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
-	pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
-	pgd_high = (pgd_ptr >> 32ULL);
 	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
 	hyp_stack_ptr = stack_page + PAGE_SIZE;
 	vector_ptr = (unsigned long)__kvm_hyp_vector;
 
-	/*
-	 * Call initialization code, and switch to the full blown
-	 * HYP code. The init code doesn't need to preserve these registers as
-	 * r1-r3 and r12 are already callee save according to the AAPCS.
-	 * Note that we slightly misuse the prototype by casing the pgd_low to
-	 * a void *.
-	 */
-	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
+	__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
 }
 
 /**
@@ -1078,7 +892,7 @@ static int init_hyp_mode(void)
 	/*
 	 * Map the host VFP structures
 	 */
-	kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct);
+	kvm_host_vfp_state = alloc_percpu(kvm_kernel_vfp_t);
 	if (!kvm_host_vfp_state) {
 		err = -ENOMEM;
 		kvm_err("Cannot allocate host VFP state\n");
@@ -1086,7 +900,7 @@ static int init_hyp_mode(void)
 	}
 
 	for_each_possible_cpu(cpu) {
-		struct vfp_hard_struct *vfp;
+		kvm_kernel_vfp_t *vfp;
 
 		vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
 		err = create_hyp_mappings(vfp, vfp + 1);
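
The cpu_init_hyp_mode() hunk above is easier to follow with the AAPCS calling convention in mind: the old code split the 64-bit HTTBR into two 32-bit halves so it could be passed through 32-bit registers, while the new __cpu_init_hyp_mode() takes the 64-bit value directly. A standalone sketch of the old split-and-rejoin arithmetic (the address value is made up for the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* made-up LPAE table address; stands in for kvm_mmu_get_httbr() */
	uint64_t pgd_ptr = 0x00000001234ff000ULL;
	uint32_t pgd_low  = (uint32_t)(pgd_ptr & ((1ULL << 32) - 1));
	uint32_t pgd_high = (uint32_t)(pgd_ptr >> 32);
	/* the callee reassembles the halves in the opposite direction */
	uint64_t rejoined = ((uint64_t)pgd_high << 32) | pgd_low;

	printf("low=%#x high=%#x rejoined=%#llx\n",
	       (unsigned)pgd_low, (unsigned)pgd_high,
	       (unsigned long long)rejoined);
	return rejoined == pgd_ptr ? 0 : 1;
}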
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 7bed7556077a..8eea97be1ed5 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -76,7 +76,7 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
 			const struct coproc_params *p,
 			const struct coproc_reg *r)
 {
-	u32 val;
+	unsigned long val;
 	int cpu;
 
 	if (!p->is_write)
@@ -293,12 +293,12 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
 
 		if (likely(r->access(vcpu, params, r))) {
 			/* Skip instruction, since it was emulated */
-			kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 			return 1;
 		}
 		/* If access function fails, it should complain. */
 	} else {
-		kvm_err("Unsupported guest CP15 access at: %08x\n",
+		kvm_err("Unsupported guest CP15 access at: %08lx\n",
 			*vcpu_pc(vcpu));
 		print_cp_instr(params);
 	}
@@ -315,14 +315,14 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct coproc_params params;
 
-	params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
-	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
-	params.is_write = ((vcpu->arch.hsr & 1) == 0);
+	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
+	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
+	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
 	params.is_64bit = true;
 
-	params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
+	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
 	params.Op2 = 0;
-	params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
+	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
 	params.CRn = 0;
 
 	return emulate_cp15(vcpu, &params);
@@ -347,14 +347,14 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct coproc_params params;
 
-	params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
-	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
-	params.is_write = ((vcpu->arch.hsr & 1) == 0);
+	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
+	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
+	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
 	params.is_64bit = false;
 
-	params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
-	params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
-	params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
+	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
+	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
+	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
 	params.Rt2 = 0;
 
 	return emulate_cp15(vcpu, &params);
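
The kvm_handle_cp15_32() hunk above is pure bit-field extraction from the trap syndrome; the new accessors only wrap the same shifts and masks. A standalone sketch of the decode (the HSR value below is invented for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hsr = 0x03e0001e;	/* made-up 32-bit CP15 trap syndrome */

	uint32_t crm = (hsr >> 1) & 0xf;
	uint32_t rt1 = (hsr >> 5) & 0xf;
	uint32_t crn = (hsr >> 10) & 0xf;
	uint32_t op1 = (hsr >> 14) & 0x7;
	uint32_t op2 = (hsr >> 17) & 0x7;
	int is_write = (hsr & 1) == 0;	/* bit 0 clear means MCR (a write) */

	printf("CRn=%u CRm=%u Op1=%u Op2=%u Rt=%u write=%d\n",
	       crn, crm, op1, op2, rt1, is_write);
	return 0;
}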
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index 992adfafa2ff..b7301d3e4799 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -84,7 +84,7 @@ static inline bool read_zero(struct kvm_vcpu *vcpu,
 static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
 				      const struct coproc_params *params)
 {
-	kvm_debug("CP15 write to read-only register at: %08x\n",
+	kvm_debug("CP15 write to read-only register at: %08lx\n",
 		  *vcpu_pc(vcpu));
 	print_cp_instr(params);
 	return false;
@@ -93,7 +93,7 @@ static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
 static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
 				      const struct coproc_params *params)
 {
-	kvm_debug("CP15 read to write-only register at: %08x\n",
+	kvm_debug("CP15 read to write-only register at: %08lx\n",
 		  *vcpu_pc(vcpu));
 	print_cp_instr(params);
 	return false;
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index d61450ac6665..bdede9e7da51 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -20,6 +20,7 @@
 #include <linux/kvm_host.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_emulate.h>
+#include <asm/opcodes.h>
 #include <trace/events/kvm.h>
 
 #include "trace.h"
@@ -109,10 +110,10 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
  * Return a pointer to the register number valid in the current mode of
  * the virtual CPU.
  */
-u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
+unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
 {
-	u32 *reg_array = (u32 *)&vcpu->arch.regs;
-	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
+	unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs;
+	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
 
 	switch (mode) {
 	case USR_MODE...SVC_MODE:
@@ -141,9 +142,9 @@ u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
 /*
  * Return the SPSR for the current mode of the virtual CPU.
  */
-u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
+unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
 {
-	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
+	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
 	switch (mode) {
 	case SVC_MODE:
 		return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
@@ -160,20 +161,48 @@ u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
 	}
 }
 
-/**
- * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
- * @vcpu:	the vcpu pointer
- * @run:	the kvm_run structure pointer
- *
- * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
- * halt execution of world-switches and schedule other host processes until
- * there is an incoming IRQ or FIQ to the VM.
+/*
+ * A conditional instruction is allowed to trap, even though it
+ * wouldn't be executed. So let's re-implement the hardware, in
+ * software!
  */
-int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+bool kvm_condition_valid(struct kvm_vcpu *vcpu)
 {
-	trace_kvm_wfi(*vcpu_pc(vcpu));
-	kvm_vcpu_block(vcpu);
-	return 1;
+	unsigned long cpsr, cond, insn;
+
+	/*
+	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
+	 * catch undefined instructions, and then we won't get past
+	 * the arm_exit_handlers test anyway.
+	 */
+	BUG_ON(!kvm_vcpu_trap_get_class(vcpu));
+
+	/* Top two bits non-zero? Unconditional. */
+	if (kvm_vcpu_get_hsr(vcpu) >> 30)
+		return true;
+
+	cpsr = *vcpu_cpsr(vcpu);
+
+	/* Is condition field valid? */
+	if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT)
+		cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT;
+	else {
+		/* This can happen in Thumb mode: examine IT state. */
+		unsigned long it;
+
+		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
+
+		/* it == 0 => unconditional. */
+		if (it == 0)
+			return true;
+
+		/* The cond for this insn works out as the top 4 bits. */
+		cond = (it >> 4);
+	}
+
+	/* Shift makes it look like an ARM-mode instruction */
+	insn = cond << 28;
+	return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
 }
 
 /**
@@ -257,9 +286,9 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
  */
 void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 {
-	u32 new_lr_value;
-	u32 new_spsr_value;
-	u32 cpsr = *vcpu_cpsr(vcpu);
+	unsigned long new_lr_value;
+	unsigned long new_spsr_value;
+	unsigned long cpsr = *vcpu_cpsr(vcpu);
 	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
 	bool is_thumb = (cpsr & PSR_T_BIT);
 	u32 vect_offset = 4;
@@ -291,9 +320,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
  */
 static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
 {
-	u32 new_lr_value;
-	u32 new_spsr_value;
-	u32 cpsr = *vcpu_cpsr(vcpu);
+	unsigned long new_lr_value;
+	unsigned long new_spsr_value;
+	unsigned long cpsr = *vcpu_cpsr(vcpu);
 	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
 	bool is_thumb = (cpsr & PSR_T_BIT);
 	u32 vect_offset;
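
kvm_condition_valid() above ends by shifting the condition nibble into bits [31:28] so arm_check_condition() can evaluate it as if it were a normal ARM-encoded instruction. A minimal userspace sketch of that final test against the CPSR NZCV flags; it mirrors the standard ARM condition table but is not the in-kernel helper itself:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool cond_passes(uint32_t cond, uint32_t cpsr)
{
	bool n = cpsr & (1U << 31), z = cpsr & (1U << 30);
	bool c = cpsr & (1U << 29), v = cpsr & (1U << 28);

	switch (cond) {
	case 0x0: return z;		/* EQ */
	case 0x1: return !z;		/* NE */
	case 0x2: return c;		/* CS */
	case 0x3: return !c;		/* CC */
	case 0x4: return n;		/* MI */
	case 0x5: return !n;		/* PL */
	case 0x6: return v;		/* VS */
	case 0x7: return !v;		/* VC */
	case 0x8: return c && !z;	/* HI */
	case 0x9: return !c || z;	/* LS */
	case 0xa: return n == v;	/* GE */
	case 0xb: return n != v;	/* LT */
	case 0xc: return !z && (n == v); /* GT */
	case 0xd: return z || (n != v);	/* LE */
	default:  return true;		/* AL / unconditional */
	}
}

int main(void)
{
	uint32_t cpsr = 1U << 30;	/* Z flag set */
	printf("EQ: %d, NE: %d\n", cond_passes(0x0, cpsr), cond_passes(0x1, cpsr));
	return 0;
}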
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 2339d9609d36..152d03612181 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
+#include <asm/cputype.h>
 #include <asm/uaccess.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
@@ -180,6 +181,22 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	return -EINVAL;
 }
 
+int __attribute_const__ kvm_target_cpu(void)
+{
+	unsigned long implementor = read_cpuid_implementor();
+	unsigned long part_number = read_cpuid_part_number();
+
+	if (implementor != ARM_CPU_IMP_ARM)
+		return -EINVAL;
+
+	switch (part_number) {
+	case ARM_CPU_PART_CORTEX_A15:
+		return KVM_ARM_TARGET_CORTEX_A15;
+	default:
+		return -EINVAL;
+	}
+}
+
 int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 			const struct kvm_vcpu_init *init)
 {
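
kvm_target_cpu(), relocated above, relies on the MIDR register layout: the implementer code sits in bits [31:24] and the part number in bits [15:4]. A standalone sketch of the equivalent masking (0x410fc0f0 is a representative Cortex-A15 MIDR; the kernel helpers named in the diff do the same arithmetic):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t midr = 0x410fc0f0;			/* example Cortex-A15 MIDR */
	uint32_t implementor = (midr >> 24) & 0xff;	/* 0x41 == 'A', ARM Ltd */
	uint32_t part_number = midr & 0xfff0;		/* 0xc0f0 == Cortex-A15 */

	printf("implementor=%#x part=%#x\n",
	       (unsigned)implementor, (unsigned)part_number);
	return 0;
}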
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
new file mode 100644
index 000000000000..3d74a0be47db
--- /dev/null
+++ b/arch/arm/kvm/handle_exit.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_psci.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
+
+#include "trace.h"
+
+typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+
+static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	/* SVC called from Hyp mode should never get here */
+	kvm_debug("SVC called from Hyp mode shouldn't go here\n");
+	BUG();
+	return -EINVAL; /* Squash warning */
+}
+
+static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
+		      kvm_vcpu_hvc_get_imm(vcpu));
+
+	if (kvm_psci_call(vcpu))
+		return 1;
+
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	if (kvm_psci_call(vcpu))
+		return 1;
+
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	/* The hypervisor should never cause aborts */
+	kvm_err("Prefetch Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
+		kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
+	return -EFAULT;
+}
+
+static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	/* This is either an error in the ws. code or an external abort */
+	kvm_err("Data Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
+		kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
+	return -EFAULT;
+}
+
+/**
+ * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * @vcpu:	the vcpu pointer
+ * @run:	the kvm_run structure pointer
+ *
+ * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
+ * halt execution of world-switches and schedule other host processes until
+ * there is an incoming IRQ or FIQ to the VM.
+ */
+static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	trace_kvm_wfi(*vcpu_pc(vcpu));
+	kvm_vcpu_block(vcpu);
+	return 1;
+}
+
+static exit_handle_fn arm_exit_handlers[] = {
+	[HSR_EC_WFI]		= kvm_handle_wfi,
+	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
+	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
+	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
+	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
+	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
+	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
+	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
+	[HSR_EC_SVC_HYP]	= handle_svc_hyp,
+	[HSR_EC_HVC]		= handle_hvc,
+	[HSR_EC_SMC]		= handle_smc,
+	[HSR_EC_IABT]		= kvm_handle_guest_abort,
+	[HSR_EC_IABT_HYP]	= handle_pabt_hyp,
+	[HSR_EC_DABT]		= kvm_handle_guest_abort,
+	[HSR_EC_DABT_HYP]	= handle_dabt_hyp,
+};
+
+static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
+{
+	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+
+	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
+	    !arm_exit_handlers[hsr_ec]) {
+		kvm_err("Unknown exception class: hsr: %#08x\n",
+			(unsigned int)kvm_vcpu_get_hsr(vcpu));
+		BUG();
+	}
+
+	return arm_exit_handlers[hsr_ec];
+}
+
+/*
+ * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
+ * proper exit to userspace.
+ */
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		int exception_index)
+{
+	exit_handle_fn exit_handler;
+
+	switch (exception_index) {
+	case ARM_EXCEPTION_IRQ:
+		return 1;
+	case ARM_EXCEPTION_UNDEFINED:
+		kvm_err("Undefined exception in Hyp mode at: %#08lx\n",
+			kvm_vcpu_get_hyp_pc(vcpu));
+		BUG();
+		panic("KVM: Hypervisor undefined exception!\n");
+	case ARM_EXCEPTION_DATA_ABORT:
+	case ARM_EXCEPTION_PREF_ABORT:
+	case ARM_EXCEPTION_HVC:
+		/*
+		 * See ARM ARM B1.14.1: "Hyp traps on instructions
+		 * that fail their condition code check"
+		 */
+		if (!kvm_condition_valid(vcpu)) {
+			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+			return 1;
+		}
+
+		exit_handler = kvm_get_exit_handler(vcpu);
+
+		return exit_handler(vcpu, run);
+	default:
+		kvm_pr_unimpl("Unsupported exception type: %d",
+			      exception_index);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		return 0;
+	}
+}
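
The new file is built around a NULL-checked dispatch table using C99 designated initializers: exception classes without a handler simply stay NULL, and kvm_get_exit_handler() range-checks the class before the indirect call. A tiny standalone model of the same pattern (all names here are made up):

#include <stdio.h>

typedef int (*handler_fn)(int data);

static int handle_foo(int data) { return data + 1; }
static int handle_bar(int data) { return data * 2; }

/* sparse table: unnamed slots are implicitly NULL */
static handler_fn handlers[8] = {
	[1] = handle_foo,
	[5] = handle_bar,
};

static int dispatch(unsigned int ec, int data)
{
	if (ec >= sizeof(handlers) / sizeof(handlers[0]) || !handlers[ec])
		return -1;	/* unknown class */
	return handlers[ec](data);
}

int main(void)
{
	printf("%d %d %d\n", dispatch(1, 41), dispatch(5, 21), dispatch(3, 0));
	return 0;
}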
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 8ca87ab0919d..f7793df62f58 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -35,15 +35,18 @@ __kvm_hyp_code_start:
 /********************************************************************
  * Flush per-VMID TLBs
  *
- * void __kvm_tlb_flush_vmid(struct kvm *kvm);
+ * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
  *
  * We rely on the hardware to broadcast the TLB invalidation to all CPUs
  * inside the inner-shareable domain (which is the case for all v7
  * implementations). If we come across a non-IS SMP implementation, we'll
  * have to use an IPI based mechanism. Until then, we stick to the simple
  * hardware assisted version.
+ *
+ * As v7 does not support flushing per IPA, just nuke the whole TLB
+ * instead, ignoring the ipa value.
  */
-ENTRY(__kvm_tlb_flush_vmid)
+ENTRY(__kvm_tlb_flush_vmid_ipa)
 	push	{r2, r3}
 
 	add	r0, r0, #KVM_VTTBR
@@ -60,7 +63,7 @@ ENTRY(__kvm_tlb_flush_vmid)
 
 	pop	{r2, r3}
 	bx	lr
-ENDPROC(__kvm_tlb_flush_vmid)
+ENDPROC(__kvm_tlb_flush_vmid_ipa)
 
 /********************************************************************
  * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
@@ -235,9 +238,9 @@ ENTRY(kvm_call_hyp)
  * instruction is issued since all traps are disabled when running the host
  * kernel as per the Hyp-mode initialization at boot time.
  *
- * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc
+ * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
  * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
- * host kernel) and they cause a trap to the vector page + offset 0xc when HVC
+ * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
  * instructions are called from within Hyp-mode.
  *
  * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 98a870ff1a5c..72a12f2171b2 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -33,16 +33,16 @@
  */
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	__u32 *dest;
+	unsigned long *dest;
 	unsigned int len;
 	int mask;
 
 	if (!run->mmio.is_write) {
 		dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
-		memset(dest, 0, sizeof(int));
+		*dest = 0;
 
 		len = run->mmio.len;
-		if (len > 4)
+		if (len > sizeof(unsigned long))
 			return -EINVAL;
 
 		memcpy(dest, run->mmio.data, len);
@@ -50,7 +50,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
 			       *((u64 *)run->mmio.data));
 
-		if (vcpu->arch.mmio_decode.sign_extend && len < 4) {
+		if (vcpu->arch.mmio_decode.sign_extend &&
+		    len < sizeof(unsigned long)) {
 			mask = 1U << ((len * 8) - 1);
 			*dest = (*dest ^ mask) - mask;
 		}
@@ -65,40 +66,29 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	unsigned long rt, len;
 	bool is_write, sign_extend;
 
-	if ((vcpu->arch.hsr >> 8) & 1) {
+	if (kvm_vcpu_dabt_isextabt(vcpu)) {
 		/* cache operation on I/O addr, tell guest unsupported */
-		kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
+		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 		return 1;
 	}
 
-	if ((vcpu->arch.hsr >> 7) & 1) {
+	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
 		/* page table accesses IO mem: tell guest to fix its TTBR */
-		kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
+		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 		return 1;
 	}
 
-	switch ((vcpu->arch.hsr >> 22) & 0x3) {
-	case 0:
-		len = 1;
-		break;
-	case 1:
-		len = 2;
-		break;
-	case 2:
-		len = 4;
-		break;
-	default:
-		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
-		return -EFAULT;
-	}
+	len = kvm_vcpu_dabt_get_as(vcpu);
+	if (unlikely(len < 0))
+		return len;
 
-	is_write = vcpu->arch.hsr & HSR_WNR;
-	sign_extend = vcpu->arch.hsr & HSR_SSE;
-	rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+	is_write = kvm_vcpu_dabt_iswrite(vcpu);
+	sign_extend = kvm_vcpu_dabt_issext(vcpu);
+	rt = kvm_vcpu_dabt_get_rd(vcpu);
 
 	if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
 		/* IO memory trying to read/write pc */
-		kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
+		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 		return 1;
 	}
 
@@ -112,7 +102,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * The MMIO instruction is emulated and should not be re-executed
 	 * in the guest.
 	 */
-	kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 	return 0;
 }
 
@@ -130,7 +120,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	 * space do its magic.
 	 */
 
-	if (vcpu->arch.hsr & HSR_ISV) {
+	if (kvm_vcpu_dabt_isvalid(vcpu)) {
 		ret = decode_hsr(vcpu, fault_ipa, &mmio);
 		if (ret)
 			return ret;
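
The sign-extension branch kept in kvm_handle_mmio_return() uses the classic (x ^ mask) - mask trick: mask carries the sign bit of the narrow load, the XOR clears it, and the subtraction borrows it through all the upper bits. A standalone demonstration for a 1-byte read:

#include <stdio.h>

int main(void)
{
	unsigned long dest = 0x80;	/* 1-byte MMIO read with sign bit set */
	unsigned int len = 1;
	unsigned long mask = 1UL << ((len * 8) - 1);

	dest = (dest ^ mask) - mask;	/* sign-extends to 0xff...80 */
	printf("%#lx (%ld)\n", dest, (long)dest);
	return 0;
}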
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 99e07c7dd745..2f12e4056408 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -20,7 +20,6 @@
 #include <linux/kvm_host.h>
 #include <linux/io.h>
 #include <trace/events/kvm.h>
-#include <asm/idmap.h>
 #include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/kvm_arm.h>
@@ -28,8 +27,6 @@
 #include <asm/kvm_mmio.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
-#include <asm/mach/map.h>
-#include <trace/events/kvm.h>
 
 #include "trace.h"
 
@@ -37,19 +34,9 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
 
 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
 
-static void kvm_tlb_flush_vmid(struct kvm *kvm)
+static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
-	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
-}
-
-static void kvm_set_pte(pte_t *pte, pte_t new_pte)
-{
-	pte_val(*pte) = new_pte;
-	/*
-	 * flush_pmd_entry just takes a void pointer and cleans the necessary
-	 * cache entries, so we can reuse the function for ptes.
-	 */
-	flush_pmd_entry(pte);
+	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 }
 
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
@@ -98,33 +85,42 @@ static void free_ptes(pmd_t *pmd, unsigned long addr)
 	}
 }
 
+static void free_hyp_pgd_entry(unsigned long addr)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	unsigned long hyp_addr = KERN_TO_HYP(addr);
+
+	pgd = hyp_pgd + pgd_index(hyp_addr);
+	pud = pud_offset(pgd, hyp_addr);
+
+	if (pud_none(*pud))
+		return;
+	BUG_ON(pud_bad(*pud));
+
+	pmd = pmd_offset(pud, hyp_addr);
+	free_ptes(pmd, addr);
+	pmd_free(NULL, pmd);
+	pud_clear(pud);
+}
+
 /**
  * free_hyp_pmds - free a Hyp-mode level-2 tables and child level-3 tables
  *
  * Assumes this is a page table used strictly in Hyp-mode and therefore contains
- * only mappings in the kernel memory area, which is above PAGE_OFFSET.
+ * either mappings in the kernel memory area (above PAGE_OFFSET), or
+ * device mappings in the vmalloc range (from VMALLOC_START to VMALLOC_END).
  */
 void free_hyp_pmds(void)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
 	unsigned long addr;
 
 	mutex_lock(&kvm_hyp_pgd_mutex);
-	for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) {
-		pgd = hyp_pgd + pgd_index(addr);
-		pud = pud_offset(pgd, addr);
-
-		if (pud_none(*pud))
-			continue;
-		BUG_ON(pud_bad(*pud));
-
-		pmd = pmd_offset(pud, addr);
-		free_ptes(pmd, addr);
-		pmd_free(NULL, pmd);
-		pud_clear(pud);
-	}
+	for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
+		free_hyp_pgd_entry(addr);
+	for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
+		free_hyp_pgd_entry(addr);
 	mutex_unlock(&kvm_hyp_pgd_mutex);
 }
 
@@ -136,7 +132,9 @@ static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
 	struct page *page;
 
 	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
-		pte = pte_offset_kernel(pmd, addr);
+		unsigned long hyp_addr = KERN_TO_HYP(addr);
+
+		pte = pte_offset_kernel(pmd, hyp_addr);
 		BUG_ON(!virt_addr_valid(addr));
 		page = virt_to_page(addr);
 		kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
@@ -151,7 +149,9 @@ static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
 	unsigned long addr;
 
 	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
-		pte = pte_offset_kernel(pmd, addr);
+		unsigned long hyp_addr = KERN_TO_HYP(addr);
+
+		pte = pte_offset_kernel(pmd, hyp_addr);
 		BUG_ON(pfn_valid(*pfn_base));
 		kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
 		(*pfn_base)++;
@@ -166,12 +166,13 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
 	unsigned long addr, next;
 
 	for (addr = start; addr < end; addr = next) {
-		pmd = pmd_offset(pud, addr);
+		unsigned long hyp_addr = KERN_TO_HYP(addr);
+		pmd = pmd_offset(pud, hyp_addr);
 
 		BUG_ON(pmd_sect(*pmd));
 
 		if (pmd_none(*pmd)) {
-			pte = pte_alloc_one_kernel(NULL, addr);
+			pte = pte_alloc_one_kernel(NULL, hyp_addr);
 			if (!pte) {
 				kvm_err("Cannot allocate Hyp pte\n");
 				return -ENOMEM;
@@ -206,17 +207,23 @@ static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
 	unsigned long addr, next;
 	int err = 0;
 
-	BUG_ON(start > end);
-	if (start < PAGE_OFFSET)
+	if (start >= end)
+		return -EINVAL;
+	/* Check for a valid kernel memory mapping */
+	if (!pfn_base && (!virt_addr_valid(from) || !virt_addr_valid(to - 1)))
+		return -EINVAL;
+	/* Check for a valid kernel IO mapping */
+	if (pfn_base && (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)))
 		return -EINVAL;
 
 	mutex_lock(&kvm_hyp_pgd_mutex);
 	for (addr = start; addr < end; addr = next) {
-		pgd = hyp_pgd + pgd_index(addr);
-		pud = pud_offset(pgd, addr);
+		unsigned long hyp_addr = KERN_TO_HYP(addr);
+		pgd = hyp_pgd + pgd_index(hyp_addr);
+		pud = pud_offset(pgd, hyp_addr);
 
 		if (pud_none_or_clear_bad(pud)) {
-			pmd = pmd_alloc_one(NULL, addr);
+			pmd = pmd_alloc_one(NULL, hyp_addr);
 			if (!pmd) {
 				kvm_err("Cannot allocate Hyp pmd\n");
 				err = -ENOMEM;
@@ -236,12 +243,13 @@ out:
 }
 
 /**
- * create_hyp_mappings - map a kernel virtual address range in Hyp mode
+ * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
  * @from:	The virtual kernel start address of the range
  * @to:	The virtual kernel end address of the range (exclusive)
  *
- * The same virtual address as the kernel virtual address is also used in
- * Hyp-mode mapping to the same underlying physical pages.
+ * The same virtual address as the kernel virtual address is also used
+ * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
+ * physical pages.
  *
  * Note: Wrapping around zero in the "to" address is not supported.
  */
@@ -251,10 +259,13 @@ int create_hyp_mappings(void *from, void *to)
 }
 
 /**
- * create_hyp_io_mappings - map a physical IO range in Hyp mode
- * @from:	The virtual HYP start address of the range
- * @to:	The virtual HYP end address of the range (exclusive)
+ * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
+ * @from:	The kernel start VA of the range
+ * @to:	The kernel end VA of the range (exclusive)
  * @addr:	The physical start address which gets mapped
+ *
+ * The resulting HYP VA is the same as the kernel VA, modulo
+ * HYP_PAGE_OFFSET.
  */
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
 {
@@ -290,7 +301,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 	VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));
 
 	memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
-	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+	kvm_clean_pgd(pgd);
 	kvm->arch.pgd = pgd;
 
 	return 0;
@@ -422,22 +433,22 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 			return 0; /* ignore calls from kvm_set_spte_hva */
 		pmd = mmu_memory_cache_alloc(cache);
 		pud_populate(NULL, pud, pmd);
-		pmd += pmd_index(addr);
 		get_page(virt_to_page(pud));
-	} else
-		pmd = pmd_offset(pud, addr);
+	}
+
+	pmd = pmd_offset(pud, addr);
 
 	/* Create 2nd stage page table mapping - Level 2 */
 	if (pmd_none(*pmd)) {
 		if (!cache)
 			return 0; /* ignore calls from kvm_set_spte_hva */
 		pte = mmu_memory_cache_alloc(cache);
-		clean_pte_table(pte);
+		kvm_clean_pte(pte);
 		pmd_populate_kernel(NULL, pmd, pte);
-		pte += pte_index(addr);
 		get_page(virt_to_page(pmd));
-	} else
-		pte = pte_offset_kernel(pmd, addr);
+	}
+
+	pte = pte_offset_kernel(pmd, addr);
 
 	if (iomap && pte_present(*pte))
 		return -EFAULT;
@@ -446,7 +457,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 	old_pte = *pte;
 	kvm_set_pte(pte, *new_pte);
 	if (pte_present(old_pte))
-		kvm_tlb_flush_vmid(kvm);
+		kvm_tlb_flush_vmid_ipa(kvm, addr);
 	else
 		get_page(virt_to_page(pte));
 
@@ -473,7 +484,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 	pfn = __phys_to_pfn(pa);
 
 	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
-		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR);
+		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
+		kvm_set_s2pte_writable(&pte);
 
 		ret = mmu_topup_memory_cache(&cache, 2, 2);
 		if (ret)
@@ -492,29 +504,6 @@ out:
 	return ret;
 }
 
-static void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
-{
-	/*
-	 * If we are going to insert an instruction page and the icache is
-	 * either VIPT or PIPT, there is a potential problem where the host
-	 * (or another VM) may have used the same page as this guest, and we
-	 * read incorrect data from the icache. If we're using a PIPT cache,
-	 * we can invalidate just that page, but if we are using a VIPT cache
-	 * we need to invalidate the entire icache - damn shame - as written
-	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
-	 *
-	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
-	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
-	 */
-	if (icache_is_pipt()) {
-		unsigned long hva = gfn_to_hva(kvm, gfn);
-		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
-	} else if (!icache_is_vivt_asid_tagged()) {
-		/* any kind of VIPT cache */
-		__flush_icache_all();
-	}
-}
-
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  gfn_t gfn, struct kvm_memory_slot *memslot,
 			  unsigned long fault_status)
@@ -526,7 +515,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	unsigned long mmu_seq;
 	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
 
-	write_fault = kvm_is_write_fault(vcpu->arch.hsr);
+	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
 	if (fault_status == FSC_PERM && !write_fault) {
 		kvm_err("Unexpected L2 read permission error\n");
 		return -EFAULT;
@@ -560,7 +549,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
 	if (writable) {
-		pte_val(new_pte) |= L_PTE_S2_RDWR;
+		kvm_set_s2pte_writable(&new_pte);
 		kvm_set_pfn_dirty(pfn);
 	}
 	stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
@@ -585,7 +574,6 @@ out_unlock:
  */
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	unsigned long hsr_ec;
 	unsigned long fault_status;
 	phys_addr_t fault_ipa;
 	struct kvm_memory_slot *memslot;
@@ -593,18 +581,17 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	gfn_t gfn;
 	int ret, idx;
 
-	hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT;
-	is_iabt = (hsr_ec == HSR_EC_IABT);
-	fault_ipa = ((phys_addr_t)vcpu->arch.hpfar & HPFAR_MASK) << 8;
+	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
+	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
 
-	trace_kvm_guest_fault(*vcpu_pc(vcpu), vcpu->arch.hsr,
-			      vcpu->arch.hxfar, fault_ipa);
+	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
+			      kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
 	/* Check the stage-2 fault is trans. fault or write fault */
-	fault_status = (vcpu->arch.hsr & HSR_FSC_TYPE);
+	fault_status = kvm_vcpu_trap_get_fault(vcpu);
 	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
-		kvm_err("Unsupported fault status: EC=%#lx DFCS=%#lx\n",
-			hsr_ec, fault_status);
+		kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
+			kvm_vcpu_trap_get_class(vcpu), fault_status);
 		return -EFAULT;
 	}
 
@@ -614,7 +601,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
 		if (is_iabt) {
 			/* Prefetch Abort on I/O address */
-			kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
+			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 			ret = 1;
 			goto out_unlock;
 		}
@@ -626,8 +613,13 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		goto out_unlock;
 	}
 
-	/* Adjust page offset */
-	fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK;
+	/*
+	 * The IPA is reported as [MAX:12], so we need to
+	 * complement it with the bottom 12 bits from the
+	 * faulting VA. This is always 12 bits, irrespective
+	 * of the page size.
+	 */
+	fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
 	ret = io_mem_abort(vcpu, run, fault_ipa);
 	goto out_unlock;
 }
@@ -682,7 +674,7 @@ static void handle_hva_to_gpa(struct kvm *kvm,
 static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 {
 	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
-	kvm_tlb_flush_vmid(kvm);
+	kvm_tlb_flush_vmid_ipa(kvm, gpa);
 }
 
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
@@ -776,7 +768,7 @@ void kvm_clear_hyp_idmap(void)
 		pmd = pmd_offset(pud, addr);
 
 		pud_clear(pud);
-		clean_pmd_entry(pmd);
+		kvm_clean_pmd_entry(pmd);
 		pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
 	} while (pgd++, addr = next, addr < end);
 }
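
The comment added near the end of kvm_handle_guest_abort() is worth a worked example: HPFAR only reports bits [MAX:12] of the IPA, so the low 12 bits must come from the faulting VA. A standalone sketch following the old open-coded arithmetic from the left-hand side of the hunk; both register values are invented:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hpfar = 0x00089ab0;	/* made-up: IPA[39:12] in bits [31:4] */
	uint32_t hfar  = 0xc0012345;	/* made-up faulting VA */

	uint64_t fault_ipa = ((uint64_t)hpfar & ~0xfULL) << 8;
	fault_ipa |= hfar & ((1 << 12) - 1);	/* page offset from the VA */

	printf("fault_ipa = %#llx\n", (unsigned long long)fault_ipa);
	return 0;
}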
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
index 0e4cfe123b38..17c5ac7d10ed 100644
--- a/arch/arm/kvm/vgic.c
+++ b/arch/arm/kvm/vgic.c
@@ -1477,7 +1477,7 @@ int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
 	if (addr & ~KVM_PHYS_MASK)
 		return -E2BIG;
 
-	if (addr & ~PAGE_MASK)
+	if (addr & (SZ_4K - 1))
 		return -EINVAL;
 
 	mutex_lock(&kvm->lock);
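
The vgic.c change swaps a host-page-size-dependent test for a fixed one: PAGE_MASK tracks the kernel's configured page size, while SZ_4K - 1 always means "4K aligned", presumably so the check keeps the same meaning on kernels built with larger pages. The idiom itself, standalone:

#include <stdio.h>

#define SZ_4K 0x1000UL

int main(void)
{
	unsigned long a = 0x10000000UL;	/* 4K-aligned */
	unsigned long b = 0x10000800UL;	/* not 4K-aligned */

	/* a power-of-two size minus one masks exactly the low bits */
	printf("%#lx aligned: %d\n", a, (a & (SZ_4K - 1)) == 0);
	printf("%#lx aligned: %d\n", b, (b & (SZ_4K - 1)) == 0);
	return 0;
}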