author    Linus Torvalds <torvalds@linux-foundation.org>    2014-01-23 00:40:43 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-01-23 00:40:43 -0500
commit    7ebd3faa9b5b42caf2d5aa1352a93dcfa0098011 (patch)
tree      c45acf88b7976dcec117b6a3dbe31a7fe710ef33 /arch/arm64
parent    bb1281f2aae08e5ef23eb0692c8833e95579cdf2 (diff)
parent    7650b6870930055426abb32cc47d164ccdea49db (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "First round of KVM updates for 3.14; PPC parts will come next week.

  Nothing major here, just bugfixes all over the place.  The most
  interesting part is the ARM guys' virtualized interrupt controller
  overhaul, which lets userspace get/set the state and thus enables
  migration of ARM VMs"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (67 commits)
  kvm: make KVM_MMU_AUDIT help text more readable
  KVM: s390: Fix memory access error detection
  KVM: nVMX: Update guest activity state field on L2 exits
  KVM: nVMX: Fix nested_run_pending on activity state HLT
  KVM: nVMX: Clean up handling of VMX-related MSRs
  KVM: nVMX: Add tracepoints for nested_vmexit and nested_vmexit_inject
  KVM: nVMX: Pass vmexit parameters to nested_vmx_vmexit
  KVM: nVMX: Leave VMX mode on clearing of feature control MSR
  KVM: VMX: Fix DR6 update on #DB exception
  KVM: SVM: Fix reading of DR6
  KVM: x86: Sync DR7 on KVM_SET_DEBUGREGS
  add support for Hyper-V reference time counter
  KVM: remove useless write to vcpu->hv_clock.tsc_timestamp
  KVM: x86: fix tsc catchup issue with tsc scaling
  KVM: x86: limit PIT timer frequency
  KVM: x86: handle invalid root_hpa everywhere
  kvm: Provide kvm_vcpu_eligible_for_directed_yield() stub
  kvm: vfio: silence GCC warning
  KVM: ARM: Remove duplicate include
  arm/arm64: KVM: relax the requirements of VMA alignment for THP
  ...
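The "get/set the state" wording above refers to new userspace-visible interfaces. As a rough illustration only (not code from this merge), the per-vcpu half of a migration save loop can be written against the long-standing ONE_REG ioctls, which the new arm64 timer register ids further down plug into; the VGIC distributor state itself travels over the KVM device-attr API, which this sketch does not cover. "vcpu_fd" and "save_vcpu_regs" are assumed/made-up names.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int save_vcpu_regs(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;
	uint64_t i;

	/* With n == 0 this fails with E2BIG but fills in the register count. */
	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);
	if (probe.n == 0)
		return -1;

	list = calloc(1, sizeof(*list) + probe.n * sizeof(uint64_t));
	if (!list)
		return -1;
	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0)
		return -1;

	for (i = 0; i < list->n; i++) {
		uint64_t val = 0;
		struct kvm_one_reg reg = {
			.id   = list->reg[i],
			.addr = (uintptr_t)&val,
		};

		/* Only 64-bit registers fit this buffer; skip other sizes. */
		if ((reg.id & KVM_REG_SIZE_MASK) != KVM_REG_SIZE_U64)
			continue;
		if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) == 0)
			printf("reg %#llx = %#llx\n",
			       (unsigned long long)reg.id,
			       (unsigned long long)val);
	}
	free(list);
	return 0;
}

A restore path would walk the same list with KVM_SET_ONE_REG on the destination vcpu.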
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/kvm_host.h      |  7
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h       |  1
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h      | 21
-rw-r--r--  arch/arm64/kvm/Kconfig                 | 11
-rw-r--r--  arch/arm64/kvm/guest.c                 | 32
-rw-r--r--  arch/arm64/kvm/handle_exit.c           |  3
-rw-r--r--  arch/arm64/kvm/sys_regs_generic_v8.c   |  3
7 files changed, 60 insertions(+), 18 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 5d85a02d1231..0a1d69751562 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -26,7 +26,12 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
 
-#define KVM_MAX_VCPUS 4
+#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
+#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
+#else
+#define KVM_MAX_VCPUS 0
+#endif
+
 #define KVM_USER_MEM_SLOTS 32
 #define KVM_PRIVATE_MEM_SLOTS 4
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
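With KVM_MAX_VCPUS now derived from CONFIG_KVM_ARM_MAX_VCPUS, the limit can differ between kernels, so a VMM should probe it at run time rather than hard-code it. A minimal sketch, not part of this diff; "query_max_vcpus" is a made-up helper, while KVM_CAP_MAX_VCPUS and KVM_CAP_NR_VCPUS are standard KVM capabilities.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int query_max_vcpus(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int max, recommended;

	if (kvm_fd < 0)
		return -1;

	/* Hard limit; tracks KVM_MAX_VCPUS in the running kernel. */
	max = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
	/* Soft, "recommended" limit. */
	recommended = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);

	printf("max vcpus: %d (recommended: %d)\n", max, recommended);
	return max;
}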
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 680f74e67497..7f1f9408ff66 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -136,6 +136,7 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
 }
 
 #define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))
+#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 5031f4263937..495ab6f84a61 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -55,8 +55,9 @@ struct kvm_regs {
 #define KVM_ARM_TARGET_AEM_V8		0
 #define KVM_ARM_TARGET_FOUNDATION_V8	1
 #define KVM_ARM_TARGET_CORTEX_A57	2
+#define KVM_ARM_TARGET_XGENE_POTENZA	3
 
-#define KVM_ARM_NUM_TARGETS		3
+#define KVM_ARM_NUM_TARGETS		4
 
 /* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
 #define KVM_ARM_DEVICE_TYPE_SHIFT	0
@@ -129,6 +130,24 @@ struct kvm_arch_memory_slot {
 #define KVM_REG_ARM64_SYSREG_OP2_MASK	0x0000000000000007
 #define KVM_REG_ARM64_SYSREG_OP2_SHIFT	0
 
+#define ARM64_SYS_REG_SHIFT_MASK(x,n) \
+	(((x) << KVM_REG_ARM64_SYSREG_ ## n ## _SHIFT) & \
+	KVM_REG_ARM64_SYSREG_ ## n ## _MASK)
+
+#define __ARM64_SYS_REG(op0,op1,crn,crm,op2) \
+	(KVM_REG_ARM64 | KVM_REG_ARM64_SYSREG | \
+	ARM64_SYS_REG_SHIFT_MASK(op0, OP0) | \
+	ARM64_SYS_REG_SHIFT_MASK(op1, OP1) | \
+	ARM64_SYS_REG_SHIFT_MASK(crn, CRN) | \
+	ARM64_SYS_REG_SHIFT_MASK(crm, CRM) | \
+	ARM64_SYS_REG_SHIFT_MASK(op2, OP2))
+
+#define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64)
+
+#define KVM_REG_ARM_TIMER_CTL		ARM64_SYS_REG(3, 3, 14, 3, 1)
+#define KVM_REG_ARM_TIMER_CNT		ARM64_SYS_REG(3, 3, 14, 3, 2)
+#define KVM_REG_ARM_TIMER_CVAL		ARM64_SYS_REG(3, 3, 14, 0, 2)
+
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT		24
 #define KVM_ARM_IRQ_TYPE_MASK		0xff
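The ARM64_SYS_REG macro above packs the op0/op1/CRn/CRm/op2 fields into a 64-bit ONE_REG id, and the three KVM_REG_ARM_TIMER_* ids make the guest's virtual timer state readable and writable like any other register. A rough usage sketch, not taken from this merge; "vcpu_fd" and "read_guest_timer_cnt" are assumed/made-up names.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_guest_timer_cnt(int vcpu_fd, uint64_t *cnt)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_TIMER_CNT,	/* id defined by this diff */
		.addr = (uintptr_t)cnt,		/* kernel copies the value here */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

Writing the same ids back with KVM_SET_ONE_REG restores the timer on the destination side of a migration.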
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 4480ab339a00..8ba85e9ea388 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -36,6 +36,17 @@ config KVM_ARM_HOST
 	---help---
 	  Provides host support for ARM processors.
 
+config KVM_ARM_MAX_VCPUS
+	int "Number maximum supported virtual CPUs per VM"
+	depends on KVM_ARM_HOST
+	default 4
+	help
+	  Static number of max supported virtual CPUs per VM.
+
+	  If you choose a high number, the vcpu structures will be quite
+	  large, so only choose a reasonable number that you expect to
+	  actually use.
+
 config KVM_ARM_VGIC
 	bool
 	depends on KVM_ARM_HOST && OF
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 3f0731e53274..08745578d54d 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -207,20 +207,26 @@ int __attribute_const__ kvm_target_cpu(void)
 	unsigned long implementor = read_cpuid_implementor();
 	unsigned long part_number = read_cpuid_part_number();
 
-	if (implementor != ARM_CPU_IMP_ARM)
-		return -EINVAL;
+	switch (implementor) {
+	case ARM_CPU_IMP_ARM:
+		switch (part_number) {
+		case ARM_CPU_PART_AEM_V8:
+			return KVM_ARM_TARGET_AEM_V8;
+		case ARM_CPU_PART_FOUNDATION:
+			return KVM_ARM_TARGET_FOUNDATION_V8;
+		case ARM_CPU_PART_CORTEX_A57:
+			return KVM_ARM_TARGET_CORTEX_A57;
+		};
+		break;
+	case ARM_CPU_IMP_APM:
+		switch (part_number) {
+		case APM_CPU_PART_POTENZA:
+			return KVM_ARM_TARGET_XGENE_POTENZA;
+		};
+		break;
+	};
 
-	switch (part_number) {
-	case ARM_CPU_PART_AEM_V8:
-		return KVM_ARM_TARGET_AEM_V8;
-	case ARM_CPU_PART_FOUNDATION:
-		return KVM_ARM_TARGET_FOUNDATION_V8;
-	case ARM_CPU_PART_CORTEX_A57:
-		/* Currently handled by the generic backend */
-		return KVM_ARM_TARGET_CORTEX_A57;
-	default:
-		return -EINVAL;
-	}
+	return -EINVAL;
 }
 
 int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
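kvm_target_cpu() is what ultimately backs the target value userspace sees, so the new APM case means X-Gene Potenza hosts now get a usable target instead of -EINVAL. A hedged illustration of the userspace side, not part of this diff; "vm_fd", "vcpu_fd" and "init_vcpu_for_host" are assumed/made-up names, while KVM_ARM_PREFERRED_TARGET and KVM_ARM_VCPU_INIT are existing KVM ioctls.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int init_vcpu_for_host(int vm_fd, int vcpu_fd)
{
	struct kvm_vcpu_init init;

	memset(&init, 0, sizeof(init));

	/* On an X-Gene host this should now report KVM_ARM_TARGET_XGENE_POTENZA. */
	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
		return -1;

	printf("preferred target: %u\n", init.target);

	/* No optional features requested; init.features[] stays zeroed. */
	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}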
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 42a0f1bddfe7..7bc41eab4c64 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -39,9 +39,6 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	if (kvm_psci_call(vcpu))
-		return 1;
-
 	kvm_inject_undefined(vcpu);
 	return 1;
 }
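Context for the removal above: SMC from a guest now simply injects an undefined-instruction exception, so guests running under KVM are expected to issue PSCI calls through the HVC conduit. A guest-side sketch only, not kernel code and not part of this diff; it follows the usual function-id-in-x0, arguments-in-x1..x3 convention on arm64.

static inline long psci_hvc_call(unsigned long fn, unsigned long arg0,
				 unsigned long arg1, unsigned long arg2)
{
	register unsigned long x0 asm("x0") = fn;
	register unsigned long x1 asm("x1") = arg0;
	register unsigned long x2 asm("x2") = arg1;
	register unsigned long x3 asm("x3") = arg2;

	/* HVC traps to the hypervisor, where kvm_psci_call() handles it. */
	asm volatile("hvc #0"
		     : "+r" (x0)
		     : "r" (x1), "r" (x2), "r" (x3)
		     : "memory");

	return x0;	/* PSCI return value comes back in x0 */
}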
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c
index 4268ab9356b1..8fe6f76b0edc 100644
--- a/arch/arm64/kvm/sys_regs_generic_v8.c
+++ b/arch/arm64/kvm/sys_regs_generic_v8.c
@@ -90,6 +90,9 @@ static int __init sys_reg_genericv8_init(void)
 					  &genericv8_target_table);
 	kvm_register_target_sys_reg_table(KVM_ARM_TARGET_CORTEX_A57,
 					  &genericv8_target_table);
+	kvm_register_target_sys_reg_table(KVM_ARM_TARGET_XGENE_POTENZA,
+					  &genericv8_target_table);
+
 	return 0;
 }
 late_initcall(sys_reg_genericv8_init);