about | summary | refs | log | tree | commit | diff | stats
path: root/arch/arm64
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-10-08 05:27:39 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-10-08 05:27:39 -0400
commite4e65676f272adb63655a2ca95207e8212d282f1 (patch)
tree3679a3e6897d698ee949642660281e7f74e2852b /arch/arm64
parentf89f4a06a59f30dec64b2afc4111426fc01e9e12 (diff)
parentf439ed27f8b8b90d243ae15acb193d37f96eebe0 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini: "Fixes and features for 3.18. Apart from the usual cleanups, here is the summary of new features: - s390 moves closer towards host large page support - PowerPC has improved support for debugging (both inside the guest and via gdbstub) and support for e6500 processors - ARM/ARM64 support read-only memory (which is necessary to put firmware in emulated NOR flash) - x86 has the usual emulator fixes and nested virtualization improvements (including improved Windows support on Intel and Jailhouse hypervisor support on AMD), adaptive PLE which helps overcommitting of huge guests. Also included are some patches that make KVM more friendly to memory hot-unplug, and fixes for rare caching bugs. Two patches have trivial mm/ parts that were acked by Rik and Andrew. Note: I will soon switch to a subkey for signing purposes" * tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (157 commits) kvm: do not handle APIC access page if in-kernel irqchip is not in use KVM: s390: count vcpu wakeups in stat.halt_wakeup KVM: s390/facilities: allow TOD-CLOCK steering facility bit KVM: PPC: BOOK3S: HV: CMA: Reserve cma region only in hypervisor mode arm/arm64: KVM: Report correct FSC for unsupported fault types arm/arm64: KVM: Fix VTTBR_BADDR_MASK and pgd alloc kvm: Fix kvm_get_page_retry_io __gup retval check arm/arm64: KVM: Fix set_clear_sgi_pend_reg offset kvm: x86: Unpin and remove kvm_arch->apic_access_page kvm: vmx: Implement set_apic_access_page_addr kvm: x86: Add request bit to reload APIC access page address kvm: Add arch specific mmu notifier for page invalidation kvm: Rename make_all_cpus_request() to kvm_make_all_cpus_request() and make it non-static kvm: Fix page ageing bugs kvm/x86/mmu: Pass gfn and level to rmapp callback. x86: kvm: use alternatives for VMCALL vs. 
VMMCALL if kernel text is read-only kvm: x86: use macros to compute bank MSRs KVM: x86: Remove debug assertion of non-PAE reserved bits kvm: don't take vcpu mutex for obviously invalid vcpu ioctls kvm: Faults which trigger IO release the mmap_sem ...
Diffstat (limited to 'arch/arm64')
-rw-r--r--arch/arm64/include/asm/kvm_arm.h13
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h5
-rw-r--r--arch/arm64/include/asm/kvm_host.h24
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h18
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h2
-rw-r--r--arch/arm64/kvm/guest.c2
-rw-r--r--arch/arm64/kvm/sys_regs.c2
7 files changed, 40 insertions(+), 26 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index cc83520459ed..7fd3e27e3ccc 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -122,6 +122,17 @@
122#define VTCR_EL2_T0SZ_MASK 0x3f 122#define VTCR_EL2_T0SZ_MASK 0x3f
123#define VTCR_EL2_T0SZ_40B 24 123#define VTCR_EL2_T0SZ_40B 24
124 124
125/*
126 * We configure the Stage-2 page tables to always restrict the IPA space to be
127 * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
128 * not known to exist and will break with this configuration.
129 *
130 * Note that when using 4K pages, we concatenate two first level page tables
131 * together.
132 *
133 * The magic numbers used for VTTBR_X in this patch can be found in Tables
134 * D4-23 and D4-25 in ARM DDI 0487A.b.
135 */
125#ifdef CONFIG_ARM64_64K_PAGES 136#ifdef CONFIG_ARM64_64K_PAGES
126/* 137/*
127 * Stage2 translation configuration: 138 * Stage2 translation configuration:
@@ -149,7 +160,7 @@
149#endif 160#endif
150 161
151#define VTTBR_BADDR_SHIFT (VTTBR_X - 1) 162#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
152#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) 163#define VTTBR_BADDR_MASK (((1LLU << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
153#define VTTBR_VMID_SHIFT (48LLU) 164#define VTTBR_VMID_SHIFT (48LLU)
154#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT) 165#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
155 166
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index fdc3e21abd8d..5674a55b5518 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -174,6 +174,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
174 174
175static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) 175static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
176{ 176{
177 return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC;
178}
179
180static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
181{
177 return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE; 182 return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
178} 183}
179 184
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e10c45a578e3..2012c4ba8d67 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -22,6 +22,8 @@
22#ifndef __ARM64_KVM_HOST_H__ 22#ifndef __ARM64_KVM_HOST_H__
23#define __ARM64_KVM_HOST_H__ 23#define __ARM64_KVM_HOST_H__
24 24
25#include <linux/types.h>
26#include <linux/kvm_types.h>
25#include <asm/kvm.h> 27#include <asm/kvm.h>
26#include <asm/kvm_asm.h> 28#include <asm/kvm_asm.h>
27#include <asm/kvm_mmio.h> 29#include <asm/kvm_mmio.h>
@@ -41,8 +43,7 @@
41 43
42#define KVM_VCPU_MAX_FEATURES 3 44#define KVM_VCPU_MAX_FEATURES 3
43 45
44struct kvm_vcpu; 46int __attribute_const__ kvm_target_cpu(void);
45int kvm_target_cpu(void);
46int kvm_reset_vcpu(struct kvm_vcpu *vcpu); 47int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
47int kvm_arch_dev_ioctl_check_extension(long ext); 48int kvm_arch_dev_ioctl_check_extension(long ext);
48 49
@@ -164,25 +165,23 @@ struct kvm_vcpu_stat {
164 u32 halt_wakeup; 165 u32 halt_wakeup;
165}; 166};
166 167
167struct kvm_vcpu_init;
168int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, 168int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
169 const struct kvm_vcpu_init *init); 169 const struct kvm_vcpu_init *init);
170int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init); 170int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
171unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); 171unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
172int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); 172int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
173struct kvm_one_reg;
174int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); 173int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
175int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); 174int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
176 175
177#define KVM_ARCH_WANT_MMU_NOTIFIER 176#define KVM_ARCH_WANT_MMU_NOTIFIER
178struct kvm;
179int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); 177int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
180int kvm_unmap_hva_range(struct kvm *kvm, 178int kvm_unmap_hva_range(struct kvm *kvm,
181 unsigned long start, unsigned long end); 179 unsigned long start, unsigned long end);
182void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); 180void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
183 181
184/* We do not have shadow page tables, hence the empty hooks */ 182/* We do not have shadow page tables, hence the empty hooks */
185static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva) 183static inline int kvm_age_hva(struct kvm *kvm, unsigned long start,
184 unsigned long end)
186{ 185{
187 return 0; 186 return 0;
188} 187}
@@ -192,8 +191,13 @@ static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
192 return 0; 191 return 0;
193} 192}
194 193
194static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
195 unsigned long address)
196{
197}
198
195struct kvm_vcpu *kvm_arm_get_running_vcpu(void); 199struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
196struct kvm_vcpu __percpu **kvm_get_running_vcpus(void); 200struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
197 201
198u64 kvm_call_hyp(void *hypfn, ...); 202u64 kvm_call_hyp(void *hypfn, ...);
199 203
@@ -244,4 +248,10 @@ static inline void vgic_arch_setup(const struct vgic_params *vgic)
244 } 248 }
245} 249}
246 250
251static inline void kvm_arch_hardware_disable(void) {}
252static inline void kvm_arch_hardware_unsetup(void) {}
253static inline void kvm_arch_sync_events(struct kvm *kvm) {}
254static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
255static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
256
247#endif /* __ARM64_KVM_HOST_H__ */ 257#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 8e138c7c53ac..a030d163840b 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -59,10 +59,9 @@
59#define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET) 59#define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
60 60
61/* 61/*
62 * Align KVM with the kernel's view of physical memory. Should be 62 * We currently only support a 40bit IPA.
63 * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration.
64 */ 63 */
65#define KVM_PHYS_SHIFT PHYS_MASK_SHIFT 64#define KVM_PHYS_SHIFT (40)
66#define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT) 65#define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
67#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL) 66#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)
68 67
@@ -93,19 +92,6 @@ void kvm_clear_hyp_idmap(void);
93#define kvm_set_pte(ptep, pte) set_pte(ptep, pte) 92#define kvm_set_pte(ptep, pte) set_pte(ptep, pte)
94#define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd) 93#define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd)
95 94
96static inline bool kvm_is_write_fault(unsigned long esr)
97{
98 unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT;
99
100 if (esr_ec == ESR_EL2_EC_IABT)
101 return false;
102
103 if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR))
104 return false;
105
106 return true;
107}
108
109static inline void kvm_clean_pgd(pgd_t *pgd) {} 95static inline void kvm_clean_pgd(pgd_t *pgd) {}
110static inline void kvm_clean_pmd_entry(pmd_t *pmd) {} 96static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
111static inline void kvm_clean_pte(pte_t *pte) {} 97static inline void kvm_clean_pte(pte_t *pte) {}
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index e633ff8cdec8..8e38878c87c6 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -37,6 +37,7 @@
37 37
38#define __KVM_HAVE_GUEST_DEBUG 38#define __KVM_HAVE_GUEST_DEBUG
39#define __KVM_HAVE_IRQ_LINE 39#define __KVM_HAVE_IRQ_LINE
40#define __KVM_HAVE_READONLY_MEM
40 41
41#define KVM_REG_SIZE(id) \ 42#define KVM_REG_SIZE(id) \
42 (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) 43 (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
@@ -159,6 +160,7 @@ struct kvm_arch_memory_slot {
159#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) 160#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
160#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 161#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
161#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) 162#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
163#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
162 164
163/* KVM_IRQ_LINE irq field index values */ 165/* KVM_IRQ_LINE irq field index values */
164#define KVM_ARM_IRQ_TYPE_SHIFT 24 166#define KVM_ARM_IRQ_TYPE_SHIFT 24
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 8d1ec2887a26..76794692c20b 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -174,7 +174,7 @@ static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
174 174
175 ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)); 175 ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
176 if (ret != 0) 176 if (ret != 0)
177 return ret; 177 return -EFAULT;
178 178
179 return kvm_arm_timer_set_reg(vcpu, reg->id, val); 179 return kvm_arm_timer_set_reg(vcpu, reg->id, val);
180} 180}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 5805e7c4a4dd..4cc3b719208e 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1218,7 +1218,7 @@ static bool is_valid_cache(u32 val)
1218 u32 level, ctype; 1218 u32 level, ctype;
1219 1219
1220 if (val >= CSSELR_MAX) 1220 if (val >= CSSELR_MAX)
1221 return -ENOENT; 1221 return false;
1222 1222
1223 /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */ 1223 /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
1224 level = (val >> 1); 1224 level = (val >> 1);