Diffstat
 arch/arm/include/asm/kvm_emulate.h   |  5 +++++
 arch/arm/kvm/coproc.c                | 14 ++++++++++----
 arch/arm/kvm/psci.c                  | 17 +++++++++++++----
 arch/arm/kvm/reset.c                 |  4 ----
 arch/arm64/include/asm/kvm_emulate.h |  5 +++++
 5 files changed, 33 insertions(+), 12 deletions(-)
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index a464e8d7b6c5..708e4d8a647f 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -157,4 +157,9 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
 }
 
+static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.cp15[c0_MPIDR];
+}
+
 #endif /* __ARM_KVM_EMULATE_H__ */
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index a629f2c1d0f9..78c0885d6501 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -74,11 +74,13 @@ int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 {
 	/*
-	 * Compute guest MPIDR. No need to mess around with different clusters
-	 * but we read the 'U' bit from the underlying hardware directly.
+	 * Compute guest MPIDR. We build a virtual cluster out of the
+	 * vcpu_id, but we read the 'U' bit from the underlying
+	 * hardware directly.
 	 */
-	vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & MPIDR_SMP_BITMASK)
-					| vcpu->vcpu_id;
+	vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
+				     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
+				     (vcpu->vcpu_id & 3));
 }
 
 /* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
@@ -122,6 +124,10 @@ static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
 	l2ctlr &= ~(3 << 24);
 	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
+	/* How many cores in the current cluster and the next ones */
+	ncores -= (vcpu->vcpu_id & ~3);
+	/* Cap it to the maximum number of cores in a single cluster */
+	ncores = min(ncores, 3U);
 	l2ctlr |= (ncores & 3) << 24;
 
 	vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
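The reset_mpidr() hunk above groups vcpus into virtual clusters of four: bits [1:0] of the vcpu_id become the core number (Aff0) and the remaining bits become the cluster number (Aff1), shifted up by MPIDR_LEVEL_BITS. The reset_l2ctlr() hunk follows the same grouping, reporting only the cores left in this vcpu's own cluster and capping the field at four cores (encoded as 0..3). Below is a minimal user-space sketch of the id-to-affinity mapping, not kernel code; the 8-bit affinity field width and the function name are illustrative assumptions, and the real code additionally ORs in the 'U' bit read from the host MPIDR.

#include <stdio.h>

#define MPIDR_LEVEL_BITS 8      /* assumption: 8-bit affinity fields, as on 32-bit ARM */

/* Hypothetical helper mirroring the mapping in reset_mpidr() above. */
static unsigned long vcpu_id_to_mpidr_aff(unsigned int vcpu_id)
{
        /* cluster number (Aff1) = vcpu_id / 4, core number (Aff0) = vcpu_id % 4 */
        return ((unsigned long)(vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
               (vcpu_id & 3);
}

int main(void)
{
        for (unsigned int id = 0; id < 8; id++)
                printf("vcpu %u -> Aff1 %lu, Aff0 %lu\n", id,
                       vcpu_id_to_mpidr_aff(id) >> MPIDR_LEVEL_BITS,
                       vcpu_id_to_mpidr_aff(id) & 0xff);
        return 0;
}

For example, vcpu_id 5 comes out as Aff1 = 1, Aff0 = 1, i.e. 0x101 in the low MPIDR bits.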
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 86a693a02ba3..311263124acf 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -18,6 +18,7 @@
 #include <linux/kvm_host.h>
 #include <linux/wait.h>
 
+#include <asm/cputype.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_psci.h>
 
@@ -34,22 +35,30 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
 static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 {
 	struct kvm *kvm = source_vcpu->kvm;
-	struct kvm_vcpu *vcpu;
+	struct kvm_vcpu *vcpu = NULL, *tmp;
 	wait_queue_head_t *wq;
 	unsigned long cpu_id;
+	unsigned long mpidr;
 	phys_addr_t target_pc;
+	int i;
 
 	cpu_id = *vcpu_reg(source_vcpu, 1);
 	if (vcpu_mode_is_32bit(source_vcpu))
 		cpu_id &= ~((u32) 0);
 
-	if (cpu_id >= atomic_read(&kvm->online_vcpus))
+	kvm_for_each_vcpu(i, tmp, kvm) {
+		mpidr = kvm_vcpu_get_mpidr(tmp);
+		if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
+			vcpu = tmp;
+			break;
+		}
+	}
+
+	if (!vcpu)
 		return KVM_PSCI_RET_INVAL;
 
 	target_pc = *vcpu_reg(source_vcpu, 2);
 
-	vcpu = kvm_get_vcpu(kvm, cpu_id);
-
 	wq = kvm_arch_vcpu_wq(vcpu);
 	if (!waitqueue_active(wq))
 		return KVM_PSCI_RET_INVAL;
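Because vcpu_id and guest MPIDR no longer coincide once there is more than one virtual cluster, kvm_psci_vcpu_on() above now treats the PSCI CPU_ON argument in r1 as an MPIDR and scans all vcpus for a match on the affinity bits, rather than indexing by vcpu number. Here is a rough user-space sketch of that lookup, assuming a 0x00ffffff affinity mask in the spirit of MPIDR_HWID_BITMASK on 32-bit ARM; struct fake_vcpu and find_target are made-up names for illustration only.

#include <stddef.h>

#define HWID_MASK 0x00ffffffUL  /* assumed stand-in for MPIDR_HWID_BITMASK */

struct fake_vcpu {
        unsigned long mpidr;    /* guest MPIDR, as set by reset_mpidr() */
};

/* Return the vcpu whose affinity bits match cpu_id, or NULL if none does. */
static struct fake_vcpu *find_target(struct fake_vcpu *vcpus, size_t n,
                                     unsigned long cpu_id)
{
        for (size_t i = 0; i < n; i++) {
                if ((vcpus[i].mpidr & HWID_MASK) == (cpu_id & HWID_MASK))
                        return &vcpus[i];
        }
        return NULL;
}

int main(void)
{
        struct fake_vcpu vcpus[] = { { 0x000 }, { 0x001 }, { 0x100 }, { 0x101 } };
        /* CPU_ON with r1 = 0x101 targets cluster 1, core 1 (vcpus[3] here) */
        return find_target(vcpus, 4, 0x101) == &vcpus[3] ? 0 : 1;
}

In the kernel code above the iteration is done with kvm_for_each_vcpu(), and a failed lookup is reported to the guest as KVM_PSCI_RET_INVAL.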
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index d153e64d1255..f558c073c023 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -33,8 +33,6 @@
  * Cortex-A15 and Cortex-A7 Reset Values
  */
 
-static const int cortexa_max_cpu_idx = 3;
-
 static struct kvm_regs cortexa_regs_reset = {
 	.usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
 };
@@ -64,8 +62,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	switch (vcpu->arch.target) {
 	case KVM_ARM_TARGET_CORTEX_A7:
 	case KVM_ARM_TARGET_CORTEX_A15:
-		if (vcpu->vcpu_id > cortexa_max_cpu_idx)
-			return -EINVAL;
 		reset_regs = &cortexa_regs_reset;
 		vcpu->arch.midr = read_cpuid_id();
 		cpu_vtimer_irq = &cortexa_vtimer_irq;
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index eec073875218..6df93cdc652b 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -177,4 +177,9 @@ static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
 }
 
+static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+{
+	return vcpu_sys_reg(vcpu, MPIDR_EL1);
+}
+
 #endif /* __ARM64_KVM_EMULATE_H__ */
