author     Andre Przywara <andre.przywara@arm.com>            2014-06-02 09:37:13 -0400
committer  Christoffer Dall <christoffer.dall@linaro.org>     2015-01-20 12:25:17 -0500
commit     4429fc64b90368e9bc93f933ea8b011d8db3a2f2 (patch)
tree       a7cb2e3dbebe4c2d6dc7a17e648a04b1c82090b9 /arch
parent     7276030a082c9c33150e5900a80e26c6e3189b16 (diff)
arm/arm64: KVM: rework MPIDR assignment and add accessors
The virtual MPIDR registers (containing topology information) for the
guest are currently mapped linearly to the vcpu_id. Improve this
mapping for arm64 by using three levels to not artificially limit the
number of vCPUs.
To help this, change and rename the kvm_vcpu_get_mpidr() function to
mask off the non-affinity bits in the MPIDR register.
Also add an accessor to later allow easier access to a vCPU with a
given MPIDR. Use this new accessor in the PSCI emulation.
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
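
For illustration only, a minimal user-space sketch (not part of the patch) of the vcpu_id-to-MPIDR mapping that the new reset_mpidr() below implements: Aff0 holds at most 16 vCPUs and the remaining bits spill into Aff1 and Aff2. The shifts 0/8/16 stand in for MPIDR_LEVEL_SHIFT(0/1/2) as defined on arm64; treat them as an assumption and check asm/cputype.h.

/*
 * Illustrative sketch (not from the patch): decompose a linear vcpu_id
 * into the three MPIDR affinity fields the way reset_mpidr() now does.
 * The shifts 0/8/16 stand in for MPIDR_LEVEL_SHIFT(0/1/2) on arm64.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t vcpu_id_to_mpidr(unsigned int vcpu_id)
{
        uint64_t mpidr;

        mpidr  = (uint64_t)(vcpu_id & 0x0f);                    /* Aff0: at most 16 vCPUs */
        mpidr |= (uint64_t)((vcpu_id >> 4) & 0xff) << 8;        /* Aff1 */
        mpidr |= (uint64_t)((vcpu_id >> 12) & 0xff) << 16;      /* Aff2 */
        return (1ULL << 31) | mpidr;                            /* bit 31 is RES1 */
}

int main(void)
{
        unsigned int id;

        for (id = 0; id < 40; id += 13)
                printf("vcpu %2u -> MPIDR 0x%llx\n", id,
                       (unsigned long long)vcpu_id_to_mpidr(id));
        return 0;
}

For example, vcpu_id 37 yields MPIDR 0x80000205, i.e. Aff2=0, Aff1=2, Aff0=5, instead of the old scheme's single Aff0 byte.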
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h    |  5
-rw-r--r--  arch/arm/include/asm/kvm_host.h       |  2
-rw-r--r--  arch/arm/kvm/arm.c                    | 13
-rw-r--r--  arch/arm/kvm/psci.c                   | 17
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h  |  5
-rw-r--r--  arch/arm64/include/asm/kvm_host.h     |  2
-rw-r--r--  arch/arm64/kvm/sys_regs.c             | 13
7 files changed, 39 insertions, 18 deletions
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 66ce17655bb9..c52861577567 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -23,6 +23,7 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
 #include <asm/kvm_arm.h>
+#include <asm/cputype.h>
 
 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
 unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
@@ -167,9 +168,9 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
         return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
 }
 
-static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 {
-        return vcpu->arch.cp15[c0_MPIDR];
+        return vcpu->arch.cp15[c0_MPIDR] & MPIDR_HWID_BITMASK;
 }
 
 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 7d07eb85c0b0..2fa51740edc2 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -236,6 +236,8 @@ int kvm_perf_teardown(void);
 
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
+struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
+
 static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 74603a09ee76..a7b94ecba0d8 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1075,6 +1075,19 @@ static void check_kvm_target_cpu(void *ret)
         *(int *)ret = kvm_target_cpu();
 }
 
+struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
+{
+        struct kvm_vcpu *vcpu;
+        int i;
+
+        mpidr &= MPIDR_HWID_BITMASK;
+        kvm_for_each_vcpu(i, vcpu, kvm) {
+                if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
+                        return vcpu;
+        }
+        return NULL;
+}
+
 /**
  * Initialize Hyp-mode and memory mappings on all CPUs.
  */
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 58cb3248d277..02fa8eff6ae1 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -22,6 +22,7 @@
 #include <asm/cputype.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_psci.h>
+#include <asm/kvm_host.h>
 
 /*
  * This is an implementation of the Power State Coordination Interface
@@ -66,25 +67,17 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
 static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 {
         struct kvm *kvm = source_vcpu->kvm;
-        struct kvm_vcpu *vcpu = NULL, *tmp;
+        struct kvm_vcpu *vcpu = NULL;
         wait_queue_head_t *wq;
         unsigned long cpu_id;
         unsigned long context_id;
-        unsigned long mpidr;
         phys_addr_t target_pc;
-        int i;
 
-        cpu_id = *vcpu_reg(source_vcpu, 1);
+        cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
         if (vcpu_mode_is_32bit(source_vcpu))
                 cpu_id &= ~((u32) 0);
 
-        kvm_for_each_vcpu(i, tmp, kvm) {
-                mpidr = kvm_vcpu_get_mpidr(tmp);
-                if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
-                        vcpu = tmp;
-                        break;
-                }
-        }
+        vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);
 
         /*
          * Make sure the caller requested a valid CPU and that the CPU is
@@ -155,7 +148,7 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
          * then ON else OFF
          */
         kvm_for_each_vcpu(i, tmp, kvm) {
-                mpidr = kvm_vcpu_get_mpidr(tmp);
+                mpidr = kvm_vcpu_get_mpidr_aff(tmp);
                 if (((mpidr & target_affinity_mask) == target_affinity) &&
                     !tmp->arch.pause) {
                         return PSCI_0_2_AFFINITY_LEVEL_ON;
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index a6fa2d2cd41c..b3f1defcb081 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -27,6 +27,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmio.h>
 #include <asm/ptrace.h>
+#include <asm/cputype.h>
 
 unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
 unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
@@ -192,9 +193,9 @@ static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
         return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
 }
 
-static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 {
-        return vcpu_sys_reg(vcpu, MPIDR_EL1);
+        return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
 }
 
 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 012af6ce9eed..ff8ee3ec32f4 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -207,6 +207,8 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
 
+struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
+
 static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
                                        phys_addr_t pgd_ptr,
                                        unsigned long hyp_stack_ptr,
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 3d7c2df89946..136e6797676b 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -252,10 +252,19 @@ static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 
 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
+        u64 mpidr;
+
         /*
-         * Simply map the vcpu_id into the Aff0 field of the MPIDR.
+         * Map the vcpu_id into the first three affinity level fields of
+         * the MPIDR. We limit the number of VCPUs in level 0 due to a
+         * limitation to 16 CPUs in that level in the ICC_SGIxR registers
+         * of the GICv3 to be able to address each CPU directly when
+         * sending IPIs.
          */
-        vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
+        mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
+        mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
+        mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
+        vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
 }
 
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
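
A side note on why the renamed accessor and kvm_mpidr_to_vcpu() mask with MPIDR_HWID_BITMASK: the emulated MPIDR has bit 31 set (RES1 on ARMv8), while a PSCI CPU_ON caller passes a bare affinity value, so an unmasked comparison would never match. A hedged user-space sketch of that round trip, using 0xff00ffffff as a stand-in for the arm64 bitmask value (an assumption; check asm/cputype.h):

/* Illustrative sketch: why kvm_mpidr_to_vcpu() masks before comparing. */
#include <stdio.h>
#include <stdint.h>

/* Assumed arm64 value of MPIDR_HWID_BITMASK; verify against asm/cputype.h. */
#define HWID_BITMASK 0xff00ffffffULL

int main(void)
{
        uint64_t vmpidr  = (1ULL << 31) | 0x0205;       /* what reset_mpidr() stores  */
        uint64_t psci_id = 0x0205;                      /* affinity passed via CPU_ON */

        printf("raw compare:    %s\n", vmpidr == psci_id ? "match" : "no match");
        printf("masked compare: %s\n",
               (vmpidr & HWID_BITMASK) == (psci_id & HWID_BITMASK) ? "match" : "no match");
        return 0;
}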