 arch/arm/include/asm/kvm_host.h   | 10 +++++++---
 arch/arm64/include/asm/kvm_asm.h  |  3 ++-
 arch/arm64/include/asm/kvm_host.h | 16 ++++++++++------
 arch/arm64/kernel/asm-offsets.c   |  1 +
 virt/kvm/arm/arm.c                | 14 ++++++++------
 5 files changed, 28 insertions(+), 16 deletions(-)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index fe7754315e9c..2d721ab05925 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -153,9 +153,13 @@ struct kvm_cpu_context {
 	u32 cp15[NR_CP15_REGS];
 };
 
-typedef struct kvm_cpu_context kvm_cpu_context_t;
+struct kvm_host_data {
+	struct kvm_cpu_context host_ctxt;
+};
+
+typedef struct kvm_host_data kvm_host_data_t;
 
-static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
+static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt,
 					     int cpu)
 {
 	/* The host's MPIDR is immutable, so let's set it up at boot time */
@@ -185,7 +189,7 @@ struct kvm_vcpu_arch {
 	struct kvm_vcpu_fault_info fault;
 
 	/* Host FP context */
-	kvm_cpu_context_t *host_cpu_context;
+	struct kvm_cpu_context *host_cpu_context;
 
 	/* VGIC state */
 	struct vgic_cpu vgic_cpu;
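
On the 32-bit side the change is purely structural: the per-CPU host register context is wrapped in a new struct kvm_host_data container, the kvm_cpu_context_t typedef is dropped in favour of the plain struct name, and a kvm_host_data_t typedef takes its place, presumably so further per-CPU host state can later sit next to host_ctxt without touching every user again. A minimal sketch of the resulting access pattern (host_ctxt_of is an illustrative helper, not part of the patch):

    struct kvm_host_data {
            struct kvm_cpu_context host_ctxt;   /* host register save area */
    };

    typedef struct kvm_host_data kvm_host_data_t;

    /* Illustrative only: callers that used to hold a kvm_cpu_context_t
     * now reach the same context through the wrapper. */
    static inline struct kvm_cpu_context *host_ctxt_of(kvm_host_data_t *data)
    {
            return &data->host_ctxt;
    }
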
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index f5b79e995f40..ff73f5462aca 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -108,7 +108,8 @@ extern u32 __kvm_get_mdcr_el2(void);
 .endm
 
 .macro get_host_ctxt reg, tmp
-	hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
+	hyp_adr_this_cpu \reg, kvm_host_data, \tmp
+	add	\reg, \reg, #HOST_DATA_CONTEXT
 .endm
 
 .macro get_vcpu_ptr vcpu, ctxt
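
The assembly helper now resolves the per-CPU kvm_host_data and then adds the byte offset of the embedded host_ctxt, so its callers still end up with a pointer to a struct kvm_cpu_context. Ignoring the EL2 per-CPU addressing that hyp_adr_this_cpu performs through tpidr_el2, the macro is roughly equivalent to this C sketch (the function name is illustrative):

    /* Rough C analogue of the updated get_host_ctxt macro. */
    static struct kvm_cpu_context *get_host_ctxt_sketch(void)
    {
            kvm_host_data_t *data = this_cpu_ptr(&kvm_host_data);

            /* The new "add \reg, \reg, #HOST_DATA_CONTEXT" corresponds to
             * stepping from the wrapper to its host_ctxt member. */
            return &data->host_ctxt;
    }
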
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f772ac2fb3e9..9ba59832b71a 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -233,7 +233,11 @@ struct kvm_cpu_context {
 	struct kvm_vcpu *__hyp_running_vcpu;
 };
 
-typedef struct kvm_cpu_context kvm_cpu_context_t;
+struct kvm_host_data {
+	struct kvm_cpu_context host_ctxt;
+};
+
+typedef struct kvm_host_data kvm_host_data_t;
 
 struct vcpu_reset_state {
 	unsigned long pc;
@@ -278,7 +282,7 @@ struct kvm_vcpu_arch {
 	struct kvm_guest_debug_arch external_debug_state;
 
 	/* Pointer to host CPU context */
-	kvm_cpu_context_t *host_cpu_context;
+	struct kvm_cpu_context *host_cpu_context;
 
 	struct thread_info *host_thread_info;	/* hyp VA */
 	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
@@ -483,9 +487,9 @@ void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
-DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
 
-static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
+static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt,
 					     int cpu)
 {
 	/* The host's MPIDR is immutable, so let's set it up at boot time */
@@ -503,8 +507,8 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
 	 * so that we can use adr_l to access per-cpu variables in EL2.
 	 */
-	u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_cpu_state) -
-			 (u64)kvm_ksym_ref(kvm_host_cpu_state));
+	u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_data) -
+			 (u64)kvm_ksym_ref(kvm_host_data));
 
 	/*
 	 * Call initialization code, and switch to the full blown HYP code.
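
The arm64 header mirrors the 32-bit change, and the per-CPU variable itself is renamed from kvm_host_cpu_state to kvm_host_data with the new wrapper type. The tpidr_el2 setup only has to follow the rename, because what it stores is the distance between this CPU's copy of the variable and the variable's kernel-symbol address; that offset is what lets EL2 code find its own per-CPU copy with adr_l plus tpidr_el2. A sketch of that arithmetic, lifted from the hunk above (the function wrapper is illustrative):

    /* The value programmed into tpidr_el2: per-CPU copy address minus the
     * kernel-image symbol address, i.e. this CPU's per-CPU offset. */
    static u64 compute_tpidr_el2_sketch(void)
    {
            return (u64)this_cpu_ptr(&kvm_host_data) -
                   (u64)kvm_ksym_ref(kvm_host_data);
    }
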
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 8178330a9f7a..768b23101ff0 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -134,6 +134,7 @@ int main(void)
   DEFINE(CPU_APGAKEYLO_EL1,	offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));
   DEFINE(CPU_USER_PT_REGS,	offsetof(struct kvm_regs, regs));
   DEFINE(HOST_CONTEXT_VCPU,	offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
+  DEFINE(HOST_DATA_CONTEXT,	offsetof(struct kvm_host_data, host_ctxt));
 #endif
 #ifdef CONFIG_CPU_PM
   DEFINE(CPU_CTX_SP,		offsetof(struct cpu_suspend_ctx, sp));
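
asm-offsets.c is where the #HOST_DATA_CONTEXT immediate used by get_host_ctxt comes from: the build compiles this file, scrapes the emitted values into asm-offsets.h, and assembly code can then use the C structure offset as a plain constant. A simplified, self-contained sketch of the mechanism (the DEFINE body here is a stand-in for the kernel's kbuild helper, and the struct layout is a placeholder):

    #include <stddef.h>

    struct kvm_cpu_context { unsigned long regs[64]; };   /* placeholder layout */
    struct kvm_host_data   { struct kvm_cpu_context host_ctxt; };

    /* Stand-in for the kernel's DEFINE(): embed "name value" in the object
     * file so a build script can turn it into a #define in asm-offsets.h. */
    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

    void emit_offsets(void)
    {
            DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt));
    }
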
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 156c09da9e2b..e960b91551d6 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -56,7 +56,7 @@
 __asm__(".arch_extension	virt");
 #endif
 
-DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
 
 /* Per-CPU variable containing the currently running vcpu. */
@@ -360,8 +360,10 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	int *last_ran;
+	kvm_host_data_t *cpu_data;
 
 	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+	cpu_data = this_cpu_ptr(&kvm_host_data);
 
 	/*
 	 * We might get preempted before the vCPU actually runs, but
@@ -373,7 +375,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 
 	vcpu->cpu = cpu;
-	vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state);
+	vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
 
 	kvm_arm_set_running_vcpu(vcpu);
 	kvm_vgic_load(vcpu);
@@ -1569,11 +1571,11 @@ static int init_hyp_mode(void)
 	}
 
 	for_each_possible_cpu(cpu) {
-		kvm_cpu_context_t *cpu_ctxt;
+		kvm_host_data_t *cpu_data;
 
-		cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
-		kvm_init_host_cpu_context(cpu_ctxt, cpu);
-		err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
+		cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
+		kvm_init_host_cpu_context(&cpu_data->host_ctxt, cpu);
+		err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);
 
 		if (err) {
 			kvm_err("Cannot map host CPU state: %d\n", err);
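
At runtime the only visible difference is one extra level of indirection: kvm_arch_vcpu_load grabs this CPU's kvm_host_data and stores a pointer to its embedded host_ctxt in the vcpu, while init_hyp_mode initializes and maps the whole wrapper into hyp, so anything later added to struct kvm_host_data becomes reachable at EL2 as well. A condensed sketch of the load-path change, with unrelated state and error handling omitted (the function name is illustrative):

    /* The vcpu still ends up holding a struct kvm_cpu_context pointer; it is
     * simply reached through the per-CPU kvm_host_data wrapper now. */
    static void vcpu_load_host_ctxt_sketch(struct kvm_vcpu *vcpu)
    {
            kvm_host_data_t *cpu_data = this_cpu_ptr(&kvm_host_data);

            vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
    }
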