diff options
author | Marc Zyngier <marc.zyngier@arm.com> | 2014-01-22 04:43:38 -0500 |
---|---|---|
committer | Marc Zyngier <marc.zyngier@arm.com> | 2014-03-02 20:15:23 -0500 |
commit | ac30a11e8e92a03dbe236b285c5cbae0bf563141 (patch) | |
tree | fe7e8a2493f46af06b0c59c18f71ab60562b3d15 /arch | |
parent | 547f781378a22b65c2ab468f235c23001b5924da (diff) |
ARM: KVM: introduce per-vcpu HYP Configuration Register
So far, KVM/ARM used a fixed HCR configuration per guest, except for
the VI/VF/VA bits, which are used to control interrupts in the absence of a VGIC.
With the upcoming need to dynamically reconfigure trapping, it becomes
necessary to allow the HCR to be changed on a per-vcpu basis.
The fix here is to mimic what KVM/arm64 already does: a per vcpu HCR
field, initialized at setup time.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm/include/asm/kvm_arm.h | 1 | ||||
-rw-r--r-- | arch/arm/include/asm/kvm_host.h | 9 | ||||
-rw-r--r-- | arch/arm/kernel/asm-offsets.c | 1 | ||||
-rw-r--r-- | arch/arm/kvm/guest.c | 1 | ||||
-rw-r--r-- | arch/arm/kvm/interrupts_head.S | 9 |
5 files changed, 11 insertions, 10 deletions
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index 1d3153c7eb41..a843e74a384c 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h | |||
@@ -69,7 +69,6 @@ | |||
69 | #define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \ | 69 | #define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \ |
70 | HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \ | 70 | HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \ |
71 | HCR_TWE | HCR_SWIO | HCR_TIDCP) | 71 | HCR_TWE | HCR_SWIO | HCR_TIDCP) |
72 | #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) | ||
73 | 72 | ||
74 | /* System Control Register (SCTLR) bits */ | 73 | /* System Control Register (SCTLR) bits */ |
75 | #define SCTLR_TE (1 << 30) | 74 | #define SCTLR_TE (1 << 30) |
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 098f7dd6d564..09af14999c9b 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h | |||
@@ -101,6 +101,12 @@ struct kvm_vcpu_arch { | |||
101 | /* The CPU type we expose to the VM */ | 101 | /* The CPU type we expose to the VM */ |
102 | u32 midr; | 102 | u32 midr; |
103 | 103 | ||
104 | /* HYP trapping configuration */ | ||
105 | u32 hcr; | ||
106 | |||
107 | /* Interrupt related fields */ | ||
108 | u32 irq_lines; /* IRQ and FIQ levels */ | ||
109 | |||
104 | /* Exception Information */ | 110 | /* Exception Information */ |
105 | struct kvm_vcpu_fault_info fault; | 111 | struct kvm_vcpu_fault_info fault; |
106 | 112 | ||
@@ -128,9 +134,6 @@ struct kvm_vcpu_arch { | |||
128 | /* IO related fields */ | 134 | /* IO related fields */ |
129 | struct kvm_decode mmio_decode; | 135 | struct kvm_decode mmio_decode; |
130 | 136 | ||
131 | /* Interrupt related fields */ | ||
132 | u32 irq_lines; /* IRQ and FIQ levels */ | ||
133 | |||
134 | /* Cache some mmu pages needed inside spinlock regions */ | 137 | /* Cache some mmu pages needed inside spinlock regions */ |
135 | struct kvm_mmu_memory_cache mmu_page_cache; | 138 | struct kvm_mmu_memory_cache mmu_page_cache; |
136 | 139 | ||
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index ded041711beb..85598b5d1efd 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c | |||
@@ -174,6 +174,7 @@ int main(void) | |||
174 | DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs)); | 174 | DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs)); |
175 | DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc)); | 175 | DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc)); |
176 | DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); | 176 | DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); |
177 | DEFINE(VCPU_HCR, offsetof(struct kvm_vcpu, arch.hcr)); | ||
177 | DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); | 178 | DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); |
178 | DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr)); | 179 | DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr)); |
179 | DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar)); | 180 | DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar)); |
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index 2786eae10c0d..b23a59c1c522 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c | |||
@@ -38,6 +38,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
38 | 38 | ||
39 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | 39 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) |
40 | { | 40 | { |
41 | vcpu->arch.hcr = HCR_GUEST_MASK; | ||
41 | return 0; | 42 | return 0; |
42 | } | 43 | } |
43 | 44 | ||
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S index 6f18695a09cb..a37270d7d4d6 100644 --- a/arch/arm/kvm/interrupts_head.S +++ b/arch/arm/kvm/interrupts_head.S | |||
@@ -597,17 +597,14 @@ vcpu .req r0 @ vcpu pointer always in r0 | |||
597 | 597 | ||
598 | /* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */ | 598 | /* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */ |
599 | .macro configure_hyp_role operation | 599 | .macro configure_hyp_role operation |
600 | mrc p15, 4, r2, c1, c1, 0 @ HCR | ||
601 | bic r2, r2, #HCR_VIRT_EXCP_MASK | ||
602 | ldr r3, =HCR_GUEST_MASK | ||
603 | .if \operation == vmentry | 600 | .if \operation == vmentry |
604 | orr r2, r2, r3 | 601 | ldr r2, [vcpu, #VCPU_HCR] |
605 | ldr r3, [vcpu, #VCPU_IRQ_LINES] | 602 | ldr r3, [vcpu, #VCPU_IRQ_LINES] |
606 | orr r2, r2, r3 | 603 | orr r2, r2, r3 |
607 | .else | 604 | .else |
608 | bic r2, r2, r3 | 605 | mov r2, #0 |
609 | .endif | 606 | .endif |
610 | mcr p15, 4, r2, c1, c1, 0 | 607 | mcr p15, 4, r2, c1, c1, 0 @ HCR |
611 | .endm | 608 | .endm |
612 | 609 | ||
613 | .macro load_vcpu | 610 | .macro load_vcpu |