path: root/arch/arm64
author     Linus Torvalds <torvalds@linux-foundation.org>  2014-08-07 14:35:30 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-08-07 14:35:30 -0400
commit     66bb0aa077978dbb76e6283531eb3cc7a878de38 (patch)
tree       62a28a96cb43df2d8f7c6eb14d4676a1e2ce3887 /arch/arm64
parent     e306e3be1cbe5b11d0f8a53a557c205cf27e4979 (diff)
parent     c77dcacb397519b6ade8f08201a4a90a7f4f751e (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull second round of KVM changes from Paolo Bonzini:
 "Here are the PPC and ARM changes for KVM, which I separated because
  they had small conflicts (respectively within KVM documentation, and
  with 3.16-rc changes). Since they were all within the subsystem, I
  took care of them.

  Stephen Rothwell reported some snags in PPC builds, but they are all
  fixed now; the latest linux-next report was clean.

  New features for ARM include:
   - KVM VGIC v2 emulation on GICv3 hardware
   - Big-Endian support for arm/arm64 (guest and host)
   - Debug Architecture support for arm64 (arm32 is on Christoffer's todo list)

  And for PPC:
   - Book3S: Good number of LE host fixes, enable HV on LE
   - Book3S HV: Add in-guest debug support

  This release drops support for KVM on the PPC440. As a result, the
  PPC merge removes more lines than it adds. :)

  I also included an x86 change, since Davidlohr tied it to an
  independent bug report and the reporter quickly provided a Tested-by;
  there was no reason to wait for -rc2"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (122 commits)
  KVM: Move more code under CONFIG_HAVE_KVM_IRQFD
  KVM: nVMX: fix "acknowledge interrupt on exit" when APICv is in use
  KVM: nVMX: Fix nested vmexit ack intr before load vmcs01
  KVM: PPC: Enable IRQFD support for the XICS interrupt controller
  KVM: Give IRQFD its own separate enabling Kconfig option
  KVM: Move irq notifier implementation into eventfd.c
  KVM: Move all accesses to kvm::irq_routing into irqchip.c
  KVM: irqchip: Provide and use accessors for irq routing table
  KVM: Don't keep reference to irq routing table in irqfd struct
  KVM: PPC: drop duplicate tracepoint
  arm64: KVM: fix 64bit CP15 VM access for 32bit guests
  KVM: arm64: GICv3: mandate page-aligned GICV region
  arm64: KVM: GICv3: move system register access to msr_s/mrs_s
  KVM: PPC: PR: Handle FSCR feature deselects
  KVM: PPC: HV: Remove generic instruction emulation
  KVM: PPC: BOOKEHV: rename e500hv_spr to bookehv_spr
  KVM: PPC: Remove DCR handling
  KVM: PPC: Expose helper functions for data/inst faults
  KVM: PPC: Separate loadstore emulation from priv emulation
  KVM: PPC: Handle magic page in kvmppc_ld/st
  ...
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h  |  19
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h         |   5
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h         |  53
-rw-r--r--  arch/arm64/include/asm/kvm_coproc.h      |   3
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h     |  22
-rw-r--r--  arch/arm64/include/asm/kvm_host.h        |  48
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h         |  15
-rw-r--r--  arch/arm64/include/asm/virt.h            |   4
-rw-r--r--  arch/arm64/kernel/asm-offsets.c          |  26
-rw-r--r--  arch/arm64/kernel/debug-monitors.c       |   9
-rw-r--r--  arch/arm64/kvm/Makefile                  |   4
-rw-r--r--  arch/arm64/kvm/guest.c                   |  68
-rw-r--r--  arch/arm64/kvm/handle_exit.c             |   4
-rw-r--r--  arch/arm64/kvm/hyp.S                     | 600
-rw-r--r--  arch/arm64/kvm/sys_regs.c                | 546
-rw-r--r--  arch/arm64/kvm/vgic-v2-switch.S          | 133
-rw-r--r--  arch/arm64/kvm/vgic-v3-switch.S          | 267
17 files changed, 1608 insertions, 218 deletions
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 6e9b5b36921c..7fb343779498 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -18,6 +18,15 @@
18 18
19#ifdef __KERNEL__ 19#ifdef __KERNEL__
20 20
21/* Low-level stepping controls. */
22#define DBG_MDSCR_SS (1 << 0)
23#define DBG_SPSR_SS (1 << 21)
24
25/* MDSCR_EL1 enabling bits */
26#define DBG_MDSCR_KDE (1 << 13)
27#define DBG_MDSCR_MDE (1 << 15)
28#define DBG_MDSCR_MASK ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
29
21#define DBG_ESR_EVT(x) (((x) >> 27) & 0x7) 30#define DBG_ESR_EVT(x) (((x) >> 27) & 0x7)
22 31
23/* AArch64 */ 32/* AArch64 */
@@ -73,11 +82,6 @@
73 82
74#define CACHE_FLUSH_IS_SAFE 1 83#define CACHE_FLUSH_IS_SAFE 1
75 84
76enum debug_el {
77 DBG_ACTIVE_EL0 = 0,
78 DBG_ACTIVE_EL1,
79};
80
81/* AArch32 */ 85/* AArch32 */
82#define DBG_ESR_EVT_BKPT 0x4 86#define DBG_ESR_EVT_BKPT 0x4
83#define DBG_ESR_EVT_VECC 0x5 87#define DBG_ESR_EVT_VECC 0x5
@@ -115,6 +119,11 @@ void unregister_break_hook(struct break_hook *hook);
115 119
116u8 debug_monitors_arch(void); 120u8 debug_monitors_arch(void);
117 121
122enum debug_el {
123 DBG_ACTIVE_EL0 = 0,
124 DBG_ACTIVE_EL1,
125};
126
118void enable_debug_monitors(enum debug_el el); 127void enable_debug_monitors(enum debug_el el);
119void disable_debug_monitors(enum debug_el el); 128void disable_debug_monitors(enum debug_el el);
120 129
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 3d6903006a8a..cc83520459ed 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -76,9 +76,10 @@
76 */ 76 */
77#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ 77#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
78 HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \ 78 HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
79 HCR_AMO | HCR_IMO | HCR_FMO | \ 79 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
80 HCR_SWIO | HCR_TIDCP | HCR_RW)
81#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) 80#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
81#define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
82
82 83
83/* Hyp System Control Register (SCTLR_EL2) bits */ 84/* Hyp System Control Register (SCTLR_EL2) bits */
84#define SCTLR_EL2_EE (1 << 25) 85#define SCTLR_EL2_EE (1 << 25)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 9fcd54b1e16d..483842180f8f 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -18,6 +18,8 @@
18#ifndef __ARM_KVM_ASM_H__ 18#ifndef __ARM_KVM_ASM_H__
19#define __ARM_KVM_ASM_H__ 19#define __ARM_KVM_ASM_H__
20 20
21#include <asm/virt.h>
22
21/* 23/*
22 * 0 is reserved as an invalid value. 24 * 0 is reserved as an invalid value.
23 * Order *must* be kept in sync with the hyp switch code. 25 * Order *must* be kept in sync with the hyp switch code.
@@ -43,14 +45,25 @@
43#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */ 45#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
44#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */ 46#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
45#define PAR_EL1 21 /* Physical Address Register */ 47#define PAR_EL1 21 /* Physical Address Register */
48#define MDSCR_EL1 22 /* Monitor Debug System Control Register */
49#define DBGBCR0_EL1 23 /* Debug Breakpoint Control Registers (0-15) */
50#define DBGBCR15_EL1 38
51#define DBGBVR0_EL1 39 /* Debug Breakpoint Value Registers (0-15) */
52#define DBGBVR15_EL1 54
53#define DBGWCR0_EL1 55 /* Debug Watchpoint Control Registers (0-15) */
54#define DBGWCR15_EL1 70
55#define DBGWVR0_EL1 71 /* Debug Watchpoint Value Registers (0-15) */
56#define DBGWVR15_EL1 86
57#define MDCCINT_EL1 87 /* Monitor Debug Comms Channel Interrupt Enable Reg */
58
46/* 32bit specific registers. Keep them at the end of the range */ 59/* 32bit specific registers. Keep them at the end of the range */
47#define DACR32_EL2 22 /* Domain Access Control Register */ 60#define DACR32_EL2 88 /* Domain Access Control Register */
48#define IFSR32_EL2 23 /* Instruction Fault Status Register */ 61#define IFSR32_EL2 89 /* Instruction Fault Status Register */
49#define FPEXC32_EL2 24 /* Floating-Point Exception Control Register */ 62#define FPEXC32_EL2 90 /* Floating-Point Exception Control Register */
50#define DBGVCR32_EL2 25 /* Debug Vector Catch Register */ 63#define DBGVCR32_EL2 91 /* Debug Vector Catch Register */
51#define TEECR32_EL1 26 /* ThumbEE Configuration Register */ 64#define TEECR32_EL1 92 /* ThumbEE Configuration Register */
52#define TEEHBR32_EL1 27 /* ThumbEE Handler Base Register */ 65#define TEEHBR32_EL1 93 /* ThumbEE Handler Base Register */
53#define NR_SYS_REGS 28 66#define NR_SYS_REGS 94
54 67
55/* 32bit mapping */ 68/* 32bit mapping */
56#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ 69#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
@@ -82,11 +95,23 @@
82#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */ 95#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
83#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */ 96#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
84#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */ 97#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
85#define NR_CP15_REGS (NR_SYS_REGS * 2) 98
99#define cp14_DBGDSCRext (MDSCR_EL1 * 2)
100#define cp14_DBGBCR0 (DBGBCR0_EL1 * 2)
101#define cp14_DBGBVR0 (DBGBVR0_EL1 * 2)
102#define cp14_DBGBXVR0 (cp14_DBGBVR0 + 1)
103#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2)
104#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2)
105#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
106
107#define NR_COPRO_REGS (NR_SYS_REGS * 2)
86 108
87#define ARM_EXCEPTION_IRQ 0 109#define ARM_EXCEPTION_IRQ 0
88#define ARM_EXCEPTION_TRAP 1 110#define ARM_EXCEPTION_TRAP 1
89 111
112#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
113#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
114
90#ifndef __ASSEMBLY__ 115#ifndef __ASSEMBLY__
91struct kvm; 116struct kvm;
92struct kvm_vcpu; 117struct kvm_vcpu;
@@ -96,13 +121,21 @@ extern char __kvm_hyp_init_end[];
96 121
97extern char __kvm_hyp_vector[]; 122extern char __kvm_hyp_vector[];
98 123
99extern char __kvm_hyp_code_start[]; 124#define __kvm_hyp_code_start __hyp_text_start
100extern char __kvm_hyp_code_end[]; 125#define __kvm_hyp_code_end __hyp_text_end
101 126
102extern void __kvm_flush_vm_context(void); 127extern void __kvm_flush_vm_context(void);
103extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); 128extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
104 129
105extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); 130extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
131
132extern u64 __vgic_v3_get_ich_vtr_el2(void);
133
134extern char __save_vgic_v2_state[];
135extern char __restore_vgic_v2_state[];
136extern char __save_vgic_v3_state[];
137extern char __restore_vgic_v3_state[];
138
106#endif 139#endif
107 140
108#endif /* __ARM_KVM_ASM_H__ */ 141#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_coproc.h b/arch/arm64/include/asm/kvm_coproc.h
index 9a59301cd014..0b52377a6c11 100644
--- a/arch/arm64/include/asm/kvm_coproc.h
+++ b/arch/arm64/include/asm/kvm_coproc.h
@@ -39,7 +39,8 @@ void kvm_register_target_sys_reg_table(unsigned int target,
39 struct kvm_sys_reg_target_table *table); 39 struct kvm_sys_reg_target_table *table);
40 40
41int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run); 41int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
42int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run); 42int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
43int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
43int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run); 44int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
44int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run); 45int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
45int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run); 46int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index dd8ecfc3f995..fdc3e21abd8d 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -213,6 +213,17 @@ static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
213 default: 213 default:
214 return be64_to_cpu(data); 214 return be64_to_cpu(data);
215 } 215 }
216 } else {
217 switch (len) {
218 case 1:
219 return data & 0xff;
220 case 2:
221 return le16_to_cpu(data & 0xffff);
222 case 4:
223 return le32_to_cpu(data & 0xffffffff);
224 default:
225 return le64_to_cpu(data);
226 }
216 } 227 }
217 228
218 return data; /* Leave LE untouched */ 229 return data; /* Leave LE untouched */
@@ -233,6 +244,17 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
233 default: 244 default:
234 return cpu_to_be64(data); 245 return cpu_to_be64(data);
235 } 246 }
247 } else {
248 switch (len) {
249 case 1:
250 return data & 0xff;
251 case 2:
252 return cpu_to_le16(data & 0xffff);
253 case 4:
254 return cpu_to_le32(data & 0xffffffff);
255 default:
256 return cpu_to_le64(data);
257 }
236 } 258 }
237 259
238 return data; /* Leave LE untouched */ 260 return data; /* Leave LE untouched */
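The kvm_emulate.h hunk above completes the endianness handling: the existing code only byte-swapped MMIO data for big-endian guests, and the new else branches make the little-endian case explicit for each access size. Below is a minimal user-space sketch of the same narrow-then-swap idea; it is not part of the patch, mmio_host_to_guest() and guest_is_be are illustrative stand-ins for the kernel's per-vcpu endianness state, and the htobe*/htole* helpers are glibc <endian.h> extensions rather than kernel API.

#include <stdint.h>
#include <endian.h>	/* htobe16()/htole16() etc., glibc extensions */

/* Sketch: narrow the host value to the access size, then present it in the
 * guest's byte order, mirroring the vcpu_data_host_to_guest() logic above. */
static uint64_t mmio_host_to_guest(uint64_t data, int len, int guest_is_be)
{
	switch (len) {
	case 1:
		return data & 0xff;	/* single byte: no swapping needed */
	case 2:
		return guest_is_be ? htobe16(data & 0xffff)
				   : htole16(data & 0xffff);
	case 4:
		return guest_is_be ? htobe32(data & 0xffffffff)
				   : htole32(data & 0xffffffff);
	default:
		return guest_is_be ? htobe64(data) : htole64(data);
	}
}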
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 92242ce06309..e10c45a578e3 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -86,7 +86,7 @@ struct kvm_cpu_context {
86 struct kvm_regs gp_regs; 86 struct kvm_regs gp_regs;
87 union { 87 union {
88 u64 sys_regs[NR_SYS_REGS]; 88 u64 sys_regs[NR_SYS_REGS];
89 u32 cp15[NR_CP15_REGS]; 89 u32 copro[NR_COPRO_REGS];
90 }; 90 };
91}; 91};
92 92
@@ -101,6 +101,9 @@ struct kvm_vcpu_arch {
101 /* Exception Information */ 101 /* Exception Information */
102 struct kvm_vcpu_fault_info fault; 102 struct kvm_vcpu_fault_info fault;
103 103
104 /* Debug state */
105 u64 debug_flags;
106
104 /* Pointer to host CPU context */ 107 /* Pointer to host CPU context */
105 kvm_cpu_context_t *host_cpu_context; 108 kvm_cpu_context_t *host_cpu_context;
106 109
@@ -138,7 +141,20 @@ struct kvm_vcpu_arch {
138 141
139#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) 142#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
140#define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)]) 143#define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)])
141#define vcpu_cp15(v,r) ((v)->arch.ctxt.cp15[(r)]) 144/*
145 * CP14 and CP15 live in the same array, as they are backed by the
146 * same system registers.
147 */
148#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)])
149#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)])
150
151#ifdef CONFIG_CPU_BIG_ENDIAN
152#define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r))
153#define vcpu_cp15_64_low(v,r) vcpu_cp15((v),(r) + 1)
154#else
155#define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r) + 1)
156#define vcpu_cp15_64_low(v,r) vcpu_cp15((v),(r))
157#endif
142 158
143struct kvm_vm_stat { 159struct kvm_vm_stat {
144 u32 remote_tlb_flush; 160 u32 remote_tlb_flush;
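The new vcpu_cp15_64_{low,high}() macros above encode where the two 32-bit halves of a 64-bit CP15 register land in the shared copro[] array, and that placement flips with host endianness. A standalone sketch of that indexing follows; struct fake_ctxt and write_cp15_64() are made-up illustration names, not kernel code.

#include <stdint.h>

/* Illustration: a 64-bit CP15 register occupies two consecutive 32-bit
 * slots.  On a big-endian host the high word sits at index r and the low
 * word at r + 1; on a little-endian host it is the other way around. */
struct fake_ctxt { uint32_t copro[4]; };

static void write_cp15_64(struct fake_ctxt *c, int r, uint64_t val, int host_is_be)
{
	int hi = host_is_be ? r : r + 1;
	int lo = host_is_be ? r + 1 : r;

	c->copro[hi] = (uint32_t)(val >> 32);
	c->copro[lo] = (uint32_t)(val & 0xffffffffUL);
}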
@@ -200,4 +216,32 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
200 hyp_stack_ptr, vector_ptr); 216 hyp_stack_ptr, vector_ptr);
201} 217}
202 218
219struct vgic_sr_vectors {
220 void *save_vgic;
221 void *restore_vgic;
222};
223
224static inline void vgic_arch_setup(const struct vgic_params *vgic)
225{
226 extern struct vgic_sr_vectors __vgic_sr_vectors;
227
228 switch(vgic->type)
229 {
230 case VGIC_V2:
231 __vgic_sr_vectors.save_vgic = __save_vgic_v2_state;
232 __vgic_sr_vectors.restore_vgic = __restore_vgic_v2_state;
233 break;
234
235#ifdef CONFIG_ARM_GIC_V3
236 case VGIC_V3:
237 __vgic_sr_vectors.save_vgic = __save_vgic_v3_state;
238 __vgic_sr_vectors.restore_vgic = __restore_vgic_v3_state;
239 break;
240#endif
241
242 default:
243 BUG();
244 }
245}
246
203#endif /* __ARM64_KVM_HOST_H__ */ 247#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 7d29847a893b..8e138c7c53ac 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -125,6 +125,21 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
125#define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end) 125#define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end)
126#define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end) 126#define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end)
127 127
128static inline bool kvm_page_empty(void *ptr)
129{
130 struct page *ptr_page = virt_to_page(ptr);
131 return page_count(ptr_page) == 1;
132}
133
134#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
135#ifndef CONFIG_ARM64_64K_PAGES
136#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
137#else
138#define kvm_pmd_table_empty(pmdp) (0)
139#endif
140#define kvm_pud_table_empty(pudp) (0)
141
142
128struct kvm; 143struct kvm;
129 144
130#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) 145#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 215ad4649dd7..7a5df5252dd7 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -50,6 +50,10 @@ static inline bool is_hyp_mode_mismatched(void)
50 return __boot_cpu_mode[0] != __boot_cpu_mode[1]; 50 return __boot_cpu_mode[0] != __boot_cpu_mode[1];
51} 51}
52 52
53/* The section containing the hypervisor text */
54extern char __hyp_text_start[];
55extern char __hyp_text_end[];
56
53#endif /* __ASSEMBLY__ */ 57#endif /* __ASSEMBLY__ */
54 58
55#endif /* ! __ASM__VIRT_H */ 59#endif /* ! __ASM__VIRT_H */
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 646f888387cd..9a9fce090d58 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -120,6 +120,7 @@ int main(void)
120 DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2)); 120 DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2));
121 DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2)); 121 DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2));
122 DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2)); 122 DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
123 DEFINE(VCPU_DEBUG_FLAGS, offsetof(struct kvm_vcpu, arch.debug_flags));
123 DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2)); 124 DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
124 DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); 125 DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
125 DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); 126 DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
@@ -129,13 +130,24 @@ int main(void)
129 DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled)); 130 DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
130 DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); 131 DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
131 DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); 132 DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
132 DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr)); 133 DEFINE(VGIC_SAVE_FN, offsetof(struct vgic_sr_vectors, save_vgic));
133 DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr)); 134 DEFINE(VGIC_RESTORE_FN, offsetof(struct vgic_sr_vectors, restore_vgic));
134 DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr)); 135 DEFINE(VGIC_SR_VECTOR_SZ, sizeof(struct vgic_sr_vectors));
135 DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr)); 136 DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
136 DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr)); 137 DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
137 DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr)); 138 DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
138 DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr)); 139 DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
140 DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
141 DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
142 DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
143 DEFINE(VGIC_V3_CPU_HCR, offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
144 DEFINE(VGIC_V3_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
145 DEFINE(VGIC_V3_CPU_MISR, offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
146 DEFINE(VGIC_V3_CPU_EISR, offsetof(struct vgic_cpu, vgic_v3.vgic_eisr));
147 DEFINE(VGIC_V3_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr));
148 DEFINE(VGIC_V3_CPU_AP0R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r));
149 DEFINE(VGIC_V3_CPU_AP1R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r));
150 DEFINE(VGIC_V3_CPU_LR, offsetof(struct vgic_cpu, vgic_v3.vgic_lr));
139 DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); 151 DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
140 DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); 152 DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
141 DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); 153 DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index fe5b94078d82..b056369fd47d 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -30,15 +30,6 @@
30#include <asm/cputype.h> 30#include <asm/cputype.h>
31#include <asm/system_misc.h> 31#include <asm/system_misc.h>
32 32
33/* Low-level stepping controls. */
34#define DBG_MDSCR_SS (1 << 0)
35#define DBG_SPSR_SS (1 << 21)
36
37/* MDSCR_EL1 enabling bits */
38#define DBG_MDSCR_KDE (1 << 13)
39#define DBG_MDSCR_MDE (1 << 15)
40#define DBG_MDSCR_MASK ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
41
42/* Determine debug architecture. */ 33/* Determine debug architecture. */
43u8 debug_monitors_arch(void) 34u8 debug_monitors_arch(void)
44{ 35{
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 72a9fd583ad3..32a096174b94 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -20,4 +20,8 @@ kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
20kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o 20kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
21 21
22kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o 22kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
23kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
24kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
25kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
26kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o
23kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o 27kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 60b5c31f3c10..8d1ec2887a26 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -136,13 +136,67 @@ static unsigned long num_core_regs(void)
136} 136}
137 137
138/** 138/**
139 * ARM64 versions of the TIMER registers, always available on arm64
140 */
141
142#define NUM_TIMER_REGS 3
143
144static bool is_timer_reg(u64 index)
145{
146 switch (index) {
147 case KVM_REG_ARM_TIMER_CTL:
148 case KVM_REG_ARM_TIMER_CNT:
149 case KVM_REG_ARM_TIMER_CVAL:
150 return true;
151 }
152 return false;
153}
154
155static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
156{
157 if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
158 return -EFAULT;
159 uindices++;
160 if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
161 return -EFAULT;
162 uindices++;
163 if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
164 return -EFAULT;
165
166 return 0;
167}
168
169static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
170{
171 void __user *uaddr = (void __user *)(long)reg->addr;
172 u64 val;
173 int ret;
174
175 ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
176 if (ret != 0)
177 return ret;
178
179 return kvm_arm_timer_set_reg(vcpu, reg->id, val);
180}
181
182static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
183{
184 void __user *uaddr = (void __user *)(long)reg->addr;
185 u64 val;
186
187 val = kvm_arm_timer_get_reg(vcpu, reg->id);
188 return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
189}
190
191/**
139 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG 192 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
140 * 193 *
141 * This is for all registers. 194 * This is for all registers.
142 */ 195 */
143unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) 196unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
144{ 197{
145 return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu); 198 return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
199 + NUM_TIMER_REGS;
146} 200}
147 201
148/** 202/**
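The timer registers added above are reached through the regular KVM_GET_ONE_REG / KVM_SET_ONE_REG interface. A user-space sketch of reading the virtual counter on arm64 is shown below; vcpu_fd is assumed to be an already created vcpu file descriptor, read_guest_timer_cnt() is an illustrative name, and KVM_REG_ARM_TIMER_CNT comes from the uapi asm/kvm.h pulled in via linux/kvm.h.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* struct kvm_one_reg, KVM_GET_ONE_REG */

/* Sketch: fetch KVM_REG_ARM_TIMER_CNT (exposed by this patch) for one vcpu. */
static int read_guest_timer_cnt(int vcpu_fd, uint64_t *cnt)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_TIMER_CNT,
		.addr = (uint64_t)(unsigned long)cnt,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}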
@@ -154,6 +208,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
154{ 208{
155 unsigned int i; 209 unsigned int i;
156 const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE; 210 const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
211 int ret;
157 212
158 for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) { 213 for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
159 if (put_user(core_reg | i, uindices)) 214 if (put_user(core_reg | i, uindices))
@@ -161,6 +216,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
161 uindices++; 216 uindices++;
162 } 217 }
163 218
219 ret = copy_timer_indices(vcpu, uindices);
220 if (ret)
221 return ret;
222 uindices += NUM_TIMER_REGS;
223
164 return kvm_arm_copy_sys_reg_indices(vcpu, uindices); 224 return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
165} 225}
166 226
@@ -174,6 +234,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
174 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 234 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
175 return get_core_reg(vcpu, reg); 235 return get_core_reg(vcpu, reg);
176 236
237 if (is_timer_reg(reg->id))
238 return get_timer_reg(vcpu, reg);
239
177 return kvm_arm_sys_reg_get_reg(vcpu, reg); 240 return kvm_arm_sys_reg_get_reg(vcpu, reg);
178} 241}
179 242
@@ -187,6 +250,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
187 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 250 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
188 return set_core_reg(vcpu, reg); 251 return set_core_reg(vcpu, reg);
189 252
253 if (is_timer_reg(reg->id))
254 return set_timer_reg(vcpu, reg);
255
190 return kvm_arm_sys_reg_set_reg(vcpu, reg); 256 return kvm_arm_sys_reg_set_reg(vcpu, reg);
191} 257}
192 258
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 182415e1a952..e28be510380c 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -73,9 +73,9 @@ static exit_handle_fn arm_exit_handlers[] = {
73 [ESR_EL2_EC_WFI] = kvm_handle_wfx, 73 [ESR_EL2_EC_WFI] = kvm_handle_wfx,
74 [ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32, 74 [ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32,
75 [ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64, 75 [ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64,
76 [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_access, 76 [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_32,
77 [ESR_EL2_EC_CP14_LS] = kvm_handle_cp14_load_store, 77 [ESR_EL2_EC_CP14_LS] = kvm_handle_cp14_load_store,
78 [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_access, 78 [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_64,
79 [ESR_EL2_EC_HVC32] = handle_hvc, 79 [ESR_EL2_EC_HVC32] = handle_hvc,
80 [ESR_EL2_EC_SMC32] = handle_smc, 80 [ESR_EL2_EC_SMC32] = handle_smc,
81 [ESR_EL2_EC_HVC64] = handle_hvc, 81 [ESR_EL2_EC_HVC64] = handle_hvc,
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index b0d1512acf08..b72aa9f9215c 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -16,11 +16,11 @@
16 */ 16 */
17 17
18#include <linux/linkage.h> 18#include <linux/linkage.h>
19#include <linux/irqchip/arm-gic.h>
20 19
21#include <asm/assembler.h> 20#include <asm/assembler.h>
22#include <asm/memory.h> 21#include <asm/memory.h>
23#include <asm/asm-offsets.h> 22#include <asm/asm-offsets.h>
23#include <asm/debug-monitors.h>
24#include <asm/fpsimdmacros.h> 24#include <asm/fpsimdmacros.h>
25#include <asm/kvm.h> 25#include <asm/kvm.h>
26#include <asm/kvm_asm.h> 26#include <asm/kvm_asm.h>
@@ -36,9 +36,6 @@
36 .pushsection .hyp.text, "ax" 36 .pushsection .hyp.text, "ax"
37 .align PAGE_SHIFT 37 .align PAGE_SHIFT
38 38
39__kvm_hyp_code_start:
40 .globl __kvm_hyp_code_start
41
42.macro save_common_regs 39.macro save_common_regs
43 // x2: base address for cpu context 40 // x2: base address for cpu context
44 // x3: tmp register 41 // x3: tmp register
@@ -215,6 +212,7 @@ __kvm_hyp_code_start:
215 mrs x22, amair_el1 212 mrs x22, amair_el1
216 mrs x23, cntkctl_el1 213 mrs x23, cntkctl_el1
217 mrs x24, par_el1 214 mrs x24, par_el1
215 mrs x25, mdscr_el1
218 216
219 stp x4, x5, [x3] 217 stp x4, x5, [x3]
220 stp x6, x7, [x3, #16] 218 stp x6, x7, [x3, #16]
@@ -226,7 +224,202 @@ __kvm_hyp_code_start:
226 stp x18, x19, [x3, #112] 224 stp x18, x19, [x3, #112]
227 stp x20, x21, [x3, #128] 225 stp x20, x21, [x3, #128]
228 stp x22, x23, [x3, #144] 226 stp x22, x23, [x3, #144]
229 str x24, [x3, #160] 227 stp x24, x25, [x3, #160]
228.endm
229
230.macro save_debug
231 // x2: base address for cpu context
232 // x3: tmp register
233
234 mrs x26, id_aa64dfr0_el1
235 ubfx x24, x26, #12, #4 // Extract BRPs
236 ubfx x25, x26, #20, #4 // Extract WRPs
237 mov w26, #15
238 sub w24, w26, w24 // How many BPs to skip
239 sub w25, w26, w25 // How many WPs to skip
240
241 add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
242
243 adr x26, 1f
244 add x26, x26, x24, lsl #2
245 br x26
2461:
247 mrs x20, dbgbcr15_el1
248 mrs x19, dbgbcr14_el1
249 mrs x18, dbgbcr13_el1
250 mrs x17, dbgbcr12_el1
251 mrs x16, dbgbcr11_el1
252 mrs x15, dbgbcr10_el1
253 mrs x14, dbgbcr9_el1
254 mrs x13, dbgbcr8_el1
255 mrs x12, dbgbcr7_el1
256 mrs x11, dbgbcr6_el1
257 mrs x10, dbgbcr5_el1
258 mrs x9, dbgbcr4_el1
259 mrs x8, dbgbcr3_el1
260 mrs x7, dbgbcr2_el1
261 mrs x6, dbgbcr1_el1
262 mrs x5, dbgbcr0_el1
263
264 adr x26, 1f
265 add x26, x26, x24, lsl #2
266 br x26
267
2681:
269 str x20, [x3, #(15 * 8)]
270 str x19, [x3, #(14 * 8)]
271 str x18, [x3, #(13 * 8)]
272 str x17, [x3, #(12 * 8)]
273 str x16, [x3, #(11 * 8)]
274 str x15, [x3, #(10 * 8)]
275 str x14, [x3, #(9 * 8)]
276 str x13, [x3, #(8 * 8)]
277 str x12, [x3, #(7 * 8)]
278 str x11, [x3, #(6 * 8)]
279 str x10, [x3, #(5 * 8)]
280 str x9, [x3, #(4 * 8)]
281 str x8, [x3, #(3 * 8)]
282 str x7, [x3, #(2 * 8)]
283 str x6, [x3, #(1 * 8)]
284 str x5, [x3, #(0 * 8)]
285
286 add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
287
288 adr x26, 1f
289 add x26, x26, x24, lsl #2
290 br x26
2911:
292 mrs x20, dbgbvr15_el1
293 mrs x19, dbgbvr14_el1
294 mrs x18, dbgbvr13_el1
295 mrs x17, dbgbvr12_el1
296 mrs x16, dbgbvr11_el1
297 mrs x15, dbgbvr10_el1
298 mrs x14, dbgbvr9_el1
299 mrs x13, dbgbvr8_el1
300 mrs x12, dbgbvr7_el1
301 mrs x11, dbgbvr6_el1
302 mrs x10, dbgbvr5_el1
303 mrs x9, dbgbvr4_el1
304 mrs x8, dbgbvr3_el1
305 mrs x7, dbgbvr2_el1
306 mrs x6, dbgbvr1_el1
307 mrs x5, dbgbvr0_el1
308
309 adr x26, 1f
310 add x26, x26, x24, lsl #2
311 br x26
312
3131:
314 str x20, [x3, #(15 * 8)]
315 str x19, [x3, #(14 * 8)]
316 str x18, [x3, #(13 * 8)]
317 str x17, [x3, #(12 * 8)]
318 str x16, [x3, #(11 * 8)]
319 str x15, [x3, #(10 * 8)]
320 str x14, [x3, #(9 * 8)]
321 str x13, [x3, #(8 * 8)]
322 str x12, [x3, #(7 * 8)]
323 str x11, [x3, #(6 * 8)]
324 str x10, [x3, #(5 * 8)]
325 str x9, [x3, #(4 * 8)]
326 str x8, [x3, #(3 * 8)]
327 str x7, [x3, #(2 * 8)]
328 str x6, [x3, #(1 * 8)]
329 str x5, [x3, #(0 * 8)]
330
331 add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
332
333 adr x26, 1f
334 add x26, x26, x25, lsl #2
335 br x26
3361:
337 mrs x20, dbgwcr15_el1
338 mrs x19, dbgwcr14_el1
339 mrs x18, dbgwcr13_el1
340 mrs x17, dbgwcr12_el1
341 mrs x16, dbgwcr11_el1
342 mrs x15, dbgwcr10_el1
343 mrs x14, dbgwcr9_el1
344 mrs x13, dbgwcr8_el1
345 mrs x12, dbgwcr7_el1
346 mrs x11, dbgwcr6_el1
347 mrs x10, dbgwcr5_el1
348 mrs x9, dbgwcr4_el1
349 mrs x8, dbgwcr3_el1
350 mrs x7, dbgwcr2_el1
351 mrs x6, dbgwcr1_el1
352 mrs x5, dbgwcr0_el1
353
354 adr x26, 1f
355 add x26, x26, x25, lsl #2
356 br x26
357
3581:
359 str x20, [x3, #(15 * 8)]
360 str x19, [x3, #(14 * 8)]
361 str x18, [x3, #(13 * 8)]
362 str x17, [x3, #(12 * 8)]
363 str x16, [x3, #(11 * 8)]
364 str x15, [x3, #(10 * 8)]
365 str x14, [x3, #(9 * 8)]
366 str x13, [x3, #(8 * 8)]
367 str x12, [x3, #(7 * 8)]
368 str x11, [x3, #(6 * 8)]
369 str x10, [x3, #(5 * 8)]
370 str x9, [x3, #(4 * 8)]
371 str x8, [x3, #(3 * 8)]
372 str x7, [x3, #(2 * 8)]
373 str x6, [x3, #(1 * 8)]
374 str x5, [x3, #(0 * 8)]
375
376 add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
377
378 adr x26, 1f
379 add x26, x26, x25, lsl #2
380 br x26
3811:
382 mrs x20, dbgwvr15_el1
383 mrs x19, dbgwvr14_el1
384 mrs x18, dbgwvr13_el1
385 mrs x17, dbgwvr12_el1
386 mrs x16, dbgwvr11_el1
387 mrs x15, dbgwvr10_el1
388 mrs x14, dbgwvr9_el1
389 mrs x13, dbgwvr8_el1
390 mrs x12, dbgwvr7_el1
391 mrs x11, dbgwvr6_el1
392 mrs x10, dbgwvr5_el1
393 mrs x9, dbgwvr4_el1
394 mrs x8, dbgwvr3_el1
395 mrs x7, dbgwvr2_el1
396 mrs x6, dbgwvr1_el1
397 mrs x5, dbgwvr0_el1
398
399 adr x26, 1f
400 add x26, x26, x25, lsl #2
401 br x26
402
4031:
404 str x20, [x3, #(15 * 8)]
405 str x19, [x3, #(14 * 8)]
406 str x18, [x3, #(13 * 8)]
407 str x17, [x3, #(12 * 8)]
408 str x16, [x3, #(11 * 8)]
409 str x15, [x3, #(10 * 8)]
410 str x14, [x3, #(9 * 8)]
411 str x13, [x3, #(8 * 8)]
412 str x12, [x3, #(7 * 8)]
413 str x11, [x3, #(6 * 8)]
414 str x10, [x3, #(5 * 8)]
415 str x9, [x3, #(4 * 8)]
416 str x8, [x3, #(3 * 8)]
417 str x7, [x3, #(2 * 8)]
418 str x6, [x3, #(1 * 8)]
419 str x5, [x3, #(0 * 8)]
420
421 mrs x21, mdccint_el1
422 str x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
230.endm 423.endm
231 424
232.macro restore_sysregs 425.macro restore_sysregs
@@ -245,7 +438,7 @@ __kvm_hyp_code_start:
245 ldp x18, x19, [x3, #112] 438 ldp x18, x19, [x3, #112]
246 ldp x20, x21, [x3, #128] 439 ldp x20, x21, [x3, #128]
247 ldp x22, x23, [x3, #144] 440 ldp x22, x23, [x3, #144]
248 ldr x24, [x3, #160] 441 ldp x24, x25, [x3, #160]
249 442
250 msr vmpidr_el2, x4 443 msr vmpidr_el2, x4
251 msr csselr_el1, x5 444 msr csselr_el1, x5
@@ -268,6 +461,198 @@ __kvm_hyp_code_start:
268 msr amair_el1, x22 461 msr amair_el1, x22
269 msr cntkctl_el1, x23 462 msr cntkctl_el1, x23
270 msr par_el1, x24 463 msr par_el1, x24
464 msr mdscr_el1, x25
465.endm
466
467.macro restore_debug
468 // x2: base address for cpu context
469 // x3: tmp register
470
471 mrs x26, id_aa64dfr0_el1
472 ubfx x24, x26, #12, #4 // Extract BRPs
473 ubfx x25, x26, #20, #4 // Extract WRPs
474 mov w26, #15
475 sub w24, w26, w24 // How many BPs to skip
476 sub w25, w26, w25 // How many WPs to skip
477
478 add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
479
480 adr x26, 1f
481 add x26, x26, x24, lsl #2
482 br x26
4831:
484 ldr x20, [x3, #(15 * 8)]
485 ldr x19, [x3, #(14 * 8)]
486 ldr x18, [x3, #(13 * 8)]
487 ldr x17, [x3, #(12 * 8)]
488 ldr x16, [x3, #(11 * 8)]
489 ldr x15, [x3, #(10 * 8)]
490 ldr x14, [x3, #(9 * 8)]
491 ldr x13, [x3, #(8 * 8)]
492 ldr x12, [x3, #(7 * 8)]
493 ldr x11, [x3, #(6 * 8)]
494 ldr x10, [x3, #(5 * 8)]
495 ldr x9, [x3, #(4 * 8)]
496 ldr x8, [x3, #(3 * 8)]
497 ldr x7, [x3, #(2 * 8)]
498 ldr x6, [x3, #(1 * 8)]
499 ldr x5, [x3, #(0 * 8)]
500
501 adr x26, 1f
502 add x26, x26, x24, lsl #2
503 br x26
5041:
505 msr dbgbcr15_el1, x20
506 msr dbgbcr14_el1, x19
507 msr dbgbcr13_el1, x18
508 msr dbgbcr12_el1, x17
509 msr dbgbcr11_el1, x16
510 msr dbgbcr10_el1, x15
511 msr dbgbcr9_el1, x14
512 msr dbgbcr8_el1, x13
513 msr dbgbcr7_el1, x12
514 msr dbgbcr6_el1, x11
515 msr dbgbcr5_el1, x10
516 msr dbgbcr4_el1, x9
517 msr dbgbcr3_el1, x8
518 msr dbgbcr2_el1, x7
519 msr dbgbcr1_el1, x6
520 msr dbgbcr0_el1, x5
521
522 add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
523
524 adr x26, 1f
525 add x26, x26, x24, lsl #2
526 br x26
5271:
528 ldr x20, [x3, #(15 * 8)]
529 ldr x19, [x3, #(14 * 8)]
530 ldr x18, [x3, #(13 * 8)]
531 ldr x17, [x3, #(12 * 8)]
532 ldr x16, [x3, #(11 * 8)]
533 ldr x15, [x3, #(10 * 8)]
534 ldr x14, [x3, #(9 * 8)]
535 ldr x13, [x3, #(8 * 8)]
536 ldr x12, [x3, #(7 * 8)]
537 ldr x11, [x3, #(6 * 8)]
538 ldr x10, [x3, #(5 * 8)]
539 ldr x9, [x3, #(4 * 8)]
540 ldr x8, [x3, #(3 * 8)]
541 ldr x7, [x3, #(2 * 8)]
542 ldr x6, [x3, #(1 * 8)]
543 ldr x5, [x3, #(0 * 8)]
544
545 adr x26, 1f
546 add x26, x26, x24, lsl #2
547 br x26
5481:
549 msr dbgbvr15_el1, x20
550 msr dbgbvr14_el1, x19
551 msr dbgbvr13_el1, x18
552 msr dbgbvr12_el1, x17
553 msr dbgbvr11_el1, x16
554 msr dbgbvr10_el1, x15
555 msr dbgbvr9_el1, x14
556 msr dbgbvr8_el1, x13
557 msr dbgbvr7_el1, x12
558 msr dbgbvr6_el1, x11
559 msr dbgbvr5_el1, x10
560 msr dbgbvr4_el1, x9
561 msr dbgbvr3_el1, x8
562 msr dbgbvr2_el1, x7
563 msr dbgbvr1_el1, x6
564 msr dbgbvr0_el1, x5
565
566 add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
567
568 adr x26, 1f
569 add x26, x26, x25, lsl #2
570 br x26
5711:
572 ldr x20, [x3, #(15 * 8)]
573 ldr x19, [x3, #(14 * 8)]
574 ldr x18, [x3, #(13 * 8)]
575 ldr x17, [x3, #(12 * 8)]
576 ldr x16, [x3, #(11 * 8)]
577 ldr x15, [x3, #(10 * 8)]
578 ldr x14, [x3, #(9 * 8)]
579 ldr x13, [x3, #(8 * 8)]
580 ldr x12, [x3, #(7 * 8)]
581 ldr x11, [x3, #(6 * 8)]
582 ldr x10, [x3, #(5 * 8)]
583 ldr x9, [x3, #(4 * 8)]
584 ldr x8, [x3, #(3 * 8)]
585 ldr x7, [x3, #(2 * 8)]
586 ldr x6, [x3, #(1 * 8)]
587 ldr x5, [x3, #(0 * 8)]
588
589 adr x26, 1f
590 add x26, x26, x25, lsl #2
591 br x26
5921:
593 msr dbgwcr15_el1, x20
594 msr dbgwcr14_el1, x19
595 msr dbgwcr13_el1, x18
596 msr dbgwcr12_el1, x17
597 msr dbgwcr11_el1, x16
598 msr dbgwcr10_el1, x15
599 msr dbgwcr9_el1, x14
600 msr dbgwcr8_el1, x13
601 msr dbgwcr7_el1, x12
602 msr dbgwcr6_el1, x11
603 msr dbgwcr5_el1, x10
604 msr dbgwcr4_el1, x9
605 msr dbgwcr3_el1, x8
606 msr dbgwcr2_el1, x7
607 msr dbgwcr1_el1, x6
608 msr dbgwcr0_el1, x5
609
610 add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
611
612 adr x26, 1f
613 add x26, x26, x25, lsl #2
614 br x26
6151:
616 ldr x20, [x3, #(15 * 8)]
617 ldr x19, [x3, #(14 * 8)]
618 ldr x18, [x3, #(13 * 8)]
619 ldr x17, [x3, #(12 * 8)]
620 ldr x16, [x3, #(11 * 8)]
621 ldr x15, [x3, #(10 * 8)]
622 ldr x14, [x3, #(9 * 8)]
623 ldr x13, [x3, #(8 * 8)]
624 ldr x12, [x3, #(7 * 8)]
625 ldr x11, [x3, #(6 * 8)]
626 ldr x10, [x3, #(5 * 8)]
627 ldr x9, [x3, #(4 * 8)]
628 ldr x8, [x3, #(3 * 8)]
629 ldr x7, [x3, #(2 * 8)]
630 ldr x6, [x3, #(1 * 8)]
631 ldr x5, [x3, #(0 * 8)]
632
633 adr x26, 1f
634 add x26, x26, x25, lsl #2
635 br x26
6361:
637 msr dbgwvr15_el1, x20
638 msr dbgwvr14_el1, x19
639 msr dbgwvr13_el1, x18
640 msr dbgwvr12_el1, x17
641 msr dbgwvr11_el1, x16
642 msr dbgwvr10_el1, x15
643 msr dbgwvr9_el1, x14
644 msr dbgwvr8_el1, x13
645 msr dbgwvr7_el1, x12
646 msr dbgwvr6_el1, x11
647 msr dbgwvr5_el1, x10
648 msr dbgwvr4_el1, x9
649 msr dbgwvr3_el1, x8
650 msr dbgwvr2_el1, x7
651 msr dbgwvr1_el1, x6
652 msr dbgwvr0_el1, x5
653
654 ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
655 msr mdccint_el1, x21
271.endm 656.endm
272 657
273.macro skip_32bit_state tmp, target 658.macro skip_32bit_state tmp, target
@@ -282,6 +667,35 @@ __kvm_hyp_code_start:
282 tbz \tmp, #12, \target 667 tbz \tmp, #12, \target
283.endm 668.endm
284 669
670.macro skip_debug_state tmp, target
671 ldr \tmp, [x0, #VCPU_DEBUG_FLAGS]
672 tbz \tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
673.endm
674
675.macro compute_debug_state target
676 // Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
677 // is set, we do a full save/restore cycle and disable trapping.
678 add x25, x0, #VCPU_CONTEXT
679
680 // Check the state of MDSCR_EL1
681 ldr x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
682 and x26, x25, #DBG_MDSCR_KDE
683 and x25, x25, #DBG_MDSCR_MDE
684 adds xzr, x25, x26
685 b.eq 9998f // Nothing to see there
686
687 // If any interesting bits was set, we must set the flag
688 mov x26, #KVM_ARM64_DEBUG_DIRTY
689 str x26, [x0, #VCPU_DEBUG_FLAGS]
690 b 9999f // Don't skip restore
691
6929998:
693 // Otherwise load the flags from memory in case we recently
694 // trapped
695 skip_debug_state x25, \target
6969999:
697.endm
698
285.macro save_guest_32bit_state 699.macro save_guest_32bit_state
286 skip_32bit_state x3, 1f 700 skip_32bit_state x3, 1f
287 701
@@ -297,10 +711,13 @@ __kvm_hyp_code_start:
297 mrs x4, dacr32_el2 711 mrs x4, dacr32_el2
298 mrs x5, ifsr32_el2 712 mrs x5, ifsr32_el2
299 mrs x6, fpexc32_el2 713 mrs x6, fpexc32_el2
300 mrs x7, dbgvcr32_el2
301 stp x4, x5, [x3] 714 stp x4, x5, [x3]
302 stp x6, x7, [x3, #16] 715 str x6, [x3, #16]
303 716
717 skip_debug_state x8, 2f
718 mrs x7, dbgvcr32_el2
719 str x7, [x3, #24]
7202:
304 skip_tee_state x8, 1f 721 skip_tee_state x8, 1f
305 722
306 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1) 723 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
@@ -323,12 +740,15 @@ __kvm_hyp_code_start:
323 740
324 add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2) 741 add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
325 ldp x4, x5, [x3] 742 ldp x4, x5, [x3]
326 ldp x6, x7, [x3, #16] 743 ldr x6, [x3, #16]
327 msr dacr32_el2, x4 744 msr dacr32_el2, x4
328 msr ifsr32_el2, x5 745 msr ifsr32_el2, x5
329 msr fpexc32_el2, x6 746 msr fpexc32_el2, x6
330 msr dbgvcr32_el2, x7
331 747
748 skip_debug_state x8, 2f
749 ldr x7, [x3, #24]
750 msr dbgvcr32_el2, x7
7512:
332 skip_tee_state x8, 1f 752 skip_tee_state x8, 1f
333 753
334 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1) 754 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
@@ -339,11 +759,8 @@ __kvm_hyp_code_start:
339.endm 759.endm
340 760
341.macro activate_traps 761.macro activate_traps
342 ldr x2, [x0, #VCPU_IRQ_LINES] 762 ldr x2, [x0, #VCPU_HCR_EL2]
343 ldr x1, [x0, #VCPU_HCR_EL2] 763 msr hcr_el2, x2
344 orr x2, x2, x1
345 msr hcr_el2, x2
346
347 ldr x2, =(CPTR_EL2_TTA) 764 ldr x2, =(CPTR_EL2_TTA)
348 msr cptr_el2, x2 765 msr cptr_el2, x2
349 766
@@ -353,6 +770,14 @@ __kvm_hyp_code_start:
353 mrs x2, mdcr_el2 770 mrs x2, mdcr_el2
354 and x2, x2, #MDCR_EL2_HPMN_MASK 771 and x2, x2, #MDCR_EL2_HPMN_MASK
355 orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR) 772 orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
773 orr x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)
774
775 // Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
776 // if not dirty.
777 ldr x3, [x0, #VCPU_DEBUG_FLAGS]
778 tbnz x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
779 orr x2, x2, #MDCR_EL2_TDA
7801:
356 msr mdcr_el2, x2 781 msr mdcr_el2, x2
357.endm 782.endm
358 783
@@ -379,100 +804,33 @@ __kvm_hyp_code_start:
379.endm 804.endm
380 805
381/* 806/*
382 * Save the VGIC CPU state into memory 807 * Call into the vgic backend for state saving
383 * x0: Register pointing to VCPU struct
384 * Do not corrupt x1!!!
385 */ 808 */
386.macro save_vgic_state 809.macro save_vgic_state
387 /* Get VGIC VCTRL base into x2 */ 810 adr x24, __vgic_sr_vectors
388 ldr x2, [x0, #VCPU_KVM] 811 ldr x24, [x24, VGIC_SAVE_FN]
389 kern_hyp_va x2 812 kern_hyp_va x24
390 ldr x2, [x2, #KVM_VGIC_VCTRL] 813 blr x24
391 kern_hyp_va x2 814 mrs x24, hcr_el2
392 cbz x2, 2f // disabled 815 mov x25, #HCR_INT_OVERRIDE
393 816 neg x25, x25
394 /* Compute the address of struct vgic_cpu */ 817 and x24, x24, x25
395 add x3, x0, #VCPU_VGIC_CPU 818 msr hcr_el2, x24
396
397 /* Save all interesting registers */
398 ldr w4, [x2, #GICH_HCR]
399 ldr w5, [x2, #GICH_VMCR]
400 ldr w6, [x2, #GICH_MISR]
401 ldr w7, [x2, #GICH_EISR0]
402 ldr w8, [x2, #GICH_EISR1]
403 ldr w9, [x2, #GICH_ELRSR0]
404 ldr w10, [x2, #GICH_ELRSR1]
405 ldr w11, [x2, #GICH_APR]
406CPU_BE( rev w4, w4 )
407CPU_BE( rev w5, w5 )
408CPU_BE( rev w6, w6 )
409CPU_BE( rev w7, w7 )
410CPU_BE( rev w8, w8 )
411CPU_BE( rev w9, w9 )
412CPU_BE( rev w10, w10 )
413CPU_BE( rev w11, w11 )
414
415 str w4, [x3, #VGIC_CPU_HCR]
416 str w5, [x3, #VGIC_CPU_VMCR]
417 str w6, [x3, #VGIC_CPU_MISR]
418 str w7, [x3, #VGIC_CPU_EISR]
419 str w8, [x3, #(VGIC_CPU_EISR + 4)]
420 str w9, [x3, #VGIC_CPU_ELRSR]
421 str w10, [x3, #(VGIC_CPU_ELRSR + 4)]
422 str w11, [x3, #VGIC_CPU_APR]
423
424 /* Clear GICH_HCR */
425 str wzr, [x2, #GICH_HCR]
426
427 /* Save list registers */
428 add x2, x2, #GICH_LR0
429 ldr w4, [x3, #VGIC_CPU_NR_LR]
430 add x3, x3, #VGIC_CPU_LR
4311: ldr w5, [x2], #4
432CPU_BE( rev w5, w5 )
433 str w5, [x3], #4
434 sub w4, w4, #1
435 cbnz w4, 1b
4362:
437.endm 819.endm
438 820
439/* 821/*
440 * Restore the VGIC CPU state from memory 822 * Call into the vgic backend for state restoring
441 * x0: Register pointing to VCPU struct
442 */ 823 */
443.macro restore_vgic_state 824.macro restore_vgic_state
444 /* Get VGIC VCTRL base into x2 */ 825 mrs x24, hcr_el2
445 ldr x2, [x0, #VCPU_KVM] 826 ldr x25, [x0, #VCPU_IRQ_LINES]
446 kern_hyp_va x2 827 orr x24, x24, #HCR_INT_OVERRIDE
447 ldr x2, [x2, #KVM_VGIC_VCTRL] 828 orr x24, x24, x25
448 kern_hyp_va x2 829 msr hcr_el2, x24
449 cbz x2, 2f // disabled 830 adr x24, __vgic_sr_vectors
450 831 ldr x24, [x24, #VGIC_RESTORE_FN]
451 /* Compute the address of struct vgic_cpu */ 832 kern_hyp_va x24
452 add x3, x0, #VCPU_VGIC_CPU 833 blr x24
453
454 /* We only restore a minimal set of registers */
455 ldr w4, [x3, #VGIC_CPU_HCR]
456 ldr w5, [x3, #VGIC_CPU_VMCR]
457 ldr w6, [x3, #VGIC_CPU_APR]
458CPU_BE( rev w4, w4 )
459CPU_BE( rev w5, w5 )
460CPU_BE( rev w6, w6 )
461
462 str w4, [x2, #GICH_HCR]
463 str w5, [x2, #GICH_VMCR]
464 str w6, [x2, #GICH_APR]
465
466 /* Restore list registers */
467 add x2, x2, #GICH_LR0
468 ldr w4, [x3, #VGIC_CPU_NR_LR]
469 add x3, x3, #VGIC_CPU_LR
4701: ldr w5, [x3], #4
471CPU_BE( rev w5, w5 )
472 str w5, [x2], #4
473 sub w4, w4, #1
474 cbnz w4, 1b
4752:
476.endm 834.endm
477 835
478.macro save_timer_state 836.macro save_timer_state
@@ -537,6 +895,14 @@ __restore_sysregs:
537 restore_sysregs 895 restore_sysregs
538 ret 896 ret
539 897
898__save_debug:
899 save_debug
900 ret
901
902__restore_debug:
903 restore_debug
904 ret
905
540__save_fpsimd: 906__save_fpsimd:
541 save_fpsimd 907 save_fpsimd
542 ret 908 ret
@@ -568,6 +934,9 @@ ENTRY(__kvm_vcpu_run)
568 bl __save_fpsimd 934 bl __save_fpsimd
569 bl __save_sysregs 935 bl __save_sysregs
570 936
937 compute_debug_state 1f
938 bl __save_debug
9391:
571 activate_traps 940 activate_traps
572 activate_vm 941 activate_vm
573 942
@@ -579,6 +948,10 @@ ENTRY(__kvm_vcpu_run)
579 948
580 bl __restore_sysregs 949 bl __restore_sysregs
581 bl __restore_fpsimd 950 bl __restore_fpsimd
951
952 skip_debug_state x3, 1f
953 bl __restore_debug
9541:
582 restore_guest_32bit_state 955 restore_guest_32bit_state
583 restore_guest_regs 956 restore_guest_regs
584 957
@@ -595,6 +968,10 @@ __kvm_vcpu_return:
595 save_guest_regs 968 save_guest_regs
596 bl __save_fpsimd 969 bl __save_fpsimd
597 bl __save_sysregs 970 bl __save_sysregs
971
972 skip_debug_state x3, 1f
973 bl __save_debug
9741:
598 save_guest_32bit_state 975 save_guest_32bit_state
599 976
600 save_timer_state 977 save_timer_state
@@ -609,6 +986,14 @@ __kvm_vcpu_return:
609 986
610 bl __restore_sysregs 987 bl __restore_sysregs
611 bl __restore_fpsimd 988 bl __restore_fpsimd
989
990 skip_debug_state x3, 1f
991 // Clear the dirty flag for the next run, as all the state has
992 // already been saved. Note that we nuke the whole 64bit word.
993 // If we ever add more flags, we'll have to be more careful...
994 str xzr, [x0, #VCPU_DEBUG_FLAGS]
995 bl __restore_debug
9961:
612 restore_host_regs 997 restore_host_regs
613 998
614 mov x0, x1 999 mov x0, x1
@@ -653,6 +1038,12 @@ ENTRY(__kvm_flush_vm_context)
653 ret 1038 ret
654ENDPROC(__kvm_flush_vm_context) 1039ENDPROC(__kvm_flush_vm_context)
655 1040
1041 // struct vgic_sr_vectors __vgi_sr_vectors;
1042 .align 3
1043ENTRY(__vgic_sr_vectors)
1044 .skip VGIC_SR_VECTOR_SZ
1045ENDPROC(__vgic_sr_vectors)
1046
656__kvm_hyp_panic: 1047__kvm_hyp_panic:
657 // Guess the context by looking at VTTBR: 1048 // Guess the context by looking at VTTBR:
658 // If zero, then we're already a host. 1049 // If zero, then we're already a host.
@@ -830,7 +1221,7 @@ el1_trap:
830 mrs x2, far_el2 1221 mrs x2, far_el2
831 1222
8322: mrs x0, tpidr_el2 12232: mrs x0, tpidr_el2
833 str x1, [x0, #VCPU_ESR_EL2] 1224 str w1, [x0, #VCPU_ESR_EL2]
834 str x2, [x0, #VCPU_FAR_EL2] 1225 str x2, [x0, #VCPU_FAR_EL2]
835 str x3, [x0, #VCPU_HPFAR_EL2] 1226 str x3, [x0, #VCPU_HPFAR_EL2]
836 1227
@@ -880,7 +1271,4 @@ ENTRY(__kvm_hyp_vector)
880 ventry el1_error_invalid // Error 32-bit EL1 1271 ventry el1_error_invalid // Error 32-bit EL1
881ENDPROC(__kvm_hyp_vector) 1272ENDPROC(__kvm_hyp_vector)
882 1273
883__kvm_hyp_code_end:
884 .globl __kvm_hyp_code_end
885
886 .popsection 1274 .popsection
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c59a1bdab5eb..5805e7c4a4dd 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -30,6 +30,7 @@
30#include <asm/kvm_mmu.h> 30#include <asm/kvm_mmu.h>
31#include <asm/cacheflush.h> 31#include <asm/cacheflush.h>
32#include <asm/cputype.h> 32#include <asm/cputype.h>
33#include <asm/debug-monitors.h>
33#include <trace/events/kvm.h> 34#include <trace/events/kvm.h>
34 35
35#include "sys_regs.h" 36#include "sys_regs.h"
@@ -137,10 +138,11 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
137 if (!p->is_aarch32) { 138 if (!p->is_aarch32) {
138 vcpu_sys_reg(vcpu, r->reg) = val; 139 vcpu_sys_reg(vcpu, r->reg) = val;
139 } else { 140 } else {
140 vcpu_cp15(vcpu, r->reg) = val & 0xffffffffUL;
141 if (!p->is_32bit) 141 if (!p->is_32bit)
142 vcpu_cp15(vcpu, r->reg + 1) = val >> 32; 142 vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
143 vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
143 } 144 }
145
144 return true; 146 return true;
145} 147}
146 148
@@ -163,18 +165,9 @@ static bool access_sctlr(struct kvm_vcpu *vcpu,
163 return true; 165 return true;
164} 166}
165 167
166/* 168static bool trap_raz_wi(struct kvm_vcpu *vcpu,
167 * We could trap ID_DFR0 and tell the guest we don't support performance 169 const struct sys_reg_params *p,
168 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was 170 const struct sys_reg_desc *r)
169 * NAKed, so it will read the PMCR anyway.
170 *
171 * Therefore we tell the guest we have 0 counters. Unfortunately, we
172 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
173 * all PM registers, which doesn't crash the guest kernel at least.
174 */
175static bool pm_fake(struct kvm_vcpu *vcpu,
176 const struct sys_reg_params *p,
177 const struct sys_reg_desc *r)
178{ 171{
179 if (p->is_write) 172 if (p->is_write)
180 return ignore_write(vcpu, p); 173 return ignore_write(vcpu, p);
@@ -182,6 +175,73 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
182 return read_zero(vcpu, p); 175 return read_zero(vcpu, p);
183} 176}
184 177
178static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
179 const struct sys_reg_params *p,
180 const struct sys_reg_desc *r)
181{
182 if (p->is_write) {
183 return ignore_write(vcpu, p);
184 } else {
185 *vcpu_reg(vcpu, p->Rt) = (1 << 3);
186 return true;
187 }
188}
189
190static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
191 const struct sys_reg_params *p,
192 const struct sys_reg_desc *r)
193{
194 if (p->is_write) {
195 return ignore_write(vcpu, p);
196 } else {
197 u32 val;
198 asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
199 *vcpu_reg(vcpu, p->Rt) = val;
200 return true;
201 }
202}
203
204/*
205 * We want to avoid world-switching all the DBG registers all the
206 * time:
207 *
208 * - If we've touched any debug register, it is likely that we're
209 * going to touch more of them. It then makes sense to disable the
210 * traps and start doing the save/restore dance
211 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
212 * then mandatory to save/restore the registers, as the guest
213 * depends on them.
214 *
215 * For this, we use a DIRTY bit, indicating the guest has modified the
216 * debug registers, used as follow:
217 *
218 * On guest entry:
219 * - If the dirty bit is set (because we're coming back from trapping),
220 * disable the traps, save host registers, restore guest registers.
221 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
222 * set the dirty bit, disable the traps, save host registers,
223 * restore guest registers.
224 * - Otherwise, enable the traps
225 *
226 * On guest exit:
227 * - If the dirty bit is set, save guest registers, restore host
228 * registers and clear the dirty bit. This ensure that the host can
229 * now use the debug registers.
230 */
231static bool trap_debug_regs(struct kvm_vcpu *vcpu,
232 const struct sys_reg_params *p,
233 const struct sys_reg_desc *r)
234{
235 if (p->is_write) {
236 vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
237 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
238 } else {
239 *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
240 }
241
242 return true;
243}
244
185static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 245static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
186{ 246{
187 u64 amair; 247 u64 amair;
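The block comment in trap_debug_regs() above describes a lazy switching policy; in the patch itself the decision is split between this handler and the compute_debug_state/skip_debug_state macros in hyp.S. A rough C rendering of the guest-entry decision, under those assumptions, is sketched below (need_debug_switch() is an illustrative name only, not kernel code).

#include <stdbool.h>
#include <stdint.h>

#define DBG_MDSCR_KDE		(1u << 13)
#define DBG_MDSCR_MDE		(1u << 15)
#define KVM_ARM64_DEBUG_DIRTY	(1u << 0)

/* Entry-side policy: if the guest has debug enabled via MDSCR_EL1, mark the
 * state dirty; if the state is dirty (from this check or from a previously
 * trapped debug-register access), do the full save/restore and leave the
 * debug registers untrapped for this run.  Otherwise keep trapping them. */
static bool need_debug_switch(uint64_t *debug_flags, uint64_t guest_mdscr)
{
	if (guest_mdscr & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
		*debug_flags |= KVM_ARM64_DEBUG_DIRTY;

	return *debug_flags & KVM_ARM64_DEBUG_DIRTY;
}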
@@ -198,9 +258,39 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
198 vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff); 258 vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
199} 259}
200 260
261/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
262#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
263 /* DBGBVRn_EL1 */ \
264 { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100), \
265 trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 }, \
266 /* DBGBCRn_EL1 */ \
267 { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101), \
268 trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 }, \
269 /* DBGWVRn_EL1 */ \
270 { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110), \
271 trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 }, \
272 /* DBGWCRn_EL1 */ \
273 { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \
274 trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 }
275
201/* 276/*
202 * Architected system registers. 277 * Architected system registers.
203 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 278 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
279 *
280 * We could trap ID_DFR0 and tell the guest we don't support performance
281 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
282 * NAKed, so it will read the PMCR anyway.
283 *
284 * Therefore we tell the guest we have 0 counters. Unfortunately, we
285 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
286 * all PM registers, which doesn't crash the guest kernel at least.
287 *
288 * Debug handling: We do trap most, if not all debug related system
289 * registers. The implementation is good enough to ensure that a guest
290 * can use these with minimal performance degradation. The drawback is
291 * that we don't implement any of the external debug, none of the
292 * OSlock protocol. This should be revisited if we ever encounter a
293 * more demanding guest...
204 */ 294 */
205static const struct sys_reg_desc sys_reg_descs[] = { 295static const struct sys_reg_desc sys_reg_descs[] = {
206 /* DC ISW */ 296 /* DC ISW */
@@ -213,12 +303,71 @@ static const struct sys_reg_desc sys_reg_descs[] = {
213 { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010), 303 { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
214 access_dcsw }, 304 access_dcsw },
215 305
306 DBG_BCR_BVR_WCR_WVR_EL1(0),
307 DBG_BCR_BVR_WCR_WVR_EL1(1),
308 /* MDCCINT_EL1 */
309 { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
310 trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
311 /* MDSCR_EL1 */
312 { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
313 trap_debug_regs, reset_val, MDSCR_EL1, 0 },
314 DBG_BCR_BVR_WCR_WVR_EL1(2),
315 DBG_BCR_BVR_WCR_WVR_EL1(3),
316 DBG_BCR_BVR_WCR_WVR_EL1(4),
317 DBG_BCR_BVR_WCR_WVR_EL1(5),
318 DBG_BCR_BVR_WCR_WVR_EL1(6),
319 DBG_BCR_BVR_WCR_WVR_EL1(7),
320 DBG_BCR_BVR_WCR_WVR_EL1(8),
321 DBG_BCR_BVR_WCR_WVR_EL1(9),
322 DBG_BCR_BVR_WCR_WVR_EL1(10),
323 DBG_BCR_BVR_WCR_WVR_EL1(11),
324 DBG_BCR_BVR_WCR_WVR_EL1(12),
325 DBG_BCR_BVR_WCR_WVR_EL1(13),
326 DBG_BCR_BVR_WCR_WVR_EL1(14),
327 DBG_BCR_BVR_WCR_WVR_EL1(15),
328
329 /* MDRAR_EL1 */
330 { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
331 trap_raz_wi },
332 /* OSLAR_EL1 */
333 { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
334 trap_raz_wi },
335 /* OSLSR_EL1 */
336 { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
337 trap_oslsr_el1 },
338 /* OSDLR_EL1 */
339 { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
340 trap_raz_wi },
341 /* DBGPRCR_EL1 */
342 { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
343 trap_raz_wi },
344 /* DBGCLAIMSET_EL1 */
345 { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
346 trap_raz_wi },
347 /* DBGCLAIMCLR_EL1 */
348 { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
349 trap_raz_wi },
350 /* DBGAUTHSTATUS_EL1 */
351 { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
352 trap_dbgauthstatus_el1 },
353
216 /* TEECR32_EL1 */ 354 /* TEECR32_EL1 */
217 { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000), 355 { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
218 NULL, reset_val, TEECR32_EL1, 0 }, 356 NULL, reset_val, TEECR32_EL1, 0 },
219 /* TEEHBR32_EL1 */ 357 /* TEEHBR32_EL1 */
220 { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000), 358 { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
221 NULL, reset_val, TEEHBR32_EL1, 0 }, 359 NULL, reset_val, TEEHBR32_EL1, 0 },
360
361 /* MDCCSR_EL1 */
362 { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
363 trap_raz_wi },
364 /* DBGDTR_EL0 */
365 { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
366 trap_raz_wi },
367 /* DBGDTR[TR]X_EL0 */
368 { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
369 trap_raz_wi },
370
222 /* DBGVCR32_EL2 */ 371 /* DBGVCR32_EL2 */
223 { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000), 372 { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
224 NULL, reset_val, DBGVCR32_EL2, 0 }, 373 NULL, reset_val, DBGVCR32_EL2, 0 },
@@ -260,10 +409,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
260 409
261 /* PMINTENSET_EL1 */ 410 /* PMINTENSET_EL1 */
262 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), 411 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
263 pm_fake }, 412 trap_raz_wi },
264 /* PMINTENCLR_EL1 */ 413 /* PMINTENCLR_EL1 */
265 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010), 414 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
266 pm_fake }, 415 trap_raz_wi },
267 416
268 /* MAIR_EL1 */ 417 /* MAIR_EL1 */
269 { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000), 418 { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
@@ -292,43 +441,43 @@ static const struct sys_reg_desc sys_reg_descs[] = {
292 441
293 /* PMCR_EL0 */ 442 /* PMCR_EL0 */
294 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000), 443 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
295 pm_fake }, 444 trap_raz_wi },
296 /* PMCNTENSET_EL0 */ 445 /* PMCNTENSET_EL0 */
297 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001), 446 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
298 pm_fake }, 447 trap_raz_wi },
299 /* PMCNTENCLR_EL0 */ 448 /* PMCNTENCLR_EL0 */
300 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010), 449 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
301 pm_fake }, 450 trap_raz_wi },
302 /* PMOVSCLR_EL0 */ 451 /* PMOVSCLR_EL0 */
303 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011), 452 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
304 pm_fake }, 453 trap_raz_wi },
305 /* PMSWINC_EL0 */ 454 /* PMSWINC_EL0 */
306 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100), 455 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
307 pm_fake }, 456 trap_raz_wi },
308 /* PMSELR_EL0 */ 457 /* PMSELR_EL0 */
309 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101), 458 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
310 pm_fake }, 459 trap_raz_wi },
311 /* PMCEID0_EL0 */ 460 /* PMCEID0_EL0 */
312 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110), 461 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
313 pm_fake }, 462 trap_raz_wi },
314 /* PMCEID1_EL0 */ 463 /* PMCEID1_EL0 */
315 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111), 464 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
316 pm_fake }, 465 trap_raz_wi },
317 /* PMCCNTR_EL0 */ 466 /* PMCCNTR_EL0 */
318 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000), 467 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
319 pm_fake }, 468 trap_raz_wi },
320 /* PMXEVTYPER_EL0 */ 469 /* PMXEVTYPER_EL0 */
321 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001), 470 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
322 pm_fake }, 471 trap_raz_wi },
323 /* PMXEVCNTR_EL0 */ 472 /* PMXEVCNTR_EL0 */
324 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010), 473 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
325 pm_fake }, 474 trap_raz_wi },
326 /* PMUSERENR_EL0 */ 475 /* PMUSERENR_EL0 */
327 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000), 476 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
328 pm_fake }, 477 trap_raz_wi },
329 /* PMOVSSET_EL0 */ 478 /* PMOVSSET_EL0 */
330 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011), 479 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
331 pm_fake }, 480 trap_raz_wi },
332 481
333 /* TPIDR_EL0 */ 482 /* TPIDR_EL0 */
334 { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010), 483 { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
@@ -348,13 +497,161 @@ static const struct sys_reg_desc sys_reg_descs[] = {
348 NULL, reset_val, FPEXC32_EL2, 0x70 }, 497 NULL, reset_val, FPEXC32_EL2, 0x70 },
349}; 498};
350 499
500static bool trap_dbgidr(struct kvm_vcpu *vcpu,
501 const struct sys_reg_params *p,
502 const struct sys_reg_desc *r)
503{
504 if (p->is_write) {
505 return ignore_write(vcpu, p);
506 } else {
507 u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
508 u64 pfr = read_cpuid(ID_AA64PFR0_EL1);
509 u32 el3 = !!((pfr >> 12) & 0xf);
510
511 *vcpu_reg(vcpu, p->Rt) = ((((dfr >> 20) & 0xf) << 28) |
512 (((dfr >> 12) & 0xf) << 24) |
513 (((dfr >> 28) & 0xf) << 20) |
514 (6 << 16) | (el3 << 14) | (el3 << 12));
515 return true;
516 }
517}
518
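
trap_dbgidr above does not expose the host's DBGIDR; it repacks the AArch64 ID_AA64DFR0_EL1 fields into the AArch32 DBGIDR layout, forces the debug architecture version to 6 (ARMv8) and derives the security bits from EL3 presence in ID_AA64PFR0_EL1. A standalone rendering of the same bit shuffling; field names follow my reading of the ARM ARM and the values in main() are made up:

#include <stdint.h>
#include <stdio.h>

static uint32_t demo_make_dbgidr(uint64_t dfr, uint64_t pfr)
{
	uint32_t el3 = ((pfr >> 12) & 0xf) != 0;	/* EL3 implemented? */

	return (((dfr >> 20) & 0xf) << 28) |	/* WRPs */
	       (((dfr >> 12) & 0xf) << 24) |	/* BRPs */
	       (((dfr >> 28) & 0xf) << 20) |	/* CTX_CMPs */
	       (6 << 16) |			/* Version: ARMv8 debug */
	       (el3 << 14) | (el3 << 12);	/* NSUHD_imp, SE_imp */
}

int main(void)
{
	/* 4 watchpoints, 6 breakpoints, 2 context comparators, EL3 present */
	uint64_t dfr = (3ULL << 20) | (5ULL << 12) | (1ULL << 28);
	uint64_t pfr = 2ULL << 12;

	printf("DBGIDR = %#x\n", (unsigned)demo_make_dbgidr(dfr, pfr));
	return 0;
}
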
519static bool trap_debug32(struct kvm_vcpu *vcpu,
520 const struct sys_reg_params *p,
521 const struct sys_reg_desc *r)
522{
523 if (p->is_write) {
524 vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
525 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
526 } else {
527 *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
528 }
529
530 return true;
531}
532
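
trap_debug32 shadows the 32-bit debug registers in vcpu_cp14 and flags the vcpu's debug state dirty on every guest write. The point of such a dirty flag is to let later save/restore code do nothing on the common path where the guest never touched the registers. A rough standalone model of that lazy-save pattern; every name here is made up for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_vcpu {
	uint32_t shadow_dbg[64];	/* shadowed debug registers */
	bool debug_dirty;
};

static void demo_write_dbg(struct demo_vcpu *v, int idx, uint32_t val)
{
	v->shadow_dbg[idx] = val;
	v->debug_dirty = true;		/* force a real sync later */
}

static void demo_world_switch(struct demo_vcpu *v)
{
	if (!v->debug_dirty)
		return;			/* cheap path: nothing touched */
	printf("syncing %zu shadowed registers to hardware\n",
	       sizeof(v->shadow_dbg) / sizeof(v->shadow_dbg[0]));
	v->debug_dirty = false;
}

int main(void)
{
	struct demo_vcpu v = { { 0 }, false };

	demo_world_switch(&v);		/* no-op */
	demo_write_dbg(&v, 3, 0x1);
	demo_world_switch(&v);		/* does the expensive sync */
	return 0;
}
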
533#define DBG_BCR_BVR_WCR_WVR(n) \
534 /* DBGBVRn */ \
535 { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32, \
536 NULL, (cp14_DBGBVR0 + (n) * 2) }, \
537 /* DBGBCRn */ \
538 { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32, \
539 NULL, (cp14_DBGBCR0 + (n) * 2) }, \
540 /* DBGWVRn */ \
541 { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32, \
542 NULL, (cp14_DBGWVR0 + (n) * 2) }, \
543 /* DBGWCRn */ \
544 { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32, \
545 NULL, (cp14_DBGWCR0 + (n) * 2) }
546
547#define DBGBXVR(n) \
548 { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32, \
549 NULL, cp14_DBGBXVR0 + n * 2 }
550
551/*
552 * Trapped cp14 registers. We generally ignore most of the external
553 * debug registers, on the principle that they don't really make sense
554 * to a guest. Revisit this one day, should this principle change.
555 */
556static const struct sys_reg_desc cp14_regs[] = {
557 /* DBGIDR */
558 { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
559 /* DBGDTRRXext */
560 { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
561
562 DBG_BCR_BVR_WCR_WVR(0),
563 /* DBGDSCRint */
564 { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
565 DBG_BCR_BVR_WCR_WVR(1),
566 /* DBGDCCINT */
567 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
568 /* DBGDSCRext */
569 { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
570 DBG_BCR_BVR_WCR_WVR(2),
571 /* DBGDTR[RT]Xint */
572 { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
573 /* DBGDTR[RT]Xext */
574 { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
575 DBG_BCR_BVR_WCR_WVR(3),
576 DBG_BCR_BVR_WCR_WVR(4),
577 DBG_BCR_BVR_WCR_WVR(5),
578 /* DBGWFAR */
579 { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
580 /* DBGOSECCR */
581 { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
582 DBG_BCR_BVR_WCR_WVR(6),
583 /* DBGVCR */
584 { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
585 DBG_BCR_BVR_WCR_WVR(7),
586 DBG_BCR_BVR_WCR_WVR(8),
587 DBG_BCR_BVR_WCR_WVR(9),
588 DBG_BCR_BVR_WCR_WVR(10),
589 DBG_BCR_BVR_WCR_WVR(11),
590 DBG_BCR_BVR_WCR_WVR(12),
591 DBG_BCR_BVR_WCR_WVR(13),
592 DBG_BCR_BVR_WCR_WVR(14),
593 DBG_BCR_BVR_WCR_WVR(15),
594
595 /* DBGDRAR (32bit) */
596 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
597
598 DBGBXVR(0),
599 /* DBGOSLAR */
600 { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
601 DBGBXVR(1),
602 /* DBGOSLSR */
603 { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
604 DBGBXVR(2),
605 DBGBXVR(3),
606 /* DBGOSDLR */
607 { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
608 DBGBXVR(4),
609 /* DBGPRCR */
610 { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
611 DBGBXVR(5),
612 DBGBXVR(6),
613 DBGBXVR(7),
614 DBGBXVR(8),
615 DBGBXVR(9),
616 DBGBXVR(10),
617 DBGBXVR(11),
618 DBGBXVR(12),
619 DBGBXVR(13),
620 DBGBXVR(14),
621 DBGBXVR(15),
622
623 /* DBGDSAR (32bit) */
624 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
625
626 /* DBGDEVID2 */
627 { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
628 /* DBGDEVID1 */
629 { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
630 /* DBGDEVID */
631 { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
632 /* DBGCLAIMSET */
633 { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
634 /* DBGCLAIMCLR */
635 { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
636 /* DBGAUTHSTATUS */
637 { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
638};
639
640/* Trapped cp14 64bit registers */
641static const struct sys_reg_desc cp14_64_regs[] = {
642 /* DBGDRAR (64bit) */
643 { Op1( 0), CRm( 1), .access = trap_raz_wi },
644
645 /* DBGDSAR (64bit) */
646 { Op1( 0), CRm( 2), .access = trap_raz_wi },
647};
648
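
The two 64-bit descriptors above only fill in Op1 and CRm: a 64-bit coprocessor transfer (the MCRR/MRRC form) encodes no CRn or Op2, so those two fields are all there is to match on. A minimal sketch of such a lookup, using stand-in types rather than sys_reg_desc:

#include <stdio.h>

struct demo_cp64_desc { int op1, crm; const char *name; };

static const struct demo_cp64_desc demo_cp14_64[] = {
	{ 0, 1, "DBGDRAR" },
	{ 0, 2, "DBGDSAR" },
};

static const char *demo_match_cp64(int op1, int crm)
{
	for (unsigned i = 0; i < sizeof(demo_cp14_64) / sizeof(demo_cp14_64[0]); i++)
		if (demo_cp14_64[i].op1 == op1 && demo_cp14_64[i].crm == crm)
			return demo_cp14_64[i].name;
	return "unhandled";
}

int main(void)
{
	printf("Op1=0, CRm=1 -> %s\n", demo_match_cp64(0, 1));
	return 0;
}
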
351/* 649/*
352 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding, 650 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
353 * depending on the way they are accessed (as a 32bit or a 64bit 651 * depending on the way they are accessed (as a 32bit or a 64bit
354 * register). 652 * register).
355 */ 653 */
356static const struct sys_reg_desc cp15_regs[] = { 654static const struct sys_reg_desc cp15_regs[] = {
357 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
358 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR }, 655 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
359 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, 656 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
360 { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, 657 { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
@@ -374,26 +671,30 @@ static const struct sys_reg_desc cp15_regs[] = {
374 { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw }, 671 { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
375 { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw }, 672 { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
376 673
377 { Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake }, 674 /* PMU */
378 { Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake }, 675 { Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
379 { Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake }, 676 { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
380 { Op1( 0), CRn( 9), CRm(12), Op2( 3), pm_fake }, 677 { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
381 { Op1( 0), CRn( 9), CRm(12), Op2( 5), pm_fake }, 678 { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
382 { Op1( 0), CRn( 9), CRm(12), Op2( 6), pm_fake }, 679 { Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
383 { Op1( 0), CRn( 9), CRm(12), Op2( 7), pm_fake }, 680 { Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
384 { Op1( 0), CRn( 9), CRm(13), Op2( 0), pm_fake }, 681 { Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
385 { Op1( 0), CRn( 9), CRm(13), Op2( 1), pm_fake }, 682 { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
386 { Op1( 0), CRn( 9), CRm(13), Op2( 2), pm_fake }, 683 { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
387 { Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake }, 684 { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
388 { Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake }, 685 { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
389 { Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake }, 686 { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
687 { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },
390 688
391 { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR }, 689 { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
392 { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, 690 { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
393 { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 }, 691 { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
394 { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 }, 692 { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
395 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, 693 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
694};
396 695
696static const struct sys_reg_desc cp15_64_regs[] = {
697 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
397 { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 }, 698 { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
398}; 699};
399 700
@@ -454,26 +755,29 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
454 return 1; 755 return 1;
455} 756}
456 757
457int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run) 758/*
458{ 759 * emulate_cp -- tries to match a sys_reg access in a handling table, and
459 kvm_inject_undefined(vcpu); 760 * calls the corresponding trap handler.
460 return 1; 761 *
461} 762 * @params: pointer to the descriptor of the access
462 763 * @table: array of trap descriptors
463static void emulate_cp15(struct kvm_vcpu *vcpu, 764 * @num: size of the trap descriptor array
464 const struct sys_reg_params *params) 765 *
766 * Return 0 if the access has been handled, and -1 if not.
767 */
768static int emulate_cp(struct kvm_vcpu *vcpu,
769 const struct sys_reg_params *params,
770 const struct sys_reg_desc *table,
771 size_t num)
465{ 772{
466 size_t num; 773 const struct sys_reg_desc *r;
467 const struct sys_reg_desc *table, *r;
468 774
469 table = get_target_table(vcpu->arch.target, false, &num); 775 if (!table)
776 return -1; /* Not handled */
470 777
471 /* Search target-specific then generic table. */
472 r = find_reg(params, table, num); 778 r = find_reg(params, table, num);
473 if (!r)
474 r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));
475 779
476 if (likely(r)) { 780 if (r) {
477 /* 781 /*
478 * Not having an accessor means that we have 782 * Not having an accessor means that we have
479 * configured a trap that we don't know how to 783 * configured a trap that we don't know how to
@@ -485,22 +789,51 @@ static void emulate_cp15(struct kvm_vcpu *vcpu,
485 if (likely(r->access(vcpu, params, r))) { 789 if (likely(r->access(vcpu, params, r))) {
486 /* Skip instruction, since it was emulated */ 790 /* Skip instruction, since it was emulated */
487 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); 791 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
488 return;
489 } 792 }
490 /* If access function fails, it should complain. */ 793
794 /* Handled */
795 return 0;
491 } 796 }
492 797
493 kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu)); 798 /* Not handled */
799 return -1;
800}
801
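
emulate_cp above is deliberately table-agnostic: it searches one array and reports handled (0) or not (-1). As the handlers further down show, an access is tried against the target-specific table first, then the global one, and whatever is left falls through to unhandled_cp_access, which logs the access and injects an UNDEF into the guest. A condensed standalone model of that dispatch order, with stand-in types and names:

#include <stddef.h>
#include <stdio.h>

struct demo_desc { int crn, crm, op1, op2; const char *name; };

/* Return the matching entry, or NULL: a stand-in for find_reg(). */
static const struct demo_desc *demo_find(const struct demo_desc *tbl, size_t n,
					 int crn, int crm, int op1, int op2)
{
	for (size_t i = 0; tbl && i < n; i++)
		if (tbl[i].crn == crn && tbl[i].crm == crm &&
		    tbl[i].op1 == op1 && tbl[i].op2 == op2)
			return &tbl[i];
	return NULL;
}

/* Try the target-specific table first, then the global one. */
static const char *demo_dispatch(const struct demo_desc *specific, size_t nr_specific,
				 const struct demo_desc *global, size_t nr_global,
				 int crn, int crm, int op1, int op2)
{
	const struct demo_desc *r;

	r = demo_find(specific, nr_specific, crn, crm, op1, op2);
	if (!r)
		r = demo_find(global, nr_global, crn, crm, op1, op2);
	return r ? r->name : "unhandled -> inject UNDEF";
}

int main(void)
{
	static const struct demo_desc global[] = {
		{ 1, 0, 0, 0, "SCTLR (global table)" },
	};

	printf("%s\n", demo_dispatch(NULL, 0, global, 1, 1, 0, 0, 0));
	printf("%s\n", demo_dispatch(NULL, 0, global, 1, 9, 9, 9, 9));
	return 0;
}
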
802static void unhandled_cp_access(struct kvm_vcpu *vcpu,
803 struct sys_reg_params *params)
804{
805 u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
806 int cp;
807
808 switch(hsr_ec) {
809 case ESR_EL2_EC_CP15_32:
810 case ESR_EL2_EC_CP15_64:
811 cp = 15;
812 break;
813 case ESR_EL2_EC_CP14_MR:
814 case ESR_EL2_EC_CP14_64:
815 cp = 14;
816 break;
817 default:
818 WARN_ON((cp = -1));
819 }
820
821 kvm_err("Unsupported guest CP%d access at: %08lx\n",
822 cp, *vcpu_pc(vcpu));
494 print_sys_reg_instr(params); 823 print_sys_reg_instr(params);
495 kvm_inject_undefined(vcpu); 824 kvm_inject_undefined(vcpu);
496} 825}
497 826
498/** 827/**
499 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access 828 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15 access
500 * @vcpu: The VCPU pointer 829 * @vcpu: The VCPU pointer
501 * @run: The kvm_run struct 830 * @run: The kvm_run struct
502 */ 831 */
503int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) 832static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
833 const struct sys_reg_desc *global,
834 size_t nr_global,
835 const struct sys_reg_desc *target_specific,
836 size_t nr_specific)
504{ 837{
505 struct sys_reg_params params; 838 struct sys_reg_params params;
506 u32 hsr = kvm_vcpu_get_hsr(vcpu); 839 u32 hsr = kvm_vcpu_get_hsr(vcpu);
@@ -529,8 +862,14 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
529 *vcpu_reg(vcpu, params.Rt) = val; 862 *vcpu_reg(vcpu, params.Rt) = val;
530 } 863 }
531 864
532 emulate_cp15(vcpu, &params); 865 if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
866 goto out;
867 if (!emulate_cp(vcpu, &params, global, nr_global))
868 goto out;
533 869
870 unhandled_cp_access(vcpu, &params);
871
872out:
534 /* Do the opposite hack for the read side */ 873 /* Do the opposite hack for the read side */
535 if (!params.is_write) { 874 if (!params.is_write) {
536 u64 val = *vcpu_reg(vcpu, params.Rt); 875 u64 val = *vcpu_reg(vcpu, params.Rt);
@@ -546,7 +885,11 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
546 * @vcpu: The VCPU pointer 885 * @vcpu: The VCPU pointer
547 * @run: The kvm_run struct 886 * @run: The kvm_run struct
548 */ 887 */
549int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) 888static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
889 const struct sys_reg_desc *global,
890 size_t nr_global,
891 const struct sys_reg_desc *target_specific,
892 size_t nr_specific)
550{ 893{
551 struct sys_reg_params params; 894 struct sys_reg_params params;
552 u32 hsr = kvm_vcpu_get_hsr(vcpu); 895 u32 hsr = kvm_vcpu_get_hsr(vcpu);
@@ -561,10 +904,51 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
561 params.Op1 = (hsr >> 14) & 0x7; 904 params.Op1 = (hsr >> 14) & 0x7;
562 params.Op2 = (hsr >> 17) & 0x7; 905 params.Op2 = (hsr >> 17) & 0x7;
563 906
564 emulate_cp15(vcpu, &params); 907 if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
908 return 1;
909 if (!emulate_cp(vcpu, &params, global, nr_global))
910 return 1;
911
912 unhandled_cp_access(vcpu, &params);
565 return 1; 913 return 1;
566} 914}
567 915
916int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
917{
918 const struct sys_reg_desc *target_specific;
919 size_t num;
920
921 target_specific = get_target_table(vcpu->arch.target, false, &num);
922 return kvm_handle_cp_64(vcpu,
923 cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
924 target_specific, num);
925}
926
927int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
928{
929 const struct sys_reg_desc *target_specific;
930 size_t num;
931
932 target_specific = get_target_table(vcpu->arch.target, false, &num);
933 return kvm_handle_cp_32(vcpu,
934 cp15_regs, ARRAY_SIZE(cp15_regs),
935 target_specific, num);
936}
937
938int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
939{
940 return kvm_handle_cp_64(vcpu,
941 cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
942 NULL, 0);
943}
944
945int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
946{
947 return kvm_handle_cp_32(vcpu,
948 cp14_regs, ARRAY_SIZE(cp14_regs),
949 NULL, 0);
950}
951
568static int emulate_sys_reg(struct kvm_vcpu *vcpu, 952static int emulate_sys_reg(struct kvm_vcpu *vcpu,
569 const struct sys_reg_params *params) 953 const struct sys_reg_params *params)
570{ 954{
@@ -776,17 +1160,15 @@ static struct sys_reg_desc invariant_sys_regs[] = {
776 NULL, get_ctr_el0 }, 1160 NULL, get_ctr_el0 },
777}; 1161};
778 1162
779static int reg_from_user(void *val, const void __user *uaddr, u64 id) 1163static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
780{ 1164{
781 /* This Just Works because we are little endian. */
782 if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) 1165 if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
783 return -EFAULT; 1166 return -EFAULT;
784 return 0; 1167 return 0;
785} 1168}
786 1169
787static int reg_to_user(void __user *uaddr, const void *val, u64 id) 1170static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
788{ 1171{
789 /* This Just Works because we are little endian. */
790 if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) 1172 if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
791 return -EFAULT; 1173 return -EFAULT;
792 return 0; 1174 return 0;
@@ -962,7 +1344,7 @@ static unsigned int num_demux_regs(void)
962 1344
963static int write_demux_regids(u64 __user *uindices) 1345static int write_demux_regids(u64 __user *uindices)
964{ 1346{
965 u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; 1347 u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
966 unsigned int i; 1348 unsigned int i;
967 1349
968 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; 1350 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
@@ -1069,14 +1451,32 @@ int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
1069 return write_demux_regids(uindices); 1451 return write_demux_regids(uindices);
1070} 1452}
1071 1453
1454static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
1455{
1456 unsigned int i;
1457
1458 for (i = 1; i < n; i++) {
1459 if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
1460 kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
1461 return 1;
1462 }
1463 }
1464
1465 return 0;
1466}
1467
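
check_sysreg_table enforces, at init time, the "sorted ascending by Op0, Op1, CRn, CRm, Op2" rule stated above the main table: every entry must compare strictly greater than its predecessor. A small standalone version of the same idea, with a lexicographic compare standing in for cmp_sys_reg (types and values are illustrative):

#include <stdio.h>

struct demo_desc { int op0, op1, crn, crm, op2; };

/* Lexicographic compare over the encoding fields, like cmp_sys_reg(). */
static int demo_cmp(const struct demo_desc *a, const struct demo_desc *b)
{
	if (a->op0 != b->op0) return a->op0 - b->op0;
	if (a->op1 != b->op1) return a->op1 - b->op1;
	if (a->crn != b->crn) return a->crn - b->crn;
	if (a->crm != b->crm) return a->crm - b->crm;
	return a->op2 - b->op2;
}

/* Return the index of the first out-of-order entry, or -1 if sorted. */
static int demo_check_table(const struct demo_desc *t, unsigned n)
{
	for (unsigned i = 1; i < n; i++)
		if (demo_cmp(&t[i - 1], &t[i]) >= 0)
			return (int)(i - 1);
	return -1;
}

int main(void)
{
	static const struct demo_desc ok[] = {
		{ 1, 0, 7, 6, 2 }, { 2, 0, 0, 0, 4 }, { 2, 0, 0, 0, 5 },
	};
	static const struct demo_desc bad[] = {
		{ 2, 0, 0, 1, 0 }, { 2, 0, 0, 0, 4 },
	};

	printf("ok table:  first bad index %d\n", demo_check_table(ok, 3));
	printf("bad table: first bad index %d\n", demo_check_table(bad, 2));
	return 0;
}
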
1072void kvm_sys_reg_table_init(void) 1468void kvm_sys_reg_table_init(void)
1073{ 1469{
1074 unsigned int i; 1470 unsigned int i;
1075 struct sys_reg_desc clidr; 1471 struct sys_reg_desc clidr;
1076 1472
1077 /* Make sure tables are unique and in order. */ 1473 /* Make sure tables are unique and in order. */
1078 for (i = 1; i < ARRAY_SIZE(sys_reg_descs); i++) 1474 BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
1079 BUG_ON(cmp_sys_reg(&sys_reg_descs[i-1], &sys_reg_descs[i]) >= 0); 1475 BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
1476 BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
1477 BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
1478 BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
1479 BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));
1080 1480
1081 /* We abuse the reset function to overwrite the table itself. */ 1481 /* We abuse the reset function to overwrite the table itself. */
1082 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) 1482 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
new file mode 100644
index 000000000000..ae211772f991
--- /dev/null
+++ b/arch/arm64/kvm/vgic-v2-switch.S
@@ -0,0 +1,133 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/linkage.h>
19#include <linux/irqchip/arm-gic.h>
20
21#include <asm/assembler.h>
22#include <asm/memory.h>
23#include <asm/asm-offsets.h>
24#include <asm/kvm.h>
25#include <asm/kvm_asm.h>
26#include <asm/kvm_arm.h>
27#include <asm/kvm_mmu.h>
28
29 .text
30 .pushsection .hyp.text, "ax"
31
32/*
33 * Save the VGIC CPU state into memory
34 * x0: Register pointing to VCPU struct
35 * Do not corrupt x1!!!
36 */
37ENTRY(__save_vgic_v2_state)
38__save_vgic_v2_state:
39 /* Get VGIC VCTRL base into x2 */
40 ldr x2, [x0, #VCPU_KVM]
41 kern_hyp_va x2
42 ldr x2, [x2, #KVM_VGIC_VCTRL]
43 kern_hyp_va x2
44 cbz x2, 2f // disabled
45
46 /* Compute the address of struct vgic_cpu */
47 add x3, x0, #VCPU_VGIC_CPU
48
49 /* Save all interesting registers */
50 ldr w4, [x2, #GICH_HCR]
51 ldr w5, [x2, #GICH_VMCR]
52 ldr w6, [x2, #GICH_MISR]
53 ldr w7, [x2, #GICH_EISR0]
54 ldr w8, [x2, #GICH_EISR1]
55 ldr w9, [x2, #GICH_ELRSR0]
56 ldr w10, [x2, #GICH_ELRSR1]
57 ldr w11, [x2, #GICH_APR]
58CPU_BE( rev w4, w4 )
59CPU_BE( rev w5, w5 )
60CPU_BE( rev w6, w6 )
61CPU_BE( rev w7, w7 )
62CPU_BE( rev w8, w8 )
63CPU_BE( rev w9, w9 )
64CPU_BE( rev w10, w10 )
65CPU_BE( rev w11, w11 )
66
67 str w4, [x3, #VGIC_V2_CPU_HCR]
68 str w5, [x3, #VGIC_V2_CPU_VMCR]
69 str w6, [x3, #VGIC_V2_CPU_MISR]
70 str w7, [x3, #VGIC_V2_CPU_EISR]
71 str w8, [x3, #(VGIC_V2_CPU_EISR + 4)]
72 str w9, [x3, #VGIC_V2_CPU_ELRSR]
73 str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)]
74 str w11, [x3, #VGIC_V2_CPU_APR]
75
76 /* Clear GICH_HCR */
77 str wzr, [x2, #GICH_HCR]
78
79 /* Save list registers */
80 add x2, x2, #GICH_LR0
81 ldr w4, [x3, #VGIC_CPU_NR_LR]
82 add x3, x3, #VGIC_V2_CPU_LR
831: ldr w5, [x2], #4
84CPU_BE( rev w5, w5 )
85 str w5, [x3], #4
86 sub w4, w4, #1
87 cbnz w4, 1b
882:
89 ret
90ENDPROC(__save_vgic_v2_state)
91
92/*
93 * Restore the VGIC CPU state from memory
94 * x0: Register pointing to VCPU struct
95 */
96ENTRY(__restore_vgic_v2_state)
97__restore_vgic_v2_state:
98 /* Get VGIC VCTRL base into x2 */
99 ldr x2, [x0, #VCPU_KVM]
100 kern_hyp_va x2
101 ldr x2, [x2, #KVM_VGIC_VCTRL]
102 kern_hyp_va x2
103 cbz x2, 2f // disabled
104
105 /* Compute the address of struct vgic_cpu */
106 add x3, x0, #VCPU_VGIC_CPU
107
108 /* We only restore a minimal set of registers */
109 ldr w4, [x3, #VGIC_V2_CPU_HCR]
110 ldr w5, [x3, #VGIC_V2_CPU_VMCR]
111 ldr w6, [x3, #VGIC_V2_CPU_APR]
112CPU_BE( rev w4, w4 )
113CPU_BE( rev w5, w5 )
114CPU_BE( rev w6, w6 )
115
116 str w4, [x2, #GICH_HCR]
117 str w5, [x2, #GICH_VMCR]
118 str w6, [x2, #GICH_APR]
119
120 /* Restore list registers */
121 add x2, x2, #GICH_LR0
122 ldr w4, [x3, #VGIC_CPU_NR_LR]
123 add x3, x3, #VGIC_V2_CPU_LR
1241: ldr w5, [x3], #4
125CPU_BE( rev w5, w5 )
126 str w5, [x2], #4
127 sub w4, w4, #1
128 cbnz w4, 1b
1292:
130 ret
131ENDPROC(__restore_vgic_v2_state)
132
133 .popsection
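
The CPU_BE(rev ...) lines above exist because the GICH_* window is little-endian MMIO: on a big-endian host every 32-bit value has to be byte-reversed between the device and its in-memory copy, while on little-endian the swap disappears entirely. A C rendering of the same idea (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t demo_le32_to_cpu(uint32_t v)
{
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
	return __builtin_bswap32(v);	/* rev w, w on a BE host */
#else
	return v;			/* no-op on a LE host */
#endif
}

int main(void)
{
	uint32_t hcr_mmio = 0x00000001;	/* value as seen through the LE window */

	printf("GICH_HCR as the CPU sees it: %#x\n", (unsigned)demo_le32_to_cpu(hcr_mmio));
	return 0;
}
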
diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S
new file mode 100644
index 000000000000..d16046999e06
--- /dev/null
+++ b/arch/arm64/kvm/vgic-v3-switch.S
@@ -0,0 +1,267 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/linkage.h>
19#include <linux/irqchip/arm-gic-v3.h>
20
21#include <asm/assembler.h>
22#include <asm/memory.h>
23#include <asm/asm-offsets.h>
24#include <asm/kvm.h>
25#include <asm/kvm_asm.h>
26#include <asm/kvm_arm.h>
27
28 .text
29 .pushsection .hyp.text, "ax"
30
31/*
32 * We store LRs in reverse order to let the CPU deal with streaming
33 * access. Use this macro to make it look saner...
34 */
35#define LR_OFFSET(n) (VGIC_V3_CPU_LR + (15 - n) * 8)
36
37/*
38 * Save the VGIC CPU state into memory
39 * x0: Register pointing to VCPU struct
40 * Do not corrupt x1!!!
41 */
42.macro save_vgic_v3_state
43 // Compute the address of struct vgic_cpu
44 add x3, x0, #VCPU_VGIC_CPU
45
46 // Make sure stores to the GIC via the memory mapped interface
47 // are now visible to the system register interface
48 dsb st
49
50 // Save all interesting registers
51 mrs_s x4, ICH_HCR_EL2
52 mrs_s x5, ICH_VMCR_EL2
53 mrs_s x6, ICH_MISR_EL2
54 mrs_s x7, ICH_EISR_EL2
55 mrs_s x8, ICH_ELSR_EL2
56
57 str w4, [x3, #VGIC_V3_CPU_HCR]
58 str w5, [x3, #VGIC_V3_CPU_VMCR]
59 str w6, [x3, #VGIC_V3_CPU_MISR]
60 str w7, [x3, #VGIC_V3_CPU_EISR]
61 str w8, [x3, #VGIC_V3_CPU_ELRSR]
62
63 msr_s ICH_HCR_EL2, xzr
64
65 mrs_s x21, ICH_VTR_EL2
66 mvn w22, w21
67 ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4
68
69 adr x24, 1f
70 add x24, x24, x23
71 br x24
72
731:
74 mrs_s x20, ICH_LR15_EL2
75 mrs_s x19, ICH_LR14_EL2
76 mrs_s x18, ICH_LR13_EL2
77 mrs_s x17, ICH_LR12_EL2
78 mrs_s x16, ICH_LR11_EL2
79 mrs_s x15, ICH_LR10_EL2
80 mrs_s x14, ICH_LR9_EL2
81 mrs_s x13, ICH_LR8_EL2
82 mrs_s x12, ICH_LR7_EL2
83 mrs_s x11, ICH_LR6_EL2
84 mrs_s x10, ICH_LR5_EL2
85 mrs_s x9, ICH_LR4_EL2
86 mrs_s x8, ICH_LR3_EL2
87 mrs_s x7, ICH_LR2_EL2
88 mrs_s x6, ICH_LR1_EL2
89 mrs_s x5, ICH_LR0_EL2
90
91 adr x24, 1f
92 add x24, x24, x23
93 br x24
94
951:
96 str x20, [x3, #LR_OFFSET(15)]
97 str x19, [x3, #LR_OFFSET(14)]
98 str x18, [x3, #LR_OFFSET(13)]
99 str x17, [x3, #LR_OFFSET(12)]
100 str x16, [x3, #LR_OFFSET(11)]
101 str x15, [x3, #LR_OFFSET(10)]
102 str x14, [x3, #LR_OFFSET(9)]
103 str x13, [x3, #LR_OFFSET(8)]
104 str x12, [x3, #LR_OFFSET(7)]
105 str x11, [x3, #LR_OFFSET(6)]
106 str x10, [x3, #LR_OFFSET(5)]
107 str x9, [x3, #LR_OFFSET(4)]
108 str x8, [x3, #LR_OFFSET(3)]
109 str x7, [x3, #LR_OFFSET(2)]
110 str x6, [x3, #LR_OFFSET(1)]
111 str x5, [x3, #LR_OFFSET(0)]
112
113 tbnz w21, #29, 6f // 6 bits
114 tbz w21, #30, 5f // 5 bits
115 // 7 bits
116 mrs_s x20, ICH_AP0R3_EL2
117 str w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
118 mrs_s x19, ICH_AP0R2_EL2
119 str w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
1206: mrs_s x18, ICH_AP0R1_EL2
121 str w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
1225: mrs_s x17, ICH_AP0R0_EL2
123 str w17, [x3, #VGIC_V3_CPU_AP0R]
124
125 tbnz w21, #29, 6f // 6 bits
126 tbz w21, #30, 5f // 5 bits
127 // 7 bits
128 mrs_s x20, ICH_AP1R3_EL2
129 str w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
130 mrs_s x19, ICH_AP1R2_EL2
131 str w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
1326: mrs_s x18, ICH_AP1R1_EL2
133 str w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
1345: mrs_s x17, ICH_AP1R0_EL2
135 str w17, [x3, #VGIC_V3_CPU_AP1R]
136
137 // Restore SRE_EL1 access and re-enable SRE at EL1.
138 mrs_s x5, ICC_SRE_EL2
139 orr x5, x5, #ICC_SRE_EL2_ENABLE
140 msr_s ICC_SRE_EL2, x5
141 isb
142 mov x5, #1
143 msr_s ICC_SRE_EL1, x5
144.endm
145
146/*
147 * Restore the VGIC CPU state from memory
148 * x0: Register pointing to VCPU struct
149 */
150.macro restore_vgic_v3_state
151 // Disable SRE_EL1 access. Necessary, otherwise
152 // ICH_VMCR_EL2.VFIQEn becomes one, and FIQ happens...
153 msr_s ICC_SRE_EL1, xzr
154 isb
155
156 // Compute the address of struct vgic_cpu
157 add x3, x0, #VCPU_VGIC_CPU
158
159 // Restore all interesting registers
160 ldr w4, [x3, #VGIC_V3_CPU_HCR]
161 ldr w5, [x3, #VGIC_V3_CPU_VMCR]
162
163 msr_s ICH_HCR_EL2, x4
164 msr_s ICH_VMCR_EL2, x5
165
166 mrs_s x21, ICH_VTR_EL2
167
168 tbnz w21, #29, 6f // 6 bits
169 tbz w21, #30, 5f // 5 bits
170 // 7 bits
171 ldr w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
172 msr_s ICH_AP1R3_EL2, x20
173 ldr w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
174 msr_s ICH_AP1R2_EL2, x19
1756: ldr w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
176 msr_s ICH_AP1R1_EL2, x18
1775: ldr w17, [x3, #VGIC_V3_CPU_AP1R]
178 msr_s ICH_AP1R0_EL2, x17
179
180 tbnz w21, #29, 6f // 6 bits
181 tbz w21, #30, 5f // 5 bits
182 // 7 bits
183 ldr w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
184 msr_s ICH_AP0R3_EL2, x20
185 ldr w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
186 msr_s ICH_AP0R2_EL2, x19
1876: ldr w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
188 msr_s ICH_AP0R1_EL2, x18
1895: ldr w17, [x3, #VGIC_V3_CPU_AP0R]
190 msr_s ICH_AP0R0_EL2, x17
191
192 and w22, w21, #0xf
193 mvn w22, w21
194 ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4
195
196 adr x24, 1f
197 add x24, x24, x23
198 br x24
199
2001:
201 ldr x20, [x3, #LR_OFFSET(15)]
202 ldr x19, [x3, #LR_OFFSET(14)]
203 ldr x18, [x3, #LR_OFFSET(13)]
204 ldr x17, [x3, #LR_OFFSET(12)]
205 ldr x16, [x3, #LR_OFFSET(11)]
206 ldr x15, [x3, #LR_OFFSET(10)]
207 ldr x14, [x3, #LR_OFFSET(9)]
208 ldr x13, [x3, #LR_OFFSET(8)]
209 ldr x12, [x3, #LR_OFFSET(7)]
210 ldr x11, [x3, #LR_OFFSET(6)]
211 ldr x10, [x3, #LR_OFFSET(5)]
212 ldr x9, [x3, #LR_OFFSET(4)]
213 ldr x8, [x3, #LR_OFFSET(3)]
214 ldr x7, [x3, #LR_OFFSET(2)]
215 ldr x6, [x3, #LR_OFFSET(1)]
216 ldr x5, [x3, #LR_OFFSET(0)]
217
218 adr x24, 1f
219 add x24, x24, x23
220 br x24
221
2221:
223 msr_s ICH_LR15_EL2, x20
224 msr_s ICH_LR14_EL2, x19
225 msr_s ICH_LR13_EL2, x18
226 msr_s ICH_LR12_EL2, x17
227 msr_s ICH_LR11_EL2, x16
228 msr_s ICH_LR10_EL2, x15
229 msr_s ICH_LR9_EL2, x14
230 msr_s ICH_LR8_EL2, x13
231 msr_s ICH_LR7_EL2, x12
232 msr_s ICH_LR6_EL2, x11
233 msr_s ICH_LR5_EL2, x10
234 msr_s ICH_LR4_EL2, x9
235 msr_s ICH_LR3_EL2, x8
236 msr_s ICH_LR2_EL2, x7
237 msr_s ICH_LR1_EL2, x6
238 msr_s ICH_LR0_EL2, x5
239
240 // Ensure that the above will have reached the
241 // (re)distributors. This ensures the guest will read
242 // the correct values from the memory-mapped interface.
243 isb
244 dsb sy
245
246 // Prevent the guest from touching the GIC system registers
247 mrs_s x5, ICC_SRE_EL2
248 and x5, x5, #~ICC_SRE_EL2_ENABLE
249 msr_s ICC_SRE_EL2, x5
250.endm
251
252ENTRY(__save_vgic_v3_state)
253 save_vgic_v3_state
254 ret
255ENDPROC(__save_vgic_v3_state)
256
257ENTRY(__restore_vgic_v3_state)
258 restore_vgic_v3_state
259 ret
260ENDPROC(__restore_vgic_v3_state)
261
262ENTRY(__vgic_v3_get_ich_vtr_el2)
263 mrs_s x0, ICH_VTR_EL2
264 ret
265ENDPROC(__vgic_v3_get_ich_vtr_el2)
266
267 .popsection
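
The mvn/ubfiz pairs above implement the "(15 - ListRegs) * 4" noted in the comments: the low bits of ICH_VTR_EL2 hold the number of implemented list registers minus one, and the computed branch jumps over the accessors for the unimplemented LRs (one 4-byte instruction each) so only implemented list registers are ever touched. The same arithmetic in C, as a sketch:

#include <stdint.h>
#include <stdio.h>

static uint32_t demo_lr_skip_bytes(uint32_t ich_vtr)
{
	uint32_t listregs = ich_vtr & 0xf;	/* implemented LRs minus one */

	return ((~listregs) & 0xf) * 4;		/* (15 - ListRegs) * 4, as mvn + ubfiz */
}

int main(void)
{
	/* e.g. 4 list registers implemented -> ListRegs field reads 3 */
	printf("skip %u bytes of LR accessors\n", (unsigned)demo_lr_skip_bytes(3));
	return 0;
}
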