author     Mark Rutland <mark.rutland@arm.com>    2014-11-24 08:59:30 -0500
committer  Mark Rutland <mark.rutland@arm.com>    2015-01-15 07:24:25 -0500
commit     c6d01a947a51193e839516165286bc8d14a0e409 (patch)
tree       da329869e3bd4187a6ebfc4f80ef0efd31629c07
parent     60a1f02c9e91e0796b54e83b14fb8a07f7a568b6 (diff)
arm64: kvm: move to ESR_ELx macros
Now that we have common ESR_ELx macros, make use of them in the arm64
KVM code. The addition of <asm/esr.h> to the include path highlighted
badly ordered (i.e. not alphabetical) include lists; these are changed
to alphabetical order.
There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Peter Maydell <peter.maydell@linaro.org>
Cc: Will Deacon <will.deacon@arm.com>
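
The rename applied throughout this patch is mechanical: the Exception Syndrome Register has the same layout at EL1 and EL2, so the old per-level ESR_EL1_*/ESR_EL2_* constants collapse into shared ESR_ELx_* definitions in <asm/esr.h>. As a rough illustration of why one set of macros suffices (field positions shown here are examples; the authoritative values live in arch/arm64/include/asm/esr.h):

```c
/* Illustrative sketch: one common set of ESR field macros works at any EL,
 * because the register layout is shared. Values here are examples only. */
#define ESR_ELx_EC_SHIFT  26            /* exception class: ESR[31:26] */
#define ESR_ELx_IL        (1UL << 25)   /* 32-bit instruction length bit */

/* Usable on ESR_EL1 and ESR_EL2 values alike. */
unsigned int esr_ec(unsigned long esr)
{
        return esr >> ESR_ELx_EC_SHIFT;
}
```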
-rw-r--r--   arch/arm64/include/asm/kvm_emulate.h   28
-rw-r--r--   arch/arm64/kvm/emulate.c                5
-rw-r--r--   arch/arm64/kvm/handle_exit.c           32
-rw-r--r--   arch/arm64/kvm/hyp.S                   17
-rw-r--r--   arch/arm64/kvm/inject_fault.c          14
-rw-r--r--   arch/arm64/kvm/sys_regs.c              23

6 files changed, 64 insertions(+), 55 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 8127e45e2637..5c56c0d2cef1 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -23,8 +23,10 @@
 #define __ARM64_KVM_EMULATE_H__

 #include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
+
+#include <asm/esr.h>
 #include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
 #include <asm/ptrace.h>

@@ -128,63 +130,63 @@ static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)

 static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
-        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV);
+        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
 }

 static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR);
+        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
 }

 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
-        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE);
+        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
 }

 static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
-        return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT;
+        return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }

 static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 {
-        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA);
+        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA);
 }

 static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
-        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW);
+        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
 }

 static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
-        return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT);
+        return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }

 /* This one is not specific to Data Abort */
 static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
-        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL);
+        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
 }

 static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
-        return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT;
+        return kvm_vcpu_get_hsr(vcpu) >> ESR_ELx_EC_SHIFT;
 }

 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 {
-        return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT;
+        return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
 }

 static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
-        return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC;
+        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
 }

 static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
-        return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
+        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
 }

 static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
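
Every accessor converted above is an instance of the same mask-and-shift idiom: single-bit flags are tested with `!!`, multi-bit fields are masked and then shifted down to bit zero. A self-contained demo of the pattern (field positions invented for the example, not the architectural ones):

```c
#include <stdio.h>

/* Invented field positions for demonstration; not the architectural ones. */
#define DEMO_ISV       (1U << 24)                /* single-bit "syndrome valid" flag */
#define DEMO_SRT_SHIFT 16
#define DEMO_SRT_MASK  (0x1fU << DEMO_SRT_SHIFT) /* 5-bit register number field */

static int demo_isvalid(unsigned int esr) { return !!(esr & DEMO_ISV); }
static int demo_get_rd(unsigned int esr)  { return (esr & DEMO_SRT_MASK) >> DEMO_SRT_SHIFT; }

int main(void)
{
        unsigned int esr = DEMO_ISV | (5U << DEMO_SRT_SHIFT);
        printf("valid=%d rd=%d\n", demo_isvalid(esr), demo_get_rd(esr)); /* valid=1 rd=5 */
        return 0;
}
```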
diff --git a/arch/arm64/kvm/emulate.c b/arch/arm64/kvm/emulate.c
index 124418d17049..f87d8fbaa48d 100644
--- a/arch/arm64/kvm/emulate.c
+++ b/arch/arm64/kvm/emulate.c
@@ -22,6 +22,7 @@
  */

 #include <linux/kvm_host.h>
+#include <asm/esr.h>
 #include <asm/kvm_emulate.h>

 /*
@@ -55,8 +56,8 @@ static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
         u32 esr = kvm_vcpu_get_hsr(vcpu);

-        if (esr & ESR_EL2_CV)
-                return (esr & ESR_EL2_COND) >> ESR_EL2_COND_SHIFT;
+        if (esr & ESR_ELx_CV)
+                return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

         return -1;
 }
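
Note the small clarity win hiding in this hunk: ESR_EL2_COND becomes ESR_ELx_COND_MASK, making explicit that the macro is a mask. The CV bit gates the COND field, so the 4-bit condition code is only meaningful when CV is set. A standalone sketch of the same check (bit positions are placeholders; see <asm/esr.h> for the real ones):

```c
/* Placeholder positions for CV and COND; illustrative only. */
#define DEMO_CV         (1U << 24)
#define DEMO_COND_SHIFT 20
#define DEMO_COND_MASK  (0xfU << DEMO_COND_SHIFT)

/* Mirrors kvm_vcpu_get_condition(): condition code if valid, -1 otherwise. */
static int demo_get_condition(unsigned int esr)
{
        if (esr & DEMO_CV)
                return (esr & DEMO_COND_MASK) >> DEMO_COND_SHIFT;
        return -1;
}

int main(void)
{
        unsigned int esr = DEMO_CV | (0xeU << DEMO_COND_SHIFT); /* COND = 0b1110 (AL) */
        return demo_get_condition(esr) == 0xe ? 0 : 1;
}
```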
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 34b8bd0711e9..bcbc923d3060 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -21,8 +21,10 @@

 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
-#include <asm/kvm_emulate.h>
+
+#include <asm/esr.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_psci.h>

@@ -61,7 +63,7 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
  */
 static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-        if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE)
+        if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE)
                 kvm_vcpu_on_spin(vcpu);
         else
                 kvm_vcpu_block(vcpu);
@@ -72,19 +74,19 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 }

 static exit_handle_fn arm_exit_handlers[] = {
-        [ESR_EL2_EC_WFI]        = kvm_handle_wfx,
-        [ESR_EL2_EC_CP15_32]    = kvm_handle_cp15_32,
-        [ESR_EL2_EC_CP15_64]    = kvm_handle_cp15_64,
-        [ESR_EL2_EC_CP14_MR]    = kvm_handle_cp14_32,
-        [ESR_EL2_EC_CP14_LS]    = kvm_handle_cp14_load_store,
-        [ESR_EL2_EC_CP14_64]    = kvm_handle_cp14_64,
-        [ESR_EL2_EC_HVC32]      = handle_hvc,
-        [ESR_EL2_EC_SMC32]      = handle_smc,
-        [ESR_EL2_EC_HVC64]      = handle_hvc,
-        [ESR_EL2_EC_SMC64]      = handle_smc,
-        [ESR_EL2_EC_SYS64]      = kvm_handle_sys_reg,
-        [ESR_EL2_EC_IABT]       = kvm_handle_guest_abort,
-        [ESR_EL2_EC_DABT]       = kvm_handle_guest_abort,
+        [ESR_ELx_EC_WFx]        = kvm_handle_wfx,
+        [ESR_ELx_EC_CP15_32]    = kvm_handle_cp15_32,
+        [ESR_ELx_EC_CP15_64]    = kvm_handle_cp15_64,
+        [ESR_ELx_EC_CP14_MR]    = kvm_handle_cp14_32,
+        [ESR_ELx_EC_CP14_LS]    = kvm_handle_cp14_load_store,
+        [ESR_ELx_EC_CP14_64]    = kvm_handle_cp14_64,
+        [ESR_ELx_EC_HVC32]      = handle_hvc,
+        [ESR_ELx_EC_SMC32]      = handle_smc,
+        [ESR_ELx_EC_HVC64]      = handle_hvc,
+        [ESR_ELx_EC_SMC64]      = handle_smc,
+        [ESR_ELx_EC_SYS64]      = kvm_handle_sys_reg,
+        [ESR_ELx_EC_IABT_LOW]   = kvm_handle_guest_abort,
+        [ESR_ELx_EC_DABT_LOW]   = kvm_handle_guest_abort,
 };

 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
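
arm_exit_handlers[] relies on C99 designated initializers: the exception class is used directly as the array index, and any class without an entry stays NULL and falls through to the unknown-exit path. The same dispatch pattern in miniature (EC numbers invented for the demo; the kernel's come from <asm/esr.h>):

```c
#include <stdio.h>

/* Invented exception-class numbers for the demo. */
enum { DEMO_EC_WFX = 1, DEMO_EC_HVC64 = 22, DEMO_EC_MAX = 64 };

typedef int (*exit_handler_fn)(void);

static int handle_wfx(void) { puts("wfx trap"); return 1; }
static int handle_hvc(void) { puts("hvc call"); return 1; }

/* Sparse table: unlisted entries are implicitly NULL. */
static exit_handler_fn demo_handlers[DEMO_EC_MAX] = {
        [DEMO_EC_WFX]   = handle_wfx,
        [DEMO_EC_HVC64] = handle_hvc,
};

static int demo_dispatch(unsigned int ec)
{
        if (ec < DEMO_EC_MAX && demo_handlers[ec])
                return demo_handlers[ec]();
        return -1; /* unknown exit reason */
}

int main(void)
{
        return demo_dispatch(DEMO_EC_HVC64) == 1 ? 0 : 1;
}
```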
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index fbe909fb0a1a..c0d820280a5e 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -17,15 +17,16 @@

 #include <linux/linkage.h>

-#include <asm/assembler.h>
-#include <asm/memory.h>
 #include <asm/asm-offsets.h>
+#include <asm/assembler.h>
 #include <asm/debug-monitors.h>
+#include <asm/esr.h>
 #include <asm/fpsimdmacros.h>
 #include <asm/kvm.h>
-#include <asm/kvm_asm.h>
 #include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/memory.h>

 #define CPU_GP_REG_OFFSET(x)    (CPU_GP_REGS + x)
 #define CPU_XREG_OFFSET(x)      CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
@@ -1140,9 +1141,9 @@ el1_sync:                       // Guest trapped into EL2
         push    x2, x3

         mrs     x1, esr_el2
-        lsr     x2, x1, #ESR_EL2_EC_SHIFT
+        lsr     x2, x1, #ESR_ELx_EC_SHIFT

-        cmp     x2, #ESR_EL2_EC_HVC64
+        cmp     x2, #ESR_ELx_EC_HVC64
         b.ne    el1_trap

         mrs     x3, vttbr_el2           // If vttbr is valid, the 64bit guest
@@ -1177,13 +1178,13 @@ el1_trap:
          * x1: ESR
          * x2: ESR_EC
          */
-        cmp     x2, #ESR_EL2_EC_DABT
-        mov     x0, #ESR_EL2_EC_IABT
+        cmp     x2, #ESR_ELx_EC_DABT_LOW
+        mov     x0, #ESR_ELx_EC_IABT_LOW
         ccmp    x2, x0, #4, ne
         b.ne    1f                      // Not an abort we care about

         /* This is an abort. Check for permission fault */
-        and     x2, x1, #ESR_EL2_FSC_TYPE
+        and     x2, x1, #ESR_ELx_FSC_TYPE
         cmp     x2, #FSC_PERM
         b.ne    1f                      // Not a permission fault

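
The assembly fast path asks two questions: is this a data or instruction abort from a lower EL (the ccmp folds the two comparisons into a single conditional branch), and if so, does the fault-status type indicate a permission fault? A rough C rendering of the same test, with stand-in constants in place of the ESR_ELx_EC_*ABT_LOW and FSC definitions:

```c
/* Stand-ins for the ESR_ELx_EC_*ABT_LOW, ESR_ELx_FSC_TYPE and FSC_PERM values. */
#define DEMO_EC_IABT_LOW 0x20U
#define DEMO_EC_DABT_LOW 0x24U
#define DEMO_FSC_TYPE    0x3cUL
#define DEMO_FSC_PERM    0x0cUL

/* Equivalent of the el1_trap cmp/ccmp/and/cmp sequence above. */
static int demo_is_perm_fault(unsigned long esr, unsigned int ec)
{
        if (ec != DEMO_EC_DABT_LOW && ec != DEMO_EC_IABT_LOW)
                return 0; /* not an abort we care about */
        return (esr & DEMO_FSC_TYPE) == DEMO_FSC_PERM;
}

int main(void)
{
        /* data abort from a lower EL with a permission-fault FSC -> match */
        return demo_is_perm_fault(DEMO_FSC_PERM, DEMO_EC_DABT_LOW) ? 0 : 1;
}
```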
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 81a02a8762b0..f02530e726f6 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -118,27 +118,27 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
          * instruction set. Report an external synchronous abort.
          */
         if (kvm_vcpu_trap_il_is32bit(vcpu))
-                esr |= ESR_EL1_IL;
+                esr |= ESR_ELx_IL;

         /*
          * Here, the guest runs in AArch64 mode when in EL1. If we get
          * an AArch32 fault, it means we managed to trap an EL0 fault.
          */
         if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
-                esr |= (ESR_EL1_EC_IABT_EL0 << ESR_EL1_EC_SHIFT);
+                esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
         else
-                esr |= (ESR_EL1_EC_IABT_EL1 << ESR_EL1_EC_SHIFT);
+                esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

         if (!is_iabt)
-                esr |= ESR_EL1_EC_DABT_EL0;
+                esr |= ESR_ELx_EC_DABT_LOW;

-        vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_EL2_EC_xABT_xFSR_EXTABT;
+        vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
 }

 static void inject_undef64(struct kvm_vcpu *vcpu)
 {
         unsigned long cpsr = *vcpu_cpsr(vcpu);
-        u32 esr = (ESR_EL1_EC_UNKNOWN << ESR_EL1_EC_SHIFT);
+        u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

         *vcpu_spsr(vcpu) = cpsr;
         *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
@@ -151,7 +151,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
          * set.
          */
         if (kvm_vcpu_trap_il_is32bit(vcpu))
-                esr |= ESR_EL1_IL;
+                esr |= ESR_ELx_IL;

         vcpu_sys_reg(vcpu, ESR_EL1) = esr;
 }
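
Unlike the decode helpers, inject_abt64() composes an ESR_EL1 value from parts: an exception class shifted into the top bits, optionally the IL bit when the trapped instruction was 32-bit, and an external-abort fault status in the low bits. The same composition in isolation (constants again illustrative stand-ins, not the kernel's definitions):

```c
/* Illustrative stand-ins for the pieces inject_abt64() combines. */
#define DEMO_EC_SHIFT    26
#define DEMO_EC_IABT_LOW 0x20UL
#define DEMO_IL          (1UL << 25)
#define DEMO_FSC_EXTABT  0x10UL

unsigned long demo_make_abort_esr(int trapped_insn_is_32bit)
{
        unsigned long esr = DEMO_EC_IABT_LOW << DEMO_EC_SHIFT;

        if (trapped_insn_is_32bit)
                esr |= DEMO_IL;       /* instruction length bit */

        return esr | DEMO_FSC_EXTABT; /* synchronous external abort */
}
```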
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 3d7c2df89946..6b859d7a48e7 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -20,17 +20,20 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */

-#include <linux/mm.h>
 #include <linux/kvm_host.h>
+#include <linux/mm.h>
 #include <linux/uaccess.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_host.h>
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_coproc.h>
-#include <asm/kvm_mmu.h>
+
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
 #include <asm/debug-monitors.h>
+#include <asm/esr.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_mmu.h>
+
 #include <trace/events/kvm.h>

 #include "sys_regs.h"
@@ -815,12 +818,12 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
         int cp;

         switch(hsr_ec) {
-        case ESR_EL2_EC_CP15_32:
-        case ESR_EL2_EC_CP15_64:
+        case ESR_ELx_EC_CP15_32:
+        case ESR_ELx_EC_CP15_64:
                 cp = 15;
                 break;
-        case ESR_EL2_EC_CP14_MR:
-        case ESR_EL2_EC_CP14_64:
+        case ESR_ELx_EC_CP14_MR:
+        case ESR_ELx_EC_CP14_64:
                 cp = 14;
                 break;
         default:
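
The sys_regs.c change is purely the EC spelling in a switch that maps a trapped coprocessor access back to its coprocessor number. Standalone, the same mapping looks like this (demo EC values; the real ESR_ELx_EC_CP1{4,5}_* constants differ):

```c
/* Demo EC values; the real ESR_ELx_EC_CP1{4,5}_* constants differ. */
enum demo_ec { DEMO_EC_CP15_32, DEMO_EC_CP15_64, DEMO_EC_CP14_MR, DEMO_EC_CP14_64 };

int demo_trapped_cp(enum demo_ec ec)
{
        switch (ec) {
        case DEMO_EC_CP15_32:
        case DEMO_EC_CP15_64:
                return 15;
        case DEMO_EC_CP14_MR:
        case DEMO_EC_CP14_64:
                return 14;
        default:
                return -1; /* not a coprocessor access */
        }
}
```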