diff options
author | Mark Rutland <mark.rutland@arm.com> | 2014-11-24 08:59:30 -0500 |
---|---|---|
committer | Mark Rutland <mark.rutland@arm.com> | 2015-01-15 07:24:25 -0500 |
commit | c6d01a947a51193e839516165286bc8d14a0e409 (patch) | |
tree | da329869e3bd4187a6ebfc4f80ef0efd31629c07 /arch/arm64/include | |
parent | 60a1f02c9e91e0796b54e83b14fb8a07f7a568b6 (diff) |
arm64: kvm: move to ESR_ELx macros
Now that we have common ESR_ELx macros, make use of them in the arm64
KVM code. The addition of <asm/esr.h> to the include path highlighted
badly ordered (i.e. not alphabetical) include lists; these are changed
to alphabetical order.
There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Peter Maydell <peter.maydell@linaro.org>
Cc: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm64/include')
-rw-r--r-- | arch/arm64/include/asm/kvm_emulate.h | 28 |
1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 8127e45e2637..5c56c0d2cef1 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h | |||
@@ -23,8 +23,10 @@ | |||
23 | #define __ARM64_KVM_EMULATE_H__ | 23 | #define __ARM64_KVM_EMULATE_H__ |
24 | 24 | ||
25 | #include <linux/kvm_host.h> | 25 | #include <linux/kvm_host.h> |
26 | #include <asm/kvm_asm.h> | 26 | |
27 | #include <asm/esr.h> | ||
27 | #include <asm/kvm_arm.h> | 28 | #include <asm/kvm_arm.h> |
29 | #include <asm/kvm_asm.h> | ||
28 | #include <asm/kvm_mmio.h> | 30 | #include <asm/kvm_mmio.h> |
29 | #include <asm/ptrace.h> | 31 | #include <asm/ptrace.h> |
30 | 32 | ||
@@ -128,63 +130,63 @@ static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu) | |||
128 | 130 | ||
129 | static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu) | 131 | static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu) |
130 | { | 132 | { |
131 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV); | 133 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV); |
132 | } | 134 | } |
133 | 135 | ||
134 | static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) | 136 | static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) |
135 | { | 137 | { |
136 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR); | 138 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR); |
137 | } | 139 | } |
138 | 140 | ||
139 | static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu) | 141 | static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu) |
140 | { | 142 | { |
141 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE); | 143 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE); |
142 | } | 144 | } |
143 | 145 | ||
144 | static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) | 146 | static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) |
145 | { | 147 | { |
146 | return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT; | 148 | return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT; |
147 | } | 149 | } |
148 | 150 | ||
149 | static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) | 151 | static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) |
150 | { | 152 | { |
151 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA); | 153 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA); |
152 | } | 154 | } |
153 | 155 | ||
154 | static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) | 156 | static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) |
155 | { | 157 | { |
156 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW); | 158 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW); |
157 | } | 159 | } |
158 | 160 | ||
159 | static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu) | 161 | static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu) |
160 | { | 162 | { |
161 | return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT); | 163 | return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT); |
162 | } | 164 | } |
163 | 165 | ||
164 | /* This one is not specific to Data Abort */ | 166 | /* This one is not specific to Data Abort */ |
165 | static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu) | 167 | static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu) |
166 | { | 168 | { |
167 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL); | 169 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL); |
168 | } | 170 | } |
169 | 171 | ||
170 | static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu) | 172 | static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu) |
171 | { | 173 | { |
172 | return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT; | 174 | return kvm_vcpu_get_hsr(vcpu) >> ESR_ELx_EC_SHIFT; |
173 | } | 175 | } |
174 | 176 | ||
175 | static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) | 177 | static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) |
176 | { | 178 | { |
177 | return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT; | 179 | return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW; |
178 | } | 180 | } |
179 | 181 | ||
180 | static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) | 182 | static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) |
181 | { | 183 | { |
182 | return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC; | 184 | return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC; |
183 | } | 185 | } |
184 | 186 | ||
185 | static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu) | 187 | static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu) |
186 | { | 188 | { |
187 | return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE; | 189 | return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE; |
188 | } | 190 | } |
189 | 191 | ||
190 | static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu) | 192 | static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu) |