diff options
author | Marc Zyngier <marc.zyngier@arm.com> | 2016-02-09 13:53:04 -0500 |
---|---|---|
committer | Marc Zyngier <marc.zyngier@arm.com> | 2016-03-08 23:24:06 -0500 |
commit | b4344545cf85d2a6ad546ec21dab5f76487e020e (patch) | |
tree | b495bbd8a0579cfa38c3d7fca848c6e4f7e23cac | |
parent | 1b8e83c04ee2c05c0cd0d304c4b389adf24ebe7f (diff) |
arm64: KVM: vgic-v3: Save maintenance interrupt state only if required
Next on our list of useless accesses is the maintenance interrupt
status registers (ICH_MISR_EL2, ICH_EISR_EL2).
It is pointless to save them if we haven't asked for a maintenance
interrupt in the first place, which can only happen for two reasons:
- Underflow: ICH_HCR_UIE will be set,
- EOI: ICH_LR_EOI will be set.
These conditions can be checked on the in-memory copies of the regs.
Should either of these two conditions be true, we must read ICH_MISR_EL2.
We can then check for ICH_MISR_EOI, and only when set read
ICH_EISR_EL2.
This means that in most cases, we don't have to save them at all.
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
-rw-r--r-- | arch/arm64/kvm/hyp/vgic-v3-sr.c | 33 |
1 files changed, 31 insertions, 2 deletions
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index e596945a88f7..61a5e46b4335 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c | |||
@@ -131,6 +131,35 @@ static void __hyp_text __gic_v3_set_lr(u64 val, int lr) | |||
131 | } | 131 | } |
132 | } | 132 | } |
133 | 133 | ||
134 | static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, int nr_lr) | ||
135 | { | ||
136 | struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; | ||
137 | int i; | ||
138 | bool expect_mi; | ||
139 | |||
140 | expect_mi = !!(cpu_if->vgic_hcr & ICH_HCR_UIE); | ||
141 | |||
142 | for (i = 0; i < nr_lr; i++) { | ||
143 | if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) | ||
144 | continue; | ||
145 | |||
146 | expect_mi |= (!(cpu_if->vgic_lr[i] & ICH_LR_HW) && | ||
147 | (cpu_if->vgic_lr[i] & ICH_LR_EOI)); | ||
148 | } | ||
149 | |||
150 | if (expect_mi) { | ||
151 | cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2); | ||
152 | |||
153 | if (cpu_if->vgic_misr & ICH_MISR_EOI) | ||
154 | cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2); | ||
155 | else | ||
156 | cpu_if->vgic_eisr = 0; | ||
157 | } else { | ||
158 | cpu_if->vgic_misr = 0; | ||
159 | cpu_if->vgic_eisr = 0; | ||
160 | } | ||
161 | } | ||
162 | |||
134 | void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) | 163 | void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) |
135 | { | 164 | { |
136 | struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; | 165 | struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; |
@@ -148,8 +177,6 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) | |||
148 | int i; | 177 | int i; |
149 | u32 max_lr_idx, nr_pri_bits; | 178 | u32 max_lr_idx, nr_pri_bits; |
150 | 179 | ||
151 | cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2); | ||
152 | cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2); | ||
153 | cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2); | 180 | cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2); |
154 | 181 | ||
155 | write_gicreg(0, ICH_HCR_EL2); | 182 | write_gicreg(0, ICH_HCR_EL2); |
@@ -157,6 +184,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) | |||
157 | max_lr_idx = vtr_to_max_lr_idx(val); | 184 | max_lr_idx = vtr_to_max_lr_idx(val); |
158 | nr_pri_bits = vtr_to_nr_pri_bits(val); | 185 | nr_pri_bits = vtr_to_nr_pri_bits(val); |
159 | 186 | ||
187 | save_maint_int_state(vcpu, max_lr_idx + 1); | ||
188 | |||
160 | for (i = 0; i <= max_lr_idx; i++) { | 189 | for (i = 0; i <= max_lr_idx; i++) { |
161 | if (vcpu->arch.vgic_cpu.live_lrs & (1UL << i)) | 190 | if (vcpu->arch.vgic_cpu.live_lrs & (1UL << i)) |
162 | cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); | 191 | cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); |