aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMarc Zyngier <marc.zyngier@arm.com>2016-02-02 14:35:34 -0500
committerMarc Zyngier <marc.zyngier@arm.com>2016-03-08 23:22:20 -0500
commit59f00ff9afc028053fa9281407627e95008ebd5c (patch)
tree68ae8ad7133b4707c9dd15a93b3ec263e33d1a3b
parent9b4a3004439d5be680faf41f4267968ca11bb9f6 (diff)
KVM: arm/arm64: vgic-v2: Avoid accessing GICH registers
KVM: arm/arm64: vgic-v2: Avoid accessing GICH registers

GICv2 registers are *slow*. As in "terrifyingly slow". Which is bad. But we're equally bad, as we make a point in accessing them even if we don't have any interrupt in flight.

A good solution is to first find out if we have anything useful to write into the GIC, and if we don't, to simply not do it. This involves tracking which LRs actually have something valid there.

Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
-rw-r--r--include/kvm/arm_vgic.h2
-rw-r--r--virt/kvm/arm/hyp/vgic-v2-sr.c72
2 files changed, 52 insertions, 22 deletions
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 13a3d537811b..f473fd65fab5 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -321,6 +321,8 @@ struct vgic_cpu {
 
 	/* Protected by the distributor's irq_phys_map_lock */
 	struct list_head irq_phys_map_list;
+
+	u64 live_lrs;
 };
 
 #define LR_EMPTY	0xff
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index 9514a7d90d71..aa0fdb89827f 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -36,28 +36,41 @@ void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
 
 	nr_lr = vcpu->arch.vgic_cpu.nr_lr;
 	cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);
-	cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);
-	eisr0  = readl_relaxed(base + GICH_EISR0);
-	elrsr0 = readl_relaxed(base + GICH_ELRSR0);
-	if (unlikely(nr_lr > 32)) {
-		eisr1  = readl_relaxed(base + GICH_EISR1);
-		elrsr1 = readl_relaxed(base + GICH_ELRSR1);
-	} else {
-		eisr1 = elrsr1 = 0;
-	}
+
+	if (vcpu->arch.vgic_cpu.live_lrs) {
+		eisr0  = readl_relaxed(base + GICH_EISR0);
+		elrsr0 = readl_relaxed(base + GICH_ELRSR0);
+		cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);
+		cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);
+
+		if (unlikely(nr_lr > 32)) {
+			eisr1  = readl_relaxed(base + GICH_EISR1);
+			elrsr1 = readl_relaxed(base + GICH_ELRSR1);
+		} else {
+			eisr1 = elrsr1 = 0;
+		}
+
 #ifdef CONFIG_CPU_BIG_ENDIAN
-	cpu_if->vgic_eisr  = ((u64)eisr0 << 32) | eisr1;
-	cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
+		cpu_if->vgic_eisr  = ((u64)eisr0 << 32) | eisr1;
+		cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
 #else
-	cpu_if->vgic_eisr  = ((u64)eisr1 << 32) | eisr0;
-	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
+		cpu_if->vgic_eisr  = ((u64)eisr1 << 32) | eisr0;
+		cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
 #endif
-	cpu_if->vgic_apr    = readl_relaxed(base + GICH_APR);
 
-	writel_relaxed(0, base + GICH_HCR);
+		for (i = 0; i < nr_lr; i++)
+			if (vcpu->arch.vgic_cpu.live_lrs & (1UL << i))
+				cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
 
-	for (i = 0; i < nr_lr; i++)
-		cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
+		writel_relaxed(0, base + GICH_HCR);
+
+		vcpu->arch.vgic_cpu.live_lrs = 0;
+	} else {
+		cpu_if->vgic_eisr = 0;
+		cpu_if->vgic_elrsr = ~0UL;
+		cpu_if->vgic_misr = 0;
+		cpu_if->vgic_apr = 0;
+	}
 }
 
 /* vcpu is already in the HYP VA space */
@@ -68,15 +81,30 @@ void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
 	struct vgic_dist *vgic = &kvm->arch.vgic;
 	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
 	int i, nr_lr;
+	u64 live_lrs = 0;
 
 	if (!base)
 		return;
 
-	writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
-	writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
-	writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
-
 	nr_lr = vcpu->arch.vgic_cpu.nr_lr;
-	for (i = 0; i < nr_lr; i++)
-		writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4));
+
+	for (i = 0; i < nr_lr; i++)
+		if (cpu_if->vgic_lr[i] & GICH_LR_STATE)
+			live_lrs |= 1UL << i;
+
+	if (live_lrs) {
+		writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
+		writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
+		for (i = 0; i < nr_lr; i++) {
+			u32 val = 0;
+
+			if (live_lrs & (1UL << i))
+				val = cpu_if->vgic_lr[i];
+
+			writel_relaxed(val, base + GICH_LR0 + (i * 4));
+		}
+	}
+
+	writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
+	vcpu->arch.vgic_cpu.live_lrs = live_lrs;
 }