author     Radim Krčmář <rkrcmar@redhat.com>   2016-04-08 08:17:27 -0400
committer  Radim Krčmář <rkrcmar@redhat.com>   2016-04-08 08:17:27 -0400
commit     4a6cd3ba6fc4993f1805613098d7b032a7aa937b (patch)
tree       b3f1da0eed3d73f677651feb53c3d1c87555765c
parent     3d8e15dd6de644736916c8ba012c1cc6b356d4ba (diff)
parent     06a71a24bae57a07afee9cda6b00495347d8a448 (diff)
Merge tag 'kvm-arm-for-4.6-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm
KVM/ARM Fixes for v4.6-rc4

Addresses:
- Wrong indentation in the PMU code from the merge window
- A long-standing bug occurring when running ntpd on the host, candidate for stable
- Properly handle (and warn about) the unsupported configuration of running on systems with less than 40 bits of PA space
- More fixes to the PM and hotplug notifier stuff from the merge window
-rw-r--r--  arch/arm/kvm/arm.c                  13
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h     6
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h     2
-rw-r--r--  arch/arm64/include/asm/kvm_host.h    7
-rw-r--r--  arch/arm64/kvm/hyp/s2-setup.c       39
-rw-r--r--  virt/kvm/arm/arch_timer.c           49
-rw-r--r--  virt/kvm/arm/pmu.c                   3
7 files changed, 95 insertions, 24 deletions
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index b5384311dec4..dded1b763c16 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1112,10 +1112,17 @@ static void __init hyp_cpu_pm_init(void)
 {
 	cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
 }
+static void __init hyp_cpu_pm_exit(void)
+{
+	cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
+}
 #else
 static inline void hyp_cpu_pm_init(void)
 {
 }
+static inline void hyp_cpu_pm_exit(void)
+{
+}
 #endif
 
 static void teardown_common_resources(void)
@@ -1141,9 +1148,7 @@ static int init_subsystems(void)
 	/*
 	 * Register CPU Hotplug notifier
 	 */
-	cpu_notifier_register_begin();
-	err = __register_cpu_notifier(&hyp_init_cpu_nb);
-	cpu_notifier_register_done();
+	err = register_cpu_notifier(&hyp_init_cpu_nb);
 	if (err) {
 		kvm_err("Cannot register KVM init CPU notifier (%d)\n", err);
 		return err;
@@ -1193,6 +1198,8 @@ static void teardown_hyp_mode(void)
 	free_hyp_pgds();
 	for_each_possible_cpu(cpu)
 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+	unregister_cpu_notifier(&hyp_init_cpu_nb);
+	hyp_cpu_pm_exit();
 }
 
 static int init_vhe_mode(void)
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 4150fd8bae01..3f29887995bc 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -151,8 +151,7 @@
  */
 #define VTCR_EL2_FLAGS		(VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
 				 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
-				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
-				 VTCR_EL2_RES1)
+				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_RES1)
 #define VTTBR_X		(38 - VTCR_EL2_T0SZ_40B)
 #else
 /*
@@ -163,8 +162,7 @@
  */
 #define VTCR_EL2_FLAGS		(VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
 				 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
-				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
-				 VTCR_EL2_RES1)
+				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_RES1)
 #define VTTBR_X		(37 - VTCR_EL2_T0SZ_40B)
 #endif
 
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index eb7490d232a0..40a0a24e6c98 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -54,7 +54,7 @@ extern void __vgic_v3_init_lrs(void);
 
 extern u32 __kvm_get_mdcr_el2(void);
 
-extern void __init_stage2_translation(void);
+extern u32 __init_stage2_translation(void);
 
 #endif
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index b7e82a795ac9..f5c6bd2541ef 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -369,11 +369,12 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
 int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
 			       struct kvm_device_attr *attr);
 
-/* #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__) */
-
 static inline void __cpu_init_stage2(void)
 {
-	kvm_call_hyp(__init_stage2_translation);
+	u32 parange = kvm_call_hyp(__init_stage2_translation);
+
+	WARN_ONCE(parange < 40,
+		  "PARange is %d bits, unsupported configuration!", parange);
 }
 
 #endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c
index 5a9f3bf542b0..bcbe761a5a3d 100644
--- a/arch/arm64/kvm/hyp/s2-setup.c
+++ b/arch/arm64/kvm/hyp/s2-setup.c
@@ -20,9 +20,10 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_hyp.h>
 
-void __hyp_text __init_stage2_translation(void)
+u32 __hyp_text __init_stage2_translation(void)
 {
 	u64 val = VTCR_EL2_FLAGS;
+	u64 parange;
 	u64 tmp;
 
 	/*
@@ -30,7 +31,39 @@ void __hyp_text __init_stage2_translation(void)
 	 * bits in VTCR_EL2. Amusingly, the PARange is 4 bits, while
 	 * PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2...
 	 */
-	val |= (read_sysreg(id_aa64mmfr0_el1) & 7) << 16;
+	parange = read_sysreg(id_aa64mmfr0_el1) & 7;
+	val |= parange << 16;
+
+	/* Compute the actual PARange... */
+	switch (parange) {
+	case 0:
+		parange = 32;
+		break;
+	case 1:
+		parange = 36;
+		break;
+	case 2:
+		parange = 40;
+		break;
+	case 3:
+		parange = 42;
+		break;
+	case 4:
+		parange = 44;
+		break;
+	case 5:
+	default:
+		parange = 48;
+		break;
+	}
+
+	/*
+	 * ... and clamp it to 40 bits, unless we have some braindead
+	 * HW that implements less than that. In all cases, we'll
+	 * return that value for the rest of the kernel to decide what
+	 * to do.
+	 */
+	val |= 64 - (parange > 40 ? 40 : parange);
 
 	/*
 	 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS
@@ -42,4 +75,6 @@ void __hyp_text __init_stage2_translation(void)
 			VTCR_EL2_VS_8BIT;
 
 	write_sysreg(val, vtcr_el2);
+
+	return parange;
 }
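The net effect of the s2-setup.c change is: decode ID_AA64MMFR0_EL1.PARange into a physical-address width, clamp the stage-2 input size to 40 bits (folding 64 minus the IPA width into the VTCR_EL2 T0SZ field), and hand the raw PARange back to __cpu_init_stage2() so it can warn. A minimal standalone sketch of that decode-and-clamp step, using a made-up helper name and plain stdio in place of the hyp code above:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper: decode ID_AA64MMFR0_EL1.PARange (bits [3:0]) into a
 * physical address width, then clamp the stage-2 input size to 40 bits,
 * mirroring the switch and the "64 - ..." T0SZ computation in the patch.
 */
static uint32_t decode_and_clamp_parange(uint64_t mmfr0)
{
	static const uint32_t widths[] = { 32, 36, 40, 42, 44, 48 };
	uint64_t field = mmfr0 & 7;
	uint32_t parange = (field < 6) ? widths[field] : 48;
	uint32_t ipa_bits = (parange > 40) ? 40 : parange;
	uint32_t t0sz = 64 - ipa_bits;	/* value OR'd into VTCR_EL2 */

	printf("PARange=%u bits, IPA=%u bits, T0SZ=%u\n", parange, ipa_bits, t0sz);
	return parange;
}

int main(void)
{
	decode_and_clamp_parange(2);	/* 40-bit PA: supported */
	decode_and_clamp_parange(0);	/* 32-bit PA: would trip the <40-bit warning */
	return 0;
}

A decoded PARange below 40 bits is exactly what the new WARN_ONCE() in kvm_host.h reports as an unsupported configuration.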
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index a9ad4fe3f68f..9aaa35dd9144 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -91,6 +91,8 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
 	vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
 	vcpu->arch.timer_cpu.armed = false;
 
+	WARN_ON(!kvm_timer_should_fire(vcpu));
+
 	/*
 	 * If the vcpu is blocked we want to wake it up so that it will see
 	 * the timer has expired when entering the guest.
@@ -98,10 +100,46 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
 	kvm_vcpu_kick(vcpu);
 }
 
+static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
+{
+	cycle_t cval, now;
+
+	cval = vcpu->arch.timer_cpu.cntv_cval;
+	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
+	if (now < cval) {
+		u64 ns;
+
+		ns = cyclecounter_cyc2ns(timecounter->cc,
+					 cval - now,
+					 timecounter->mask,
+					 &timecounter->frac);
+		return ns;
+	}
+
+	return 0;
+}
+
 static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
 {
 	struct arch_timer_cpu *timer;
+	struct kvm_vcpu *vcpu;
+	u64 ns;
+
 	timer = container_of(hrt, struct arch_timer_cpu, timer);
+	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
+
+	/*
+	 * Check that the timer has really expired from the guest's
+	 * PoV (NTP on the host may have forced it to expire
+	 * early). If we should have slept longer, restart it.
+	 */
+	ns = kvm_timer_compute_delta(vcpu);
+	if (unlikely(ns)) {
+		hrtimer_forward_now(hrt, ns_to_ktime(ns));
+		return HRTIMER_RESTART;
+	}
+
 	queue_work(wqueue, &timer->expired);
 	return HRTIMER_NORESTART;
 }
@@ -176,8 +214,6 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
 void kvm_timer_schedule(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-	u64 ns;
-	cycle_t cval, now;
 
 	BUG_ON(timer_is_armed(timer));
 
@@ -197,14 +233,7 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
 		return;
 
 	/* The timer has not yet expired, schedule a background timer */
-	cval = timer->cntv_cval;
-	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
-
-	ns = cyclecounter_cyc2ns(timecounter->cc,
-				 cval - now,
-				 timecounter->mask,
-				 &timecounter->frac);
-	timer_arm(timer, ns);
+	timer_arm(timer, kvm_timer_compute_delta(vcpu));
 }
 
 void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
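The timer change boils down to: compute how long the guest timer still has to run (CVAL minus the offset-adjusted counter, converted from cycles to nanoseconds), and if the host hrtimer fired early (for example because ntpd slewed the host clock), push it forward by that delta instead of injecting the interrupt. A rough standalone sketch of the delta computation, assuming a fixed counter frequency in place of the kernel's timecounter/cyclecounter machinery (helper names here are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL

/*
 * Hypothetical stand-in for cyclecounter_cyc2ns(): convert a cycle delta to
 * nanoseconds at a fixed counter frequency (e.g. a 50 MHz generic timer).
 */
static uint64_t cycles_to_ns(uint64_t cycles, uint64_t freq_hz)
{
	return cycles * NSEC_PER_SEC / freq_hz;
}

/*
 * Mirror of the patched logic: return 0 if the timer has already expired from
 * the guest's point of view, otherwise the time left to sleep in nanoseconds.
 */
static uint64_t timer_compute_delta(uint64_t cval, uint64_t now, uint64_t freq_hz)
{
	if (now < cval)
		return cycles_to_ns(cval - now, freq_hz);
	return 0;
}

int main(void)
{
	/*
	 * The host hrtimer fired 1000 cycles early: the delta is non-zero, so
	 * the real code would hrtimer_forward_now() and return HRTIMER_RESTART
	 * instead of queueing the interrupt-injection work.
	 */
	printf("remaining: %llu ns\n",
	       (unsigned long long)timer_compute_delta(101000, 100000, 50000000));
	return 0;
}

In the patched kvm_timer_expire(), a non-zero delta leads to hrtimer_forward_now() and HRTIMER_RESTART; only a zero delta queues the expired work.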
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index b5754c6c5508..575c7aa30d7e 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -193,11 +193,12 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 {
 	u64 reg = 0;
 
-	if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+	if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
 		reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 		reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
 		reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
 		reg &= kvm_pmu_valid_counter_mask(vcpu);
+	}
 
 	return reg;
 }
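The added braces in kvm_pmu_overflow_status() restore the intended grouping (with PMCR_EL0.E clear, reg starts at 0 and the trailing masking was already a no-op), but the computation they wrap is worth spelling out: a counter contributes to the overflow status only if it has overflowed, is enabled, has its overflow interrupt enabled, and sits inside the valid counter mask. A small standalone sketch of that masking chain with made-up register values:

#include <stdint.h>
#include <stdio.h>

#define PMCR_E		(1u << 0)	/* global counter enable, PMCR_EL0.E */

/*
 * Hypothetical mirror of kvm_pmu_overflow_status(): AND together the overflow
 * set, the enabled-counter set, the interrupt-enabled set and the mask of
 * counters the vCPU actually implements.
 */
static uint64_t pmu_overflow_status(uint32_t pmcr, uint64_t pmovsset,
				    uint64_t pmcntenset, uint64_t pmintenset,
				    uint64_t valid_mask)
{
	uint64_t reg = 0;

	if (pmcr & PMCR_E) {
		reg = pmovsset;
		reg &= pmcntenset;
		reg &= pmintenset;
		reg &= valid_mask;
	}

	return reg;
}

int main(void)
{
	/*
	 * Counters 0 and 3 overflowed, but only counter 0 is enabled with its
	 * interrupt unmasked, so only bit 0 survives the masking chain.
	 */
	uint64_t status = pmu_overflow_status(PMCR_E, 0x9, 0x1, 0x1, 0x3f);

	printf("overflow status: %#llx\n", (unsigned long long)status);
	return 0;
}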