about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorGleb Natapov <gleb@redhat.com>2010-10-14 05:22:51 -0400
committerAvi Kivity <avi@redhat.com>2011-01-12 04:23:14 -0500
commitfd10cde9294f73eeccbc16f3fec1ae6cde7b800c (patch)
tree6d49e440289c783ff1d5c635c57acd07a6b9c147 /arch
parent344d9588a9df06182684168be4f1408b55c7da3e (diff)
KVM paravirt: Add async PF initialization to PV guest.
Enable async PF in a guest if async PF capability is discovered.

Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/kvm_para.h6
-rw-r--r--arch/x86/kernel/kvm.c92
2 files changed, 98 insertions, 0 deletions
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 8662ae0a035c..2315398230d1 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -65,6 +65,12 @@ struct kvm_mmu_op_release_pt {
65 __u64 pt_phys; 65 __u64 pt_phys;
66}; 66};
67 67
68struct kvm_vcpu_pv_apf_data {
69 __u32 reason;
70 __u8 pad[60];
71 __u32 enabled;
72};
73
68#ifdef __KERNEL__ 74#ifdef __KERNEL__
69#include <asm/processor.h> 75#include <asm/processor.h>
70 76
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index e6db17976b82..032d03b6b54a 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -27,16 +27,30 @@
27#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/highmem.h> 28#include <linux/highmem.h>
29#include <linux/hardirq.h> 29#include <linux/hardirq.h>
30#include <linux/notifier.h>
31#include <linux/reboot.h>
30#include <asm/timer.h> 32#include <asm/timer.h>
33#include <asm/cpu.h>
31 34
32#define MMU_QUEUE_SIZE 1024 35#define MMU_QUEUE_SIZE 1024
33 36
37static int kvmapf = 1;
38
39static int parse_no_kvmapf(char *arg)
40{
41 kvmapf = 0;
42 return 0;
43}
44
45early_param("no-kvmapf", parse_no_kvmapf);
46
34struct kvm_para_state { 47struct kvm_para_state {
35 u8 mmu_queue[MMU_QUEUE_SIZE]; 48 u8 mmu_queue[MMU_QUEUE_SIZE];
36 int mmu_queue_len; 49 int mmu_queue_len;
37}; 50};
38 51
39static DEFINE_PER_CPU(struct kvm_para_state, para_state); 52static DEFINE_PER_CPU(struct kvm_para_state, para_state);
53static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
40 54
41static struct kvm_para_state *kvm_para_state(void) 55static struct kvm_para_state *kvm_para_state(void)
42{ 56{
@@ -231,12 +245,86 @@ static void __init paravirt_ops_setup(void)
231#endif 245#endif
232} 246}
233 247
248void __cpuinit kvm_guest_cpu_init(void)
249{
250 if (!kvm_para_available())
251 return;
252
253 if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
254 u64 pa = __pa(&__get_cpu_var(apf_reason));
255
256 wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
257 __get_cpu_var(apf_reason).enabled = 1;
258 printk(KERN_INFO"KVM setup async PF for cpu %d\n",
259 smp_processor_id());
260 }
261}
262
263static void kvm_pv_disable_apf(void *unused)
264{
265 if (!__get_cpu_var(apf_reason).enabled)
266 return;
267
268 wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
269 __get_cpu_var(apf_reason).enabled = 0;
270
271 printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
272 smp_processor_id());
273}
274
275static int kvm_pv_reboot_notify(struct notifier_block *nb,
276 unsigned long code, void *unused)
277{
278 if (code == SYS_RESTART)
279 on_each_cpu(kvm_pv_disable_apf, NULL, 1);
280 return NOTIFY_DONE;
281}
282
283static struct notifier_block kvm_pv_reboot_nb = {
284 .notifier_call = kvm_pv_reboot_notify,
285};
286
234#ifdef CONFIG_SMP 287#ifdef CONFIG_SMP
235static void __init kvm_smp_prepare_boot_cpu(void) 288static void __init kvm_smp_prepare_boot_cpu(void)
236{ 289{
237 WARN_ON(kvm_register_clock("primary cpu clock")); 290 WARN_ON(kvm_register_clock("primary cpu clock"));
291 kvm_guest_cpu_init();
238 native_smp_prepare_boot_cpu(); 292 native_smp_prepare_boot_cpu();
239} 293}
294
295static void kvm_guest_cpu_online(void *dummy)
296{
297 kvm_guest_cpu_init();
298}
299
300static void kvm_guest_cpu_offline(void *dummy)
301{
302 kvm_pv_disable_apf(NULL);
303}
304
305static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
306 unsigned long action, void *hcpu)
307{
308 int cpu = (unsigned long)hcpu;
309 switch (action) {
310 case CPU_ONLINE:
311 case CPU_DOWN_FAILED:
312 case CPU_ONLINE_FROZEN:
313 smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
314 break;
315 case CPU_DOWN_PREPARE:
316 case CPU_DOWN_PREPARE_FROZEN:
317 smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
318 break;
319 default:
320 break;
321 }
322 return NOTIFY_OK;
323}
324
325static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
326 .notifier_call = kvm_cpu_notify,
327};
240#endif 328#endif
241 329
242void __init kvm_guest_init(void) 330void __init kvm_guest_init(void)
@@ -245,7 +333,11 @@ void __init kvm_guest_init(void)
245 return; 333 return;
246 334
247 paravirt_ops_setup(); 335 paravirt_ops_setup();
336 register_reboot_notifier(&kvm_pv_reboot_nb);
248#ifdef CONFIG_SMP 337#ifdef CONFIG_SMP
249 smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu; 338 smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
339 register_cpu_notifier(&kvm_cpu_notifier);
340#else
341 kvm_guest_cpu_init();
250#endif 342#endif
251} 343}