author    Alexander Graf <agraf@suse.de>  2010-07-29 08:47:58 -0400
committer Avi Kivity <avi@redhat.com>     2010-10-24 04:50:51 -0400
commit    73a18109829e7696226a9fd4062d339e7c6ee130 (patch)
tree      8332df8ebbeace42e757ae2965ea6fca25f315b7 /arch
parent    d17051cb8d223dffd6bb847b0565ef1654f8e0e1 (diff)
KVM: PPC: KVM PV guest stubs
We will soon start replacing instructions in the text section with other, paravirtualized versions. To ease the readability of those patches, I split the generic looping and magic page mapping code out into this patch. It still only contains stubs, but at least it loops through the text section :).

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/kernel/kvm.c	95
1 files changed, 95 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index a5ece71ecdd2..e93366fbbd21 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -33,6 +33,62 @@
 #define KVM_MAGIC_PAGE		(-4096L)
 #define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
 
+#define KVM_MASK_RT		0x03e00000
+
+static bool kvm_patching_worked = true;
+
+static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
+{
+	*inst = new_inst;
+	flush_icache_range((ulong)inst, (ulong)inst + 4);
+}
+
+static void kvm_map_magic_page(void *data)
+{
+	kvm_hypercall2(KVM_HC_PPC_MAP_MAGIC_PAGE,
+		       KVM_MAGIC_PAGE,  /* Physical Address */
+		       KVM_MAGIC_PAGE); /* Effective Address */
+}
+
+static void kvm_check_ins(u32 *inst)
+{
+	u32 _inst = *inst;
+	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
+	u32 inst_rt = _inst & KVM_MASK_RT;
+
+	switch (inst_no_rt) {
+	}
+
+	switch (_inst) {
+	}
+}
+
+static void kvm_use_magic_page(void)
+{
+	u32 *p;
+	u32 *start, *end;
+	u32 tmp;
+
+	/* Tell the host to map the magic page to -4096 on all CPUs */
+	on_each_cpu(kvm_map_magic_page, NULL, 1);
+
+	/* Quick self-test to see if the mapping works */
+	if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
+		kvm_patching_worked = false;
+		return;
+	}
+
+	/* Now loop through all code and find instructions */
+	start = (void*)_stext;
+	end = (void*)_etext;
+
+	for (p = start; p < end; p++)
+		kvm_check_ins(p);
+
+	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
+			 kvm_patching_worked ? "worked" : "failed");
+}
+
 unsigned long kvm_hypercall(unsigned long *in,
 			    unsigned long *out,
 			    unsigned long nr)
@@ -69,3 +125,42 @@ unsigned long kvm_hypercall(unsigned long *in,
 	return r3;
 }
 EXPORT_SYMBOL_GPL(kvm_hypercall);
+
+static int kvm_para_setup(void)
+{
+	extern u32 kvm_hypercall_start;
+	struct device_node *hyper_node;
+	u32 *insts;
+	int len, i;
+
+	hyper_node = of_find_node_by_path("/hypervisor");
+	if (!hyper_node)
+		return -1;
+
+	insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len);
+	if (len % 4)
+		return -1;
+	if (len > (4 * 4))
+		return -1;
+
+	for (i = 0; i < (len / 4); i++)
+		kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]);
+
+	return 0;
+}
+
+static int __init kvm_guest_init(void)
+{
+	if (!kvm_para_available())
+		return 0;
+
+	if (kvm_para_setup())
+		return 0;
+
+	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
+		kvm_use_magic_page();
+
+	return 0;
+}
+
+postcore_initcall(kvm_guest_init);
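
The two empty switch statements in kvm_check_ins() are the stubs the commit message refers to. KVM_MASK_RT (0x03e00000) covers the five RT (destination register) bits of a PowerPC instruction word, so inst_no_rt can be compared against an opcode template regardless of which register the instruction targets, while inst_rt keeps those bits so a patched replacement can still write to the same register. As a rough sketch of how a follow-up patch might fill the first switch: KVM_INST_MFMSR and kvm_patch_ins_ld() are hypothetical names that are not defined by this patch.

static void kvm_check_ins(u32 *inst)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;	/* opcode with the RT bits cleared */
	u32 inst_rt = _inst & KVM_MASK_RT;	/* the RT (destination register) bits */

	/* Hypothetical: KVM_INST_MFMSR and kvm_patch_ins_ld() are not part of
	 * this patch; later patches are expected to add the real opcode
	 * templates and patching helpers. */
	switch (inst_no_rt) {
	case KVM_INST_MFMSR:
		/* Load the MSR copy the host mirrors into the magic page
		 * instead of trapping on the privileged mfmsr, keeping the
		 * original destination register from inst_rt. */
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	}

	switch (_inst) {
	/* instructions matched on the full word, independent of RT */
	}
}

The second switch, on the full instruction word, is the place for replacements that do not depend on a register field.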