author    Xiantao Zhang <xiantao.zhang@intel.com>    2008-04-01 03:29:29 -0400
committer Avi Kivity <avi@qumranet.com>              2008-04-27 05:01:03 -0400
commit    b024b79322aad213cd2d4f30c23a6c626a0d5b31 (patch)
tree      e7c87b054bd2ff4c0539f908a938e6521469d846 /arch/ia64
parent    1a9c1ac46990194f6b6ddc591c24e385e611fa25 (diff)
KVM: ia64: Add kvm arch-specific core code for kvm/ia64
kvm_ia64.c is created to handle kvm ia64-specific core logic.
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch/ia64')
-rw-r--r--    arch/ia64/kvm/kvm-ia64.c    1789
1 file changed, 1789 insertions, 0 deletions
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
new file mode 100644
index 000000000000..9c56b6429cb6
--- /dev/null
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -0,0 +1,1789 @@
1 | |||
2 | /* | ||
3 | * kvm_ia64.c: Basic KVM support on Itanium series processors | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2007, Intel Corporation. | ||
7 | * Xiantao Zhang (xiantao.zhang@intel.com) | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms and conditions of the GNU General Public License, | ||
11 | * version 2, as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along with | ||
19 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
20 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <linux/errno.h> | ||
26 | #include <linux/percpu.h> | ||
27 | #include <linux/gfp.h> | ||
28 | #include <linux/fs.h> | ||
29 | #include <linux/smp.h> | ||
30 | #include <linux/kvm_host.h> | ||
31 | #include <linux/kvm.h> | ||
32 | #include <linux/bitops.h> | ||
33 | #include <linux/hrtimer.h> | ||
34 | #include <linux/uaccess.h> | ||
35 | |||
36 | #include <asm/pgtable.h> | ||
37 | #include <asm/gcc_intrin.h> | ||
38 | #include <asm/pal.h> | ||
39 | #include <asm/cacheflush.h> | ||
40 | #include <asm/div64.h> | ||
41 | #include <asm/tlb.h> | ||
42 | |||
43 | #include "misc.h" | ||
44 | #include "vti.h" | ||
45 | #include "iodev.h" | ||
46 | #include "ioapic.h" | ||
47 | #include "lapic.h" | ||
48 | |||
49 | static unsigned long kvm_vmm_base; | ||
50 | static unsigned long kvm_vsa_base; | ||
51 | static unsigned long kvm_vm_buffer; | ||
52 | static unsigned long kvm_vm_buffer_size; | ||
53 | unsigned long kvm_vmm_gp; | ||
54 | |||
55 | static long vp_env_info; | ||
56 | |||
57 | static struct kvm_vmm_info *kvm_vmm_info; | ||
58 | |||
59 | static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu); | ||
60 | |||
61 | struct kvm_stats_debugfs_item debugfs_entries[] = { | ||
62 | { NULL } | ||
63 | }; | ||
64 | |||
65 | |||
66 | struct fdesc{ | ||
67 | unsigned long ip; | ||
68 | unsigned long gp; | ||
69 | }; | ||
70 | |||
71 | static void kvm_flush_icache(unsigned long start, unsigned long len) | ||
72 | { | ||
73 | int l; | ||
74 | |||
75 | for (l = 0; l < (len + 32); l += 32) | ||
76 | ia64_fc(start + l); | ||
77 | |||
78 | ia64_sync_i(); | ||
79 | ia64_srlz_i(); | ||
80 | } | ||
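/*
 * Illustrative note (not part of this patch): the fc instruction
 * flushes one cache line, and the loop above assumes a 32-byte line
 * size, walking the whole range at that stride before
 * ia64_sync_i()/ia64_srlz_i() make the new contents visible to the
 * instruction stream. A hypothetical caller copying code into place
 * would pair the copy with a flush, much as kvm_relocate_vmm() does
 * further down:
 *
 *	memcpy(dst, src, size);
 *	kvm_flush_icache((unsigned long)dst, size);
 */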
81 | |||
82 | static void kvm_flush_tlb_all(void) | ||
83 | { | ||
84 | unsigned long i, j, count0, count1, stride0, stride1, addr; | ||
85 | long flags; | ||
86 | |||
87 | addr = local_cpu_data->ptce_base; | ||
88 | count0 = local_cpu_data->ptce_count[0]; | ||
89 | count1 = local_cpu_data->ptce_count[1]; | ||
90 | stride0 = local_cpu_data->ptce_stride[0]; | ||
91 | stride1 = local_cpu_data->ptce_stride[1]; | ||
92 | |||
93 | local_irq_save(flags); | ||
94 | for (i = 0; i < count0; ++i) { | ||
95 | for (j = 0; j < count1; ++j) { | ||
96 | ia64_ptce(addr); | ||
97 | addr += stride1; | ||
98 | } | ||
99 | addr += stride0; | ||
100 | } | ||
101 | local_irq_restore(flags); | ||
102 | ia64_srlz_i(); /* srlz.i implies srlz.d */ | ||
103 | } | ||
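/*
 * Illustrative note (not part of this patch): the ptce base, counts
 * and strides consumed above are the PAL_PTCE_INFO values that the
 * kernel caches in local_cpu_data. init_ptce_info() further down
 * snapshots the same data per vcpu:
 *
 *	ia64_ptce_info_t ptce = {0};
 *	ia64_get_ptce(&ptce);	// fills base, count[2], stride[2]
 */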
104 | |||
105 | long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler) | ||
106 | { | ||
107 | struct ia64_pal_retval iprv; | ||
108 | |||
109 | PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva, | ||
110 | (u64)opt_handler); | ||
111 | |||
112 | return iprv.status; | ||
113 | } | ||
114 | |||
115 | static DEFINE_SPINLOCK(vp_lock); | ||
116 | |||
117 | void kvm_arch_hardware_enable(void *garbage) | ||
118 | { | ||
119 | long status; | ||
120 | long tmp_base; | ||
121 | unsigned long pte; | ||
122 | unsigned long saved_psr; | ||
123 | int slot; | ||
124 | |||
125 | pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), | ||
126 | PAGE_KERNEL)); | ||
127 | local_irq_save(saved_psr); | ||
128 | slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); | ||
129 | if (slot < 0) | ||
130 | return; | ||
131 | local_irq_restore(saved_psr); | ||
132 | |||
133 | spin_lock(&vp_lock); | ||
134 | status = ia64_pal_vp_init_env(kvm_vsa_base ? | ||
135 | VP_INIT_ENV : VP_INIT_ENV_INITALIZE, | ||
136 | __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); | ||
137 | if (status != 0) { | ||
138 | printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); | ||
139 | return ; | ||
140 | } | ||
141 | |||
142 | if (!kvm_vsa_base) { | ||
143 | kvm_vsa_base = tmp_base; | ||
144 | printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base); | ||
145 | } | ||
146 | spin_unlock(&vp_lock); | ||
147 | ia64_ptr_entry(0x3, slot); | ||
148 | } | ||
149 | |||
150 | void kvm_arch_hardware_disable(void *garbage) | ||
151 | { | ||
152 | |||
153 | long status; | ||
154 | int slot; | ||
155 | unsigned long pte; | ||
156 | unsigned long saved_psr; | ||
157 | unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA); | ||
158 | |||
159 | pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), | ||
160 | PAGE_KERNEL)); | ||
161 | |||
162 | local_irq_save(saved_psr); | ||
163 | slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); | ||
164 | if (slot < 0) | ||
165 | return; | ||
166 | local_irq_restore(saved_psr); | ||
167 | |||
168 | status = ia64_pal_vp_exit_env(host_iva); | ||
169 | if (status) | ||
170 | printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n", | ||
171 | status); | ||
172 | ia64_ptr_entry(0x3, slot); | ||
173 | } | ||
174 | |||
175 | void kvm_arch_check_processor_compat(void *rtn) | ||
176 | { | ||
177 | *(int *)rtn = 0; | ||
178 | } | ||
179 | |||
180 | int kvm_dev_ioctl_check_extension(long ext) | ||
181 | { | ||
182 | |||
183 | int r; | ||
184 | |||
185 | switch (ext) { | ||
186 | case KVM_CAP_IRQCHIP: | ||
187 | case KVM_CAP_USER_MEMORY: | ||
188 | |||
189 | r = 1; | ||
190 | break; | ||
191 | default: | ||
192 | r = 0; | ||
193 | } | ||
194 | return r; | ||
195 | |||
196 | } | ||
197 | |||
198 | static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, | ||
199 | gpa_t addr) | ||
200 | { | ||
201 | struct kvm_io_device *dev; | ||
202 | |||
203 | dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr); | ||
204 | |||
205 | return dev; | ||
206 | } | ||
207 | |||
208 | static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
209 | { | ||
210 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | ||
211 | kvm_run->hw.hardware_exit_reason = 1; | ||
212 | return 0; | ||
213 | } | ||
214 | |||
215 | static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
216 | { | ||
217 | struct kvm_mmio_req *p; | ||
218 | struct kvm_io_device *mmio_dev; | ||
219 | |||
220 | p = kvm_get_vcpu_ioreq(vcpu); | ||
221 | |||
222 | if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS) | ||
223 | goto mmio; | ||
224 | vcpu->mmio_needed = 1; | ||
225 | vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr; | ||
226 | vcpu->mmio_size = kvm_run->mmio.len = p->size; | ||
227 | vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir; | ||
228 | |||
229 | if (vcpu->mmio_is_write) | ||
230 | memcpy(vcpu->mmio_data, &p->data, p->size); | ||
231 | memcpy(kvm_run->mmio.data, &p->data, p->size); | ||
232 | kvm_run->exit_reason = KVM_EXIT_MMIO; | ||
233 | return 0; | ||
234 | mmio: | ||
235 | mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr); | ||
236 | if (mmio_dev) { | ||
237 | if (!p->dir) | ||
238 | kvm_iodevice_write(mmio_dev, p->addr, p->size, | ||
239 | &p->data); | ||
240 | else | ||
241 | kvm_iodevice_read(mmio_dev, p->addr, p->size, | ||
242 | &p->data); | ||
243 | |||
244 | } else | ||
245 | printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr); | ||
246 | p->state = STATE_IORESP_READY; | ||
247 | |||
248 | return 1; | ||
249 | } | ||
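/*
 * Illustrative note (not part of this patch): returning 0 above exits
 * to userspace with KVM_EXIT_MMIO. On the next KVM_RUN,
 * kvm_arch_vcpu_ioctl_run() (below) sees mmio_needed, copies
 * kvm_run->mmio.data back into the vcpu and marks the in-flight
 * ioreq completed:
 *
 *	if (vcpu->mmio_needed) {
 *		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
 *		kvm_set_mmio_data(vcpu);	// p->state = STATE_IORESP_READY
 *		vcpu->mmio_needed = 0;
 *	}
 */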
250 | |||
251 | static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
252 | { | ||
253 | struct exit_ctl_data *p; | ||
254 | |||
255 | p = kvm_get_exit_data(vcpu); | ||
256 | |||
257 | if (p->exit_reason == EXIT_REASON_PAL_CALL) | ||
258 | return kvm_pal_emul(vcpu, kvm_run); | ||
259 | else { | ||
260 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | ||
261 | kvm_run->hw.hardware_exit_reason = 2; | ||
262 | return 0; | ||
263 | } | ||
264 | } | ||
265 | |||
266 | static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
267 | { | ||
268 | struct exit_ctl_data *p; | ||
269 | |||
270 | p = kvm_get_exit_data(vcpu); | ||
271 | |||
272 | if (p->exit_reason == EXIT_REASON_SAL_CALL) { | ||
273 | kvm_sal_emul(vcpu); | ||
274 | return 1; | ||
275 | } else { | ||
276 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | ||
277 | kvm_run->hw.hardware_exit_reason = 3; | ||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | } | ||
282 | |||
283 | /* | ||
284 | * dm: delivery mode of the IPI. ||
285 | * vector: vector to deliver. ||
286 | */ | ||
287 | static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm, | ||
288 | uint64_t vector) | ||
289 | { | ||
290 | switch (dm) { | ||
291 | case SAPIC_FIXED: | ||
292 | kvm_apic_set_irq(vcpu, vector, 0); | ||
293 | break; | ||
294 | case SAPIC_NMI: | ||
295 | kvm_apic_set_irq(vcpu, 2, 0); | ||
296 | break; | ||
297 | case SAPIC_EXTINT: | ||
298 | kvm_apic_set_irq(vcpu, 0, 0); | ||
299 | break; | ||
300 | case SAPIC_INIT: | ||
301 | case SAPIC_PMI: | ||
302 | default: | ||
303 | printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n"); | ||
304 | break; | ||
305 | } | ||
306 | } | ||
307 | |||
308 | static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id, | ||
309 | unsigned long eid) | ||
310 | { | ||
311 | union ia64_lid lid; | ||
312 | int i; | ||
313 | |||
314 | for (i = 0; i < KVM_MAX_VCPUS; i++) { | ||
315 | if (kvm->vcpus[i]) { | ||
316 | lid.val = VCPU_LID(kvm->vcpus[i]); | ||
317 | if (lid.id == id && lid.eid == eid) | ||
318 | return kvm->vcpus[i]; | ||
319 | } | ||
320 | } | ||
321 | |||
322 | return NULL; | ||
323 | } | ||
324 | |||
325 | static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
326 | { | ||
327 | struct exit_ctl_data *p = kvm_get_exit_data(vcpu); | ||
328 | struct kvm_vcpu *target_vcpu; | ||
329 | struct kvm_pt_regs *regs; | ||
330 | union ia64_ipi_a addr = p->u.ipi_data.addr; | ||
331 | union ia64_ipi_d data = p->u.ipi_data.data; | ||
332 | |||
333 | target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid); | ||
334 | if (!target_vcpu) | ||
335 | return handle_vm_error(vcpu, kvm_run); | ||
336 | |||
337 | if (!target_vcpu->arch.launched) { | ||
338 | regs = vcpu_regs(target_vcpu); | ||
339 | |||
340 | regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip; | ||
341 | regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp; | ||
342 | |||
343 | target_vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE; | ||
344 | if (waitqueue_active(&target_vcpu->wq)) | ||
345 | wake_up_interruptible(&target_vcpu->wq); | ||
346 | } else { | ||
347 | vcpu_deliver_ipi(target_vcpu, data.dm, data.vector); | ||
348 | if (target_vcpu != vcpu) | ||
349 | kvm_vcpu_kick(target_vcpu); | ||
350 | } | ||
351 | |||
352 | return 1; | ||
353 | } | ||
354 | |||
355 | struct call_data { | ||
356 | struct kvm_ptc_g ptc_g_data; | ||
357 | struct kvm_vcpu *vcpu; | ||
358 | }; | ||
359 | |||
360 | static void vcpu_global_purge(void *info) | ||
361 | { | ||
362 | struct call_data *p = (struct call_data *)info; | ||
363 | struct kvm_vcpu *vcpu = p->vcpu; | ||
364 | |||
365 | if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) | ||
366 | return; | ||
367 | |||
368 | set_bit(KVM_REQ_PTC_G, &vcpu->requests); | ||
369 | if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) { | ||
370 | vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] = | ||
371 | p->ptc_g_data; | ||
372 | } else { | ||
373 | clear_bit(KVM_REQ_PTC_G, &vcpu->requests); | ||
374 | vcpu->arch.ptc_g_count = 0; | ||
375 | set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests); | ||
376 | } | ||
377 | } | ||
378 | |||
379 | static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
380 | { | ||
381 | struct exit_ctl_data *p = kvm_get_exit_data(vcpu); | ||
382 | struct kvm *kvm = vcpu->kvm; | ||
383 | struct call_data call_data; | ||
384 | int i; | ||
385 | call_data.ptc_g_data = p->u.ptc_g_data; | ||
386 | |||
387 | for (i = 0; i < KVM_MAX_VCPUS; i++) { | ||
388 | if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state == | ||
389 | VCPU_MP_STATE_UNINITIALIZED || | ||
390 | vcpu == kvm->vcpus[i]) | ||
391 | continue; | ||
392 | |||
393 | if (waitqueue_active(&kvm->vcpus[i]->wq)) | ||
394 | wake_up_interruptible(&kvm->vcpus[i]->wq); | ||
395 | |||
396 | if (kvm->vcpus[i]->cpu != -1) { | ||
397 | call_data.vcpu = kvm->vcpus[i]; | ||
398 | smp_call_function_single(kvm->vcpus[i]->cpu, | ||
399 | vcpu_global_purge, &call_data, 0, 1); | ||
400 | } else | ||
401 | printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n"); | ||
402 | |||
403 | } | ||
404 | return 1; | ||
405 | } | ||
406 | |||
407 | static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
408 | { | ||
409 | return 1; | ||
410 | } | ||
411 | |||
412 | int kvm_emulate_halt(struct kvm_vcpu *vcpu) | ||
413 | { | ||
414 | |||
415 | ktime_t kt; | ||
416 | long itc_diff; | ||
417 | unsigned long vcpu_now_itc; | ||
418 | |||
419 | unsigned long expires; | ||
420 | struct hrtimer *p_ht = &vcpu->arch.hlt_timer; | ||
421 | unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec; | ||
422 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | ||
423 | |||
424 | vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset; | ||
425 | |||
426 | if (time_after(vcpu_now_itc, vpd->itm)) { | ||
427 | vcpu->arch.timer_check = 1; | ||
428 | return 1; | ||
429 | } | ||
430 | itc_diff = vpd->itm - vcpu_now_itc; | ||
431 | if (itc_diff < 0) | ||
432 | itc_diff = -itc_diff; | ||
433 | |||
434 | expires = div64_64(itc_diff, cyc_per_usec); | ||
435 | kt = ktime_set(0, 1000 * expires); | ||
436 | vcpu->arch.ht_active = 1; | ||
437 | hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS); | ||
438 | |||
439 | if (irqchip_in_kernel(vcpu->kvm)) { | ||
440 | vcpu->arch.mp_state = VCPU_MP_STATE_HALTED; | ||
441 | kvm_vcpu_block(vcpu); | ||
442 | hrtimer_cancel(p_ht); | ||
443 | vcpu->arch.ht_active = 0; | ||
444 | |||
445 | if (vcpu->arch.mp_state != VCPU_MP_STATE_RUNNABLE) | ||
446 | return -EINTR; | ||
447 | return 1; | ||
448 | } else { | ||
449 | printk(KERN_ERR"kvm: Unsupported userspace halt!"); | ||
450 | return 0; | ||
451 | } | ||
452 | } | ||
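/*
 * Illustrative arithmetic (not part of this patch): itc_diff is in
 * ITC cycles, so div64_64(itc_diff, cyc_per_usec) yields microseconds
 * and ktime_set(0, 1000 * expires) converts that to nanoseconds.
 * With a hypothetical cyc_per_usec of 1600 (a 1.6 GHz ITC), an
 * itc_diff of 3200000 cycles sleeps for 2000us, i.e.
 * ktime_set(0, 2000000).
 */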
453 | |||
454 | static int handle_vm_shutdown(struct kvm_vcpu *vcpu, | ||
455 | struct kvm_run *kvm_run) | ||
456 | { | ||
457 | kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; | ||
458 | return 0; | ||
459 | } | ||
460 | |||
461 | static int handle_external_interrupt(struct kvm_vcpu *vcpu, | ||
462 | struct kvm_run *kvm_run) | ||
463 | { | ||
464 | return 1; | ||
465 | } | ||
466 | |||
467 | static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu, | ||
468 | struct kvm_run *kvm_run) = { | ||
469 | [EXIT_REASON_VM_PANIC] = handle_vm_error, | ||
470 | [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio, | ||
471 | [EXIT_REASON_PAL_CALL] = handle_pal_call, | ||
472 | [EXIT_REASON_SAL_CALL] = handle_sal_call, | ||
473 | [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6, | ||
474 | [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown, | ||
475 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, | ||
476 | [EXIT_REASON_IPI] = handle_ipi, | ||
477 | [EXIT_REASON_PTC_G] = handle_global_purge, | ||
478 | |||
479 | }; | ||
480 | |||
481 | static const int kvm_vti_max_exit_handlers = | ||
482 | sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers); | ||
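/*
 * Illustrative note (not part of this patch): the open-coded
 * sizeof(a)/sizeof(*a) above is exactly what <linux/kernel.h>
 * provides as ARRAY_SIZE(kvm_vti_exit_handlers).
 */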
483 | |||
484 | static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu) | ||
485 | { | ||
486 | } | ||
487 | |||
488 | static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu) | ||
489 | { | ||
490 | struct exit_ctl_data *p_exit_data; | ||
491 | |||
492 | p_exit_data = kvm_get_exit_data(vcpu); | ||
493 | return p_exit_data->exit_reason; | ||
494 | } | ||
495 | |||
496 | /* | ||
497 | * The guest has exited. See if we can fix it or if we need userspace | ||
498 | * assistance. | ||
499 | */ | ||
500 | static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | ||
501 | { | ||
502 | u32 exit_reason = kvm_get_exit_reason(vcpu); | ||
503 | vcpu->arch.last_exit = exit_reason; | ||
504 | |||
505 | if (exit_reason < kvm_vti_max_exit_handlers | ||
506 | && kvm_vti_exit_handlers[exit_reason]) | ||
507 | return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run); | ||
508 | else { | ||
509 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | ||
510 | kvm_run->hw.hardware_exit_reason = exit_reason; | ||
511 | } | ||
512 | return 0; | ||
513 | } | ||
514 | |||
515 | static inline void vti_set_rr6(unsigned long rr6) | ||
516 | { | ||
517 | ia64_set_rr(RR6, rr6); | ||
518 | ia64_srlz_i(); | ||
519 | } | ||
520 | |||
521 | static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu) | ||
522 | { | ||
523 | unsigned long pte; | ||
524 | struct kvm *kvm = vcpu->kvm; | ||
525 | int r; | ||
526 | |||
527 | /*Insert a pair of TRs to map the vmm*/ ||
528 | pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); | ||
529 | r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); | ||
530 | if (r < 0) | ||
531 | goto out; | ||
532 | vcpu->arch.vmm_tr_slot = r; | ||
533 | /*Insert a pair of TRs to map the vm's data*/ ||
534 | pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL)); | ||
535 | r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE, | ||
536 | pte, KVM_VM_DATA_SHIFT); | ||
537 | if (r < 0) | ||
538 | goto out; | ||
539 | vcpu->arch.vm_tr_slot = r; | ||
540 | r = 0; | ||
541 | out: | ||
542 | return r; | ||
543 | |||
544 | } | ||
545 | |||
546 | static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu) | ||
547 | { | ||
548 | |||
549 | ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot); | ||
550 | ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot); | ||
551 | |||
552 | } | ||
553 | |||
554 | static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu) | ||
555 | { | ||
556 | int cpu = smp_processor_id(); | ||
557 | |||
558 | if (vcpu->arch.last_run_cpu != cpu || | ||
559 | per_cpu(last_vcpu, cpu) != vcpu) { | ||
560 | per_cpu(last_vcpu, cpu) = vcpu; | ||
561 | vcpu->arch.last_run_cpu = cpu; | ||
562 | kvm_flush_tlb_all(); | ||
563 | } | ||
564 | |||
565 | vcpu->arch.host_rr6 = ia64_get_rr(RR6); | ||
566 | vti_set_rr6(vcpu->arch.vmm_rr); | ||
567 | return kvm_insert_vmm_mapping(vcpu); | ||
568 | } | ||
569 | static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu) | ||
570 | { | ||
571 | kvm_purge_vmm_mapping(vcpu); | ||
572 | vti_set_rr6(vcpu->arch.host_rr6); | ||
573 | } | ||
574 | |||
575 | static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
576 | { | ||
577 | union context *host_ctx, *guest_ctx; | ||
578 | int r; | ||
579 | |||
580 | /*Get host and guest context in the guest address space.*/ ||
581 | host_ctx = kvm_get_host_context(vcpu); | ||
582 | guest_ctx = kvm_get_guest_context(vcpu); | ||
583 | |||
584 | r = kvm_vcpu_pre_transition(vcpu); | ||
585 | if (r < 0) | ||
586 | goto out; | ||
587 | kvm_vmm_info->tramp_entry(host_ctx, guest_ctx); | ||
588 | kvm_vcpu_post_transition(vcpu); | ||
589 | r = 0; | ||
590 | out: | ||
591 | return r; | ||
592 | } | ||
593 | |||
594 | static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
595 | { | ||
596 | int r; | ||
597 | |||
598 | again: | ||
599 | preempt_disable(); | ||
600 | |||
601 | kvm_prepare_guest_switch(vcpu); | ||
602 | local_irq_disable(); | ||
603 | |||
604 | if (signal_pending(current)) { | ||
605 | local_irq_enable(); | ||
606 | preempt_enable(); | ||
607 | r = -EINTR; | ||
608 | kvm_run->exit_reason = KVM_EXIT_INTR; | ||
609 | goto out; | ||
610 | } | ||
611 | |||
612 | vcpu->guest_mode = 1; | ||
613 | kvm_guest_enter(); | ||
614 | |||
615 | r = vti_vcpu_run(vcpu, kvm_run); | ||
616 | if (r < 0) { | ||
617 | local_irq_enable(); | ||
618 | preempt_enable(); | ||
619 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; | ||
620 | goto out; | ||
621 | } | ||
622 | |||
623 | vcpu->arch.launched = 1; | ||
624 | vcpu->guest_mode = 0; | ||
625 | local_irq_enable(); | ||
626 | |||
627 | /* | ||
628 | * We must have an instruction between local_irq_enable() and | ||
629 | * kvm_guest_exit(), so the timer interrupt isn't delayed by | ||
630 | * the interrupt shadow. The stat.exits increment will do nicely. | ||
631 | * But we need to prevent reordering, hence this barrier(): | ||
632 | */ | ||
633 | barrier(); | ||
634 | |||
635 | kvm_guest_exit(); | ||
636 | |||
637 | preempt_enable(); | ||
638 | |||
639 | r = kvm_handle_exit(kvm_run, vcpu); | ||
640 | |||
641 | if (r > 0) { | ||
642 | if (!need_resched()) | ||
643 | goto again; | ||
644 | } | ||
645 | |||
646 | out: | ||
647 | if (r > 0) { | ||
648 | kvm_resched(vcpu); | ||
649 | goto again; | ||
650 | } | ||
651 | |||
652 | return r; | ||
653 | } | ||
654 | |||
655 | static void kvm_set_mmio_data(struct kvm_vcpu *vcpu) | ||
656 | { | ||
657 | struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu); | ||
658 | |||
659 | if (!vcpu->mmio_is_write) | ||
660 | memcpy(&p->data, vcpu->mmio_data, 8); | ||
661 | p->state = STATE_IORESP_READY; | ||
662 | } | ||
663 | |||
664 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
665 | { | ||
666 | int r; | ||
667 | sigset_t sigsaved; | ||
668 | |||
669 | vcpu_load(vcpu); | ||
670 | |||
671 | if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_UNINITIALIZED)) { | ||
672 | kvm_vcpu_block(vcpu); | ||
673 | vcpu_put(vcpu); | ||
674 | return -EAGAIN; | ||
675 | } | ||
676 | |||
677 | if (vcpu->sigset_active) | ||
678 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); | ||
679 | |||
680 | if (vcpu->mmio_needed) { | ||
681 | memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); | ||
682 | kvm_set_mmio_data(vcpu); | ||
683 | vcpu->mmio_read_completed = 1; | ||
684 | vcpu->mmio_needed = 0; | ||
685 | } | ||
686 | r = __vcpu_run(vcpu, kvm_run); | ||
687 | |||
688 | if (vcpu->sigset_active) | ||
689 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); | ||
690 | |||
691 | vcpu_put(vcpu); | ||
692 | return r; | ||
693 | } | ||
694 | |||
695 | /* | ||
696 | * Allocate 16M memory for every vm to hold its specific data. | ||
697 | * Its memory map is defined in kvm_host.h. | ||
698 | */ | ||
699 | static struct kvm *kvm_alloc_kvm(void) | ||
700 | { | ||
701 | |||
702 | struct kvm *kvm; | ||
703 | uint64_t vm_base; | ||
704 | |||
705 | vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE)); | ||
706 | |||
707 | if (!vm_base) | ||
708 | return ERR_PTR(-ENOMEM); | ||
709 | printk(KERN_DEBUG"kvm: VM data's base Address:0x%lx\n", vm_base); | ||
710 | |||
711 | /* Zero all pages before use! */ | ||
712 | memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); | ||
713 | |||
714 | kvm = (struct kvm *)(vm_base + KVM_VM_OFS); | ||
715 | kvm->arch.vm_base = vm_base; | ||
716 | |||
717 | return kvm; | ||
718 | } | ||
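/*
 * Illustrative layout (not part of this patch): the 16M region at
 * vm_base is carved up by the KVM_*_OFS constants from kvm_host.h.
 * The struct kvm itself sits at vm_base + KVM_VM_OFS, and
 * kvm_init_vm() below derives the remaining areas the same way:
 *
 *	kvm->arch.vhpt_base = vm_base + KVM_VHPT_OFS;
 *	kvm->arch.vtlb_base = vm_base + KVM_VTLB_OFS;
 *	kvm->arch.vpd_base  = vm_base + KVM_VPD_OFS;
 */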
719 | |||
720 | struct kvm_io_range { | ||
721 | unsigned long start; | ||
722 | unsigned long size; | ||
723 | unsigned long type; | ||
724 | }; | ||
725 | |||
726 | static const struct kvm_io_range io_ranges[] = { | ||
727 | {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER}, | ||
728 | {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO}, | ||
729 | {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO}, | ||
730 | {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC}, | ||
731 | {PIB_START, PIB_SIZE, GPFN_PIB}, | ||
732 | }; | ||
733 | |||
734 | static void kvm_build_io_pmt(struct kvm *kvm) | ||
735 | { | ||
736 | unsigned long i, j; | ||
737 | |||
738 | /* Mark I/O ranges */ | ||
739 | for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range)); | ||
740 | i++) { | ||
741 | for (j = io_ranges[i].start; | ||
742 | j < io_ranges[i].start + io_ranges[i].size; | ||
743 | j += PAGE_SIZE) | ||
744 | kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT, | ||
745 | io_ranges[i].type, 0); | ||
746 | } | ||
747 | |||
748 | } | ||
749 | |||
750 | /*Use unused RIDs to virtualize guest RIDs.*/ ||
751 | #define GUEST_PHYSICAL_RR0 0x1739 | ||
752 | #define GUEST_PHYSICAL_RR4 0x2739 | ||
753 | #define VMM_INIT_RR 0x1660 | ||
754 | |||
755 | static void kvm_init_vm(struct kvm *kvm) | ||
756 | { | ||
757 | long vm_base; | ||
758 | |||
759 | BUG_ON(!kvm); | ||
760 | |||
761 | kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0; | ||
762 | kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4; | ||
763 | kvm->arch.vmm_init_rr = VMM_INIT_RR; | ||
764 | |||
765 | vm_base = kvm->arch.vm_base; | ||
766 | if (vm_base) { | ||
767 | kvm->arch.vhpt_base = vm_base + KVM_VHPT_OFS; | ||
768 | kvm->arch.vtlb_base = vm_base + KVM_VTLB_OFS; | ||
769 | kvm->arch.vpd_base = vm_base + KVM_VPD_OFS; | ||
770 | } | ||
771 | |||
772 | /* | ||
773 | * Fill P2M entries for MMIO/IO ranges ||
774 | */ | ||
775 | kvm_build_io_pmt(kvm); | ||
776 | |||
777 | } | ||
778 | |||
779 | struct kvm *kvm_arch_create_vm(void) | ||
780 | { | ||
781 | struct kvm *kvm = kvm_alloc_kvm(); | ||
782 | |||
783 | if (IS_ERR(kvm)) | ||
784 | return ERR_PTR(-ENOMEM); | ||
785 | kvm_init_vm(kvm); | ||
786 | |||
787 | return kvm; | ||
788 | |||
789 | } | ||
790 | |||
791 | static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, | ||
792 | struct kvm_irqchip *chip) | ||
793 | { | ||
794 | int r; | ||
795 | |||
796 | r = 0; | ||
797 | switch (chip->chip_id) { | ||
798 | case KVM_IRQCHIP_IOAPIC: | ||
799 | memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm), | ||
800 | sizeof(struct kvm_ioapic_state)); | ||
801 | break; | ||
802 | default: | ||
803 | r = -EINVAL; | ||
804 | break; | ||
805 | } | ||
806 | return r; | ||
807 | } | ||
808 | |||
809 | static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) | ||
810 | { | ||
811 | int r; | ||
812 | |||
813 | r = 0; | ||
814 | switch (chip->chip_id) { | ||
815 | case KVM_IRQCHIP_IOAPIC: | ||
816 | memcpy(ioapic_irqchip(kvm), | ||
817 | &chip->chip.ioapic, | ||
818 | sizeof(struct kvm_ioapic_state)); | ||
819 | break; | ||
820 | default: | ||
821 | r = -EINVAL; | ||
822 | break; | ||
823 | } | ||
824 | return r; | ||
825 | } | ||
826 | |||
827 | #define RESTORE_REGS(_x) vcpu->arch._x = regs->_x | ||
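/*
 * Illustrative expansion (not part of this patch):
 * RESTORE_REGS(mp_state) becomes vcpu->arch.mp_state = regs->mp_state;
 */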
828 | |||
829 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | ||
830 | { | ||
831 | int i; | ||
832 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | ||
833 | int r; | ||
834 | |||
835 | vcpu_load(vcpu); | ||
836 | |||
837 | for (i = 0; i < 16; i++) { | ||
838 | vpd->vgr[i] = regs->vpd.vgr[i]; | ||
839 | vpd->vbgr[i] = regs->vpd.vbgr[i]; | ||
840 | } | ||
841 | for (i = 0; i < 128; i++) | ||
842 | vpd->vcr[i] = regs->vpd.vcr[i]; | ||
843 | vpd->vhpi = regs->vpd.vhpi; | ||
844 | vpd->vnat = regs->vpd.vnat; | ||
845 | vpd->vbnat = regs->vpd.vbnat; | ||
846 | vpd->vpsr = regs->vpd.vpsr; | ||
847 | |||
848 | vpd->vpr = regs->vpd.vpr; | ||
849 | |||
850 | r = -EFAULT; | ||
851 | r = copy_from_user(&vcpu->arch.guest, regs->saved_guest, | ||
852 | sizeof(union context)); | ||
853 | if (r) | ||
854 | goto out; | ||
855 | r = copy_from_user(vcpu + 1, regs->saved_stack + | ||
856 | sizeof(struct kvm_vcpu), | ||
857 | IA64_STK_OFFSET - sizeof(struct kvm_vcpu)); | ||
858 | if (r) | ||
859 | goto out; | ||
860 | vcpu->arch.exit_data = | ||
861 | ((struct kvm_vcpu *)(regs->saved_stack))->arch.exit_data; | ||
862 | |||
863 | RESTORE_REGS(mp_state); | ||
864 | RESTORE_REGS(vmm_rr); | ||
865 | memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS); | ||
866 | memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS); | ||
867 | RESTORE_REGS(itr_regions); | ||
868 | RESTORE_REGS(dtr_regions); | ||
869 | RESTORE_REGS(tc_regions); | ||
870 | RESTORE_REGS(irq_check); | ||
871 | RESTORE_REGS(itc_check); | ||
872 | RESTORE_REGS(timer_check); | ||
873 | RESTORE_REGS(timer_pending); | ||
874 | RESTORE_REGS(last_itc); | ||
875 | for (i = 0; i < 8; i++) { | ||
876 | vcpu->arch.vrr[i] = regs->vrr[i]; | ||
877 | vcpu->arch.ibr[i] = regs->ibr[i]; | ||
878 | vcpu->arch.dbr[i] = regs->dbr[i]; | ||
879 | } | ||
880 | for (i = 0; i < 4; i++) | ||
881 | vcpu->arch.insvc[i] = regs->insvc[i]; | ||
882 | RESTORE_REGS(xtp); | ||
883 | RESTORE_REGS(metaphysical_rr0); | ||
884 | RESTORE_REGS(metaphysical_rr4); | ||
885 | RESTORE_REGS(metaphysical_saved_rr0); | ||
886 | RESTORE_REGS(metaphysical_saved_rr4); | ||
887 | RESTORE_REGS(fp_psr); | ||
888 | RESTORE_REGS(saved_gp); | ||
889 | |||
890 | vcpu->arch.irq_new_pending = 1; | ||
891 | vcpu->arch.itc_offset = regs->saved_itc - ia64_getreg(_IA64_REG_AR_ITC); | ||
892 | set_bit(KVM_REQ_RESUME, &vcpu->requests); | ||
893 | |||
894 | vcpu_put(vcpu); | ||
895 | r = 0; | ||
896 | out: | ||
897 | return r; | ||
898 | } | ||
899 | |||
900 | long kvm_arch_vm_ioctl(struct file *filp, | ||
901 | unsigned int ioctl, unsigned long arg) | ||
902 | { | ||
903 | struct kvm *kvm = filp->private_data; | ||
904 | void __user *argp = (void __user *)arg; | ||
905 | int r = -EINVAL; | ||
906 | |||
907 | switch (ioctl) { | ||
908 | case KVM_SET_MEMORY_REGION: { | ||
909 | struct kvm_memory_region kvm_mem; | ||
910 | struct kvm_userspace_memory_region kvm_userspace_mem; | ||
911 | |||
912 | r = -EFAULT; | ||
913 | if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem)) | ||
914 | goto out; | ||
915 | kvm_userspace_mem.slot = kvm_mem.slot; | ||
916 | kvm_userspace_mem.flags = kvm_mem.flags; | ||
917 | kvm_userspace_mem.guest_phys_addr = | ||
918 | kvm_mem.guest_phys_addr; | ||
919 | kvm_userspace_mem.memory_size = kvm_mem.memory_size; | ||
920 | r = kvm_vm_ioctl_set_memory_region(kvm, | ||
921 | &kvm_userspace_mem, 0); | ||
922 | if (r) | ||
923 | goto out; | ||
924 | break; | ||
925 | } | ||
926 | case KVM_CREATE_IRQCHIP: | ||
927 | r = -EFAULT; | ||
928 | r = kvm_ioapic_init(kvm); | ||
929 | if (r) | ||
930 | goto out; | ||
931 | break; | ||
932 | case KVM_IRQ_LINE: { | ||
933 | struct kvm_irq_level irq_event; | ||
934 | |||
935 | r = -EFAULT; | ||
936 | if (copy_from_user(&irq_event, argp, sizeof irq_event)) | ||
937 | goto out; | ||
938 | if (irqchip_in_kernel(kvm)) { | ||
939 | mutex_lock(&kvm->lock); | ||
940 | kvm_ioapic_set_irq(kvm->arch.vioapic, | ||
941 | irq_event.irq, | ||
942 | irq_event.level); | ||
943 | mutex_unlock(&kvm->lock); | ||
944 | r = 0; | ||
945 | } | ||
946 | break; | ||
947 | } | ||
948 | case KVM_GET_IRQCHIP: { | ||
949 | /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ | ||
950 | struct kvm_irqchip chip; | ||
951 | |||
952 | r = -EFAULT; | ||
953 | if (copy_from_user(&chip, argp, sizeof chip)) | ||
954 | goto out; | ||
955 | r = -ENXIO; | ||
956 | if (!irqchip_in_kernel(kvm)) | ||
957 | goto out; | ||
958 | r = kvm_vm_ioctl_get_irqchip(kvm, &chip); | ||
959 | if (r) | ||
960 | goto out; | ||
961 | r = -EFAULT; | ||
962 | if (copy_to_user(argp, &chip, sizeof chip)) | ||
963 | goto out; | ||
964 | r = 0; | ||
965 | break; | ||
966 | } | ||
967 | case KVM_SET_IRQCHIP: { | ||
968 | /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ | ||
969 | struct kvm_irqchip chip; | ||
970 | |||
971 | r = -EFAULT; | ||
972 | if (copy_from_user(&chip, argp, sizeof chip)) | ||
973 | goto out; | ||
974 | r = -ENXIO; | ||
975 | if (!irqchip_in_kernel(kvm)) | ||
976 | goto out; | ||
977 | r = kvm_vm_ioctl_set_irqchip(kvm, &chip); | ||
978 | if (r) | ||
979 | goto out; | ||
980 | r = 0; | ||
981 | break; | ||
982 | } | ||
983 | default: | ||
984 | ; | ||
985 | } | ||
986 | out: | ||
987 | return r; | ||
988 | } | ||
989 | |||
990 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | ||
991 | struct kvm_sregs *sregs) | ||
992 | { | ||
993 | return -EINVAL; | ||
994 | } | ||
995 | |||
996 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | ||
997 | struct kvm_sregs *sregs) | ||
998 | { | ||
999 | return -EINVAL; | ||
1000 | |||
1001 | } | ||
1002 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | ||
1003 | struct kvm_translation *tr) | ||
1004 | { | ||
1005 | |||
1006 | return -EINVAL; | ||
1007 | } | ||
1008 | |||
1009 | static int kvm_alloc_vmm_area(void) | ||
1010 | { | ||
1011 | if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) { | ||
1012 | kvm_vmm_base = __get_free_pages(GFP_KERNEL, | ||
1013 | get_order(KVM_VMM_SIZE)); | ||
1014 | if (!kvm_vmm_base) | ||
1015 | return -ENOMEM; | ||
1016 | |||
1017 | memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); | ||
1018 | kvm_vm_buffer = kvm_vmm_base + VMM_SIZE; | ||
1019 | |||
1020 | printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n", | ||
1021 | kvm_vmm_base, kvm_vm_buffer); | ||
1022 | } | ||
1023 | |||
1024 | return 0; | ||
1025 | } | ||
1026 | |||
1027 | static void kvm_free_vmm_area(void) | ||
1028 | { | ||
1029 | if (kvm_vmm_base) { | ||
1030 | /*Zero this area before freeing to avoid leaking data!!*/ ||
1031 | memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); | ||
1032 | free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE)); | ||
1033 | kvm_vmm_base = 0; | ||
1034 | kvm_vm_buffer = 0; | ||
1035 | kvm_vsa_base = 0; | ||
1036 | } | ||
1037 | } | ||
1038 | |||
1039 | /* | ||
1040 | * Make sure that a cpu that is being hot-unplugged does not have any vcpus | ||
1041 | * cached on it. Left blank for IA64. ||
1042 | */ | ||
1043 | void decache_vcpus_on_cpu(int cpu) | ||
1044 | { | ||
1045 | } | ||
1046 | |||
1047 | static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
1048 | { | ||
1049 | } | ||
1050 | |||
1051 | static int vti_init_vpd(struct kvm_vcpu *vcpu) | ||
1052 | { | ||
1053 | int i; | ||
1054 | union cpuid3_t cpuid3; | ||
1055 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | ||
1056 | |||
1057 | if (IS_ERR(vpd)) | ||
1058 | return PTR_ERR(vpd); | ||
1059 | |||
1060 | /* CPUID init */ | ||
1061 | for (i = 0; i < 5; i++) | ||
1062 | vpd->vcpuid[i] = ia64_get_cpuid(i); | ||
1063 | |||
1064 | /* Limit the CPUID number to 5 */ | ||
1065 | cpuid3.value = vpd->vcpuid[3]; | ||
1066 | cpuid3.number = 4; /* 5 - 1 */ | ||
1067 | vpd->vcpuid[3] = cpuid3.value; | ||
1068 | |||
1069 | /*Set vac and vdc fields*/ | ||
1070 | vpd->vac.a_from_int_cr = 1; | ||
1071 | vpd->vac.a_to_int_cr = 1; | ||
1072 | vpd->vac.a_from_psr = 1; | ||
1073 | vpd->vac.a_from_cpuid = 1; | ||
1074 | vpd->vac.a_cover = 1; | ||
1075 | vpd->vac.a_bsw = 1; | ||
1076 | vpd->vac.a_int = 1; | ||
1077 | vpd->vdc.d_vmsw = 1; | ||
1078 | |||
1079 | /*Set virtual buffer*/ | ||
1080 | vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE; | ||
1081 | |||
1082 | return 0; | ||
1083 | } | ||
1084 | |||
1085 | static int vti_create_vp(struct kvm_vcpu *vcpu) | ||
1086 | { | ||
1087 | long ret; | ||
1088 | struct vpd *vpd = vcpu->arch.vpd; | ||
1089 | unsigned long vmm_ivt; | ||
1090 | |||
1091 | vmm_ivt = kvm_vmm_info->vmm_ivt; | ||
1092 | |||
1093 | printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt); | ||
1094 | |||
1095 | ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0); | ||
1096 | |||
1097 | if (ret) { | ||
1098 | printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n"); | ||
1099 | return -EINVAL; | ||
1100 | } | ||
1101 | return 0; | ||
1102 | } | ||
1103 | |||
1104 | static void init_ptce_info(struct kvm_vcpu *vcpu) | ||
1105 | { | ||
1106 | ia64_ptce_info_t ptce = {0}; | ||
1107 | |||
1108 | ia64_get_ptce(&ptce); | ||
1109 | vcpu->arch.ptce_base = ptce.base; | ||
1110 | vcpu->arch.ptce_count[0] = ptce.count[0]; | ||
1111 | vcpu->arch.ptce_count[1] = ptce.count[1]; | ||
1112 | vcpu->arch.ptce_stride[0] = ptce.stride[0]; | ||
1113 | vcpu->arch.ptce_stride[1] = ptce.stride[1]; | ||
1114 | } | ||
1115 | |||
1116 | static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu) | ||
1117 | { | ||
1118 | struct hrtimer *p_ht = &vcpu->arch.hlt_timer; | ||
1119 | |||
1120 | if (hrtimer_cancel(p_ht)) | ||
1121 | hrtimer_start(p_ht, p_ht->expires, HRTIMER_MODE_ABS); | ||
1122 | } | ||
1123 | |||
1124 | static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data) | ||
1125 | { | ||
1126 | struct kvm_vcpu *vcpu; | ||
1127 | wait_queue_head_t *q; | ||
1128 | |||
1129 | vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer); | ||
1130 | if (vcpu->arch.mp_state != VCPU_MP_STATE_HALTED) | ||
1131 | goto out; | ||
1132 | |||
1133 | q = &vcpu->wq; | ||
1134 | if (waitqueue_active(q)) { | ||
1135 | vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE; | ||
1136 | wake_up_interruptible(q); | ||
1137 | } | ||
1138 | out: | ||
1139 | vcpu->arch.timer_check = 1; | ||
1140 | return HRTIMER_NORESTART; | ||
1141 | } | ||
1142 | |||
1143 | #define PALE_RESET_ENTRY 0x80000000ffffffb0UL | ||
1144 | |||
1145 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | ||
1146 | { | ||
1147 | struct kvm_vcpu *v; | ||
1148 | int r; | ||
1149 | int i; | ||
1150 | long itc_offset; | ||
1151 | struct kvm *kvm = vcpu->kvm; | ||
1152 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
1153 | |||
1154 | union context *p_ctx = &vcpu->arch.guest; | ||
1155 | struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu); | ||
1156 | |||
1157 | /*Init vcpu context for first run.*/ | ||
1158 | if (IS_ERR(vmm_vcpu)) | ||
1159 | return PTR_ERR(vmm_vcpu); | ||
1160 | |||
1161 | if (vcpu->vcpu_id == 0) { | ||
1162 | vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE; | ||
1163 | |||
1164 | /*Set entry address for first run.*/ | ||
1165 | regs->cr_iip = PALE_RESET_ENTRY; | ||
1166 | |||
1167 | /*Initialize itc offset for vcpus*/ ||
1168 | itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC); | ||
1169 | for (i = 0; i < MAX_VCPU_NUM; i++) { | ||
1170 | v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i); | ||
1171 | v->arch.itc_offset = itc_offset; | ||
1172 | v->arch.last_itc = 0; | ||
1173 | } | ||
1174 | } else | ||
1175 | vcpu->arch.mp_state = VCPU_MP_STATE_UNINITIALIZED; | ||
1176 | |||
1177 | r = -ENOMEM; | ||
1178 | vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL); | ||
1179 | if (!vcpu->arch.apic) | ||
1180 | goto out; | ||
1181 | vcpu->arch.apic->vcpu = vcpu; | ||
1182 | |||
1183 | p_ctx->gr[1] = 0; | ||
1184 | p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + IA64_STK_OFFSET); | ||
1185 | p_ctx->gr[13] = (unsigned long)vmm_vcpu; | ||
1186 | p_ctx->psr = 0x1008522000UL; | ||
1187 | p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/ | ||
1188 | p_ctx->caller_unat = 0; | ||
1189 | p_ctx->pr = 0x0; | ||
1190 | p_ctx->ar[36] = 0x0; /*unat*/ | ||
1191 | p_ctx->ar[19] = 0x0; /*rnat*/ | ||
1192 | p_ctx->ar[18] = (unsigned long)vmm_vcpu + | ||
1193 | ((sizeof(struct kvm_vcpu)+15) & ~15); | ||
1194 | p_ctx->ar[64] = 0x0; /*pfs*/ | ||
1195 | p_ctx->cr[0] = 0x7e04UL; | ||
1196 | p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt; | ||
1197 | p_ctx->cr[8] = 0x3c; | ||
1198 | |||
1199 | /*Initialize region registers*/ ||
1200 | p_ctx->rr[0] = 0x30; | ||
1201 | p_ctx->rr[1] = 0x30; | ||
1202 | p_ctx->rr[2] = 0x30; | ||
1203 | p_ctx->rr[3] = 0x30; | ||
1204 | p_ctx->rr[4] = 0x30; | ||
1205 | p_ctx->rr[5] = 0x30; | ||
1206 | p_ctx->rr[7] = 0x30; | ||
1207 | |||
1208 | /*Initialize branch register 0*/ ||
1209 | p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry; | ||
1210 | |||
1211 | vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr; | ||
1212 | vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0; | ||
1213 | vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4; | ||
1214 | |||
1215 | hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
1216 | vcpu->arch.hlt_timer.function = hlt_timer_fn; | ||
1217 | |||
1218 | vcpu->arch.last_run_cpu = -1; | ||
1219 | vcpu->arch.vpd = (struct vpd *)VPD_ADDR(vcpu->vcpu_id); | ||
1220 | vcpu->arch.vsa_base = kvm_vsa_base; | ||
1221 | vcpu->arch.__gp = kvm_vmm_gp; | ||
1222 | vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock); | ||
1223 | vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_ADDR(vcpu->vcpu_id); | ||
1224 | vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_ADDR(vcpu->vcpu_id); | ||
1225 | init_ptce_info(vcpu); | ||
1226 | |||
1227 | r = 0; | ||
1228 | out: | ||
1229 | return r; | ||
1230 | } | ||
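/*
 * Illustrative note (not part of this patch): each vcpu sits at the
 * bottom of an IA64_STK_OFFSET-sized region, mirroring the kernel's
 * task/stack layout, so the initial context above points the stack
 * just below the top of that region and r13 (the "thread pointer")
 * at the vcpu itself:
 *
 *	p_ctx->gr[12] = (unsigned long)vmm_vcpu + IA64_STK_OFFSET;  // sp
 *	p_ctx->gr[13] = (unsigned long)vmm_vcpu;                    // r13
 */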
1231 | |||
1232 | static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id) | ||
1233 | { | ||
1234 | unsigned long psr; | ||
1235 | int r; | ||
1236 | |||
1237 | local_irq_save(psr); | ||
1238 | r = kvm_insert_vmm_mapping(vcpu); | ||
1239 | if (r) | ||
1240 | goto fail; | ||
1241 | r = kvm_vcpu_init(vcpu, vcpu->kvm, id); | ||
1242 | if (r) | ||
1243 | goto fail; | ||
1244 | |||
1245 | r = vti_init_vpd(vcpu); | ||
1246 | if (r) { | ||
1247 | printk(KERN_DEBUG"kvm: vpd init error!!\n"); | ||
1248 | goto uninit; | ||
1249 | } | ||
1250 | |||
1251 | r = vti_create_vp(vcpu); | ||
1252 | if (r) | ||
1253 | goto uninit; | ||
1254 | |||
1255 | kvm_purge_vmm_mapping(vcpu); | ||
1256 | local_irq_restore(psr); | ||
1257 | |||
1258 | return 0; | ||
1259 | uninit: | ||
1260 | kvm_vcpu_uninit(vcpu); | ||
1261 | fail: | ||
1262 | return r; | ||
1263 | } | ||
1264 | |||
1265 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | ||
1266 | unsigned int id) | ||
1267 | { | ||
1268 | struct kvm_vcpu *vcpu; | ||
1269 | unsigned long vm_base = kvm->arch.vm_base; | ||
1270 | int r; | ||
1271 | int cpu; | ||
1272 | |||
1273 | r = -ENOMEM; | ||
1274 | if (!vm_base) { | ||
1275 | printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id); | ||
1276 | goto fail; | ||
1277 | } | ||
1278 | vcpu = (struct kvm_vcpu *)(vm_base + KVM_VCPU_OFS + VCPU_SIZE * id); | ||
1279 | vcpu->kvm = kvm; | ||
1280 | |||
1281 | cpu = get_cpu(); | ||
1282 | vti_vcpu_load(vcpu, cpu); | ||
1283 | r = vti_vcpu_setup(vcpu, id); | ||
1284 | put_cpu(); | ||
1285 | |||
1286 | if (r) { | ||
1287 | printk(KERN_DEBUG"kvm: vcpu_setup error!!\n"); | ||
1288 | goto fail; | ||
1289 | } | ||
1290 | |||
1291 | return vcpu; | ||
1292 | fail: | ||
1293 | return ERR_PTR(r); | ||
1294 | } | ||
1295 | |||
1296 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | ||
1297 | { | ||
1298 | return 0; | ||
1299 | } | ||
1300 | |||
1301 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | ||
1302 | { | ||
1303 | return -EINVAL; | ||
1304 | } | ||
1305 | |||
1306 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | ||
1307 | { | ||
1308 | return -EINVAL; | ||
1309 | } | ||
1310 | |||
1311 | int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, | ||
1312 | struct kvm_debug_guest *dbg) | ||
1313 | { | ||
1314 | return -EINVAL; | ||
1315 | } | ||
1316 | |||
1317 | static void free_kvm(struct kvm *kvm) | ||
1318 | { | ||
1319 | unsigned long vm_base = kvm->arch.vm_base; | ||
1320 | |||
1321 | if (vm_base) { | ||
1322 | memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); | ||
1323 | free_pages(vm_base, get_order(KVM_VM_DATA_SIZE)); | ||
1324 | } | ||
1325 | |||
1326 | } | ||
1327 | |||
1328 | static void kvm_release_vm_pages(struct kvm *kvm) | ||
1329 | { | ||
1330 | struct kvm_memory_slot *memslot; | ||
1331 | int i, j; | ||
1332 | unsigned long base_gfn; | ||
1333 | |||
1334 | for (i = 0; i < kvm->nmemslots; i++) { | ||
1335 | memslot = &kvm->memslots[i]; | ||
1336 | base_gfn = memslot->base_gfn; | ||
1337 | |||
1338 | for (j = 0; j < memslot->npages; j++) { | ||
1339 | if (memslot->rmap[j]) | ||
1340 | put_page((struct page *)memslot->rmap[j]); | ||
1341 | } | ||
1342 | } | ||
1343 | } | ||
1344 | |||
1345 | void kvm_arch_destroy_vm(struct kvm *kvm) | ||
1346 | { | ||
1347 | kfree(kvm->arch.vioapic); | ||
1348 | kvm_release_vm_pages(kvm); | ||
1349 | kvm_free_physmem(kvm); | ||
1350 | free_kvm(kvm); | ||
1351 | } | ||
1352 | |||
1353 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | ||
1354 | { | ||
1355 | } | ||
1356 | |||
1357 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
1358 | { | ||
1359 | if (cpu != vcpu->cpu) { | ||
1360 | vcpu->cpu = cpu; | ||
1361 | if (vcpu->arch.ht_active) | ||
1362 | kvm_migrate_hlt_timer(vcpu); | ||
1363 | } | ||
1364 | } | ||
1365 | |||
1366 | #define SAVE_REGS(_x) regs->_x = vcpu->arch._x | ||
1367 | |||
1368 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | ||
1369 | { | ||
1370 | int i; | ||
1371 | int r; | ||
1372 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | ||
1373 | vcpu_load(vcpu); | ||
1374 | |||
1375 | for (i = 0; i < 16; i++) { | ||
1376 | regs->vpd.vgr[i] = vpd->vgr[i]; | ||
1377 | regs->vpd.vbgr[i] = vpd->vbgr[i]; | ||
1378 | } | ||
1379 | for (i = 0; i < 128; i++) | ||
1380 | regs->vpd.vcr[i] = vpd->vcr[i]; | ||
1381 | regs->vpd.vhpi = vpd->vhpi; | ||
1382 | regs->vpd.vnat = vpd->vnat; | ||
1383 | regs->vpd.vbnat = vpd->vbnat; | ||
1384 | regs->vpd.vpsr = vpd->vpsr; | ||
1385 | regs->vpd.vpr = vpd->vpr; | ||
1386 | |||
1387 | r = -EFAULT; | ||
1388 | r = copy_to_user(regs->saved_guest, &vcpu->arch.guest, | ||
1389 | sizeof(union context)); | ||
1390 | if (r) | ||
1391 | goto out; | ||
1392 | r = copy_to_user(regs->saved_stack, (void *)vcpu, IA64_STK_OFFSET); | ||
1393 | if (r) | ||
1394 | goto out; | ||
1395 | SAVE_REGS(mp_state); | ||
1396 | SAVE_REGS(vmm_rr); | ||
1397 | memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS); | ||
1398 | memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS); | ||
1399 | SAVE_REGS(itr_regions); | ||
1400 | SAVE_REGS(dtr_regions); | ||
1401 | SAVE_REGS(tc_regions); | ||
1402 | SAVE_REGS(irq_check); | ||
1403 | SAVE_REGS(itc_check); | ||
1404 | SAVE_REGS(timer_check); | ||
1405 | SAVE_REGS(timer_pending); | ||
1406 | SAVE_REGS(last_itc); | ||
1407 | for (i = 0; i < 8; i++) { | ||
1408 | regs->vrr[i] = vcpu->arch.vrr[i]; | ||
1409 | regs->ibr[i] = vcpu->arch.ibr[i]; | ||
1410 | regs->dbr[i] = vcpu->arch.dbr[i]; | ||
1411 | } | ||
1412 | for (i = 0; i < 4; i++) | ||
1413 | regs->insvc[i] = vcpu->arch.insvc[i]; | ||
1414 | regs->saved_itc = vcpu->arch.itc_offset + ia64_getreg(_IA64_REG_AR_ITC); | ||
1415 | SAVE_REGS(xtp); | ||
1416 | SAVE_REGS(metaphysical_rr0); | ||
1417 | SAVE_REGS(metaphysical_rr4); | ||
1418 | SAVE_REGS(metaphysical_saved_rr0); | ||
1419 | SAVE_REGS(metaphysical_saved_rr4); | ||
1420 | SAVE_REGS(fp_psr); | ||
1421 | SAVE_REGS(saved_gp); | ||
1422 | vcpu_put(vcpu); | ||
1423 | r = 0; | ||
1424 | out: | ||
1425 | return r; | ||
1426 | } | ||
1427 | |||
1428 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | ||
1429 | { | ||
1430 | |||
1431 | hrtimer_cancel(&vcpu->arch.hlt_timer); | ||
1432 | kfree(vcpu->arch.apic); | ||
1433 | } | ||
1434 | |||
1435 | |||
1436 | long kvm_arch_vcpu_ioctl(struct file *filp, | ||
1437 | unsigned int ioctl, unsigned long arg) | ||
1438 | { | ||
1439 | return -EINVAL; | ||
1440 | } | ||
1441 | |||
1442 | int kvm_arch_set_memory_region(struct kvm *kvm, | ||
1443 | struct kvm_userspace_memory_region *mem, | ||
1444 | struct kvm_memory_slot old, | ||
1445 | int user_alloc) | ||
1446 | { | ||
1447 | unsigned long i; | ||
1448 | struct page *page; | ||
1449 | int npages = mem->memory_size >> PAGE_SHIFT; | ||
1450 | struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot]; | ||
1451 | unsigned long base_gfn = memslot->base_gfn; | ||
1452 | |||
1453 | for (i = 0; i < npages; i++) { | ||
1454 | page = gfn_to_page(kvm, base_gfn + i); | ||
1455 | kvm_set_pmt_entry(kvm, base_gfn + i, | ||
1456 | page_to_pfn(page) << PAGE_SHIFT, | ||
1457 | _PAGE_AR_RWX|_PAGE_MA_WB); | ||
1458 | memslot->rmap[i] = (unsigned long)page; | ||
1459 | } | ||
1460 | |||
1461 | return 0; | ||
1462 | } | ||
1463 | |||
1464 | |||
1465 | long kvm_arch_dev_ioctl(struct file *filp, | ||
1466 | unsigned int ioctl, unsigned long arg) | ||
1467 | { | ||
1468 | return -EINVAL; | ||
1469 | } | ||
1470 | |||
1471 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | ||
1472 | { | ||
1473 | kvm_vcpu_uninit(vcpu); | ||
1474 | } | ||
1475 | |||
1476 | static int vti_cpu_has_kvm_support(void) | ||
1477 | { | ||
1478 | long avail = 1, status = 1, control = 1; | ||
1479 | long ret; | ||
1480 | |||
1481 | ret = ia64_pal_proc_get_features(&avail, &status, &control, 0); | ||
1482 | if (ret) | ||
1483 | goto out; | ||
1484 | |||
1485 | if (!(avail & PAL_PROC_VM_BIT)) | ||
1486 | goto out; | ||
1487 | |||
1488 | printk(KERN_DEBUG"kvm: Hardware Supports VT\n"); | ||
1489 | |||
1490 | ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info); | ||
1491 | if (ret) | ||
1492 | goto out; | ||
1493 | printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size); | ||
1494 | |||
1495 | if (!(vp_env_info & VP_OPCODE)) { | ||
1496 | printk(KERN_WARNING"kvm: No opcode ability on hardware, " | ||
1497 | "vm_env_info:0x%lx\n", vp_env_info); | ||
1498 | } | ||
1499 | |||
1500 | return 1; | ||
1501 | out: | ||
1502 | return 0; | ||
1503 | } | ||
1504 | |||
1505 | static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info, | ||
1506 | struct module *module) | ||
1507 | { | ||
1508 | unsigned long module_base; | ||
1509 | unsigned long vmm_size; | ||
1510 | |||
1511 | unsigned long vmm_offset, func_offset, fdesc_offset; | ||
1512 | struct fdesc *p_fdesc; | ||
1513 | |||
1514 | BUG_ON(!module); | ||
1515 | |||
1516 | if (!kvm_vmm_base) { | ||
1517 | printk("kvm: kvm area hasn't been initilized yet!!\n"); | ||
1518 | return -EFAULT; | ||
1519 | } | ||
1520 | |||
1521 | /*Calculate new position of relocated vmm module.*/ | ||
1522 | module_base = (unsigned long)module->module_core; | ||
1523 | vmm_size = module->core_size; | ||
1524 | if (unlikely(vmm_size > KVM_VMM_SIZE)) | ||
1525 | return -EFAULT; | ||
1526 | |||
1527 | memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size); | ||
1528 | kvm_flush_icache(kvm_vmm_base, vmm_size); | ||
1529 | |||
1530 | /*Recalculate kvm_vmm_info based on new VMM*/ | ||
1531 | vmm_offset = vmm_info->vmm_ivt - module_base; | ||
1532 | kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset; | ||
1533 | printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n", | ||
1534 | kvm_vmm_info->vmm_ivt); | ||
1535 | |||
1536 | fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base; | ||
1537 | kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE + | ||
1538 | fdesc_offset); | ||
1539 | func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base; | ||
1540 | p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); | ||
1541 | p_fdesc->ip = KVM_VMM_BASE + func_offset; | ||
1542 | p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base); | ||
1543 | |||
1544 | printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n", | ||
1545 | KVM_VMM_BASE+func_offset); | ||
1546 | |||
1547 | fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base; | ||
1548 | kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE + | ||
1549 | fdesc_offset); | ||
1550 | func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base; | ||
1551 | p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); | ||
1552 | p_fdesc->ip = KVM_VMM_BASE + func_offset; | ||
1553 | p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base); | ||
1554 | |||
1555 | kvm_vmm_gp = p_fdesc->gp; | ||
1556 | |||
1557 | printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n", | ||
1558 | kvm_vmm_info->vmm_entry); | ||
1559 | printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n", | ||
1560 | KVM_VMM_BASE + func_offset); | ||
1561 | |||
1562 | return 0; | ||
1563 | } | ||
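/*
 * Illustrative note (not part of this patch): on ia64 a C function
 * pointer is really a pointer to a function descriptor (the struct
 * fdesc near the top of this file), an {ip, gp} pair. Relocation
 * therefore patches both words, and an indirect call through the
 * descriptor then enters at the new ip with the global pointer taken
 * from the descriptor, conceptually:
 *
 *	struct fdesc *fd = (struct fdesc *)kvm_vmm_info->tramp_entry;
 *	// call branches to fd->ip with gp set to fd->gp
 */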
1564 | |||
1565 | int kvm_arch_init(void *opaque) | ||
1566 | { | ||
1567 | int r; | ||
1568 | struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque; | ||
1569 | |||
1570 | if (!vti_cpu_has_kvm_support()) { | ||
1571 | printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n"); | ||
1572 | r = -EOPNOTSUPP; | ||
1573 | goto out; | ||
1574 | } | ||
1575 | |||
1576 | if (kvm_vmm_info) { | ||
1577 | printk(KERN_ERR "kvm: Already loaded VMM module!\n"); | ||
1578 | r = -EEXIST; | ||
1579 | goto out; | ||
1580 | } | ||
1581 | |||
1582 | r = -ENOMEM; | ||
1583 | kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL); | ||
1584 | if (!kvm_vmm_info) | ||
1585 | goto out; | ||
1586 | |||
1587 | if (kvm_alloc_vmm_area()) | ||
1588 | goto out_free0; | ||
1589 | |||
1590 | r = kvm_relocate_vmm(vmm_info, vmm_info->module); | ||
1591 | if (r) | ||
1592 | goto out_free1; | ||
1593 | |||
1594 | return 0; | ||
1595 | |||
1596 | out_free1: | ||
1597 | kvm_free_vmm_area(); | ||
1598 | out_free0: | ||
1599 | kfree(kvm_vmm_info); | ||
1600 | out: | ||
1601 | return r; | ||
1602 | } | ||
1603 | |||
1604 | void kvm_arch_exit(void) | ||
1605 | { | ||
1606 | kvm_free_vmm_area(); | ||
1607 | kfree(kvm_vmm_info); | ||
1608 | kvm_vmm_info = NULL; | ||
1609 | } | ||
1610 | |||
1611 | static int kvm_ia64_sync_dirty_log(struct kvm *kvm, | ||
1612 | struct kvm_dirty_log *log) | ||
1613 | { | ||
1614 | struct kvm_memory_slot *memslot; | ||
1615 | int r, i; | ||
1616 | long n, base; | ||
1617 | unsigned long *dirty_bitmap = (unsigned long *)((void *)kvm - KVM_VM_OFS | ||
1618 | + KVM_MEM_DIRTY_LOG_OFS); | ||
1619 | |||
1620 | r = -EINVAL; | ||
1621 | if (log->slot >= KVM_MEMORY_SLOTS) | ||
1622 | goto out; | ||
1623 | |||
1624 | memslot = &kvm->memslots[log->slot]; | ||
1625 | r = -ENOENT; | ||
1626 | if (!memslot->dirty_bitmap) | ||
1627 | goto out; | ||
1628 | |||
1629 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; | ||
1630 | base = memslot->base_gfn / BITS_PER_LONG; | ||
1631 | |||
1632 | for (i = 0; i < n/sizeof(long); ++i) { | ||
1633 | memslot->dirty_bitmap[i] = dirty_bitmap[base + i]; | ||
1634 | dirty_bitmap[base + i] = 0; | ||
1635 | } | ||
1636 | r = 0; | ||
1637 | out: | ||
1638 | return r; | ||
1639 | } | ||
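/*
 * Illustrative arithmetic (not part of this patch): the VMM keeps one
 * global dirty bitmap at KVM_MEM_DIRTY_LOG_OFS, indexed by gfn, so a
 * slot's window starts at word base_gfn / BITS_PER_LONG and spans
 * n = ALIGN(npages, BITS_PER_LONG) / 8 bytes. A hypothetical
 * 1024-page slot at base_gfn 0x10000 copies 1024 / 64 = 16 longs
 * starting at word 0x10000 / 64 = 1024.
 */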
1640 | |||
1641 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | ||
1642 | struct kvm_dirty_log *log) | ||
1643 | { | ||
1644 | int r; | ||
1645 | int n; | ||
1646 | struct kvm_memory_slot *memslot; | ||
1647 | int is_dirty = 0; | ||
1648 | |||
1649 | spin_lock(&kvm->arch.dirty_log_lock); | ||
1650 | |||
1651 | r = kvm_ia64_sync_dirty_log(kvm, log); | ||
1652 | if (r) | ||
1653 | goto out; | ||
1654 | |||
1655 | r = kvm_get_dirty_log(kvm, log, &is_dirty); | ||
1656 | if (r) | ||
1657 | goto out; | ||
1658 | |||
1659 | /* If nothing is dirty, don't bother messing with page tables. */ | ||
1660 | if (is_dirty) { | ||
1661 | kvm_flush_remote_tlbs(kvm); | ||
1662 | memslot = &kvm->memslots[log->slot]; | ||
1663 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; | ||
1664 | memset(memslot->dirty_bitmap, 0, n); | ||
1665 | } | ||
1666 | r = 0; | ||
1667 | out: | ||
1668 | spin_unlock(&kvm->arch.dirty_log_lock); | ||
1669 | return r; | ||
1670 | } | ||
1671 | |||
1672 | int kvm_arch_hardware_setup(void) | ||
1673 | { | ||
1674 | return 0; | ||
1675 | } | ||
1676 | |||
1677 | void kvm_arch_hardware_unsetup(void) | ||
1678 | { | ||
1679 | } | ||
1680 | |||
1681 | static void vcpu_kick_intr(void *info) | ||
1682 | { | ||
1683 | #ifdef DEBUG | ||
1684 | struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info; | ||
1685 | printk(KERN_DEBUG"vcpu_kick_intr %p \n", vcpu); | ||
1686 | #endif | ||
1687 | } | ||
1688 | |||
1689 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu) | ||
1690 | { | ||
1691 | int ipi_pcpu = vcpu->cpu; | ||
1692 | |||
1693 | if (waitqueue_active(&vcpu->wq)) | ||
1694 | wake_up_interruptible(&vcpu->wq); | ||
1695 | |||
1696 | if (vcpu->guest_mode) | ||
1697 | smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0); | ||
1698 | } | ||
1699 | |||
1700 | int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig) | ||
1701 | { | ||
1702 | |||
1703 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | ||
1704 | |||
1705 | if (!test_and_set_bit(vec, &vpd->irr[0])) { | ||
1706 | vcpu->arch.irq_new_pending = 1; | ||
1707 | if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE) | ||
1708 | kvm_vcpu_kick(vcpu); | ||
1709 | else if (vcpu->arch.mp_state == VCPU_MP_STATE_HALTED) { | ||
1710 | vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE; | ||
1711 | if (waitqueue_active(&vcpu->wq)) | ||
1712 | wake_up_interruptible(&vcpu->wq); | ||
1713 | } | ||
1714 | return 1; | ||
1715 | } | ||
1716 | return 0; | ||
1717 | } | ||
1718 | |||
1719 | int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest) | ||
1720 | { | ||
1721 | return apic->vcpu->vcpu_id == dest; | ||
1722 | } | ||
1723 | |||
1724 | int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda) | ||
1725 | { | ||
1726 | return 0; | ||
1727 | } | ||
1728 | |||
1729 | struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector, | ||
1730 | unsigned long bitmap) | ||
1731 | { | ||
1732 | struct kvm_vcpu *lvcpu = kvm->vcpus[0]; | ||
1733 | int i; | ||
1734 | |||
1735 | for (i = 1; i < KVM_MAX_VCPUS; i++) { | ||
1736 | if (!kvm->vcpus[i]) | ||
1737 | continue; | ||
1738 | if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp) | ||
1739 | lvcpu = kvm->vcpus[i]; | ||
1740 | } | ||
1741 | |||
1742 | return lvcpu; | ||
1743 | } | ||
1744 | |||
1745 | static int find_highest_bits(int *dat) | ||
1746 | { | ||
1747 | u32 bits, bitnum; | ||
1748 | int i; | ||
1749 | |||
1750 | /* loop for all 256 bits */ | ||
1751 | for (i = 7; i >= 0 ; i--) { | ||
1752 | bits = dat[i]; | ||
1753 | if (bits) { | ||
1754 | bitnum = fls(bits); | ||
1755 | return i * 32 + bitnum - 1; | ||
1756 | } | ||
1757 | } | ||
1758 | |||
1759 | return -1; | ||
1760 | } | ||
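/*
 * Illustrative example (not part of this patch): the scan walks the
 * 256-bit irr as eight 32-bit words from the top down, so for a
 * hypothetical dat[] with dat[7] == 0 and dat[6] == 0x10 it returns
 * 6 * 32 + fls(0x10) - 1 = 192 + 5 - 1 = 196, the highest pending
 * vector.
 */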
1761 | |||
1762 | int kvm_highest_pending_irq(struct kvm_vcpu *vcpu) | ||
1763 | { | ||
1764 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | ||
1765 | |||
1766 | if (vpd->irr[0] & (1UL << NMI_VECTOR)) | ||
1767 | return NMI_VECTOR; | ||
1768 | if (vpd->irr[0] & (1UL << ExtINT_VECTOR)) | ||
1769 | return ExtINT_VECTOR; | ||
1770 | |||
1771 | return find_highest_bits((int *)&vpd->irr[0]); | ||
1772 | } | ||
1773 | |||
1774 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) | ||
1775 | { | ||
1776 | if (kvm_highest_pending_irq(vcpu) != -1) | ||
1777 | return 1; | ||
1778 | return 0; | ||
1779 | } | ||
1780 | |||
1781 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) | ||
1782 | { | ||
1783 | return gfn; | ||
1784 | } | ||
1785 | |||
1786 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) | ||
1787 | { | ||
1788 | return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE; | ||
1789 | } | ||