Diffstat (limited to 'arch/ia64/kvm')
-rw-r--r--  arch/ia64/kvm/Kconfig    |   4
-rw-r--r--  arch/ia64/kvm/irq.h      |   2
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c | 125
-rw-r--r--  arch/ia64/kvm/kvm_fw.c   | 151
-rw-r--r--  arch/ia64/kvm/process.c  |  71
-rw-r--r--  arch/ia64/kvm/vcpu.c     |  44
-rw-r--r--  arch/ia64/kvm/vcpu.h     |   4
-rw-r--r--  arch/ia64/kvm/vtlb.c     |  44
8 files changed, 342 insertions(+), 103 deletions(-)
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index f833a0b4188d..0a2d6b86075a 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -4,6 +4,10 @@
 config HAVE_KVM
 	bool
 
+config HAVE_KVM_IRQCHIP
+	bool
+	default y
+
 menuconfig VIRTUALIZATION
 	bool "Virtualization"
 	depends on HAVE_KVM || IA64
diff --git a/arch/ia64/kvm/irq.h b/arch/ia64/kvm/irq.h
index c6786e8b1bf4..c0785a728271 100644
--- a/arch/ia64/kvm/irq.h
+++ b/arch/ia64/kvm/irq.h
@@ -23,6 +23,8 @@
 #ifndef __IRQ_H
 #define __IRQ_H
 
+#include "lapic.h"
+
 static inline int irqchip_in_kernel(struct kvm *kvm)
 {
 	return 1;
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 28f982045f29..076b00d1dbff 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -182,7 +182,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 	switch (ext) {
 	case KVM_CAP_IRQCHIP:
 	case KVM_CAP_MP_STATE:
-
+	case KVM_CAP_IRQ_INJECT_STATUS:
 		r = 1;
 		break;
 	case KVM_CAP_COALESCED_MMIO:
@@ -314,7 +314,7 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
 	union ia64_lid lid;
 	int i;
 
-	for (i = 0; i < KVM_MAX_VCPUS; i++) {
+	for (i = 0; i < kvm->arch.online_vcpus; i++) {
 		if (kvm->vcpus[i]) {
 			lid.val = VCPU_LID(kvm->vcpus[i]);
 			if (lid.id == id && lid.eid == eid)
@@ -388,7 +388,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	call_data.ptc_g_data = p->u.ptc_g_data;
 
-	for (i = 0; i < KVM_MAX_VCPUS; i++) {
+	for (i = 0; i < kvm->arch.online_vcpus; i++) {
 		if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
 						KVM_MP_STATE_UNINITIALIZED ||
 					vcpu == kvm->vcpus[i])
@@ -788,6 +788,8 @@ struct kvm *kvm_arch_create_vm(void)
 		return ERR_PTR(-ENOMEM);
 	kvm_init_vm(kvm);
 
+	kvm->arch.online_vcpus = 0;
+
 	return kvm;
 
 }
@@ -919,7 +921,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = kvm_ioapic_init(kvm);
 		if (r)
 			goto out;
+		r = kvm_setup_default_irq_routing(kvm);
+		if (r) {
+			kfree(kvm->arch.vioapic);
+			goto out;
+		}
 		break;
+	case KVM_IRQ_LINE_STATUS:
 	case KVM_IRQ_LINE: {
 		struct kvm_irq_level irq_event;
 
@@ -927,10 +935,17 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		if (copy_from_user(&irq_event, argp, sizeof irq_event))
 			goto out;
 		if (irqchip_in_kernel(kvm)) {
+			__s32 status;
 			mutex_lock(&kvm->lock);
-			kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
 				    irq_event.irq, irq_event.level);
 			mutex_unlock(&kvm->lock);
+			if (ioctl == KVM_IRQ_LINE_STATUS) {
+				irq_event.status = status;
+				if (copy_to_user(argp, &irq_event,
+							sizeof irq_event))
+					goto out;
+			}
 			r = 0;
 		}
 		break;
@@ -1149,7 +1164,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 	/*Initialize itc offset for vcpus*/
 	itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
-	for (i = 0; i < KVM_MAX_VCPUS; i++) {
+	for (i = 0; i < kvm->arch.online_vcpus; i++) {
 		v = (struct kvm_vcpu *)((char *)vcpu +
 				sizeof(struct kvm_vcpu_data) * i);
 		v->arch.itc_offset = itc_offset;
@@ -1283,6 +1298,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 		goto fail;
 	}
 
+	kvm->arch.online_vcpus++;
+
 	return vcpu;
 fail:
 	return ERR_PTR(r);
@@ -1303,8 +1320,8 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 	return -EINVAL;
 }
 
-int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
-		struct kvm_debug_guest *dbg)
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+					struct kvm_guest_debug *dbg)
 {
 	return -EINVAL;
 }
@@ -1421,6 +1438,23 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	return 0;
 }
 
+int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu,
+				  struct kvm_ia64_vcpu_stack *stack)
+{
+	memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack));
+	return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu,
+				  struct kvm_ia64_vcpu_stack *stack)
+{
+	memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu),
+	       sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu));
+
+	vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data;
+	return 0;
+}
+
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
 
@@ -1430,9 +1464,78 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 
 
 long kvm_arch_vcpu_ioctl(struct file *filp,
-		unsigned int ioctl, unsigned long arg)
+			 unsigned int ioctl, unsigned long arg)
 {
-	return -EINVAL;
+	struct kvm_vcpu *vcpu = filp->private_data;
+	void __user *argp = (void __user *)arg;
+	struct kvm_ia64_vcpu_stack *stack = NULL;
+	long r;
+
+	switch (ioctl) {
+	case KVM_IA64_VCPU_GET_STACK: {
+		struct kvm_ia64_vcpu_stack __user *user_stack;
+		void __user *first_p = argp;
+
+		r = -EFAULT;
+		if (copy_from_user(&user_stack, first_p, sizeof(void *)))
+			goto out;
+
+		if (!access_ok(VERIFY_WRITE, user_stack,
+			       sizeof(struct kvm_ia64_vcpu_stack))) {
+			printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: "
+			       "Illegal user destination address for stack\n");
+			goto out;
+		}
+		stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
+		if (!stack) {
+			r = -ENOMEM;
+			goto out;
+		}
+
+		r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack);
+		if (r)
+			goto out;
+
+		if (copy_to_user(user_stack, stack,
+				 sizeof(struct kvm_ia64_vcpu_stack)))
+			goto out;
+
+		break;
+	}
+	case KVM_IA64_VCPU_SET_STACK: {
+		struct kvm_ia64_vcpu_stack __user *user_stack;
+		void __user *first_p = argp;
+
+		r = -EFAULT;
+		if (copy_from_user(&user_stack, first_p, sizeof(void *)))
+			goto out;
+
+		if (!access_ok(VERIFY_READ, user_stack,
+			       sizeof(struct kvm_ia64_vcpu_stack))) {
+			printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: "
+			       "Illegal user address for stack\n");
+			goto out;
+		}
+		stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
+		if (!stack) {
+			r = -ENOMEM;
+			goto out;
+		}
+		if (copy_from_user(stack, user_stack,
+				   sizeof(struct kvm_ia64_vcpu_stack)))
+			goto out;
+
+		r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack);
+		break;
+	}
+
+	default:
+		r = -EINVAL;
+	}
+
+out:
+	kfree(stack);
+	return r;
 }
 
 int kvm_arch_set_memory_region(struct kvm *kvm,
@@ -1472,7 +1575,7 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
 }
 
 long kvm_arch_dev_ioctl(struct file *filp,
-		unsigned int ioctl, unsigned long arg)
+			unsigned int ioctl, unsigned long arg)
 {
 	return -EINVAL;
 }
@@ -1737,7 +1840,7 @@ struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
 	struct kvm_vcpu *lvcpu = kvm->vcpus[0];
 	int i;
 
-	for (i = 1; i < KVM_MAX_VCPUS; i++) {
+	for (i = 1; i < kvm->arch.online_vcpus; i++) {
 		if (!kvm->vcpus[i])
 			continue;
 		if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index cb7600bdff9d..a8ae52ed5635 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -227,6 +227,18 @@ static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu)
 	return result;
 }
 
+static struct ia64_pal_retval pal_register_info(struct kvm_vcpu *vcpu)
+{
+
+	struct ia64_pal_retval result = {0, 0, 0, 0};
+	long in0, in1, in2, in3;
+
+	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+	result.status = ia64_pal_register_info(in1, &result.v1, &result.v2);
+
+	return result;
+}
+
 static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu)
 {
 
@@ -268,8 +280,12 @@ static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu)
 static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu)
 {
 	struct ia64_pal_retval result;
+	unsigned long in0, in1, in2, in3;
 
-	INIT_PAL_STATUS_UNIMPLEMENTED(result);
+	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+
+	result.status = ia64_pal_vm_info(in1, in2,
+			(pal_tc_info_u_t *)&result.v1, &result.v2);
 
 	return result;
 }
@@ -292,6 +308,108 @@ static void prepare_for_halt(struct kvm_vcpu *vcpu)
 	vcpu->arch.timer_fired = 0;
 }
 
+static struct ia64_pal_retval pal_perf_mon_info(struct kvm_vcpu *vcpu)
+{
+	long status;
+	unsigned long in0, in1, in2, in3, r9;
+	unsigned long pm_buffer[16];
+
+	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+	status = ia64_pal_perf_mon_info(pm_buffer,
+				(pal_perf_mon_info_u_t *) &r9);
+	if (status != 0) {
+		printk(KERN_DEBUG"PAL_PERF_MON_INFO fails ret=%ld\n", status);
+	} else {
+		if (in1)
+			memcpy((void *)in1, pm_buffer, sizeof(pm_buffer));
+		else {
+			status = PAL_STATUS_EINVAL;
+			printk(KERN_WARNING"Invalid parameters "
+				"for PAL call:0x%lx!\n", in0);
+		}
+	}
+	return (struct ia64_pal_retval){status, r9, 0, 0};
+}
+
+static struct ia64_pal_retval pal_halt_info(struct kvm_vcpu *vcpu)
+{
+	unsigned long in0, in1, in2, in3;
+	long status;
+	unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
+					| (1UL << 61) | (1UL << 60);
+
+	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+	if (in1) {
+		memcpy((void *)in1, &res, sizeof(res));
+		status = 0;
+	} else {
+		status = PAL_STATUS_EINVAL;
+		printk(KERN_WARNING"Invalid parameters "
+			"for PAL call:0x%lx!\n", in0);
+	}
+
+	return (struct ia64_pal_retval){status, 0, 0, 0};
+}
+
+static struct ia64_pal_retval pal_mem_attrib(struct kvm_vcpu *vcpu)
+{
+	unsigned long r9;
+	long status;
+
+	status = ia64_pal_mem_attrib(&r9);
+
+	return (struct ia64_pal_retval){status, r9, 0, 0};
+}
+
+static void remote_pal_prefetch_visibility(void *v)
+{
+	s64 trans_type = (s64)v;
+	ia64_pal_prefetch_visibility(trans_type);
+}
+
+static struct ia64_pal_retval pal_prefetch_visibility(struct kvm_vcpu *vcpu)
+{
+	struct ia64_pal_retval result = {0, 0, 0, 0};
+	unsigned long in0, in1, in2, in3;
+	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+	result.status = ia64_pal_prefetch_visibility(in1);
+	if (result.status == 0) {
+		/* Must be performed on all remote processors
+		   in the coherence domain. */
+		smp_call_function(remote_pal_prefetch_visibility,
+					(void *)in1, 1);
+		/* Unnecessary on remote processor for other vcpus! */
+		result.status = 1;
+	}
+	return result;
+}
+
+static void remote_pal_mc_drain(void *v)
+{
+	ia64_pal_mc_drain();
+}
+
+static struct ia64_pal_retval pal_get_brand_info(struct kvm_vcpu *vcpu)
+{
+	struct ia64_pal_retval result = {0, 0, 0, 0};
+	unsigned long in0, in1, in2, in3;
+
+	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+
+	if (in1 == 0 && in2) {
+		char brand_info[128];
+		result.status = ia64_pal_get_brand_info(brand_info);
+		if (result.status == PAL_STATUS_SUCCESS)
+			memcpy((void *)in2, brand_info, 128);
+	} else {
+		result.status = PAL_STATUS_REQUIRES_MEMORY;
+		printk(KERN_WARNING"Invalid parameters for "
+			"PAL call:0x%lx!\n", in0);
+	}
+
+	return result;
+}
+
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 
@@ -300,14 +418,22 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	int ret = 1;
 
 	gr28 = kvm_get_pal_call_index(vcpu);
-	/*printk("pal_call index:%lx\n",gr28);*/
 	switch (gr28) {
 	case PAL_CACHE_FLUSH:
 		result = pal_cache_flush(vcpu);
 		break;
+	case PAL_MEM_ATTRIB:
+		result = pal_mem_attrib(vcpu);
+		break;
 	case PAL_CACHE_SUMMARY:
 		result = pal_cache_summary(vcpu);
 		break;
+	case PAL_PERF_MON_INFO:
+		result = pal_perf_mon_info(vcpu);
+		break;
+	case PAL_HALT_INFO:
+		result = pal_halt_info(vcpu);
+		break;
 	case PAL_HALT_LIGHT:
 	{
 		INIT_PAL_STATUS_SUCCESS(result);
@@ -317,6 +443,16 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	}
 	break;
 
+	case PAL_PREFETCH_VISIBILITY:
+		result = pal_prefetch_visibility(vcpu);
+		break;
+	case PAL_MC_DRAIN:
+		result.status = ia64_pal_mc_drain();
+		/* FIXME: All vcpus likely call PAL_MC_DRAIN.
+		   That causes the congestion. */
+		smp_call_function(remote_pal_mc_drain, NULL, 1);
+		break;
+
 	case PAL_FREQ_RATIOS:
 		result = pal_freq_ratios(vcpu);
 		break;
@@ -346,6 +482,9 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		INIT_PAL_STATUS_SUCCESS(result);
 		result.v1 = (1L << 32) | 1L;
 		break;
+	case PAL_REGISTER_INFO:
+		result = pal_register_info(vcpu);
+		break;
 	case PAL_VM_PAGE_SIZE:
 		result.status = ia64_pal_vm_page_size(&result.v0,
 						&result.v1);
@@ -365,12 +504,18 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		result.status = ia64_pal_version(
 				(pal_version_u_t *)&result.v0,
 				(pal_version_u_t *)&result.v1);
-
 		break;
 	case PAL_FIXED_ADDR:
 		result.status = PAL_STATUS_SUCCESS;
 		result.v0 = vcpu->vcpu_id;
 		break;
+	case PAL_BRAND_INFO:
+		result = pal_get_brand_info(vcpu);
+		break;
+	case PAL_GET_PSTATE:
+	case PAL_CACHE_SHARED_INFO:
+		INIT_PAL_STATUS_UNIMPLEMENTED(result);
+		break;
 	default:
 		INIT_PAL_STATUS_UNIMPLEMENTED(result);
 		printk(KERN_WARNING"kvm: Unsupported pal call,"
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index 230eae482f32..b1dc80952d91 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -167,7 +167,6 @@ static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa)
 	return (rr1.val);
 }
 
-
 /*
  * Set vIFA & vITIR & vIHA, when vPSR.ic =1
  * Parameter:
@@ -222,8 +221,6 @@ void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
 }
 
-
-
 /*
  * Data Nested TLB Fault
  * @ Data Nested TLB Vector
@@ -245,7 +242,6 @@ void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr)
 	inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
 }
 
-
 /*
  * Data TLB Fault
  * @ Data TLB vector
@@ -265,8 +261,6 @@ static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	/* If vPSR.ic, IFA, ITIR, IHA*/
 	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
 	inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
-
-
 }
 
 /*
@@ -279,7 +273,6 @@ void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	_vhpt_fault(vcpu, vadr);
 }
 
-
 /*
  * VHPT Data Fault
  * @ VHPT Translation vector
@@ -290,8 +283,6 @@ void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	_vhpt_fault(vcpu, vadr);
 }
 
-
-
 /*
  * Deal with:
  * General Exception vector
@@ -301,7 +292,6 @@ void _general_exception(struct kvm_vcpu *vcpu)
 	inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
 }
 
-
 /*
  * Illegal Operation Fault
  * @ General Exception Vector
@@ -419,19 +409,16 @@ static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
 	inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
 }
 
-
 void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
 {
 	__page_not_present(vcpu, vadr);
 }
 
-
 void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
 {
 	__page_not_present(vcpu, vadr);
 }
 
-
 /* Deal with
  * Data access rights vector
  */
@@ -563,22 +550,64 @@ void reflect_interruption(u64 ifa, u64 isr, u64 iim,
 	inject_guest_interruption(vcpu, vector);
 }
 
+static unsigned long kvm_trans_pal_call_args(struct kvm_vcpu *vcpu,
+						unsigned long arg)
+{
+	struct thash_data *data;
+	unsigned long gpa, poff;
+
+	if (!is_physical_mode(vcpu)) {
+		/* Depends on caller to provide the DTR or DTC mapping.*/
+		data = vtlb_lookup(vcpu, arg, D_TLB);
+		if (data)
+			gpa = data->page_flags & _PAGE_PPN_MASK;
+		else {
+			data = vhpt_lookup(arg);
+			if (!data)
+				return 0;
+			gpa = data->gpaddr & _PAGE_PPN_MASK;
+		}
+
+		poff = arg & (PSIZE(data->ps) - 1);
+		arg = PAGEALIGN(gpa, data->ps) | poff;
+	}
+	arg = kvm_gpa_to_mpa(arg << 1 >> 1);
+
+	return (unsigned long)__va(arg);
+}
+
 static void set_pal_call_data(struct kvm_vcpu *vcpu)
 {
 	struct exit_ctl_data *p = &vcpu->arch.exit_data;
+	unsigned long gr28 = vcpu_get_gr(vcpu, 28);
+	unsigned long gr29 = vcpu_get_gr(vcpu, 29);
+	unsigned long gr30 = vcpu_get_gr(vcpu, 30);
 
 	/*FIXME:For static and stacked convention, firmware
 	 * has put the parameters in gr28-gr31 before
 	 * break to vmm  !!*/
 
-	p->u.pal_data.gr28 = vcpu_get_gr(vcpu, 28);
-	p->u.pal_data.gr29 = vcpu_get_gr(vcpu, 29);
-	p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+	switch (gr28) {
+	case PAL_PERF_MON_INFO:
+	case PAL_HALT_INFO:
+		p->u.pal_data.gr29 = kvm_trans_pal_call_args(vcpu, gr29);
+		p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+		break;
+	case PAL_BRAND_INFO:
+		p->u.pal_data.gr29 = gr29;
+		p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30);
+		break;
+	default:
+		p->u.pal_data.gr29 = gr29;
+		p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+	}
+	p->u.pal_data.gr28 = gr28;
 	p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31);
+
 	p->exit_reason = EXIT_REASON_PAL_CALL;
 }
 
-static void set_pal_call_result(struct kvm_vcpu *vcpu)
+static void get_pal_call_result(struct kvm_vcpu *vcpu)
 {
 	struct exit_ctl_data *p = &vcpu->arch.exit_data;
 
@@ -606,7 +635,7 @@ static void set_sal_call_data(struct kvm_vcpu *vcpu)
 	p->exit_reason = EXIT_REASON_SAL_CALL;
 }
 
-static void set_sal_call_result(struct kvm_vcpu *vcpu)
+static void get_sal_call_result(struct kvm_vcpu *vcpu)
 {
 	struct exit_ctl_data *p = &vcpu->arch.exit_data;
 
@@ -629,13 +658,13 @@ void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
 	if (iim == DOMN_PAL_REQUEST) {
 		set_pal_call_data(v);
 		vmm_transition(v);
-		set_pal_call_result(v);
+		get_pal_call_result(v);
 		vcpu_increment_iip(v);
 		return;
 	} else if (iim == DOMN_SAL_REQUEST) {
 		set_sal_call_data(v);
 		vmm_transition(v);
-		set_sal_call_result(v);
+		get_sal_call_result(v);
 		vcpu_increment_iip(v);
 		return;
 	}
@@ -703,7 +732,6 @@ void vhpi_detection(struct kvm_vcpu *vcpu)
 	}
 }
 
-
 void leave_hypervisor_tail(void)
 {
 	struct kvm_vcpu *v = current_vcpu;
@@ -737,7 +765,6 @@ void leave_hypervisor_tail(void)
 	}
 }
 
-
 static inline void handle_lds(struct kvm_pt_regs *regs)
 {
 	regs->cr_ipsr |= IA64_PSR_ED;
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index ecd526b55323..d4d280505878 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -112,7 +112,6 @@ void switch_to_physical_rid(struct kvm_vcpu *vcpu)
 	return;
 }
 
-
 void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
 {
 	unsigned long psr;
@@ -166,8 +165,6 @@ void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
 	return;
 }
 
-
-
 /*
  * In physical mode, insert tc/tr for region 0 and 4 uses
  * RID[0] and RID[4] which is for physical mode emulation.
@@ -269,7 +266,6 @@ static inline unsigned long fph_index(struct kvm_pt_regs *regs,
 	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
 }
 
-
 /*
  * The inverse of the above: given bspstore and the number of
  * registers, calculate ar.bsp.
@@ -811,12 +807,15 @@ static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
 static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
 {
 	struct kvm_vcpu *v;
+	struct kvm *kvm;
 	int i;
 	long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
 	unsigned long vitv = VCPU(vcpu, itv);
 
+	kvm = (struct kvm *)KVM_VM_BASE;
+
 	if (vcpu->vcpu_id == 0) {
-		for (i = 0; i < KVM_MAX_VCPUS; i++) {
+		for (i = 0; i < kvm->arch.online_vcpus; i++) {
 			v = (struct kvm_vcpu *)((char *)vcpu +
 					sizeof(struct kvm_vcpu_data) * i);
 			VMX(v, itc_offset) = itc_offset;
@@ -1039,8 +1038,6 @@ u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
 	return key;
 }
 
-
-
 void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long thash, vadr;
@@ -1050,7 +1047,6 @@ void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
 }
 
-
 void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long tag, vadr;
@@ -1131,7 +1127,6 @@ int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
 	return IA64_NO_FAULT;
 }
 
-
 int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r1, r3;
@@ -1154,7 +1149,6 @@ void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
 }
 
-
 /************************************
  * Insert/Purge translation register/cache
  ************************************/
@@ -1385,7 +1379,6 @@ void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_itc(vcpu, r2);
 }
 
-
 void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r1;
@@ -1393,8 +1386,9 @@ void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 	r1 = vcpu_get_itc(vcpu);
 	vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
 }
+
 /**************************************************************************
-  struct kvm_vcpu*protection key register access routines
+  struct kvm_vcpu protection key register access routines
 **************************************************************************/
 
 unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
@@ -1407,20 +1401,6 @@ void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
 	ia64_set_pkr(reg, val);
 }
 
-
-unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
-{
-	union ia64_rr rr, rr1;
-
-	rr.val = vcpu_get_rr(vcpu, ifa);
-	rr1.val = 0;
-	rr1.ps = rr.ps;
-	rr1.rid = rr.rid;
-	return (rr1.val);
-}
-
-
-
 /********************************
  * Moves to privileged registers
  ********************************/
@@ -1464,8 +1444,6 @@ unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
 	return (IA64_NO_FAULT);
 }
 
-
-
 void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r3, r2;
@@ -1510,8 +1488,6 @@ void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_pkr(vcpu, r3, r2);
 }
 
-
-
 void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r3, r1;
@@ -1557,7 +1533,6 @@ void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
 }
 
-
 unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
 {
 	/* FIXME: This could get called as a result of a rsvd-reg fault */
@@ -1609,7 +1584,6 @@ unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
 	return 0;
 }
 
-
 unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long tgt = inst.M33.r1;
@@ -1633,8 +1607,6 @@ unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
 	return 0;
 }
 
-
-
 void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
 {
 
@@ -1776,9 +1748,6 @@ void vcpu_bsw1(struct kvm_vcpu *vcpu)
 	}
 }
 
-
-
-
 void vcpu_rfi(struct kvm_vcpu *vcpu)
 {
 	unsigned long ifs, psr;
@@ -1796,7 +1765,6 @@ void vcpu_rfi(struct kvm_vcpu *vcpu)
 	regs->cr_iip = VCPU(vcpu, iip);
 }
 
-
 /*
    VPSR can't keep track of below bits of guest PSR
    This function gets guest PSR
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index b2f12a562bdf..042af92ced83 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -703,7 +703,7 @@ extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
 extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps);
 extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps);
 extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va);
-extern int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
+extern void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
 		u64 itir, u64 ifa, int type);
 extern void thash_purge_all(struct kvm_vcpu *v);
 extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v,
@@ -738,7 +738,7 @@ void kvm_init_vhpt(struct kvm_vcpu *v);
 void thash_init(struct thash_cb *hcb, u64 sz);
 
 void panic_vm(struct kvm_vcpu *v, const char *fmt, ...);
-
+u64 kvm_gpa_to_mpa(u64 gpa);
 extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
 		u64 arg4, u64 arg5, u64 arg6, u64 arg7);
 
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 6b6307a3bd55..38232b37668b 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -164,11 +164,11 @@ static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
 	unsigned long ps, gpaddr;
 
 	ps = itir_ps(itir);
+	rr.val = ia64_get_rr(ifa);
 
 	gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
 				(ifa & ((1UL << ps) - 1));
 
-	rr.val = ia64_get_rr(ifa);
 	head = (struct thash_data *)ia64_thash(ifa);
 	head->etag = INVALID_TI_TAG;
 	ia64_mf();
@@ -412,16 +412,14 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 
 /*
  * Purge overlap TCs and then insert the new entry to emulate itc ops.
  * Notes: Only TC entry can purge and insert.
- * 1 indicates this is MMIO
  */
-int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
+void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 						u64 ifa, int type)
 {
 	u64 ps;
 	u64 phy_pte, io_mask, index;
 	union ia64_rr vrr, mrr;
-	int ret = 0;
 
 	ps = itir_ps(itir);
 	vrr.val = vcpu_get_rr(v, ifa);
@@ -441,25 +439,19 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 		phy_pte &= ~_PAGE_MA_MASK;
 	}
 
-	if (pte & VTLB_PTE_IO)
-		ret = 1;
-
 	vtlb_purge(v, ifa, ps);
 	vhpt_purge(v, ifa, ps);
 
-	if (ps == mrr.ps) {
-		if (!(pte&VTLB_PTE_IO)) {
-			vhpt_insert(phy_pte, itir, ifa, pte);
-		} else {
-			vtlb_insert(v, pte, itir, ifa);
-			vcpu_quick_region_set(VMX(v, tc_regions), ifa);
-		}
-	} else if (ps > mrr.ps) {
+	if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) {
 		vtlb_insert(v, pte, itir, ifa);
 		vcpu_quick_region_set(VMX(v, tc_regions), ifa);
-		if (!(pte&VTLB_PTE_IO))
-			vhpt_insert(phy_pte, itir, ifa, pte);
-	} else {
+	}
+	if (pte & VTLB_PTE_IO)
+		return;
+
+	if (ps >= mrr.ps)
+		vhpt_insert(phy_pte, itir, ifa, pte);
+	else {
 		u64 psr;
 		phy_pte &= ~PAGE_FLAGS_RV_MASK;
 		psr = ia64_clear_ic();
@@ -469,7 +461,6 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 	if (!(pte&VTLB_PTE_IO))
 		mark_pages_dirty(v, pte, ps);
 
-	return ret;
 }
 
 /*
@@ -509,7 +500,6 @@ void thash_purge_all(struct kvm_vcpu *v)
 	local_flush_tlb_all();
 }
 
-
 /*
 * Lookup the hash table and its collision chain to find an entry
 * covering this address rid:va or the entry.
@@ -517,7 +507,6 @@ void thash_purge_all(struct kvm_vcpu *v)
 * INPUT:
 *  in: TLB format for both VHPT & TLB.
 */
-
 struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
 {
 	struct thash_data *cch;
@@ -547,7 +536,6 @@ struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
 	return NULL;
 }
 
-
 /*
  * Initialize internal control data before service.
  */
@@ -573,6 +561,10 @@ void thash_init(struct thash_cb *hcb, u64 sz)
 u64 kvm_get_mpt_entry(u64 gpfn)
 {
 	u64 *base = (u64 *) KVM_P2M_BASE;
+
+	if (gpfn >= (KVM_P2M_SIZE >> 3))
+		panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn);
+
 	return *(base + gpfn);
 }
 
@@ -589,7 +581,6 @@ u64 kvm_gpa_to_mpa(u64 gpa)
 	return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
 }
 
-
 /*
  * Fetch guest bundle code.
  * INPUT:
@@ -631,7 +622,6 @@ int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
 	return IA64_NO_FAULT;
 }
 
-
 void kvm_init_vhpt(struct kvm_vcpu *v)
 {
 	v->arch.vhpt.num = VHPT_NUM_ENTRIES;