Diffstat (limited to 'arch/powerpc/kvm/book3s.c')
 -rw-r--r--  arch/powerpc/kvm/book3s.c | 503
 1 file changed, 360 insertions(+), 143 deletions(-)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 604af29b71ed..b998abf1a63d 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -16,6 +16,7 @@
 
 #include <linux/kvm_host.h>
 #include <linux/err.h>
+#include <linux/slab.h>
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
@@ -29,6 +30,7 @@
 #include <linux/gfp.h>
 #include <linux/sched.h>
 #include <linux/vmalloc.h>
+#include <linux/highmem.h>
 
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
 
@@ -36,7 +38,15 @@
 /* #define EXIT_DEBUG_SIMPLE */
 /* #define DEBUG_EXT */
 
-static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
+static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
+			     ulong msr);
+
+/* Some compatibility defines */
+#ifdef CONFIG_PPC_BOOK3S_32
+#define MSR_USER32 MSR_USER
+#define MSR_USER64 MSR_USER
+#define HW_PAGE_SIZE PAGE_SIZE
+#endif
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "exits", VCPU_STAT(sum_exits) },
@@ -69,18 +79,26 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb));
-	memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
+#ifdef CONFIG_PPC_BOOK3S_64
+	memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
+	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
 	       sizeof(get_paca()->shadow_vcpu));
-	get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
+	to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_32
+	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
+#endif
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb));
-	memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
+#ifdef CONFIG_PPC_BOOK3S_64
+	memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
+	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
 	       sizeof(get_paca()->shadow_vcpu));
-	to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
+	to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
+#endif
 
 	kvmppc_giveup_ext(vcpu, MSR_FP);
 	kvmppc_giveup_ext(vcpu, MSR_VEC);
@@ -131,18 +149,22 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 		}
 	}
 
-	if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
-	    (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
+	if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
 		kvmppc_mmu_flush_segments(vcpu);
-		kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
+		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
 	}
+
+	/* Preload FPU if it's enabled */
+	if (vcpu->arch.msr & MSR_FP)
+		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 }
 
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
-	vcpu->arch.srr0 = vcpu->arch.pc;
+	vcpu->arch.srr0 = kvmppc_get_pc(vcpu);
 	vcpu->arch.srr1 = vcpu->arch.msr | flags;
-	vcpu->arch.pc = to_book3s(vcpu)->hior + vec;
+	kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
 	vcpu->arch.mmu.reset_msr(vcpu);
 }
 
@@ -218,6 +240,12 @@ void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
 	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
 }
 
+void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
+				  struct kvm_interrupt *irq)
+{
+	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
+}
+
 int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 {
 	int deliver = 1;
@@ -302,7 +330,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
 	printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
 #endif
 	priority = __ffs(*pending);
-	while (priority <= (sizeof(unsigned int) * 8)) {
+	while (priority < BOOK3S_IRQPRIO_MAX) {
 		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
 		    (priority != BOOK3S_IRQPRIO_DECREMENTER)) {
 			/* DEC interrupts get cleared by mtdec */
@@ -318,13 +346,18 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
 
 void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
 {
+	u32 host_pvr;
+
 	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
 	vcpu->arch.pvr = pvr;
+#ifdef CONFIG_PPC_BOOK3S_64
 	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
 		kvmppc_mmu_book3s_64_init(vcpu);
 		to_book3s(vcpu)->hior = 0xfff00000;
 		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
-	} else {
+	} else
+#endif
+	{
 		kvmppc_mmu_book3s_32_init(vcpu);
 		to_book3s(vcpu)->hior = 0;
 		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
@@ -337,6 +370,32 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
 	    !strcmp(cur_cpu_spec->platform, "ppc970"))
 		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
 
+	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
+	   really needs them in a VM on Cell and force disable them. */
+	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
+		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
+
+#ifdef CONFIG_PPC_BOOK3S_32
+	/* 32 bit Book3S always has 32 byte dcbz */
+	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
+#endif
+
+	/* On some CPUs we can execute paired single operations natively */
+	asm ( "mfpvr %0" : "=r"(host_pvr));
+	switch (host_pvr) {
+	case 0x00080200:	/* lonestar 2.0 */
+	case 0x00088202:	/* lonestar 2.2 */
+	case 0x70000100:	/* gekko 1.0 */
+	case 0x00080100:	/* gekko 2.0 */
+	case 0x00083203:	/* gekko 2.3a */
+	case 0x00083213:	/* gekko 2.3b */
+	case 0x00083204:	/* gekko 2.4 */
+	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
+	case 0x00087200:	/* broadway */
+		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
+		/* Enable HID2.PSE - in case we need it later */
+		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
+	}
 }
 
 /* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
@@ -350,34 +409,29 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
  */
 static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
 {
-	bool touched = false;
-	hva_t hpage;
+	struct page *hpage;
+	u64 hpage_offset;
 	u32 *page;
 	int i;
 
-	hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
-	if (kvm_is_error_hva(hpage))
+	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
+	if (is_error_page(hpage))
 		return;
 
-	hpage |= pte->raddr & ~PAGE_MASK;
-	hpage &= ~0xFFFULL;
-
-	page = vmalloc(HW_PAGE_SIZE);
-
-	if (copy_from_user(page, (void __user *)hpage, HW_PAGE_SIZE))
-		goto out;
+	hpage_offset = pte->raddr & ~PAGE_MASK;
+	hpage_offset &= ~0xFFFULL;
+	hpage_offset /= 4;
 
-	for (i=0; i < HW_PAGE_SIZE / 4; i++)
-		if ((page[i] & 0xff0007ff) == INS_DCBZ) {
-			page[i] &= 0xfffffff7; // reserved instruction, so we trap
-			touched = true;
-		}
+	get_page(hpage);
+	page = kmap_atomic(hpage, KM_USER0);
 
-	if (touched)
-		copy_to_user((void __user *)hpage, page, HW_PAGE_SIZE);
+	/* patch dcbz into reserved instruction, so we trap */
+	for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
+		if ((page[i] & 0xff0007ff) == INS_DCBZ)
+			page[i] &= 0xfffffff7;
 
-out:
-	vfree(page);
+	kunmap_atomic(page, KM_USER0);
+	put_page(hpage);
 }
 
 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
@@ -391,15 +445,7 @@ static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
 	} else {
 		pte->eaddr = eaddr;
 		pte->raddr = eaddr & 0xffffffff;
-		pte->vpage = eaddr >> 12;
-		switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
-		case 0:
-			pte->vpage |= VSID_REAL;
-		case MSR_DR:
-			pte->vpage |= VSID_REAL_DR;
-		case MSR_IR:
-			pte->vpage |= VSID_REAL_IR;
-		}
+		pte->vpage = VSID_REAL | eaddr >> 12;
 		pte->may_read = true;
 		pte->may_write = true;
 		pte->may_execute = true;
@@ -434,55 +480,55 @@ err:
 	return kvmppc_bad_hva();
 }
 
-int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr)
+int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
+	      bool data)
 {
 	struct kvmppc_pte pte;
-	hva_t hva = eaddr;
 
 	vcpu->stat.st++;
 
-	if (kvmppc_xlate(vcpu, eaddr, false, &pte))
-		goto err;
+	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+		return -ENOENT;
 
-	hva = kvmppc_pte_to_hva(vcpu, &pte, false);
-	if (kvm_is_error_hva(hva))
-		goto err;
+	*eaddr = pte.raddr;
 
-	if (copy_to_user((void __user *)hva, ptr, size)) {
-		printk(KERN_INFO "kvmppc_st at 0x%lx failed\n", hva);
-		goto err;
-	}
+	if (!pte.may_write)
+		return -EPERM;
 
-	return 0;
+	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
+		return EMULATE_DO_MMIO;
 
-err:
-	return -ENOENT;
+	return EMULATE_DONE;
 }
 
-int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr,
+int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 	      bool data)
 {
 	struct kvmppc_pte pte;
-	hva_t hva = eaddr;
+	hva_t hva = *eaddr;
 
 	vcpu->stat.ld++;
 
-	if (kvmppc_xlate(vcpu, eaddr, data, &pte))
-		goto err;
+	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+		goto nopte;
+
+	*eaddr = pte.raddr;
 
 	hva = kvmppc_pte_to_hva(vcpu, &pte, true);
 	if (kvm_is_error_hva(hva))
-		goto err;
+		goto mmio;
 
 	if (copy_from_user(ptr, (void __user *)hva, size)) {
 		printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
-		goto err;
+		goto mmio;
 	}
 
-	return 0;
+	return EMULATE_DONE;
 
-err:
+nopte:
 	return -ENOENT;
+mmio:
+	return EMULATE_DO_MMIO;
 }
 
 static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -499,12 +545,11 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	int page_found = 0;
 	struct kvmppc_pte pte;
 	bool is_mmio = false;
+	bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
+	bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
+	u64 vsid;
 
-	if ( vec == BOOK3S_INTERRUPT_DATA_STORAGE ) {
-		relocated = (vcpu->arch.msr & MSR_DR);
-	} else {
-		relocated = (vcpu->arch.msr & MSR_IR);
-	}
+	relocated = data ? dr : ir;
 
 	/* Resolve real address if translation turned on */
 	if (relocated) {
@@ -516,14 +561,25 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		pte.raddr = eaddr & 0xffffffff;
 		pte.eaddr = eaddr;
 		pte.vpage = eaddr >> 12;
-		switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
-		case 0:
-			pte.vpage |= VSID_REAL;
-		case MSR_DR:
-			pte.vpage |= VSID_REAL_DR;
-		case MSR_IR:
-			pte.vpage |= VSID_REAL_IR;
-		}
+	}
+
+	switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+	case 0:
+		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
+		break;
+	case MSR_DR:
+	case MSR_IR:
+		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
+
+		if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR)
+			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
+		else
+			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
+		pte.vpage |= vsid;
+
+		if (vsid == -1)
+			page_found = -EINVAL;
+		break;
 	}
 
 	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -538,20 +594,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	if (page_found == -ENOENT) {
 		/* Page not found in guest PTE entries */
-		vcpu->arch.dear = vcpu->arch.fault_dear;
-		to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
-		vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
+		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
+		to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
+		vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EPERM) {
 		/* Storage protection */
-		vcpu->arch.dear = vcpu->arch.fault_dear;
-		to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
+		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
+		to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
 		to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
-		vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
+		vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EINVAL) {
 		/* Page not found in guest SLB */
-		vcpu->arch.dear = vcpu->arch.fault_dear;
+		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
 		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
 	} else if (!is_mmio &&
 		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
@@ -583,11 +639,13 @@ static inline int get_fpr_index(int i)
 }
 
 /* Give up external provider (FPU, Altivec, VSX) */
-static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
+void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 {
 	struct thread_struct *t = &current->thread;
 	u64 *vcpu_fpr = vcpu->arch.fpr;
+#ifdef CONFIG_VSX
 	u64 *vcpu_vsx = vcpu->arch.vsr;
+#endif
 	u64 *thread_fpr = (u64*)t->fpr;
 	int i;
 
@@ -629,21 +687,65 @@ static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 	kvmppc_recalc_shadow_msr(vcpu);
 }
 
+static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
+{
+	ulong srr0 = kvmppc_get_pc(vcpu);
+	u32 last_inst = kvmppc_get_last_inst(vcpu);
+	int ret;
+
+	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
+	if (ret == -ENOENT) {
+		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1);
+		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0);
+		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
+		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
+		return EMULATE_AGAIN;
+	}
+
+	return EMULATE_DONE;
+}
+
+static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
+{
+
+	/* Need to do paired single emulation? */
+	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
+		return EMULATE_DONE;
+
+	/* Read out the instruction */
+	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
+		/* Need to emulate */
+		return EMULATE_FAIL;
+
+	return EMULATE_AGAIN;
+}
+
 /* Handle external providers (FPU, Altivec, VSX) */
 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 			     ulong msr)
 {
 	struct thread_struct *t = &current->thread;
 	u64 *vcpu_fpr = vcpu->arch.fpr;
+#ifdef CONFIG_VSX
 	u64 *vcpu_vsx = vcpu->arch.vsr;
+#endif
 	u64 *thread_fpr = (u64*)t->fpr;
 	int i;
 
+	/* When we have paired singles, we emulate in software */
+	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
+		return RESUME_GUEST;
+
 	if (!(vcpu->arch.msr & msr)) {
 		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 		return RESUME_GUEST;
 	}
 
+	/* We already own the ext */
+	if (vcpu->arch.guest_owned_ext & msr) {
+		return RESUME_GUEST;
+	}
+
 #ifdef DEBUG_EXT
 	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
 #endif
@@ -696,21 +798,33 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	run->ready_for_interrupt_injection = 1;
 #ifdef EXIT_DEBUG
 	printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n",
-		exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
-		kvmppc_get_dec(vcpu), vcpu->arch.msr);
+		exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
+		kvmppc_get_dec(vcpu), to_svcpu(vcpu)->shadow_srr1);
 #elif defined (EXIT_DEBUG_SIMPLE)
 	if ((exit_nr != 0x900) && (exit_nr != 0x500))
 		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
-			exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
+			exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
 			vcpu->arch.msr);
 #endif
 	kvm_resched(vcpu);
 	switch (exit_nr) {
 	case BOOK3S_INTERRUPT_INST_STORAGE:
 		vcpu->stat.pf_instruc++;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+		/* We set segments as unused segments when invalidating them. So
+		 * treat the respective fault as segment fault. */
+		if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
+		    == SR_INVALID) {
+			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
+			r = RESUME_GUEST;
+			break;
+		}
+#endif
+
 		/* only care about PTEG not found errors, but leave NX alone */
-		if (vcpu->arch.shadow_srr1 & 0x40000000) {
-			r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
+		if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
+			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
 			vcpu->stat.sp_instruc++;
 		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
 			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
@@ -719,37 +833,52 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			 * so we can't use the NX bit inside the guest. Let's cross our fingers,
 			 * that no guest that needs the dcbz hack does NX.
 			 */
-			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
+			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
+			r = RESUME_GUEST;
 		} else {
-			vcpu->arch.msr |= vcpu->arch.shadow_srr1 & 0x58000000;
+			vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
-			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
+			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
 			r = RESUME_GUEST;
 		}
 		break;
 	case BOOK3S_INTERRUPT_DATA_STORAGE:
+	{
+		ulong dar = kvmppc_get_fault_dar(vcpu);
 		vcpu->stat.pf_storage++;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+		/* We set segments as unused segments when invalidating them. So
+		 * treat the respective fault as segment fault. */
+		if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
+			kvmppc_mmu_map_segment(vcpu, dar);
+			r = RESUME_GUEST;
+			break;
+		}
+#endif
+
 		/* The only case we need to handle is missing shadow PTEs */
-		if (vcpu->arch.fault_dsisr & DSISR_NOHPTE) {
-			r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.fault_dear, exit_nr);
+		if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
+			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
 		} else {
-			vcpu->arch.dear = vcpu->arch.fault_dear;
-			to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
+			vcpu->arch.dear = dar;
+			to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
-			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL);
+			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFUL);
 			r = RESUME_GUEST;
 		}
 		break;
+	}
 	case BOOK3S_INTERRUPT_DATA_SEGMENT:
-		if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.fault_dear) < 0) {
-			vcpu->arch.dear = vcpu->arch.fault_dear;
+		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
+			vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
 			kvmppc_book3s_queue_irqprio(vcpu,
 				BOOK3S_INTERRUPT_DATA_SEGMENT);
 		}
 		r = RESUME_GUEST;
 		break;
 	case BOOK3S_INTERRUPT_INST_SEGMENT:
-		if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc) < 0) {
+		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
 			kvmppc_book3s_queue_irqprio(vcpu,
 				BOOK3S_INTERRUPT_INST_SEGMENT);
 		}
@@ -764,18 +893,22 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		vcpu->stat.ext_intr_exits++;
 		r = RESUME_GUEST;
 		break;
+	case BOOK3S_INTERRUPT_PERFMON:
+		r = RESUME_GUEST;
+		break;
 	case BOOK3S_INTERRUPT_PROGRAM:
 	{
 		enum emulation_result er;
 		ulong flags;
 
-		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
+program_interrupt:
+		flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
 
 		if (vcpu->arch.msr & MSR_PR) {
 #ifdef EXIT_DEBUG
-			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", vcpu->arch.pc, vcpu->arch.last_inst);
+			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
 #endif
-			if ((vcpu->arch.last_inst & 0xff0007ff) !=
+			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
 			    (INS_DCBZ & 0xfffffff7)) {
 				kvmppc_core_queue_program(vcpu, flags);
 				r = RESUME_GUEST;
@@ -789,33 +922,80 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		case EMULATE_DONE:
 			r = RESUME_GUEST_NV;
 			break;
+		case EMULATE_AGAIN:
+			r = RESUME_GUEST;
+			break;
 		case EMULATE_FAIL:
 			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
-			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
+			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
 			kvmppc_core_queue_program(vcpu, flags);
 			r = RESUME_GUEST;
 			break;
+		case EMULATE_DO_MMIO:
+			run->exit_reason = KVM_EXIT_MMIO;
+			r = RESUME_HOST_NV;
+			break;
 		default:
 			BUG();
 		}
 		break;
 	}
 	case BOOK3S_INTERRUPT_SYSCALL:
-#ifdef EXIT_DEBUG
-		printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0));
-#endif
-		vcpu->stat.syscall_exits++;
-		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
-		r = RESUME_GUEST;
+		// XXX make user settable
+		if (vcpu->arch.osi_enabled &&
+		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
+		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
+			u64 *gprs = run->osi.gprs;
+			int i;
+
+			run->exit_reason = KVM_EXIT_OSI;
+			for (i = 0; i < 32; i++)
+				gprs[i] = kvmppc_get_gpr(vcpu, i);
+			vcpu->arch.osi_needed = 1;
+			r = RESUME_HOST_NV;
+
+		} else {
+			vcpu->stat.syscall_exits++;
+			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+			r = RESUME_GUEST;
+		}
 		break;
 	case BOOK3S_INTERRUPT_FP_UNAVAIL:
-		r = kvmppc_handle_ext(vcpu, exit_nr, MSR_FP);
-		break;
 	case BOOK3S_INTERRUPT_ALTIVEC:
-		r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VEC);
-		break;
 	case BOOK3S_INTERRUPT_VSX:
-		r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VSX);
+	{
+		int ext_msr = 0;
+
+		switch (exit_nr) {
+		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
+		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
+		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
+		}
+
+		switch (kvmppc_check_ext(vcpu, exit_nr)) {
+		case EMULATE_DONE:
+			/* everything ok - let's enable the ext */
+			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
+			break;
+		case EMULATE_FAIL:
+			/* we need to emulate this instruction */
+			goto program_interrupt;
+			break;
+		default:
+			/* nothing to worry about - go again */
+			break;
+		}
+		break;
+	}
+	case BOOK3S_INTERRUPT_ALIGNMENT:
+		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
+			to_book3s(vcpu)->dsisr = kvmppc_alignment_dsisr(vcpu,
+				kvmppc_get_last_inst(vcpu));
+			vcpu->arch.dear = kvmppc_alignment_dar(vcpu,
+				kvmppc_get_last_inst(vcpu));
+			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+		}
+		r = RESUME_GUEST;
 		break;
 	case BOOK3S_INTERRUPT_MACHINE_CHECK:
 	case BOOK3S_INTERRUPT_TRACE:
@@ -825,7 +1005,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	default:
 		/* Ugh - bork here! What did we get? */
 		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
-			exit_nr, vcpu->arch.pc, vcpu->arch.shadow_srr1);
+			exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
 		r = RESUME_HOST;
 		BUG();
 		break;
@@ -852,7 +1032,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	}
 
 #ifdef EXIT_DEBUG
-	printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, vcpu->arch.pc, r);
+	printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, kvmppc_get_pc(vcpu), r);
 #endif
 
 	return r;
@@ -867,10 +1047,12 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	int i;
 
-	regs->pc = vcpu->arch.pc;
+	vcpu_load(vcpu);
+
+	regs->pc = kvmppc_get_pc(vcpu);
 	regs->cr = kvmppc_get_cr(vcpu);
-	regs->ctr = vcpu->arch.ctr;
-	regs->lr = vcpu->arch.lr;
+	regs->ctr = kvmppc_get_ctr(vcpu);
+	regs->lr = kvmppc_get_lr(vcpu);
 	regs->xer = kvmppc_get_xer(vcpu);
 	regs->msr = vcpu->arch.msr;
 	regs->srr0 = vcpu->arch.srr0;
@@ -887,6 +1069,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
 
+	vcpu_put(vcpu);
+
 	return 0;
 }
 
@@ -894,10 +1078,12 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	int i;
 
-	vcpu->arch.pc = regs->pc;
+	vcpu_load(vcpu);
+
+	kvmppc_set_pc(vcpu, regs->pc);
 	kvmppc_set_cr(vcpu, regs->cr);
-	vcpu->arch.ctr = regs->ctr;
-	vcpu->arch.lr = regs->lr;
+	kvmppc_set_ctr(vcpu, regs->ctr);
+	kvmppc_set_lr(vcpu, regs->lr);
 	kvmppc_set_xer(vcpu, regs->xer);
 	kvmppc_set_msr(vcpu, regs->msr);
 	vcpu->arch.srr0 = regs->srr0;
@@ -913,6 +1099,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
 
+	vcpu_put(vcpu);
+
 	return 0;
 }
 
@@ -922,6 +1110,8 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	int i;
 
+	vcpu_load(vcpu);
+
 	sregs->pvr = vcpu->arch.pvr;
 
 	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
@@ -940,6 +1130,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
 		}
 	}
+
+	vcpu_put(vcpu);
+
 	return 0;
 }
 
@@ -949,6 +1142,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	int i;
 
+	vcpu_load(vcpu);
+
 	kvmppc_set_pvr(vcpu, sregs->pvr);
 
 	vcpu3s->sdr1 = sregs->u.s.sdr1;
@@ -975,6 +1170,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
 	/* Flush the MMU after messing with the segments */
 	kvmppc_mmu_pte_flush(vcpu, 0, 0);
+
+	vcpu_put(vcpu);
+
 	return 0;
 }
 
@@ -1042,24 +1240,33 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s;
 	struct kvm_vcpu *vcpu;
-	int err;
+	int err = -ENOMEM;
 
-	vcpu_book3s = (struct kvmppc_vcpu_book3s *)__get_free_pages( GFP_KERNEL | __GFP_ZERO,
-			get_order(sizeof(struct kvmppc_vcpu_book3s)));
-	if (!vcpu_book3s) {
-		err = -ENOMEM;
+	vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
+	if (!vcpu_book3s)
 		goto out;
-	}
+
+	memset(vcpu_book3s, 0, sizeof(struct kvmppc_vcpu_book3s));
+
+	vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
+		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
+	if (!vcpu_book3s->shadow_vcpu)
+		goto free_vcpu;
 
 	vcpu = &vcpu_book3s->vcpu;
 	err = kvm_vcpu_init(vcpu, kvm, id);
 	if (err)
-		goto free_vcpu;
+		goto free_shadow_vcpu;
 
 	vcpu->arch.host_retip = kvm_return_point;
 	vcpu->arch.host_msr = mfmsr();
+#ifdef CONFIG_PPC_BOOK3S_64
 	/* default to book3s_64 (970fx) */
 	vcpu->arch.pvr = 0x3C0301;
+#else
+	/* default to book3s_32 (750) */
+	vcpu->arch.pvr = 0x84202;
+#endif
 	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
 	vcpu_book3s->slb_nr = 64;
 
@@ -1067,23 +1274,24 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
 	vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
 	vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
+#ifdef CONFIG_PPC_BOOK3S_64
 	vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
+#else
+	vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
+#endif
 
 	vcpu->arch.shadow_msr = MSR_USER64;
 
-	err = __init_new_context();
+	err = kvmppc_mmu_init(vcpu);
 	if (err < 0)
-		goto free_vcpu;
-	vcpu_book3s->context_id = err;
-
-	vcpu_book3s->vsid_max = ((vcpu_book3s->context_id + 1) << USER_ESID_BITS) - 1;
-	vcpu_book3s->vsid_first = vcpu_book3s->context_id << USER_ESID_BITS;
-	vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
+		goto free_shadow_vcpu;
 
 	return vcpu;
 
+free_shadow_vcpu:
+	kfree(vcpu_book3s->shadow_vcpu);
 free_vcpu:
-	free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
+	vfree(vcpu_book3s);
 out:
 	return ERR_PTR(err);
 }
@@ -1092,9 +1300,9 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 
-	__destroy_context(vcpu_book3s->context_id);
 	kvm_vcpu_uninit(vcpu);
-	free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
+	kfree(vcpu_book3s->shadow_vcpu);
+	vfree(vcpu_book3s);
 }
 
 extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
@@ -1102,8 +1310,12 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret;
 	struct thread_struct ext_bkp;
+#ifdef CONFIG_ALTIVEC
 	bool save_vec = current->thread.used_vr;
+#endif
+#ifdef CONFIG_VSX
 	bool save_vsx = current->thread.used_vsr;
+#endif
 	ulong ext_msr;
 
 	/* No need to go into the guest when all we do is going out */
@@ -1144,6 +1356,10 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	/* XXX we get called with irq disabled - change that! */
 	local_irq_enable();
 
+	/* Preload FPU if it's enabled */
+	if (vcpu->arch.msr & MSR_FP)
+		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
+
 	ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
 
 	local_irq_disable();
@@ -1179,7 +1395,8 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 static int kvmppc_book3s_init(void)
 {
-	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), THIS_MODULE);
+	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
+			THIS_MODULE);
 }
 
 static void kvmppc_book3s_exit(void)