author    Linus Torvalds <torvalds@linux-foundation.org>  2010-05-21 20:16:21 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-05-21 20:16:21 -0400
commit    98edb6ca4174f17a64890a02f44c211c8b44fb3c (patch)
tree      033bc5f7da410046d28dd1cefcd2d63cda33d25b /arch/powerpc/kvm
parent    a8251096b427283c47e7d8f9568be6b388dd68ec (diff)
parent    8fbf065d625617bbbf6b72d5f78f84ad13c8b547 (diff)
Merge branch 'kvm-updates/2.6.35' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.35' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (269 commits)
  KVM: x86: Add missing locking to arch specific vcpu ioctls
  KVM: PPC: Add missing vcpu_load()/vcpu_put() in vcpu ioctls
  KVM: MMU: Segregate shadow pages with different cr0.wp
  KVM: x86: Check LMA bit before set_efer
  KVM: Don't allow lmsw to clear cr0.pe
  KVM: Add cpuid.txt file
  KVM: x86: Tell the guest we'll warn it about tsc stability
  x86, paravirt: don't compute pvclock adjustments if we trust the tsc
  x86: KVM guest: Try using new kvm clock msrs
  KVM: x86: export paravirtual cpuid flags in KVM_GET_SUPPORTED_CPUID
  KVM: x86: add new KVMCLOCK cpuid feature
  KVM: x86: change msr numbers for kvmclock
  x86, paravirt: Add a global synchronization point for pvclock
  x86, paravirt: Enable pvclock flags in vcpu_time_info structure
  KVM: x86: Inject #GP with the right rip on efer writes
  KVM: SVM: Don't allow nested guest to VMMCALL into host
  KVM: x86: Fix exception reinjection forced to true
  KVM: Fix wallclock version writing race
  KVM: MMU: Don't read pdptrs with mmu spinlock held in mmu_alloc_roots
  KVM: VMX: enable VMXON check with SMX enabled (Intel TXT)
  ...
Diffstat (limited to 'arch/powerpc/kvm')
 -rw-r--r--  arch/powerpc/kvm/44x.c                                                 |    2
 -rw-r--r--  arch/powerpc/kvm/Kconfig                                               |   24
 -rw-r--r--  arch/powerpc/kvm/Makefile                                              |   20
 -rw-r--r--  arch/powerpc/kvm/book3s.c                                              |  503
 -rw-r--r--  arch/powerpc/kvm/book3s_32_mmu.c                                       |   54
 -rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c                                  |  483
 -rw-r--r--  arch/powerpc/kvm/book3s_32_sr.S                                        |  143
 -rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c                                       |   36
 -rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c                                  |  102
 -rw-r--r--  arch/powerpc/kvm/book3s_64_slb.S                                       |  183
 -rw-r--r--  arch/powerpc/kvm/book3s_emulate.c (renamed from arch/powerpc/kvm/book3s_64_emulate.c)       |  245
 -rw-r--r--  arch/powerpc/kvm/book3s_exports.c (renamed from arch/powerpc/kvm/book3s_64_exports.c)       |    0
 -rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S (renamed from arch/powerpc/kvm/book3s_64_interrupts.S) |  204
 -rw-r--r--  arch/powerpc/kvm/book3s_paired_singles.c                               | 1289
 -rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S (renamed from arch/powerpc/kvm/book3s_64_rmhandlers.S) |  135
 -rw-r--r--  arch/powerpc/kvm/book3s_segment.S                                      |  259
 -rw-r--r--  arch/powerpc/kvm/booke.c                                               |   21
 -rw-r--r--  arch/powerpc/kvm/e500.c                                                |    2
 -rw-r--r--  arch/powerpc/kvm/emulate.c                                             |   55
 -rw-r--r--  arch/powerpc/kvm/fpu.S                                                 |  273
 -rw-r--r--  arch/powerpc/kvm/powerpc.c                                             |  110
 21 files changed, 3598 insertions, 545 deletions
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 689a57c2ac80..73c0a3f64ed1 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -147,7 +147,7 @@ static int __init kvmppc_44x_init(void)
 	if (r)
 		return r;
 
-	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), THIS_MODULE);
+	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
 }
 
 static void __exit kvmppc_44x_exit(void)
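
The kvm_init() interface this hunk adapts to takes a new third argument for the
vcpu structure alignment, where 0 requests the default. A minimal sketch of the
resulting init path for a flavor module -- the kvmppc_booke_init() call is
assumed from the surrounding file rather than shown in this hunk:

	static int __init kvmppc_44x_init(void)
	{
		int r;

		/* shared booke setup, assumed from the rest of 44x.c */
		r = kvmppc_booke_init();
		if (r)
			return r;

		/* NULL opaque, per-vcpu allocation size, 0 = default vcpu alignment */
		return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
	}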
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 60624cc9f4d4..b7baff78f90c 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -22,12 +22,34 @@ config KVM
 	select ANON_INODES
 	select KVM_MMIO
 
+config KVM_BOOK3S_HANDLER
+	bool
+
+config KVM_BOOK3S_32_HANDLER
+	bool
+	select KVM_BOOK3S_HANDLER
+
 config KVM_BOOK3S_64_HANDLER
 	bool
+	select KVM_BOOK3S_HANDLER
+
+config KVM_BOOK3S_32
+	tristate "KVM support for PowerPC book3s_32 processors"
+	depends on EXPERIMENTAL && PPC_BOOK3S_32 && !SMP && !PTE_64BIT
+	select KVM
+	select KVM_BOOK3S_32_HANDLER
+	---help---
+	  Support running unmodified book3s_32 guest kernels
+	  in virtual machines on book3s_32 host processors.
+
+	  This module provides access to the hardware capabilities through
+	  a character device node named /dev/kvm.
+
+	  If unsure, say N.
 
 config KVM_BOOK3S_64
 	tristate "KVM support for PowerPC book3s_64 processors"
-	depends on EXPERIMENTAL && PPC64
+	depends on EXPERIMENTAL && PPC_BOOK3S_64
 	select KVM
 	select KVM_BOOK3S_64_HANDLER
 	---help---
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 56484d652377..ff436066bf77 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -14,7 +14,7 @@ CFLAGS_emulate.o := -I.
 
 common-objs-y += powerpc.o emulate.o
 obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o
-obj-$(CONFIG_KVM_BOOK3S_64_HANDLER) += book3s_64_exports.o
+obj-$(CONFIG_KVM_BOOK3S_HANDLER) += book3s_exports.o
 
 AFLAGS_booke_interrupts.o := -I$(obj)
 
@@ -40,17 +40,31 @@ kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs)
 
 kvm-book3s_64-objs := \
 	$(common-objs-y) \
+	fpu.o \
+	book3s_paired_singles.o \
 	book3s.o \
-	book3s_64_emulate.o \
-	book3s_64_interrupts.o \
+	book3s_emulate.o \
+	book3s_interrupts.o \
 	book3s_64_mmu_host.o \
 	book3s_64_mmu.o \
 	book3s_32_mmu.o
 kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-objs)
 
+kvm-book3s_32-objs := \
+	$(common-objs-y) \
+	fpu.o \
+	book3s_paired_singles.o \
+	book3s.o \
+	book3s_emulate.o \
+	book3s_interrupts.o \
+	book3s_32_mmu_host.o \
+	book3s_32_mmu.o
+kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
+
 kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
 
 obj-$(CONFIG_KVM_440) += kvm.o
 obj-$(CONFIG_KVM_E500) += kvm.o
 obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
+obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
 
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 604af29b71ed..b998abf1a63d 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -16,6 +16,7 @@
 
 #include <linux/kvm_host.h>
 #include <linux/err.h>
+#include <linux/slab.h>
 
20#include <asm/reg.h> 21#include <asm/reg.h>
21#include <asm/cputable.h> 22#include <asm/cputable.h>
@@ -29,6 +30,7 @@
 #include <linux/gfp.h>
 #include <linux/sched.h>
 #include <linux/vmalloc.h>
+#include <linux/highmem.h>
 
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
 
@@ -36,7 +38,15 @@
 /* #define EXIT_DEBUG_SIMPLE */
 /* #define DEBUG_EXT */
 
-static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
+static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
+			     ulong msr);
+
+/* Some compatibility defines */
+#ifdef CONFIG_PPC_BOOK3S_32
+#define MSR_USER32 MSR_USER
+#define MSR_USER64 MSR_USER
+#define HW_PAGE_SIZE PAGE_SIZE
+#endif
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "exits", VCPU_STAT(sum_exits) },
@@ -69,18 +79,26 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb));
-	memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
+#ifdef CONFIG_PPC_BOOK3S_64
+	memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
+	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
 	       sizeof(get_paca()->shadow_vcpu));
-	get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
+	to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_32
+	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
+#endif
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb));
-	memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
+#ifdef CONFIG_PPC_BOOK3S_64
+	memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
+	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
 	       sizeof(get_paca()->shadow_vcpu));
-	to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
+	to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
+#endif
 
 	kvmppc_giveup_ext(vcpu, MSR_FP);
 	kvmppc_giveup_ext(vcpu, MSR_VEC);
@@ -131,18 +149,22 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 		}
 	}
 
-	if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
-	    (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
+	if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+	    (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
 		kvmppc_mmu_flush_segments(vcpu);
-		kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
+		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
 	}
+
+	/* Preload FPU if it's enabled */
+	if (vcpu->arch.msr & MSR_FP)
+		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 }
 
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
-	vcpu->arch.srr0 = vcpu->arch.pc;
+	vcpu->arch.srr0 = kvmppc_get_pc(vcpu);
 	vcpu->arch.srr1 = vcpu->arch.msr | flags;
-	vcpu->arch.pc = to_book3s(vcpu)->hior + vec;
+	kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
 	vcpu->arch.mmu.reset_msr(vcpu);
 }
 
@@ -218,6 +240,12 @@ void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
 	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
 }
 
+void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
+				  struct kvm_interrupt *irq)
+{
+	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
+}
+
 int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 {
 	int deliver = 1;
@@ -302,7 +330,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
 	printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
 #endif
 	priority = __ffs(*pending);
-	while (priority <= (sizeof(unsigned int) * 8)) {
+	while (priority < BOOK3S_IRQPRIO_MAX) {
 		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
 		    (priority != BOOK3S_IRQPRIO_DECREMENTER)) {
 			/* DEC interrupts get cleared by mtdec */
@@ -318,13 +346,18 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
 
 void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
 {
+	u32 host_pvr;
+
 	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
 	vcpu->arch.pvr = pvr;
+#ifdef CONFIG_PPC_BOOK3S_64
 	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
 		kvmppc_mmu_book3s_64_init(vcpu);
 		to_book3s(vcpu)->hior = 0xfff00000;
 		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
-	} else {
+	} else
+#endif
+	{
 		kvmppc_mmu_book3s_32_init(vcpu);
 		to_book3s(vcpu)->hior = 0;
 		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
@@ -337,6 +370,32 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
 	    !strcmp(cur_cpu_spec->platform, "ppc970"))
 		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
 
+	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
+	   really needs them in a VM on Cell and force disable them. */
+	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
+		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
+
+#ifdef CONFIG_PPC_BOOK3S_32
+	/* 32 bit Book3S always has 32 byte dcbz */
+	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
+#endif
+
+	/* On some CPUs we can execute paired single operations natively */
+	asm ( "mfpvr %0" : "=r"(host_pvr));
+	switch (host_pvr) {
+	case 0x00080200:	/* lonestar 2.0 */
+	case 0x00088202:	/* lonestar 2.2 */
+	case 0x70000100:	/* gekko 1.0 */
+	case 0x00080100:	/* gekko 2.0 */
+	case 0x00083203:	/* gekko 2.3a */
+	case 0x00083213:	/* gekko 2.3b */
+	case 0x00083204:	/* gekko 2.4 */
+	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
+	case 0x00087200:	/* broadway */
+		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
+		/* Enable HID2.PSE - in case we need it later */
+		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
+	}
 }
 
 /* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
@@ -350,34 +409,29 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
  */
 static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
 {
-	bool touched = false;
-	hva_t hpage;
+	struct page *hpage;
+	u64 hpage_offset;
 	u32 *page;
 	int i;
 
-	hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
-	if (kvm_is_error_hva(hpage))
+	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
+	if (is_error_page(hpage))
 		return;
 
-	hpage |= pte->raddr & ~PAGE_MASK;
-	hpage &= ~0xFFFULL;
-
-	page = vmalloc(HW_PAGE_SIZE);
-
-	if (copy_from_user(page, (void __user *)hpage, HW_PAGE_SIZE))
-		goto out;
+	hpage_offset = pte->raddr & ~PAGE_MASK;
+	hpage_offset &= ~0xFFFULL;
+	hpage_offset /= 4;
 
-	for (i=0; i < HW_PAGE_SIZE / 4; i++)
-		if ((page[i] & 0xff0007ff) == INS_DCBZ) {
-			page[i] &= 0xfffffff7; // reserved instruction, so we trap
-			touched = true;
-		}
+	get_page(hpage);
+	page = kmap_atomic(hpage, KM_USER0);
 
-	if (touched)
-		copy_to_user((void __user *)hpage, page, HW_PAGE_SIZE);
+	/* patch dcbz into reserved instruction, so we trap */
+	for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
+		if ((page[i] & 0xff0007ff) == INS_DCBZ)
+			page[i] &= 0xfffffff7;
 
-out:
-	vfree(page);
+	kunmap_atomic(page, KM_USER0);
+	put_page(hpage);
 }
 
 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
@@ -391,15 +445,7 @@ static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
 	} else {
 		pte->eaddr = eaddr;
 		pte->raddr = eaddr & 0xffffffff;
-		pte->vpage = eaddr >> 12;
-		switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
-		case 0:
-			pte->vpage |= VSID_REAL;
-		case MSR_DR:
-			pte->vpage |= VSID_REAL_DR;
-		case MSR_IR:
-			pte->vpage |= VSID_REAL_IR;
-		}
+		pte->vpage = VSID_REAL | eaddr >> 12;
 		pte->may_read = true;
 		pte->may_write = true;
 		pte->may_execute = true;
@@ -434,55 +480,55 @@ err:
 	return kvmppc_bad_hva();
 }
 
-int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr)
+int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
+	      bool data)
 {
 	struct kvmppc_pte pte;
-	hva_t hva = eaddr;
 
 	vcpu->stat.st++;
 
-	if (kvmppc_xlate(vcpu, eaddr, false, &pte))
-		goto err;
+	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+		return -ENOENT;
 
-	hva = kvmppc_pte_to_hva(vcpu, &pte, false);
-	if (kvm_is_error_hva(hva))
-		goto err;
+	*eaddr = pte.raddr;
 
-	if (copy_to_user((void __user *)hva, ptr, size)) {
-		printk(KERN_INFO "kvmppc_st at 0x%lx failed\n", hva);
-		goto err;
-	}
+	if (!pte.may_write)
+		return -EPERM;
 
-	return 0;
+	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
+		return EMULATE_DO_MMIO;
 
-err:
-	return -ENOENT;
+	return EMULATE_DONE;
 }
 
-int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr,
+int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 	      bool data)
 {
 	struct kvmppc_pte pte;
-	hva_t hva = eaddr;
+	hva_t hva = *eaddr;
 
 	vcpu->stat.ld++;
 
-	if (kvmppc_xlate(vcpu, eaddr, data, &pte))
-		goto err;
+	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+		goto nopte;
+
+	*eaddr = pte.raddr;
 
 	hva = kvmppc_pte_to_hva(vcpu, &pte, true);
 	if (kvm_is_error_hva(hva))
-		goto err;
+		goto mmio;
 
 	if (copy_from_user(ptr, (void __user *)hva, size)) {
 		printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
-		goto err;
+		goto mmio;
 	}
 
-	return 0;
+	return EMULATE_DONE;
 
-err:
+nopte:
 	return -ENOENT;
+mmio:
+	return EMULATE_DO_MMIO;
 }
 
 static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -499,12 +545,11 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	int page_found = 0;
 	struct kvmppc_pte pte;
 	bool is_mmio = false;
+	bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
+	bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
+	u64 vsid;
 
-	if ( vec == BOOK3S_INTERRUPT_DATA_STORAGE ) {
-		relocated = (vcpu->arch.msr & MSR_DR);
-	} else {
-		relocated = (vcpu->arch.msr & MSR_IR);
-	}
+	relocated = data ? dr : ir;
 
 	/* Resolve real address if translation turned on */
 	if (relocated) {
@@ -516,14 +561,25 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		pte.raddr = eaddr & 0xffffffff;
 		pte.eaddr = eaddr;
 		pte.vpage = eaddr >> 12;
-		switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
-		case 0:
-			pte.vpage |= VSID_REAL;
-		case MSR_DR:
-			pte.vpage |= VSID_REAL_DR;
-		case MSR_IR:
-			pte.vpage |= VSID_REAL_IR;
-		}
+	}
+
+	switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+	case 0:
+		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
+		break;
+	case MSR_DR:
+	case MSR_IR:
+		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
+
+		if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR)
+			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
+		else
+			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
+		pte.vpage |= vsid;
+
+		if (vsid == -1)
+			page_found = -EINVAL;
+		break;
 	}
 
 	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -538,20 +594,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	if (page_found == -ENOENT) {
 		/* Page not found in guest PTE entries */
-		vcpu->arch.dear = vcpu->arch.fault_dear;
-		to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
-		vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
+		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
+		to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
+		vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EPERM) {
 		/* Storage protection */
-		vcpu->arch.dear = vcpu->arch.fault_dear;
-		to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
+		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
+		to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
 		to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
-		vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
+		vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EINVAL) {
 		/* Page not found in guest SLB */
-		vcpu->arch.dear = vcpu->arch.fault_dear;
+		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
 		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
 	} else if (!is_mmio &&
 		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
@@ -583,11 +639,13 @@ static inline int get_fpr_index(int i)
 }
 
 /* Give up external provider (FPU, Altivec, VSX) */
-static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
+void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 {
 	struct thread_struct *t = &current->thread;
 	u64 *vcpu_fpr = vcpu->arch.fpr;
+#ifdef CONFIG_VSX
 	u64 *vcpu_vsx = vcpu->arch.vsr;
+#endif
 	u64 *thread_fpr = (u64*)t->fpr;
 	int i;
 
@@ -629,21 +687,65 @@ static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 	kvmppc_recalc_shadow_msr(vcpu);
 }
 
+static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
+{
+	ulong srr0 = kvmppc_get_pc(vcpu);
+	u32 last_inst = kvmppc_get_last_inst(vcpu);
+	int ret;
+
+	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
+	if (ret == -ENOENT) {
+		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1);
+		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0);
+		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
+		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
+		return EMULATE_AGAIN;
+	}
+
+	return EMULATE_DONE;
+}
+
+static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
+{
+
+	/* Need to do paired single emulation? */
+	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
+		return EMULATE_DONE;
+
+	/* Read out the instruction */
+	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
+		/* Need to emulate */
+		return EMULATE_FAIL;
+
+	return EMULATE_AGAIN;
+}
+
 /* Handle external providers (FPU, Altivec, VSX) */
 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 			     ulong msr)
 {
 	struct thread_struct *t = &current->thread;
 	u64 *vcpu_fpr = vcpu->arch.fpr;
+#ifdef CONFIG_VSX
 	u64 *vcpu_vsx = vcpu->arch.vsr;
+#endif
 	u64 *thread_fpr = (u64*)t->fpr;
 	int i;
 
+	/* When we have paired singles, we emulate in software */
+	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
+		return RESUME_GUEST;
+
 	if (!(vcpu->arch.msr & msr)) {
 		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 		return RESUME_GUEST;
 	}
 
+	/* We already own the ext */
+	if (vcpu->arch.guest_owned_ext & msr) {
+		return RESUME_GUEST;
+	}
+
 #ifdef DEBUG_EXT
 	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
 #endif
@@ -696,21 +798,33 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	run->ready_for_interrupt_injection = 1;
 #ifdef EXIT_DEBUG
 	printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n",
-		exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
-		kvmppc_get_dec(vcpu), vcpu->arch.msr);
+		exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
+		kvmppc_get_dec(vcpu), to_svcpu(vcpu)->shadow_srr1);
 #elif defined (EXIT_DEBUG_SIMPLE)
 	if ((exit_nr != 0x900) && (exit_nr != 0x500))
 		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
-			exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
+			exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
 			vcpu->arch.msr);
 #endif
 	kvm_resched(vcpu);
 	switch (exit_nr) {
 	case BOOK3S_INTERRUPT_INST_STORAGE:
 		vcpu->stat.pf_instruc++;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+		/* We set segments as unused segments when invalidating them. So
+		 * treat the respective fault as segment fault. */
+		if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
+		    == SR_INVALID) {
+			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
+			r = RESUME_GUEST;
+			break;
+		}
+#endif
+
 		/* only care about PTEG not found errors, but leave NX alone */
-		if (vcpu->arch.shadow_srr1 & 0x40000000) {
-			r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
+		if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
+			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
 			vcpu->stat.sp_instruc++;
 		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
 			   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
@@ -719,37 +833,52 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			 * so we can't use the NX bit inside the guest. Let's cross our fingers,
 			 * that no guest that needs the dcbz hack does NX.
 			 */
-			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
+			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
+			r = RESUME_GUEST;
 		} else {
-			vcpu->arch.msr |= vcpu->arch.shadow_srr1 & 0x58000000;
+			vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
-			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
+			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
 			r = RESUME_GUEST;
 		}
 		break;
 	case BOOK3S_INTERRUPT_DATA_STORAGE:
+	{
+		ulong dar = kvmppc_get_fault_dar(vcpu);
 		vcpu->stat.pf_storage++;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+		/* We set segments as unused segments when invalidating them. So
+		 * treat the respective fault as segment fault. */
+		if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
+			kvmppc_mmu_map_segment(vcpu, dar);
+			r = RESUME_GUEST;
+			break;
+		}
+#endif
+
 		/* The only case we need to handle is missing shadow PTEs */
-		if (vcpu->arch.fault_dsisr & DSISR_NOHPTE) {
-			r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.fault_dear, exit_nr);
+		if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
+			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
 		} else {
-			vcpu->arch.dear = vcpu->arch.fault_dear;
-			to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
+			vcpu->arch.dear = dar;
+			to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
-			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL);
+			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFUL);
 			r = RESUME_GUEST;
 		}
 		break;
+	}
 	case BOOK3S_INTERRUPT_DATA_SEGMENT:
-		if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.fault_dear) < 0) {
-			vcpu->arch.dear = vcpu->arch.fault_dear;
+		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
+			vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
 			kvmppc_book3s_queue_irqprio(vcpu,
 				BOOK3S_INTERRUPT_DATA_SEGMENT);
 		}
 		r = RESUME_GUEST;
 		break;
 	case BOOK3S_INTERRUPT_INST_SEGMENT:
-		if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc) < 0) {
+		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
 			kvmppc_book3s_queue_irqprio(vcpu,
 				BOOK3S_INTERRUPT_INST_SEGMENT);
 		}
@@ -764,18 +893,22 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		vcpu->stat.ext_intr_exits++;
 		r = RESUME_GUEST;
 		break;
+	case BOOK3S_INTERRUPT_PERFMON:
+		r = RESUME_GUEST;
+		break;
 	case BOOK3S_INTERRUPT_PROGRAM:
 	{
 		enum emulation_result er;
 		ulong flags;
 
-		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
+program_interrupt:
+		flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
 
 		if (vcpu->arch.msr & MSR_PR) {
 #ifdef EXIT_DEBUG
-			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", vcpu->arch.pc, vcpu->arch.last_inst);
+			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
 #endif
-			if ((vcpu->arch.last_inst & 0xff0007ff) !=
+			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
 			    (INS_DCBZ & 0xfffffff7)) {
 				kvmppc_core_queue_program(vcpu, flags);
 				r = RESUME_GUEST;
@@ -789,33 +922,80 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		case EMULATE_DONE:
 			r = RESUME_GUEST_NV;
 			break;
+		case EMULATE_AGAIN:
+			r = RESUME_GUEST;
+			break;
 		case EMULATE_FAIL:
 			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
-			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
+			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
 			kvmppc_core_queue_program(vcpu, flags);
 			r = RESUME_GUEST;
 			break;
+		case EMULATE_DO_MMIO:
+			run->exit_reason = KVM_EXIT_MMIO;
+			r = RESUME_HOST_NV;
+			break;
 		default:
 			BUG();
 		}
 		break;
 	}
 	case BOOK3S_INTERRUPT_SYSCALL:
-#ifdef EXIT_DEBUG
-		printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0));
-#endif
-		vcpu->stat.syscall_exits++;
-		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
-		r = RESUME_GUEST;
+		// XXX make user settable
+		if (vcpu->arch.osi_enabled &&
+		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
+		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
+			u64 *gprs = run->osi.gprs;
+			int i;
+
+			run->exit_reason = KVM_EXIT_OSI;
+			for (i = 0; i < 32; i++)
+				gprs[i] = kvmppc_get_gpr(vcpu, i);
+			vcpu->arch.osi_needed = 1;
+			r = RESUME_HOST_NV;
+
+		} else {
+			vcpu->stat.syscall_exits++;
+			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+			r = RESUME_GUEST;
+		}
 		break;
 	case BOOK3S_INTERRUPT_FP_UNAVAIL:
-		r = kvmppc_handle_ext(vcpu, exit_nr, MSR_FP);
-		break;
 	case BOOK3S_INTERRUPT_ALTIVEC:
-		r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VEC);
-		break;
 	case BOOK3S_INTERRUPT_VSX:
-		r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VSX);
+	{
+		int ext_msr = 0;
+
+		switch (exit_nr) {
+		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
+		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
+		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
+		}
+
+		switch (kvmppc_check_ext(vcpu, exit_nr)) {
+		case EMULATE_DONE:
+			/* everything ok - let's enable the ext */
+			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
+			break;
+		case EMULATE_FAIL:
+			/* we need to emulate this instruction */
+			goto program_interrupt;
+			break;
+		default:
+			/* nothing to worry about - go again */
+			break;
+		}
+		break;
+	}
+	case BOOK3S_INTERRUPT_ALIGNMENT:
+		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
+			to_book3s(vcpu)->dsisr = kvmppc_alignment_dsisr(vcpu,
+				kvmppc_get_last_inst(vcpu));
+			vcpu->arch.dear = kvmppc_alignment_dar(vcpu,
+				kvmppc_get_last_inst(vcpu));
+			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+		}
+		r = RESUME_GUEST;
 		break;
 	case BOOK3S_INTERRUPT_MACHINE_CHECK:
 	case BOOK3S_INTERRUPT_TRACE:
@@ -825,7 +1005,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	default:
 		/* Ugh - bork here! What did we get? */
 		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
-			exit_nr, vcpu->arch.pc, vcpu->arch.shadow_srr1);
+			exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
 		r = RESUME_HOST;
 		BUG();
 		break;
@@ -852,7 +1032,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	}
 
 #ifdef EXIT_DEBUG
-	printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, vcpu->arch.pc, r);
+	printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, kvmppc_get_pc(vcpu), r);
 #endif
 
 	return r;
@@ -867,10 +1047,12 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	int i;
 
-	regs->pc = vcpu->arch.pc;
+	vcpu_load(vcpu);
+
+	regs->pc = kvmppc_get_pc(vcpu);
 	regs->cr = kvmppc_get_cr(vcpu);
-	regs->ctr = vcpu->arch.ctr;
-	regs->lr = vcpu->arch.lr;
+	regs->ctr = kvmppc_get_ctr(vcpu);
+	regs->lr = kvmppc_get_lr(vcpu);
 	regs->xer = kvmppc_get_xer(vcpu);
 	regs->msr = vcpu->arch.msr;
 	regs->srr0 = vcpu->arch.srr0;
@@ -887,6 +1069,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
 
+	vcpu_put(vcpu);
+
 	return 0;
 }
 
@@ -894,10 +1078,12 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	int i;
 
-	vcpu->arch.pc = regs->pc;
+	vcpu_load(vcpu);
+
+	kvmppc_set_pc(vcpu, regs->pc);
 	kvmppc_set_cr(vcpu, regs->cr);
-	vcpu->arch.ctr = regs->ctr;
-	vcpu->arch.lr = regs->lr;
+	kvmppc_set_ctr(vcpu, regs->ctr);
+	kvmppc_set_lr(vcpu, regs->lr);
 	kvmppc_set_xer(vcpu, regs->xer);
 	kvmppc_set_msr(vcpu, regs->msr);
 	vcpu->arch.srr0 = regs->srr0;
@@ -913,6 +1099,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
 
+	vcpu_put(vcpu);
+
 	return 0;
 }
 
@@ -922,6 +1110,8 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	int i;
 
+	vcpu_load(vcpu);
+
 	sregs->pvr = vcpu->arch.pvr;
 
 	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
@@ -940,6 +1130,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
 		}
 	}
+
+	vcpu_put(vcpu);
+
 	return 0;
 }
 
@@ -949,6 +1142,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	int i;
 
+	vcpu_load(vcpu);
+
 	kvmppc_set_pvr(vcpu, sregs->pvr);
 
 	vcpu3s->sdr1 = sregs->u.s.sdr1;
@@ -975,6 +1170,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
 	/* Flush the MMU after messing with the segments */
 	kvmppc_mmu_pte_flush(vcpu, 0, 0);
+
+	vcpu_put(vcpu);
+
 	return 0;
 }
 
@@ -1042,24 +1240,33 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s;
 	struct kvm_vcpu *vcpu;
-	int err;
+	int err = -ENOMEM;
 
-	vcpu_book3s = (struct kvmppc_vcpu_book3s *)__get_free_pages( GFP_KERNEL | __GFP_ZERO,
-			get_order(sizeof(struct kvmppc_vcpu_book3s)));
-	if (!vcpu_book3s) {
-		err = -ENOMEM;
+	vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
+	if (!vcpu_book3s)
 		goto out;
-	}
+
+	memset(vcpu_book3s, 0, sizeof(struct kvmppc_vcpu_book3s));
+
+	vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
+		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
+	if (!vcpu_book3s->shadow_vcpu)
+		goto free_vcpu;
 
 	vcpu = &vcpu_book3s->vcpu;
 	err = kvm_vcpu_init(vcpu, kvm, id);
 	if (err)
-		goto free_vcpu;
+		goto free_shadow_vcpu;
 
 	vcpu->arch.host_retip = kvm_return_point;
 	vcpu->arch.host_msr = mfmsr();
+#ifdef CONFIG_PPC_BOOK3S_64
 	/* default to book3s_64 (970fx) */
 	vcpu->arch.pvr = 0x3C0301;
+#else
+	/* default to book3s_32 (750) */
+	vcpu->arch.pvr = 0x84202;
+#endif
 	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
 	vcpu_book3s->slb_nr = 64;
 
@@ -1067,23 +1274,24 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
 	vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
 	vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
+#ifdef CONFIG_PPC_BOOK3S_64
 	vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
+#else
+	vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
+#endif
 
 	vcpu->arch.shadow_msr = MSR_USER64;
 
-	err = __init_new_context();
+	err = kvmppc_mmu_init(vcpu);
 	if (err < 0)
-		goto free_vcpu;
-	vcpu_book3s->context_id = err;
-
-	vcpu_book3s->vsid_max = ((vcpu_book3s->context_id + 1) << USER_ESID_BITS) - 1;
-	vcpu_book3s->vsid_first = vcpu_book3s->context_id << USER_ESID_BITS;
-	vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
+		goto free_shadow_vcpu;
 
 	return vcpu;
 
+free_shadow_vcpu:
+	kfree(vcpu_book3s->shadow_vcpu);
 free_vcpu:
-	free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
+	vfree(vcpu_book3s);
 out:
 	return ERR_PTR(err);
 }
@@ -1092,9 +1300,9 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 
-	__destroy_context(vcpu_book3s->context_id);
 	kvm_vcpu_uninit(vcpu);
-	free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
+	kfree(vcpu_book3s->shadow_vcpu);
+	vfree(vcpu_book3s);
 }
 
 extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
@@ -1102,8 +1310,12 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret;
 	struct thread_struct ext_bkp;
+#ifdef CONFIG_ALTIVEC
 	bool save_vec = current->thread.used_vr;
+#endif
+#ifdef CONFIG_VSX
 	bool save_vsx = current->thread.used_vsr;
+#endif
 	ulong ext_msr;
 
 	/* No need to go into the guest when all we do is going out */
@@ -1144,6 +1356,10 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	/* XXX we get called with irq disabled - change that! */
 	local_irq_enable();
 
+	/* Preload FPU if it's enabled */
+	if (vcpu->arch.msr & MSR_FP)
+		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
+
 	ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
 
 	local_irq_disable();
@@ -1179,7 +1395,8 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 static int kvmppc_book3s_init(void)
 {
-	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), THIS_MODULE);
+	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
+			THIS_MODULE);
 }
 
 static void kvmppc_book3s_exit(void)
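
With the rework above, kvmppc_ld() and kvmppc_st() take the effective address
by pointer (written back with the translated real address) and return
EMULATE_DONE, EMULATE_DO_MMIO or -ENOENT instead of 0/-ENOENT. A hedged sketch
of the resulting calling convention, modeled on kvmppc_read_inst() above; the
local variables here are illustrative:

	u32 inst;
	ulong eaddr = kvmppc_get_pc(vcpu);

	switch (kvmppc_ld(vcpu, &eaddr, sizeof(u32), &inst, false)) {
	case EMULATE_DONE:
		/* inst is valid; eaddr now holds the translated real address */
		break;
	case EMULATE_DO_MMIO:
		/* the access resolved to MMIO; hand the exit to userspace */
		break;
	case -ENOENT:
		/* no guest translation; the caller injects a storage interrupt */
		break;
	}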
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index faf99f20d993..0b10503c8a4a 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -37,7 +37,7 @@
 #define dprintk(X...) do { } while(0)
 #endif
 
-#ifdef DEBUG_PTE
+#ifdef DEBUG_MMU_PTE
 #define dprintk_pte(X...) printk(KERN_INFO X)
 #else
 #define dprintk_pte(X...) do { } while(0)
@@ -45,6 +45,9 @@
 
 #define PTEG_FLAG_ACCESSED	0x00000100
 #define PTEG_FLAG_DIRTY		0x00000080
+#ifndef SID_SHIFT
+#define SID_SHIFT		28
+#endif
 
 static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
 {
@@ -57,6 +60,8 @@ static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
 
 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
 					  struct kvmppc_pte *pte, bool data);
+static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
+					     u64 *vsid);
 
 static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr)
 {
@@ -66,13 +71,14 @@ static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t e
 static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
 					 bool data)
 {
-	struct kvmppc_sr *sre = find_sr(to_book3s(vcpu), eaddr);
+	u64 vsid;
 	struct kvmppc_pte pte;
 
 	if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
 		return pte.vpage;
 
-	return (((u64)eaddr >> 12) & 0xffff) | (((u64)sre->vsid) << 16);
+	kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
+	return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);
 }
 
 static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
@@ -142,8 +148,13 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
 				    bat->bepi_mask);
 		}
 		if ((eaddr & bat->bepi_mask) == bat->bepi) {
+			u64 vsid;
+			kvmppc_mmu_book3s_32_esid_to_vsid(vcpu,
+				eaddr >> SID_SHIFT, &vsid);
+			vsid <<= 16;
+			pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid;
+
 			pte->raddr = bat->brpn | (eaddr & ~bat->bepi_mask);
-			pte->vpage = (eaddr >> 12) | VSID_BAT;
 			pte->may_read = bat->pp;
 			pte->may_write = bat->pp > 1;
 			pte->may_execute = true;
@@ -172,7 +183,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 	struct kvmppc_sr *sre;
 	hva_t ptegp;
 	u32 pteg[16];
-	u64 ptem = 0;
+	u32 ptem = 0;
 	int i;
 	int found = 0;
 
@@ -302,6 +313,7 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
 	/* And then put in the new SR */
 	sre->raw = value;
 	sre->vsid = (value & 0x0fffffff);
+	sre->valid = (value & 0x80000000) ? false : true;
 	sre->Ks = (value & 0x40000000) ? true : false;
 	sre->Kp = (value & 0x20000000) ? true : false;
 	sre->nx = (value & 0x10000000) ? true : false;
@@ -312,36 +324,48 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
 
 static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
 {
-	kvmppc_mmu_pte_flush(vcpu, ea, ~0xFFFULL);
+	kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000);
 }
 
-static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
+static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 					     u64 *vsid)
 {
+	ulong ea = esid << SID_SHIFT;
+	struct kvmppc_sr *sr;
+	u64 gvsid = esid;
+
+	if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+		sr = find_sr(to_book3s(vcpu), ea);
+		if (sr->valid)
+			gvsid = sr->vsid;
+	}
+
 	/* In case we only have one of MSR_IR or MSR_DR set, let's put
 	   that in the real-mode context (and hope RM doesn't access
 	   high memory) */
 	switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
 	case 0:
-		*vsid = (VSID_REAL >> 16) | esid;
+		*vsid = VSID_REAL | esid;
 		break;
 	case MSR_IR:
-		*vsid = (VSID_REAL_IR >> 16) | esid;
+		*vsid = VSID_REAL_IR | gvsid;
 		break;
 	case MSR_DR:
-		*vsid = (VSID_REAL_DR >> 16) | esid;
+		*vsid = VSID_REAL_DR | gvsid;
 		break;
 	case MSR_DR|MSR_IR:
-	{
-		ulong ea;
-		ea = esid << SID_SHIFT;
-		*vsid = find_sr(to_book3s(vcpu), ea)->vsid;
+		if (!sr->valid)
+			return -1;
+
+		*vsid = sr->vsid;
 		break;
-	}
 	default:
 		BUG();
 	}
 
+	if (vcpu->arch.msr & MSR_PR)
+		*vsid |= VSID_PR;
+
 	return 0;
 }
 
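
The reworked esid_to_vsid() gives real mode, split real mode (only one of
MSR_IR/MSR_DR set) and fully translated mode disjoint shadow VSID spaces, ORs
in VSID_PR for problem state, and returns -1 when the guest segment register
is invalid. A condensed sketch of how a host-MMU caller uses it (compare
kvmppc_mmu_map_page() in book3s_32_mmu_host.c below; the -EINVAL path here is
an assumption for illustration):

	u64 vsid;

	/* translate the segment of the faulting EA into a guest VSID */
	if (vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid) < 0)
		return -EINVAL;	/* invalid guest SR, no mapping possible */

	/* vsid now carries the VSID_REAL*, VSID_PR tagging and can be fed
	 * to find_sid_vsid() to look up the matching host VSID */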
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
new file mode 100644
index 000000000000..0bb66005338f
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -0,0 +1,483 @@
1/*
2 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
3 *
4 * Authors:
5 * Alexander Graf <agraf@suse.de>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License, version 2, as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20
21#include <linux/kvm_host.h>
22
23#include <asm/kvm_ppc.h>
24#include <asm/kvm_book3s.h>
25#include <asm/mmu-hash32.h>
26#include <asm/machdep.h>
27#include <asm/mmu_context.h>
28#include <asm/hw_irq.h>
29
30/* #define DEBUG_MMU */
31/* #define DEBUG_SR */
32
33#ifdef DEBUG_MMU
34#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
35#else
36#define dprintk_mmu(a, ...) do { } while(0)
37#endif
38
39#ifdef DEBUG_SR
40#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
41#else
42#define dprintk_sr(a, ...) do { } while(0)
43#endif
44
45#if PAGE_SHIFT != 12
46#error Unknown page size
47#endif
48
49#ifdef CONFIG_SMP
50#error XXX need to grab mmu_hash_lock
51#endif
52
53#ifdef CONFIG_PTE_64BIT
54#error Only 32 bit pages are supported for now
55#endif
56
57static ulong htab;
58static u32 htabmask;
59
60static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
61{
62 volatile u32 *pteg;
63
64 dprintk_mmu("KVM: Flushing SPTE: 0x%llx (0x%llx) -> 0x%llx\n",
65 pte->pte.eaddr, pte->pte.vpage, pte->host_va);
66
67 pteg = (u32*)pte->slot;
68
69 pteg[0] = 0;
70 asm volatile ("sync");
71 asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
72 asm volatile ("sync");
73 asm volatile ("tlbsync");
74
75 pte->host_va = 0;
76
77 if (pte->pte.may_write)
78 kvm_release_pfn_dirty(pte->pfn);
79 else
80 kvm_release_pfn_clean(pte->pfn);
81}
82
83void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
84{
85 int i;
86
87 dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%x & 0x%x\n",
88 vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
89 BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
90
91 guest_ea &= ea_mask;
92 for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
93 struct hpte_cache *pte;
94
95 pte = &vcpu->arch.hpte_cache[i];
96 if (!pte->host_va)
97 continue;
98
99 if ((pte->pte.eaddr & ea_mask) == guest_ea) {
100 invalidate_pte(vcpu, pte);
101 }
102 }
103
104 /* Doing a complete flush -> start from scratch */
105 if (!ea_mask)
106 vcpu->arch.hpte_cache_offset = 0;
107}
108
109void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
110{
111 int i;
112
113 dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
114 vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
115 BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
116
117 guest_vp &= vp_mask;
118 for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
119 struct hpte_cache *pte;
120
121 pte = &vcpu->arch.hpte_cache[i];
122 if (!pte->host_va)
123 continue;
124
125 if ((pte->pte.vpage & vp_mask) == guest_vp) {
126 invalidate_pte(vcpu, pte);
127 }
128 }
129}
130
131void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
132{
133 int i;
134
135 dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n",
136 vcpu->arch.hpte_cache_offset, pa_start, pa_end);
137 BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
138
139 for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
140 struct hpte_cache *pte;
141
142 pte = &vcpu->arch.hpte_cache[i];
143 if (!pte->host_va)
144 continue;
145
146 if ((pte->pte.raddr >= pa_start) &&
147 (pte->pte.raddr < pa_end)) {
148 invalidate_pte(vcpu, pte);
149 }
150 }
151}
152
153struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data)
154{
155 int i;
156 u64 guest_vp;
157
158 guest_vp = vcpu->arch.mmu.ea_to_vp(vcpu, ea, false);
159 for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
160 struct hpte_cache *pte;
161
162 pte = &vcpu->arch.hpte_cache[i];
163 if (!pte->host_va)
164 continue;
165
166 if (pte->pte.vpage == guest_vp)
167 return &pte->pte;
168 }
169
170 return NULL;
171}
172
173static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
174{
175 if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
176 kvmppc_mmu_pte_flush(vcpu, 0, 0);
177
178 return vcpu->arch.hpte_cache_offset++;
179}
180
181/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
182 * a hash, so we don't waste cycles on looping */
183static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
184{
185 return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
186 ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
187 ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
188 ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
189 ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
190 ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
191 ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
192 ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
193}
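
Folded out, the hash above simply XORs the guest VSID down to SID_MAP_BITS bits to pick a slot in the map. A minimal standalone sketch, assuming SID_MAP_BITS is 9 (512 entries, per the comment above); the helper name and sample VSID are illustrative only:

	#include <stdio.h>

	#define SID_MAP_BITS 9				/* 512 entries, per the comment above */
	#define SID_MAP_MASK ((1 << SID_MAP_BITS) - 1)

	static unsigned short sid_hash(unsigned long long gvsid)
	{
		unsigned short hash = 0;
		int i;

		/* same eight terms as the unrolled expression above */
		for (i = 0; i < 8; i++)
			hash ^= (gvsid >> (SID_MAP_BITS * i)) & SID_MAP_MASK;
		return hash;
	}

	int main(void)
	{
		printf("slot for gvsid 0xdeadbeef: %u\n", sid_hash(0xdeadbeefULL));
		return 0;
	}
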
194
195
196static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
197{
198 struct kvmppc_sid_map *map;
199 u16 sid_map_mask;
200
201 if (vcpu->arch.msr & MSR_PR)
202 gvsid |= VSID_PR;
203
204 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
205 map = &to_book3s(vcpu)->sid_map[sid_map_mask];
206 if (map->guest_vsid == gvsid) {
207 dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
208 gvsid, map->host_vsid);
209 return map;
210 }
211
212 map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
213 if (map->guest_vsid == gvsid) {
214 dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
215 gvsid, map->host_vsid);
216 return map;
217 }
218
219 dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
220 return NULL;
221}
222
223static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
224 bool primary)
225{
226 u32 page, hash;
227 ulong pteg = htab;
228
229 page = (eaddr & ~ESID_MASK) >> 12;
230
231 hash = ((vsid ^ page) << 6);
232 if (!primary)
233 hash = ~hash;
234
235 hash &= htabmask;
236
237 pteg |= hash;
238
239 dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
240 htab, hash, htabmask, pteg);
241
242 return (u32*)pteg;
243}
244
245extern char etext[];
246
247int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
248{
249 pfn_t hpaddr;
250 u64 va;
251 u64 vsid;
252 struct kvmppc_sid_map *map;
253 volatile u32 *pteg;
254 u32 eaddr = orig_pte->eaddr;
255 u32 pteg0, pteg1;
256 register int rr = 0;
257 bool primary = false;
258 bool evict = false;
259 int hpte_id;
260 struct hpte_cache *pte;
261
262 /* Get host physical address for gpa */
263 hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
264 if (kvm_is_error_hva(hpaddr)) {
265 printk(KERN_INFO "Couldn't get guest page for ea %lx!\n",
266 orig_pte->eaddr);
267 return -EINVAL;
268 }
269 hpaddr <<= PAGE_SHIFT;
270
271 /* and write the mapping ea -> hpa into the pt */
272 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
273 map = find_sid_vsid(vcpu, vsid);
274 if (!map) {
275 kvmppc_mmu_map_segment(vcpu, eaddr);
276 map = find_sid_vsid(vcpu, vsid);
277 }
278 BUG_ON(!map);
279
280 vsid = map->host_vsid;
281 va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK);
282
283next_pteg:
284 if (rr == 16) {
285 primary = !primary;
286 evict = true;
287 rr = 0;
288 }
289
290 pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);
291
292 /* not evicting yet */
293 if (!evict && (pteg[rr] & PTE_V)) {
294 rr += 2;
295 goto next_pteg;
296 }
297
298 dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
299 dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
300 dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
301 dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
302 dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
303 dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
304 dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
305 dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
306 dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);
307
308 pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
309 (primary ? 0 : PTE_SEC);
310 pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;
311
312 if (orig_pte->may_write) {
313 pteg1 |= PP_RWRW;
314 mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
315 } else {
316 pteg1 |= PP_RWRX;
317 }
318
319 local_irq_disable();
320
321 if (pteg[rr]) {
322 pteg[rr] = 0;
323 asm volatile ("sync");
324 }
325 pteg[rr + 1] = pteg1;
326 pteg[rr] = pteg0;
327 asm volatile ("sync");
328
329 local_irq_enable();
330
331 dprintk_mmu("KVM: new PTEG: %p\n", pteg);
332 dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
333 dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
334 dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
335 dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
336 dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
337 dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
338 dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
339 dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);
340
341
342 /* Now tell our Shadow PTE code about the new page */
343
344 hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
345 pte = &vcpu->arch.hpte_cache[hpte_id];
346
347 dprintk_mmu("KVM: %c%c Map 0x%lx: [%lx] 0x%llx (0x%llx) -> %lx\n",
348 orig_pte->may_write ? 'w' : '-',
349 orig_pte->may_execute ? 'x' : '-',
350 orig_pte->eaddr, (ulong)pteg, va,
351 orig_pte->vpage, hpaddr);
352
353 pte->slot = (ulong)&pteg[rr];
354 pte->host_va = va;
355 pte->pte = *orig_pte;
356 pte->pfn = hpaddr >> PAGE_SHIFT;
357
358 return 0;
359}
360
361static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
362{
363 struct kvmppc_sid_map *map;
364 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
365 u16 sid_map_mask;
366 static int backwards_map = 0;
367
368 if (vcpu->arch.msr & MSR_PR)
369 gvsid |= VSID_PR;
370
371 /* Colliding hashes would always resolve the same way, so let's
372 alternate which of the two candidate slots they map to */
373
374 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
375 if (backwards_map)
376 sid_map_mask = SID_MAP_MASK - sid_map_mask;
377
378 map = &to_book3s(vcpu)->sid_map[sid_map_mask];
379
380 /* Make sure we're taking the other map next time */
381 backwards_map = !backwards_map;
382
383 /* Uh-oh ... out of mappings. Let's flush! */
384 if (vcpu_book3s->vsid_next >= vcpu_book3s->vsid_max) {
385 vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
386 memset(vcpu_book3s->sid_map, 0,
387 sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
388 kvmppc_mmu_pte_flush(vcpu, 0, 0);
389 kvmppc_mmu_flush_segments(vcpu);
390 }
391 map->host_vsid = vcpu_book3s->vsid_next;
392
393 /* Would have to be 111 to be completely aligned with the rest of
394 Linux, but that is just way too little space! */
395 vcpu_book3s->vsid_next++;
396
397 map->guest_vsid = gvsid;
398 map->valid = true;
399
400 return map;
401}
402
403int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
404{
405 u32 esid = eaddr >> SID_SHIFT;
406 u64 gvsid;
407 u32 sr;
408 struct kvmppc_sid_map *map;
409 struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
410
411 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
412 /* Invalidate an entry */
413 svcpu->sr[esid] = SR_INVALID;
414 return -ENOENT;
415 }
416
417 map = find_sid_vsid(vcpu, gvsid);
418 if (!map)
419 map = create_sid_map(vcpu, gvsid);
420
421 map->guest_esid = esid;
422 sr = map->host_vsid | SR_KP;
423 svcpu->sr[esid] = sr;
424
425 dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);
426
427 return 0;
428}
429
430void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
431{
432 int i;
433 struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
434
435 dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
436 for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
437 svcpu->sr[i] = SR_INVALID;
438}
439
440void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
441{
442 kvmppc_mmu_pte_flush(vcpu, 0, 0);
443 preempt_disable();
444 __destroy_context(to_book3s(vcpu)->context_id);
445 preempt_enable();
446}
447
448/* From mm/mmu_context_hash32.c */
449#define CTX_TO_VSID(ctx) (((ctx) * (897 * 16)) & 0xffffff)
450
451int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
452{
453 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
454 int err;
455 ulong sdr1;
456
457 err = __init_new_context();
458 if (err < 0)
459 return -1;
460 vcpu3s->context_id = err;
461
462 vcpu3s->vsid_max = CTX_TO_VSID(vcpu3s->context_id + 1) - 1;
463 vcpu3s->vsid_first = CTX_TO_VSID(vcpu3s->context_id);
464
465#if 0 /* XXX still doesn't guarantee uniqueness */
466 /* We could collide with the Linux vsid space because the vsid
467 * wraps around at 24 bits. We're safe if we do our own space
468 * though, so let's always set the highest bit. */
469
470 vcpu3s->vsid_max |= 0x00800000;
471 vcpu3s->vsid_first |= 0x00800000;
472#endif
473 BUG_ON(vcpu3s->vsid_max < vcpu3s->vsid_first);
474
475 vcpu3s->vsid_next = vcpu3s->vsid_first;
476
477 /* Remember where the HTAB is */
478 asm ( "mfsdr1 %0" : "=r"(sdr1) );
479 htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
480 htab = (ulong)__va(sdr1 & 0xffff0000);
481
482 return 0;
483}
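
For reference, the PTEG address computation in kvmppc_mmu_get_pteg() and the SDR1 decoding in kvmppc_mmu_init() above boil down to a few mask-and-shift steps. A minimal userspace sketch, assuming 4k pages; the helper name and the sample SDR1/VSID/EA values are illustrative, not part of the patch:

	#include <stdio.h>

	#define ESID_MASK 0xf0000000UL	/* top 4 EA bits select the segment */

	/* mirrors kvmppc_mmu_get_pteg(): hash vsid^page into the host HTAB */
	static unsigned long pteg_addr(unsigned long htab, unsigned long htabmask,
				       unsigned long vsid, unsigned long eaddr,
				       int primary)
	{
		unsigned long page = (eaddr & ~ESID_MASK) >> 12; /* 4k page index */
		unsigned long hash = (vsid ^ page) << 6;	 /* PTEGs are 64 bytes */

		if (!primary)
			hash = ~hash;				 /* secondary hash */

		return htab | (hash & htabmask);		 /* stay inside the HTAB */
	}

	int main(void)
	{
		unsigned long sdr1 = 0x00fe01feUL;		 /* hypothetical SDR1 */
		unsigned long htab = sdr1 & 0xffff0000;		 /* HTABORG */
		unsigned long htabmask = ((sdr1 & 0x1ff) << 16) | 0xffc0;

		printf("primary PTEG at 0x%lx\n",
		       pteg_addr(htab, htabmask, 0x123456, 0x10002000, 1));
		return 0;
	}
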
diff --git a/arch/powerpc/kvm/book3s_32_sr.S b/arch/powerpc/kvm/book3s_32_sr.S
new file mode 100644
index 000000000000..3608471ad2d8
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_32_sr.S
@@ -0,0 +1,143 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright SUSE Linux Products GmbH 2009
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20/******************************************************************************
21 * *
22 * Entry code *
23 * *
24 *****************************************************************************/
25
26.macro LOAD_GUEST_SEGMENTS
27
28 /* Required state:
29 *
30 * MSR = ~IR|DR
31 * R1 = host R1
32 * R2 = host R2
33 * R3 = shadow vcpu
34 * all other volatile GPRS = free
35 * SVCPU[CR] = guest CR
36 * SVCPU[XER] = guest XER
37 * SVCPU[CTR] = guest CTR
38 * SVCPU[LR] = guest LR
39 */
40
41#define XCHG_SR(n) lwz r9, (SVCPU_SR+(n*4))(r3); \
42 mtsr n, r9
43
44 XCHG_SR(0)
45 XCHG_SR(1)
46 XCHG_SR(2)
47 XCHG_SR(3)
48 XCHG_SR(4)
49 XCHG_SR(5)
50 XCHG_SR(6)
51 XCHG_SR(7)
52 XCHG_SR(8)
53 XCHG_SR(9)
54 XCHG_SR(10)
55 XCHG_SR(11)
56 XCHG_SR(12)
57 XCHG_SR(13)
58 XCHG_SR(14)
59 XCHG_SR(15)
60
61 /* Clear BATs. */
62
63#define KVM_KILL_BAT(n, reg) \
64 mtspr SPRN_IBAT##n##U,reg; \
65 mtspr SPRN_IBAT##n##L,reg; \
66 mtspr SPRN_DBAT##n##U,reg; \
67 mtspr SPRN_DBAT##n##L,reg; \
68
69 li r9, 0
70 KVM_KILL_BAT(0, r9)
71 KVM_KILL_BAT(1, r9)
72 KVM_KILL_BAT(2, r9)
73 KVM_KILL_BAT(3, r9)
74
75.endm
76
77/******************************************************************************
78 * *
79 * Exit code *
80 * *
81 *****************************************************************************/
82
83.macro LOAD_HOST_SEGMENTS
84
85 /* Register usage at this point:
86 *
87 * R1 = host R1
88 * R2 = host R2
89 * R12 = exit handler id
90 * R13 = shadow vcpu - SHADOW_VCPU_OFF
91 * SVCPU.* = guest *
92 * SVCPU[CR] = guest CR
93 * SVCPU[XER] = guest XER
94 * SVCPU[CTR] = guest CTR
95 * SVCPU[LR] = guest LR
96 *
97 */
98
99 /* Restore BATs */
100
101 /* We only overwrite the upper part, so we only restore
102 the upper part. */
103#define KVM_LOAD_BAT(n, reg, RA, RB) \
104 lwz RA,(n*16)+0(reg); \
105 lwz RB,(n*16)+4(reg); \
106 mtspr SPRN_IBAT##n##U,RA; \
107 mtspr SPRN_IBAT##n##L,RB; \
108 lwz RA,(n*16)+8(reg); \
109 lwz RB,(n*16)+12(reg); \
110 mtspr SPRN_DBAT##n##U,RA; \
111 mtspr SPRN_DBAT##n##L,RB; \
112
113 lis r9, BATS@ha
114 addi r9, r9, BATS@l
115 tophys(r9, r9)
116 KVM_LOAD_BAT(0, r9, r10, r11)
117 KVM_LOAD_BAT(1, r9, r10, r11)
118 KVM_LOAD_BAT(2, r9, r10, r11)
119 KVM_LOAD_BAT(3, r9, r10, r11)
120
121 /* Restore Segment Registers */
122
123 /* 0xc - 0xf */
124
125 li r0, 4
126 mtctr r0
127 LOAD_REG_IMMEDIATE(r3, 0x20000000 | (0x111 * 0xc))
128 lis r4, 0xc000
1293: mtsrin r3, r4
130 addi r3, r3, 0x111 /* increment VSID */
131 addis r4, r4, 0x1000 /* address of next segment */
132 bdnz 3b
133
134 /* 0x0 - 0xb */
135
136 /* 'current->mm' needs to be in r4 */
137 tophys(r4, r2)
138 lwz r4, MM(r4)
139 tophys(r4, r4)
140 /* This only clobbers r0, r3, r4 and r5 */
141 bl switch_mmu_context
142
143.endm
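
The 0xc-0xf reload loop above is terse; spelled out in C it does the following. Here mtsrin() is a printf stand-in for the privileged PPC32 instruction, and the 0x111 VSID stride is taken from the code above:

	#include <stdio.h>

	/* stand-in for the privileged mtsrin instruction */
	static void mtsrin(unsigned int val, unsigned int ea)
	{
		printf("SR%x <- 0x%08x\n", ea >> 28, val);
	}

	int main(void)
	{
		unsigned int sr = 0x20000000u | (0x111 * 0xc);	/* kernel VSID for SR 0xc */
		unsigned int ea = 0xc0000000u;
		int i;

		for (i = 0; i < 4; i++) {	/* segments 0xc..0xf, as in the bdnz loop */
			mtsrin(sr, ea);
			sr += 0x111;		/* increment VSID */
			ea += 0x10000000u;	/* next 256MB segment */
		}
		return 0;
	}
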
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 512dcff77554..4025ea26b3c1 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -232,7 +232,7 @@ do_second:
232 } 232 }
233 233
234 dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx " 234 dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
235 "-> 0x%llx\n", 235 "-> 0x%lx\n",
236 eaddr, avpn, gpte->vpage, gpte->raddr); 236 eaddr, avpn, gpte->vpage, gpte->raddr);
237 found = true; 237 found = true;
238 break; 238 break;
@@ -383,7 +383,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
383 383
384 if (vcpu->arch.msr & MSR_IR) { 384 if (vcpu->arch.msr & MSR_IR) {
385 kvmppc_mmu_flush_segments(vcpu); 385 kvmppc_mmu_flush_segments(vcpu);
386 kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc); 386 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
387 } 387 }
388} 388}
389 389
@@ -439,37 +439,43 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
439 kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask); 439 kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
440} 440}
441 441
442static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid, 442static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
443 u64 *vsid) 443 u64 *vsid)
444{ 444{
445 ulong ea = esid << SID_SHIFT;
446 struct kvmppc_slb *slb;
447 u64 gvsid = esid;
448
449 if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
450 slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
451 if (slb)
452 gvsid = slb->vsid;
453 }
454
445 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { 455 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
446 case 0: 456 case 0:
447 *vsid = (VSID_REAL >> 16) | esid; 457 *vsid = VSID_REAL | esid;
448 break; 458 break;
449 case MSR_IR: 459 case MSR_IR:
450 *vsid = (VSID_REAL_IR >> 16) | esid; 460 *vsid = VSID_REAL_IR | gvsid;
451 break; 461 break;
452 case MSR_DR: 462 case MSR_DR:
453 *vsid = (VSID_REAL_DR >> 16) | esid; 463 *vsid = VSID_REAL_DR | gvsid;
454 break; 464 break;
455 case MSR_DR|MSR_IR: 465 case MSR_DR|MSR_IR:
456 { 466 if (!slb)
457 ulong ea;
458 struct kvmppc_slb *slb;
459 ea = esid << SID_SHIFT;
460 slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
461 if (slb)
462 *vsid = slb->vsid;
463 else
464 return -ENOENT; 467 return -ENOENT;
465 468
469 *vsid = gvsid;
466 break; 470 break;
467 }
468 default: 471 default:
469 BUG(); 472 BUG();
470 break; 473 break;
471 } 474 }
472 475
476 if (vcpu->arch.msr & MSR_PR)
477 *vsid |= VSID_PR;
478
473 return 0; 479 return 0;
474} 480}
475 481
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index f2899b297ffd..e4b5744977f6 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -48,21 +48,25 @@
48 48
49static void invalidate_pte(struct hpte_cache *pte) 49static void invalidate_pte(struct hpte_cache *pte)
50{ 50{
51 dprintk_mmu("KVM: Flushing SPT %d: 0x%llx (0x%llx) -> 0x%llx\n", 51 dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
52 i, pte->pte.eaddr, pte->pte.vpage, pte->host_va); 52 pte->pte.eaddr, pte->pte.vpage, pte->host_va);
53 53
54 ppc_md.hpte_invalidate(pte->slot, pte->host_va, 54 ppc_md.hpte_invalidate(pte->slot, pte->host_va,
55 MMU_PAGE_4K, MMU_SEGSIZE_256M, 55 MMU_PAGE_4K, MMU_SEGSIZE_256M,
56 false); 56 false);
57 pte->host_va = 0; 57 pte->host_va = 0;
58 kvm_release_pfn_dirty(pte->pfn); 58
59 if (pte->pte.may_write)
60 kvm_release_pfn_dirty(pte->pfn);
61 else
62 kvm_release_pfn_clean(pte->pfn);
59} 63}
60 64
61void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 guest_ea, u64 ea_mask) 65void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
62{ 66{
63 int i; 67 int i;
64 68
65 dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%llx & 0x%llx\n", 69 dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
66 vcpu->arch.hpte_cache_offset, guest_ea, ea_mask); 70 vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
67 BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM); 71 BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
68 72
@@ -106,12 +110,12 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
106 } 110 }
107} 111}
108 112
109void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end) 113void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
110{ 114{
111 int i; 115 int i;
112 116
113 dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n", 117 dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx & 0x%lx\n",
114 vcpu->arch.hpte_cache_offset, guest_pa, pa_mask); 118 vcpu->arch.hpte_cache_offset, pa_start, pa_end);
115 BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM); 119 BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
116 120
117 for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) { 121 for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
@@ -182,7 +186,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
182 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); 186 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
183 map = &to_book3s(vcpu)->sid_map[sid_map_mask]; 187 map = &to_book3s(vcpu)->sid_map[sid_map_mask];
184 if (map->guest_vsid == gvsid) { 188 if (map->guest_vsid == gvsid) {
185 dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n", 189 dprintk_slb("SLB: Searching: 0x%llx -> 0x%llx\n",
186 gvsid, map->host_vsid); 190 gvsid, map->host_vsid);
187 return map; 191 return map;
188 } 192 }
@@ -194,7 +198,8 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
194 return map; 198 return map;
195 } 199 }
196 200
197 dprintk_slb("SLB: Searching 0x%llx -> not found\n", gvsid); 201 dprintk_slb("SLB: Searching %d/%d: 0x%llx -> not found\n",
202 sid_map_mask, SID_MAP_MASK - sid_map_mask, gvsid);
198 return NULL; 203 return NULL;
199} 204}
200 205
@@ -212,7 +217,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
212 /* Get host physical address for gpa */ 217 /* Get host physical address for gpa */
213 hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); 218 hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
214 if (kvm_is_error_hva(hpaddr)) { 219 if (kvm_is_error_hva(hpaddr)) {
215 printk(KERN_INFO "Couldn't get guest page for gfn %llx!\n", orig_pte->eaddr); 220 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
216 return -EINVAL; 221 return -EINVAL;
217 } 222 }
218 hpaddr <<= PAGE_SHIFT; 223 hpaddr <<= PAGE_SHIFT;
@@ -227,10 +232,16 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
227 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); 232 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
228 map = find_sid_vsid(vcpu, vsid); 233 map = find_sid_vsid(vcpu, vsid);
229 if (!map) { 234 if (!map) {
230 kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr); 235 ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
236 WARN_ON(ret < 0);
231 map = find_sid_vsid(vcpu, vsid); 237 map = find_sid_vsid(vcpu, vsid);
232 } 238 }
233 BUG_ON(!map); 239 if (!map) {
240 printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
241 vsid, orig_pte->eaddr);
242 WARN_ON(true);
243 return -EINVAL;
244 }
234 245
235 vsid = map->host_vsid; 246 vsid = map->host_vsid;
236 va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M); 247 va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
@@ -257,26 +268,26 @@ map_again:
257 268
258 if (ret < 0) { 269 if (ret < 0) {
259 /* If we couldn't map a primary PTE, try a secondary */ 270 /* If we couldn't map a primary PTE, try a secondary */
260#ifdef USE_SECONDARY
261 hash = ~hash; 271 hash = ~hash;
272 vflags ^= HPTE_V_SECONDARY;
262 attempt++; 273 attempt++;
263 if (attempt % 2)
264 vflags = HPTE_V_SECONDARY;
265 else
266 vflags = 0;
267#else
268 attempt = 2;
269#endif
270 goto map_again; 274 goto map_again;
271 } else { 275 } else {
272 int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu); 276 int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
273 struct hpte_cache *pte = &vcpu->arch.hpte_cache[hpte_id]; 277 struct hpte_cache *pte = &vcpu->arch.hpte_cache[hpte_id];
274 278
275 dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%lx (0x%llx) -> %lx\n", 279 dprintk_mmu("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx\n",
276 ((rflags & HPTE_R_PP) == 3) ? '-' : 'w', 280 ((rflags & HPTE_R_PP) == 3) ? '-' : 'w',
277 (rflags & HPTE_R_N) ? '-' : 'x', 281 (rflags & HPTE_R_N) ? '-' : 'x',
278 orig_pte->eaddr, hpteg, va, orig_pte->vpage, hpaddr); 282 orig_pte->eaddr, hpteg, va, orig_pte->vpage, hpaddr);
279 283
284 /* The ppc_md code may give us a secondary entry even though we
285 asked for a primary. Fix up. */
286 if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
287 hash = ~hash;
288 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
289 }
290
280 pte->slot = hpteg + (ret & 7); 291 pte->slot = hpteg + (ret & 7);
281 pte->host_va = va; 292 pte->host_va = va;
282 pte->pte = *orig_pte; 293 pte->pte = *orig_pte;
@@ -321,6 +332,9 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
321 map->guest_vsid = gvsid; 332 map->guest_vsid = gvsid;
322 map->valid = true; 333 map->valid = true;
323 334
335 dprintk_slb("SLB: New mapping at %d: 0x%llx -> 0x%llx\n",
336 sid_map_mask, gvsid, map->host_vsid);
337
324 return map; 338 return map;
325} 339}
326 340
@@ -331,14 +345,14 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
331 int found_inval = -1; 345 int found_inval = -1;
332 int r; 346 int r;
333 347
334 if (!get_paca()->kvm_slb_max) 348 if (!to_svcpu(vcpu)->slb_max)
335 get_paca()->kvm_slb_max = 1; 349 to_svcpu(vcpu)->slb_max = 1;
336 350
337 /* Are we overwriting? */ 351 /* Are we overwriting? */
338 for (i = 1; i < get_paca()->kvm_slb_max; i++) { 352 for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) {
339 if (!(get_paca()->kvm_slb[i].esid & SLB_ESID_V)) 353 if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V))
340 found_inval = i; 354 found_inval = i;
341 else if ((get_paca()->kvm_slb[i].esid & ESID_MASK) == esid) 355 else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid)
342 return i; 356 return i;
343 } 357 }
344 358
@@ -352,11 +366,11 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
352 max_slb_size = mmu_slb_size; 366 max_slb_size = mmu_slb_size;
353 367
354 /* Overflowing -> purge */ 368 /* Overflowing -> purge */
355 if ((get_paca()->kvm_slb_max) == max_slb_size) 369 if ((to_svcpu(vcpu)->slb_max) == max_slb_size)
356 kvmppc_mmu_flush_segments(vcpu); 370 kvmppc_mmu_flush_segments(vcpu);
357 371
358 r = get_paca()->kvm_slb_max; 372 r = to_svcpu(vcpu)->slb_max;
359 get_paca()->kvm_slb_max++; 373 to_svcpu(vcpu)->slb_max++;
360 374
361 return r; 375 return r;
362} 376}
@@ -374,7 +388,7 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
374 388
375 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { 389 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
376 /* Invalidate an entry */ 390 /* Invalidate an entry */
377 get_paca()->kvm_slb[slb_index].esid = 0; 391 to_svcpu(vcpu)->slb[slb_index].esid = 0;
378 return -ENOENT; 392 return -ENOENT;
379 } 393 }
380 394
@@ -388,8 +402,8 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
388 slb_vsid &= ~SLB_VSID_KP; 402 slb_vsid &= ~SLB_VSID_KP;
389 slb_esid |= slb_index; 403 slb_esid |= slb_index;
390 404
391 get_paca()->kvm_slb[slb_index].esid = slb_esid; 405 to_svcpu(vcpu)->slb[slb_index].esid = slb_esid;
392 get_paca()->kvm_slb[slb_index].vsid = slb_vsid; 406 to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid;
393 407
394 dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid); 408 dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid);
395 409
@@ -398,11 +412,29 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
398 412
399void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) 413void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
400{ 414{
401 get_paca()->kvm_slb_max = 1; 415 to_svcpu(vcpu)->slb_max = 1;
402 get_paca()->kvm_slb[0].esid = 0; 416 to_svcpu(vcpu)->slb[0].esid = 0;
403} 417}
404 418
405void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) 419void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
406{ 420{
407 kvmppc_mmu_pte_flush(vcpu, 0, 0); 421 kvmppc_mmu_pte_flush(vcpu, 0, 0);
422 __destroy_context(to_book3s(vcpu)->context_id);
423}
424
425int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
426{
427 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
428 int err;
429
430 err = __init_new_context();
431 if (err < 0)
432 return -1;
433 vcpu3s->context_id = err;
434
435 vcpu3s->vsid_max = ((vcpu3s->context_id + 1) << USER_ESID_BITS) - 1;
436 vcpu3s->vsid_first = vcpu3s->context_id << USER_ESID_BITS;
437 vcpu3s->vsid_next = vcpu3s->vsid_first;
438
439 return 0;
408} 440}
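
The new kvmppc_mmu_init() above carves a private VSID window out of each MMU context, so guest segments never collide with another context's mappings. A quick sketch of the arithmetic; USER_ESID_BITS is assumed to be 16 here, its value in this era's mmu-hash64.h:

	#include <stdio.h>

	#define USER_ESID_BITS 16	/* assumed; see asm/mmu-hash64.h */

	int main(void)
	{
		unsigned long context_id = 5;	/* whatever __init_new_context() returned */
		unsigned long vsid_first = context_id << USER_ESID_BITS;
		unsigned long vsid_max = ((context_id + 1) << USER_ESID_BITS) - 1;

		/* every guest segment draws its host VSID from this window */
		printf("vsid window: [0x%lx, 0x%lx]\n", vsid_first, vsid_max);
		return 0;
	}
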
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 35b762722187..04e7d3bbfe8b 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -44,8 +44,7 @@ slb_exit_skip_ ## num:
44 * * 44 * *
45 *****************************************************************************/ 45 *****************************************************************************/
46 46
47.global kvmppc_handler_trampoline_enter 47.macro LOAD_GUEST_SEGMENTS
48kvmppc_handler_trampoline_enter:
49 48
50 /* Required state: 49 /* Required state:
51 * 50 *
@@ -53,20 +52,14 @@ kvmppc_handler_trampoline_enter:
53 * R13 = PACA 52 * R13 = PACA
54 * R1 = host R1 53 * R1 = host R1
55 * R2 = host R2 54 * R2 = host R2
56 * R9 = guest IP 55 * R3 = shadow vcpu
57 * R10 = guest MSR 56 * all other volatile GPRS = free
58 * all other GPRS = free 57 * SVCPU[CR] = guest CR
59 * PACA[KVM_CR] = guest CR 58 * SVCPU[XER] = guest XER
60 * PACA[KVM_XER] = guest XER 59 * SVCPU[CTR] = guest CTR
60 * SVCPU[LR] = guest LR
61 */ 61 */
62 62
63 mtsrr0 r9
64 mtsrr1 r10
65
66 /* Activate guest mode, so faults get handled by KVM */
67 li r11, KVM_GUEST_MODE_GUEST
68 stb r11, PACA_KVM_IN_GUEST(r13)
69
70 /* Remove LPAR shadow entries */ 63 /* Remove LPAR shadow entries */
71 64
72#if SLB_NUM_BOLTED == 3 65#if SLB_NUM_BOLTED == 3
@@ -101,14 +94,14 @@ kvmppc_handler_trampoline_enter:
101 94
102 /* Fill SLB with our shadow */ 95 /* Fill SLB with our shadow */
103 96
104 lbz r12, PACA_KVM_SLB_MAX(r13) 97 lbz r12, SVCPU_SLB_MAX(r3)
105 mulli r12, r12, 16 98 mulli r12, r12, 16
106 addi r12, r12, PACA_KVM_SLB 99 addi r12, r12, SVCPU_SLB
107 add r12, r12, r13 100 add r12, r12, r3
108 101
109 /* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */ 102 /* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
110 li r11, PACA_KVM_SLB 103 li r11, SVCPU_SLB
111 add r11, r11, r13 104 add r11, r11, r3
112 105
113slb_loop_enter: 106slb_loop_enter:
114 107
@@ -127,34 +120,7 @@ slb_loop_enter_skip:
127 120
128slb_do_enter: 121slb_do_enter:
129 122
130 /* Enter guest */ 123.endm
131
132 ld r0, (PACA_KVM_R0)(r13)
133 ld r1, (PACA_KVM_R1)(r13)
134 ld r2, (PACA_KVM_R2)(r13)
135 ld r3, (PACA_KVM_R3)(r13)
136 ld r4, (PACA_KVM_R4)(r13)
137 ld r5, (PACA_KVM_R5)(r13)
138 ld r6, (PACA_KVM_R6)(r13)
139 ld r7, (PACA_KVM_R7)(r13)
140 ld r8, (PACA_KVM_R8)(r13)
141 ld r9, (PACA_KVM_R9)(r13)
142 ld r10, (PACA_KVM_R10)(r13)
143 ld r12, (PACA_KVM_R12)(r13)
144
145 lwz r11, (PACA_KVM_CR)(r13)
146 mtcr r11
147
148 ld r11, (PACA_KVM_XER)(r13)
149 mtxer r11
150
151 ld r11, (PACA_KVM_R11)(r13)
152 ld r13, (PACA_KVM_R13)(r13)
153
154 RFI
155kvmppc_handler_trampoline_enter_end:
156
157
158 124
159/****************************************************************************** 125/******************************************************************************
160 * * 126 * *
@@ -162,99 +128,22 @@ kvmppc_handler_trampoline_enter_end:
162 * * 128 * *
163 *****************************************************************************/ 129 *****************************************************************************/
164 130
165.global kvmppc_handler_trampoline_exit 131.macro LOAD_HOST_SEGMENTS
166kvmppc_handler_trampoline_exit:
167 132
168 /* Register usage at this point: 133 /* Register usage at this point:
169 * 134 *
170 * SPRG_SCRATCH0 = guest R13 135 * R1 = host R1
171 * R12 = exit handler id 136 * R2 = host R2
172 * R13 = PACA 137 * R12 = exit handler id
173 * PACA.KVM.SCRATCH0 = guest R12 138 * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
174 * PACA.KVM.SCRATCH1 = guest CR 139 * SVCPU.* = guest *
140 * SVCPU[CR] = guest CR
141 * SVCPU[XER] = guest XER
142 * SVCPU[CTR] = guest CTR
143 * SVCPU[LR] = guest LR
175 * 144 *
176 */ 145 */
177 146
178 /* Save registers */
179
180 std r0, PACA_KVM_R0(r13)
181 std r1, PACA_KVM_R1(r13)
182 std r2, PACA_KVM_R2(r13)
183 std r3, PACA_KVM_R3(r13)
184 std r4, PACA_KVM_R4(r13)
185 std r5, PACA_KVM_R5(r13)
186 std r6, PACA_KVM_R6(r13)
187 std r7, PACA_KVM_R7(r13)
188 std r8, PACA_KVM_R8(r13)
189 std r9, PACA_KVM_R9(r13)
190 std r10, PACA_KVM_R10(r13)
191 std r11, PACA_KVM_R11(r13)
192
193 /* Restore R1/R2 so we can handle faults */
194 ld r1, PACA_KVM_HOST_R1(r13)
195 ld r2, PACA_KVM_HOST_R2(r13)
196
197 /* Save guest PC and MSR in GPRs */
198 mfsrr0 r3
199 mfsrr1 r4
200
201 /* Get scratch'ed off registers */
202 mfspr r9, SPRN_SPRG_SCRATCH0
203 std r9, PACA_KVM_R13(r13)
204
205 ld r8, PACA_KVM_SCRATCH0(r13)
206 std r8, PACA_KVM_R12(r13)
207
208 lwz r7, PACA_KVM_SCRATCH1(r13)
209 stw r7, PACA_KVM_CR(r13)
210
211 /* Save more register state */
212
213 mfxer r6
214 stw r6, PACA_KVM_XER(r13)
215
216 mfdar r5
217 mfdsisr r6
218
219 /*
220 * In order for us to easily get the last instruction,
221 * we got the #vmexit at, we exploit the fact that the
222 * virtual layout is still the same here, so we can just
223 * ld from the guest's PC address
224 */
225
226 /* We only load the last instruction when it's safe */
227 cmpwi r12, BOOK3S_INTERRUPT_DATA_STORAGE
228 beq ld_last_inst
229 cmpwi r12, BOOK3S_INTERRUPT_PROGRAM
230 beq ld_last_inst
231
232 b no_ld_last_inst
233
234ld_last_inst:
235 /* Save off the guest instruction we're at */
236
237 /* Set guest mode to 'jump over instruction' so if lwz faults
238 * we'll just continue at the next IP. */
239 li r9, KVM_GUEST_MODE_SKIP
240 stb r9, PACA_KVM_IN_GUEST(r13)
241
242 /* 1) enable paging for data */
243 mfmsr r9
244 ori r11, r9, MSR_DR /* Enable paging for data */
245 mtmsr r11
246 /* 2) fetch the instruction */
247 li r0, KVM_INST_FETCH_FAILED /* In case lwz faults */
248 lwz r0, 0(r3)
249 /* 3) disable paging again */
250 mtmsr r9
251
252no_ld_last_inst:
253
254 /* Unset guest mode */
255 li r9, KVM_GUEST_MODE_NONE
256 stb r9, PACA_KVM_IN_GUEST(r13)
257
258 /* Restore bolted entries from the shadow and fix it along the way */ 147 /* Restore bolted entries from the shadow and fix it along the way */
259 148
260 /* We don't store anything in entry 0, so we don't need to take care of it */ 149 /* We don't store anything in entry 0, so we don't need to take care of it */
@@ -275,28 +164,4 @@ no_ld_last_inst:
275 164
276slb_do_exit: 165slb_do_exit:
277 166
278 /* Register usage at this point: 167.endm
279 *
280 * R0 = guest last inst
281 * R1 = host R1
282 * R2 = host R2
283 * R3 = guest PC
284 * R4 = guest MSR
285 * R5 = guest DAR
286 * R6 = guest DSISR
287 * R12 = exit handler id
288 * R13 = PACA
289 * PACA.KVM.* = guest *
290 *
291 */
292
293 /* RFI into the highmem handler */
294 mfmsr r7
295 ori r7, r7, MSR_IR|MSR_DR|MSR_RI /* Enable paging */
296 mtsrr1 r7
297 ld r8, PACA_KVM_VMHANDLER(r13) /* Highmem handler address */
298 mtsrr0 r8
299
300 RFI
301kvmppc_handler_trampoline_exit_end:
302
diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 2b0ee7e040c9..c85f906038ce 100644
--- a/arch/powerpc/kvm/book3s_64_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -28,13 +28,16 @@
28#define OP_31_XOP_MFMSR 83 28#define OP_31_XOP_MFMSR 83
29#define OP_31_XOP_MTMSR 146 29#define OP_31_XOP_MTMSR 146
30#define OP_31_XOP_MTMSRD 178 30#define OP_31_XOP_MTMSRD 178
31#define OP_31_XOP_MTSR 210
31#define OP_31_XOP_MTSRIN 242 32#define OP_31_XOP_MTSRIN 242
32#define OP_31_XOP_TLBIEL 274 33#define OP_31_XOP_TLBIEL 274
33#define OP_31_XOP_TLBIE 306 34#define OP_31_XOP_TLBIE 306
34#define OP_31_XOP_SLBMTE 402 35#define OP_31_XOP_SLBMTE 402
35#define OP_31_XOP_SLBIE 434 36#define OP_31_XOP_SLBIE 434
36#define OP_31_XOP_SLBIA 498 37#define OP_31_XOP_SLBIA 498
38#define OP_31_XOP_MFSR 595
37#define OP_31_XOP_MFSRIN 659 39#define OP_31_XOP_MFSRIN 659
40#define OP_31_XOP_DCBA 758
38#define OP_31_XOP_SLBMFEV 851 41#define OP_31_XOP_SLBMFEV 851
39#define OP_31_XOP_EIOIO 854 42#define OP_31_XOP_EIOIO 854
40#define OP_31_XOP_SLBMFEE 915 43#define OP_31_XOP_SLBMFEE 915
@@ -42,6 +45,24 @@
42/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */ 45/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
43#define OP_31_XOP_DCBZ 1010 46#define OP_31_XOP_DCBZ 1010
44 47
48#define OP_LFS 48
49#define OP_LFD 50
50#define OP_STFS 52
51#define OP_STFD 54
52
53#define SPRN_GQR0 912
54#define SPRN_GQR1 913
55#define SPRN_GQR2 914
56#define SPRN_GQR3 915
57#define SPRN_GQR4 916
58#define SPRN_GQR5 917
59#define SPRN_GQR6 918
60#define SPRN_GQR7 919
61
62/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
63 * function pointers, so let's just disable the define. */
64#undef mfsrin
65
45int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, 66int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
46 unsigned int inst, int *advance) 67 unsigned int inst, int *advance)
47{ 68{
@@ -52,7 +73,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
52 switch (get_xop(inst)) { 73 switch (get_xop(inst)) {
53 case OP_19_XOP_RFID: 74 case OP_19_XOP_RFID:
54 case OP_19_XOP_RFI: 75 case OP_19_XOP_RFI:
55 vcpu->arch.pc = vcpu->arch.srr0; 76 kvmppc_set_pc(vcpu, vcpu->arch.srr0);
56 kvmppc_set_msr(vcpu, vcpu->arch.srr1); 77 kvmppc_set_msr(vcpu, vcpu->arch.srr1);
57 *advance = 0; 78 *advance = 0;
58 break; 79 break;
@@ -80,6 +101,18 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
80 case OP_31_XOP_MTMSR: 101 case OP_31_XOP_MTMSR:
81 kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst))); 102 kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
82 break; 103 break;
104 case OP_31_XOP_MFSR:
105 {
106 int srnum;
107
108 srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
109 if (vcpu->arch.mmu.mfsrin) {
110 u32 sr;
111 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
112 kvmppc_set_gpr(vcpu, get_rt(inst), sr);
113 }
114 break;
115 }
83 case OP_31_XOP_MFSRIN: 116 case OP_31_XOP_MFSRIN:
84 { 117 {
85 int srnum; 118 int srnum;
@@ -92,6 +125,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
92 } 125 }
93 break; 126 break;
94 } 127 }
128 case OP_31_XOP_MTSR:
129 vcpu->arch.mmu.mtsrin(vcpu,
130 (inst >> 16) & 0xf,
131 kvmppc_get_gpr(vcpu, get_rs(inst)));
132 break;
95 case OP_31_XOP_MTSRIN: 133 case OP_31_XOP_MTSRIN:
96 vcpu->arch.mmu.mtsrin(vcpu, 134 vcpu->arch.mmu.mtsrin(vcpu,
97 (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf, 135 (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
@@ -150,12 +188,17 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
150 kvmppc_set_gpr(vcpu, get_rt(inst), t); 188 kvmppc_set_gpr(vcpu, get_rt(inst), t);
151 } 189 }
152 break; 190 break;
191 case OP_31_XOP_DCBA:
192 /* Gets treated as NOP */
193 break;
153 case OP_31_XOP_DCBZ: 194 case OP_31_XOP_DCBZ:
154 { 195 {
155 ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst)); 196 ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
156 ulong ra = 0; 197 ulong ra = 0;
157 ulong addr; 198 ulong addr, vaddr;
158 u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 199 u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
200 u32 dsisr;
201 int r;
159 202
160 if (get_ra(inst)) 203 if (get_ra(inst))
161 ra = kvmppc_get_gpr(vcpu, get_ra(inst)); 204 ra = kvmppc_get_gpr(vcpu, get_ra(inst));
@@ -163,15 +206,25 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
163 addr = (ra + rb) & ~31ULL; 206 addr = (ra + rb) & ~31ULL;
164 if (!(vcpu->arch.msr & MSR_SF)) 207 if (!(vcpu->arch.msr & MSR_SF))
165 addr &= 0xffffffff; 208 addr &= 0xffffffff;
209 vaddr = addr;
210
211 r = kvmppc_st(vcpu, &addr, 32, zeros, true);
212 if ((r == -ENOENT) || (r == -EPERM)) {
213 *advance = 0;
214 vcpu->arch.dear = vaddr;
215 to_svcpu(vcpu)->fault_dar = vaddr;
216
217 dsisr = DSISR_ISSTORE;
218 if (r == -ENOENT)
219 dsisr |= DSISR_NOHPTE;
220 else if (r == -EPERM)
221 dsisr |= DSISR_PROTFAULT;
222
223 to_book3s(vcpu)->dsisr = dsisr;
224 to_svcpu(vcpu)->fault_dsisr = dsisr;
166 225
167 if (kvmppc_st(vcpu, addr, 32, zeros)) {
168 vcpu->arch.dear = addr;
169 vcpu->arch.fault_dear = addr;
170 to_book3s(vcpu)->dsisr = DSISR_PROTFAULT |
171 DSISR_ISSTORE;
172 kvmppc_book3s_queue_irqprio(vcpu, 226 kvmppc_book3s_queue_irqprio(vcpu,
173 BOOK3S_INTERRUPT_DATA_STORAGE); 227 BOOK3S_INTERRUPT_DATA_STORAGE);
174 kvmppc_mmu_pte_flush(vcpu, addr, ~0xFFFULL);
175 } 228 }
176 229
177 break; 230 break;
@@ -184,6 +237,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
184 emulated = EMULATE_FAIL; 237 emulated = EMULATE_FAIL;
185 } 238 }
186 239
240 if (emulated == EMULATE_FAIL)
241 emulated = kvmppc_emulate_paired_single(run, vcpu);
242
187 return emulated; 243 return emulated;
188} 244}
189 245
@@ -207,6 +263,34 @@ void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
207 } 263 }
208} 264}
209 265
266static u32 kvmppc_read_bat(struct kvm_vcpu *vcpu, int sprn)
267{
268 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
269 struct kvmppc_bat *bat;
270
271 switch (sprn) {
272 case SPRN_IBAT0U ... SPRN_IBAT3L:
273 bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
274 break;
275 case SPRN_IBAT4U ... SPRN_IBAT7L:
276 bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
277 break;
278 case SPRN_DBAT0U ... SPRN_DBAT3L:
279 bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
280 break;
281 case SPRN_DBAT4U ... SPRN_DBAT7L:
282 bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
283 break;
284 default:
285 BUG();
286 }
287
288 if (sprn % 2)
289 return bat->raw >> 32;
290 else
291 return bat->raw;
292}
293
210static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val) 294static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
211{ 295{
212 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); 296 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
@@ -217,13 +301,13 @@ static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
217 bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2]; 301 bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
218 break; 302 break;
219 case SPRN_IBAT4U ... SPRN_IBAT7L: 303 case SPRN_IBAT4U ... SPRN_IBAT7L:
220 bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT4U) / 2]; 304 bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
221 break; 305 break;
222 case SPRN_DBAT0U ... SPRN_DBAT3L: 306 case SPRN_DBAT0U ... SPRN_DBAT3L:
223 bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2]; 307 bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
224 break; 308 break;
225 case SPRN_DBAT4U ... SPRN_DBAT7L: 309 case SPRN_DBAT4U ... SPRN_DBAT7L:
226 bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT4U) / 2]; 310 bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
227 break; 311 break;
228 default: 312 default:
229 BUG(); 313 BUG();
@@ -258,6 +342,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
258 /* BAT writes happen so rarely that we're ok to flush 342 /* BAT writes happen so rarely that we're ok to flush
259 * everything here */ 343 * everything here */
260 kvmppc_mmu_pte_flush(vcpu, 0, 0); 344 kvmppc_mmu_pte_flush(vcpu, 0, 0);
345 kvmppc_mmu_flush_segments(vcpu);
261 break; 346 break;
262 case SPRN_HID0: 347 case SPRN_HID0:
263 to_book3s(vcpu)->hid[0] = spr_val; 348 to_book3s(vcpu)->hid[0] = spr_val;
@@ -268,7 +353,32 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
268 case SPRN_HID2: 353 case SPRN_HID2:
269 to_book3s(vcpu)->hid[2] = spr_val; 354 to_book3s(vcpu)->hid[2] = spr_val;
270 break; 355 break;
356 case SPRN_HID2_GEKKO:
357 to_book3s(vcpu)->hid[2] = spr_val;
358 /* HID2.PSE controls paired single on gekko */
359 switch (vcpu->arch.pvr) {
360 case 0x00080200: /* lonestar 2.0 */
361 case 0x00088202: /* lonestar 2.2 */
362 case 0x70000100: /* gekko 1.0 */
363 case 0x00080100: /* gekko 2.0 */
364 case 0x00083203: /* gekko 2.3a */
365 case 0x00083213: /* gekko 2.3b */
366 case 0x00083204: /* gekko 2.4 */
367 case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */
368 case 0x00087200: /* broadway */
369 if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
370 /* Native paired singles */
371 } else if (spr_val & (1 << 29)) { /* HID2.PSE */
372 vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
373 kvmppc_giveup_ext(vcpu, MSR_FP);
374 } else {
375 vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
376 }
377 break;
378 }
379 break;
271 case SPRN_HID4: 380 case SPRN_HID4:
381 case SPRN_HID4_GEKKO:
272 to_book3s(vcpu)->hid[4] = spr_val; 382 to_book3s(vcpu)->hid[4] = spr_val;
273 break; 383 break;
274 case SPRN_HID5: 384 case SPRN_HID5:
@@ -278,12 +388,30 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
278 (mfmsr() & MSR_HV)) 388 (mfmsr() & MSR_HV))
279 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; 389 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
280 break; 390 break;
391 case SPRN_GQR0:
392 case SPRN_GQR1:
393 case SPRN_GQR2:
394 case SPRN_GQR3:
395 case SPRN_GQR4:
396 case SPRN_GQR5:
397 case SPRN_GQR6:
398 case SPRN_GQR7:
399 to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
400 break;
281 case SPRN_ICTC: 401 case SPRN_ICTC:
282 case SPRN_THRM1: 402 case SPRN_THRM1:
283 case SPRN_THRM2: 403 case SPRN_THRM2:
284 case SPRN_THRM3: 404 case SPRN_THRM3:
285 case SPRN_CTRLF: 405 case SPRN_CTRLF:
286 case SPRN_CTRLT: 406 case SPRN_CTRLT:
407 case SPRN_L2CR:
408 case SPRN_MMCR0_GEKKO:
409 case SPRN_MMCR1_GEKKO:
410 case SPRN_PMC1_GEKKO:
411 case SPRN_PMC2_GEKKO:
412 case SPRN_PMC3_GEKKO:
413 case SPRN_PMC4_GEKKO:
414 case SPRN_WPAR_GEKKO:
287 break; 415 break;
288 default: 416 default:
289 printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn); 417 printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
@@ -301,6 +429,12 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
301 int emulated = EMULATE_DONE; 429 int emulated = EMULATE_DONE;
302 430
303 switch (sprn) { 431 switch (sprn) {
432 case SPRN_IBAT0U ... SPRN_IBAT3L:
433 case SPRN_IBAT4U ... SPRN_IBAT7L:
434 case SPRN_DBAT0U ... SPRN_DBAT3L:
435 case SPRN_DBAT4U ... SPRN_DBAT7L:
436 kvmppc_set_gpr(vcpu, rt, kvmppc_read_bat(vcpu, sprn));
437 break;
304 case SPRN_SDR1: 438 case SPRN_SDR1:
305 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1); 439 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
306 break; 440 break;
@@ -320,19 +454,40 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
320 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]); 454 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
321 break; 455 break;
322 case SPRN_HID2: 456 case SPRN_HID2:
457 case SPRN_HID2_GEKKO:
323 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]); 458 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
324 break; 459 break;
325 case SPRN_HID4: 460 case SPRN_HID4:
461 case SPRN_HID4_GEKKO:
326 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]); 462 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
327 break; 463 break;
328 case SPRN_HID5: 464 case SPRN_HID5:
329 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]); 465 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
330 break; 466 break;
467 case SPRN_GQR0:
468 case SPRN_GQR1:
469 case SPRN_GQR2:
470 case SPRN_GQR3:
471 case SPRN_GQR4:
472 case SPRN_GQR5:
473 case SPRN_GQR6:
474 case SPRN_GQR7:
475 kvmppc_set_gpr(vcpu, rt,
476 to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]);
477 break;
331 case SPRN_THRM1: 478 case SPRN_THRM1:
332 case SPRN_THRM2: 479 case SPRN_THRM2:
333 case SPRN_THRM3: 480 case SPRN_THRM3:
334 case SPRN_CTRLF: 481 case SPRN_CTRLF:
335 case SPRN_CTRLT: 482 case SPRN_CTRLT:
483 case SPRN_L2CR:
484 case SPRN_MMCR0_GEKKO:
485 case SPRN_MMCR1_GEKKO:
486 case SPRN_PMC1_GEKKO:
487 case SPRN_PMC2_GEKKO:
488 case SPRN_PMC3_GEKKO:
489 case SPRN_PMC4_GEKKO:
490 case SPRN_WPAR_GEKKO:
336 kvmppc_set_gpr(vcpu, rt, 0); 491 kvmppc_set_gpr(vcpu, rt, 0);
337 break; 492 break;
338 default: 493 default:
@@ -346,3 +501,73 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
346 return emulated; 501 return emulated;
347} 502}
348 503
504u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
505{
506 u32 dsisr = 0;
507
508 /*
509 * This is what the spec says about DSISR bits (not mentioned = 0):
510 *
511 * 12:13 [DS] Set to bits 30:31
512 * 15:16 [X] Set to bits 29:30
513 * 17 [X] Set to bit 25
514 * [D/DS] Set to bit 5
515 * 18:21 [X] Set to bits 21:24
516 * [D/DS] Set to bits 1:4
517 * 22:26 Set to bits 6:10 (RT/RS/FRT/FRS)
518 * 27:31 Set to bits 11:15 (RA)
519 */
520
521 switch (get_op(inst)) {
522 /* D-form */
523 case OP_LFS:
524 case OP_LFD:
525 case OP_STFD:
526 case OP_STFS:
527 dsisr |= (inst >> 12) & 0x4000; /* bit 17 */
528 dsisr |= (inst >> 17) & 0x3c00; /* bits 18:21 */
529 break;
530 /* X-form */
531 case 31:
532 dsisr |= (inst << 14) & 0x18000; /* bits 15:16 */
533 dsisr |= (inst << 8) & 0x04000; /* bit 17 */
534 dsisr |= (inst << 3) & 0x03c00; /* bits 18:21 */
535 break;
536 default:
537 printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
538 break;
539 }
540
541 dsisr |= (inst >> 16) & 0x03ff; /* bits 22:31 */
542
543 return dsisr;
544}
545
546ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
547{
548 ulong dar = 0;
549 ulong ra;
550
551 switch (get_op(inst)) {
552 case OP_LFS:
553 case OP_LFD:
554 case OP_STFD:
555 case OP_STFS:
556 ra = get_ra(inst);
557 if (ra)
558 dar = kvmppc_get_gpr(vcpu, ra);
559 dar += (s32)((s16)inst);
560 break;
561 case 31:
562 ra = get_ra(inst);
563 if (ra)
564 dar = kvmppc_get_gpr(vcpu, ra);
565 dar += kvmppc_get_gpr(vcpu, get_rb(inst));
566 break;
567 default:
568 printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
569 break;
570 }
571
572 return dar;
573}
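
The DSISR/DAR helpers added above rebuild the alignment-interrupt state the hardware would have produced. As a worked example, the D-form branch alone, lifted into a standalone program; the hand-encoded lfs below is an assumption worth double-checking against the ISA:

	#include <stdio.h>

	typedef unsigned int u32;

	/* the D-form branch of kvmppc_alignment_dsisr() above, stand-alone */
	static u32 dform_dsisr(u32 inst)
	{
		u32 dsisr = 0;

		dsisr |= (inst >> 12) & 0x4000;	/* DSISR bit 17 <- inst bit 5 */
		dsisr |= (inst >> 17) & 0x3c00;	/* DSISR bits 18:21 <- inst bits 1:4 */
		dsisr |= (inst >> 16) & 0x03ff;	/* DSISR bits 22:31 <- RT/RS and RA */
		return dsisr;
	}

	int main(void)
	{
		/* lfs f1, 8(r2): opcode 48, FRT=1, RA=2, D=8 (hand-encoded) */
		u32 inst = (48u << 26) | (1u << 21) | (2u << 16) | 8u;

		printf("DSISR image: 0x%x\n", dform_dsisr(inst));
		return 0;
	}
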
diff --git a/arch/powerpc/kvm/book3s_64_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 1dd5a1ddfd0d..1dd5a1ddfd0d 100644
--- a/arch/powerpc/kvm/book3s_64_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index c1584d0cbce8..2f0bc928b08a 100644
--- a/arch/powerpc/kvm/book3s_64_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -24,36 +24,56 @@
24#include <asm/asm-offsets.h> 24#include <asm/asm-offsets.h>
25#include <asm/exception-64s.h> 25#include <asm/exception-64s.h>
26 26
27#define KVMPPC_HANDLE_EXIT .kvmppc_handle_exit 27#if defined(CONFIG_PPC_BOOK3S_64)
28#define ULONG_SIZE 8
29#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
30 28
31.macro DISABLE_INTERRUPTS 29#define ULONG_SIZE 8
32 mfmsr r0 30#define FUNC(name) GLUE(.,name)
33 rldicl r0,r0,48,1
34 rotldi r0,r0,16
35 mtmsrd r0,1
36.endm
37 31
32#define GET_SHADOW_VCPU(reg) \
33 addi reg, r13, PACA_KVM_SVCPU
34
35#define DISABLE_INTERRUPTS \
36 mfmsr r0; \
37 rldicl r0,r0,48,1; \
38 rotldi r0,r0,16; \
39 mtmsrd r0,1; \
40
41#elif defined(CONFIG_PPC_BOOK3S_32)
42
43#define ULONG_SIZE 4
44#define FUNC(name) name
45
46#define GET_SHADOW_VCPU(reg) \
47 lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2)
48
49#define DISABLE_INTERRUPTS \
50 mfmsr r0; \
51 rlwinm r0,r0,0,17,15; \
52 mtmsr r0; \
53
54#endif /* CONFIG_PPC_BOOK3S_XX */
55
56
57#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
38#define VCPU_LOAD_NVGPRS(vcpu) \ 58#define VCPU_LOAD_NVGPRS(vcpu) \
39 ld r14, VCPU_GPR(r14)(vcpu); \ 59 PPC_LL r14, VCPU_GPR(r14)(vcpu); \
40 ld r15, VCPU_GPR(r15)(vcpu); \ 60 PPC_LL r15, VCPU_GPR(r15)(vcpu); \
41 ld r16, VCPU_GPR(r16)(vcpu); \ 61 PPC_LL r16, VCPU_GPR(r16)(vcpu); \
42 ld r17, VCPU_GPR(r17)(vcpu); \ 62 PPC_LL r17, VCPU_GPR(r17)(vcpu); \
43 ld r18, VCPU_GPR(r18)(vcpu); \ 63 PPC_LL r18, VCPU_GPR(r18)(vcpu); \
44 ld r19, VCPU_GPR(r19)(vcpu); \ 64 PPC_LL r19, VCPU_GPR(r19)(vcpu); \
45 ld r20, VCPU_GPR(r20)(vcpu); \ 65 PPC_LL r20, VCPU_GPR(r20)(vcpu); \
46 ld r21, VCPU_GPR(r21)(vcpu); \ 66 PPC_LL r21, VCPU_GPR(r21)(vcpu); \
47 ld r22, VCPU_GPR(r22)(vcpu); \ 67 PPC_LL r22, VCPU_GPR(r22)(vcpu); \
48 ld r23, VCPU_GPR(r23)(vcpu); \ 68 PPC_LL r23, VCPU_GPR(r23)(vcpu); \
49 ld r24, VCPU_GPR(r24)(vcpu); \ 69 PPC_LL r24, VCPU_GPR(r24)(vcpu); \
50 ld r25, VCPU_GPR(r25)(vcpu); \ 70 PPC_LL r25, VCPU_GPR(r25)(vcpu); \
51 ld r26, VCPU_GPR(r26)(vcpu); \ 71 PPC_LL r26, VCPU_GPR(r26)(vcpu); \
52 ld r27, VCPU_GPR(r27)(vcpu); \ 72 PPC_LL r27, VCPU_GPR(r27)(vcpu); \
53 ld r28, VCPU_GPR(r28)(vcpu); \ 73 PPC_LL r28, VCPU_GPR(r28)(vcpu); \
54 ld r29, VCPU_GPR(r29)(vcpu); \ 74 PPC_LL r29, VCPU_GPR(r29)(vcpu); \
55 ld r30, VCPU_GPR(r30)(vcpu); \ 75 PPC_LL r30, VCPU_GPR(r30)(vcpu); \
56 ld r31, VCPU_GPR(r31)(vcpu); \ 76 PPC_LL r31, VCPU_GPR(r31)(vcpu); \
57 77
58/***************************************************************************** 78/*****************************************************************************
59 * * 79 * *
@@ -69,11 +89,11 @@ _GLOBAL(__kvmppc_vcpu_entry)
69 89
70kvm_start_entry: 90kvm_start_entry:
71 /* Write correct stack frame */ 91 /* Write correct stack frame */
72 mflr r0 92 mflr r0
73 std r0,16(r1) 93 PPC_STL r0,PPC_LR_STKOFF(r1)
74 94
75 /* Save host state to the stack */ 95 /* Save host state to the stack */
76 stdu r1, -SWITCH_FRAME_SIZE(r1) 96 PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)
77 97
78 /* Save r3 (kvm_run) and r4 (vcpu) */ 98 /* Save r3 (kvm_run) and r4 (vcpu) */
79 SAVE_2GPRS(3, r1) 99 SAVE_2GPRS(3, r1)
@@ -82,33 +102,28 @@ kvm_start_entry:
82 SAVE_NVGPRS(r1) 102 SAVE_NVGPRS(r1)
83 103
84 /* Save LR */ 104 /* Save LR */
85 std r0, _LINK(r1) 105 PPC_STL r0, _LINK(r1)
86 106
87 /* Load non-volatile guest state from the vcpu */ 107 /* Load non-volatile guest state from the vcpu */
88 VCPU_LOAD_NVGPRS(r4) 108 VCPU_LOAD_NVGPRS(r4)
89 109
110 GET_SHADOW_VCPU(r5)
111
90 /* Save R1/R2 in the PACA */ 112 /* Save R1/R2 in the PACA */
91 std r1, PACA_KVM_HOST_R1(r13) 113 PPC_STL r1, SVCPU_HOST_R1(r5)
92 std r2, PACA_KVM_HOST_R2(r13) 114 PPC_STL r2, SVCPU_HOST_R2(r5)
93 115
94 /* XXX swap in/out on load? */ 116 /* XXX swap in/out on load? */
95 ld r3, VCPU_HIGHMEM_HANDLER(r4) 117 PPC_LL r3, VCPU_HIGHMEM_HANDLER(r4)
96 std r3, PACA_KVM_VMHANDLER(r13) 118 PPC_STL r3, SVCPU_VMHANDLER(r5)
97 119
98kvm_start_lightweight: 120kvm_start_lightweight:
99 121
100 ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */ 122 PPC_LL r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
101 ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
102
103 /* Load some guest state in the respective registers */
104 ld r5, VCPU_CTR(r4) /* r5 = vcpu->arch.ctr */
105 /* will be swapped in by rmcall */
106
107 ld r3, VCPU_LR(r4) /* r3 = vcpu->arch.lr */
108 mtlr r3 /* LR = r3 */
109 123
110 DISABLE_INTERRUPTS 124 DISABLE_INTERRUPTS
111 125
126#ifdef CONFIG_PPC_BOOK3S_64
112 /* Some guests may need to have dcbz set to 32 byte length. 127 /* Some guests may need to have dcbz set to 32 byte length.
113 * 128 *
114 * Usually we ensure that by patching the guest's instructions 129 * Usually we ensure that by patching the guest's instructions
@@ -118,7 +133,7 @@ kvm_start_lightweight:
118 * because that's a lot faster. 133 * because that's a lot faster.
119 */ 134 */
120 135
121 ld r3, VCPU_HFLAGS(r4) 136 PPC_LL r3, VCPU_HFLAGS(r4)
122 rldicl. r3, r3, 0, 63 /* CR = ((r3 & 1) == 0) */ 137 rldicl. r3, r3, 0, 63 /* CR = ((r3 & 1) == 0) */
123 beq no_dcbz32_on 138 beq no_dcbz32_on
124 139
@@ -128,13 +143,15 @@ kvm_start_lightweight:
128 143
129no_dcbz32_on: 144no_dcbz32_on:
130 145
131 ld r6, VCPU_RMCALL(r4) 146#endif /* CONFIG_PPC_BOOK3S_64 */
147
148 PPC_LL r6, VCPU_RMCALL(r4)
132 mtctr r6 149 mtctr r6
133 150
134 ld r3, VCPU_TRAMPOLINE_ENTER(r4) 151 PPC_LL r3, VCPU_TRAMPOLINE_ENTER(r4)
135 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR)) 152 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
136 153
137 /* Jump to SLB patching handlder and into our guest */ 154 /* Jump to segment patching handler and into our guest */
138 bctr 155 bctr
139 156
140/* 157/*
@@ -149,31 +166,20 @@ kvmppc_handler_highmem:
149 /* 166 /*
150 * Register usage at this point: 167 * Register usage at this point:
151 * 168 *
152 * R0 = guest last inst 169 * R1 = host R1
153 * R1 = host R1 170 * R2 = host R2
154 * R2 = host R2 171 * R12 = exit handler id
155 * R3 = guest PC 172 * R13 = PACA
156 * R4 = guest MSR 173 * SVCPU.* = guest *
157 * R5 = guest DAR
158 * R6 = guest DSISR
159 * R13 = PACA
160 * PACA.KVM.* = guest *
161 * 174 *
162 */ 175 */
163 176
164 /* R7 = vcpu */ 177 /* R7 = vcpu */
165 ld r7, GPR4(r1) 178 PPC_LL r7, GPR4(r1)
166 179
167 /* Now save the guest state */ 180#ifdef CONFIG_PPC_BOOK3S_64
168 181
169 stw r0, VCPU_LAST_INST(r7) 182 PPC_LL r5, VCPU_HFLAGS(r7)
170
171 std r3, VCPU_PC(r7)
172 std r4, VCPU_SHADOW_SRR1(r7)
173 std r5, VCPU_FAULT_DEAR(r7)
174 std r6, VCPU_FAULT_DSISR(r7)
175
176 ld r5, VCPU_HFLAGS(r7)
177 rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */ 183 rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */
178 beq no_dcbz32_off 184 beq no_dcbz32_off
179 185
@@ -184,35 +190,29 @@ kvmppc_handler_highmem:
184 190
185no_dcbz32_off: 191no_dcbz32_off:
186 192
187 std r14, VCPU_GPR(r14)(r7) 193#endif /* CONFIG_PPC_BOOK3S_64 */
188 std r15, VCPU_GPR(r15)(r7) 194
189 std r16, VCPU_GPR(r16)(r7) 195 PPC_STL r14, VCPU_GPR(r14)(r7)
190 std r17, VCPU_GPR(r17)(r7) 196 PPC_STL r15, VCPU_GPR(r15)(r7)
191 std r18, VCPU_GPR(r18)(r7) 197 PPC_STL r16, VCPU_GPR(r16)(r7)
192 std r19, VCPU_GPR(r19)(r7) 198 PPC_STL r17, VCPU_GPR(r17)(r7)
193 std r20, VCPU_GPR(r20)(r7) 199 PPC_STL r18, VCPU_GPR(r18)(r7)
194 std r21, VCPU_GPR(r21)(r7) 200 PPC_STL r19, VCPU_GPR(r19)(r7)
195 std r22, VCPU_GPR(r22)(r7) 201 PPC_STL r20, VCPU_GPR(r20)(r7)
196 std r23, VCPU_GPR(r23)(r7) 202 PPC_STL r21, VCPU_GPR(r21)(r7)
197 std r24, VCPU_GPR(r24)(r7) 203 PPC_STL r22, VCPU_GPR(r22)(r7)
198 std r25, VCPU_GPR(r25)(r7) 204 PPC_STL r23, VCPU_GPR(r23)(r7)
199 std r26, VCPU_GPR(r26)(r7) 205 PPC_STL r24, VCPU_GPR(r24)(r7)
200 std r27, VCPU_GPR(r27)(r7) 206 PPC_STL r25, VCPU_GPR(r25)(r7)
201 std r28, VCPU_GPR(r28)(r7) 207 PPC_STL r26, VCPU_GPR(r26)(r7)
202 std r29, VCPU_GPR(r29)(r7) 208 PPC_STL r27, VCPU_GPR(r27)(r7)
203 std r30, VCPU_GPR(r30)(r7) 209 PPC_STL r28, VCPU_GPR(r28)(r7)
204 std r31, VCPU_GPR(r31)(r7) 210 PPC_STL r29, VCPU_GPR(r29)(r7)
205 211 PPC_STL r30, VCPU_GPR(r30)(r7)
206 /* Save guest CTR */ 212 PPC_STL r31, VCPU_GPR(r31)(r7)
207 mfctr r5
208 std r5, VCPU_CTR(r7)
209
210 /* Save guest LR */
211 mflr r5
212 std r5, VCPU_LR(r7)
213 213
214 /* Restore host msr -> SRR1 */ 214 /* Restore host msr -> SRR1 */
215 ld r6, VCPU_HOST_MSR(r7) 215 PPC_LL r6, VCPU_HOST_MSR(r7)
216 216
217 /* 217 /*
218 * For some interrupts, we need to call the real Linux 218 * For some interrupts, we need to call the real Linux
@@ -228,9 +228,12 @@ no_dcbz32_off:
228 beq call_linux_handler 228 beq call_linux_handler
229 cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER 229 cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER
230 beq call_linux_handler 230 beq call_linux_handler
231 cmpwi r12, BOOK3S_INTERRUPT_PERFMON
232 beq call_linux_handler
231 233
232 /* Back to EE=1 */ 234 /* Back to EE=1 */
233 mtmsr r6 235 mtmsr r6
236 sync
234 b kvm_return_point 237 b kvm_return_point
235 238
236call_linux_handler: 239call_linux_handler:
@@ -249,14 +252,14 @@ call_linux_handler:
249 */ 252 */
250 253
251 /* Restore host IP -> SRR0 */ 254 /* Restore host IP -> SRR0 */
252 ld r5, VCPU_HOST_RETIP(r7) 255 PPC_LL r5, VCPU_HOST_RETIP(r7)
253 256
254 /* XXX Better move to a safe function? 257 /* XXX Better move to a safe function?
255 * What if we get an HTAB flush in between mtsrr0 and mtsrr1? */ 258 * What if we get an HTAB flush in between mtsrr0 and mtsrr1? */
256 259
257 mtlr r12 260 mtlr r12
258 261
259 ld r4, VCPU_TRAMPOLINE_LOWMEM(r7) 262 PPC_LL r4, VCPU_TRAMPOLINE_LOWMEM(r7)
260 mtsrr0 r4 263 mtsrr0 r4
261 LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)) 264 LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
262 mtsrr1 r3 265 mtsrr1 r3
@@ -274,7 +277,7 @@ kvm_return_point:
274 277
275 /* Restore r3 (kvm_run) and r4 (vcpu) */ 278 /* Restore r3 (kvm_run) and r4 (vcpu) */
276 REST_2GPRS(3, r1) 279 REST_2GPRS(3, r1)
277 bl KVMPPC_HANDLE_EXIT 280 bl FUNC(kvmppc_handle_exit)
278 281
279 /* If RESUME_GUEST, get back in the loop */ 282 /* If RESUME_GUEST, get back in the loop */
280 cmpwi r3, RESUME_GUEST 283 cmpwi r3, RESUME_GUEST
@@ -285,7 +288,7 @@ kvm_return_point:
285 288
286kvm_exit_loop: 289kvm_exit_loop:
287 290
288 ld r4, _LINK(r1) 291 PPC_LL r4, _LINK(r1)
289 mtlr r4 292 mtlr r4
290 293
291 /* Restore non-volatile host registers (r14 - r31) */ 294 /* Restore non-volatile host registers (r14 - r31) */
@@ -296,8 +299,8 @@ kvm_exit_loop:
296 299
297kvm_loop_heavyweight: 300kvm_loop_heavyweight:
298 301
299 ld r4, _LINK(r1) 302 PPC_LL r4, _LINK(r1)
300 std r4, (16 + SWITCH_FRAME_SIZE)(r1) 303 PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)
301 304
302 /* Load vcpu and cpu_run */ 305 /* Load vcpu and cpu_run */
303 REST_2GPRS(3, r1) 306 REST_2GPRS(3, r1)
@@ -315,4 +318,3 @@ kvm_loop_lightweight:
315 318
316 /* Jump back into the beginning of this function */ 319 /* Jump back into the beginning of this function */
317 b kvm_start_lightweight 320 b kvm_start_lightweight
318
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
new file mode 100644
index 000000000000..a9f66abafcb3
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -0,0 +1,1289 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright Novell Inc 2010
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20#include <asm/kvm.h>
21#include <asm/kvm_ppc.h>
22#include <asm/disassemble.h>
23#include <asm/kvm_book3s.h>
24#include <asm/kvm_fpu.h>
25#include <asm/reg.h>
26#include <asm/cacheflush.h>
27#include <linux/vmalloc.h>
28
29/* #define DEBUG */
30
31#ifdef DEBUG
32#define dprintk printk
33#else
34#define dprintk(...) do { } while (0)
35#endif
36
37#define OP_LFS 48
38#define OP_LFSU 49
39#define OP_LFD 50
40#define OP_LFDU 51
41#define OP_STFS 52
42#define OP_STFSU 53
43#define OP_STFD 54
44#define OP_STFDU 55
45#define OP_PSQ_L 56
46#define OP_PSQ_LU 57
47#define OP_PSQ_ST 60
48#define OP_PSQ_STU 61
49
50#define OP_31_LFSX 535
51#define OP_31_LFSUX 567
52#define OP_31_LFDX 599
53#define OP_31_LFDUX 631
54#define OP_31_STFSX 663
55#define OP_31_STFSUX 695
56#define OP_31_STFX 727
57#define OP_31_STFUX 759
58#define OP_31_LWIZX 887
59#define OP_31_STFIWX 983
60
61#define OP_59_FADDS 21
62#define OP_59_FSUBS 20
63#define OP_59_FSQRTS 22
64#define OP_59_FDIVS 18
65#define OP_59_FRES 24
66#define OP_59_FMULS 25
67#define OP_59_FRSQRTES 26
68#define OP_59_FMSUBS 28
69#define OP_59_FMADDS 29
70#define OP_59_FNMSUBS 30
71#define OP_59_FNMADDS 31
72
73#define OP_63_FCMPU 0
74#define OP_63_FCPSGN 8
75#define OP_63_FRSP 12
76#define OP_63_FCTIW 14
77#define OP_63_FCTIWZ 15
78#define OP_63_FDIV 18
79#define OP_63_FADD 21
80#define OP_63_FSQRT 22
81#define OP_63_FSEL 23
82#define OP_63_FRE 24
83#define OP_63_FMUL 25
84#define OP_63_FRSQRTE 26
85#define OP_63_FMSUB 28
86#define OP_63_FMADD 29
87#define OP_63_FNMSUB 30
88#define OP_63_FNMADD 31
89#define OP_63_FCMPO 32
90#define OP_63_MTFSB1 38 // XXX
91#define OP_63_FSUB 20
92#define OP_63_FNEG 40
93#define OP_63_MCRFS 64
94#define OP_63_MTFSB0 70
95#define OP_63_FMR 72
96#define OP_63_MTFSFI 134
97#define OP_63_FABS 264
98#define OP_63_MFFS 583
99#define OP_63_MTFSF 711
100
101#define OP_4X_PS_CMPU0 0
102#define OP_4X_PSQ_LX 6
103#define OP_4XW_PSQ_STX 7
104#define OP_4A_PS_SUM0 10
105#define OP_4A_PS_SUM1 11
106#define OP_4A_PS_MULS0 12
107#define OP_4A_PS_MULS1 13
108#define OP_4A_PS_MADDS0 14
109#define OP_4A_PS_MADDS1 15
110#define OP_4A_PS_DIV 18
111#define OP_4A_PS_SUB 20
112#define OP_4A_PS_ADD 21
113#define OP_4A_PS_SEL 23
114#define OP_4A_PS_RES 24
115#define OP_4A_PS_MUL 25
116#define OP_4A_PS_RSQRTE 26
117#define OP_4A_PS_MSUB 28
118#define OP_4A_PS_MADD 29
119#define OP_4A_PS_NMSUB 30
120#define OP_4A_PS_NMADD 31
121#define OP_4X_PS_CMPO0 32
122#define OP_4X_PSQ_LUX 38
123#define OP_4XW_PSQ_STUX 39
124#define OP_4X_PS_NEG 40
125#define OP_4X_PS_CMPU1 64
126#define OP_4X_PS_MR 72
127#define OP_4X_PS_CMPO1 96
128#define OP_4X_PS_NABS 136
129#define OP_4X_PS_ABS 264
130#define OP_4X_PS_MERGE00 528
131#define OP_4X_PS_MERGE01 560
132#define OP_4X_PS_MERGE10 592
133#define OP_4X_PS_MERGE11 624
134
135#define SCALAR_NONE 0
136#define SCALAR_HIGH (1 << 0)
137#define SCALAR_LOW (1 << 1)
138#define SCALAR_NO_PS0 (1 << 2)
139#define SCALAR_NO_PS1 (1 << 3)
140
141#define GQR_ST_TYPE_MASK 0x00000007
142#define GQR_ST_TYPE_SHIFT 0
143#define GQR_ST_SCALE_MASK 0x00003f00
144#define GQR_ST_SCALE_SHIFT 8
145#define GQR_LD_TYPE_MASK 0x00070000
146#define GQR_LD_TYPE_SHIFT 16
147#define GQR_LD_SCALE_MASK 0x3f000000
148#define GQR_LD_SCALE_SHIFT 24
149
150#define GQR_QUANTIZE_FLOAT 0
151#define GQR_QUANTIZE_U8 4
152#define GQR_QUANTIZE_U16 5
153#define GQR_QUANTIZE_S8 6
154#define GQR_QUANTIZE_S16 7
155
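/*
 * A sketch, not part of this patch: how the GQR_* masks above decode a
 * paired-single quantization register - store type/scale in the low
 * half, load type/scale in the high half. The emulation below does not
 * apply quantization yet; the GQR index handed to the psq load/store
 * helpers is currently ignored.
 */
static inline u32 gqr_ld_type(u32 gqr)
{
	return (gqr & GQR_LD_TYPE_MASK) >> GQR_LD_TYPE_SHIFT;
}

static inline u32 gqr_ld_scale(u32 gqr)
{
	return (gqr & GQR_LD_SCALE_MASK) >> GQR_LD_SCALE_SHIFT;
}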
156#define FPU_LS_SINGLE 0
157#define FPU_LS_DOUBLE 1
158#define FPU_LS_SINGLE_LOW 2
159
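/*
 * The QPR array shadows the PS1 half of each paired-single register as
 * a raw 32-bit single. Whenever an emulated scalar op rewrites the FPR
 * (PS0) as a double, this refreshes the single-precision PS1 copy.
 */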
160static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
161{
162 struct thread_struct t;
163
164 t.fpscr.val = vcpu->arch.fpscr;
165 cvt_df((double*)&vcpu->arch.fpr[rt], (float*)&vcpu->arch.qpr[rt], &t);
166}
167
168static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
169{
170 u64 dsisr;
171
172 vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 36, 0);
173 vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
174 vcpu->arch.dear = eaddr;
175	dsisr = kvmppc_set_field(0, 33, 33, 1);	/* Page fault */
176	if (is_store)
177		dsisr = kvmppc_set_field(dsisr, 38, 38, 1);
178	to_book3s(vcpu)->dsisr = dsisr;
179	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
180}
181
182static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
183 int rs, ulong addr, int ls_type)
184{
185 int emulated = EMULATE_FAIL;
186 struct thread_struct t;
187 int r;
188 char tmp[8];
189 int len = sizeof(u32);
190
191 if (ls_type == FPU_LS_DOUBLE)
192 len = sizeof(u64);
193
194 t.fpscr.val = vcpu->arch.fpscr;
195
196 /* read from memory */
197 r = kvmppc_ld(vcpu, &addr, len, tmp, true);
198 vcpu->arch.paddr_accessed = addr;
199
200 if (r < 0) {
201 kvmppc_inject_pf(vcpu, addr, false);
202 goto done_load;
203 } else if (r == EMULATE_DO_MMIO) {
204 emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, len, 1);
205 goto done_load;
206 }
207
208 emulated = EMULATE_DONE;
209
210 /* put in registers */
211 switch (ls_type) {
212 case FPU_LS_SINGLE:
213 cvt_fd((float*)tmp, (double*)&vcpu->arch.fpr[rs], &t);
214 vcpu->arch.qpr[rs] = *((u32*)tmp);
215 break;
216 case FPU_LS_DOUBLE:
217 vcpu->arch.fpr[rs] = *((u64*)tmp);
218 break;
219 }
220
221 dprintk(KERN_INFO "KVM: FPR_LD [0x%llx] at 0x%lx (%d)\n", *(u64*)tmp,
222 addr, len);
223
224done_load:
225 return emulated;
226}
227
228static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
229 int rs, ulong addr, int ls_type)
230{
231 int emulated = EMULATE_FAIL;
232 struct thread_struct t;
233 int r;
234 char tmp[8];
235 u64 val;
236 int len;
237
238 t.fpscr.val = vcpu->arch.fpscr;
239
240 switch (ls_type) {
241 case FPU_LS_SINGLE:
242 cvt_df((double*)&vcpu->arch.fpr[rs], (float*)tmp, &t);
243 val = *((u32*)tmp);
244 len = sizeof(u32);
245 break;
246 case FPU_LS_SINGLE_LOW:
247 *((u32*)tmp) = vcpu->arch.fpr[rs];
248 val = vcpu->arch.fpr[rs] & 0xffffffff;
249 len = sizeof(u32);
250 break;
251 case FPU_LS_DOUBLE:
252 *((u64*)tmp) = vcpu->arch.fpr[rs];
253 val = vcpu->arch.fpr[rs];
254 len = sizeof(u64);
255 break;
256 default:
257 val = 0;
258 len = 0;
259 }
260
261 r = kvmppc_st(vcpu, &addr, len, tmp, true);
262 vcpu->arch.paddr_accessed = addr;
263 if (r < 0) {
264 kvmppc_inject_pf(vcpu, addr, true);
265 } else if (r == EMULATE_DO_MMIO) {
266 emulated = kvmppc_handle_store(run, vcpu, val, len, 1);
267 } else {
268 emulated = EMULATE_DONE;
269 }
270
271 dprintk(KERN_INFO "KVM: FPR_ST [0x%llx] at 0x%lx (%d)\n",
272 val, addr, len);
273
274 return emulated;
275}
276
277static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
278 int rs, ulong addr, bool w, int i)
279{
280 int emulated = EMULATE_FAIL;
281 struct thread_struct t;
282 int r;
283 float one = 1.0;
284 u32 tmp[2];
285
286 t.fpscr.val = vcpu->arch.fpscr;
287
288 /* read from memory */
289 if (w) {
290 r = kvmppc_ld(vcpu, &addr, sizeof(u32), tmp, true);
291 memcpy(&tmp[1], &one, sizeof(u32));
292 } else {
293 r = kvmppc_ld(vcpu, &addr, sizeof(u32) * 2, tmp, true);
294 }
295 vcpu->arch.paddr_accessed = addr;
296 if (r < 0) {
297 kvmppc_inject_pf(vcpu, addr, false);
298 goto done_load;
299 } else if ((r == EMULATE_DO_MMIO) && w) {
300 emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, 4, 1);
301 vcpu->arch.qpr[rs] = tmp[1];
302 goto done_load;
303 } else if (r == EMULATE_DO_MMIO) {
304 emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FQPR | rs, 8, 1);
305 goto done_load;
306 }
307
308 emulated = EMULATE_DONE;
309
310 /* put in registers */
311 cvt_fd((float*)&tmp[0], (double*)&vcpu->arch.fpr[rs], &t);
312 vcpu->arch.qpr[rs] = tmp[1];
313
314 dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
315 tmp[1], addr, w ? 4 : 8);
316
317done_load:
318 return emulated;
319}
320
321static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
322 int rs, ulong addr, bool w, int i)
323{
324 int emulated = EMULATE_FAIL;
325 struct thread_struct t;
326 int r;
327 u32 tmp[2];
328 int len = w ? sizeof(u32) : sizeof(u64);
329
330 t.fpscr.val = vcpu->arch.fpscr;
331
332 cvt_df((double*)&vcpu->arch.fpr[rs], (float*)&tmp[0], &t);
333 tmp[1] = vcpu->arch.qpr[rs];
334
335 r = kvmppc_st(vcpu, &addr, len, tmp, true);
336 vcpu->arch.paddr_accessed = addr;
337 if (r < 0) {
338 kvmppc_inject_pf(vcpu, addr, true);
339 } else if ((r == EMULATE_DO_MMIO) && w) {
340 emulated = kvmppc_handle_store(run, vcpu, tmp[0], 4, 1);
341 } else if (r == EMULATE_DO_MMIO) {
342 u64 val = ((u64)tmp[0] << 32) | tmp[1];
343 emulated = kvmppc_handle_store(run, vcpu, val, 8, 1);
344 } else {
345 emulated = EMULATE_DONE;
346 }
347
348 dprintk(KERN_INFO "KVM: PSQ_ST [0x%x, 0x%x] at 0x%lx (%d)\n",
349 tmp[0], tmp[1], addr, len);
350
351 return emulated;
352}
353
354/*
355 * Cuts out inst bits with ordering according to spec.
356 * That means the leftmost bit is zero. All given bits are included.
357 */
358static inline u32 inst_get_field(u32 inst, int msb, int lsb)
359{
360 return kvmppc_get_field(inst, msb + 32, lsb + 32);
361}
362
363/*
364 * Replaces inst bits with ordering according to spec.
365 */
366static inline u32 inst_set_field(u32 inst, int msb, int lsb, int value)
367{
368 return kvmppc_set_field(inst, msb + 32, lsb + 32, value);
369}
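/*
 * Worked example, a sketch that is not part of this patch: PowerPC
 * documentation numbers bits from the most significant end, so bit 0
 * is the top bit of the word. For a 32-bit instruction,
 * inst_get_field(inst, 21, 30) is (inst >> 1) & 0x3ff in LSB-0 terms,
 * i.e. the 10-bit extended opcode. A plain-C equivalent for fields
 * narrower than 32 bits:
 */
static inline u32 inst_get_field_sketch(u32 inst, int msb, int lsb)
{
	int width = lsb - msb + 1;
	int shift = 31 - lsb;	/* IBM bit 'lsb' is conventional bit 31-lsb */

	return (inst >> shift) & ((1u << width) - 1);
}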
370
371bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst)
372{
373 if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
374 return false;
375
376 switch (get_op(inst)) {
377 case OP_PSQ_L:
378 case OP_PSQ_LU:
379 case OP_PSQ_ST:
380 case OP_PSQ_STU:
381 case OP_LFS:
382 case OP_LFSU:
383 case OP_LFD:
384 case OP_LFDU:
385 case OP_STFS:
386 case OP_STFSU:
387 case OP_STFD:
388 case OP_STFDU:
389 return true;
390 case 4:
391 /* X form */
392 switch (inst_get_field(inst, 21, 30)) {
393 case OP_4X_PS_CMPU0:
394 case OP_4X_PSQ_LX:
395 case OP_4X_PS_CMPO0:
396 case OP_4X_PSQ_LUX:
397 case OP_4X_PS_NEG:
398 case OP_4X_PS_CMPU1:
399 case OP_4X_PS_MR:
400 case OP_4X_PS_CMPO1:
401 case OP_4X_PS_NABS:
402 case OP_4X_PS_ABS:
403 case OP_4X_PS_MERGE00:
404 case OP_4X_PS_MERGE01:
405 case OP_4X_PS_MERGE10:
406 case OP_4X_PS_MERGE11:
407 return true;
408 }
409 /* XW form */
410 switch (inst_get_field(inst, 25, 30)) {
411 case OP_4XW_PSQ_STX:
412 case OP_4XW_PSQ_STUX:
413 return true;
414 }
415 /* A form */
416 switch (inst_get_field(inst, 26, 30)) {
417 case OP_4A_PS_SUM1:
418 case OP_4A_PS_SUM0:
419 case OP_4A_PS_MULS0:
420 case OP_4A_PS_MULS1:
421 case OP_4A_PS_MADDS0:
422 case OP_4A_PS_MADDS1:
423 case OP_4A_PS_DIV:
424 case OP_4A_PS_SUB:
425 case OP_4A_PS_ADD:
426 case OP_4A_PS_SEL:
427 case OP_4A_PS_RES:
428 case OP_4A_PS_MUL:
429 case OP_4A_PS_RSQRTE:
430 case OP_4A_PS_MSUB:
431 case OP_4A_PS_MADD:
432 case OP_4A_PS_NMSUB:
433 case OP_4A_PS_NMADD:
434 return true;
435 }
436 break;
437 case 59:
438 switch (inst_get_field(inst, 21, 30)) {
439 case OP_59_FADDS:
440 case OP_59_FSUBS:
441 case OP_59_FDIVS:
442 case OP_59_FRES:
443 case OP_59_FRSQRTES:
444 return true;
445 }
446 switch (inst_get_field(inst, 26, 30)) {
447 case OP_59_FMULS:
448 case OP_59_FMSUBS:
449 case OP_59_FMADDS:
450 case OP_59_FNMSUBS:
451 case OP_59_FNMADDS:
452 return true;
453 }
454 break;
455 case 63:
456 switch (inst_get_field(inst, 21, 30)) {
457 case OP_63_MTFSB0:
458 case OP_63_MTFSB1:
459 case OP_63_MTFSF:
460 case OP_63_MTFSFI:
461 case OP_63_MCRFS:
462 case OP_63_MFFS:
463 case OP_63_FCMPU:
464 case OP_63_FCMPO:
465 case OP_63_FNEG:
466 case OP_63_FMR:
467 case OP_63_FABS:
468 case OP_63_FRSP:
469 case OP_63_FDIV:
470 case OP_63_FADD:
471 case OP_63_FSUB:
472 case OP_63_FCTIW:
473 case OP_63_FCTIWZ:
474 case OP_63_FRSQRTE:
475 case OP_63_FCPSGN:
476 return true;
477 }
478 switch (inst_get_field(inst, 26, 30)) {
479 case OP_63_FMUL:
480 case OP_63_FSEL:
481 case OP_63_FMSUB:
482 case OP_63_FMADD:
483 case OP_63_FNMSUB:
484 case OP_63_FNMADD:
485 return true;
486 }
487 break;
488 case 31:
489 switch (inst_get_field(inst, 21, 30)) {
490 case OP_31_LFSX:
491 case OP_31_LFSUX:
492 case OP_31_LFDX:
493 case OP_31_LFDUX:
494 case OP_31_STFSX:
495 case OP_31_STFSUX:
496 case OP_31_STFX:
497 case OP_31_STFUX:
498 case OP_31_STFIWX:
499 return true;
500 }
501 break;
502 }
503
504 return false;
505}
506
507static int get_d_signext(u32 inst)
508{
509 int d = inst & 0x8ff;
510
511 if (d & 0x800)
512 return -(d & 0x7ff);
513
514 return (d & 0x7ff);
515}
516
517static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
518 int reg_out, int reg_in1, int reg_in2,
519 int reg_in3, int scalar,
520 void (*func)(struct thread_struct *t,
521 u32 *dst, u32 *src1,
522 u32 *src2, u32 *src3))
523{
524 u32 *qpr = vcpu->arch.qpr;
525 u64 *fpr = vcpu->arch.fpr;
526 u32 ps0_out;
527 u32 ps0_in1, ps0_in2, ps0_in3;
528 u32 ps1_in1, ps1_in2, ps1_in3;
529 struct thread_struct t;
530 t.fpscr.val = vcpu->arch.fpscr;
531
532 /* RC */
533 WARN_ON(rc);
534
535 /* PS0 */
536 cvt_df((double*)&fpr[reg_in1], (float*)&ps0_in1, &t);
537 cvt_df((double*)&fpr[reg_in2], (float*)&ps0_in2, &t);
538 cvt_df((double*)&fpr[reg_in3], (float*)&ps0_in3, &t);
539
540 if (scalar & SCALAR_LOW)
541 ps0_in2 = qpr[reg_in2];
542
543 func(&t, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
544
545 dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
546 ps0_in1, ps0_in2, ps0_in3, ps0_out);
547
548 if (!(scalar & SCALAR_NO_PS0))
549 cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
550
551 /* PS1 */
552 ps1_in1 = qpr[reg_in1];
553 ps1_in2 = qpr[reg_in2];
554 ps1_in3 = qpr[reg_in3];
555
556 if (scalar & SCALAR_HIGH)
557 ps1_in2 = ps0_in2;
558
559 if (!(scalar & SCALAR_NO_PS1))
560 func(&t, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
561
562 dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
563 ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]);
564
565 return EMULATE_DONE;
566}
567
568static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
569 int reg_out, int reg_in1, int reg_in2,
570 int scalar,
571 void (*func)(struct thread_struct *t,
572 u32 *dst, u32 *src1,
573 u32 *src2))
574{
575 u32 *qpr = vcpu->arch.qpr;
576 u64 *fpr = vcpu->arch.fpr;
577 u32 ps0_out;
578 u32 ps0_in1, ps0_in2;
579 u32 ps1_out;
580 u32 ps1_in1, ps1_in2;
581 struct thread_struct t;
582 t.fpscr.val = vcpu->arch.fpscr;
583
584 /* RC */
585 WARN_ON(rc);
586
587 /* PS0 */
588 cvt_df((double*)&fpr[reg_in1], (float*)&ps0_in1, &t);
589
590 if (scalar & SCALAR_LOW)
591 ps0_in2 = qpr[reg_in2];
592 else
593 cvt_df((double*)&fpr[reg_in2], (float*)&ps0_in2, &t);
594
595 func(&t, &ps0_out, &ps0_in1, &ps0_in2);
596
597 if (!(scalar & SCALAR_NO_PS0)) {
598 dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
599 ps0_in1, ps0_in2, ps0_out);
600
601 cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
602 }
603
604 /* PS1 */
605 ps1_in1 = qpr[reg_in1];
606 ps1_in2 = qpr[reg_in2];
607
608 if (scalar & SCALAR_HIGH)
609 ps1_in2 = ps0_in2;
610
611 func(&t, &ps1_out, &ps1_in1, &ps1_in2);
612
613 if (!(scalar & SCALAR_NO_PS1)) {
614 qpr[reg_out] = ps1_out;
615
616 dprintk(KERN_INFO "PS2 ps1 -> f(0x%x, 0x%x) = 0x%x\n",
617 ps1_in1, ps1_in2, qpr[reg_out]);
618 }
619
620 return EMULATE_DONE;
621}
622
623static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
624 int reg_out, int reg_in,
625 void (*func)(struct thread_struct *t,
626 u32 *dst, u32 *src1))
627{
628 u32 *qpr = vcpu->arch.qpr;
629 u64 *fpr = vcpu->arch.fpr;
630 u32 ps0_out, ps0_in;
631 u32 ps1_in;
632 struct thread_struct t;
633 t.fpscr.val = vcpu->arch.fpscr;
634
635 /* RC */
636 WARN_ON(rc);
637
638 /* PS0 */
639 cvt_df((double*)&fpr[reg_in], (float*)&ps0_in, &t);
640 func(&t, &ps0_out, &ps0_in);
641
642 dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
643 ps0_in, ps0_out);
644
645 cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
646
647 /* PS1 */
648 ps1_in = qpr[reg_in];
649 func(&t, &qpr[reg_out], &ps1_in);
650
651 dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n",
652 ps1_in, qpr[reg_out]);
653
654 return EMULATE_DONE;
655}
656
657int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
658{
659 u32 inst = kvmppc_get_last_inst(vcpu);
660 enum emulation_result emulated = EMULATE_DONE;
661
662 int ax_rd = inst_get_field(inst, 6, 10);
663 int ax_ra = inst_get_field(inst, 11, 15);
664 int ax_rb = inst_get_field(inst, 16, 20);
665 int ax_rc = inst_get_field(inst, 21, 25);
666 short full_d = inst_get_field(inst, 16, 31);
667
668 u64 *fpr_d = &vcpu->arch.fpr[ax_rd];
669 u64 *fpr_a = &vcpu->arch.fpr[ax_ra];
670 u64 *fpr_b = &vcpu->arch.fpr[ax_rb];
671 u64 *fpr_c = &vcpu->arch.fpr[ax_rc];
672
673 bool rcomp = (inst & 1) ? true : false;
674 u32 cr = kvmppc_get_cr(vcpu);
675 struct thread_struct t;
676#ifdef DEBUG
677 int i;
678#endif
679
680 t.fpscr.val = vcpu->arch.fpscr;
681
682 if (!kvmppc_inst_is_paired_single(vcpu, inst))
683 return EMULATE_FAIL;
684
685 if (!(vcpu->arch.msr & MSR_FP)) {
686 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
687 return EMULATE_AGAIN;
688 }
689
690 kvmppc_giveup_ext(vcpu, MSR_FP);
691 preempt_disable();
692 enable_kernel_fp();
693 /* Do we need to clear FE0 / FE1 here? Don't think so. */
694
695#ifdef DEBUG
696 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
697 u32 f;
698 cvt_df((double*)&vcpu->arch.fpr[i], (float*)&f, &t);
699 dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n",
700 i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]);
701 }
702#endif
703
704 switch (get_op(inst)) {
705 case OP_PSQ_L:
706 {
707 ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
708 bool w = inst_get_field(inst, 16, 16) ? true : false;
709 int i = inst_get_field(inst, 17, 19);
710
711 addr += get_d_signext(inst);
712 emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
713 break;
714 }
715 case OP_PSQ_LU:
716 {
717 ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
718 bool w = inst_get_field(inst, 16, 16) ? true : false;
719 int i = inst_get_field(inst, 17, 19);
720
721 addr += get_d_signext(inst);
722 emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
723
724 if (emulated == EMULATE_DONE)
725 kvmppc_set_gpr(vcpu, ax_ra, addr);
726 break;
727 }
728 case OP_PSQ_ST:
729 {
730 ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
731 bool w = inst_get_field(inst, 16, 16) ? true : false;
732 int i = inst_get_field(inst, 17, 19);
733
734 addr += get_d_signext(inst);
735 emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
736 break;
737 }
738 case OP_PSQ_STU:
739 {
740 ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
741 bool w = inst_get_field(inst, 16, 16) ? true : false;
742 int i = inst_get_field(inst, 17, 19);
743
744 addr += get_d_signext(inst);
745 emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
746
747 if (emulated == EMULATE_DONE)
748 kvmppc_set_gpr(vcpu, ax_ra, addr);
749 break;
750 }
751 case 4:
752 /* X form */
753 switch (inst_get_field(inst, 21, 30)) {
754 case OP_4X_PS_CMPU0:
755 /* XXX */
756 emulated = EMULATE_FAIL;
757 break;
758 case OP_4X_PSQ_LX:
759 {
760 ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
761 bool w = inst_get_field(inst, 21, 21) ? true : false;
762 int i = inst_get_field(inst, 22, 24);
763
764 addr += kvmppc_get_gpr(vcpu, ax_rb);
765 emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
766 break;
767 }
768 case OP_4X_PS_CMPO0:
769 /* XXX */
770 emulated = EMULATE_FAIL;
771 break;
772 case OP_4X_PSQ_LUX:
773 {
774 ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
775 bool w = inst_get_field(inst, 21, 21) ? true : false;
776 int i = inst_get_field(inst, 22, 24);
777
778 addr += kvmppc_get_gpr(vcpu, ax_rb);
779 emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
780
781 if (emulated == EMULATE_DONE)
782 kvmppc_set_gpr(vcpu, ax_ra, addr);
783 break;
784 }
785 case OP_4X_PS_NEG:
786 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
787 vcpu->arch.fpr[ax_rd] ^= 0x8000000000000000ULL;
788 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
789 vcpu->arch.qpr[ax_rd] ^= 0x80000000;
790 break;
791 case OP_4X_PS_CMPU1:
792 /* XXX */
793 emulated = EMULATE_FAIL;
794 break;
795 case OP_4X_PS_MR:
796 WARN_ON(rcomp);
797 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
798 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
799 break;
800 case OP_4X_PS_CMPO1:
801 /* XXX */
802 emulated = EMULATE_FAIL;
803 break;
804 case OP_4X_PS_NABS:
805 WARN_ON(rcomp);
806 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
807 vcpu->arch.fpr[ax_rd] |= 0x8000000000000000ULL;
808 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
809 vcpu->arch.qpr[ax_rd] |= 0x80000000;
810 break;
811 case OP_4X_PS_ABS:
812 WARN_ON(rcomp);
813 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
814 vcpu->arch.fpr[ax_rd] &= ~0x8000000000000000ULL;
815 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
816 vcpu->arch.qpr[ax_rd] &= ~0x80000000;
817 break;
818 case OP_4X_PS_MERGE00:
819 WARN_ON(rcomp);
820 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
821 /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
822 cvt_df((double*)&vcpu->arch.fpr[ax_rb],
823 (float*)&vcpu->arch.qpr[ax_rd], &t);
824 break;
825 case OP_4X_PS_MERGE01:
826 WARN_ON(rcomp);
827 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
828 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
829 break;
830 case OP_4X_PS_MERGE10:
831 WARN_ON(rcomp);
832 /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
833 cvt_fd((float*)&vcpu->arch.qpr[ax_ra],
834 (double*)&vcpu->arch.fpr[ax_rd], &t);
835 /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
836 cvt_df((double*)&vcpu->arch.fpr[ax_rb],
837 (float*)&vcpu->arch.qpr[ax_rd], &t);
838 break;
839 case OP_4X_PS_MERGE11:
840 WARN_ON(rcomp);
841 /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
842 cvt_fd((float*)&vcpu->arch.qpr[ax_ra],
843 (double*)&vcpu->arch.fpr[ax_rd], &t);
844 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
845 break;
846 }
847 /* XW form */
848 switch (inst_get_field(inst, 25, 30)) {
849 case OP_4XW_PSQ_STX:
850 {
851 ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
852 bool w = inst_get_field(inst, 21, 21) ? true : false;
853 int i = inst_get_field(inst, 22, 24);
854
855 addr += kvmppc_get_gpr(vcpu, ax_rb);
856 emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
857 break;
858 }
859 case OP_4XW_PSQ_STUX:
860 {
861 ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
862 bool w = inst_get_field(inst, 21, 21) ? true : false;
863 int i = inst_get_field(inst, 22, 24);
864
865 addr += kvmppc_get_gpr(vcpu, ax_rb);
866 emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
867
868 if (emulated == EMULATE_DONE)
869 kvmppc_set_gpr(vcpu, ax_ra, addr);
870 break;
871 }
872 }
873 /* A form */
874 switch (inst_get_field(inst, 26, 30)) {
875 case OP_4A_PS_SUM1:
876 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
877 ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds);
878 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rc];
879 break;
880 case OP_4A_PS_SUM0:
881 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
882 ax_ra, ax_rb, SCALAR_NO_PS1 | SCALAR_LOW, fps_fadds);
883 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rc];
884 break;
885 case OP_4A_PS_MULS0:
886 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
887 ax_ra, ax_rc, SCALAR_HIGH, fps_fmuls);
888 break;
889 case OP_4A_PS_MULS1:
890 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
891 ax_ra, ax_rc, SCALAR_LOW, fps_fmuls);
892 break;
893 case OP_4A_PS_MADDS0:
894 emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
895 ax_ra, ax_rc, ax_rb, SCALAR_HIGH, fps_fmadds);
896 break;
897 case OP_4A_PS_MADDS1:
898 emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
899 ax_ra, ax_rc, ax_rb, SCALAR_LOW, fps_fmadds);
900 break;
901 case OP_4A_PS_DIV:
902 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
903 ax_ra, ax_rb, SCALAR_NONE, fps_fdivs);
904 break;
905 case OP_4A_PS_SUB:
906 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
907 ax_ra, ax_rb, SCALAR_NONE, fps_fsubs);
908 break;
909 case OP_4A_PS_ADD:
910 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
911 ax_ra, ax_rb, SCALAR_NONE, fps_fadds);
912 break;
913 case OP_4A_PS_SEL:
914 emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
915 ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fsel);
916 break;
917 case OP_4A_PS_RES:
918 emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
919 ax_rb, fps_fres);
920 break;
921 case OP_4A_PS_MUL:
922 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
923 ax_ra, ax_rc, SCALAR_NONE, fps_fmuls);
924 break;
925 case OP_4A_PS_RSQRTE:
926 emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
927 ax_rb, fps_frsqrte);
928 break;
929 case OP_4A_PS_MSUB:
930 emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
931 ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmsubs);
932 break;
933 case OP_4A_PS_MADD:
934 emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
935 ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmadds);
936 break;
937 case OP_4A_PS_NMSUB:
938 emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
939 ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmsubs);
940 break;
941 case OP_4A_PS_NMADD:
942 emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
943 ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmadds);
944 break;
945 }
946 break;
947
948 /* Real FPU operations */
949
950 case OP_LFS:
951 {
952 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
953
954 emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
955 FPU_LS_SINGLE);
956 break;
957 }
958 case OP_LFSU:
959 {
960 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
961
962 emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
963 FPU_LS_SINGLE);
964
965 if (emulated == EMULATE_DONE)
966 kvmppc_set_gpr(vcpu, ax_ra, addr);
967 break;
968 }
969 case OP_LFD:
970 {
971 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
972
973 emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
974 FPU_LS_DOUBLE);
975 break;
976 }
977 case OP_LFDU:
978 {
979 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
980
981 emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
982 FPU_LS_DOUBLE);
983
984 if (emulated == EMULATE_DONE)
985 kvmppc_set_gpr(vcpu, ax_ra, addr);
986 break;
987 }
988 case OP_STFS:
989 {
990 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
991
992 emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
993 FPU_LS_SINGLE);
994 break;
995 }
996 case OP_STFSU:
997 {
998 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
999
1000 emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
1001 FPU_LS_SINGLE);
1002
1003 if (emulated == EMULATE_DONE)
1004 kvmppc_set_gpr(vcpu, ax_ra, addr);
1005 break;
1006 }
1007 case OP_STFD:
1008 {
1009 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
1010
1011 emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
1012 FPU_LS_DOUBLE);
1013 break;
1014 }
1015 case OP_STFDU:
1016 {
1017 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
1018
1019 emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
1020 FPU_LS_DOUBLE);
1021
1022 if (emulated == EMULATE_DONE)
1023 kvmppc_set_gpr(vcpu, ax_ra, addr);
1024 break;
1025 }
1026 case 31:
1027 switch (inst_get_field(inst, 21, 30)) {
1028 case OP_31_LFSX:
1029 {
1030 ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
1031
1032 addr += kvmppc_get_gpr(vcpu, ax_rb);
1033 emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
1034 addr, FPU_LS_SINGLE);
1035 break;
1036 }
1037 case OP_31_LFSUX:
1038 {
1039 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
1040 kvmppc_get_gpr(vcpu, ax_rb);
1041
1042 emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
1043 addr, FPU_LS_SINGLE);
1044
1045 if (emulated == EMULATE_DONE)
1046 kvmppc_set_gpr(vcpu, ax_ra, addr);
1047 break;
1048 }
1049 case OP_31_LFDX:
1050 {
1051 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
1052 kvmppc_get_gpr(vcpu, ax_rb);
1053
1054 emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
1055 addr, FPU_LS_DOUBLE);
1056 break;
1057 }
1058 case OP_31_LFDUX:
1059 {
1060 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
1061 kvmppc_get_gpr(vcpu, ax_rb);
1062
1063 emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
1064 addr, FPU_LS_DOUBLE);
1065
1066 if (emulated == EMULATE_DONE)
1067 kvmppc_set_gpr(vcpu, ax_ra, addr);
1068 break;
1069 }
1070 case OP_31_STFSX:
1071 {
1072 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
1073 kvmppc_get_gpr(vcpu, ax_rb);
1074
1075 emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
1076 addr, FPU_LS_SINGLE);
1077 break;
1078 }
1079 case OP_31_STFSUX:
1080 {
1081 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
1082 kvmppc_get_gpr(vcpu, ax_rb);
1083
1084 emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
1085 addr, FPU_LS_SINGLE);
1086
1087 if (emulated == EMULATE_DONE)
1088 kvmppc_set_gpr(vcpu, ax_ra, addr);
1089 break;
1090 }
1091 case OP_31_STFX:
1092 {
1093 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
1094 kvmppc_get_gpr(vcpu, ax_rb);
1095
1096 emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
1097 addr, FPU_LS_DOUBLE);
1098 break;
1099 }
1100 case OP_31_STFUX:
1101 {
1102 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
1103 kvmppc_get_gpr(vcpu, ax_rb);
1104
1105 emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
1106 addr, FPU_LS_DOUBLE);
1107
1108 if (emulated == EMULATE_DONE)
1109 kvmppc_set_gpr(vcpu, ax_ra, addr);
1110 break;
1111 }
1112 case OP_31_STFIWX:
1113 {
1114 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
1115 kvmppc_get_gpr(vcpu, ax_rb);
1116
1117 emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
1118 addr,
1119 FPU_LS_SINGLE_LOW);
1120 break;
1121 }
1122 break;
1123 }
1124 break;
1125 case 59:
1126 switch (inst_get_field(inst, 21, 30)) {
1127 case OP_59_FADDS:
1128 fpd_fadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1129 kvmppc_sync_qpr(vcpu, ax_rd);
1130 break;
1131 case OP_59_FSUBS:
1132 fpd_fsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1133 kvmppc_sync_qpr(vcpu, ax_rd);
1134 break;
1135 case OP_59_FDIVS:
1136 fpd_fdivs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1137 kvmppc_sync_qpr(vcpu, ax_rd);
1138 break;
1139 case OP_59_FRES:
1140 fpd_fres(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
1141 kvmppc_sync_qpr(vcpu, ax_rd);
1142 break;
1143 case OP_59_FRSQRTES:
1144 fpd_frsqrtes(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
1145 kvmppc_sync_qpr(vcpu, ax_rd);
1146 break;
1147 }
1148 switch (inst_get_field(inst, 26, 30)) {
1149 case OP_59_FMULS:
1150 fpd_fmuls(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
1151 kvmppc_sync_qpr(vcpu, ax_rd);
1152 break;
1153 case OP_59_FMSUBS:
1154 fpd_fmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1155 kvmppc_sync_qpr(vcpu, ax_rd);
1156 break;
1157 case OP_59_FMADDS:
1158 fpd_fmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1159 kvmppc_sync_qpr(vcpu, ax_rd);
1160 break;
1161 case OP_59_FNMSUBS:
1162 fpd_fnmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1163 kvmppc_sync_qpr(vcpu, ax_rd);
1164 break;
1165 case OP_59_FNMADDS:
1166 fpd_fnmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1167 kvmppc_sync_qpr(vcpu, ax_rd);
1168 break;
1169 }
1170 break;
1171 case 63:
1172 switch (inst_get_field(inst, 21, 30)) {
1173 case OP_63_MTFSB0:
1174 case OP_63_MTFSB1:
1175 case OP_63_MCRFS:
1176 case OP_63_MTFSFI:
1177 /* XXX need to implement */
1178 break;
1179 case OP_63_MFFS:
1180 /* XXX missing CR */
1181 *fpr_d = vcpu->arch.fpscr;
1182 break;
1183 case OP_63_MTFSF:
1184 /* XXX missing fm bits */
1185 /* XXX missing CR */
1186 vcpu->arch.fpscr = *fpr_b;
1187 break;
1188 case OP_63_FCMPU:
1189 {
1190 u32 tmp_cr;
1191 u32 cr0_mask = 0xf0000000;
1192 u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
1193
1194 fpd_fcmpu(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
1195 cr &= ~(cr0_mask >> cr_shift);
1196			cr |= (tmp_cr & cr0_mask) >> cr_shift;
1197 break;
1198 }
1199 case OP_63_FCMPO:
1200 {
1201 u32 tmp_cr;
1202 u32 cr0_mask = 0xf0000000;
1203 u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
1204
1205 fpd_fcmpo(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
1206 cr &= ~(cr0_mask >> cr_shift);
1207			cr |= (tmp_cr & cr0_mask) >> cr_shift;
1208 break;
1209 }
1210 case OP_63_FNEG:
1211 fpd_fneg(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
1212 break;
1213 case OP_63_FMR:
1214 *fpr_d = *fpr_b;
1215 break;
1216 case OP_63_FABS:
1217 fpd_fabs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
1218 break;
1219 case OP_63_FCPSGN:
1220 fpd_fcpsgn(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1221 break;
1222 case OP_63_FDIV:
1223 fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1224 break;
1225 case OP_63_FADD:
1226 fpd_fadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1227 break;
1228 case OP_63_FSUB:
1229 fpd_fsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1230 break;
1231 case OP_63_FCTIW:
1232 fpd_fctiw(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
1233 break;
1234 case OP_63_FCTIWZ:
1235 fpd_fctiwz(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
1236 break;
1237 case OP_63_FRSP:
1238 fpd_frsp(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
1239 kvmppc_sync_qpr(vcpu, ax_rd);
1240 break;
1241 case OP_63_FRSQRTE:
1242 {
1243			double one = 1.0;
1244
1245 /* fD = sqrt(fB) */
1246 fpd_fsqrt(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
1247 /* fD = 1.0f / fD */
1248 fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
1249 break;
1250 }
1251 }
1252 switch (inst_get_field(inst, 26, 30)) {
1253 case OP_63_FMUL:
1254 fpd_fmul(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
1255 break;
1256 case OP_63_FSEL:
1257 fpd_fsel(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1258 break;
1259 case OP_63_FMSUB:
1260 fpd_fmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1261 break;
1262 case OP_63_FMADD:
1263 fpd_fmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1264 break;
1265 case OP_63_FNMSUB:
1266 fpd_fnmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1267 break;
1268 case OP_63_FNMADD:
1269 fpd_fnmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1270 break;
1271 }
1272 break;
1273 }
1274
1275#ifdef DEBUG
1276 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
1277 u32 f;
1278 cvt_df((double*)&vcpu->arch.fpr[i], (float*)&f, &t);
1279 dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
1280 }
1281#endif
1282
1283 if (rcomp)
1284 kvmppc_set_cr(vcpu, cr);
1285
1286 preempt_enable();
1287
1288 return emulated;
1289}
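The FCMPU/FCMPO cases above move the CR0-style result nibble that fpd_fcmpu()/fpd_fcmpo() deliver through tmp_cr into the CR field selected by crfD (instruction bits 6-8). A stand-alone sketch of that mask/shift arithmetic, using a hypothetical helper name that does not appear in the patch:

#include <stdint.h>

/* Copy the compare result from the CR0 slot of 'result_cr' into CR
 * field 'crfd' (0..7) of 'cr'. */
static uint32_t set_cr_field(uint32_t cr, uint32_t result_cr, int crfd)
{
	uint32_t cr0_mask = 0xf0000000;
	uint32_t cr_shift = crfd * 4;

	cr &= ~(cr0_mask >> cr_shift);			/* clear the target field */
	cr |= (result_cr & cr0_mask) >> cr_shift;	/* move the CR0 nibble in */
	return cr;
}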
diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index c83c60ad96c5..506d5c316c96 100644
--- a/arch/powerpc/kvm/book3s_64_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -22,7 +22,10 @@
22#include <asm/reg.h> 22#include <asm/reg.h>
23#include <asm/page.h> 23#include <asm/page.h>
24#include <asm/asm-offsets.h> 24#include <asm/asm-offsets.h>
25
26#ifdef CONFIG_PPC_BOOK3S_64
25#include <asm/exception-64s.h> 27#include <asm/exception-64s.h>
28#endif
26 29
27/***************************************************************************** 30/*****************************************************************************
28 * * 31 * *
@@ -30,6 +33,39 @@
30 * * 33 * *
31 ****************************************************************************/ 34 ****************************************************************************/
32 35
36#if defined(CONFIG_PPC_BOOK3S_64)
37
38#define LOAD_SHADOW_VCPU(reg) \
39 mfspr reg, SPRN_SPRG_PACA
40
41#define SHADOW_VCPU_OFF PACA_KVM_SVCPU
42#define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR)
43#define FUNC(name) GLUE(.,name)
44
45#elif defined(CONFIG_PPC_BOOK3S_32)
46
47#define LOAD_SHADOW_VCPU(reg) \
48 mfspr reg, SPRN_SPRG_THREAD; \
49 lwz reg, THREAD_KVM_SVCPU(reg); \
50 /* PPC32 can have a NULL pointer - let's check for that */ \
51 mtspr SPRN_SPRG_SCRATCH1, r12; /* Save r12 */ \
52 mfcr r12; \
53 cmpwi reg, 0; \
54 bne 1f; \
55 mfspr reg, SPRN_SPRG_SCRATCH0; \
56 mtcr r12; \
57 mfspr r12, SPRN_SPRG_SCRATCH1; \
58 b kvmppc_resume_\intno; \
591:; \
60 mtcr r12; \
61 mfspr r12, SPRN_SPRG_SCRATCH1; \
62 tophys(reg, reg)
63
64#define SHADOW_VCPU_OFF 0
65#define MSR_NOIRQ MSR_KERNEL
66#define FUNC(name) name
67
68#endif
33 69
34.macro INTERRUPT_TRAMPOLINE intno 70.macro INTERRUPT_TRAMPOLINE intno
35 71
@@ -42,19 +78,19 @@ kvmppc_trampoline_\intno:
42 * First thing to do is to find out if we're coming 78 * First thing to do is to find out if we're coming
43 * from a KVM guest or a Linux process. 79 * from a KVM guest or a Linux process.
44 * 80 *
45 * To distinguish, we check a magic byte in the PACA 81 * To distinguish, we check a magic byte in the PACA/current
46 */ 82 */
47 mfspr r13, SPRN_SPRG_PACA /* r13 = PACA */ 83 LOAD_SHADOW_VCPU(r13)
48 std r12, PACA_KVM_SCRATCH0(r13) 84 PPC_STL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
49 mfcr r12 85 mfcr r12
50 stw r12, PACA_KVM_SCRATCH1(r13) 86 stw r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
51 lbz r12, PACA_KVM_IN_GUEST(r13) 87 lbz r12, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
52 cmpwi r12, KVM_GUEST_MODE_NONE 88 cmpwi r12, KVM_GUEST_MODE_NONE
53 bne ..kvmppc_handler_hasmagic_\intno 89 bne ..kvmppc_handler_hasmagic_\intno
54 /* No KVM guest? Then jump back to the Linux handler! */ 90 /* No KVM guest? Then jump back to the Linux handler! */
55 lwz r12, PACA_KVM_SCRATCH1(r13) 91 lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
56 mtcr r12 92 mtcr r12
57 ld r12, PACA_KVM_SCRATCH0(r13) 93 PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
58 mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */ 94 mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */
59 b kvmppc_resume_\intno /* Get back original handler */ 95 b kvmppc_resume_\intno /* Get back original handler */
60 96
@@ -76,9 +112,7 @@ kvmppc_trampoline_\intno:
76INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSTEM_RESET 112INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSTEM_RESET
77INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK 113INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK
78INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE 114INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE
79INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_SEGMENT
80INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE 115INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE
81INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_SEGMENT
82INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL 116INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL
83INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT 117INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT
84INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM 118INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM
@@ -88,7 +122,14 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSCALL
88INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_TRACE 122INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_TRACE
89INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PERFMON 123INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PERFMON
90INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC 124INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC
125
126/* These are only available on 64-bit machines */
127
128#ifdef CONFIG_PPC_BOOK3S_64
129INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_SEGMENT
130INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_SEGMENT
91INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX 131INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
132#endif
92 133
93/* 134/*
94 * Bring us back to the faulting code, but skip the 135 * Bring us back to the faulting code, but skip the
@@ -99,11 +140,11 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
99 * 140 *
100 * Input Registers: 141 * Input Registers:
101 * 142 *
102 * R12 = free 143 * R12 = free
103 * R13 = PACA 144 * R13 = Shadow VCPU (PACA)
104 * PACA.KVM.SCRATCH0 = guest R12 145 * SVCPU.SCRATCH0 = guest R12
105 * PACA.KVM.SCRATCH1 = guest CR 146 * SVCPU.SCRATCH1 = guest CR
106 * SPRG_SCRATCH0 = guest R13 147 * SPRG_SCRATCH0 = guest R13
107 * 148 *
108 */ 149 */
109kvmppc_handler_skip_ins: 150kvmppc_handler_skip_ins:
@@ -114,9 +155,9 @@ kvmppc_handler_skip_ins:
114 mtsrr0 r12 155 mtsrr0 r12
115 156
116 /* Clean up all state */ 157 /* Clean up all state */
117 lwz r12, PACA_KVM_SCRATCH1(r13) 158 lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
118 mtcr r12 159 mtcr r12
119 ld r12, PACA_KVM_SCRATCH0(r13) 160 PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
120 mfspr r13, SPRN_SPRG_SCRATCH0 161 mfspr r13, SPRN_SPRG_SCRATCH0
121 162
122 /* And get back into the code */ 163 /* And get back into the code */
@@ -147,41 +188,48 @@ kvmppc_handler_lowmem_trampoline_end:
147 * 188 *
148 * R3 = function 189 * R3 = function
149 * R4 = MSR 190 * R4 = MSR
150 * R5 = CTR 191 * R5 = scratch register
151 * 192 *
152 */ 193 */
153_GLOBAL(kvmppc_rmcall) 194_GLOBAL(kvmppc_rmcall)
154 mtmsr r4 /* Disable relocation, so mtsrr 195 LOAD_REG_IMMEDIATE(r5, MSR_NOIRQ)
196 mtmsr r5 /* Disable relocation and interrupts, so mtsrr
155 doesn't get interrupted */ 197 doesn't get interrupted */
156 mtctr r5 198 sync
157 mtsrr0 r3 199 mtsrr0 r3
158 mtsrr1 r4 200 mtsrr1 r4
159 RFI 201 RFI
160 202
203#if defined(CONFIG_PPC_BOOK3S_32)
204#define STACK_LR INT_FRAME_SIZE+4
205#elif defined(CONFIG_PPC_BOOK3S_64)
206#define STACK_LR _LINK
207#endif
208
161/* 209/*
162 * Activate current's external feature (FPU/Altivec/VSX) 210 * Activate current's external feature (FPU/Altivec/VSX)
163 */ 211 */
164#define define_load_up(what) \ 212#define define_load_up(what) \
165 \ 213 \
166_GLOBAL(kvmppc_load_up_ ## what); \ 214_GLOBAL(kvmppc_load_up_ ## what); \
167 subi r1, r1, INT_FRAME_SIZE; \ 215 PPC_STLU r1, -INT_FRAME_SIZE(r1); \
168 mflr r3; \ 216 mflr r3; \
169 std r3, _LINK(r1); \ 217 PPC_STL r3, STACK_LR(r1); \
170 mfmsr r4; \ 218 PPC_STL r20, _NIP(r1); \
171 std r31, GPR3(r1); \ 219 mfmsr r20; \
172 mr r31, r4; \ 220 LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE); \
173 li r5, MSR_DR; \ 221 andc r3,r20,r3; /* Disable DR,EE */ \
174 oris r5, r5, MSR_EE@h; \ 222 mtmsr r3; \
175 andc r4, r4, r5; \ 223 sync; \
176 mtmsr r4; \ 224 \
177 \ 225 bl FUNC(load_up_ ## what); \
178 bl .load_up_ ## what; \ 226 \
179 \ 227 mtmsr r20; /* Enable DR,EE */ \
180 mtmsr r31; \ 228 sync; \
181 ld r3, _LINK(r1); \ 229 PPC_LL r3, STACK_LR(r1); \
182 ld r31, GPR3(r1); \ 230 PPC_LL r20, _NIP(r1); \
183 addi r1, r1, INT_FRAME_SIZE; \ 231 mtlr r3; \
184 mtlr r3; \ 232 addi r1, r1, INT_FRAME_SIZE; \
185 blr 233 blr
186 234
187define_load_up(fpu) 235define_load_up(fpu)
@@ -194,11 +242,10 @@ define_load_up(vsx)
194 242
195.global kvmppc_trampoline_lowmem 243.global kvmppc_trampoline_lowmem
196kvmppc_trampoline_lowmem: 244kvmppc_trampoline_lowmem:
197 .long kvmppc_handler_lowmem_trampoline - _stext 245 .long kvmppc_handler_lowmem_trampoline - CONFIG_KERNEL_START
198 246
199.global kvmppc_trampoline_enter 247.global kvmppc_trampoline_enter
200kvmppc_trampoline_enter: 248kvmppc_trampoline_enter:
201 .long kvmppc_handler_trampoline_enter - _stext 249 .long kvmppc_handler_trampoline_enter - CONFIG_KERNEL_START
202
203#include "book3s_64_slb.S"
204 250
251#include "book3s_segment.S"
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
new file mode 100644
index 000000000000..7c52ed0b7051
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -0,0 +1,259 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright SUSE Linux Products GmbH 2010
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20/* Real mode helpers */
21
22#if defined(CONFIG_PPC_BOOK3S_64)
23
24#define GET_SHADOW_VCPU(reg) \
25 addi reg, r13, PACA_KVM_SVCPU
26
27#elif defined(CONFIG_PPC_BOOK3S_32)
28
29#define GET_SHADOW_VCPU(reg) \
30 tophys(reg, r2); \
31 lwz reg, (THREAD + THREAD_KVM_SVCPU)(reg); \
32 tophys(reg, reg)
33
34#endif
35
36/* Quick guest instruction fetch - disable this when running nested under KVM */
37#define USE_QUICK_LAST_INST
38
39
40/* Get helper functions for subarch specific functionality */
41
42#if defined(CONFIG_PPC_BOOK3S_64)
43#include "book3s_64_slb.S"
44#elif defined(CONFIG_PPC_BOOK3S_32)
45#include "book3s_32_sr.S"
46#endif
47
48/******************************************************************************
49 * *
50 * Entry code *
51 * *
52 *****************************************************************************/
53
54.global kvmppc_handler_trampoline_enter
55kvmppc_handler_trampoline_enter:
56
57 /* Required state:
58 *
59 * MSR = ~IR|DR
60 * R13 = PACA
61 * R1 = host R1
62 * R2 = host R2
63 * R10 = guest MSR
64 * all other volatile GPRS = free
65 * SVCPU[CR] = guest CR
66 * SVCPU[XER] = guest XER
67 * SVCPU[CTR] = guest CTR
68 * SVCPU[LR] = guest LR
69 */
70
71 /* r3 = shadow vcpu */
72 GET_SHADOW_VCPU(r3)
73
74 /* Move SRR0 and SRR1 into the respective regs */
75 PPC_LL r9, SVCPU_PC(r3)
76 mtsrr0 r9
77 mtsrr1 r10
78
79 /* Activate guest mode, so faults get handled by KVM */
80 li r11, KVM_GUEST_MODE_GUEST
81 stb r11, SVCPU_IN_GUEST(r3)
82
83 /* Switch to guest segment. This is subarch specific. */
84 LOAD_GUEST_SEGMENTS
85
86 /* Enter guest */
87
88 PPC_LL r4, (SVCPU_CTR)(r3)
89 PPC_LL r5, (SVCPU_LR)(r3)
90 lwz r6, (SVCPU_CR)(r3)
91 lwz r7, (SVCPU_XER)(r3)
92
93 mtctr r4
94 mtlr r5
95 mtcr r6
96 mtxer r7
97
98 PPC_LL r0, (SVCPU_R0)(r3)
99 PPC_LL r1, (SVCPU_R1)(r3)
100 PPC_LL r2, (SVCPU_R2)(r3)
101 PPC_LL r4, (SVCPU_R4)(r3)
102 PPC_LL r5, (SVCPU_R5)(r3)
103 PPC_LL r6, (SVCPU_R6)(r3)
104 PPC_LL r7, (SVCPU_R7)(r3)
105 PPC_LL r8, (SVCPU_R8)(r3)
106 PPC_LL r9, (SVCPU_R9)(r3)
107 PPC_LL r10, (SVCPU_R10)(r3)
108 PPC_LL r11, (SVCPU_R11)(r3)
109 PPC_LL r12, (SVCPU_R12)(r3)
110 PPC_LL r13, (SVCPU_R13)(r3)
111
112 PPC_LL r3, (SVCPU_R3)(r3)
113
114 RFI
115kvmppc_handler_trampoline_enter_end:
116
117
118
119/******************************************************************************
120 * *
121 * Exit code *
122 * *
123 *****************************************************************************/
124
125.global kvmppc_handler_trampoline_exit
126kvmppc_handler_trampoline_exit:
127
128 /* Register usage at this point:
129 *
130 * SPRG_SCRATCH0 = guest R13
131 * R12 = exit handler id
132 * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
133 * SVCPU.SCRATCH0 = guest R12
134 * SVCPU.SCRATCH1 = guest CR
135 *
136 */
137
138 /* Save registers */
139
140 PPC_STL r0, (SHADOW_VCPU_OFF + SVCPU_R0)(r13)
141 PPC_STL r1, (SHADOW_VCPU_OFF + SVCPU_R1)(r13)
142 PPC_STL r2, (SHADOW_VCPU_OFF + SVCPU_R2)(r13)
143 PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_R3)(r13)
144 PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_R4)(r13)
145 PPC_STL r5, (SHADOW_VCPU_OFF + SVCPU_R5)(r13)
146 PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_R6)(r13)
147 PPC_STL r7, (SHADOW_VCPU_OFF + SVCPU_R7)(r13)
148 PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R8)(r13)
149 PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R9)(r13)
150 PPC_STL r10, (SHADOW_VCPU_OFF + SVCPU_R10)(r13)
151 PPC_STL r11, (SHADOW_VCPU_OFF + SVCPU_R11)(r13)
152
153 /* Restore R1/R2 so we can handle faults */
154 PPC_LL r1, (SHADOW_VCPU_OFF + SVCPU_HOST_R1)(r13)
155 PPC_LL r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13)
156
157 /* Save guest PC and MSR */
158 mfsrr0 r3
159 mfsrr1 r4
160
161 PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13)
162 PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13)
163
164 /* Get scratch'ed off registers */
165 mfspr r9, SPRN_SPRG_SCRATCH0
166 PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
167 lwz r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
168
169 PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R13)(r13)
170 PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R12)(r13)
171 stw r7, (SHADOW_VCPU_OFF + SVCPU_CR)(r13)
172
173 /* Save more register state */
174
175 mfxer r5
176 mfdar r6
177 mfdsisr r7
178 mfctr r8
179 mflr r9
180
181 stw r5, (SHADOW_VCPU_OFF + SVCPU_XER)(r13)
182 PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_FAULT_DAR)(r13)
183 stw r7, (SHADOW_VCPU_OFF + SVCPU_FAULT_DSISR)(r13)
184 PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_CTR)(r13)
185 PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_LR)(r13)
186
187	/*
188	 * In order to easily get at the last instruction that we
189	 * took the #vmexit on, we exploit the fact that the
190	 * virtual layout is still the same here, so we can just
191	 * load from the guest's PC address.
192	 */
193
194 /* We only load the last instruction when it's safe */
195 cmpwi r12, BOOK3S_INTERRUPT_DATA_STORAGE
196 beq ld_last_inst
197 cmpwi r12, BOOK3S_INTERRUPT_PROGRAM
198 beq ld_last_inst
199 cmpwi r12, BOOK3S_INTERRUPT_ALIGNMENT
200 beq- ld_last_inst
201
202 b no_ld_last_inst
203
204ld_last_inst:
205 /* Save off the guest instruction we're at */
206
207 /* In case lwz faults */
208 li r0, KVM_INST_FETCH_FAILED
209
210#ifdef USE_QUICK_LAST_INST
211
212 /* Set guest mode to 'jump over instruction' so if lwz faults
213 * we'll just continue at the next IP. */
214 li r9, KVM_GUEST_MODE_SKIP
215 stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
216
217 /* 1) enable paging for data */
218 mfmsr r9
219 ori r11, r9, MSR_DR /* Enable paging for data */
220 mtmsr r11
221 sync
222 /* 2) fetch the instruction */
223 lwz r0, 0(r3)
224 /* 3) disable paging again */
225 mtmsr r9
226 sync
227
228#endif
229 stw r0, (SHADOW_VCPU_OFF + SVCPU_LAST_INST)(r13)
230
231no_ld_last_inst:
232
233 /* Unset guest mode */
234 li r9, KVM_GUEST_MODE_NONE
235 stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
236
237 /* Switch back to host MMU */
238 LOAD_HOST_SEGMENTS
239
240 /* Register usage at this point:
241 *
242 * R1 = host R1
243 * R2 = host R2
244 * R12 = exit handler id
245 * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
246 * SVCPU.* = guest *
247 *
248 */
249
250 /* RFI into the highmem handler */
251 mfmsr r7
252 ori r7, r7, MSR_IR|MSR_DR|MSR_RI|MSR_ME /* Enable paging */
253 mtsrr1 r7
254 /* Load highmem handler address */
255 PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_VMHANDLER)(r13)
256 mtsrr0 r8
257
258 RFI
259kvmppc_handler_trampoline_exit_end:
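The USE_QUICK_LAST_INST path above is worth restating in C. A rough sketch under assumed names (read_guest_u32() stands in for the lwz through the still-identical guest mapping, and the MSR_DR toggling around the access is elided): the vcpu is flagged KVM_GUEST_MODE_SKIP so that, if the fetch itself faults, the trampoline resumes past the load and the failure marker survives.

#include <stdint.h>

enum { KVM_GUEST_MODE_NONE, KVM_GUEST_MODE_GUEST, KVM_GUEST_MODE_SKIP };
#define KVM_INST_FETCH_FAILED	0xffffffffU	/* assumed sentinel value */

struct svcpu_sketch {
	uint8_t  in_guest;
	uint32_t last_inst;
};

static void fetch_last_inst(struct svcpu_sketch *svcpu,
			    uint32_t (*read_guest_u32)(unsigned long),
			    unsigned long guest_pc)
{
	uint32_t inst = KVM_INST_FETCH_FAILED;	/* kept if the read faults */

	svcpu->in_guest = KVM_GUEST_MODE_SKIP;	/* a fault skips the load */
	inst = read_guest_u32(guest_pc);
	svcpu->in_guest = KVM_GUEST_MODE_NONE;
	svcpu->last_inst = inst;
}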
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 2a3a1953d4bd..a33ab8cc2ccc 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -133,6 +133,12 @@ void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
133 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL); 133 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
134} 134}
135 135
136void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
137 struct kvm_interrupt *irq)
138{
139 clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
140}
141
136/* Deliver the interrupt of the corresponding priority, if possible. */ 142/* Deliver the interrupt of the corresponding priority, if possible. */
137static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, 143static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
138 unsigned int priority) 144 unsigned int priority)
@@ -479,6 +485,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
479{ 485{
480 int i; 486 int i;
481 487
488 vcpu_load(vcpu);
489
482 regs->pc = vcpu->arch.pc; 490 regs->pc = vcpu->arch.pc;
483 regs->cr = kvmppc_get_cr(vcpu); 491 regs->cr = kvmppc_get_cr(vcpu);
484 regs->ctr = vcpu->arch.ctr; 492 regs->ctr = vcpu->arch.ctr;
@@ -499,6 +507,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
499 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) 507 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
500 regs->gpr[i] = kvmppc_get_gpr(vcpu, i); 508 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
501 509
510 vcpu_put(vcpu);
511
502 return 0; 512 return 0;
503} 513}
504 514
@@ -506,6 +516,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
506{ 516{
507 int i; 517 int i;
508 518
519 vcpu_load(vcpu);
520
509 vcpu->arch.pc = regs->pc; 521 vcpu->arch.pc = regs->pc;
510 kvmppc_set_cr(vcpu, regs->cr); 522 kvmppc_set_cr(vcpu, regs->cr);
511 vcpu->arch.ctr = regs->ctr; 523 vcpu->arch.ctr = regs->ctr;
@@ -525,6 +537,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
525 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) 537 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
526 kvmppc_set_gpr(vcpu, i, regs->gpr[i]); 538 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
527 539
540 vcpu_put(vcpu);
541
528 return 0; 542 return 0;
529} 543}
530 544
@@ -553,7 +567,12 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
553int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 567int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
554 struct kvm_translation *tr) 568 struct kvm_translation *tr)
555{ 569{
556 return kvmppc_core_vcpu_translate(vcpu, tr); 570 int r;
571
572 vcpu_load(vcpu);
573 r = kvmppc_core_vcpu_translate(vcpu, tr);
574 vcpu_put(vcpu);
575 return r;
557} 576}
558 577
559int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) 578int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
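The booke.c hunks above all apply one pattern: bracket the arch-specific vcpu ioctl body with vcpu_load()/vcpu_put(), exactly as kvm_arch_vcpu_ioctl_translate() now does. As a generic template - a sketch in which do_vcpu_work() is a hypothetical stand-in for the arch-specific body:

int kvm_arch_vcpu_ioctl_example(struct kvm_vcpu *vcpu, void *arg)
{
	int r;

	vcpu_load(vcpu);		/* bind the vcpu to this thread */
	r = do_vcpu_work(vcpu, arg);	/* hypothetical arch-specific body */
	vcpu_put(vcpu);

	return r;
}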
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 669a5c5fc7d7..bc2b4004eb26 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -161,7 +161,7 @@ static int __init kvmppc_e500_init(void)
161 flush_icache_range(kvmppc_booke_handlers, 161 flush_icache_range(kvmppc_booke_handlers,
162 kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); 162 kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
163 163
164 return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), THIS_MODULE); 164 return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
165} 165}
166 166
167static void __init kvmppc_e500_exit(void) 167static void __init kvmppc_e500_exit(void)
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index cb72a65f4ecc..4568ec386c2a 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -38,10 +38,12 @@
38#define OP_31_XOP_LBZX 87 38#define OP_31_XOP_LBZX 87
39#define OP_31_XOP_STWX 151 39#define OP_31_XOP_STWX 151
40#define OP_31_XOP_STBX 215 40#define OP_31_XOP_STBX 215
41#define OP_31_XOP_LBZUX 119
41#define OP_31_XOP_STBUX 247 42#define OP_31_XOP_STBUX 247
42#define OP_31_XOP_LHZX 279 43#define OP_31_XOP_LHZX 279
43#define OP_31_XOP_LHZUX 311 44#define OP_31_XOP_LHZUX 311
44#define OP_31_XOP_MFSPR 339 45#define OP_31_XOP_MFSPR 339
46#define OP_31_XOP_LHAX 343
45#define OP_31_XOP_STHX 407 47#define OP_31_XOP_STHX 407
46#define OP_31_XOP_STHUX 439 48#define OP_31_XOP_STHUX 439
47#define OP_31_XOP_MTSPR 467 49#define OP_31_XOP_MTSPR 467
@@ -62,10 +64,12 @@
62#define OP_STBU 39 64#define OP_STBU 39
63#define OP_LHZ 40 65#define OP_LHZ 40
64#define OP_LHZU 41 66#define OP_LHZU 41
67#define OP_LHA 42
68#define OP_LHAU 43
65#define OP_STH 44 69#define OP_STH 44
66#define OP_STHU 45 70#define OP_STHU 45
67 71
68#ifdef CONFIG_PPC64 72#ifdef CONFIG_PPC_BOOK3S
69static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu) 73static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
70{ 74{
71 return 1; 75 return 1;
@@ -82,7 +86,7 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
82 unsigned long dec_nsec; 86 unsigned long dec_nsec;
83 87
84 pr_debug("mtDEC: %x\n", vcpu->arch.dec); 88 pr_debug("mtDEC: %x\n", vcpu->arch.dec);
85#ifdef CONFIG_PPC64 89#ifdef CONFIG_PPC_BOOK3S
86 /* mtdec lowers the interrupt line when positive. */ 90 /* mtdec lowers the interrupt line when positive. */
87 kvmppc_core_dequeue_dec(vcpu); 91 kvmppc_core_dequeue_dec(vcpu);
88 92
@@ -128,7 +132,7 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
128 * from opcode tables in the future. */ 132 * from opcode tables in the future. */
129int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) 133int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
130{ 134{
131 u32 inst = vcpu->arch.last_inst; 135 u32 inst = kvmppc_get_last_inst(vcpu);
132 u32 ea; 136 u32 ea;
133 int ra; 137 int ra;
134 int rb; 138 int rb;
@@ -143,13 +147,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
143 147
144 pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); 148 pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
145 149
146 /* Try again next time */
147 if (inst == KVM_INST_FETCH_FAILED)
148 return EMULATE_DONE;
149
150 switch (get_op(inst)) { 150 switch (get_op(inst)) {
151 case OP_TRAP: 151 case OP_TRAP:
152#ifdef CONFIG_PPC64 152#ifdef CONFIG_PPC_BOOK3S
153 case OP_TRAP_64: 153 case OP_TRAP_64:
154 kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); 154 kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
155#else 155#else
@@ -171,6 +171,19 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
171 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 171 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
172 break; 172 break;
173 173
174 case OP_31_XOP_LBZUX:
175 rt = get_rt(inst);
176 ra = get_ra(inst);
177 rb = get_rb(inst);
178
179 ea = kvmppc_get_gpr(vcpu, rb);
180 if (ra)
181 ea += kvmppc_get_gpr(vcpu, ra);
182
183 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
184 kvmppc_set_gpr(vcpu, ra, ea);
185 break;
186
174 case OP_31_XOP_STWX: 187 case OP_31_XOP_STWX:
175 rs = get_rs(inst); 188 rs = get_rs(inst);
176 emulated = kvmppc_handle_store(run, vcpu, 189 emulated = kvmppc_handle_store(run, vcpu,
@@ -200,6 +213,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
200 kvmppc_set_gpr(vcpu, rs, ea); 213 kvmppc_set_gpr(vcpu, rs, ea);
201 break; 214 break;
202 215
216 case OP_31_XOP_LHAX:
217 rt = get_rt(inst);
218 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
219 break;
220
203 case OP_31_XOP_LHZX: 221 case OP_31_XOP_LHZX:
204 rt = get_rt(inst); 222 rt = get_rt(inst);
205 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 223 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
@@ -450,6 +468,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
450 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 468 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
451 break; 469 break;
452 470
471 case OP_LHA:
472 rt = get_rt(inst);
473 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
474 break;
475
476 case OP_LHAU:
477 ra = get_ra(inst);
478 rt = get_rt(inst);
479 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
480 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
481 break;
482
453 case OP_STH: 483 case OP_STH:
454 rs = get_rs(inst); 484 rs = get_rs(inst);
455 emulated = kvmppc_handle_store(run, vcpu, 485 emulated = kvmppc_handle_store(run, vcpu,
@@ -472,7 +502,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
472 502
473 if (emulated == EMULATE_FAIL) { 503 if (emulated == EMULATE_FAIL) {
474 emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); 504 emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
475 if (emulated == EMULATE_FAIL) { 505 if (emulated == EMULATE_AGAIN) {
506 advance = 0;
507 } else if (emulated == EMULATE_FAIL) {
476 advance = 0; 508 advance = 0;
477 printk(KERN_ERR "Couldn't emulate instruction 0x%08x " 509 printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
478 "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); 510 "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
@@ -480,10 +512,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
480 } 512 }
481 } 513 }
482 514
483 trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated); 515 trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
484 516
517 /* Advance past emulated instruction. */
485 if (advance) 518 if (advance)
486 vcpu->arch.pc += 4; /* Advance past emulated instruction. */ 519 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
487 520
488 return emulated; 521 return emulated;
489} 522}
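The new load cases follow two conventions already used in this switch: X-form instructions compute the effective address as GPR[RB] plus GPR[RA] when RA is nonzero, and update forms then write an address back into RA (the D-form OP_LHAU case stores vcpu->arch.paddr_accessed, matching the existing OP_LBZU/OP_LHZU handling). The indexed-EA rule in isolation, as a sketch with a hypothetical helper name:

/* Illustrative only: X-form effective-address calculation. */
static ulong sketch_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
        ulong ea = kvmppc_get_gpr(vcpu, rb);

        if (ra)                 /* RA == 0 means no base register */
                ea += kvmppc_get_gpr(vcpu, ra);
        return ea;
}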
diff --git a/arch/powerpc/kvm/fpu.S b/arch/powerpc/kvm/fpu.S
new file mode 100644
index 000000000000..2b340a3eee90
--- /dev/null
+++ b/arch/powerpc/kvm/fpu.S
@@ -0,0 +1,273 @@
1/*
2 * FPU helper code to use FPU operations from inside the kernel
3 *
4 * Copyright (C) 2010 Alexander Graf (agraf@suse.de)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <asm/reg.h>
14#include <asm/page.h>
15#include <asm/mmu.h>
16#include <asm/pgtable.h>
17#include <asm/cputable.h>
18#include <asm/cache.h>
19#include <asm/thread_info.h>
20#include <asm/ppc_asm.h>
21#include <asm/asm-offsets.h>
22
23/* Instructions operating on single parameters */
24
25/*
26 * Single operation with one input operand
27 *
28 * R3 = (double*)&fpscr
29 * R4 = (short*)&result
30 * R5 = (short*)&param1
31 */
32#define FPS_ONE_IN(name) \
33_GLOBAL(fps_ ## name); \
34 lfd 0,0(r3); /* load up fpscr value */ \
35 MTFSF_L(0); \
36 lfs 0,0(r5); \
37 \
38 name 0,0; \
39 \
40 stfs 0,0(r4); \
41 mffs 0; \
42 stfd 0,0(r3); /* save new fpscr value */ \
43 blr
44
45/*
46 * Single operation with two input operands
47 *
48 * R3 = (double*)&fpscr
49 * R4 = (short*)&result
50 * R5 = (short*)&param1
51 * R6 = (short*)&param2
52 */
53#define FPS_TWO_IN(name) \
54_GLOBAL(fps_ ## name); \
55 lfd 0,0(r3); /* load up fpscr value */ \
56 MTFSF_L(0); \
57 lfs 0,0(r5); \
58 lfs 1,0(r6); \
59 \
60 name 0,0,1; \
61 \
62 stfs 0,0(r4); \
63 mffs 0; \
64 stfd 0,0(r3); /* save new fpscr value */ \
65 blr
66
67/*
68 * Single operation with three input operands
69 *
70 * R3 = (double*)&fpscr
71 * R4 = (short*)&result
72 * R5 = (short*)&param1
73 * R6 = (short*)&param2
74 * R7 = (short*)&param3
75 */
76#define FPS_THREE_IN(name) \
77_GLOBAL(fps_ ## name); \
78 lfd 0,0(r3); /* load up fpscr value */ \
79 MTFSF_L(0); \
80 lfs 0,0(r5); \
81 lfs 1,0(r6); \
82 lfs 2,0(r7); \
83 \
84 name 0,0,1,2; \
85 \
86 stfs 0,0(r4); \
87 mffs 0; \
88 stfd 0,0(r3); /* save new fpscr value */ \
89 blr
90
91FPS_ONE_IN(fres)
92FPS_ONE_IN(frsqrte)
93FPS_ONE_IN(fsqrts)
94FPS_TWO_IN(fadds)
95FPS_TWO_IN(fdivs)
96FPS_TWO_IN(fmuls)
97FPS_TWO_IN(fsubs)
98FPS_THREE_IN(fmadds)
99FPS_THREE_IN(fmsubs)
100FPS_THREE_IN(fnmadds)
101FPS_THREE_IN(fnmsubs)
102FPS_THREE_IN(fsel)
103
104
105/* Instructions operating on double parameters */
106
107/*
108 * Beginning of double instruction processing
109 *
110 * R3 = (double*)&fpscr
111 * R4 = (u32*)&cr
112 * R5 = (double*)&result
113 * R6 = (double*)&param1
114 * R7 = (double*)&param2 [load_two]
115 * R8 = (double*)&param3 [load_three]
116 * LR = instruction call function
117 */
118fpd_load_three:
119 lfd 2,0(r8) /* load param3 */
120fpd_load_two:
121 lfd 1,0(r7) /* load param2 */
122fpd_load_one:
123 lfd 0,0(r6) /* load param1 */
124fpd_load_none:
125 lfd 3,0(r3) /* load up fpscr value */
126 MTFSF_L(3)
127 lwz r6, 0(r4) /* load cr */
128 mtcr r6
129 blr
130
131/*
132 * End of double instruction processing
133 *
134 * R3 = (double*)&fpscr
135 * R4 = (u32*)&cr
136 * R5 = (double*)&result
137 * LR = caller of instruction call function
138 */
139fpd_return:
140 mfcr r6
141 stfd 0,0(r5) /* save result */
142 mffs 0
143 stfd 0,0(r3) /* save new fpscr value */
144 stw r6,0(r4) /* save new cr value */
145 blr
146
147/*
148 * Double operation with no input operand
149 *
150 * R3 = (double*)&fpscr
151 * R4 = (u32*)&cr
152 * R5 = (double*)&result
153 */
154#define FPD_NONE_IN(name) \
155_GLOBAL(fpd_ ## name); \
156 mflr r12; \
157 bl fpd_load_none; \
158 mtlr r12; \
159 \
160 name. 0; /* call instruction */ \
161 b fpd_return
162
163/*
164 * Double operation with one input operand
165 *
166 * R3 = (double*)&fpscr
167 * R4 = (u32*)&cr
168 * R5 = (double*)&result
169 * R6 = (double*)&param1
170 */
171#define FPD_ONE_IN(name) \
172_GLOBAL(fpd_ ## name); \
173 mflr r12; \
174 bl fpd_load_one; \
175 mtlr r12; \
176 \
177 name. 0,0; /* call instruction */ \
178 b fpd_return
179
180/*
181 * Double operation with two input operands
182 *
183 * R3 = (double*)&fpscr
184 * R4 = (u32*)&cr
185 * R5 = (double*)&result
186 * R6 = (double*)&param1
187 * R7 = (double*)&param2
188 * R8 = (double*)&param3
189 */
190#define FPD_TWO_IN(name) \
191_GLOBAL(fpd_ ## name); \
192 mflr r12; \
193 bl fpd_load_two; \
194 mtlr r12; \
195 \
196 name. 0,0,1; /* call instruction */ \
197 b fpd_return
198
199/*
200 * CR Double operation with two input operands
201 *
202 * R3 = (double*)&fpscr
203 * R4 = (u32*)&cr
204 * R5 = (double*)&param1
205 * R6 = (double*)&param2
206 * R7 = (double*)&param3
207 */
208#define FPD_TWO_IN_CR(name) \
209_GLOBAL(fpd_ ## name); \
210 lfd 1,0(r6); /* load param2 */ \
211 lfd 0,0(r5); /* load param1 */ \
212 lfd 3,0(r3); /* load up fpscr value */ \
213 MTFSF_L(3); \
214 lwz r6, 0(r4); /* load cr */ \
215 mtcr r6; \
216 \
217 name 0,0,1; /* call instruction */ \
218 mfcr r6; \
219 mffs 0; \
220 stfd 0,0(r3); /* save new fpscr value */ \
221 stw r6,0(r4); /* save new cr value */ \
222 blr
223
224/*
225 * Double operation with three input operands
226 *
227 * R3 = (double*)&fpscr
228 * R4 = (u32*)&cr
229 * R5 = (double*)&result
230 * R6 = (double*)&param1
231 * R7 = (double*)&param2
232 * R8 = (double*)&param3
233 */
234#define FPD_THREE_IN(name) \
235_GLOBAL(fpd_ ## name); \
236 mflr r12; \
237 bl fpd_load_three; \
238 mtlr r12; \
239 \
240 name. 0,0,1,2; /* call instruction */ \
241 b fpd_return
242
243FPD_ONE_IN(fsqrts)
244FPD_ONE_IN(frsqrtes)
245FPD_ONE_IN(fres)
246FPD_ONE_IN(frsp)
247FPD_ONE_IN(fctiw)
248FPD_ONE_IN(fctiwz)
249FPD_ONE_IN(fsqrt)
250FPD_ONE_IN(fre)
251FPD_ONE_IN(frsqrte)
252FPD_ONE_IN(fneg)
253FPD_ONE_IN(fabs)
254FPD_TWO_IN(fadds)
255FPD_TWO_IN(fsubs)
256FPD_TWO_IN(fdivs)
257FPD_TWO_IN(fmuls)
258FPD_TWO_IN_CR(fcmpu)
259FPD_TWO_IN(fcpsgn)
260FPD_TWO_IN(fdiv)
261FPD_TWO_IN(fadd)
262FPD_TWO_IN(fmul)
263FPD_TWO_IN_CR(fcmpo)
264FPD_TWO_IN(fsub)
265FPD_THREE_IN(fmsubs)
266FPD_THREE_IN(fmadds)
267FPD_THREE_IN(fnmsubs)
268FPD_THREE_IN(fnmadds)
269FPD_THREE_IN(fsel)
270FPD_THREE_IN(fmsub)
271FPD_THREE_IN(fmadd)
272FPD_THREE_IN(fnmsub)
273FPD_THREE_IN(fnmadd)
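fpu.S lets C code run one hardware FPU instruction at a time under a caller-supplied FPSCR, which is what the paired-singles emulation added by this series needs. The register comments imply C prototypes roughly like the sketch below (assumed shapes; the real declarations live in a header not shown in this hunk). Note that the FPD_TWO_IN and FPD_TWO_IN_CR comments also list a param3 register that those two-operand macros never read.

/* Assumed C-side view of two of the helpers (sketch): */
extern void fps_fadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
extern void fpd_fadd(u64 *fpscr, u32 *cr, u64 *dst, u64 *src1, u64 *src2);

u64 fpscr = vcpu->arch.fpscr;   /* guest FPSCR in, updated FPSCR out */
u32 cr = 0;
u64 dst, a, b;
fpd_fadd(&fpscr, &cr, &dst, &a, &b);    /* dst = a + b; CR and FPSCR updated */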
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 297fcd2ff7d0..9b8683f39e05 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -70,7 +70,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
70 case EMULATE_FAIL: 70 case EMULATE_FAIL:
71 /* XXX Deliver Program interrupt to guest. */ 71 /* XXX Deliver Program interrupt to guest. */
72 printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__, 72 printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
73 vcpu->arch.last_inst); 73 kvmppc_get_last_inst(vcpu));
74 r = RESUME_HOST; 74 r = RESUME_HOST;
75 break; 75 break;
76 default: 76 default:
@@ -148,6 +148,10 @@ int kvm_dev_ioctl_check_extension(long ext)
148 148
149 switch (ext) { 149 switch (ext) {
150 case KVM_CAP_PPC_SEGSTATE: 150 case KVM_CAP_PPC_SEGSTATE:
151 case KVM_CAP_PPC_PAIRED_SINGLES:
152 case KVM_CAP_PPC_UNSET_IRQ:
153 case KVM_CAP_ENABLE_CAP:
154 case KVM_CAP_PPC_OSI:
151 r = 1; 155 r = 1;
152 break; 156 break;
153 case KVM_CAP_COALESCED_MMIO: 157 case KVM_CAP_COALESCED_MMIO:
@@ -193,12 +197,17 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
193{ 197{
194 struct kvm_vcpu *vcpu; 198 struct kvm_vcpu *vcpu;
195 vcpu = kvmppc_core_vcpu_create(kvm, id); 199 vcpu = kvmppc_core_vcpu_create(kvm, id);
196 kvmppc_create_vcpu_debugfs(vcpu, id); 200 if (!IS_ERR(vcpu))
201 kvmppc_create_vcpu_debugfs(vcpu, id);
197 return vcpu; 202 return vcpu;
198} 203}
199 204
200void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) 205void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
201{ 206{
207 /* Make sure we're not using the vcpu anymore */
208 hrtimer_cancel(&vcpu->arch.dec_timer);
209 tasklet_kill(&vcpu->arch.tasklet);
210
202 kvmppc_remove_vcpu_debugfs(vcpu); 211 kvmppc_remove_vcpu_debugfs(vcpu);
203 kvmppc_core_vcpu_free(vcpu); 212 kvmppc_core_vcpu_free(vcpu);
204} 213}
@@ -278,7 +287,7 @@ static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
278static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, 287static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
279 struct kvm_run *run) 288 struct kvm_run *run)
280{ 289{
281 ulong gpr; 290 u64 gpr;
282 291
283 if (run->mmio.len > sizeof(gpr)) { 292 if (run->mmio.len > sizeof(gpr)) {
284 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); 293 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
@@ -287,6 +296,7 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
287 296
288 if (vcpu->arch.mmio_is_bigendian) { 297 if (vcpu->arch.mmio_is_bigendian) {
289 switch (run->mmio.len) { 298 switch (run->mmio.len) {
299 case 8: gpr = *(u64 *)run->mmio.data; break;
290 case 4: gpr = *(u32 *)run->mmio.data; break; 300 case 4: gpr = *(u32 *)run->mmio.data; break;
291 case 2: gpr = *(u16 *)run->mmio.data; break; 301 case 2: gpr = *(u16 *)run->mmio.data; break;
292 case 1: gpr = *(u8 *)run->mmio.data; break; 302 case 1: gpr = *(u8 *)run->mmio.data; break;
@@ -300,7 +310,43 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
300 } 310 }
301 } 311 }
302 312
313 if (vcpu->arch.mmio_sign_extend) {
314 switch (run->mmio.len) {
315#ifdef CONFIG_PPC64
316 case 4:
317 gpr = (s64)(s32)gpr;
318 break;
319#endif
320 case 2:
321 gpr = (s64)(s16)gpr;
322 break;
323 case 1:
324 gpr = (s64)(s8)gpr;
325 break;
326 }
327 }
328
303 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); 329 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
330
331 switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
332 case KVM_REG_GPR:
333 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
334 break;
335 case KVM_REG_FPR:
336 vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
337 break;
338#ifdef CONFIG_PPC_BOOK3S
339 case KVM_REG_QPR:
340 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
341 break;
342 case KVM_REG_FQPR:
343 vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
344 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
345 break;
346#endif
347 default:
348 BUG();
349 }
304} 350}
305 351
306int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 352int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
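Two things happen in the completion path above: the zero-extended MMIO value is optionally sign-extended (the 4-byte case only matters under CONFIG_PPC64, where registers are wider than the access), and the destination is picked by flag bits that io_gpr now carries alongside the register number (GPR, FPR, QPR, or both for FQPR). Note that the unconditional kvmppc_set_gpr() ahead of the switch makes the KVM_REG_GPR case redundant. The extension step in isolation, as a standalone sketch using kernel fixed-width types:

/* Sketch: widen a zero-extended MMIO result to a signed register value. */
static u64 sketch_mmio_sign_extend(u64 gpr, unsigned int len)
{
        switch (len) {
        case 4: return (u64)(s64)(s32)gpr;      /* 64-bit kernels only */
        case 2: return (u64)(s64)(s16)gpr;
        case 1: return (u64)(s64)(s8)gpr;
        }
        return gpr;                     /* 8-byte loads need no widening */
}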
@@ -319,12 +365,25 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
319 vcpu->arch.mmio_is_bigendian = is_bigendian; 365 vcpu->arch.mmio_is_bigendian = is_bigendian;
320 vcpu->mmio_needed = 1; 366 vcpu->mmio_needed = 1;
321 vcpu->mmio_is_write = 0; 367 vcpu->mmio_is_write = 0;
368 vcpu->arch.mmio_sign_extend = 0;
322 369
323 return EMULATE_DO_MMIO; 370 return EMULATE_DO_MMIO;
324} 371}
325 372
373/* Same as above, but sign extends */
374int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
375 unsigned int rt, unsigned int bytes, int is_bigendian)
376{
377 int r;
378
379 r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
380 vcpu->arch.mmio_sign_extend = 1;
381
382 return r;
383}
384
326int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 385int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
327 u32 val, unsigned int bytes, int is_bigendian) 386 u64 val, unsigned int bytes, int is_bigendian)
328{ 387{
329 void *data = run->mmio.data; 388 void *data = run->mmio.data;
330 389
@@ -342,6 +401,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
342 /* Store the value at the lowest bytes in 'data'. */ 401 /* Store the value at the lowest bytes in 'data'. */
343 if (is_bigendian) { 402 if (is_bigendian) {
344 switch (bytes) { 403 switch (bytes) {
404 case 8: *(u64 *)data = val; break;
345 case 4: *(u32 *)data = val; break; 405 case 4: *(u32 *)data = val; break;
346 case 2: *(u16 *)data = val; break; 406 case 2: *(u16 *)data = val; break;
347 case 1: *(u8 *)data = val; break; 407 case 1: *(u8 *)data = val; break;
@@ -376,6 +436,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
376 if (!vcpu->arch.dcr_is_write) 436 if (!vcpu->arch.dcr_is_write)
377 kvmppc_complete_dcr_load(vcpu, run); 437 kvmppc_complete_dcr_load(vcpu, run);
378 vcpu->arch.dcr_needed = 0; 438 vcpu->arch.dcr_needed = 0;
439 } else if (vcpu->arch.osi_needed) {
440 u64 *gprs = run->osi.gprs;
441 int i;
442
443 for (i = 0; i < 32; i++)
444 kvmppc_set_gpr(vcpu, i, gprs[i]);
445 vcpu->arch.osi_needed = 0;
379 } 446 }
380 447
381 kvmppc_core_deliver_interrupts(vcpu); 448 kvmppc_core_deliver_interrupts(vcpu);
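The OSI path mirrors the DCR completion just above it: an OSI call exits to userspace, the VMM may rewrite any of the 32 GPR copies in the shared run structure, and the next KVM_RUN writes them all back before the guest resumes. The VMM side would look roughly like this (hypothetical handler name, sketch):

/* Userspace (VMM) side of an OSI exit - illustrative only. */
if (run->exit_reason == KVM_EXIT_OSI) {
        handle_osi_call(run->osi.gprs); /* may rewrite all 64-bit GPR slots */
        /* the next KVM_RUN ioctl copies osi.gprs back into the vcpu */
}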
@@ -396,7 +463,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
396 463
397int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) 464int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
398{ 465{
399 kvmppc_core_queue_external(vcpu, irq); 466 if (irq->irq == KVM_INTERRUPT_UNSET)
467 kvmppc_core_dequeue_external(vcpu, irq);
468 else
469 kvmppc_core_queue_external(vcpu, irq);
400 470
401 if (waitqueue_active(&vcpu->wq)) { 471 if (waitqueue_active(&vcpu->wq)) {
402 wake_up_interruptible(&vcpu->wq); 472 wake_up_interruptible(&vcpu->wq);
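KVM_INTERRUPT is now symmetric: the same ioctl that raises the external interrupt line can lower it again, which is what KVM_CAP_PPC_UNSET_IRQ advertises. From userspace, assuming the PPC KVM_INTERRUPT_SET/KVM_INTERRUPT_UNSET definitions from this series (sketch, error handling omitted):

#include <linux/kvm.h>
#include <sys/ioctl.h>

struct kvm_interrupt irq = { .irq = KVM_INTERRUPT_SET };
ioctl(vcpu_fd, KVM_INTERRUPT, &irq);    /* assert the external line */

irq.irq = KVM_INTERRUPT_UNSET;
ioctl(vcpu_fd, KVM_INTERRUPT, &irq);    /* deassert it again */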
@@ -406,6 +476,27 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
406 return 0; 476 return 0;
407} 477}
408 478
479static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
480 struct kvm_enable_cap *cap)
481{
482 int r;
483
484 if (cap->flags)
485 return -EINVAL;
486
487 switch (cap->cap) {
488 case KVM_CAP_PPC_OSI:
489 r = 0;
490 vcpu->arch.osi_enabled = true;
491 break;
492 default:
493 r = -EINVAL;
494 break;
495 }
496
497 return r;
498}
499
409int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 500int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
410 struct kvm_mp_state *mp_state) 501 struct kvm_mp_state *mp_state)
411{ 502{
@@ -434,6 +525,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
434 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); 525 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
435 break; 526 break;
436 } 527 }
528 case KVM_ENABLE_CAP:
529 {
530 struct kvm_enable_cap cap;
531 r = -EFAULT;
532 if (copy_from_user(&cap, argp, sizeof(cap)))
533 goto out;
534 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
535 break;
536 }
437 default: 537 default:
438 r = -EINVAL; 538 r = -EINVAL;
439 } 539 }
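With this dispatch in place, enabling the OSI interface is a plain vcpu ioctl: the capability number, zero flags (anything else is rejected with -EINVAL above), and no arguments. From userspace (sketch):

#include <linux/kvm.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

struct kvm_enable_cap cap;

memset(&cap, 0, sizeof(cap));   /* flags must stay zero */
cap.cap = KVM_CAP_PPC_OSI;
if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
        perror("KVM_ENABLE_CAP");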