author     Linus Torvalds <torvalds@linux-foundation.org>  2010-03-05 16:12:34 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-03-05 16:12:34 -0500
commit     c812a51d11bbe983f4c24e32b59b265705ddd3c2 (patch)
tree       d454f518db51a4de700cf3dcd4c3c71ee7288b47 /arch/powerpc/kvm
parent     9467c4fdd66f6810cecef0f1173330f3c6e67d45 (diff)
parent     d2be1651b736002e0c76d7095d6c0ba77b4a897c (diff)
Merge branch 'kvm-updates/2.6.34' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.34' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (145 commits)
KVM: x86: Add KVM_CAP_X86_ROBUST_SINGLESTEP
KVM: VMX: Update instruction length on intercepted BP
KVM: Fix emulate_sys[call, enter, exit]()'s fault handling
KVM: Fix segment descriptor loading
KVM: Fix load_guest_segment_descriptor() to inject page fault
KVM: x86 emulator: Forbid modifying CS segment register by mov instruction
KVM: Convert kvm->requests_lock to raw_spinlock_t
KVM: Convert i8254/i8259 locks to raw_spinlocks
KVM: x86 emulator: disallow opcode 82 in 64-bit mode
KVM: x86 emulator: code style cleanup
KVM: Plan obsolescence of kernel allocated slots, paravirt mmu
KVM: x86 emulator: Add LOCK prefix validity checking
KVM: x86 emulator: Check CPL level during privilege instruction emulation
KVM: x86 emulator: Fix popf emulation
KVM: x86 emulator: Check IOPL level during io instruction emulation
KVM: x86 emulator: fix memory access during x86 emulation
KVM: x86 emulator: Add Virtual-8086 mode of emulation
KVM: x86 emulator: Add group9 instruction decoding
KVM: x86 emulator: Add group8 instruction decoding
KVM: do not store wqh in irqfd
...
Trivial conflicts in Documentation/feature-removal-schedule.txt
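Most of the arch/powerpc/kvm churn below comes from converting direct vcpu->arch.gpr[] accesses to the kvmppc_get_gpr()/kvmppc_set_gpr() helpers. As a rough, illustrative sketch of what those helpers are assumed to look like in this series (simple wrappers in arch/powerpc/include/asm/kvm_ppc.h, not quoted verbatim from the merge):

    /* Sketch of the GPR accessors the diff below switches to; assumed to be
     * plain wrappers around the vcpu register array at this point. */
    static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
    {
        vcpu->arch.gpr[num] = val;
    }

    static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
    {
        return vcpu->arch.gpr[num];
    }

Routing every register access through one helper is what later lets Book3S back some registers with the PACA shadow vcpu without touching each emulation site again.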
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/44x_emulate.c            25
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c                20
-rw-r--r--  arch/powerpc/kvm/Kconfig                   1
-rw-r--r--  arch/powerpc/kvm/book3s.c                309
-rw-r--r--  arch/powerpc/kvm/book3s_64_emulate.c      77
-rw-r--r--  arch/powerpc/kvm/book3s_64_exports.c       8
-rw-r--r--  arch/powerpc/kvm/book3s_64_interrupts.S  336
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c          10
-rw-r--r--  arch/powerpc/kvm/book3s_64_rmhandlers.S  119
-rw-r--r--  arch/powerpc/kvm/book3s_64_slb.S         160
-rw-r--r--  arch/powerpc/kvm/booke.c                  87
-rw-r--r--  arch/powerpc/kvm/booke_emulate.c         107
-rw-r--r--  arch/powerpc/kvm/e500.c                    6
-rw-r--r--  arch/powerpc/kvm/e500_emulate.c           93
-rw-r--r--  arch/powerpc/kvm/e500_tlb.c               10
-rw-r--r--  arch/powerpc/kvm/emulate.c               118
-rw-r--r--  arch/powerpc/kvm/powerpc.c                40
17 files changed, 936 insertions(+), 590 deletions(-)
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 61af58fcecee..65ea083a5b27 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -65,13 +65,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
          */
         switch (dcrn) {
         case DCRN_CPR0_CONFIG_ADDR:
-            vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
+            kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr);
             break;
         case DCRN_CPR0_CONFIG_DATA:
             local_irq_disable();
             mtdcr(DCRN_CPR0_CONFIG_ADDR,
                   vcpu->arch.cpr0_cfgaddr);
-            vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
+            kvmppc_set_gpr(vcpu, rt,
+                           mfdcr(DCRN_CPR0_CONFIG_DATA));
             local_irq_enable();
             break;
         default:
@@ -93,11 +94,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
         /* emulate some access in kernel */
         switch (dcrn) {
         case DCRN_CPR0_CONFIG_ADDR:
-            vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
+            vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs);
             break;
         default:
             run->dcr.dcrn = dcrn;
-            run->dcr.data = vcpu->arch.gpr[rs];
+            run->dcr.data = kvmppc_get_gpr(vcpu, rs);
             run->dcr.is_write = 1;
             vcpu->arch.dcr_needed = 1;
             kvmppc_account_exit(vcpu, DCR_EXITS);
@@ -146,13 +147,13 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 
     switch (sprn) {
     case SPRN_PID:
-        kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
+        kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break;
     case SPRN_MMUCR:
-        vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
+        vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break;
     case SPRN_CCR0:
-        vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
+        vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break;
     case SPRN_CCR1:
-        vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
+        vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break;
     default:
         emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
     }
@@ -167,13 +168,13 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 
     switch (sprn) {
     case SPRN_PID:
-        vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
+        kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break;
     case SPRN_MMUCR:
-        vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
+        kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break;
     case SPRN_CCR0:
-        vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
+        kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break;
     case SPRN_CCR1:
-        vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
+        kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break;
     default:
         emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
     }
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index ff3cb63b8117..2570fcc7665d 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -439,7 +439,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
     struct kvmppc_44x_tlbe *tlbe;
     unsigned int gtlb_index;
 
-    gtlb_index = vcpu->arch.gpr[ra];
+    gtlb_index = kvmppc_get_gpr(vcpu, ra);
     if (gtlb_index > KVM44x_GUEST_TLB_SIZE) {
         printk("%s: index %d\n", __func__, gtlb_index);
         kvmppc_dump_vcpu(vcpu);
@@ -455,15 +455,15 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
     switch (ws) {
     case PPC44x_TLB_PAGEID:
         tlbe->tid = get_mmucr_stid(vcpu);
-        tlbe->word0 = vcpu->arch.gpr[rs];
+        tlbe->word0 = kvmppc_get_gpr(vcpu, rs);
         break;
 
     case PPC44x_TLB_XLAT:
-        tlbe->word1 = vcpu->arch.gpr[rs];
+        tlbe->word1 = kvmppc_get_gpr(vcpu, rs);
         break;
 
     case PPC44x_TLB_ATTRIB:
-        tlbe->word2 = vcpu->arch.gpr[rs];
+        tlbe->word2 = kvmppc_get_gpr(vcpu, rs);
         break;
 
     default:
@@ -500,18 +500,20 @@ int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
     unsigned int as = get_mmucr_sts(vcpu);
     unsigned int pid = get_mmucr_stid(vcpu);
 
-    ea = vcpu->arch.gpr[rb];
+    ea = kvmppc_get_gpr(vcpu, rb);
     if (ra)
-        ea += vcpu->arch.gpr[ra];
+        ea += kvmppc_get_gpr(vcpu, ra);
 
     gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
     if (rc) {
+        u32 cr = kvmppc_get_cr(vcpu);
+
         if (gtlb_index < 0)
-            vcpu->arch.cr &= ~0x20000000;
+            kvmppc_set_cr(vcpu, cr & ~0x20000000);
         else
-            vcpu->arch.cr |= 0x20000000;
+            kvmppc_set_cr(vcpu, cr | 0x20000000);
     }
-    vcpu->arch.gpr[rt] = gtlb_index;
+    kvmppc_set_gpr(vcpu, rt, gtlb_index);
 
     kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
     return EMULATE_DONE;
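The tlbsx hunk above and the get_regs/set_regs hunks in book3s.c below also move CR and XER behind accessors. A hedged sketch of what those wrappers are assumed to look like at this point in the series (plain field accesses, not the verbatim header):

    /* Assumed shape of the CR/XER accessors referenced in this diff. */
    static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
    {
        vcpu->arch.cr = val;
    }

    static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
    {
        return vcpu->arch.cr;
    }

    static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
    {
        vcpu->arch.xer = val;
    }

    static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
    {
        return vcpu->arch.xer;
    }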
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index fe037fdaf1b3..60624cc9f4d4 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -20,6 +20,7 @@ config KVM
     bool
     select PREEMPT_NOTIFIERS
     select ANON_INODES
+    select KVM_MMIO
 
 config KVM_BOOK3S_64_HANDLER
     bool
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 3e294bd9b8c6..9a271f0929c7 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -33,12 +33,9 @@
 
 /* #define EXIT_DEBUG */
 /* #define EXIT_DEBUG_SIMPLE */
+/* #define DEBUG_EXT */
 
-/* Without AGGRESSIVE_DEC we only fire off a DEC interrupt when DEC turns 0.
- * When set, we retrigger a DEC interrupt after that if DEC <= 0.
- * PPC32 Linux runs faster without AGGRESSIVE_DEC, PPC64 Linux requires it. */
-
-/* #define AGGRESSIVE_DEC */
+static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
     { "exits",       VCPU_STAT(sum_exits) },
@@ -72,16 +69,24 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
     memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb));
+    memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
+           sizeof(get_paca()->shadow_vcpu));
     get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
     memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb));
+    memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
+           sizeof(get_paca()->shadow_vcpu));
     to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
+
+    kvmppc_giveup_ext(vcpu, MSR_FP);
+    kvmppc_giveup_ext(vcpu, MSR_VEC);
+    kvmppc_giveup_ext(vcpu, MSR_VSX);
 }
 
-#if defined(AGGRESSIVE_DEC) || defined(EXIT_DEBUG)
+#if defined(EXIT_DEBUG)
 static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
 {
     u64 jd = mftb() - vcpu->arch.dec_jiffies;
@@ -89,6 +94,23 @@ static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
 }
 #endif
 
+static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
+{
+    vcpu->arch.shadow_msr = vcpu->arch.msr;
+    /* Guest MSR values */
+    vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE |
+                             MSR_BE | MSR_DE;
+    /* Process MSR values */
+    vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR |
+                             MSR_EE;
+    /* External providers the guest reserved */
+    vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext);
+    /* 64-bit Process MSR values */
+#ifdef CONFIG_PPC_BOOK3S_64
+    vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV;
+#endif
+}
+
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 {
     ulong old_msr = vcpu->arch.msr;
@@ -96,12 +118,10 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 #ifdef EXIT_DEBUG
     printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
 #endif
+
     msr &= to_book3s(vcpu)->msr_mask;
     vcpu->arch.msr = msr;
-    vcpu->arch.shadow_msr = msr | MSR_USER32;
-    vcpu->arch.shadow_msr &= ( MSR_VEC | MSR_VSX | MSR_FP | MSR_FE0 |
-                               MSR_USER64 | MSR_SE | MSR_BE | MSR_DE |
-                               MSR_FE1);
+    kvmppc_recalc_shadow_msr(vcpu);
 
     if (msr & (MSR_WE|MSR_POW)) {
         if (!vcpu->arch.pending_exceptions) {
@@ -125,11 +145,10 @@ void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
     vcpu->arch.mmu.reset_msr(vcpu);
 }
 
-void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
+static int kvmppc_book3s_vec2irqprio(unsigned int vec)
 {
     unsigned int prio;
 
-    vcpu->stat.queue_intr++;
     switch (vec) {
     case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;  break;
     case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
@@ -149,15 +168,31 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
     default:    prio = BOOK3S_IRQPRIO_MAX;           break;
     }
 
-    set_bit(prio, &vcpu->arch.pending_exceptions);
+    return prio;
+}
+
+static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
+                                          unsigned int vec)
+{
+    clear_bit(kvmppc_book3s_vec2irqprio(vec),
+              &vcpu->arch.pending_exceptions);
+}
+
+void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
+{
+    vcpu->stat.queue_intr++;
+
+    set_bit(kvmppc_book3s_vec2irqprio(vec),
+            &vcpu->arch.pending_exceptions);
 #ifdef EXIT_DEBUG
     printk(KERN_INFO "Queueing interrupt %x\n", vec);
 #endif
 }
 
 
-void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
 {
+    to_book3s(vcpu)->prog_flags = flags;
     kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
 }
 
@@ -171,6 +206,11 @@ int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
     return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7, &vcpu->arch.pending_exceptions);
 }
 
+void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
+{
+    kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
+}
+
 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                 struct kvm_interrupt *irq)
 {
@@ -181,6 +221,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 {
     int deliver = 1;
     int vec = 0;
+    ulong flags = 0ULL;
 
     switch (priority) {
     case BOOK3S_IRQPRIO_DECREMENTER:
@@ -214,6 +255,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
         break;
     case BOOK3S_IRQPRIO_PROGRAM:
         vec = BOOK3S_INTERRUPT_PROGRAM;
+        flags = to_book3s(vcpu)->prog_flags;
         break;
     case BOOK3S_IRQPRIO_VSX:
         vec = BOOK3S_INTERRUPT_VSX;
@@ -244,7 +286,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 #endif
 
     if (deliver)
-        kvmppc_inject_interrupt(vcpu, vec, 0ULL);
+        kvmppc_inject_interrupt(vcpu, vec, flags);
 
     return deliver;
 }
@@ -254,21 +296,15 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
     unsigned long *pending = &vcpu->arch.pending_exceptions;
     unsigned int priority;
 
-    /* XXX be more clever here - no need to mftb() on every entry */
-    /* Issue DEC again if it's still active */
-#ifdef AGGRESSIVE_DEC
-    if (vcpu->arch.msr & MSR_EE)
-        if (kvmppc_get_dec(vcpu) & 0x80000000)
-            kvmppc_core_queue_dec(vcpu);
-#endif
-
 #ifdef EXIT_DEBUG
     if (vcpu->arch.pending_exceptions)
         printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
 #endif
     priority = __ffs(*pending);
     while (priority <= (sizeof(unsigned int) * 8)) {
-        if (kvmppc_book3s_irqprio_deliver(vcpu, priority)) {
+        if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
+            (priority != BOOK3S_IRQPRIO_DECREMENTER)) {
+            /* DEC interrupts get cleared by mtdec */
             clear_bit(priority, &vcpu->arch.pending_exceptions);
             break;
         }
@@ -503,14 +539,14 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
         /* Page not found in guest PTE entries */
         vcpu->arch.dear = vcpu->arch.fault_dear;
         to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
-        vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
+        vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
         kvmppc_book3s_queue_irqprio(vcpu, vec);
     } else if (page_found == -EPERM) {
         /* Storage protection */
         vcpu->arch.dear = vcpu->arch.fault_dear;
         to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
         to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
-        vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
+        vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
         kvmppc_book3s_queue_irqprio(vcpu, vec);
     } else if (page_found == -EINVAL) {
         /* Page not found in guest SLB */
@@ -532,13 +568,122 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
         r = kvmppc_emulate_mmio(run, vcpu);
         if ( r == RESUME_HOST_NV )
             r = RESUME_HOST;
-        if ( r == RESUME_GUEST_NV )
-            r = RESUME_GUEST;
     }
 
     return r;
 }
 
+static inline int get_fpr_index(int i)
+{
+#ifdef CONFIG_VSX
+    i *= 2;
+#endif
+    return i;
+}
+
+/* Give up external provider (FPU, Altivec, VSX) */
+static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
+{
+    struct thread_struct *t = &current->thread;
+    u64 *vcpu_fpr = vcpu->arch.fpr;
+    u64 *vcpu_vsx = vcpu->arch.vsr;
+    u64 *thread_fpr = (u64*)t->fpr;
+    int i;
+
+    if (!(vcpu->arch.guest_owned_ext & msr))
+        return;
+
+#ifdef DEBUG_EXT
+    printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
+#endif
+
+    switch (msr) {
+    case MSR_FP:
+        giveup_fpu(current);
+        for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
+            vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
+
+        vcpu->arch.fpscr = t->fpscr.val;
+        break;
+    case MSR_VEC:
+#ifdef CONFIG_ALTIVEC
+        giveup_altivec(current);
+        memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
+        vcpu->arch.vscr = t->vscr;
+#endif
+        break;
+    case MSR_VSX:
+#ifdef CONFIG_VSX
+        __giveup_vsx(current);
+        for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
+            vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
+#endif
+        break;
+    default:
+        BUG();
+    }
+
+    vcpu->arch.guest_owned_ext &= ~msr;
+    current->thread.regs->msr &= ~msr;
+    kvmppc_recalc_shadow_msr(vcpu);
+}
+
+/* Handle external providers (FPU, Altivec, VSX) */
+static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
+                             ulong msr)
+{
+    struct thread_struct *t = &current->thread;
+    u64 *vcpu_fpr = vcpu->arch.fpr;
+    u64 *vcpu_vsx = vcpu->arch.vsr;
+    u64 *thread_fpr = (u64*)t->fpr;
+    int i;
+
+    if (!(vcpu->arch.msr & msr)) {
+        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+        return RESUME_GUEST;
+    }
+
+#ifdef DEBUG_EXT
+    printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
+#endif
+
+    current->thread.regs->msr |= msr;
+
+    switch (msr) {
+    case MSR_FP:
+        for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
+            thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
+
+        t->fpscr.val = vcpu->arch.fpscr;
+        t->fpexc_mode = 0;
+        kvmppc_load_up_fpu();
+        break;
+    case MSR_VEC:
+#ifdef CONFIG_ALTIVEC
+        memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+        t->vscr = vcpu->arch.vscr;
+        t->vrsave = -1;
+        kvmppc_load_up_altivec();
+#endif
+        break;
+    case MSR_VSX:
+#ifdef CONFIG_VSX
+        for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
+            thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
+        kvmppc_load_up_vsx();
+#endif
+        break;
+    default:
+        BUG();
+    }
+
+    vcpu->arch.guest_owned_ext |= msr;
+
+    kvmppc_recalc_shadow_msr(vcpu);
+
+    return RESUME_GUEST;
+}
+
 int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int exit_nr)
 {
@@ -563,7 +708,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
     case BOOK3S_INTERRUPT_INST_STORAGE:
         vcpu->stat.pf_instruc++;
         /* only care about PTEG not found errors, but leave NX alone */
-        if (vcpu->arch.shadow_msr & 0x40000000) {
+        if (vcpu->arch.shadow_srr1 & 0x40000000) {
             r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
             vcpu->stat.sp_instruc++;
         } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -575,7 +720,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
              */
            kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
         } else {
-            vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x58000000);
+            vcpu->arch.msr |= vcpu->arch.shadow_srr1 & 0x58000000;
             kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
             kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
             r = RESUME_GUEST;
@@ -621,6 +766,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
     case BOOK3S_INTERRUPT_PROGRAM:
     {
         enum emulation_result er;
+        ulong flags;
+
+        flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
 
         if (vcpu->arch.msr & MSR_PR) {
 #ifdef EXIT_DEBUG
@@ -628,7 +776,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 #endif
             if ((vcpu->arch.last_inst & 0xff0007ff) !=
                 (INS_DCBZ & 0xfffffff7)) {
-                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+                kvmppc_core_queue_program(vcpu, flags);
                 r = RESUME_GUEST;
                 break;
             }
@@ -638,12 +786,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
         er = kvmppc_emulate_instruction(run, vcpu);
         switch (er) {
         case EMULATE_DONE:
-            r = RESUME_GUEST;
+            r = RESUME_GUEST_NV;
             break;
         case EMULATE_FAIL:
             printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                    __func__, vcpu->arch.pc, vcpu->arch.last_inst);
-            kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+            kvmppc_core_queue_program(vcpu, flags);
             r = RESUME_GUEST;
             break;
         default:
@@ -653,23 +801,30 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
     }
     case BOOK3S_INTERRUPT_SYSCALL:
 #ifdef EXIT_DEBUG
-        printk(KERN_INFO "Syscall Nr %d\n", (int)vcpu->arch.gpr[0]);
+        printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0));
 #endif
         vcpu->stat.syscall_exits++;
         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
         r = RESUME_GUEST;
         break;
-    case BOOK3S_INTERRUPT_MACHINE_CHECK:
     case BOOK3S_INTERRUPT_FP_UNAVAIL:
-    case BOOK3S_INTERRUPT_TRACE:
+        r = kvmppc_handle_ext(vcpu, exit_nr, MSR_FP);
+        break;
     case BOOK3S_INTERRUPT_ALTIVEC:
+        r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VEC);
+        break;
     case BOOK3S_INTERRUPT_VSX:
+        r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VSX);
+        break;
+    case BOOK3S_INTERRUPT_MACHINE_CHECK:
+    case BOOK3S_INTERRUPT_TRACE:
         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
         r = RESUME_GUEST;
         break;
     default:
         /* Ugh - bork here! What did we get? */
-        printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", exit_nr, vcpu->arch.pc, vcpu->arch.shadow_msr);
+        printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
+               exit_nr, vcpu->arch.pc, vcpu->arch.shadow_srr1);
         r = RESUME_HOST;
         BUG();
         break;
@@ -712,10 +867,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
     int i;
 
     regs->pc = vcpu->arch.pc;
-    regs->cr = vcpu->arch.cr;
+    regs->cr = kvmppc_get_cr(vcpu);
     regs->ctr = vcpu->arch.ctr;
     regs->lr = vcpu->arch.lr;
-    regs->xer = vcpu->arch.xer;
+    regs->xer = kvmppc_get_xer(vcpu);
     regs->msr = vcpu->arch.msr;
     regs->srr0 = vcpu->arch.srr0;
     regs->srr1 = vcpu->arch.srr1;
@@ -729,7 +884,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
     regs->sprg7 = vcpu->arch.sprg6;
 
     for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
-        regs->gpr[i] = vcpu->arch.gpr[i];
+        regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
 
     return 0;
 }
@@ -739,10 +894,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
     int i;
 
     vcpu->arch.pc = regs->pc;
-    vcpu->arch.cr = regs->cr;
+    kvmppc_set_cr(vcpu, regs->cr);
     vcpu->arch.ctr = regs->ctr;
     vcpu->arch.lr = regs->lr;
-    vcpu->arch.xer = regs->xer;
+    kvmppc_set_xer(vcpu, regs->xer);
     kvmppc_set_msr(vcpu, regs->msr);
     vcpu->arch.srr0 = regs->srr0;
     vcpu->arch.srr1 = regs->srr1;
@@ -754,8 +909,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
     vcpu->arch.sprg6 = regs->sprg5;
     vcpu->arch.sprg7 = regs->sprg6;
 
-    for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
-        vcpu->arch.gpr[i] = regs->gpr[i];
+    for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
+        kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
 
     return 0;
 }
@@ -850,7 +1005,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
     int is_dirty = 0;
     int r, n;
 
-    down_write(&kvm->slots_lock);
+    mutex_lock(&kvm->slots_lock);
 
     r = kvm_get_dirty_log(kvm, log, &is_dirty);
     if (r)
@@ -858,7 +1013,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
     /* If nothing is dirty, don't bother messing with page tables. */
     if (is_dirty) {
-        memslot = &kvm->memslots[log->slot];
+        memslot = &kvm->memslots->memslots[log->slot];
 
         ga = memslot->base_gfn << PAGE_SHIFT;
         ga_end = ga + (memslot->npages << PAGE_SHIFT);
@@ -872,7 +1027,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
     r = 0;
 out:
-    up_write(&kvm->slots_lock);
+    mutex_unlock(&kvm->slots_lock);
     return r;
 }
 
@@ -910,6 +1065,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
     vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
     vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
     vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
+    vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
 
     vcpu->arch.shadow_msr = MSR_USER64;
 
@@ -943,6 +1099,10 @@ extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
     int ret;
+    struct thread_struct ext_bkp;
+    bool save_vec = current->thread.used_vr;
+    bool save_vsx = current->thread.used_vsr;
+    ulong ext_msr;
 
     /* No need to go into the guest when all we do is going out */
     if (signal_pending(current)) {
@@ -950,6 +1110,35 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         return -EINTR;
     }
 
+    /* Save FPU state in stack */
+    if (current->thread.regs->msr & MSR_FP)
+        giveup_fpu(current);
+    memcpy(ext_bkp.fpr, current->thread.fpr, sizeof(current->thread.fpr));
+    ext_bkp.fpscr = current->thread.fpscr;
+    ext_bkp.fpexc_mode = current->thread.fpexc_mode;
+
+#ifdef CONFIG_ALTIVEC
+    /* Save Altivec state in stack */
+    if (save_vec) {
+        if (current->thread.regs->msr & MSR_VEC)
+            giveup_altivec(current);
+        memcpy(ext_bkp.vr, current->thread.vr, sizeof(ext_bkp.vr));
+        ext_bkp.vscr = current->thread.vscr;
+        ext_bkp.vrsave = current->thread.vrsave;
+    }
+    ext_bkp.used_vr = current->thread.used_vr;
+#endif
+
+#ifdef CONFIG_VSX
+    /* Save VSX state in stack */
+    if (save_vsx && (current->thread.regs->msr & MSR_VSX))
+        __giveup_vsx(current);
+    ext_bkp.used_vsr = current->thread.used_vsr;
+#endif
+
+    /* Remember the MSR with disabled extensions */
+    ext_msr = current->thread.regs->msr;
+
     /* XXX we get called with irq disabled - change that! */
     local_irq_enable();
 
@@ -957,6 +1146,32 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
     local_irq_disable();
 
+    current->thread.regs->msr = ext_msr;
+
+    /* Make sure we save the guest FPU/Altivec/VSX state */
+    kvmppc_giveup_ext(vcpu, MSR_FP);
+    kvmppc_giveup_ext(vcpu, MSR_VEC);
+    kvmppc_giveup_ext(vcpu, MSR_VSX);
+
+    /* Restore FPU state from stack */
+    memcpy(current->thread.fpr, ext_bkp.fpr, sizeof(ext_bkp.fpr));
+    current->thread.fpscr = ext_bkp.fpscr;
+    current->thread.fpexc_mode = ext_bkp.fpexc_mode;
+
+#ifdef CONFIG_ALTIVEC
+    /* Restore Altivec state from stack */
+    if (save_vec && current->thread.used_vr) {
+        memcpy(current->thread.vr, ext_bkp.vr, sizeof(ext_bkp.vr));
+        current->thread.vscr = ext_bkp.vscr;
+        current->thread.vrsave = ext_bkp.vrsave;
+    }
+    current->thread.used_vr = ext_bkp.used_vr;
+#endif
+
+#ifdef CONFIG_VSX
+    current->thread.used_vsr = ext_bkp.used_vsr;
+#endif
+
     return ret;
 }
 
diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_64_emulate.c
index 1027eac6d474..2b0ee7e040c9 100644
--- a/arch/powerpc/kvm/book3s_64_emulate.c
+++ b/arch/powerpc/kvm/book3s_64_emulate.c
@@ -65,11 +65,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
     case 31:
         switch (get_xop(inst)) {
         case OP_31_XOP_MFMSR:
-            vcpu->arch.gpr[get_rt(inst)] = vcpu->arch.msr;
+            kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr);
             break;
         case OP_31_XOP_MTMSRD:
         {
-            ulong rs = vcpu->arch.gpr[get_rs(inst)];
+            ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
             if (inst & 0x10000) {
                 vcpu->arch.msr &= ~(MSR_RI | MSR_EE);
                 vcpu->arch.msr |= rs & (MSR_RI | MSR_EE);
@@ -78,30 +78,30 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
             break;
         }
         case OP_31_XOP_MTMSR:
-            kvmppc_set_msr(vcpu, vcpu->arch.gpr[get_rs(inst)]);
+            kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
             break;
         case OP_31_XOP_MFSRIN:
         {
             int srnum;
 
-            srnum = (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf;
+            srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
             if (vcpu->arch.mmu.mfsrin) {
                 u32 sr;
                 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
-                vcpu->arch.gpr[get_rt(inst)] = sr;
+                kvmppc_set_gpr(vcpu, get_rt(inst), sr);
             }
             break;
         }
         case OP_31_XOP_MTSRIN:
             vcpu->arch.mmu.mtsrin(vcpu,
-                (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf,
-                vcpu->arch.gpr[get_rs(inst)]);
+                (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
+                kvmppc_get_gpr(vcpu, get_rs(inst)));
             break;
         case OP_31_XOP_TLBIE:
         case OP_31_XOP_TLBIEL:
         {
             bool large = (inst & 0x00200000) ? true : false;
-            ulong addr = vcpu->arch.gpr[get_rb(inst)];
+            ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
             vcpu->arch.mmu.tlbie(vcpu, addr, large);
             break;
         }
@@ -111,14 +111,16 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
             if (!vcpu->arch.mmu.slbmte)
                 return EMULATE_FAIL;
 
-            vcpu->arch.mmu.slbmte(vcpu, vcpu->arch.gpr[get_rs(inst)],
-                        vcpu->arch.gpr[get_rb(inst)]);
+            vcpu->arch.mmu.slbmte(vcpu,
+                    kvmppc_get_gpr(vcpu, get_rs(inst)),
+                    kvmppc_get_gpr(vcpu, get_rb(inst)));
             break;
         case OP_31_XOP_SLBIE:
             if (!vcpu->arch.mmu.slbie)
                 return EMULATE_FAIL;
 
-            vcpu->arch.mmu.slbie(vcpu, vcpu->arch.gpr[get_rb(inst)]);
+            vcpu->arch.mmu.slbie(vcpu,
+                    kvmppc_get_gpr(vcpu, get_rb(inst)));
             break;
         case OP_31_XOP_SLBIA:
             if (!vcpu->arch.mmu.slbia)
@@ -132,9 +134,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
             } else {
                 ulong t, rb;
 
-                rb = vcpu->arch.gpr[get_rb(inst)];
+                rb = kvmppc_get_gpr(vcpu, get_rb(inst));
                 t = vcpu->arch.mmu.slbmfee(vcpu, rb);
-                vcpu->arch.gpr[get_rt(inst)] = t;
+                kvmppc_set_gpr(vcpu, get_rt(inst), t);
             }
             break;
         case OP_31_XOP_SLBMFEV:
@@ -143,20 +145,20 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
             } else {
                 ulong t, rb;
 
-                rb = vcpu->arch.gpr[get_rb(inst)];
+                rb = kvmppc_get_gpr(vcpu, get_rb(inst));
                 t = vcpu->arch.mmu.slbmfev(vcpu, rb);
-                vcpu->arch.gpr[get_rt(inst)] = t;
+                kvmppc_set_gpr(vcpu, get_rt(inst), t);
             }
             break;
         case OP_31_XOP_DCBZ:
         {
-            ulong rb = vcpu->arch.gpr[get_rb(inst)];
+            ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
             ulong ra = 0;
             ulong addr;
             u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
 
             if (get_ra(inst))
-                ra = vcpu->arch.gpr[get_ra(inst)];
+                ra = kvmppc_get_gpr(vcpu, get_ra(inst));
 
             addr = (ra + rb) & ~31ULL;
             if (!(vcpu->arch.msr & MSR_SF))
@@ -233,43 +235,44 @@ static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
 int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 {
     int emulated = EMULATE_DONE;
+    ulong spr_val = kvmppc_get_gpr(vcpu, rs);
 
     switch (sprn) {
     case SPRN_SDR1:
-        to_book3s(vcpu)->sdr1 = vcpu->arch.gpr[rs];
+        to_book3s(vcpu)->sdr1 = spr_val;
         break;
     case SPRN_DSISR:
-        to_book3s(vcpu)->dsisr = vcpu->arch.gpr[rs];
+        to_book3s(vcpu)->dsisr = spr_val;
         break;
     case SPRN_DAR:
-        vcpu->arch.dear = vcpu->arch.gpr[rs];
+        vcpu->arch.dear = spr_val;
         break;
     case SPRN_HIOR:
-        to_book3s(vcpu)->hior = vcpu->arch.gpr[rs];
+        to_book3s(vcpu)->hior = spr_val;
         break;
     case SPRN_IBAT0U ... SPRN_IBAT3L:
     case SPRN_IBAT4U ... SPRN_IBAT7L:
     case SPRN_DBAT0U ... SPRN_DBAT3L:
     case SPRN_DBAT4U ... SPRN_DBAT7L:
-        kvmppc_write_bat(vcpu, sprn, (u32)vcpu->arch.gpr[rs]);
+        kvmppc_write_bat(vcpu, sprn, (u32)spr_val);
         /* BAT writes happen so rarely that we're ok to flush
          * everything here */
         kvmppc_mmu_pte_flush(vcpu, 0, 0);
         break;
     case SPRN_HID0:
-        to_book3s(vcpu)->hid[0] = vcpu->arch.gpr[rs];
+        to_book3s(vcpu)->hid[0] = spr_val;
         break;
     case SPRN_HID1:
-        to_book3s(vcpu)->hid[1] = vcpu->arch.gpr[rs];
+        to_book3s(vcpu)->hid[1] = spr_val;
         break;
     case SPRN_HID2:
-        to_book3s(vcpu)->hid[2] = vcpu->arch.gpr[rs];
+        to_book3s(vcpu)->hid[2] = spr_val;
         break;
     case SPRN_HID4:
-        to_book3s(vcpu)->hid[4] = vcpu->arch.gpr[rs];
+        to_book3s(vcpu)->hid[4] = spr_val;
         break;
     case SPRN_HID5:
-        to_book3s(vcpu)->hid[5] = vcpu->arch.gpr[rs];
+        to_book3s(vcpu)->hid[5] = spr_val;
         /* guest HID5 set can change is_dcbz32 */
         if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
             (mfmsr() & MSR_HV))
@@ -299,38 +302,38 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 
     switch (sprn) {
     case SPRN_SDR1:
-        vcpu->arch.gpr[rt] = to_book3s(vcpu)->sdr1;
+        kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
         break;
     case SPRN_DSISR:
-        vcpu->arch.gpr[rt] = to_book3s(vcpu)->dsisr;
+        kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->dsisr);
         break;
     case SPRN_DAR:
-        vcpu->arch.gpr[rt] = vcpu->arch.dear;
+        kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear);
        break;
     case SPRN_HIOR:
-        vcpu->arch.gpr[rt] = to_book3s(vcpu)->hior;
+        kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
         break;
     case SPRN_HID0:
-        vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[0];
+        kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
         break;
     case SPRN_HID1:
-        vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[1];
+        kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
         break;
     case SPRN_HID2:
-        vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[2];
+        kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
         break;
     case SPRN_HID4:
-        vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[4];
+        kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
         break;
     case SPRN_HID5:
-        vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[5];
+        kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
         break;
     case SPRN_THRM1:
     case SPRN_THRM2:
     case SPRN_THRM3:
     case SPRN_CTRLF:
     case SPRN_CTRLT:
-        vcpu->arch.gpr[rt] = 0;
+        kvmppc_set_gpr(vcpu, rt, 0);
         break;
     default:
         printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
diff --git a/arch/powerpc/kvm/book3s_64_exports.c b/arch/powerpc/kvm/book3s_64_exports.c
index 5b2db38ed86c..1dd5a1ddfd0d 100644
--- a/arch/powerpc/kvm/book3s_64_exports.c
+++ b/arch/powerpc/kvm/book3s_64_exports.c
@@ -22,3 +22,11 @@
 
 EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter);
 EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem);
+EXPORT_SYMBOL_GPL(kvmppc_rmcall);
+EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
+#ifdef CONFIG_ALTIVEC
+EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
+#endif
+#ifdef CONFIG_VSX
+EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx);
+#endif
diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S
index 7b55d8094c8b..c1584d0cbce8 100644
--- a/arch/powerpc/kvm/book3s_64_interrupts.S
+++ b/arch/powerpc/kvm/book3s_64_interrupts.S
@@ -28,11 +28,6 @@
 #define ULONG_SIZE 8
 #define VCPU_GPR(n)  (VCPU_GPRS + (n * ULONG_SIZE))
 
-.macro mfpaca tmp_reg, src_reg, offset, vcpu_reg
-    ld    \tmp_reg, (PACA_EXMC+\offset)(r13)
-    std   \tmp_reg, VCPU_GPR(\src_reg)(\vcpu_reg)
-.endm
-
 .macro DISABLE_INTERRUPTS
     mfmsr   r0
     rldicl  r0,r0,48,1
@@ -40,6 +35,26 @@
     mtmsrd  r0,1
 .endm
 
+#define VCPU_LOAD_NVGPRS(vcpu) \
+    ld    r14, VCPU_GPR(r14)(vcpu); \
+    ld    r15, VCPU_GPR(r15)(vcpu); \
+    ld    r16, VCPU_GPR(r16)(vcpu); \
+    ld    r17, VCPU_GPR(r17)(vcpu); \
+    ld    r18, VCPU_GPR(r18)(vcpu); \
+    ld    r19, VCPU_GPR(r19)(vcpu); \
+    ld    r20, VCPU_GPR(r20)(vcpu); \
+    ld    r21, VCPU_GPR(r21)(vcpu); \
+    ld    r22, VCPU_GPR(r22)(vcpu); \
+    ld    r23, VCPU_GPR(r23)(vcpu); \
+    ld    r24, VCPU_GPR(r24)(vcpu); \
+    ld    r25, VCPU_GPR(r25)(vcpu); \
+    ld    r26, VCPU_GPR(r26)(vcpu); \
+    ld    r27, VCPU_GPR(r27)(vcpu); \
+    ld    r28, VCPU_GPR(r28)(vcpu); \
+    ld    r29, VCPU_GPR(r29)(vcpu); \
+    ld    r30, VCPU_GPR(r30)(vcpu); \
+    ld    r31, VCPU_GPR(r31)(vcpu); \
+
 /*****************************************************************************
  *                                                                           *
  *     Guest entry / exit code that is in kernel module memory (highmem)     *
@@ -67,61 +82,32 @@ kvm_start_entry:
     SAVE_NVGPRS(r1)
 
     /* Save LR */
-    mflr    r14
-    std     r14, _LINK(r1)
-
-/* XXX optimize non-volatile loading away */
-kvm_start_lightweight:
+    std     r0, _LINK(r1)
 
-    DISABLE_INTERRUPTS
+    /* Load non-volatile guest state from the vcpu */
+    VCPU_LOAD_NVGPRS(r4)
 
     /* Save R1/R2 in the PACA */
-    std     r1, PACAR1(r13)
-    std     r2, (PACA_EXMC+EX_SRR0)(r13)
+    std     r1, PACA_KVM_HOST_R1(r13)
+    std     r2, PACA_KVM_HOST_R2(r13)
+
+    /* XXX swap in/out on load? */
     ld      r3, VCPU_HIGHMEM_HANDLER(r4)
-    std     r3, PACASAVEDMSR(r13)
+    std     r3, PACA_KVM_VMHANDLER(r13)
 
-    /* Load non-volatile guest state from the vcpu */
-    ld      r14, VCPU_GPR(r14)(r4)
-    ld      r15, VCPU_GPR(r15)(r4)
-    ld      r16, VCPU_GPR(r16)(r4)
-    ld      r17, VCPU_GPR(r17)(r4)
-    ld      r18, VCPU_GPR(r18)(r4)
-    ld      r19, VCPU_GPR(r19)(r4)
-    ld      r20, VCPU_GPR(r20)(r4)
-    ld      r21, VCPU_GPR(r21)(r4)
-    ld      r22, VCPU_GPR(r22)(r4)
-    ld      r23, VCPU_GPR(r23)(r4)
-    ld      r24, VCPU_GPR(r24)(r4)
-    ld      r25, VCPU_GPR(r25)(r4)
-    ld      r26, VCPU_GPR(r26)(r4)
-    ld      r27, VCPU_GPR(r27)(r4)
-    ld      r28, VCPU_GPR(r28)(r4)
-    ld      r29, VCPU_GPR(r29)(r4)
-    ld      r30, VCPU_GPR(r30)(r4)
-    ld      r31, VCPU_GPR(r31)(r4)
+kvm_start_lightweight:
 
     ld      r9, VCPU_PC(r4)             /* r9 = vcpu->arch.pc */
     ld      r10, VCPU_SHADOW_MSR(r4)    /* r10 = vcpu->arch.shadow_msr */
 
-    ld      r3, VCPU_TRAMPOLINE_ENTER(r4)
-    mtsrr0  r3
-
-    LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
-    mtsrr1  r3
-
-    /* Load guest state in the respective registers */
-    lwz     r3, VCPU_CR(r4)     /* r3 = vcpu->arch.cr */
-    stw     r3, (PACA_EXMC + EX_CCR)(r13)
-
-    ld      r3, VCPU_CTR(r4)    /* r3 = vcpu->arch.ctr */
-    mtctr   r3                  /* CTR = r3 */
+    /* Load some guest state in the respective registers */
+    ld      r5, VCPU_CTR(r4)    /* r5 = vcpu->arch.ctr */
+                                /* will be swapped in by rmcall */
 
     ld      r3, VCPU_LR(r4)     /* r3 = vcpu->arch.lr */
     mtlr    r3                  /* LR = r3 */
 
-    ld      r3, VCPU_XER(r4)    /* r3 = vcpu->arch.xer */
-    std     r3, (PACA_EXMC + EX_R3)(r13)
+    DISABLE_INTERRUPTS
 
     /* Some guests may need to have dcbz set to 32 byte length.
      *
@@ -141,36 +127,15 @@ kvm_start_lightweight:
     mtspr   SPRN_HID5,r3
 
 no_dcbz32_on:
-    /* Load guest GPRs */
-
-    ld      r3, VCPU_GPR(r9)(r4)
-    std     r3, (PACA_EXMC + EX_R9)(r13)
-    ld      r3, VCPU_GPR(r10)(r4)
-    std     r3, (PACA_EXMC + EX_R10)(r13)
-    ld      r3, VCPU_GPR(r11)(r4)
-    std     r3, (PACA_EXMC + EX_R11)(r13)
-    ld      r3, VCPU_GPR(r12)(r4)
-    std     r3, (PACA_EXMC + EX_R12)(r13)
-    ld      r3, VCPU_GPR(r13)(r4)
-    std     r3, (PACA_EXMC + EX_R13)(r13)
-
-    ld      r0, VCPU_GPR(r0)(r4)
-    ld      r1, VCPU_GPR(r1)(r4)
-    ld      r2, VCPU_GPR(r2)(r4)
-    ld      r3, VCPU_GPR(r3)(r4)
-    ld      r5, VCPU_GPR(r5)(r4)
-    ld      r6, VCPU_GPR(r6)(r4)
-    ld      r7, VCPU_GPR(r7)(r4)
-    ld      r8, VCPU_GPR(r8)(r4)
-    ld      r4, VCPU_GPR(r4)(r4)
-
-    /* This sets the Magic value for the trampoline */
-
-    li      r11, 1
-    stb     r11, PACA_KVM_IN_GUEST(r13)
+
+    ld      r6, VCPU_RMCALL(r4)
+    mtctr   r6
+
+    ld      r3, VCPU_TRAMPOLINE_ENTER(r4)
+    LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
 
     /* Jump to SLB patching handlder and into our guest */
-    RFI
+    bctr
 
 /*
  * This is the handler in module memory. It gets jumped at from the
@@ -184,125 +149,70 @@ kvmppc_handler_highmem: | |||
184 | /* | 149 | /* |
185 | * Register usage at this point: | 150 | * Register usage at this point: |
186 | * | 151 | * |
187 | * R00 = guest R13 | 152 | * R0 = guest last inst |
188 | * R01 = host R1 | 153 | * R1 = host R1 |
189 | * R02 = host R2 | 154 | * R2 = host R2 |
190 | * R10 = guest PC | 155 | * R3 = guest PC |
191 | * R11 = guest MSR | 156 | * R4 = guest MSR |
192 | * R12 = exit handler id | 157 | * R5 = guest DAR |
193 | * R13 = PACA | 158 | * R6 = guest DSISR |
194 | * PACA.exmc.R9 = guest R1 | 159 | * R13 = PACA |
195 | * PACA.exmc.R10 = guest R10 | 160 | * PACA.KVM.* = guest * |
196 | * PACA.exmc.R11 = guest R11 | ||
197 | * PACA.exmc.R12 = guest R12 | ||
198 | * PACA.exmc.R13 = guest R2 | ||
199 | * PACA.exmc.DAR = guest DAR | ||
200 | * PACA.exmc.DSISR = guest DSISR | ||
201 | * PACA.exmc.LR = guest instruction | ||
202 | * PACA.exmc.CCR = guest CR | ||
203 | * PACA.exmc.SRR0 = guest R0 | ||
204 | * | 161 | * |
205 | */ | 162 | */ |
206 | 163 | ||
207 | std r3, (PACA_EXMC+EX_R3)(r13) | 164 | /* R7 = vcpu */ |
165 | ld r7, GPR4(r1) | ||
208 | 166 | ||
209 | /* save the exit id in R3 */ | 167 | /* Now save the guest state */ |
210 | mr r3, r12 | ||
211 | 168 | ||
212 | /* R12 = vcpu */ | 169 | stw r0, VCPU_LAST_INST(r7) |
213 | ld r12, GPR4(r1) | ||
214 | 170 | ||
215 | /* Now save the guest state */ | 171 | std r3, VCPU_PC(r7) |
172 | std r4, VCPU_SHADOW_SRR1(r7) | ||
173 | std r5, VCPU_FAULT_DEAR(r7) | ||
174 | std r6, VCPU_FAULT_DSISR(r7) | ||
216 | 175 | ||
217 | std r0, VCPU_GPR(r13)(r12) | 176 | ld r5, VCPU_HFLAGS(r7) |
218 | std r4, VCPU_GPR(r4)(r12) | ||
219 | std r5, VCPU_GPR(r5)(r12) | ||
220 | std r6, VCPU_GPR(r6)(r12) | ||
221 | std r7, VCPU_GPR(r7)(r12) | ||
222 | std r8, VCPU_GPR(r8)(r12) | ||
223 | std r9, VCPU_GPR(r9)(r12) | ||
224 | |||
225 | /* get registers from PACA */ | ||
226 | mfpaca r5, r0, EX_SRR0, r12 | ||
227 | mfpaca r5, r3, EX_R3, r12 | ||
228 | mfpaca r5, r1, EX_R9, r12 | ||
229 | mfpaca r5, r10, EX_R10, r12 | ||
230 | mfpaca r5, r11, EX_R11, r12 | ||
231 | mfpaca r5, r12, EX_R12, r12 | ||
232 | mfpaca r5, r2, EX_R13, r12 | ||
233 | |||
234 | lwz r5, (PACA_EXMC+EX_LR)(r13) | ||
235 | stw r5, VCPU_LAST_INST(r12) | ||
236 | |||
237 | lwz r5, (PACA_EXMC+EX_CCR)(r13) | ||
238 | stw r5, VCPU_CR(r12) | ||
239 | |||
240 | ld r5, VCPU_HFLAGS(r12) | ||
241 | rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */ | 177 | rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */ |
242 | beq no_dcbz32_off | 178 | beq no_dcbz32_off |
243 | 179 | ||
180 | li r4, 0 | ||
244 | mfspr r5,SPRN_HID5 | 181 | mfspr r5,SPRN_HID5 |
245 | rldimi r5,r5,6,56 | 182 | rldimi r5,r4,6,56 |
246 | mtspr SPRN_HID5,r5 | 183 | mtspr SPRN_HID5,r5 |
247 | 184 | ||
248 | no_dcbz32_off: | 185 | no_dcbz32_off: |
249 | 186 | ||
250 | /* XXX maybe skip on lightweight? */ | 187 | std r14, VCPU_GPR(r14)(r7) |
251 | std r14, VCPU_GPR(r14)(r12) | 188 | std r15, VCPU_GPR(r15)(r7) |
252 | std r15, VCPU_GPR(r15)(r12) | 189 | std r16, VCPU_GPR(r16)(r7) |
253 | std r16, VCPU_GPR(r16)(r12) | 190 | std r17, VCPU_GPR(r17)(r7) |
254 | std r17, VCPU_GPR(r17)(r12) | 191 | std r18, VCPU_GPR(r18)(r7) |
255 | std r18, VCPU_GPR(r18)(r12) | 192 | std r19, VCPU_GPR(r19)(r7) |
256 | std r19, VCPU_GPR(r19)(r12) | 193 | std r20, VCPU_GPR(r20)(r7) |
257 | std r20, VCPU_GPR(r20)(r12) | 194 | std r21, VCPU_GPR(r21)(r7) |
258 | std r21, VCPU_GPR(r21)(r12) | 195 | std r22, VCPU_GPR(r22)(r7) |
259 | std r22, VCPU_GPR(r22)(r12) | 196 | std r23, VCPU_GPR(r23)(r7) |
260 | std r23, VCPU_GPR(r23)(r12) | 197 | std r24, VCPU_GPR(r24)(r7) |
261 | std r24, VCPU_GPR(r24)(r12) | 198 | std r25, VCPU_GPR(r25)(r7) |
262 | std r25, VCPU_GPR(r25)(r12) | 199 | std r26, VCPU_GPR(r26)(r7) |
263 | std r26, VCPU_GPR(r26)(r12) | 200 | std r27, VCPU_GPR(r27)(r7) |
264 | std r27, VCPU_GPR(r27)(r12) | 201 | std r28, VCPU_GPR(r28)(r7) |
265 | std r28, VCPU_GPR(r28)(r12) | 202 | std r29, VCPU_GPR(r29)(r7) |
266 | std r29, VCPU_GPR(r29)(r12) | 203 | std r30, VCPU_GPR(r30)(r7) |
267 | std r30, VCPU_GPR(r30)(r12) | 204 | std r31, VCPU_GPR(r31)(r7) |
268 | std r31, VCPU_GPR(r31)(r12) | 205 | |
269 | 206 | /* Save guest CTR */ | |
270 | /* Restore non-volatile host registers (r14 - r31) */ | ||
271 | REST_NVGPRS(r1) | ||
272 | |||
273 | /* Save guest PC (R10) */ | ||
274 | std r10, VCPU_PC(r12) | ||
275 | |||
276 | /* Save guest msr (R11) */ | ||
277 | std r11, VCPU_SHADOW_MSR(r12) | ||
278 | |||
279 | /* Save guest CTR (in R12) */ | ||
280 | mfctr r5 | 207 | mfctr r5 |
281 | std r5, VCPU_CTR(r12) | 208 | std r5, VCPU_CTR(r7) |
282 | 209 | ||
283 | /* Save guest LR */ | 210 | /* Save guest LR */ |
284 | mflr r5 | 211 | mflr r5 |
285 | std r5, VCPU_LR(r12) | 212 | std r5, VCPU_LR(r7) |
286 | |||
287 | /* Save guest XER */ | ||
288 | mfxer r5 | ||
289 | std r5, VCPU_XER(r12) | ||
290 | |||
291 | /* Save guest DAR */ | ||
292 | ld r5, (PACA_EXMC+EX_DAR)(r13) | ||
293 | std r5, VCPU_FAULT_DEAR(r12) | ||
294 | |||
295 | /* Save guest DSISR */ | ||
296 | lwz r5, (PACA_EXMC+EX_DSISR)(r13) | ||
297 | std r5, VCPU_FAULT_DSISR(r12) | ||
298 | 213 | ||
299 | /* Restore host msr -> SRR1 */ | 214 | /* Restore host msr -> SRR1 */ |
300 | ld r7, VCPU_HOST_MSR(r12) | 215 | ld r6, VCPU_HOST_MSR(r7) |
301 | mtsrr1 r7 | ||
302 | |||
303 | /* Restore host IP -> SRR0 */ | ||
304 | ld r6, VCPU_HOST_RETIP(r12) | ||
305 | mtsrr0 r6 | ||
306 | 216 | ||
307 | /* | 217 | /* |
308 | * For some interrupts, we need to call the real Linux | 218 | * For some interrupts, we need to call the real Linux |
@@ -314,13 +224,14 @@ no_dcbz32_off: | |||
314 | * r3 = address of interrupt handler (exit reason) | 224 | * r3 = address of interrupt handler (exit reason) |
315 | */ | 225 | */ |
316 | 226 | ||
317 | cmpwi r3, BOOK3S_INTERRUPT_EXTERNAL | 227 | cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL |
318 | beq call_linux_handler | 228 | beq call_linux_handler |
319 | cmpwi r3, BOOK3S_INTERRUPT_DECREMENTER | 229 | cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER |
320 | beq call_linux_handler | 230 | beq call_linux_handler |
321 | 231 | ||
322 | /* Back to Interruptable Mode! (goto kvm_return_point) */ | 232 | /* Back to EE=1 */ |
323 | RFI | 233 | mtmsr r6 |
234 | b kvm_return_point | ||
324 | 235 | ||
325 | call_linux_handler: | 236 | call_linux_handler: |
326 | 237 | ||
@@ -333,16 +244,22 @@ call_linux_handler: | |||
333 | * interrupt handler! | 244 | * interrupt handler! |
334 | * | 245 | * |
335 | * R3 still contains the exit code, | 246 | * R3 still contains the exit code, |
336 | * R6 VCPU_HOST_RETIP and | 247 | * R5 VCPU_HOST_RETIP and |
337 | * R7 VCPU_HOST_MSR | 248 | * R6 VCPU_HOST_MSR |
338 | */ | 249 | */ |
339 | 250 | ||
340 | mtlr r3 | 251 | /* Restore host IP -> SRR0 */ |
252 | ld r5, VCPU_HOST_RETIP(r7) | ||
253 | |||
254 | /* XXX Better move to a safe function? | ||
255 | * What if we get an HTAB flush in between mtsrr0 and mtsrr1? */ | ||
341 | 256 | ||
342 | ld r5, VCPU_TRAMPOLINE_LOWMEM(r12) | 257 | mtlr r12 |
343 | mtsrr0 r5 | 258 | |
344 | LOAD_REG_IMMEDIATE(r5, MSR_KERNEL & ~(MSR_IR | MSR_DR)) | 259 | ld r4, VCPU_TRAMPOLINE_LOWMEM(r7) |
345 | mtsrr1 r5 | 260 | mtsrr0 r4 |
261 | LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)) | ||
262 | mtsrr1 r3 | ||
346 | 263 | ||
347 | RFI | 264 | RFI |
348 | 265 | ||
@@ -351,42 +268,51 @@ kvm_return_point: | |||
351 | 268 | ||
352 | /* Jump back to lightweight entry if we're supposed to */ | 269 | /* Jump back to lightweight entry if we're supposed to */ |
353 | /* go back into the guest */ | 270 | /* go back into the guest */ |
354 | mr r5, r3 | 271 | |
272 | /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ | ||
273 | mr r5, r12 | ||
274 | |||
355 | /* Restore r3 (kvm_run) and r4 (vcpu) */ | 275 | /* Restore r3 (kvm_run) and r4 (vcpu) */ |
356 | REST_2GPRS(3, r1) | 276 | REST_2GPRS(3, r1) |
357 | bl KVMPPC_HANDLE_EXIT | 277 | bl KVMPPC_HANDLE_EXIT |
358 | 278 | ||
359 | #if 0 /* XXX get lightweight exits back */ | 279 | /* If RESUME_GUEST, get back in the loop */ |
360 | cmpwi r3, RESUME_GUEST | 280 | cmpwi r3, RESUME_GUEST |
361 | bne kvm_exit_heavyweight | 281 | beq kvm_loop_lightweight |
362 | 282 | ||
363 | /* put VCPU and KVM_RUN back into place and roll again! */ | 283 | cmpwi r3, RESUME_GUEST_NV |
364 | REST_2GPRS(3, r1) | 284 | beq kvm_loop_heavyweight |
365 | b kvm_start_lightweight | ||
366 | 285 | ||
367 | kvm_exit_heavyweight: | 286 | kvm_exit_loop: |
368 | /* Restore non-volatile host registers */ | ||
369 | ld r14, _LINK(r1) | ||
370 | mtlr r14 | ||
371 | REST_NVGPRS(r1) | ||
372 | 287 | ||
373 | addi r1, r1, SWITCH_FRAME_SIZE | ||
374 | #else | ||
375 | ld r4, _LINK(r1) | 288 | ld r4, _LINK(r1) |
376 | mtlr r4 | 289 | mtlr r4 |
377 | 290 | ||
378 | cmpwi r3, RESUME_GUEST | 291 | /* Restore non-volatile host registers (r14 - r31) */ |
379 | bne kvm_exit_heavyweight | 292 | REST_NVGPRS(r1) |
293 | |||
294 | addi r1, r1, SWITCH_FRAME_SIZE | ||
295 | blr | ||
296 | |||
297 | kvm_loop_heavyweight: | ||
298 | |||
299 | ld r4, _LINK(r1) | ||
300 | std r4, (16 + SWITCH_FRAME_SIZE)(r1) | ||
380 | 301 | ||
302 | /* Load vcpu and cpu_run */ | ||
381 | REST_2GPRS(3, r1) | 303 | REST_2GPRS(3, r1) |
382 | 304 | ||
383 | addi r1, r1, SWITCH_FRAME_SIZE | 305 | /* Load non-volatile guest state from the vcpu */ |
306 | VCPU_LOAD_NVGPRS(r4) | ||
384 | 307 | ||
385 | b kvm_start_entry | 308 | /* Jump back into the beginning of this function */ |
309 | b kvm_start_lightweight | ||
386 | 310 | ||
387 | kvm_exit_heavyweight: | 311 | kvm_loop_lightweight: |
388 | 312 | ||
389 | addi r1, r1, SWITCH_FRAME_SIZE | 313 | /* We'll need the vcpu pointer */ |
390 | #endif | 314 | REST_GPR(4, r1) |
315 | |||
316 | /* Jump back into the beginning of this function */ | ||
317 | b kvm_start_lightweight | ||
391 | 318 | ||
392 | blr | ||
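
The rewritten entry/exit path above reduces to a small resume loop: a lightweight re-entry when kvmppc_handle_exit() returns RESUME_GUEST, a heavyweight one (non-volatile GPRs reloaded from the vcpu) for RESUME_GUEST_NV, and a return to the host otherwise. A minimal C sketch of that control flow follows; kvmppc_handle_exit(), RESUME_GUEST and RESUME_GUEST_NV are real kernel symbols, while enter_guest_lightweight() and vcpu_load_nvgprs() are illustrative stand-ins for the assembly labels kvm_start_lightweight and VCPU_LOAD_NVGPRS.

static int kvm_entry_loop(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int exit_nr, r;

	do {
		/* kvm_start_lightweight: run the guest until it traps */
		exit_nr = enter_guest_lightweight(vcpu);

		/* the exit id travels in r12 and is passed as the 3rd argument (r5) */
		r = kvmppc_handle_exit(run, vcpu, exit_nr);

		/* kvm_loop_heavyweight: reload r14-r31 from the vcpu first */
		if (r == RESUME_GUEST_NV)
			vcpu_load_nvgprs(vcpu);
	} while (r == RESUME_GUEST || r == RESUME_GUEST_NV);

	return r;	/* kvm_exit_loop: restore host NVGPRs and blr */
}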
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index e4beeb371a73..512dcff77554 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c | |||
@@ -54,7 +54,7 @@ static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe( | |||
54 | if (!vcpu_book3s->slb[i].valid) | 54 | if (!vcpu_book3s->slb[i].valid) |
55 | continue; | 55 | continue; |
56 | 56 | ||
57 | if (vcpu_book3s->slb[i].large) | 57 | if (vcpu_book3s->slb[i].tb) |
58 | cmp_esid = esid_1t; | 58 | cmp_esid = esid_1t; |
59 | 59 | ||
60 | if (vcpu_book3s->slb[i].esid == cmp_esid) | 60 | if (vcpu_book3s->slb[i].esid == cmp_esid) |
@@ -65,9 +65,10 @@ static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe( | |||
65 | eaddr, esid, esid_1t); | 65 | eaddr, esid, esid_1t); |
66 | for (i = 0; i < vcpu_book3s->slb_nr; i++) { | 66 | for (i = 0; i < vcpu_book3s->slb_nr; i++) { |
67 | if (vcpu_book3s->slb[i].vsid) | 67 | if (vcpu_book3s->slb[i].vsid) |
68 | dprintk(" %d: %c%c %llx %llx\n", i, | 68 | dprintk(" %d: %c%c%c %llx %llx\n", i, |
69 | vcpu_book3s->slb[i].valid ? 'v' : ' ', | 69 | vcpu_book3s->slb[i].valid ? 'v' : ' ', |
70 | vcpu_book3s->slb[i].large ? 'l' : ' ', | 70 | vcpu_book3s->slb[i].large ? 'l' : ' ', |
71 | vcpu_book3s->slb[i].tb ? 't' : ' ', | ||
71 | vcpu_book3s->slb[i].esid, | 72 | vcpu_book3s->slb[i].esid, |
72 | vcpu_book3s->slb[i].vsid); | 73 | vcpu_book3s->slb[i].vsid); |
73 | } | 74 | } |
@@ -84,7 +85,7 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
84 | if (!slb) | 85 | if (!slb) |
85 | return 0; | 86 | return 0; |
86 | 87 | ||
87 | if (slb->large) | 88 | if (slb->tb) |
88 | return (((u64)eaddr >> 12) & 0xfffffff) | | 89 | return (((u64)eaddr >> 12) & 0xfffffff) | |
89 | (((u64)slb->vsid) << 28); | 90 | (((u64)slb->vsid) << 28); |
90 | 91 | ||
@@ -309,7 +310,8 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb) | |||
309 | slbe = &vcpu_book3s->slb[slb_nr]; | 310 | slbe = &vcpu_book3s->slb[slb_nr]; |
310 | 311 | ||
311 | slbe->large = (rs & SLB_VSID_L) ? 1 : 0; | 312 | slbe->large = (rs & SLB_VSID_L) ? 1 : 0; |
312 | slbe->esid = slbe->large ? esid_1t : esid; | 313 | slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0; |
314 | slbe->esid = slbe->tb ? esid_1t : esid; | ||
313 | slbe->vsid = rs >> 12; | 315 | slbe->vsid = rs >> 12; |
314 | slbe->valid = (rb & SLB_ESID_V) ? 1 : 0; | 316 | slbe->valid = (rb & SLB_ESID_V) ? 1 : 0; |
315 | slbe->Ks = (rs & SLB_VSID_KS) ? 1 : 0; | 317 | slbe->Ks = (rs & SLB_VSID_KS) ? 1 : 0; |
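
The book3s_64_mmu.c hunks separate "large page" (SLB_VSID_L) from "1T segment" (SLB_VSID_B_1T): the ESID comparison and the virtual-page computation have to key off the segment size, not the page size. A sketch of the slbmte decode under the usual 28-bit/40-bit segment shifts; the struct below is an illustrative stand-in for struct kvmppc_slb, and the mask names are the real SLB definitions.

struct slb_shadow_entry {			/* illustrative stand-in */
	u64 esid, vsid;
	u8 valid, large, tb;
};

static void decode_slbmte(struct slb_shadow_entry *slbe, u64 rs, u64 rb)
{
	u64 esid    = rb >> 28;			/* 256MB segment: SID_SHIFT */
	u64 esid_1t = rb >> 40;			/* 1TB segment:  SID_SHIFT_1T */

	slbe->large = (rs & SLB_VSID_L) ? 1 : 0;	/* page size */
	slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;	/* segment size */
	slbe->esid  = slbe->tb ? esid_1t : esid;	/* key off tb, not large */
	slbe->vsid  = rs >> 12;
	slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
}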
diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_64_rmhandlers.S index fb7dd2e9ac88..c83c60ad96c5 100644 --- a/arch/powerpc/kvm/book3s_64_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_64_rmhandlers.S | |||
@@ -45,36 +45,25 @@ kvmppc_trampoline_\intno: | |||
45 | * To distinguish, we check a magic byte in the PACA | 45 | * To distinguish, we check a magic byte in the PACA |
46 | */ | 46 | */ |
47 | mfspr r13, SPRN_SPRG_PACA /* r13 = PACA */ | 47 | mfspr r13, SPRN_SPRG_PACA /* r13 = PACA */ |
48 | std r12, (PACA_EXMC + EX_R12)(r13) | 48 | std r12, PACA_KVM_SCRATCH0(r13) |
49 | mfcr r12 | 49 | mfcr r12 |
50 | stw r12, (PACA_EXMC + EX_CCR)(r13) | 50 | stw r12, PACA_KVM_SCRATCH1(r13) |
51 | lbz r12, PACA_KVM_IN_GUEST(r13) | 51 | lbz r12, PACA_KVM_IN_GUEST(r13) |
52 | cmpwi r12, 0 | 52 | cmpwi r12, KVM_GUEST_MODE_NONE |
53 | bne ..kvmppc_handler_hasmagic_\intno | 53 | bne ..kvmppc_handler_hasmagic_\intno |
54 | /* No KVM guest? Then jump back to the Linux handler! */ | 54 | /* No KVM guest? Then jump back to the Linux handler! */ |
55 | lwz r12, (PACA_EXMC + EX_CCR)(r13) | 55 | lwz r12, PACA_KVM_SCRATCH1(r13) |
56 | mtcr r12 | 56 | mtcr r12 |
57 | ld r12, (PACA_EXMC + EX_R12)(r13) | 57 | ld r12, PACA_KVM_SCRATCH0(r13) |
58 | mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */ | 58 | mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */ |
59 | b kvmppc_resume_\intno /* Get back original handler */ | 59 | b kvmppc_resume_\intno /* Get back original handler */ |
60 | 60 | ||
61 | /* Now we know we're handling a KVM guest */ | 61 | /* Now we know we're handling a KVM guest */ |
62 | ..kvmppc_handler_hasmagic_\intno: | 62 | ..kvmppc_handler_hasmagic_\intno: |
63 | /* Unset guest state */ | ||
64 | li r12, 0 | ||
65 | stb r12, PACA_KVM_IN_GUEST(r13) | ||
66 | 63 | ||
67 | std r1, (PACA_EXMC+EX_R9)(r13) | 64 | /* Should we just skip the faulting instruction? */ |
68 | std r10, (PACA_EXMC+EX_R10)(r13) | 65 | cmpwi r12, KVM_GUEST_MODE_SKIP |
69 | std r11, (PACA_EXMC+EX_R11)(r13) | 66 | beq kvmppc_handler_skip_ins |
70 | std r2, (PACA_EXMC+EX_R13)(r13) | ||
71 | |||
72 | mfsrr0 r10 | ||
73 | mfsrr1 r11 | ||
74 | |||
75 | /* Restore R1/R2 so we can handle faults */ | ||
76 | ld r1, PACAR1(r13) | ||
77 | ld r2, (PACA_EXMC+EX_SRR0)(r13) | ||
78 | 67 | ||
79 | /* Let's store which interrupt we're handling */ | 68 | /* Let's store which interrupt we're handling */ |
80 | li r12, \intno | 69 | li r12, \intno |
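
The trampoline above now dispatches on a small mode byte in the PACA instead of ad-hoc register spills. In C terms (the KVM_GUEST_MODE_* constants are real; the mode parameter and helper names are illustrative, since the real code never leaves assembly):

static void trampoline_dispatch(u8 kvm_guest_mode, int intno)
{
	switch (kvm_guest_mode) {	/* lbz r12, PACA_KVM_IN_GUEST(r13) */
	case KVM_GUEST_MODE_NONE:
		/* Not a KVM exit: restore scratch regs, take the Linux handler */
		resume_linux_handler(intno);
		break;
	case KVM_GUEST_MODE_SKIP:
		/* A hypervisor-side access faulted: just step over it */
		skip_faulting_instruction();
		break;
	default:			/* KVM_GUEST_MODE_GUEST */
		/* Record the interrupt number and run the KVM exit path */
		kvm_handle_guest_exit(intno);
		break;
	}
}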
@@ -102,23 +91,107 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC | |||
102 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX | 91 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX |
103 | 92 | ||
104 | /* | 93 | /* |
94 | * Bring us back to the faulting code, but skip the | ||
95 | * faulting instruction. | ||
96 | * | ||
97 | * This is a generic exit path from the interrupt | ||
98 | * trampolines above. | ||
99 | * | ||
100 | * Input Registers: | ||
101 | * | ||
102 | * R12 = free | ||
103 | * R13 = PACA | ||
104 | * PACA.KVM.SCRATCH0 = guest R12 | ||
105 | * PACA.KVM.SCRATCH1 = guest CR | ||
106 | * SPRG_SCRATCH0 = guest R13 | ||
107 | * | ||
108 | */ | ||
109 | kvmppc_handler_skip_ins: | ||
110 | |||
111 | /* Patch the IP to the next instruction */ | ||
112 | mfsrr0 r12 | ||
113 | addi r12, r12, 4 | ||
114 | mtsrr0 r12 | ||
115 | |||
116 | /* Clean up all state */ | ||
117 | lwz r12, PACA_KVM_SCRATCH1(r13) | ||
118 | mtcr r12 | ||
119 | ld r12, PACA_KVM_SCRATCH0(r13) | ||
120 | mfspr r13, SPRN_SPRG_SCRATCH0 | ||
121 | |||
122 | /* And get back into the code */ | ||
123 | RFI | ||
124 | |||
125 | /* | ||
105 | * This trampoline brings us back to a real mode handler | 126 | * This trampoline brings us back to a real mode handler |
106 | * | 127 | * |
107 | * Input Registers: | 128 | * Input Registers: |
108 | * | 129 | * |
109 | * R6 = SRR0 | 130 | * R5 = SRR0 |
110 | * R7 = SRR1 | 131 | * R6 = SRR1 |
111 | * LR = real-mode IP | 132 | * LR = real-mode IP |
112 | * | 133 | * |
113 | */ | 134 | */ |
114 | .global kvmppc_handler_lowmem_trampoline | 135 | .global kvmppc_handler_lowmem_trampoline |
115 | kvmppc_handler_lowmem_trampoline: | 136 | kvmppc_handler_lowmem_trampoline: |
116 | 137 | ||
117 | mtsrr0 r6 | 138 | mtsrr0 r5 |
118 | mtsrr1 r7 | 139 | mtsrr1 r6 |
119 | blr | 140 | blr |
120 | kvmppc_handler_lowmem_trampoline_end: | 141 | kvmppc_handler_lowmem_trampoline_end: |
121 | 142 | ||
143 | /* | ||
144 | * Call a function in real mode | ||
145 | * | ||
146 | * Input Registers: | ||
147 | * | ||
148 | * R3 = function | ||
149 | * R4 = MSR | ||
150 | * R5 = CTR | ||
151 | * | ||
152 | */ | ||
153 | _GLOBAL(kvmppc_rmcall) | ||
154 | mtmsr r4 /* Disable relocation, so mtsrr | ||
155 | doesn't get interrupted */ | ||
156 | mtctr r5 | ||
157 | mtsrr0 r3 | ||
158 | mtsrr1 r4 | ||
159 | RFI | ||
160 | |||
161 | /* | ||
162 | * Activate current's external feature (FPU/Altivec/VSX) | ||
163 | */ | ||
164 | #define define_load_up(what) \ | ||
165 | \ | ||
166 | _GLOBAL(kvmppc_load_up_ ## what); \ | ||
167 | subi r1, r1, INT_FRAME_SIZE; \ | ||
168 | mflr r3; \ | ||
169 | std r3, _LINK(r1); \ | ||
170 | mfmsr r4; \ | ||
171 | std r31, GPR3(r1); \ | ||
172 | mr r31, r4; \ | ||
173 | li r5, MSR_DR; \ | ||
174 | oris r5, r5, MSR_EE@h; \ | ||
175 | andc r4, r4, r5; \ | ||
176 | mtmsr r4; \ | ||
177 | \ | ||
178 | bl .load_up_ ## what; \ | ||
179 | \ | ||
180 | mtmsr r31; \ | ||
181 | ld r3, _LINK(r1); \ | ||
182 | ld r31, GPR3(r1); \ | ||
183 | addi r1, r1, INT_FRAME_SIZE; \ | ||
184 | mtlr r3; \ | ||
185 | blr | ||
186 | |||
187 | define_load_up(fpu) | ||
188 | #ifdef CONFIG_ALTIVEC | ||
189 | define_load_up(altivec) | ||
190 | #endif | ||
191 | #ifdef CONFIG_VSX | ||
192 | define_load_up(vsx) | ||
193 | #endif | ||
194 | |||
122 | .global kvmppc_trampoline_lowmem | 195 | .global kvmppc_trampoline_lowmem |
123 | kvmppc_trampoline_lowmem: | 196 | kvmppc_trampoline_lowmem: |
124 | .long kvmppc_handler_lowmem_trampoline - _stext | 197 | .long kvmppc_handler_lowmem_trampoline - _stext |
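
define_load_up() generates thin wrappers (kvmppc_load_up_fpu and friends) that run the kernel's own load_up_* code with external interrupts and data relocation turned off. Roughly, in C; this is a model only, since the real wrappers are the assembly macro above, load_up_fpu is an asm entry point rather than a C function, and read_msr()/write_msr()/load_up_fpu_core() are stand-in names for the mfmsr/mtmsr/bl sequence.

static void kvmppc_load_up_fpu_model(void)
{
	unsigned long msr = read_msr();			/* mfmsr */

	/* Run load_up_fpu with MSR_EE and MSR_DR cleared, as the macro does */
	write_msr(msr & ~(MSR_EE | MSR_DR));
	load_up_fpu_core();				/* bl .load_up_fpu */
	write_msr(msr);					/* restore the caller's MSR */
}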
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S index ecd237a03fd0..35b762722187 100644 --- a/arch/powerpc/kvm/book3s_64_slb.S +++ b/arch/powerpc/kvm/book3s_64_slb.S | |||
@@ -31,7 +31,7 @@ | |||
31 | #define REBOLT_SLB_ENTRY(num) \ | 31 | #define REBOLT_SLB_ENTRY(num) \ |
32 | ld r10, SHADOW_SLB_ESID(num)(r11); \ | 32 | ld r10, SHADOW_SLB_ESID(num)(r11); \ |
33 | cmpdi r10, 0; \ | 33 | cmpdi r10, 0; \ |
34 | beq slb_exit_skip_1; \ | 34 | beq slb_exit_skip_ ## num; \ |
35 | oris r10, r10, SLB_ESID_V@h; \ | 35 | oris r10, r10, SLB_ESID_V@h; \ |
36 | ld r9, SHADOW_SLB_VSID(num)(r11); \ | 36 | ld r9, SHADOW_SLB_VSID(num)(r11); \ |
37 | slbmte r9, r10; \ | 37 | slbmte r9, r10; \ |
@@ -51,23 +51,21 @@ kvmppc_handler_trampoline_enter: | |||
51 | * | 51 | * |
52 | * MSR = ~IR|DR | 52 | * MSR = ~IR|DR |
53 | * R13 = PACA | 53 | * R13 = PACA |
54 | * R1 = host R1 | ||
55 | * R2 = host R2 | ||
54 | * R9 = guest IP | 56 | * R9 = guest IP |
55 | * R10 = guest MSR | 57 | * R10 = guest MSR |
56 | * R11 = free | 58 | * all other GPRS = free |
57 | * R12 = free | 59 | * PACA[KVM_CR] = guest CR |
58 | * PACA[PACA_EXMC + EX_R9] = guest R9 | 60 | * PACA[KVM_XER] = guest XER |
59 | * PACA[PACA_EXMC + EX_R10] = guest R10 | ||
60 | * PACA[PACA_EXMC + EX_R11] = guest R11 | ||
61 | * PACA[PACA_EXMC + EX_R12] = guest R12 | ||
62 | * PACA[PACA_EXMC + EX_R13] = guest R13 | ||
63 | * PACA[PACA_EXMC + EX_CCR] = guest CR | ||
64 | * PACA[PACA_EXMC + EX_R3] = guest XER | ||
65 | */ | 61 | */ |
66 | 62 | ||
67 | mtsrr0 r9 | 63 | mtsrr0 r9 |
68 | mtsrr1 r10 | 64 | mtsrr1 r10 |
69 | 65 | ||
70 | mtspr SPRN_SPRG_SCRATCH0, r0 | 66 | /* Activate guest mode, so faults get handled by KVM */ |
67 | li r11, KVM_GUEST_MODE_GUEST | ||
68 | stb r11, PACA_KVM_IN_GUEST(r13) | ||
71 | 69 | ||
72 | /* Remove LPAR shadow entries */ | 70 | /* Remove LPAR shadow entries */ |
73 | 71 | ||
@@ -131,20 +129,27 @@ slb_do_enter: | |||
131 | 129 | ||
132 | /* Enter guest */ | 130 | /* Enter guest */ |
133 | 131 | ||
134 | mfspr r0, SPRN_SPRG_SCRATCH0 | 132 | ld r0, (PACA_KVM_R0)(r13) |
135 | 133 | ld r1, (PACA_KVM_R1)(r13) | |
136 | ld r9, (PACA_EXMC+EX_R9)(r13) | 134 | ld r2, (PACA_KVM_R2)(r13) |
137 | ld r10, (PACA_EXMC+EX_R10)(r13) | 135 | ld r3, (PACA_KVM_R3)(r13) |
138 | ld r12, (PACA_EXMC+EX_R12)(r13) | 136 | ld r4, (PACA_KVM_R4)(r13) |
139 | 137 | ld r5, (PACA_KVM_R5)(r13) | |
140 | lwz r11, (PACA_EXMC+EX_CCR)(r13) | 138 | ld r6, (PACA_KVM_R6)(r13) |
139 | ld r7, (PACA_KVM_R7)(r13) | ||
140 | ld r8, (PACA_KVM_R8)(r13) | ||
141 | ld r9, (PACA_KVM_R9)(r13) | ||
142 | ld r10, (PACA_KVM_R10)(r13) | ||
143 | ld r12, (PACA_KVM_R12)(r13) | ||
144 | |||
145 | lwz r11, (PACA_KVM_CR)(r13) | ||
141 | mtcr r11 | 146 | mtcr r11 |
142 | 147 | ||
143 | ld r11, (PACA_EXMC+EX_R3)(r13) | 148 | ld r11, (PACA_KVM_XER)(r13) |
144 | mtxer r11 | 149 | mtxer r11 |
145 | 150 | ||
146 | ld r11, (PACA_EXMC+EX_R11)(r13) | 151 | ld r11, (PACA_KVM_R11)(r13) |
147 | ld r13, (PACA_EXMC+EX_R13)(r13) | 152 | ld r13, (PACA_KVM_R13)(r13) |
148 | 153 | ||
149 | RFI | 154 | RFI |
150 | kvmppc_handler_trampoline_enter_end: | 155 | kvmppc_handler_trampoline_enter_end: |
@@ -162,28 +167,54 @@ kvmppc_handler_trampoline_exit: | |||
162 | 167 | ||
163 | /* Register usage at this point: | 168 | /* Register usage at this point: |
164 | * | 169 | * |
165 | * SPRG_SCRATCH0 = guest R13 | 170 | * SPRG_SCRATCH0 = guest R13 |
166 | * R01 = host R1 | 171 | * R12 = exit handler id |
167 | * R02 = host R2 | 172 | * R13 = PACA |
168 | * R10 = guest PC | 173 | * PACA.KVM.SCRATCH0 = guest R12 |
169 | * R11 = guest MSR | 174 | * PACA.KVM.SCRATCH1 = guest CR |
170 | * R12 = exit handler id | ||
171 | * R13 = PACA | ||
172 | * PACA.exmc.CCR = guest CR | ||
173 | * PACA.exmc.R9 = guest R1 | ||
174 | * PACA.exmc.R10 = guest R10 | ||
175 | * PACA.exmc.R11 = guest R11 | ||
176 | * PACA.exmc.R12 = guest R12 | ||
177 | * PACA.exmc.R13 = guest R2 | ||
178 | * | 175 | * |
179 | */ | 176 | */ |
180 | 177 | ||
181 | /* Save registers */ | 178 | /* Save registers */ |
182 | 179 | ||
183 | std r0, (PACA_EXMC+EX_SRR0)(r13) | 180 | std r0, PACA_KVM_R0(r13) |
184 | std r9, (PACA_EXMC+EX_R3)(r13) | 181 | std r1, PACA_KVM_R1(r13) |
185 | std r10, (PACA_EXMC+EX_LR)(r13) | 182 | std r2, PACA_KVM_R2(r13) |
186 | std r11, (PACA_EXMC+EX_DAR)(r13) | 183 | std r3, PACA_KVM_R3(r13) |
184 | std r4, PACA_KVM_R4(r13) | ||
185 | std r5, PACA_KVM_R5(r13) | ||
186 | std r6, PACA_KVM_R6(r13) | ||
187 | std r7, PACA_KVM_R7(r13) | ||
188 | std r8, PACA_KVM_R8(r13) | ||
189 | std r9, PACA_KVM_R9(r13) | ||
190 | std r10, PACA_KVM_R10(r13) | ||
191 | std r11, PACA_KVM_R11(r13) | ||
192 | |||
193 | /* Restore R1/R2 so we can handle faults */ | ||
194 | ld r1, PACA_KVM_HOST_R1(r13) | ||
195 | ld r2, PACA_KVM_HOST_R2(r13) | ||
196 | |||
197 | /* Save guest PC and MSR in GPRs */ | ||
198 | mfsrr0 r3 | ||
199 | mfsrr1 r4 | ||
200 | |||
201 | /* Get scratch'ed off registers */ | ||
202 | mfspr r9, SPRN_SPRG_SCRATCH0 | ||
203 | std r9, PACA_KVM_R13(r13) | ||
204 | |||
205 | ld r8, PACA_KVM_SCRATCH0(r13) | ||
206 | std r8, PACA_KVM_R12(r13) | ||
207 | |||
208 | lwz r7, PACA_KVM_SCRATCH1(r13) | ||
209 | stw r7, PACA_KVM_CR(r13) | ||
210 | |||
211 | /* Save more register state */ | ||
212 | |||
213 | mfxer r6 | ||
214 | stw r6, PACA_KVM_XER(r13) | ||
215 | |||
216 | mfdar r5 | ||
217 | mfdsisr r6 | ||
187 | 218 | ||
188 | /* | 219 | /* |
189 | * In order for us to easily get the last instruction, | 220 | * In order for us to easily get the last instruction, |
@@ -202,17 +233,28 @@ kvmppc_handler_trampoline_exit: | |||
202 | 233 | ||
203 | ld_last_inst: | 234 | ld_last_inst: |
204 | /* Save off the guest instruction we're at */ | 235 | /* Save off the guest instruction we're at */ |
236 | |||
237 | /* Set guest mode to 'jump over instruction' so if lwz faults | ||
238 | * we'll just continue at the next IP. */ | ||
239 | li r9, KVM_GUEST_MODE_SKIP | ||
240 | stb r9, PACA_KVM_IN_GUEST(r13) | ||
241 | |||
205 | /* 1) enable paging for data */ | 242 | /* 1) enable paging for data */ |
206 | mfmsr r9 | 243 | mfmsr r9 |
207 | ori r11, r9, MSR_DR /* Enable paging for data */ | 244 | ori r11, r9, MSR_DR /* Enable paging for data */ |
208 | mtmsr r11 | 245 | mtmsr r11 |
209 | /* 2) fetch the instruction */ | 246 | /* 2) fetch the instruction */ |
210 | lwz r0, 0(r10) | 247 | li r0, KVM_INST_FETCH_FAILED /* In case lwz faults */ |
248 | lwz r0, 0(r3) | ||
211 | /* 3) disable paging again */ | 249 | /* 3) disable paging again */ |
212 | mtmsr r9 | 250 | mtmsr r9 |
213 | 251 | ||
214 | no_ld_last_inst: | 252 | no_ld_last_inst: |
215 | 253 | ||
254 | /* Unset guest mode */ | ||
255 | li r9, KVM_GUEST_MODE_NONE | ||
256 | stb r9, PACA_KVM_IN_GUEST(r13) | ||
257 | |||
216 | /* Restore bolted entries from the shadow and fix it along the way */ | 258 | /* Restore bolted entries from the shadow and fix it along the way */ |
217 | 259 | ||
218 | /* We don't store anything in entry 0, so we don't need to take care of it */ | 260 | /* We don't store anything in entry 0, so we don't need to take care of it */ |
@@ -233,29 +275,27 @@ no_ld_last_inst: | |||
233 | 275 | ||
234 | slb_do_exit: | 276 | slb_do_exit: |
235 | 277 | ||
236 | /* Restore registers */ | 278 | /* Register usage at this point: |
237 | 279 | * | |
238 | ld r11, (PACA_EXMC+EX_DAR)(r13) | 280 | * R0 = guest last inst |
239 | ld r10, (PACA_EXMC+EX_LR)(r13) | 281 | * R1 = host R1 |
240 | ld r9, (PACA_EXMC+EX_R3)(r13) | 282 | * R2 = host R2 |
241 | 283 | * R3 = guest PC | |
242 | /* Save last inst */ | 284 | * R4 = guest MSR |
243 | stw r0, (PACA_EXMC+EX_LR)(r13) | 285 | * R5 = guest DAR |
244 | 286 | * R6 = guest DSISR | |
245 | /* Save DAR and DSISR before going to paged mode */ | 287 | * R12 = exit handler id |
246 | mfdar r0 | 288 | * R13 = PACA |
247 | std r0, (PACA_EXMC+EX_DAR)(r13) | 289 | * PACA.KVM.* = guest * |
248 | mfdsisr r0 | 290 | * |
249 | stw r0, (PACA_EXMC+EX_DSISR)(r13) | 291 | */ |
250 | 292 | ||
251 | /* RFI into the highmem handler */ | 293 | /* RFI into the highmem handler */ |
252 | mfmsr r0 | 294 | mfmsr r7 |
253 | ori r0, r0, MSR_IR|MSR_DR|MSR_RI /* Enable paging */ | 295 | ori r7, r7, MSR_IR|MSR_DR|MSR_RI /* Enable paging */ |
254 | mtsrr1 r0 | 296 | mtsrr1 r7 |
255 | ld r0, PACASAVEDMSR(r13) /* Highmem handler address */ | 297 | ld r8, PACA_KVM_VMHANDLER(r13) /* Highmem handler address */ |
256 | mtsrr0 r0 | 298 | mtsrr0 r8 |
257 | |||
258 | mfspr r0, SPRN_SPRG_SCRATCH0 | ||
259 | 299 | ||
260 | RFI | 300 | RFI |
261 | kvmppc_handler_trampoline_exit_end: | 301 | kvmppc_handler_trampoline_exit_end: |
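
The exit trampoline now fetches the faulting guest instruction defensively: it preloads KVM_INST_FETCH_FAILED, flips the PACA mode byte to KVM_GUEST_MODE_SKIP so a fault on the lwz merely skips it, and only then enables data relocation for the load. A sketch of that sequence; the helper names stand in for the mtmsr/lwz instructions.

static u32 fetch_last_guest_inst(u8 *guest_mode, unsigned long guest_pc)
{
	u32 inst = KVM_INST_FETCH_FAILED;	/* result if the load faults */

	*guest_mode = KVM_GUEST_MODE_SKIP;	/* a fault now just skips the lwz */
	enable_data_relocation();		/* mtmsr with MSR_DR set */
	inst = load_guest_word(guest_pc);	/* lwz r0, 0(r3); may fault */
	disable_data_relocation();		/* back to real-mode data access */
	*guest_mode = KVM_GUEST_MODE_NONE;	/* leave guest mode */

	return inst;
}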
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 06f5a9ecc42c..4d686cc6b260 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -69,10 +69,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) | |||
69 | 69 | ||
70 | for (i = 0; i < 32; i += 4) { | 70 | for (i = 0; i < 32; i += 4) { |
71 | printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i, | 71 | printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i, |
72 | vcpu->arch.gpr[i], | 72 | kvmppc_get_gpr(vcpu, i), |
73 | vcpu->arch.gpr[i+1], | 73 | kvmppc_get_gpr(vcpu, i+1), |
74 | vcpu->arch.gpr[i+2], | 74 | kvmppc_get_gpr(vcpu, i+2), |
75 | vcpu->arch.gpr[i+3]); | 75 | kvmppc_get_gpr(vcpu, i+3)); |
76 | } | 76 | } |
77 | } | 77 | } |
78 | 78 | ||
@@ -82,8 +82,32 @@ static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, | |||
82 | set_bit(priority, &vcpu->arch.pending_exceptions); | 82 | set_bit(priority, &vcpu->arch.pending_exceptions); |
83 | } | 83 | } |
84 | 84 | ||
85 | void kvmppc_core_queue_program(struct kvm_vcpu *vcpu) | 85 | static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, |
86 | ulong dear_flags, ulong esr_flags) | ||
86 | { | 87 | { |
88 | vcpu->arch.queued_dear = dear_flags; | ||
89 | vcpu->arch.queued_esr = esr_flags; | ||
90 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); | ||
91 | } | ||
92 | |||
93 | static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, | ||
94 | ulong dear_flags, ulong esr_flags) | ||
95 | { | ||
96 | vcpu->arch.queued_dear = dear_flags; | ||
97 | vcpu->arch.queued_esr = esr_flags; | ||
98 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); | ||
99 | } | ||
100 | |||
101 | static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, | ||
102 | ulong esr_flags) | ||
103 | { | ||
104 | vcpu->arch.queued_esr = esr_flags; | ||
105 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); | ||
106 | } | ||
107 | |||
108 | void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags) | ||
109 | { | ||
110 | vcpu->arch.queued_esr = esr_flags; | ||
87 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); | 111 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); |
88 | } | 112 | } |
89 | 113 | ||
@@ -97,6 +121,11 @@ int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) | |||
97 | return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); | 121 | return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); |
98 | } | 122 | } |
99 | 123 | ||
124 | void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) | ||
125 | { | ||
126 | clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); | ||
127 | } | ||
128 | |||
100 | void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, | 129 | void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, |
101 | struct kvm_interrupt *irq) | 130 | struct kvm_interrupt *irq) |
102 | { | 131 | { |
@@ -109,14 +138,19 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
109 | { | 138 | { |
110 | int allowed = 0; | 139 | int allowed = 0; |
111 | ulong msr_mask; | 140 | ulong msr_mask; |
141 | bool update_esr = false, update_dear = false; | ||
112 | 142 | ||
113 | switch (priority) { | 143 | switch (priority) { |
114 | case BOOKE_IRQPRIO_PROGRAM: | ||
115 | case BOOKE_IRQPRIO_DTLB_MISS: | 144 | case BOOKE_IRQPRIO_DTLB_MISS: |
116 | case BOOKE_IRQPRIO_ITLB_MISS: | ||
117 | case BOOKE_IRQPRIO_SYSCALL: | ||
118 | case BOOKE_IRQPRIO_DATA_STORAGE: | 145 | case BOOKE_IRQPRIO_DATA_STORAGE: |
146 | update_dear = true; | ||
147 | /* fall through */ | ||
119 | case BOOKE_IRQPRIO_INST_STORAGE: | 148 | case BOOKE_IRQPRIO_INST_STORAGE: |
149 | case BOOKE_IRQPRIO_PROGRAM: | ||
150 | update_esr = true; | ||
151 | /* fall through */ | ||
152 | case BOOKE_IRQPRIO_ITLB_MISS: | ||
153 | case BOOKE_IRQPRIO_SYSCALL: | ||
120 | case BOOKE_IRQPRIO_FP_UNAVAIL: | 154 | case BOOKE_IRQPRIO_FP_UNAVAIL: |
121 | case BOOKE_IRQPRIO_SPE_UNAVAIL: | 155 | case BOOKE_IRQPRIO_SPE_UNAVAIL: |
122 | case BOOKE_IRQPRIO_SPE_FP_DATA: | 156 | case BOOKE_IRQPRIO_SPE_FP_DATA: |
@@ -151,6 +185,10 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
151 | vcpu->arch.srr0 = vcpu->arch.pc; | 185 | vcpu->arch.srr0 = vcpu->arch.pc; |
152 | vcpu->arch.srr1 = vcpu->arch.msr; | 186 | vcpu->arch.srr1 = vcpu->arch.msr; |
153 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; | 187 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; |
188 | if (update_esr == true) | ||
189 | vcpu->arch.esr = vcpu->arch.queued_esr; | ||
190 | if (update_dear == true) | ||
191 | vcpu->arch.dear = vcpu->arch.queued_dear; | ||
154 | kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask); | 192 | kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask); |
155 | 193 | ||
156 | clear_bit(priority, &vcpu->arch.pending_exceptions); | 194 | clear_bit(priority, &vcpu->arch.pending_exceptions); |
@@ -223,8 +261,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
223 | if (vcpu->arch.msr & MSR_PR) { | 261 | if (vcpu->arch.msr & MSR_PR) { |
224 | /* Program traps generated by user-level software must be handled | 262 | /* Program traps generated by user-level software must be handled |
225 | * by the guest kernel. */ | 263 | * by the guest kernel. */ |
226 | vcpu->arch.esr = vcpu->arch.fault_esr; | 264 | kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); |
227 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); | ||
228 | r = RESUME_GUEST; | 265 | r = RESUME_GUEST; |
229 | kvmppc_account_exit(vcpu, USR_PR_INST); | 266 | kvmppc_account_exit(vcpu, USR_PR_INST); |
230 | break; | 267 | break; |
@@ -280,16 +317,14 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
280 | break; | 317 | break; |
281 | 318 | ||
282 | case BOOKE_INTERRUPT_DATA_STORAGE: | 319 | case BOOKE_INTERRUPT_DATA_STORAGE: |
283 | vcpu->arch.dear = vcpu->arch.fault_dear; | 320 | kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear, |
284 | vcpu->arch.esr = vcpu->arch.fault_esr; | 321 | vcpu->arch.fault_esr); |
285 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); | ||
286 | kvmppc_account_exit(vcpu, DSI_EXITS); | 322 | kvmppc_account_exit(vcpu, DSI_EXITS); |
287 | r = RESUME_GUEST; | 323 | r = RESUME_GUEST; |
288 | break; | 324 | break; |
289 | 325 | ||
290 | case BOOKE_INTERRUPT_INST_STORAGE: | 326 | case BOOKE_INTERRUPT_INST_STORAGE: |
291 | vcpu->arch.esr = vcpu->arch.fault_esr; | 327 | kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr); |
292 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); | ||
293 | kvmppc_account_exit(vcpu, ISI_EXITS); | 328 | kvmppc_account_exit(vcpu, ISI_EXITS); |
294 | r = RESUME_GUEST; | 329 | r = RESUME_GUEST; |
295 | break; | 330 | break; |
@@ -310,9 +345,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
310 | gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr); | 345 | gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr); |
311 | if (gtlb_index < 0) { | 346 | if (gtlb_index < 0) { |
312 | /* The guest didn't have a mapping for it. */ | 347 | /* The guest didn't have a mapping for it. */ |
313 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); | 348 | kvmppc_core_queue_dtlb_miss(vcpu, |
314 | vcpu->arch.dear = vcpu->arch.fault_dear; | 349 | vcpu->arch.fault_dear, |
315 | vcpu->arch.esr = vcpu->arch.fault_esr; | 350 | vcpu->arch.fault_esr); |
316 | kvmppc_mmu_dtlb_miss(vcpu); | 351 | kvmppc_mmu_dtlb_miss(vcpu); |
317 | kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS); | 352 | kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS); |
318 | r = RESUME_GUEST; | 353 | r = RESUME_GUEST; |
@@ -426,7 +461,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
426 | { | 461 | { |
427 | vcpu->arch.pc = 0; | 462 | vcpu->arch.pc = 0; |
428 | vcpu->arch.msr = 0; | 463 | vcpu->arch.msr = 0; |
429 | vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */ | 464 | kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ |
430 | 465 | ||
431 | vcpu->arch.shadow_pid = 1; | 466 | vcpu->arch.shadow_pid = 1; |
432 | 467 | ||
@@ -444,10 +479,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
444 | int i; | 479 | int i; |
445 | 480 | ||
446 | regs->pc = vcpu->arch.pc; | 481 | regs->pc = vcpu->arch.pc; |
447 | regs->cr = vcpu->arch.cr; | 482 | regs->cr = kvmppc_get_cr(vcpu); |
448 | regs->ctr = vcpu->arch.ctr; | 483 | regs->ctr = vcpu->arch.ctr; |
449 | regs->lr = vcpu->arch.lr; | 484 | regs->lr = vcpu->arch.lr; |
450 | regs->xer = vcpu->arch.xer; | 485 | regs->xer = kvmppc_get_xer(vcpu); |
451 | regs->msr = vcpu->arch.msr; | 486 | regs->msr = vcpu->arch.msr; |
452 | regs->srr0 = vcpu->arch.srr0; | 487 | regs->srr0 = vcpu->arch.srr0; |
453 | regs->srr1 = vcpu->arch.srr1; | 488 | regs->srr1 = vcpu->arch.srr1; |
@@ -461,7 +496,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
461 | regs->sprg7 = vcpu->arch.sprg6; | 496 | regs->sprg7 = vcpu->arch.sprg6; |
462 | 497 | ||
463 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | 498 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
464 | regs->gpr[i] = vcpu->arch.gpr[i]; | 499 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); |
465 | 500 | ||
466 | return 0; | 501 | return 0; |
467 | } | 502 | } |
@@ -471,10 +506,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
471 | int i; | 506 | int i; |
472 | 507 | ||
473 | vcpu->arch.pc = regs->pc; | 508 | vcpu->arch.pc = regs->pc; |
474 | vcpu->arch.cr = regs->cr; | 509 | kvmppc_set_cr(vcpu, regs->cr); |
475 | vcpu->arch.ctr = regs->ctr; | 510 | vcpu->arch.ctr = regs->ctr; |
476 | vcpu->arch.lr = regs->lr; | 511 | vcpu->arch.lr = regs->lr; |
477 | vcpu->arch.xer = regs->xer; | 512 | kvmppc_set_xer(vcpu, regs->xer); |
478 | kvmppc_set_msr(vcpu, regs->msr); | 513 | kvmppc_set_msr(vcpu, regs->msr); |
479 | vcpu->arch.srr0 = regs->srr0; | 514 | vcpu->arch.srr0 = regs->srr0; |
480 | vcpu->arch.srr1 = regs->srr1; | 515 | vcpu->arch.srr1 = regs->srr1; |
@@ -486,8 +521,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
486 | vcpu->arch.sprg6 = regs->sprg5; | 521 | vcpu->arch.sprg6 = regs->sprg5; |
487 | vcpu->arch.sprg7 = regs->sprg6; | 522 | vcpu->arch.sprg7 = regs->sprg6; |
488 | 523 | ||
489 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++) | 524 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
490 | vcpu->arch.gpr[i] = regs->gpr[i]; | 525 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); |
491 | 526 | ||
492 | return 0; | 527 | return 0; |
493 | } | 528 | } |
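
The booke.c changes stop writing DEAR/ESR at exit time; the values are latched as queued_dear/queued_esr when the interrupt is queued and only committed to the architected registers when kvmppc_booke_irqprio_deliver() actually injects it, so nothing delivered in between can clobber them. A condensed sketch of the two halves, using the field and function names from the hunks above:

static void queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear, ulong esr)
{
	vcpu->arch.queued_dear = dear;		/* latched, not yet visible */
	vcpu->arch.queued_esr  = esr;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void deliver(struct kvm_vcpu *vcpu, bool update_dear, bool update_esr)
{
	/* ...srr0/srr1/pc updated as in the hunk above... */
	if (update_esr)
		vcpu->arch.esr = vcpu->arch.queued_esr;
	if (update_dear)
		vcpu->arch.dear = vcpu->arch.queued_dear;
	/* ...msr mask applied, pending bit cleared... */
}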
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c index aebc65e93f4b..cbc790ee1928 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c | |||
@@ -62,20 +62,20 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
62 | 62 | ||
63 | case OP_31_XOP_MFMSR: | 63 | case OP_31_XOP_MFMSR: |
64 | rt = get_rt(inst); | 64 | rt = get_rt(inst); |
65 | vcpu->arch.gpr[rt] = vcpu->arch.msr; | 65 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.msr); |
66 | kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); | 66 | kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); |
67 | break; | 67 | break; |
68 | 68 | ||
69 | case OP_31_XOP_MTMSR: | 69 | case OP_31_XOP_MTMSR: |
70 | rs = get_rs(inst); | 70 | rs = get_rs(inst); |
71 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); | 71 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); |
72 | kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]); | 72 | kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs)); |
73 | break; | 73 | break; |
74 | 74 | ||
75 | case OP_31_XOP_WRTEE: | 75 | case OP_31_XOP_WRTEE: |
76 | rs = get_rs(inst); | 76 | rs = get_rs(inst); |
77 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) | 77 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) |
78 | | (vcpu->arch.gpr[rs] & MSR_EE); | 78 | | (kvmppc_get_gpr(vcpu, rs) & MSR_EE); |
79 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); | 79 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); |
80 | break; | 80 | break; |
81 | 81 | ||
@@ -101,22 +101,23 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
101 | int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | 101 | int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) |
102 | { | 102 | { |
103 | int emulated = EMULATE_DONE; | 103 | int emulated = EMULATE_DONE; |
104 | ulong spr_val = kvmppc_get_gpr(vcpu, rs); | ||
104 | 105 | ||
105 | switch (sprn) { | 106 | switch (sprn) { |
106 | case SPRN_DEAR: | 107 | case SPRN_DEAR: |
107 | vcpu->arch.dear = vcpu->arch.gpr[rs]; break; | 108 | vcpu->arch.dear = spr_val; break; |
108 | case SPRN_ESR: | 109 | case SPRN_ESR: |
109 | vcpu->arch.esr = vcpu->arch.gpr[rs]; break; | 110 | vcpu->arch.esr = spr_val; break; |
110 | case SPRN_DBCR0: | 111 | case SPRN_DBCR0: |
111 | vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break; | 112 | vcpu->arch.dbcr0 = spr_val; break; |
112 | case SPRN_DBCR1: | 113 | case SPRN_DBCR1: |
113 | vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break; | 114 | vcpu->arch.dbcr1 = spr_val; break; |
114 | case SPRN_DBSR: | 115 | case SPRN_DBSR: |
115 | vcpu->arch.dbsr &= ~vcpu->arch.gpr[rs]; break; | 116 | vcpu->arch.dbsr &= ~spr_val; break; |
116 | case SPRN_TSR: | 117 | case SPRN_TSR: |
117 | vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break; | 118 | vcpu->arch.tsr &= ~spr_val; break; |
118 | case SPRN_TCR: | 119 | case SPRN_TCR: |
119 | vcpu->arch.tcr = vcpu->arch.gpr[rs]; | 120 | vcpu->arch.tcr = spr_val; |
120 | kvmppc_emulate_dec(vcpu); | 121 | kvmppc_emulate_dec(vcpu); |
121 | break; | 122 | break; |
122 | 123 | ||
@@ -124,64 +125,64 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
124 | * loaded into the real SPRGs when resuming the | 125 | * loaded into the real SPRGs when resuming the |
125 | * guest. */ | 126 | * guest. */ |
126 | case SPRN_SPRG4: | 127 | case SPRN_SPRG4: |
127 | vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break; | 128 | vcpu->arch.sprg4 = spr_val; break; |
128 | case SPRN_SPRG5: | 129 | case SPRN_SPRG5: |
129 | vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break; | 130 | vcpu->arch.sprg5 = spr_val; break; |
130 | case SPRN_SPRG6: | 131 | case SPRN_SPRG6: |
131 | vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break; | 132 | vcpu->arch.sprg6 = spr_val; break; |
132 | case SPRN_SPRG7: | 133 | case SPRN_SPRG7: |
133 | vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break; | 134 | vcpu->arch.sprg7 = spr_val; break; |
134 | 135 | ||
135 | case SPRN_IVPR: | 136 | case SPRN_IVPR: |
136 | vcpu->arch.ivpr = vcpu->arch.gpr[rs]; | 137 | vcpu->arch.ivpr = spr_val; |
137 | break; | 138 | break; |
138 | case SPRN_IVOR0: | 139 | case SPRN_IVOR0: |
139 | vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = vcpu->arch.gpr[rs]; | 140 | vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val; |
140 | break; | 141 | break; |
141 | case SPRN_IVOR1: | 142 | case SPRN_IVOR1: |
142 | vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = vcpu->arch.gpr[rs]; | 143 | vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val; |
143 | break; | 144 | break; |
144 | case SPRN_IVOR2: | 145 | case SPRN_IVOR2: |
145 | vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = vcpu->arch.gpr[rs]; | 146 | vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val; |
146 | break; | 147 | break; |
147 | case SPRN_IVOR3: | 148 | case SPRN_IVOR3: |
148 | vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = vcpu->arch.gpr[rs]; | 149 | vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val; |
149 | break; | 150 | break; |
150 | case SPRN_IVOR4: | 151 | case SPRN_IVOR4: |
151 | vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = vcpu->arch.gpr[rs]; | 152 | vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = spr_val; |
152 | break; | 153 | break; |
153 | case SPRN_IVOR5: | 154 | case SPRN_IVOR5: |
154 | vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = vcpu->arch.gpr[rs]; | 155 | vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = spr_val; |
155 | break; | 156 | break; |
156 | case SPRN_IVOR6: | 157 | case SPRN_IVOR6: |
157 | vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = vcpu->arch.gpr[rs]; | 158 | vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = spr_val; |
158 | break; | 159 | break; |
159 | case SPRN_IVOR7: | 160 | case SPRN_IVOR7: |
160 | vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = vcpu->arch.gpr[rs]; | 161 | vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val; |
161 | break; | 162 | break; |
162 | case SPRN_IVOR8: | 163 | case SPRN_IVOR8: |
163 | vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = vcpu->arch.gpr[rs]; | 164 | vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val; |
164 | break; | 165 | break; |
165 | case SPRN_IVOR9: | 166 | case SPRN_IVOR9: |
166 | vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = vcpu->arch.gpr[rs]; | 167 | vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val; |
167 | break; | 168 | break; |
168 | case SPRN_IVOR10: | 169 | case SPRN_IVOR10: |
169 | vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = vcpu->arch.gpr[rs]; | 170 | vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = spr_val; |
170 | break; | 171 | break; |
171 | case SPRN_IVOR11: | 172 | case SPRN_IVOR11: |
172 | vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = vcpu->arch.gpr[rs]; | 173 | vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = spr_val; |
173 | break; | 174 | break; |
174 | case SPRN_IVOR12: | 175 | case SPRN_IVOR12: |
175 | vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = vcpu->arch.gpr[rs]; | 176 | vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = spr_val; |
176 | break; | 177 | break; |
177 | case SPRN_IVOR13: | 178 | case SPRN_IVOR13: |
178 | vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = vcpu->arch.gpr[rs]; | 179 | vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = spr_val; |
179 | break; | 180 | break; |
180 | case SPRN_IVOR14: | 181 | case SPRN_IVOR14: |
181 | vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = vcpu->arch.gpr[rs]; | 182 | vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = spr_val; |
182 | break; | 183 | break; |
183 | case SPRN_IVOR15: | 184 | case SPRN_IVOR15: |
184 | vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = vcpu->arch.gpr[rs]; | 185 | vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val; |
185 | break; | 186 | break; |
186 | 187 | ||
187 | default: | 188 | default: |
@@ -197,65 +198,65 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
197 | 198 | ||
198 | switch (sprn) { | 199 | switch (sprn) { |
199 | case SPRN_IVPR: | 200 | case SPRN_IVPR: |
200 | vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break; | 201 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break; |
201 | case SPRN_DEAR: | 202 | case SPRN_DEAR: |
202 | vcpu->arch.gpr[rt] = vcpu->arch.dear; break; | 203 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); break; |
203 | case SPRN_ESR: | 204 | case SPRN_ESR: |
204 | vcpu->arch.gpr[rt] = vcpu->arch.esr; break; | 205 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break; |
205 | case SPRN_DBCR0: | 206 | case SPRN_DBCR0: |
206 | vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break; | 207 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break; |
207 | case SPRN_DBCR1: | 208 | case SPRN_DBCR1: |
208 | vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break; | 209 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break; |
209 | case SPRN_DBSR: | 210 | case SPRN_DBSR: |
210 | vcpu->arch.gpr[rt] = vcpu->arch.dbsr; break; | 211 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break; |
211 | 212 | ||
212 | case SPRN_IVOR0: | 213 | case SPRN_IVOR0: |
213 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; | 214 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]); |
214 | break; | 215 | break; |
215 | case SPRN_IVOR1: | 216 | case SPRN_IVOR1: |
216 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; | 217 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]); |
217 | break; | 218 | break; |
218 | case SPRN_IVOR2: | 219 | case SPRN_IVOR2: |
219 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; | 220 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]); |
220 | break; | 221 | break; |
221 | case SPRN_IVOR3: | 222 | case SPRN_IVOR3: |
222 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; | 223 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]); |
223 | break; | 224 | break; |
224 | case SPRN_IVOR4: | 225 | case SPRN_IVOR4: |
225 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; | 226 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]); |
226 | break; | 227 | break; |
227 | case SPRN_IVOR5: | 228 | case SPRN_IVOR5: |
228 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; | 229 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]); |
229 | break; | 230 | break; |
230 | case SPRN_IVOR6: | 231 | case SPRN_IVOR6: |
231 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; | 232 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]); |
232 | break; | 233 | break; |
233 | case SPRN_IVOR7: | 234 | case SPRN_IVOR7: |
234 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; | 235 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]); |
235 | break; | 236 | break; |
236 | case SPRN_IVOR8: | 237 | case SPRN_IVOR8: |
237 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; | 238 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]); |
238 | break; | 239 | break; |
239 | case SPRN_IVOR9: | 240 | case SPRN_IVOR9: |
240 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; | 241 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]); |
241 | break; | 242 | break; |
242 | case SPRN_IVOR10: | 243 | case SPRN_IVOR10: |
243 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; | 244 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]); |
244 | break; | 245 | break; |
245 | case SPRN_IVOR11: | 246 | case SPRN_IVOR11: |
246 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; | 247 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]); |
247 | break; | 248 | break; |
248 | case SPRN_IVOR12: | 249 | case SPRN_IVOR12: |
249 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; | 250 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]); |
250 | break; | 251 | break; |
251 | case SPRN_IVOR13: | 252 | case SPRN_IVOR13: |
252 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; | 253 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]); |
253 | break; | 254 | break; |
254 | case SPRN_IVOR14: | 255 | case SPRN_IVOR14: |
255 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; | 256 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]); |
256 | break; | 257 | break; |
257 | case SPRN_IVOR15: | 258 | case SPRN_IVOR15: |
258 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; | 259 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]); |
259 | break; | 260 | break; |
260 | 261 | ||
261 | default: | 262 | default: |
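
All of the emulation code above now goes through kvmppc_get_gpr()/kvmppc_set_gpr() instead of touching vcpu->arch.gpr[] directly. At this point the accessors are presumably trivial wrappers (a sketch; the real definitions live in asm/kvm_ppc.h), and the indirection lets the backing storage for the register file change later without touching every caller again.

/* Likely shape of the accessors this series introduces (sketch). */
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.gpr[num];
}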
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index 64949eef43f1..efa1198940ab 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c | |||
@@ -60,6 +60,12 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) | |||
60 | 60 | ||
61 | kvmppc_e500_tlb_setup(vcpu_e500); | 61 | kvmppc_e500_tlb_setup(vcpu_e500); |
62 | 62 | ||
63 | /* Registers init */ | ||
64 | vcpu->arch.pvr = mfspr(SPRN_PVR); | ||
65 | |||
66 | /* Since booke kvm only support one core, update all vcpus' PIR to 0 */ | ||
67 | vcpu->vcpu_id = 0; | ||
68 | |||
63 | return 0; | 69 | return 0; |
64 | } | 70 | } |
65 | 71 | ||
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index be95b8d8e3b7..8e3edfbc9634 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c | |||
@@ -74,54 +74,59 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
74 | { | 74 | { |
75 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 75 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
76 | int emulated = EMULATE_DONE; | 76 | int emulated = EMULATE_DONE; |
77 | ulong spr_val = kvmppc_get_gpr(vcpu, rs); | ||
77 | 78 | ||
78 | switch (sprn) { | 79 | switch (sprn) { |
79 | case SPRN_PID: | 80 | case SPRN_PID: |
80 | vcpu_e500->pid[0] = vcpu->arch.shadow_pid = | 81 | vcpu_e500->pid[0] = vcpu->arch.shadow_pid = |
81 | vcpu->arch.pid = vcpu->arch.gpr[rs]; | 82 | vcpu->arch.pid = spr_val; |
82 | break; | 83 | break; |
83 | case SPRN_PID1: | 84 | case SPRN_PID1: |
84 | vcpu_e500->pid[1] = vcpu->arch.gpr[rs]; break; | 85 | vcpu_e500->pid[1] = spr_val; break; |
85 | case SPRN_PID2: | 86 | case SPRN_PID2: |
86 | vcpu_e500->pid[2] = vcpu->arch.gpr[rs]; break; | 87 | vcpu_e500->pid[2] = spr_val; break; |
87 | case SPRN_MAS0: | 88 | case SPRN_MAS0: |
88 | vcpu_e500->mas0 = vcpu->arch.gpr[rs]; break; | 89 | vcpu_e500->mas0 = spr_val; break; |
89 | case SPRN_MAS1: | 90 | case SPRN_MAS1: |
90 | vcpu_e500->mas1 = vcpu->arch.gpr[rs]; break; | 91 | vcpu_e500->mas1 = spr_val; break; |
91 | case SPRN_MAS2: | 92 | case SPRN_MAS2: |
92 | vcpu_e500->mas2 = vcpu->arch.gpr[rs]; break; | 93 | vcpu_e500->mas2 = spr_val; break; |
93 | case SPRN_MAS3: | 94 | case SPRN_MAS3: |
94 | vcpu_e500->mas3 = vcpu->arch.gpr[rs]; break; | 95 | vcpu_e500->mas3 = spr_val; break; |
95 | case SPRN_MAS4: | 96 | case SPRN_MAS4: |
96 | vcpu_e500->mas4 = vcpu->arch.gpr[rs]; break; | 97 | vcpu_e500->mas4 = spr_val; break; |
97 | case SPRN_MAS6: | 98 | case SPRN_MAS6: |
98 | vcpu_e500->mas6 = vcpu->arch.gpr[rs]; break; | 99 | vcpu_e500->mas6 = spr_val; break; |
99 | case SPRN_MAS7: | 100 | case SPRN_MAS7: |
100 | vcpu_e500->mas7 = vcpu->arch.gpr[rs]; break; | 101 | vcpu_e500->mas7 = spr_val; break; |
102 | case SPRN_L1CSR0: | ||
103 | vcpu_e500->l1csr0 = spr_val; | ||
104 | vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC); | ||
105 | break; | ||
101 | case SPRN_L1CSR1: | 106 | case SPRN_L1CSR1: |
102 | vcpu_e500->l1csr1 = vcpu->arch.gpr[rs]; break; | 107 | vcpu_e500->l1csr1 = spr_val; break; |
103 | case SPRN_HID0: | 108 | case SPRN_HID0: |
104 | vcpu_e500->hid0 = vcpu->arch.gpr[rs]; break; | 109 | vcpu_e500->hid0 = spr_val; break; |
105 | case SPRN_HID1: | 110 | case SPRN_HID1: |
106 | vcpu_e500->hid1 = vcpu->arch.gpr[rs]; break; | 111 | vcpu_e500->hid1 = spr_val; break; |
107 | 112 | ||
108 | case SPRN_MMUCSR0: | 113 | case SPRN_MMUCSR0: |
109 | emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500, | 114 | emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500, |
110 | vcpu->arch.gpr[rs]); | 115 | spr_val); |
111 | break; | 116 | break; |
112 | 117 | ||
113 | /* extra exceptions */ | 118 | /* extra exceptions */ |
114 | case SPRN_IVOR32: | 119 | case SPRN_IVOR32: |
115 | vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = vcpu->arch.gpr[rs]; | 120 | vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val; |
116 | break; | 121 | break; |
117 | case SPRN_IVOR33: | 122 | case SPRN_IVOR33: |
118 | vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = vcpu->arch.gpr[rs]; | 123 | vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val; |
119 | break; | 124 | break; |
120 | case SPRN_IVOR34: | 125 | case SPRN_IVOR34: |
121 | vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = vcpu->arch.gpr[rs]; | 126 | vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val; |
122 | break; | 127 | break; |
123 | case SPRN_IVOR35: | 128 | case SPRN_IVOR35: |
124 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = vcpu->arch.gpr[rs]; | 129 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val; |
125 | break; | 130 | break; |
126 | 131 | ||
127 | default: | 132 | default: |
@@ -138,63 +143,57 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
138 | 143 | ||
139 | switch (sprn) { | 144 | switch (sprn) { |
140 | case SPRN_PID: | 145 | case SPRN_PID: |
141 | vcpu->arch.gpr[rt] = vcpu_e500->pid[0]; break; | 146 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break; |
142 | case SPRN_PID1: | 147 | case SPRN_PID1: |
143 | vcpu->arch.gpr[rt] = vcpu_e500->pid[1]; break; | 148 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break; |
144 | case SPRN_PID2: | 149 | case SPRN_PID2: |
145 | vcpu->arch.gpr[rt] = vcpu_e500->pid[2]; break; | 150 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break; |
146 | case SPRN_MAS0: | 151 | case SPRN_MAS0: |
147 | vcpu->arch.gpr[rt] = vcpu_e500->mas0; break; | 152 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas0); break; |
148 | case SPRN_MAS1: | 153 | case SPRN_MAS1: |
149 | vcpu->arch.gpr[rt] = vcpu_e500->mas1; break; | 154 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas1); break; |
150 | case SPRN_MAS2: | 155 | case SPRN_MAS2: |
151 | vcpu->arch.gpr[rt] = vcpu_e500->mas2; break; | 156 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas2); break; |
152 | case SPRN_MAS3: | 157 | case SPRN_MAS3: |
153 | vcpu->arch.gpr[rt] = vcpu_e500->mas3; break; | 158 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas3); break; |
154 | case SPRN_MAS4: | 159 | case SPRN_MAS4: |
155 | vcpu->arch.gpr[rt] = vcpu_e500->mas4; break; | 160 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas4); break; |
156 | case SPRN_MAS6: | 161 | case SPRN_MAS6: |
157 | vcpu->arch.gpr[rt] = vcpu_e500->mas6; break; | 162 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas6); break; |
158 | case SPRN_MAS7: | 163 | case SPRN_MAS7: |
159 | vcpu->arch.gpr[rt] = vcpu_e500->mas7; break; | 164 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7); break; |
160 | 165 | ||
161 | case SPRN_TLB0CFG: | 166 | case SPRN_TLB0CFG: |
162 | vcpu->arch.gpr[rt] = mfspr(SPRN_TLB0CFG); | 167 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break; |
163 | vcpu->arch.gpr[rt] &= ~0xfffUL; | ||
164 | vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[0]; | ||
165 | break; | ||
166 | |||
167 | case SPRN_TLB1CFG: | 168 | case SPRN_TLB1CFG: |
168 | vcpu->arch.gpr[rt] = mfspr(SPRN_TLB1CFG); | 169 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb1cfg); break; |
169 | vcpu->arch.gpr[rt] &= ~0xfffUL; | 170 | case SPRN_L1CSR0: |
170 | vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[1]; | 171 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break; |
171 | break; | ||
172 | |||
173 | case SPRN_L1CSR1: | 172 | case SPRN_L1CSR1: |
174 | vcpu->arch.gpr[rt] = vcpu_e500->l1csr1; break; | 173 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break; |
175 | case SPRN_HID0: | 174 | case SPRN_HID0: |
176 | vcpu->arch.gpr[rt] = vcpu_e500->hid0; break; | 175 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break; |
177 | case SPRN_HID1: | 176 | case SPRN_HID1: |
178 | vcpu->arch.gpr[rt] = vcpu_e500->hid1; break; | 177 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break; |
179 | 178 | ||
180 | case SPRN_MMUCSR0: | 179 | case SPRN_MMUCSR0: |
181 | vcpu->arch.gpr[rt] = 0; break; | 180 | kvmppc_set_gpr(vcpu, rt, 0); break; |
182 | 181 | ||
183 | case SPRN_MMUCFG: | 182 | case SPRN_MMUCFG: |
184 | vcpu->arch.gpr[rt] = mfspr(SPRN_MMUCFG); break; | 183 | kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break; |
185 | 184 | ||
186 | /* extra exceptions */ | 185 | /* extra exceptions */ |
187 | case SPRN_IVOR32: | 186 | case SPRN_IVOR32: |
188 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; | 187 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]); |
189 | break; | 188 | break; |
190 | case SPRN_IVOR33: | 189 | case SPRN_IVOR33: |
191 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; | 190 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]); |
192 | break; | 191 | break; |
193 | case SPRN_IVOR34: | 192 | case SPRN_IVOR34: |
194 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; | 193 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]); |
195 | break; | 194 | break; |
196 | case SPRN_IVOR35: | 195 | case SPRN_IVOR35: |
197 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; | 196 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]); |
198 | break; | 197 | break; |
199 | default: | 198 | default: |
200 | emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); | 199 | emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); |
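Most of the churn in this file is mechanical: every direct read or write of vcpu->arch.gpr[] is routed through the new kvmppc_get_gpr()/kvmppc_set_gpr() helpers. The helpers themselves are not part of this hunk; they are presumably trivial inline wrappers (likely living in arch/powerpc/include/asm/kvm_ppc.h), roughly:

	static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
	{
		/* write a guest general-purpose register */
		vcpu->arch.gpr[num] = val;
	}

	static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
	{
		/* read a guest general-purpose register */
		return vcpu->arch.gpr[num];
	}

Funnelling all GPR traffic through one accessor pair lets the backing storage change later (for instance to a shared in-memory register page) without touching every emulation site again.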
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index fb1e1dc11ba5..0d772e6b6318 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c | |||
@@ -417,7 +417,7 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb) | |||
417 | int esel, tlbsel; | 417 | int esel, tlbsel; |
418 | gva_t ea; | 418 | gva_t ea; |
419 | 419 | ||
420 | ea = ((ra) ? vcpu->arch.gpr[ra] : 0) + vcpu->arch.gpr[rb]; | 420 | ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb); |
421 | 421 | ||
422 | ia = (ea >> 2) & 0x1; | 422 | ia = (ea >> 2) & 0x1; |
423 | 423 | ||
@@ -470,7 +470,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb) | |||
470 | struct tlbe *gtlbe = NULL; | 470 | struct tlbe *gtlbe = NULL; |
471 | gva_t ea; | 471 | gva_t ea; |
472 | 472 | ||
473 | ea = vcpu->arch.gpr[rb]; | 473 | ea = kvmppc_get_gpr(vcpu, rb); |
474 | 474 | ||
475 | for (tlbsel = 0; tlbsel < 2; tlbsel++) { | 475 | for (tlbsel = 0; tlbsel < 2; tlbsel++) { |
476 | esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as); | 476 | esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as); |
@@ -728,6 +728,12 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) | |||
728 | if (vcpu_e500->shadow_pages[1] == NULL) | 728 | if (vcpu_e500->shadow_pages[1] == NULL) |
729 | goto err_out_page0; | 729 | goto err_out_page0; |
730 | 730 | ||
731 | /* Init TLB configuration register */ | ||
732 | vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL; | ||
733 | vcpu_e500->tlb0cfg |= vcpu_e500->guest_tlb_size[0]; | ||
734 | vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL; | ||
735 | vcpu_e500->tlb1cfg |= vcpu_e500->guest_tlb_size[1]; | ||
736 | |||
731 | return 0; | 737 | return 0; |
732 | 738 | ||
733 | err_out_page0: | 739 | err_out_page0: |
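With this change the guest-visible TLB0CFG/TLB1CFG values are computed once at vcpu TLB init (host TLBnCFG with the entry count replaced by the guest TLB size) and simply returned from cached fields in the mfspr path above, instead of issuing an mfspr on every emulated access. The matching fields in struct kvmppc_vcpu_e500 are added by a header hunk not shown here; a plausible sketch, assuming plain 32-bit fields:

	struct kvmppc_vcpu_e500 {
		/* ... existing members ... */

		/* guest-visible TLB configuration, derived from host
		 * TLB0CFG/TLB1CFG with NENTRY set to the guest TLB sizes */
		u32 tlb0cfg;
		u32 tlb1cfg;
	};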
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 4a9ac6640fad..cb72a65f4ecc 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c | |||
@@ -83,6 +83,9 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) | |||
83 | 83 | ||
84 | pr_debug("mtDEC: %x\n", vcpu->arch.dec); | 84 | pr_debug("mtDEC: %x\n", vcpu->arch.dec); |
85 | #ifdef CONFIG_PPC64 | 85 | #ifdef CONFIG_PPC64 |
86 | /* mtdec lowers the interrupt line when positive. */ | ||
87 | kvmppc_core_dequeue_dec(vcpu); | ||
88 | |||
86 | /* POWER4+ triggers a dec interrupt if the value is < 0 */ | 89 | /* POWER4+ triggers a dec interrupt if the value is < 0 */ |
87 | if (vcpu->arch.dec & 0x80000000) { | 90 | if (vcpu->arch.dec & 0x80000000) { |
88 | hrtimer_try_to_cancel(&vcpu->arch.dec_timer); | 91 | hrtimer_try_to_cancel(&vcpu->arch.dec_timer); |
@@ -140,14 +143,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
140 | 143 | ||
141 | pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); | 144 | pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); |
142 | 145 | ||
146 | /* Try again next time */ | ||
147 | if (inst == KVM_INST_FETCH_FAILED) | ||
148 | return EMULATE_DONE; | ||
149 | |||
143 | switch (get_op(inst)) { | 150 | switch (get_op(inst)) { |
144 | case OP_TRAP: | 151 | case OP_TRAP: |
145 | #ifdef CONFIG_PPC64 | 152 | #ifdef CONFIG_PPC64 |
146 | case OP_TRAP_64: | 153 | case OP_TRAP_64: |
154 | kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); | ||
147 | #else | 155 | #else |
148 | vcpu->arch.esr |= ESR_PTR; | 156 | kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR); |
149 | #endif | 157 | #endif |
150 | kvmppc_core_queue_program(vcpu); | ||
151 | advance = 0; | 158 | advance = 0; |
152 | break; | 159 | break; |
153 | 160 | ||
@@ -167,14 +174,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
167 | case OP_31_XOP_STWX: | 174 | case OP_31_XOP_STWX: |
168 | rs = get_rs(inst); | 175 | rs = get_rs(inst); |
169 | emulated = kvmppc_handle_store(run, vcpu, | 176 | emulated = kvmppc_handle_store(run, vcpu, |
170 | vcpu->arch.gpr[rs], | 177 | kvmppc_get_gpr(vcpu, rs), |
171 | 4, 1); | 178 | 4, 1); |
172 | break; | 179 | break; |
173 | 180 | ||
174 | case OP_31_XOP_STBX: | 181 | case OP_31_XOP_STBX: |
175 | rs = get_rs(inst); | 182 | rs = get_rs(inst); |
176 | emulated = kvmppc_handle_store(run, vcpu, | 183 | emulated = kvmppc_handle_store(run, vcpu, |
177 | vcpu->arch.gpr[rs], | 184 | kvmppc_get_gpr(vcpu, rs), |
178 | 1, 1); | 185 | 1, 1); |
179 | break; | 186 | break; |
180 | 187 | ||
@@ -183,14 +190,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
183 | ra = get_ra(inst); | 190 | ra = get_ra(inst); |
184 | rb = get_rb(inst); | 191 | rb = get_rb(inst); |
185 | 192 | ||
186 | ea = vcpu->arch.gpr[rb]; | 193 | ea = kvmppc_get_gpr(vcpu, rb); |
187 | if (ra) | 194 | if (ra) |
188 | ea += vcpu->arch.gpr[ra]; | 195 | ea += kvmppc_get_gpr(vcpu, ra); |
189 | 196 | ||
190 | emulated = kvmppc_handle_store(run, vcpu, | 197 | emulated = kvmppc_handle_store(run, vcpu, |
191 | vcpu->arch.gpr[rs], | 198 | kvmppc_get_gpr(vcpu, rs), |
192 | 1, 1); | 199 | 1, 1); |
193 | vcpu->arch.gpr[rs] = ea; | 200 | kvmppc_set_gpr(vcpu, rs, ea); |
194 | break; | 201 | break; |
195 | 202 | ||
196 | case OP_31_XOP_LHZX: | 203 | case OP_31_XOP_LHZX: |
@@ -203,12 +210,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
203 | ra = get_ra(inst); | 210 | ra = get_ra(inst); |
204 | rb = get_rb(inst); | 211 | rb = get_rb(inst); |
205 | 212 | ||
206 | ea = vcpu->arch.gpr[rb]; | 213 | ea = kvmppc_get_gpr(vcpu, rb); |
207 | if (ra) | 214 | if (ra) |
208 | ea += vcpu->arch.gpr[ra]; | 215 | ea += kvmppc_get_gpr(vcpu, ra); |
209 | 216 | ||
210 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 217 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
211 | vcpu->arch.gpr[ra] = ea; | 218 | kvmppc_set_gpr(vcpu, ra, ea); |
212 | break; | 219 | break; |
213 | 220 | ||
214 | case OP_31_XOP_MFSPR: | 221 | case OP_31_XOP_MFSPR: |
@@ -217,47 +224,49 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
217 | 224 | ||
218 | switch (sprn) { | 225 | switch (sprn) { |
219 | case SPRN_SRR0: | 226 | case SPRN_SRR0: |
220 | vcpu->arch.gpr[rt] = vcpu->arch.srr0; break; | 227 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break; |
221 | case SPRN_SRR1: | 228 | case SPRN_SRR1: |
222 | vcpu->arch.gpr[rt] = vcpu->arch.srr1; break; | 229 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break; |
223 | case SPRN_PVR: | 230 | case SPRN_PVR: |
224 | vcpu->arch.gpr[rt] = vcpu->arch.pvr; break; | 231 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break; |
225 | case SPRN_PIR: | 232 | case SPRN_PIR: |
226 | vcpu->arch.gpr[rt] = vcpu->vcpu_id; break; | 233 | kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break; |
227 | case SPRN_MSSSR0: | 234 | case SPRN_MSSSR0: |
228 | vcpu->arch.gpr[rt] = 0; break; | 235 | kvmppc_set_gpr(vcpu, rt, 0); break; |
229 | 236 | ||
230 | /* Note: mftb and TBRL/TBWL are user-accessible, so | 237 | /* Note: mftb and TBRL/TBWL are user-accessible, so |
231 | * the guest can always access the real TB anyways. | 238 | * the guest can always access the real TB anyways. |
232 | * In fact, we probably will never see these traps. */ | 239 | * In fact, we probably will never see these traps. */ |
233 | case SPRN_TBWL: | 240 | case SPRN_TBWL: |
234 | vcpu->arch.gpr[rt] = get_tb() >> 32; break; | 241 | kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break; |
235 | case SPRN_TBWU: | 242 | case SPRN_TBWU: |
236 | vcpu->arch.gpr[rt] = get_tb(); break; | 243 | kvmppc_set_gpr(vcpu, rt, get_tb()); break; |
237 | 244 | ||
238 | case SPRN_SPRG0: | 245 | case SPRN_SPRG0: |
239 | vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break; | 246 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break; |
240 | case SPRN_SPRG1: | 247 | case SPRN_SPRG1: |
241 | vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break; | 248 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break; |
242 | case SPRN_SPRG2: | 249 | case SPRN_SPRG2: |
243 | vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break; | 250 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break; |
244 | case SPRN_SPRG3: | 251 | case SPRN_SPRG3: |
245 | vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break; | 252 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break; |
246 | /* Note: SPRG4-7 are user-readable, so we don't get | 253 | /* Note: SPRG4-7 are user-readable, so we don't get |
247 | * a trap. */ | 254 | * a trap. */ |
248 | 255 | ||
249 | case SPRN_DEC: | 256 | case SPRN_DEC: |
250 | { | 257 | { |
251 | u64 jd = get_tb() - vcpu->arch.dec_jiffies; | 258 | u64 jd = get_tb() - vcpu->arch.dec_jiffies; |
252 | vcpu->arch.gpr[rt] = vcpu->arch.dec - jd; | 259 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd); |
253 | pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", vcpu->arch.dec, jd, vcpu->arch.gpr[rt]); | 260 | pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", |
261 | vcpu->arch.dec, jd, | ||
262 | kvmppc_get_gpr(vcpu, rt)); | ||
254 | break; | 263 | break; |
255 | } | 264 | } |
256 | default: | 265 | default: |
257 | emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); | 266 | emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); |
258 | if (emulated == EMULATE_FAIL) { | 267 | if (emulated == EMULATE_FAIL) { |
259 | printk("mfspr: unknown spr %x\n", sprn); | 268 | printk("mfspr: unknown spr %x\n", sprn); |
260 | vcpu->arch.gpr[rt] = 0; | 269 | kvmppc_set_gpr(vcpu, rt, 0); |
261 | } | 270 | } |
262 | break; | 271 | break; |
263 | } | 272 | } |
@@ -269,7 +278,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
269 | rb = get_rb(inst); | 278 | rb = get_rb(inst); |
270 | 279 | ||
271 | emulated = kvmppc_handle_store(run, vcpu, | 280 | emulated = kvmppc_handle_store(run, vcpu, |
272 | vcpu->arch.gpr[rs], | 281 | kvmppc_get_gpr(vcpu, rs), |
273 | 2, 1); | 282 | 2, 1); |
274 | break; | 283 | break; |
275 | 284 | ||
@@ -278,14 +287,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
278 | ra = get_ra(inst); | 287 | ra = get_ra(inst); |
279 | rb = get_rb(inst); | 288 | rb = get_rb(inst); |
280 | 289 | ||
281 | ea = vcpu->arch.gpr[rb]; | 290 | ea = kvmppc_get_gpr(vcpu, rb); |
282 | if (ra) | 291 | if (ra) |
283 | ea += vcpu->arch.gpr[ra]; | 292 | ea += kvmppc_get_gpr(vcpu, ra); |
284 | 293 | ||
285 | emulated = kvmppc_handle_store(run, vcpu, | 294 | emulated = kvmppc_handle_store(run, vcpu, |
286 | vcpu->arch.gpr[rs], | 295 | kvmppc_get_gpr(vcpu, rs), |
287 | 2, 1); | 296 | 2, 1); |
288 | vcpu->arch.gpr[ra] = ea; | 297 | kvmppc_set_gpr(vcpu, ra, ea); |
289 | break; | 298 | break; |
290 | 299 | ||
291 | case OP_31_XOP_MTSPR: | 300 | case OP_31_XOP_MTSPR: |
@@ -293,9 +302,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
293 | rs = get_rs(inst); | 302 | rs = get_rs(inst); |
294 | switch (sprn) { | 303 | switch (sprn) { |
295 | case SPRN_SRR0: | 304 | case SPRN_SRR0: |
296 | vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break; | 305 | vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break; |
297 | case SPRN_SRR1: | 306 | case SPRN_SRR1: |
298 | vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break; | 307 | vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break; |
299 | 308 | ||
300 | /* XXX We need to context-switch the timebase for | 309 | /* XXX We need to context-switch the timebase for |
301 | * watchdog and FIT. */ | 310 | * watchdog and FIT. */ |
@@ -305,18 +314,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
305 | case SPRN_MSSSR0: break; | 314 | case SPRN_MSSSR0: break; |
306 | 315 | ||
307 | case SPRN_DEC: | 316 | case SPRN_DEC: |
308 | vcpu->arch.dec = vcpu->arch.gpr[rs]; | 317 | vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs); |
309 | kvmppc_emulate_dec(vcpu); | 318 | kvmppc_emulate_dec(vcpu); |
310 | break; | 319 | break; |
311 | 320 | ||
312 | case SPRN_SPRG0: | 321 | case SPRN_SPRG0: |
313 | vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break; | 322 | vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break; |
314 | case SPRN_SPRG1: | 323 | case SPRN_SPRG1: |
315 | vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break; | 324 | vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break; |
316 | case SPRN_SPRG2: | 325 | case SPRN_SPRG2: |
317 | vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break; | 326 | vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break; |
318 | case SPRN_SPRG3: | 327 | case SPRN_SPRG3: |
319 | vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break; | 328 | vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break; |
320 | 329 | ||
321 | default: | 330 | default: |
322 | emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); | 331 | emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); |
@@ -348,7 +357,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
348 | rb = get_rb(inst); | 357 | rb = get_rb(inst); |
349 | 358 | ||
350 | emulated = kvmppc_handle_store(run, vcpu, | 359 | emulated = kvmppc_handle_store(run, vcpu, |
351 | vcpu->arch.gpr[rs], | 360 | kvmppc_get_gpr(vcpu, rs), |
352 | 4, 0); | 361 | 4, 0); |
353 | break; | 362 | break; |
354 | 363 | ||
@@ -363,7 +372,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
363 | rb = get_rb(inst); | 372 | rb = get_rb(inst); |
364 | 373 | ||
365 | emulated = kvmppc_handle_store(run, vcpu, | 374 | emulated = kvmppc_handle_store(run, vcpu, |
366 | vcpu->arch.gpr[rs], | 375 | kvmppc_get_gpr(vcpu, rs), |
367 | 2, 0); | 376 | 2, 0); |
368 | break; | 377 | break; |
369 | 378 | ||
@@ -382,7 +391,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
382 | ra = get_ra(inst); | 391 | ra = get_ra(inst); |
383 | rt = get_rt(inst); | 392 | rt = get_rt(inst); |
384 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 393 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); |
385 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | 394 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); |
386 | break; | 395 | break; |
387 | 396 | ||
388 | case OP_LBZ: | 397 | case OP_LBZ: |
@@ -394,35 +403,39 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
394 | ra = get_ra(inst); | 403 | ra = get_ra(inst); |
395 | rt = get_rt(inst); | 404 | rt = get_rt(inst); |
396 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 405 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
397 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | 406 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); |
398 | break; | 407 | break; |
399 | 408 | ||
400 | case OP_STW: | 409 | case OP_STW: |
401 | rs = get_rs(inst); | 410 | rs = get_rs(inst); |
402 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | 411 | emulated = kvmppc_handle_store(run, vcpu, |
412 | kvmppc_get_gpr(vcpu, rs), | ||
403 | 4, 1); | 413 | 4, 1); |
404 | break; | 414 | break; |
405 | 415 | ||
406 | case OP_STWU: | 416 | case OP_STWU: |
407 | ra = get_ra(inst); | 417 | ra = get_ra(inst); |
408 | rs = get_rs(inst); | 418 | rs = get_rs(inst); |
409 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | 419 | emulated = kvmppc_handle_store(run, vcpu, |
420 | kvmppc_get_gpr(vcpu, rs), | ||
410 | 4, 1); | 421 | 4, 1); |
411 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | 422 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); |
412 | break; | 423 | break; |
413 | 424 | ||
414 | case OP_STB: | 425 | case OP_STB: |
415 | rs = get_rs(inst); | 426 | rs = get_rs(inst); |
416 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | 427 | emulated = kvmppc_handle_store(run, vcpu, |
428 | kvmppc_get_gpr(vcpu, rs), | ||
417 | 1, 1); | 429 | 1, 1); |
418 | break; | 430 | break; |
419 | 431 | ||
420 | case OP_STBU: | 432 | case OP_STBU: |
421 | ra = get_ra(inst); | 433 | ra = get_ra(inst); |
422 | rs = get_rs(inst); | 434 | rs = get_rs(inst); |
423 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | 435 | emulated = kvmppc_handle_store(run, vcpu, |
436 | kvmppc_get_gpr(vcpu, rs), | ||
424 | 1, 1); | 437 | 1, 1); |
425 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | 438 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); |
426 | break; | 439 | break; |
427 | 440 | ||
428 | case OP_LHZ: | 441 | case OP_LHZ: |
@@ -434,21 +447,23 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
434 | ra = get_ra(inst); | 447 | ra = get_ra(inst); |
435 | rt = get_rt(inst); | 448 | rt = get_rt(inst); |
436 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 449 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
437 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | 450 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); |
438 | break; | 451 | break; |
439 | 452 | ||
440 | case OP_STH: | 453 | case OP_STH: |
441 | rs = get_rs(inst); | 454 | rs = get_rs(inst); |
442 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | 455 | emulated = kvmppc_handle_store(run, vcpu, |
456 | kvmppc_get_gpr(vcpu, rs), | ||
443 | 2, 1); | 457 | 2, 1); |
444 | break; | 458 | break; |
445 | 459 | ||
446 | case OP_STHU: | 460 | case OP_STHU: |
447 | ra = get_ra(inst); | 461 | ra = get_ra(inst); |
448 | rs = get_rs(inst); | 462 | rs = get_rs(inst); |
449 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | 463 | emulated = kvmppc_handle_store(run, vcpu, |
464 | kvmppc_get_gpr(vcpu, rs), | ||
450 | 2, 1); | 465 | 2, 1); |
451 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | 466 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); |
452 | break; | 467 | break; |
453 | 468 | ||
454 | default: | 469 | default: |
@@ -461,6 +476,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
461 | advance = 0; | 476 | advance = 0; |
462 | printk(KERN_ERR "Couldn't emulate instruction 0x%08x " | 477 | printk(KERN_ERR "Couldn't emulate instruction 0x%08x " |
463 | "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); | 478 | "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); |
479 | kvmppc_core_queue_program(vcpu, 0); | ||
464 | } | 480 | } |
465 | } | 481 | } |
466 | 482 | ||
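Two behavioural changes in emulate.c are easy to miss among the accessor conversions: an instruction word of KVM_INST_FETCH_FAILED now returns EMULATE_DONE immediately so the fetch is simply retried on the next exit, and kvmppc_core_queue_program() now carries the fault flags (SRR1_PROGTRAP under CONFIG_PPC64, ESR bits otherwise) rather than having callers poke vcpu->arch.esr themselves; unhandled opcodes also queue a program interrupt into the guest instead of only logging. The per-core implementations presumably grew a flags argument along these lines (a sketch of the BookE side, assuming the flags end up in the guest ESR):

	void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
	{
		/* record why the program interrupt fires, then queue it */
		vcpu->arch.esr = esr_flags;
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
	}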
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index f06cf93b178e..51aedd7f16bc 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -137,6 +137,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) | |||
137 | { | 137 | { |
138 | kvmppc_free_vcpus(kvm); | 138 | kvmppc_free_vcpus(kvm); |
139 | kvm_free_physmem(kvm); | 139 | kvm_free_physmem(kvm); |
140 | cleanup_srcu_struct(&kvm->srcu); | ||
140 | kfree(kvm); | 141 | kfree(kvm); |
141 | } | 142 | } |
142 | 143 | ||
@@ -165,14 +166,24 @@ long kvm_arch_dev_ioctl(struct file *filp, | |||
165 | return -EINVAL; | 166 | return -EINVAL; |
166 | } | 167 | } |
167 | 168 | ||
168 | int kvm_arch_set_memory_region(struct kvm *kvm, | 169 | int kvm_arch_prepare_memory_region(struct kvm *kvm, |
169 | struct kvm_userspace_memory_region *mem, | 170 | struct kvm_memory_slot *memslot, |
170 | struct kvm_memory_slot old, | 171 | struct kvm_memory_slot old, |
171 | int user_alloc) | 172 | struct kvm_userspace_memory_region *mem, |
173 | int user_alloc) | ||
172 | { | 174 | { |
173 | return 0; | 175 | return 0; |
174 | } | 176 | } |
175 | 177 | ||
178 | void kvm_arch_commit_memory_region(struct kvm *kvm, | ||
179 | struct kvm_userspace_memory_region *mem, | ||
180 | struct kvm_memory_slot old, | ||
181 | int user_alloc) | ||
182 | { | ||
183 | return; | ||
184 | } | ||
185 | |||
186 | |||
176 | void kvm_arch_flush_shadow(struct kvm *kvm) | 187 | void kvm_arch_flush_shadow(struct kvm *kvm) |
177 | { | 188 | { |
178 | } | 189 | } |
@@ -260,34 +271,35 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | |||
260 | static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, | 271 | static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, |
261 | struct kvm_run *run) | 272 | struct kvm_run *run) |
262 | { | 273 | { |
263 | ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; | 274 | kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data); |
264 | *gpr = run->dcr.data; | ||
265 | } | 275 | } |
266 | 276 | ||
267 | static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, | 277 | static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, |
268 | struct kvm_run *run) | 278 | struct kvm_run *run) |
269 | { | 279 | { |
270 | ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; | 280 | ulong gpr; |
271 | 281 | ||
272 | if (run->mmio.len > sizeof(*gpr)) { | 282 | if (run->mmio.len > sizeof(gpr)) { |
273 | printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); | 283 | printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); |
274 | return; | 284 | return; |
275 | } | 285 | } |
276 | 286 | ||
277 | if (vcpu->arch.mmio_is_bigendian) { | 287 | if (vcpu->arch.mmio_is_bigendian) { |
278 | switch (run->mmio.len) { | 288 | switch (run->mmio.len) { |
279 | case 4: *gpr = *(u32 *)run->mmio.data; break; | 289 | case 4: gpr = *(u32 *)run->mmio.data; break; |
280 | case 2: *gpr = *(u16 *)run->mmio.data; break; | 290 | case 2: gpr = *(u16 *)run->mmio.data; break; |
281 | case 1: *gpr = *(u8 *)run->mmio.data; break; | 291 | case 1: gpr = *(u8 *)run->mmio.data; break; |
282 | } | 292 | } |
283 | } else { | 293 | } else { |
284 | /* Convert BE data from userland back to LE. */ | 294 | /* Convert BE data from userland back to LE. */ |
285 | switch (run->mmio.len) { | 295 | switch (run->mmio.len) { |
286 | case 4: *gpr = ld_le32((u32 *)run->mmio.data); break; | 296 | case 4: gpr = ld_le32((u32 *)run->mmio.data); break; |
287 | case 2: *gpr = ld_le16((u16 *)run->mmio.data); break; | 297 | case 2: gpr = ld_le16((u16 *)run->mmio.data); break; |
288 | case 1: *gpr = *(u8 *)run->mmio.data; break; | 298 | case 1: gpr = *(u8 *)run->mmio.data; break; |
289 | } | 299 | } |
290 | } | 300 | } |
301 | |||
302 | kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); | ||
291 | } | 303 | } |
292 | 304 | ||
293 | int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, | 305 | int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |