author     Avi Kivity <avi@redhat.com>  2012-05-08 10:00:13 -0400
committer  Avi Kivity <avi@redhat.com>  2012-05-08 10:00:13 -0400
commit     f2569053e0b3ae092e2f35514f8d108647baa01f (patch)
tree       f850e7d06b446c6bc598d12d40b2582ef274abf7 /arch
parent     9f4260e73ac43aaa91eb5de95950e1de7002f467 (diff)
parent     54771e6217ce05a474827d9b23ff03de9d2ef2a0 (diff)
Merge branch 'for-upstream' of git://github.com/agraf/linux-2.6 into next
PPC updates from Alex.
* 'for-upstream' of git://github.com/agraf/linux-2.6:
KVM: PPC: Emulator: clean up SPR reads and writes
KVM: PPC: Emulator: clean up instruction parsing
kvm/powerpc: Add new ioctl to retrieve server MMU infos
kvm/book3s: Make kernel emulated H_PUT_TCE available for "PR" KVM
KVM: PPC: bookehv: Fix r8/r13 storing in level exception handler
KVM: PPC: Book3S: Enable IRQs during exit handling
KVM: PPC: Fix PR KVM on POWER7 bare metal
KVM: PPC: Fix stbux emulation
KVM: PPC: bookehv: Use lwz/stw instead of PPC_LL/PPC_STL for 32-bit fields
KVM: PPC: Book3S: PR: No isync in slbie path
KVM: PPC: Book3S: PR: Optimize entry path
KVM: PPC: booke(hv): Fix save/restore of guest accessible SPRGs.
KVM: PPC: Restrict PPC_[L|ST]D macro to asm code
KVM: PPC: bookehv: Use a macro for saving/restoring guest registers to/from their 64-bit copies.
KVM: PPC: Use clockevent multiplier and shifter for decrementer
KVM: Use minimum and maximum address mapped by TLB1
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
25 files changed, 685 insertions, 462 deletions
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 097815233284..76fdcfef0889 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -20,6 +20,16 @@
 #ifndef __POWERPC_KVM_ASM_H__
 #define __POWERPC_KVM_ASM_H__
 
+#ifdef __ASSEMBLY__
+#ifdef CONFIG_64BIT
+#define PPC_STD(sreg, offset, areg)  std sreg, (offset)(areg)
+#define PPC_LD(treg, offset, areg)   ld treg, (offset)(areg)
+#else
+#define PPC_STD(sreg, offset, areg)  stw sreg, (offset+4)(areg)
+#define PPC_LD(treg, offset, areg)   lwz treg, (offset+4)(areg)
+#endif
+#endif
+
 /* IVPR must be 64KiB-aligned. */
 #define VCPU_SIZE_ORDER 4
 #define VCPU_SIZE_LOG   (VCPU_SIZE_ORDER + 12)
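Note: the 32-bit fallbacks above work because these targets are big-endian, so the low word of a 64-bit field lives at byte offset +4, and a 32-bit lwz/stw at (offset+4) touches exactly the bits that ld/std would use for values that fit in 32 bits. A stand-alone C sketch of that layout assumption (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint64_t field = 0x1122334455667788ULL;
        unsigned char raw[8];
        uint32_t low;

        memcpy(raw, &field, sizeof(field));
        /* On a big-endian host, as on these PPC targets, raw[4..7] hold
         * the low word, which is what lwz/stw at (offset+4) access. */
        memcpy(&low, raw + 4, sizeof(low));
        printf("word at +4: 0x%08x\n", low); /* 0x55667788 on big-endian */
        return 0;
}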
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 42a527e70490..d848cdc49715 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -237,7 +237,6 @@ struct kvm_arch {
         unsigned long vrma_slb_v;
         int rma_setup_done;
         int using_mmu_notifiers;
-        struct list_head spapr_tce_tables;
         spinlock_t slot_phys_lock;
         unsigned long *slot_phys[KVM_MEM_SLOTS_NUM];
         int slot_npages[KVM_MEM_SLOTS_NUM];
@@ -245,6 +244,9 @@ struct kvm_arch {
         struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
         struct kvmppc_linear_info *hpt_li;
 #endif /* CONFIG_KVM_BOOK3S_64_HV */
+#ifdef CONFIG_PPC_BOOK3S_64
+        struct list_head spapr_tce_tables;
+#endif
 };
 
 /*
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 7f0a3dae7cde..f68c22fa2fce 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -107,8 +107,10 @@ extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
 
 extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   unsigned int op, int *advance);
-extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
-extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
+extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
+                                     ulong val);
+extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
+                                     ulong *val);
 
 extern int kvmppc_booke_init(void);
 extern void kvmppc_booke_exit(void);
@@ -126,6 +128,8 @@ extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
 extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
 extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                 struct kvm_create_spapr_tce *args);
+extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+                             unsigned long ioba, unsigned long tce);
 extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
                                 struct kvm_allocate_rma *rma);
 extern struct kvmppc_linear_info *kvm_alloc_rma(void);
@@ -138,6 +142,8 @@ extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                 struct kvm_userspace_memory_region *mem);
 extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                 struct kvm_userspace_memory_region *mem);
+extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
+                                      struct kvm_ppc_smmu_info *info);
 
 extern int kvmppc_bookehv_init(void);
 extern void kvmppc_bookehv_exit(void);
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 2136f58a54e8..3b4b4a8da922 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -23,6 +23,7 @@
 extern unsigned long tb_ticks_per_jiffy;
 extern unsigned long tb_ticks_per_usec;
 extern unsigned long tb_ticks_per_sec;
+extern struct clock_event_device decrementer_clockevent;
 
 struct rtc_time;
 extern void to_tm(int tim, struct rtc_time * tm);
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 786a2700ec2d..d1f2aafcbe8c 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -190,3 +190,7 @@ EXPORT_SYMBOL(__arch_hweight16);
 EXPORT_SYMBOL(__arch_hweight32);
 EXPORT_SYMBOL(__arch_hweight64);
 #endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+EXPORT_SYMBOL_GPL(mmu_psize_defs);
+#endif
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 2c42cd72d0f5..99a995c2a3f2 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -100,7 +100,7 @@ static int decrementer_set_next_event(unsigned long evt,
 static void decrementer_set_mode(enum clock_event_mode mode,
                                  struct clock_event_device *dev);
 
-static struct clock_event_device decrementer_clockevent = {
+struct clock_event_device decrementer_clockevent = {
         .name           = "decrementer",
         .rating         = 200,
         .irq            = 0,
@@ -108,6 +108,7 @@ static struct clock_event_device decrementer_clockevent = {
         .set_mode       = decrementer_set_mode,
         .features       = CLOCK_EVT_FEAT_ONESHOT,
 };
+EXPORT_SYMBOL(decrementer_clockevent);
 
 DEFINE_PER_CPU(u64, decrementers_next_tb);
 static DEFINE_PER_CPU(struct clock_event_device, decrementers);
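Note: exporting decrementer_clockevent lets the KVM decrementer emulation ("Use clockevent multiplier and shifter for decrementer" in the shortlog) reuse the mult/shift pair the clockevents core computed once at boot instead of dividing by tb_ticks_per_usec on every reload. For a clock_event_device, mult/shift map nanoseconds to device ticks (ticks = ns * mult >> shift), so a consumer inverts that. A sketch of the inversion, with an illustrative helper name rather than the series' own:

#include <linux/clockchips.h>
#include <asm/div64.h>
#include <asm/time.h>

/* Illustrative: nanoseconds until a decrementer loaded with 'ticks'
 * expires, inverting the clockevent's ns -> ticks mapping. */
static u64 dec_ticks_to_ns(u64 ticks)
{
        u64 ns = ticks << decrementer_clockevent.shift;

        do_div(ns, decrementer_clockevent.mult);
        return ns;
}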
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 549bb2c9a47a..c8c61578fdfc 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -37,22 +37,19 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            unsigned int inst, int *advance)
 {
         int emulated = EMULATE_DONE;
-        int dcrn;
-        int ra;
-        int rb;
-        int rc;
-        int rs;
-        int rt;
-        int ws;
+        int dcrn = get_dcrn(inst);
+        int ra = get_ra(inst);
+        int rb = get_rb(inst);
+        int rc = get_rc(inst);
+        int rs = get_rs(inst);
+        int rt = get_rt(inst);
+        int ws = get_ws(inst);
 
         switch (get_op(inst)) {
         case 31:
                 switch (get_xop(inst)) {
 
                 case XOP_MFDCR:
-                        dcrn = get_dcrn(inst);
-                        rt = get_rt(inst);
-
                         /* The guest may access CPR0 registers to determine the timebase
                          * frequency, and it must know the real host frequency because it
                          * can directly access the timebase registers.
@@ -88,9 +85,6 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                         break;
 
                 case XOP_MTDCR:
-                        dcrn = get_dcrn(inst);
-                        rs = get_rs(inst);
-
                         /* emulate some access in kernel */
                         switch (dcrn) {
                         case DCRN_CPR0_CONFIG_ADDR:
@@ -108,17 +102,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                         break;
 
                 case XOP_TLBWE:
-                        ra = get_ra(inst);
-                        rs = get_rs(inst);
-                        ws = get_ws(inst);
                         emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
                         break;
 
                 case XOP_TLBSX:
-                        rt = get_rt(inst);
-                        ra = get_ra(inst);
-                        rb = get_rb(inst);
-                        rc = get_rc(inst);
                         emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
                         break;
 
@@ -141,41 +128,41 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
         return emulated;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
         int emulated = EMULATE_DONE;
 
         switch (sprn) {
         case SPRN_PID:
-                kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break;
+                kvmppc_set_pid(vcpu, spr_val); break;
         case SPRN_MMUCR:
-                vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break;
+                vcpu->arch.mmucr = spr_val; break;
         case SPRN_CCR0:
-                vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break;
+                vcpu->arch.ccr0 = spr_val; break;
         case SPRN_CCR1:
-                vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break;
+                vcpu->arch.ccr1 = spr_val; break;
         default:
-                emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
+                emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
         }
 
         return emulated;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
         int emulated = EMULATE_DONE;
 
         switch (sprn) {
         case SPRN_PID:
-                kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break;
+                *spr_val = vcpu->arch.pid; break;
         case SPRN_MMUCR:
-                kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break;
+                *spr_val = vcpu->arch.mmucr; break;
         case SPRN_CCR0:
-                kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break;
+                *spr_val = vcpu->arch.ccr0; break;
         case SPRN_CCR1:
-                kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break;
+                *spr_val = vcpu->arch.ccr1; break;
         default:
-                emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
+                emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
         }
 
         return emulated;
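Note: after this refactor the per-core back ends never touch GPRs for mtspr/mfspr; they deal only in the SPR value itself. The single GPR read/write site presumably moves into the generic dispatcher in emulate.c, which is outside this arch-limited view. A sketch of what that caller side looks like, built on the new prototypes above (the wrapper names here are hypothetical):

/* Hypothetical wrappers; only the kvmppc_core_emulate_* prototypes
 * are taken from the patch itself. */
static int emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
        ulong spr_val;
        int emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, &spr_val);

        if (emulated == EMULATE_DONE)
                kvmppc_set_gpr(vcpu, rt, spr_val); /* one GPR write site */
        return emulated;
}

static int emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
        ulong spr_val = kvmppc_get_gpr(vcpu, rs); /* one GPR read site */

        return kvmppc_core_emulate_mtspr(vcpu, sprn, spr_val);
}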
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 25225aea4c39..c2a08636e6d4 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -54,6 +54,7 @@ kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
         book3s_paired_singles.o \
         book3s_pr.o \
         book3s_pr_papr.o \
+        book3s_64_vio_hv.o \
         book3s_emulate.o \
         book3s_interrupts.o \
         book3s_mmu_hpte.o \
@@ -78,6 +79,7 @@ kvm-book3s_64-module-objs := \
         powerpc.o \
         emulate.o \
         book3s.o \
+        book3s_64_vio.o \
         $(kvm-book3s_64-objs-y)
 
 kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs)
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index f2e6e48ea463..56b983e7b738 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -90,8 +90,6 @@ slb_exit_skip_ ## num:
         or      r10, r10, r12
         slbie   r10
 
-        isync
-
         /* Fill SLB with our shadow */
 
         lbz     r12, SVCPU_SLB_MAX(r3)
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
new file mode 100644
index 000000000000..72ffc899c082
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -0,0 +1,150 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/hugetlb.h>
+#include <linux/list.h>
+#include <linux/anon_inodes.h>
+
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu-hash64.h>
+#include <asm/hvcall.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+#include <asm/kvm_host.h>
+#include <asm/udbg.h>
+
+#define TCES_PER_PAGE   (PAGE_SIZE / sizeof(u64))
+
+static long kvmppc_stt_npages(unsigned long window_size)
+{
+        return ALIGN((window_size >> SPAPR_TCE_SHIFT)
+                     * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
+}
+
+static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
+{
+        struct kvm *kvm = stt->kvm;
+        int i;
+
+        mutex_lock(&kvm->lock);
+        list_del(&stt->list);
+        for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
+                __free_page(stt->pages[i]);
+        kfree(stt);
+        mutex_unlock(&kvm->lock);
+
+        kvm_put_kvm(kvm);
+}
+
+static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+        struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
+        struct page *page;
+
+        if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
+                return VM_FAULT_SIGBUS;
+
+        page = stt->pages[vmf->pgoff];
+        get_page(page);
+        vmf->page = page;
+        return 0;
+}
+
+static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
+        .fault = kvm_spapr_tce_fault,
+};
+
+static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
+{
+        vma->vm_ops = &kvm_spapr_tce_vm_ops;
+        return 0;
+}
+
+static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
+{
+        struct kvmppc_spapr_tce_table *stt = filp->private_data;
+
+        release_spapr_tce_table(stt);
+        return 0;
+}
+
+static struct file_operations kvm_spapr_tce_fops = {
+        .mmap           = kvm_spapr_tce_mmap,
+        .release        = kvm_spapr_tce_release,
+};
+
+long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+                                   struct kvm_create_spapr_tce *args)
+{
+        struct kvmppc_spapr_tce_table *stt = NULL;
+        long npages;
+        int ret = -ENOMEM;
+        int i;
+
+        /* Check this LIOBN hasn't been previously allocated */
+        list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
+                if (stt->liobn == args->liobn)
+                        return -EBUSY;
+        }
+
+        npages = kvmppc_stt_npages(args->window_size);
+
+        stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
+                      GFP_KERNEL);
+        if (!stt)
+                goto fail;
+
+        stt->liobn = args->liobn;
+        stt->window_size = args->window_size;
+        stt->kvm = kvm;
+
+        for (i = 0; i < npages; i++) {
+                stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+                if (!stt->pages[i])
+                        goto fail;
+        }
+
+        kvm_get_kvm(kvm);
+
+        mutex_lock(&kvm->lock);
+        list_add(&stt->list, &kvm->arch.spapr_tce_tables);
+
+        mutex_unlock(&kvm->lock);
+
+        return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
+                                stt, O_RDWR);
+
+fail:
+        if (stt) {
+                for (i = 0; i < npages; i++)
+                        if (stt->pages[i])
+                                __free_page(stt->pages[i]);
+
+                kfree(stt);
+        }
+        return ret;
+}
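Note: a quick worked check of kvmppc_stt_npages() above. Each TCE is a u64 describing one IOMMU page of the DMA window, so assuming SPAPR_TCE_SHIFT is 12 (4 KiB IOMMU pages; the constant is defined outside this hunk) and 4 KiB host pages, a 256 MiB window needs 65536 entries, 512 KiB of table, 128 host pages:

/* Stand-alone recomputation of the sizing above; SPAPR_TCE_SHIFT = 12
 * and PAGE_SIZE = 4096 are assumptions for this worked example. */
#include <stdint.h>
#include <stdio.h>

#define SPAPR_TCE_SHIFT 12
#define PAGE_SIZE       4096UL
#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

static long stt_npages(unsigned long window_size)
{
        return ALIGN((window_size >> SPAPR_TCE_SHIFT) * sizeof(uint64_t),
                     PAGE_SIZE) / PAGE_SIZE;
}

int main(void)
{
        printf("%ld\n", stt_npages(256UL << 20)); /* prints 128 */
        return 0;
}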
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index ea0f8c537c28..30c2f3b134c6 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -38,6 +38,9 @@
 
 #define TCES_PER_PAGE   (PAGE_SIZE / sizeof(u64))
 
+/* WARNING: This will be called in real-mode on HV KVM and virtual
+ * mode on PR KVM
+ */
 long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                       unsigned long ioba, unsigned long tce)
 {
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 135663a3e4fc..b9a989dc76cc 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -87,6 +87,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            unsigned int inst, int *advance)
 {
         int emulated = EMULATE_DONE;
+        int rt = get_rt(inst);
+        int rs = get_rs(inst);
+        int ra = get_ra(inst);
+        int rb = get_rb(inst);
 
         switch (get_op(inst)) {
         case 19:
@@ -106,21 +110,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
         case 31:
                 switch (get_xop(inst)) {
                 case OP_31_XOP_MFMSR:
-                        kvmppc_set_gpr(vcpu, get_rt(inst),
-                                       vcpu->arch.shared->msr);
+                        kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
                         break;
                 case OP_31_XOP_MTMSRD:
                 {
-                        ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
+                        ulong rs_val = kvmppc_get_gpr(vcpu, rs);
                         if (inst & 0x10000) {
-                                vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE);
-                                vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE);
+                                ulong new_msr = vcpu->arch.shared->msr;
+                                new_msr &= ~(MSR_RI | MSR_EE);
+                                new_msr |= rs_val & (MSR_RI | MSR_EE);
+                                vcpu->arch.shared->msr = new_msr;
                         } else
-                                kvmppc_set_msr(vcpu, rs);
+                                kvmppc_set_msr(vcpu, rs_val);
                         break;
                 }
                 case OP_31_XOP_MTMSR:
-                        kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
+                        kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
                         break;
                 case OP_31_XOP_MFSR:
                 {
@@ -130,7 +135,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                         if (vcpu->arch.mmu.mfsrin) {
                                 u32 sr;
                                 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
-                                kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+                                kvmppc_set_gpr(vcpu, rt, sr);
                         }
                         break;
                 }
@@ -138,29 +143,29 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 {
                         int srnum;
 
-                        srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
+                        srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
                         if (vcpu->arch.mmu.mfsrin) {
                                 u32 sr;
                                 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
-                                kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+                                kvmppc_set_gpr(vcpu, rt, sr);
                         }
                         break;
                 }
                 case OP_31_XOP_MTSR:
                         vcpu->arch.mmu.mtsrin(vcpu,
                                 (inst >> 16) & 0xf,
-                                kvmppc_get_gpr(vcpu, get_rs(inst)));
+                                kvmppc_get_gpr(vcpu, rs));
                         break;
                 case OP_31_XOP_MTSRIN:
                         vcpu->arch.mmu.mtsrin(vcpu,
-                                (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
-                                kvmppc_get_gpr(vcpu, get_rs(inst)));
+                                (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
+                                kvmppc_get_gpr(vcpu, rs));
                         break;
                 case OP_31_XOP_TLBIE:
                 case OP_31_XOP_TLBIEL:
                 {
                         bool large = (inst & 0x00200000) ? true : false;
-                        ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
+                        ulong addr = kvmppc_get_gpr(vcpu, rb);
                         vcpu->arch.mmu.tlbie(vcpu, addr, large);
                         break;
                 }
@@ -171,15 +176,15 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                 return EMULATE_FAIL;
 
                         vcpu->arch.mmu.slbmte(vcpu,
-                                        kvmppc_get_gpr(vcpu, get_rs(inst)),
-                                        kvmppc_get_gpr(vcpu, get_rb(inst)));
+                                        kvmppc_get_gpr(vcpu, rs),
+                                        kvmppc_get_gpr(vcpu, rb));
                         break;
                 case OP_31_XOP_SLBIE:
                         if (!vcpu->arch.mmu.slbie)
                                 return EMULATE_FAIL;
 
                         vcpu->arch.mmu.slbie(vcpu,
-                                        kvmppc_get_gpr(vcpu, get_rb(inst)));
+                                        kvmppc_get_gpr(vcpu, rb));
                         break;
                 case OP_31_XOP_SLBIA:
                         if (!vcpu->arch.mmu.slbia)
@@ -191,22 +196,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                         if (!vcpu->arch.mmu.slbmfee) {
                                 emulated = EMULATE_FAIL;
                         } else {
-                                ulong t, rb;
+                                ulong t, rb_val;
 
-                                rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-                                t = vcpu->arch.mmu.slbmfee(vcpu, rb);
-                                kvmppc_set_gpr(vcpu, get_rt(inst), t);
+                                rb_val = kvmppc_get_gpr(vcpu, rb);
+                                t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
+                                kvmppc_set_gpr(vcpu, rt, t);
                         }
                         break;
                 case OP_31_XOP_SLBMFEV:
                         if (!vcpu->arch.mmu.slbmfev) {
                                 emulated = EMULATE_FAIL;
                         } else {
-                                ulong t, rb;
+                                ulong t, rb_val;
 
-                                rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-                                t = vcpu->arch.mmu.slbmfev(vcpu, rb);
-                                kvmppc_set_gpr(vcpu, get_rt(inst), t);
+                                rb_val = kvmppc_get_gpr(vcpu, rb);
+                                t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
+                                kvmppc_set_gpr(vcpu, rt, t);
                         }
                         break;
                 case OP_31_XOP_DCBA:
@@ -214,17 +219,17 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                         break;
                 case OP_31_XOP_DCBZ:
                 {
-                        ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-                        ulong ra = 0;
+                        ulong rb_val = kvmppc_get_gpr(vcpu, rb);
+                        ulong ra_val = 0;
                         ulong addr, vaddr;
                         u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
                         u32 dsisr;
                         int r;
 
-                        if (get_ra(inst))
-                                ra = kvmppc_get_gpr(vcpu, get_ra(inst));
+                        if (ra)
+                                ra_val = kvmppc_get_gpr(vcpu, ra);
 
-                        addr = (ra + rb) & ~31ULL;
+                        addr = (ra_val + rb_val) & ~31ULL;
                         if (!(vcpu->arch.shared->msr & MSR_SF))
                                 addr &= 0xffffffff;
                         vaddr = addr;
@@ -313,10 +318,9 @@ static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
         return bat;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
         int emulated = EMULATE_DONE;
-        ulong spr_val = kvmppc_get_gpr(vcpu, rs);
 
         switch (sprn) {
         case SPRN_SDR1:
@@ -428,7 +432,7 @@ unprivileged:
         return emulated;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
         int emulated = EMULATE_DONE;
 
@@ -441,46 +445,46 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
                 struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);
 
                 if (sprn % 2)
-                        kvmppc_set_gpr(vcpu, rt, bat->raw >> 32);
+                        *spr_val = bat->raw >> 32;
                 else
-                        kvmppc_set_gpr(vcpu, rt, bat->raw);
+                        *spr_val = bat->raw;
 
                 break;
         }
         case SPRN_SDR1:
                 if (!spr_allowed(vcpu, PRIV_HYPER))
                         goto unprivileged;
-                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
+                *spr_val = to_book3s(vcpu)->sdr1;
                 break;
         case SPRN_DSISR:
-                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dsisr);
+                *spr_val = vcpu->arch.shared->dsisr;
                 break;
         case SPRN_DAR:
-                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar);
+                *spr_val = vcpu->arch.shared->dar;
                 break;
         case SPRN_HIOR:
-                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
+                *spr_val = to_book3s(vcpu)->hior;
                 break;
         case SPRN_HID0:
-                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
+                *spr_val = to_book3s(vcpu)->hid[0];
                 break;
         case SPRN_HID1:
-                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
+                *spr_val = to_book3s(vcpu)->hid[1];
                 break;
         case SPRN_HID2:
         case SPRN_HID2_GEKKO:
-                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
+                *spr_val = to_book3s(vcpu)->hid[2];
                 break;
         case SPRN_HID4:
         case SPRN_HID4_GEKKO:
-                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
+                *spr_val = to_book3s(vcpu)->hid[4];
                 break;
         case SPRN_HID5:
-                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
+                *spr_val = to_book3s(vcpu)->hid[5];
                 break;
         case SPRN_CFAR:
         case SPRN_PURR:
-                kvmppc_set_gpr(vcpu, rt, 0);
+                *spr_val = 0;
                 break;
         case SPRN_GQR0:
         case SPRN_GQR1:
@@ -490,8 +494,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
         case SPRN_GQR5:
         case SPRN_GQR6:
         case SPRN_GQR7:
-                kvmppc_set_gpr(vcpu, rt,
-                               to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]);
+                *spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
                 break;
         case SPRN_THRM1:
         case SPRN_THRM2:
@@ -506,7 +509,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
         case SPRN_PMC3_GEKKO:
         case SPRN_PMC4_GEKKO:
         case SPRN_WPAR_GEKKO:
-                kvmppc_set_gpr(vcpu, rt, 0);
+                *spr_val = 0;
                 break;
         default:
 unprivileged:
@@ -565,23 +568,22 @@ u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
 ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
 {
         ulong dar = 0;
-        ulong ra;
+        ulong ra = get_ra(inst);
+        ulong rb = get_rb(inst);
 
         switch (get_op(inst)) {
         case OP_LFS:
         case OP_LFD:
         case OP_STFD:
         case OP_STFS:
-                ra = get_ra(inst);
                 if (ra)
                         dar = kvmppc_get_gpr(vcpu, ra);
                 dar += (s32)((s16)inst);
                 break;
         case 31:
-                ra = get_ra(inst);
                 if (ra)
                         dar = kvmppc_get_gpr(vcpu, ra);
-                dar += kvmppc_get_gpr(vcpu, get_rb(inst));
+                dar += kvmppc_get_gpr(vcpu, rb);
                 break;
         default:
                 printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 907935764de0..db36598a90d7 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1093,115 +1093,6 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
         return r;
 }
 
-static long kvmppc_stt_npages(unsigned long window_size)
-{
-        return ALIGN((window_size >> SPAPR_TCE_SHIFT)
-                     * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
-}
-
-static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
-{
-        struct kvm *kvm = stt->kvm;
-        int i;
-
-        mutex_lock(&kvm->lock);
-        list_del(&stt->list);
-        for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
-                __free_page(stt->pages[i]);
-        kfree(stt);
-        mutex_unlock(&kvm->lock);
-
-        kvm_put_kvm(kvm);
-}
-
-static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-        struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
-        struct page *page;
-
-        if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
-                return VM_FAULT_SIGBUS;
-
-        page = stt->pages[vmf->pgoff];
-        get_page(page);
-        vmf->page = page;
-        return 0;
-}
-
-static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
-        .fault = kvm_spapr_tce_fault,
-};
-
-static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
-{
-        vma->vm_ops = &kvm_spapr_tce_vm_ops;
-        return 0;
-}
-
-static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
-{
-        struct kvmppc_spapr_tce_table *stt = filp->private_data;
-
-        release_spapr_tce_table(stt);
-        return 0;
-}
-
-static struct file_operations kvm_spapr_tce_fops = {
-        .mmap           = kvm_spapr_tce_mmap,
-        .release        = kvm_spapr_tce_release,
-};
-
-long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
-                                   struct kvm_create_spapr_tce *args)
-{
-        struct kvmppc_spapr_tce_table *stt = NULL;
-        long npages;
-        int ret = -ENOMEM;
-        int i;
-
-        /* Check this LIOBN hasn't been previously allocated */
-        list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
-                if (stt->liobn == args->liobn)
-                        return -EBUSY;
-        }
-
-        npages = kvmppc_stt_npages(args->window_size);
-
-        stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
-                      GFP_KERNEL);
-        if (!stt)
-                goto fail;
-
-        stt->liobn = args->liobn;
-        stt->window_size = args->window_size;
-        stt->kvm = kvm;
-
-        for (i = 0; i < npages; i++) {
-                stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
-                if (!stt->pages[i])
-                        goto fail;
-        }
-
-        kvm_get_kvm(kvm);
-
-        mutex_lock(&kvm->lock);
-        list_add(&stt->list, &kvm->arch.spapr_tce_tables);
-
-        mutex_unlock(&kvm->lock);
-
-        return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
-                                stt, O_RDWR);
-
-fail:
-        if (stt) {
-                for (i = 0; i < npages; i++)
-                        if (stt->pages[i])
-                                __free_page(stt->pages[i]);
-
-                kfree(stt);
-        }
-        return ret;
-}
 
 /* Work out RMLS (real mode limit selector) field value for a given RMA size.
    Assumes POWER7 or PPC970. */
@@ -1284,6 +1175,38 @@ long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
         return fd;
 }
 
+static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
+                                     int linux_psize)
+{
+        struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];
+
+        if (!def->shift)
+                return;
+        (*sps)->page_shift = def->shift;
+        (*sps)->slb_enc = def->sllp;
+        (*sps)->enc[0].page_shift = def->shift;
+        (*sps)->enc[0].pte_enc = def->penc;
+        (*sps)++;
+}
+
+int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
+{
+        struct kvm_ppc_one_seg_page_size *sps;
+
+        info->flags = KVM_PPC_PAGE_SIZES_REAL;
+        if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
+                info->flags |= KVM_PPC_1T_SEGMENTS;
+        info->slb_size = mmu_slb_size;
+
+        /* We only support these sizes for now, and no multi-size segments */
+        sps = &info->sps[0];
+        kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
+        kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
+        kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);
+
+        return 0;
+}
+
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
@@ -1582,12 +1505,12 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
         return EMULATE_FAIL;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
         return EMULATE_FAIL;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
         return EMULATE_FAIL;
 }
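Note: kvm_vm_ioctl_get_smmu_info() above is the kernel side of the new ioctl from "Add new ioctl to retrieve server MMU infos". The uapi definition lives outside this arch-limited diff, so the following userspace sketch assumes the KVM_PPC_GET_SMMU_INFO ioctl name and struct kvm_ppc_smmu_info are exported by <linux/kvm.h> as elsewhere in the series:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* vm_fd is an open VM file descriptor obtained from KVM_CREATE_VM. */
static void dump_smmu_info(int vm_fd)
{
        struct kvm_ppc_smmu_info info;

        if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) < 0) {
                perror("KVM_PPC_GET_SMMU_INFO");
                return;
        }
        printf("flags=0x%x slb_size=%u base page shift=%u\n",
               info.flags, info.slb_size, info.sps[0].page_shift);
}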
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index dba282e5093f..a1baec340f7e 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -548,6 +548,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
         run->exit_reason = KVM_EXIT_UNKNOWN;
         run->ready_for_interrupt_injection = 1;
 
+        /* We get here with MSR.EE=0, so enable it to be a nice citizen */
+        __hard_irq_enable();
+
         trace_kvm_book3s_exit(exit_nr, vcpu);
         preempt_enable();
         kvm_resched(vcpu);
@@ -1155,6 +1158,31 @@ out:
         return r;
 }
 
+#ifdef CONFIG_PPC64
+int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
+{
+        /* No flags */
+        info->flags = 0;
+
+        /* SLB is always 64 entries */
+        info->slb_size = 64;
+
+        /* Standard 4k base page size segment */
+        info->sps[0].page_shift = 12;
+        info->sps[0].slb_enc = 0;
+        info->sps[0].enc[0].page_shift = 12;
+        info->sps[0].enc[0].pte_enc = 0;
+
+        /* Standard 16M large page size segment */
+        info->sps[1].page_shift = 24;
+        info->sps[1].slb_enc = SLB_VSID_L;
+        info->sps[1].enc[0].page_shift = 24;
+        info->sps[1].enc[0].pte_enc = 0;
+
+        return 0;
+}
+#endif /* CONFIG_PPC64 */
+
 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                       struct kvm_userspace_memory_region *mem)
 {
@@ -1168,11 +1196,18 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
 
 int kvmppc_core_init_vm(struct kvm *kvm)
 {
+#ifdef CONFIG_PPC64
+        INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+#endif
+
         return 0;
 }
 
 void kvmppc_core_destroy_vm(struct kvm *kvm)
 {
+#ifdef CONFIG_PPC64
+        WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
+#endif
 }
 
 static int kvmppc_book3s_init(void)
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 60ac0e793476..3ff9013d6e79 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -15,6 +15,8 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/anon_inodes.h>
+
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
@@ -211,6 +213,20 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
         return EMULATE_DONE;
 }
 
+static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
+{
+        unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
+        unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
+        unsigned long tce = kvmppc_get_gpr(vcpu, 6);
+        long rc;
+
+        rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
+        if (rc == H_TOO_HARD)
+                return EMULATE_FAIL;
+        kvmppc_set_gpr(vcpu, 3, rc);
+        return EMULATE_DONE;
+}
+
 int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
 {
         switch (cmd) {
@@ -222,6 +238,8 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
                 return kvmppc_h_pr_protect(vcpu);
         case H_BULK_REMOVE:
                 return kvmppc_h_pr_bulk_remove(vcpu);
+        case H_PUT_TCE:
+                return kvmppc_h_pr_put_tce(vcpu);
         case H_CEDE:
                 kvm_vcpu_block(vcpu);
                 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
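Note: kvmppc_h_pr_put_tce() follows the usual PAPR hcall register convention: arguments arrive in r4 (liobn), r5 (I/O bus address) and r6 (the TCE value), and the return code is passed back in r3. Inside kvmppc_h_put_tce(), whose body is not shown in this view, the table slot is presumably derived from the I/O bus address along these lines (kernel-side sketch, assuming 4 KiB IOMMU pages and the table layout from book3s_64_vio.c above; bounds and permission checks omitted):

/* Sketch only: install one TCE into the per-LIOBN table. */
static void stt_install_tce(struct kvmppc_spapr_tce_table *stt,
                            unsigned long ioba, unsigned long tce)
{
        unsigned long idx = ioba >> SPAPR_TCE_SHIFT; /* which window entry */
        u64 *tbl = page_address(stt->pages[idx / TCES_PER_PAGE]);

        tbl[idx % TCES_PER_PAGE] = tce;
}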
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 0676ae249b9f..8b2fc66a3066 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -128,24 +128,25 @@ no_dcbz32_on:
         /* First clear RI in our current MSR value */
         li      r0, MSR_RI
         andc    r6, r6, r0
-        MTMSR_EERI(r6)
-        mtsrr0  r9
-        mtsrr1  r4
 
         PPC_LL  r0, SVCPU_R0(r3)
         PPC_LL  r1, SVCPU_R1(r3)
         PPC_LL  r2, SVCPU_R2(r3)
-        PPC_LL  r4, SVCPU_R4(r3)
         PPC_LL  r5, SVCPU_R5(r3)
-        PPC_LL  r6, SVCPU_R6(r3)
         PPC_LL  r7, SVCPU_R7(r3)
         PPC_LL  r8, SVCPU_R8(r3)
-        PPC_LL  r9, SVCPU_R9(r3)
         PPC_LL  r10, SVCPU_R10(r3)
         PPC_LL  r11, SVCPU_R11(r3)
         PPC_LL  r12, SVCPU_R12(r3)
         PPC_LL  r13, SVCPU_R13(r3)
 
+        MTMSR_EERI(r6)
+        mtsrr0  r9
+        mtsrr1  r4
+
+        PPC_LL  r4, SVCPU_R4(r3)
+        PPC_LL  r6, SVCPU_R6(r3)
+        PPC_LL  r9, SVCPU_R9(r3)
         PPC_LL  r3, (SVCPU_R3)(r3)
 
         RFI
@@ -197,6 +198,7 @@ kvmppc_interrupt:
         /* Save guest PC and MSR */
 #ifdef CONFIG_PPC64
 BEGIN_FTR_SECTION
+        mr      r10, r12
         andi.   r0,r12,0x2
         beq     1f
         mfspr   r3,SPRN_HSRR0
@@ -316,23 +318,17 @@ no_dcbz32_off:
          * Having set up SRR0/1 with the address where we want
          * to continue with relocation on (potentially in module
          * space), we either just go straight there with rfi[d],
-         * or we jump to an interrupt handler with bctr if there
-         * is an interrupt to be handled first.  In the latter
-         * case, the rfi[d] at the end of the interrupt handler
-         * will get us back to where we want to continue.
+         * or we jump to an interrupt handler if there is an
+         * interrupt to be handled first.  In the latter case,
+         * the rfi[d] at the end of the interrupt handler will
+         * get us back to where we want to continue.
          */
 
-        cmpwi   r12, BOOK3S_INTERRUPT_EXTERNAL
-        beq     1f
-        cmpwi   r12, BOOK3S_INTERRUPT_DECREMENTER
-        beq     1f
-        cmpwi   r12, BOOK3S_INTERRUPT_PERFMON
-1:      mtctr   r12
-
         /* Register usage at this point:
          *
          * R1       = host R1
          * R2       = host R2
+         * R10      = raw exit handler id
          * R12      = exit handler id
          * R13      = shadow vcpu (32-bit) or PACA (64-bit)
          * SVCPU.*  = guest *
@@ -342,12 +338,26 @@ no_dcbz32_off:
         PPC_LL  r6, HSTATE_HOST_MSR(r13)
         PPC_LL  r8, HSTATE_VMHANDLER(r13)
 
-        /* Restore host msr -> SRR1 */
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+        andi.   r0,r10,0x2
+        beq     1f
+        mtspr   SPRN_HSRR1, r6
+        mtspr   SPRN_HSRR0, r8
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+#endif
+1:      /* Restore host msr -> SRR1 */
         mtsrr1  r6
         /* Load highmem handler address */
         mtsrr0  r8
 
         /* RFI into the highmem handler, or jump to interrupt handler */
-        beqctr
+        cmpwi   r12, BOOK3S_INTERRUPT_EXTERNAL
+        beqa    BOOK3S_INTERRUPT_EXTERNAL
+        cmpwi   r12, BOOK3S_INTERRUPT_DECREMENTER
+        beqa    BOOK3S_INTERRUPT_DECREMENTER
+        cmpwi   r12, BOOK3S_INTERRUPT_PERFMON
+        beqa    BOOK3S_INTERRUPT_PERFMON
+
         RFI
 kvmppc_handler_trampoline_exit_end:
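Note: the old exit path parked the handler id in CTR just to 'beqctr' once; the rewritten tail compares r12 and branches absolute ('beqa') straight to the fixed exception vectors, dropping the mtctr from every guest exit. That only works because the BOOK3S_INTERRUPT_* ids double as the architected low-memory vector offsets. The values below are assumed from kvm_asm.h, which is not part of this hunk:

/* Assumed vector offsets; on Book3S the handler id equals the exception
 * vector, which is what lets 'beqa id' land on the right handler. */
#define BOOK3S_INTERRUPT_EXTERNAL       0x500
#define BOOK3S_INTERRUPT_DECREMENTER    0x900
#define BOOK3S_INTERRUPT_PERFMON        0xf00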
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 62c4fe55d19b..ba61974c1e20 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -75,8 +75,8 @@ void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
 
 int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                             unsigned int inst, int *advance);
-int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
-int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
+int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
+int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
 
 /* low-level asm code to transfer guest state */
 void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 904412bbea40..6c76397f2af4 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -40,8 +40,8 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                             unsigned int inst, int *advance)
 {
         int emulated = EMULATE_DONE;
-        int rs;
-        int rt;
+        int rs = get_rs(inst);
+        int rt = get_rt(inst);
 
         switch (get_op(inst)) {
         case 19:
@@ -62,19 +62,16 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
         switch (get_xop(inst)) {
 
         case OP_31_XOP_MFMSR:
-                rt = get_rt(inst);
                 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
                 kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
                 break;
 
         case OP_31_XOP_MTMSR:
-                rs = get_rs(inst);
                 kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
                 kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
                 break;
 
         case OP_31_XOP_WRTEE:
-                rs = get_rs(inst);
                 vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
                                         | (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
                 kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
@@ -105,22 +102,26 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
  * will return the wrong result if called for them in another context
  * (such as debugging).
  */
-int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
         int emulated = EMULATE_DONE;
-        ulong spr_val = kvmppc_get_gpr(vcpu, rs);
 
         switch (sprn) {
         case SPRN_DEAR:
-                vcpu->arch.shared->dar = spr_val; break;
+                vcpu->arch.shared->dar = spr_val;
+                break;
         case SPRN_ESR:
-                vcpu->arch.shared->esr = spr_val; break;
+                vcpu->arch.shared->esr = spr_val;
+                break;
         case SPRN_DBCR0:
-                vcpu->arch.dbcr0 = spr_val; break;
+                vcpu->arch.dbcr0 = spr_val;
+                break;
         case SPRN_DBCR1:
-                vcpu->arch.dbcr1 = spr_val; break;
+                vcpu->arch.dbcr1 = spr_val;
+                break;
         case SPRN_DBSR:
-                vcpu->arch.dbsr &= ~spr_val; break;
+                vcpu->arch.dbsr &= ~spr_val;
+                break;
         case SPRN_TSR:
                 kvmppc_clr_tsr_bits(vcpu, spr_val);
                 break;
@@ -134,13 +135,17 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
          * guest (PR-mode only).
          */
         case SPRN_SPRG4:
-                vcpu->arch.shared->sprg4 = spr_val; break;
+                vcpu->arch.shared->sprg4 = spr_val;
+                break;
         case SPRN_SPRG5:
-                vcpu->arch.shared->sprg5 = spr_val; break;
+                vcpu->arch.shared->sprg5 = spr_val;
+                break;
         case SPRN_SPRG6:
-                vcpu->arch.shared->sprg6 = spr_val; break;
+                vcpu->arch.shared->sprg6 = spr_val;
+                break;
         case SPRN_SPRG7:
-                vcpu->arch.shared->sprg7 = spr_val; break;
+                vcpu->arch.shared->sprg7 = spr_val;
+                break;
 
         case SPRN_IVPR:
                 vcpu->arch.ivpr = spr_val;
@@ -210,75 +215,83 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
         return emulated;
 }
 
-int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
         int emulated = EMULATE_DONE;
 
         switch (sprn) {
         case SPRN_IVPR:
-                kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break;
+                *spr_val = vcpu->arch.ivpr;
+                break;
         case SPRN_DEAR:
-                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break;
+                *spr_val = vcpu->arch.shared->dar;
+                break;
         case SPRN_ESR:
-                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break;
+                *spr_val = vcpu->arch.shared->esr;
+                break;
         case SPRN_DBCR0:
-                kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
+                *spr_val = vcpu->arch.dbcr0;
+                break;
         case SPRN_DBCR1:
-                kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break;
+                *spr_val = vcpu->arch.dbcr1;
+                break;
        case SPRN_DBSR:
-                kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break;
+                *spr_val = vcpu->arch.dbsr;
+                break;
         case SPRN_TSR:
-                kvmppc_set_gpr(vcpu, rt, vcpu->arch.tsr); break;
+                *spr_val = vcpu->arch.tsr;
+                break;
         case SPRN_TCR:
-                kvmppc_set_gpr(vcpu, rt, vcpu->arch.tcr); break;
+                *spr_val = vcpu->arch.tcr;
+                break;
 
         case SPRN_IVOR0:
-                kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
+                *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
                 break;
         case SPRN_IVOR1:
-                kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]);
+                *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
                 break;
         case SPRN_IVOR2:
-                kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
+                *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
                 break;
         case SPRN_IVOR3:
-                kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]);
+                *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
246 | break; | 259 | break; |
247 | case SPRN_IVOR4: | 260 | case SPRN_IVOR4: |
248 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]); | 261 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; |
249 | break; | 262 | break; |
250 | case SPRN_IVOR5: | 263 | case SPRN_IVOR5: |
251 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]); | 264 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; |
252 | break; | 265 | break; |
253 | case SPRN_IVOR6: | 266 | case SPRN_IVOR6: |
254 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]); | 267 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; |
255 | break; | 268 | break; |
256 | case SPRN_IVOR7: | 269 | case SPRN_IVOR7: |
257 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]); | 270 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; |
258 | break; | 271 | break; |
259 | case SPRN_IVOR8: | 272 | case SPRN_IVOR8: |
260 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]); | 273 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; |
261 | break; | 274 | break; |
262 | case SPRN_IVOR9: | 275 | case SPRN_IVOR9: |
263 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]); | 276 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; |
264 | break; | 277 | break; |
265 | case SPRN_IVOR10: | 278 | case SPRN_IVOR10: |
266 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]); | 279 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; |
267 | break; | 280 | break; |
268 | case SPRN_IVOR11: | 281 | case SPRN_IVOR11: |
269 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]); | 282 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; |
270 | break; | 283 | break; |
271 | case SPRN_IVOR12: | 284 | case SPRN_IVOR12: |
272 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]); | 285 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; |
273 | break; | 286 | break; |
274 | case SPRN_IVOR13: | 287 | case SPRN_IVOR13: |
275 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]); | 288 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; |
276 | break; | 289 | break; |
277 | case SPRN_IVOR14: | 290 | case SPRN_IVOR14: |
278 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]); | 291 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; |
279 | break; | 292 | break; |
280 | case SPRN_IVOR15: | 293 | case SPRN_IVOR15: |
281 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]); | 294 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; |
282 | break; | 295 | break; |
283 | 296 | ||
284 | default: | 297 | default: |
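The mtspr/mfspr cleanup above moves GPR access out of the per-core backends: kvmppc_booke_emulate_mtspr() now receives the value to write (ulong spr_val), and kvmppc_booke_emulate_mfspr() returns the value read through a ulong *spr_val out-parameter, leaving the single kvmppc_get_gpr()/kvmppc_set_gpr() call to the generic emulation layer. A minimal C sketch of the pattern; the types and the dispatch function are illustrative stand-ins, not the kernel structures:

typedef unsigned long ulong;

enum { EMULATE_DONE, EMULATE_FAIL };

struct toy_vcpu {                       /* illustrative stand-in for kvm_vcpu */
	ulong ivpr;
	ulong gpr[32];
};

/* Backend: fills *spr_val instead of touching the register file. */
static int toy_emulate_mfspr(struct toy_vcpu *v, int sprn, ulong *spr_val)
{
	switch (sprn) {
	case 63:                        /* SPRN_IVPR on Book E */
		*spr_val = v->ivpr;
		return EMULATE_DONE;
	default:
		return EMULATE_FAIL;
	}
}

/* Generic layer: the single GPR write-back site. */
static int toy_dispatch_mfspr(struct toy_vcpu *v, int sprn, int rt)
{
	ulong spr_val = 0;
	int emulated = toy_emulate_mfspr(v, sprn, &spr_val);

	/* As in the patch: on EMULATE_FAIL the kernel logs "unknown spr"
	 * but the write-back still happens, with spr_val left at zero. */
	v->gpr[rt] = spr_val;
	return emulated;
}

int main(void)
{
	struct toy_vcpu v = { .ivpr = 0xdead0000UL };

	toy_dispatch_mfspr(&v, 63, 5);  /* v.gpr[5] now holds v.ivpr */
	return 0;
}

One write-back site in the dispatcher replaces the many kvmppc_set_gpr() calls scattered through the old switch bodies.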
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index c8c4b878795a..8feec2ff3928 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S | |||
@@ -419,13 +419,13 @@ lightweight_exit: | |||
419 | * written directly to the shared area, so we | 419 | * written directly to the shared area, so we |
420 | * need to reload them here with the guest's values. | 420 | * need to reload them here with the guest's values. |
421 | */ | 421 | */ |
422 | lwz r3, VCPU_SHARED_SPRG4(r5) | 422 | PPC_LD(r3, VCPU_SHARED_SPRG4, r5) |
423 | mtspr SPRN_SPRG4W, r3 | 423 | mtspr SPRN_SPRG4W, r3 |
424 | lwz r3, VCPU_SHARED_SPRG5(r5) | 424 | PPC_LD(r3, VCPU_SHARED_SPRG5, r5) |
425 | mtspr SPRN_SPRG5W, r3 | 425 | mtspr SPRN_SPRG5W, r3 |
426 | lwz r3, VCPU_SHARED_SPRG6(r5) | 426 | PPC_LD(r3, VCPU_SHARED_SPRG6, r5) |
427 | mtspr SPRN_SPRG6W, r3 | 427 | mtspr SPRN_SPRG6W, r3 |
428 | lwz r3, VCPU_SHARED_SPRG7(r5) | 428 | PPC_LD(r3, VCPU_SHARED_SPRG7, r5) |
429 | mtspr SPRN_SPRG7W, r3 | 429 | mtspr SPRN_SPRG7W, r3 |
430 | 430 | ||
431 | #ifdef CONFIG_KVM_EXIT_TIMING | 431 | #ifdef CONFIG_KVM_EXIT_TIMING |
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S index 909e96e0650c..6048a00515d7 100644 --- a/arch/powerpc/kvm/bookehv_interrupts.S +++ b/arch/powerpc/kvm/bookehv_interrupts.S | |||
@@ -87,17 +87,13 @@ | |||
87 | mfspr r8, SPRN_TBRL | 87 | mfspr r8, SPRN_TBRL |
88 | mfspr r9, SPRN_TBRU | 88 | mfspr r9, SPRN_TBRU |
89 | cmpw r9, r7 | 89 | cmpw r9, r7 |
90 | PPC_STL r8, VCPU_TIMING_EXIT_TBL(r4) | 90 | stw r8, VCPU_TIMING_EXIT_TBL(r4) |
91 | bne- 1b | 91 | bne- 1b |
92 | PPC_STL r9, VCPU_TIMING_EXIT_TBU(r4) | 92 | stw r9, VCPU_TIMING_EXIT_TBU(r4) |
93 | #endif | 93 | #endif |
94 | 94 | ||
95 | oris r8, r6, MSR_CE@h | 95 | oris r8, r6, MSR_CE@h |
96 | #ifdef CONFIG_64BIT | 96 | PPC_STD(r6, VCPU_SHARED_MSR, r11) |
97 | std r6, (VCPU_SHARED_MSR)(r11) | ||
98 | #else | ||
99 | stw r6, (VCPU_SHARED_MSR + 4)(r11) | ||
100 | #endif | ||
101 | ori r8, r8, MSR_ME | MSR_RI | 97 | ori r8, r8, MSR_ME | MSR_RI |
102 | PPC_STL r5, VCPU_PC(r4) | 98 | PPC_STL r5, VCPU_PC(r4) |
103 | 99 | ||
@@ -220,7 +216,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1) | |||
220 | PPC_STL r4, VCPU_GPR(r4)(r11) | 216 | PPC_STL r4, VCPU_GPR(r4)(r11) |
221 | PPC_LL r4, THREAD_NORMSAVE(0)(r10) | 217 | PPC_LL r4, THREAD_NORMSAVE(0)(r10) |
222 | PPC_STL r5, VCPU_GPR(r5)(r11) | 218 | PPC_STL r5, VCPU_GPR(r5)(r11) |
223 | PPC_STL r13, VCPU_CR(r11) | 219 | stw r13, VCPU_CR(r11) |
224 | mfspr r5, \srr0 | 220 | mfspr r5, \srr0 |
225 | PPC_STL r3, VCPU_GPR(r10)(r11) | 221 | PPC_STL r3, VCPU_GPR(r10)(r11) |
226 | PPC_LL r3, THREAD_NORMSAVE(2)(r10) | 222 | PPC_LL r3, THREAD_NORMSAVE(2)(r10) |
@@ -247,7 +243,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1) | |||
247 | PPC_STL r4, VCPU_GPR(r4)(r11) | 243 | PPC_STL r4, VCPU_GPR(r4)(r11) |
248 | PPC_LL r4, GPR9(r8) | 244 | PPC_LL r4, GPR9(r8) |
249 | PPC_STL r5, VCPU_GPR(r5)(r11) | 245 | PPC_STL r5, VCPU_GPR(r5)(r11) |
250 | PPC_STL r9, VCPU_CR(r11) | 246 | stw r9, VCPU_CR(r11) |
251 | mfspr r5, \srr0 | 247 | mfspr r5, \srr0 |
252 | PPC_STL r3, VCPU_GPR(r8)(r11) | 248 | PPC_STL r3, VCPU_GPR(r8)(r11) |
253 | PPC_LL r3, GPR10(r8) | 249 | PPC_LL r3, GPR10(r8) |
@@ -256,10 +252,10 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1) | |||
256 | mfspr r6, \srr1 | 252 | mfspr r6, \srr1 |
257 | PPC_LL r4, GPR11(r8) | 253 | PPC_LL r4, GPR11(r8) |
258 | PPC_STL r7, VCPU_GPR(r7)(r11) | 254 | PPC_STL r7, VCPU_GPR(r7)(r11) |
259 | PPC_STL r8, VCPU_GPR(r8)(r11) | ||
260 | PPC_STL r3, VCPU_GPR(r10)(r11) | 255 | PPC_STL r3, VCPU_GPR(r10)(r11) |
261 | mfctr r7 | 256 | mfctr r7 |
262 | PPC_STL r12, VCPU_GPR(r12)(r11) | 257 | PPC_STL r12, VCPU_GPR(r12)(r11) |
258 | PPC_STL r13, VCPU_GPR(r13)(r11) | ||
263 | PPC_STL r4, VCPU_GPR(r11)(r11) | 259 | PPC_STL r4, VCPU_GPR(r11)(r11) |
264 | PPC_STL r7, VCPU_CTR(r11) | 260 | PPC_STL r7, VCPU_CTR(r11) |
265 | mr r4, r11 | 261 | mr r4, r11 |
@@ -319,14 +315,14 @@ _GLOBAL(kvmppc_resume_host) | |||
319 | mfspr r6, SPRN_SPRG4 | 315 | mfspr r6, SPRN_SPRG4 |
320 | PPC_STL r5, VCPU_LR(r4) | 316 | PPC_STL r5, VCPU_LR(r4) |
321 | mfspr r7, SPRN_SPRG5 | 317 | mfspr r7, SPRN_SPRG5 |
322 | PPC_STL r3, VCPU_VRSAVE(r4) | 318 | stw r3, VCPU_VRSAVE(r4) |
323 | PPC_STL r6, VCPU_SHARED_SPRG4(r11) | 319 | PPC_STD(r6, VCPU_SHARED_SPRG4, r11) |
324 | mfspr r8, SPRN_SPRG6 | 320 | mfspr r8, SPRN_SPRG6 |
325 | PPC_STL r7, VCPU_SHARED_SPRG5(r11) | 321 | PPC_STD(r7, VCPU_SHARED_SPRG5, r11) |
326 | mfspr r9, SPRN_SPRG7 | 322 | mfspr r9, SPRN_SPRG7 |
327 | PPC_STL r8, VCPU_SHARED_SPRG6(r11) | 323 | PPC_STD(r8, VCPU_SHARED_SPRG6, r11) |
328 | mfxer r3 | 324 | mfxer r3 |
329 | PPC_STL r9, VCPU_SHARED_SPRG7(r11) | 325 | PPC_STD(r9, VCPU_SHARED_SPRG7, r11) |
330 | 326 | ||
331 | /* save guest MAS registers and restore host mas4 & mas6 */ | 327 | /* save guest MAS registers and restore host mas4 & mas6 */ |
332 | mfspr r5, SPRN_MAS0 | 328 | mfspr r5, SPRN_MAS0 |
@@ -335,11 +331,7 @@ _GLOBAL(kvmppc_resume_host) | |||
335 | stw r5, VCPU_SHARED_MAS0(r11) | 331 | stw r5, VCPU_SHARED_MAS0(r11) |
336 | mfspr r7, SPRN_MAS2 | 332 | mfspr r7, SPRN_MAS2 |
337 | stw r6, VCPU_SHARED_MAS1(r11) | 333 | stw r6, VCPU_SHARED_MAS1(r11) |
338 | #ifdef CONFIG_64BIT | 334 | PPC_STD(r7, VCPU_SHARED_MAS2, r11) |
339 | std r7, (VCPU_SHARED_MAS2)(r11) | ||
340 | #else | ||
341 | stw r7, (VCPU_SHARED_MAS2 + 4)(r11) | ||
342 | #endif | ||
343 | mfspr r5, SPRN_MAS3 | 335 | mfspr r5, SPRN_MAS3 |
344 | mfspr r6, SPRN_MAS4 | 336 | mfspr r6, SPRN_MAS4 |
345 | stw r5, VCPU_SHARED_MAS7_3+4(r11) | 337 | stw r5, VCPU_SHARED_MAS7_3+4(r11) |
@@ -527,11 +519,7 @@ lightweight_exit: | |||
527 | stw r3, VCPU_HOST_MAS6(r4) | 519 | stw r3, VCPU_HOST_MAS6(r4) |
528 | lwz r3, VCPU_SHARED_MAS0(r11) | 520 | lwz r3, VCPU_SHARED_MAS0(r11) |
529 | lwz r5, VCPU_SHARED_MAS1(r11) | 521 | lwz r5, VCPU_SHARED_MAS1(r11) |
530 | #ifdef CONFIG_64BIT | 522 | PPC_LD(r6, VCPU_SHARED_MAS2, r11) |
531 | ld r6, (VCPU_SHARED_MAS2)(r11) | ||
532 | #else | ||
533 | lwz r6, (VCPU_SHARED_MAS2 + 4)(r11) | ||
534 | #endif | ||
535 | lwz r7, VCPU_SHARED_MAS7_3+4(r11) | 523 | lwz r7, VCPU_SHARED_MAS7_3+4(r11) |
536 | lwz r8, VCPU_SHARED_MAS4(r11) | 524 | lwz r8, VCPU_SHARED_MAS4(r11) |
537 | mtspr SPRN_MAS0, r3 | 525 | mtspr SPRN_MAS0, r3 |
@@ -549,13 +537,13 @@ lightweight_exit: | |||
549 | * SPRGs, so we need to reload them here with the guest's values. | 537 | * SPRGs, so we need to reload them here with the guest's values. |
550 | */ | 538 | */ |
551 | lwz r3, VCPU_VRSAVE(r4) | 539 | lwz r3, VCPU_VRSAVE(r4) |
552 | lwz r5, VCPU_SHARED_SPRG4(r11) | 540 | PPC_LD(r5, VCPU_SHARED_SPRG4, r11) |
553 | mtspr SPRN_VRSAVE, r3 | 541 | mtspr SPRN_VRSAVE, r3 |
554 | lwz r6, VCPU_SHARED_SPRG5(r11) | 542 | PPC_LD(r6, VCPU_SHARED_SPRG5, r11) |
555 | mtspr SPRN_SPRG4W, r5 | 543 | mtspr SPRN_SPRG4W, r5 |
556 | lwz r7, VCPU_SHARED_SPRG6(r11) | 544 | PPC_LD(r7, VCPU_SHARED_SPRG6, r11) |
557 | mtspr SPRN_SPRG5W, r6 | 545 | mtspr SPRN_SPRG5W, r6 |
558 | lwz r8, VCPU_SHARED_SPRG7(r11) | 546 | PPC_LD(r8, VCPU_SHARED_SPRG7, r11) |
559 | mtspr SPRN_SPRG6W, r7 | 547 | mtspr SPRN_SPRG6W, r7 |
560 | mtspr SPRN_SPRG7W, r8 | 548 | mtspr SPRN_SPRG7W, r8 |
561 | 549 | ||
@@ -563,13 +551,9 @@ lightweight_exit: | |||
563 | PPC_LL r3, VCPU_LR(r4) | 551 | PPC_LL r3, VCPU_LR(r4) |
564 | PPC_LL r5, VCPU_XER(r4) | 552 | PPC_LL r5, VCPU_XER(r4) |
565 | PPC_LL r6, VCPU_CTR(r4) | 553 | PPC_LL r6, VCPU_CTR(r4) |
566 | PPC_LL r7, VCPU_CR(r4) | 554 | lwz r7, VCPU_CR(r4) |
567 | PPC_LL r8, VCPU_PC(r4) | 555 | PPC_LL r8, VCPU_PC(r4) |
568 | #ifdef CONFIG_64BIT | 556 | PPC_LD(r9, VCPU_SHARED_MSR, r11) |
569 | ld r9, (VCPU_SHARED_MSR)(r11) | ||
570 | #else | ||
571 | lwz r9, (VCPU_SHARED_MSR + 4)(r11) | ||
572 | #endif | ||
573 | PPC_LL r0, VCPU_GPR(r0)(r4) | 557 | PPC_LL r0, VCPU_GPR(r0)(r4) |
574 | PPC_LL r1, VCPU_GPR(r1)(r4) | 558 | PPC_LL r1, VCPU_GPR(r1)(r4) |
575 | PPC_LL r2, VCPU_GPR(r2)(r4) | 559 | PPC_LL r2, VCPU_GPR(r2)(r4) |
@@ -590,9 +574,9 @@ lightweight_exit: | |||
590 | mfspr r9, SPRN_TBRL | 574 | mfspr r9, SPRN_TBRL |
591 | mfspr r8, SPRN_TBRU | 575 | mfspr r8, SPRN_TBRU |
592 | cmpw r8, r6 | 576 | cmpw r8, r6 |
593 | PPC_STL r9, VCPU_TIMING_LAST_ENTER_TBL(r4) | 577 | stw r9, VCPU_TIMING_LAST_ENTER_TBL(r4) |
594 | bne 1b | 578 | bne 1b |
595 | PPC_STL r8, VCPU_TIMING_LAST_ENTER_TBU(r4) | 579 | stw r8, VCPU_TIMING_LAST_ENTER_TBU(r4) |
596 | #endif | 580 | #endif |
597 | 581 | ||
598 | /* | 582 | /* |
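The #ifdef CONFIG_64BIT blocks deleted above are exactly the pattern the PPC_STD()/PPC_LD() macros hide: shared-area fields such as VCPU_SHARED_MSR and VCPU_SHARED_MAS2 are 64 bits wide, and on a 32-bit big-endian kernel the word that lwz/stw must touch sits at offset + 4. A small host-side C check of that layout (illustrative; assumes a big-endian host, as on PowerPC):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t shared_msr = 0x123456789abcdef0ULL;
	uint32_t low;

	/* On big endian the low 32 bits of a u64 live at byte offset 4,
	 * which is why the 32-bit PPC_LD/PPC_STD variants use (offset + 4). */
	memcpy(&low, (unsigned char *)&shared_msr + 4, sizeof(low));
	printf("word at +4: 0x%08x\n", low);  /* 0x9abcdef0 on big endian */
	return 0;
}

Collapsing the two #ifdef arms into one macro also keeps the offset arithmetic in a single place instead of repeating it at every save/restore site.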
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h index 7967f3f10a16..aa8b81428bf4 100644 --- a/arch/powerpc/kvm/e500.h +++ b/arch/powerpc/kvm/e500.h | |||
@@ -89,6 +89,10 @@ struct kvmppc_vcpu_e500 { | |||
89 | u64 *g2h_tlb1_map; | 89 | u64 *g2h_tlb1_map; |
90 | unsigned int *h2g_tlb1_rmap; | 90 | unsigned int *h2g_tlb1_rmap; |
91 | 91 | ||
92 | /* Minimum and maximum address mapped by TLB1 */ | ||
93 | unsigned long tlb1_min_eaddr; | ||
94 | unsigned long tlb1_max_eaddr; | ||
95 | |||
92 | #ifdef CONFIG_KVM_E500V2 | 96 | #ifdef CONFIG_KVM_E500V2 |
93 | u32 pid[E500_PID_NUM]; | 97 | u32 pid[E500_PID_NUM]; |
94 | 98 | ||
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 99155f847a6a..8b99e076dc81 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c | |||
@@ -86,9 +86,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
86 | unsigned int inst, int *advance) | 86 | unsigned int inst, int *advance) |
87 | { | 87 | { |
88 | int emulated = EMULATE_DONE; | 88 | int emulated = EMULATE_DONE; |
89 | int ra; | 89 | int ra = get_ra(inst); |
90 | int rb; | 90 | int rb = get_rb(inst); |
91 | int rt; | 91 | int rt = get_rt(inst); |
92 | 92 | ||
93 | switch (get_op(inst)) { | 93 | switch (get_op(inst)) { |
94 | case 31: | 94 | case 31: |
@@ -96,11 +96,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
96 | 96 | ||
97 | #ifdef CONFIG_KVM_E500MC | 97 | #ifdef CONFIG_KVM_E500MC |
98 | case XOP_MSGSND: | 98 | case XOP_MSGSND: |
99 | emulated = kvmppc_e500_emul_msgsnd(vcpu, get_rb(inst)); | 99 | emulated = kvmppc_e500_emul_msgsnd(vcpu, rb); |
100 | break; | 100 | break; |
101 | 101 | ||
102 | case XOP_MSGCLR: | 102 | case XOP_MSGCLR: |
103 | emulated = kvmppc_e500_emul_msgclr(vcpu, get_rb(inst)); | 103 | emulated = kvmppc_e500_emul_msgclr(vcpu, rb); |
104 | break; | 104 | break; |
105 | #endif | 105 | #endif |
106 | 106 | ||
@@ -113,20 +113,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
113 | break; | 113 | break; |
114 | 114 | ||
115 | case XOP_TLBSX: | 115 | case XOP_TLBSX: |
116 | rb = get_rb(inst); | ||
117 | emulated = kvmppc_e500_emul_tlbsx(vcpu,rb); | 116 | emulated = kvmppc_e500_emul_tlbsx(vcpu,rb); |
118 | break; | 117 | break; |
119 | 118 | ||
120 | case XOP_TLBILX: | 119 | case XOP_TLBILX: |
121 | ra = get_ra(inst); | ||
122 | rb = get_rb(inst); | ||
123 | rt = get_rt(inst); | ||
124 | emulated = kvmppc_e500_emul_tlbilx(vcpu, rt, ra, rb); | 120 | emulated = kvmppc_e500_emul_tlbilx(vcpu, rt, ra, rb); |
125 | break; | 121 | break; |
126 | 122 | ||
127 | case XOP_TLBIVAX: | 123 | case XOP_TLBIVAX: |
128 | ra = get_ra(inst); | ||
129 | rb = get_rb(inst); | ||
130 | emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb); | 124 | emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb); |
131 | break; | 125 | break; |
132 | 126 | ||
@@ -146,11 +140,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
146 | return emulated; | 140 | return emulated; |
147 | } | 141 | } |
148 | 142 | ||
149 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | 143 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) |
150 | { | 144 | { |
151 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 145 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
152 | int emulated = EMULATE_DONE; | 146 | int emulated = EMULATE_DONE; |
153 | ulong spr_val = kvmppc_get_gpr(vcpu, rs); | ||
154 | 147 | ||
155 | switch (sprn) { | 148 | switch (sprn) { |
156 | #ifndef CONFIG_KVM_BOOKE_HV | 149 | #ifndef CONFIG_KVM_BOOKE_HV |
@@ -160,25 +153,32 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
160 | case SPRN_PID1: | 153 | case SPRN_PID1: |
161 | if (spr_val != 0) | 154 | if (spr_val != 0) |
162 | return EMULATE_FAIL; | 155 | return EMULATE_FAIL; |
163 | vcpu_e500->pid[1] = spr_val; break; | 156 | vcpu_e500->pid[1] = spr_val; |
157 | break; | ||
164 | case SPRN_PID2: | 158 | case SPRN_PID2: |
165 | if (spr_val != 0) | 159 | if (spr_val != 0) |
166 | return EMULATE_FAIL; | 160 | return EMULATE_FAIL; |
167 | vcpu_e500->pid[2] = spr_val; break; | 161 | vcpu_e500->pid[2] = spr_val; |
162 | break; | ||
168 | case SPRN_MAS0: | 163 | case SPRN_MAS0: |
169 | vcpu->arch.shared->mas0 = spr_val; break; | 164 | vcpu->arch.shared->mas0 = spr_val; |
165 | break; | ||
170 | case SPRN_MAS1: | 166 | case SPRN_MAS1: |
171 | vcpu->arch.shared->mas1 = spr_val; break; | 167 | vcpu->arch.shared->mas1 = spr_val; |
168 | break; | ||
172 | case SPRN_MAS2: | 169 | case SPRN_MAS2: |
173 | vcpu->arch.shared->mas2 = spr_val; break; | 170 | vcpu->arch.shared->mas2 = spr_val; |
171 | break; | ||
174 | case SPRN_MAS3: | 172 | case SPRN_MAS3: |
175 | vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff; | 173 | vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff; |
176 | vcpu->arch.shared->mas7_3 |= spr_val; | 174 | vcpu->arch.shared->mas7_3 |= spr_val; |
177 | break; | 175 | break; |
178 | case SPRN_MAS4: | 176 | case SPRN_MAS4: |
179 | vcpu->arch.shared->mas4 = spr_val; break; | 177 | vcpu->arch.shared->mas4 = spr_val; |
178 | break; | ||
180 | case SPRN_MAS6: | 179 | case SPRN_MAS6: |
181 | vcpu->arch.shared->mas6 = spr_val; break; | 180 | vcpu->arch.shared->mas6 = spr_val; |
181 | break; | ||
182 | case SPRN_MAS7: | 182 | case SPRN_MAS7: |
183 | vcpu->arch.shared->mas7_3 &= (u64)0xffffffff; | 183 | vcpu->arch.shared->mas7_3 &= (u64)0xffffffff; |
184 | vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32; | 184 | vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32; |
@@ -189,11 +189,14 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
189 | vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC); | 189 | vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC); |
190 | break; | 190 | break; |
191 | case SPRN_L1CSR1: | 191 | case SPRN_L1CSR1: |
192 | vcpu_e500->l1csr1 = spr_val; break; | 192 | vcpu_e500->l1csr1 = spr_val; |
193 | break; | ||
193 | case SPRN_HID0: | 194 | case SPRN_HID0: |
194 | vcpu_e500->hid0 = spr_val; break; | 195 | vcpu_e500->hid0 = spr_val; |
196 | break; | ||
195 | case SPRN_HID1: | 197 | case SPRN_HID1: |
196 | vcpu_e500->hid1 = spr_val; break; | 198 | vcpu_e500->hid1 = spr_val; |
199 | break; | ||
197 | 200 | ||
198 | case SPRN_MMUCSR0: | 201 | case SPRN_MMUCSR0: |
199 | emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500, | 202 | emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500, |
@@ -222,90 +225,103 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
222 | break; | 225 | break; |
223 | #endif | 226 | #endif |
224 | default: | 227 | default: |
225 | emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs); | 228 | emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val); |
226 | } | 229 | } |
227 | 230 | ||
228 | return emulated; | 231 | return emulated; |
229 | } | 232 | } |
230 | 233 | ||
231 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | 234 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) |
232 | { | 235 | { |
233 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 236 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
234 | int emulated = EMULATE_DONE; | 237 | int emulated = EMULATE_DONE; |
235 | 238 | ||
236 | switch (sprn) { | 239 | switch (sprn) { |
237 | #ifndef CONFIG_KVM_BOOKE_HV | 240 | #ifndef CONFIG_KVM_BOOKE_HV |
238 | unsigned long val; | ||
239 | |||
240 | case SPRN_PID: | 241 | case SPRN_PID: |
241 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break; | 242 | *spr_val = vcpu_e500->pid[0]; |
243 | break; | ||
242 | case SPRN_PID1: | 244 | case SPRN_PID1: |
243 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break; | 245 | *spr_val = vcpu_e500->pid[1]; |
246 | break; | ||
244 | case SPRN_PID2: | 247 | case SPRN_PID2: |
245 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break; | 248 | *spr_val = vcpu_e500->pid[2]; |
249 | break; | ||
246 | case SPRN_MAS0: | 250 | case SPRN_MAS0: |
247 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break; | 251 | *spr_val = vcpu->arch.shared->mas0; |
252 | break; | ||
248 | case SPRN_MAS1: | 253 | case SPRN_MAS1: |
249 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break; | 254 | *spr_val = vcpu->arch.shared->mas1; |
255 | break; | ||
250 | case SPRN_MAS2: | 256 | case SPRN_MAS2: |
251 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break; | 257 | *spr_val = vcpu->arch.shared->mas2; |
258 | break; | ||
252 | case SPRN_MAS3: | 259 | case SPRN_MAS3: |
253 | val = (u32)vcpu->arch.shared->mas7_3; | 260 | *spr_val = (u32)vcpu->arch.shared->mas7_3; |
254 | kvmppc_set_gpr(vcpu, rt, val); | ||
255 | break; | 261 | break; |
256 | case SPRN_MAS4: | 262 | case SPRN_MAS4: |
257 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break; | 263 | *spr_val = vcpu->arch.shared->mas4; |
264 | break; | ||
258 | case SPRN_MAS6: | 265 | case SPRN_MAS6: |
259 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break; | 266 | *spr_val = vcpu->arch.shared->mas6; |
267 | break; | ||
260 | case SPRN_MAS7: | 268 | case SPRN_MAS7: |
261 | val = vcpu->arch.shared->mas7_3 >> 32; | 269 | *spr_val = vcpu->arch.shared->mas7_3 >> 32; |
262 | kvmppc_set_gpr(vcpu, rt, val); | ||
263 | break; | 270 | break; |
264 | #endif | 271 | #endif |
265 | case SPRN_TLB0CFG: | 272 | case SPRN_TLB0CFG: |
266 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.tlbcfg[0]); break; | 273 | *spr_val = vcpu->arch.tlbcfg[0]; |
274 | break; | ||
267 | case SPRN_TLB1CFG: | 275 | case SPRN_TLB1CFG: |
268 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.tlbcfg[1]); break; | 276 | *spr_val = vcpu->arch.tlbcfg[1]; |
277 | break; | ||
269 | case SPRN_L1CSR0: | 278 | case SPRN_L1CSR0: |
270 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break; | 279 | *spr_val = vcpu_e500->l1csr0; |
280 | break; | ||
271 | case SPRN_L1CSR1: | 281 | case SPRN_L1CSR1: |
272 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break; | 282 | *spr_val = vcpu_e500->l1csr1; |
283 | break; | ||
273 | case SPRN_HID0: | 284 | case SPRN_HID0: |
274 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break; | 285 | *spr_val = vcpu_e500->hid0; |
286 | break; | ||
275 | case SPRN_HID1: | 287 | case SPRN_HID1: |
276 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break; | 288 | *spr_val = vcpu_e500->hid1; |
289 | break; | ||
277 | case SPRN_SVR: | 290 | case SPRN_SVR: |
278 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->svr); break; | 291 | *spr_val = vcpu_e500->svr; |
292 | break; | ||
279 | 293 | ||
280 | case SPRN_MMUCSR0: | 294 | case SPRN_MMUCSR0: |
281 | kvmppc_set_gpr(vcpu, rt, 0); break; | 295 | *spr_val = 0; |
296 | break; | ||
282 | 297 | ||
283 | case SPRN_MMUCFG: | 298 | case SPRN_MMUCFG: |
284 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucfg); break; | 299 | *spr_val = vcpu->arch.mmucfg; |
300 | break; | ||
285 | 301 | ||
286 | /* extra exceptions */ | 302 | /* extra exceptions */ |
287 | case SPRN_IVOR32: | 303 | case SPRN_IVOR32: |
288 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]); | 304 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; |
289 | break; | 305 | break; |
290 | case SPRN_IVOR33: | 306 | case SPRN_IVOR33: |
291 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]); | 307 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; |
292 | break; | 308 | break; |
293 | case SPRN_IVOR34: | 309 | case SPRN_IVOR34: |
294 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]); | 310 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; |
295 | break; | 311 | break; |
296 | case SPRN_IVOR35: | 312 | case SPRN_IVOR35: |
297 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]); | 313 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; |
298 | break; | 314 | break; |
299 | #ifdef CONFIG_KVM_BOOKE_HV | 315 | #ifdef CONFIG_KVM_BOOKE_HV |
300 | case SPRN_IVOR36: | 316 | case SPRN_IVOR36: |
301 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL]); | 317 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL]; |
302 | break; | 318 | break; |
303 | case SPRN_IVOR37: | 319 | case SPRN_IVOR37: |
304 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT]); | 320 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT]; |
305 | break; | 321 | break; |
306 | #endif | 322 | #endif |
307 | default: | 323 | default: |
308 | emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); | 324 | emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val); |
309 | } | 325 | } |
310 | 326 | ||
311 | return emulated; | 327 | return emulated; |
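The MAS3/MAS7 cases above show the one SPR pair that does not map 1:1 onto a shared field: vcpu->arch.shared->mas7_3 packs MAS7 (the upper physical-address bits) into the high word and MAS3 into the low word, so writing either half must mask and preserve the other. The same masking, lifted into a stand-alone C sketch (struct name is illustrative):

#include <stdint.h>

struct toy_shared { uint64_t mas7_3; }; /* illustrative stand-in */

/* mtspr MAS3: replace the low word, preserve MAS7 in the high word. */
static void set_mas3(struct toy_shared *s, uint32_t spr_val)
{
	s->mas7_3 &= ~(uint64_t)0xffffffff;
	s->mas7_3 |= spr_val;
}

/* mtspr MAS7: replace the high word, preserve MAS3 in the low word. */
static void set_mas7(struct toy_shared *s, uint32_t spr_val)
{
	s->mas7_3 &= (uint64_t)0xffffffff;
	s->mas7_3 |= (uint64_t)spr_val << 32;
}

/* mfspr side: truncating cast for MAS3, 32-bit shift for MAS7. */
static uint32_t get_mas3(const struct toy_shared *s) { return (uint32_t)s->mas7_3; }
static uint32_t get_mas7(const struct toy_shared *s) { return s->mas7_3 >> 32; }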
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index e05232b746ff..c510fc961302 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c | |||
@@ -261,6 +261,9 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
261 | set_base = gtlb0_set_base(vcpu_e500, eaddr); | 261 | set_base = gtlb0_set_base(vcpu_e500, eaddr); |
262 | size = vcpu_e500->gtlb_params[0].ways; | 262 | size = vcpu_e500->gtlb_params[0].ways; |
263 | } else { | 263 | } else { |
264 | if (eaddr < vcpu_e500->tlb1_min_eaddr || | ||
265 | eaddr > vcpu_e500->tlb1_max_eaddr) | ||
266 | return -1; | ||
264 | set_base = 0; | 267 | set_base = 0; |
265 | } | 268 | } |
266 | 269 | ||
@@ -583,6 +586,65 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
583 | return victim; | 586 | return victim; |
584 | } | 587 | } |
585 | 588 | ||
589 | static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
590 | { | ||
591 | int size = vcpu_e500->gtlb_params[1].entries; | ||
592 | unsigned int offset; | ||
593 | gva_t eaddr; | ||
594 | int i; | ||
595 | |||
596 | vcpu_e500->tlb1_min_eaddr = ~0UL; | ||
597 | vcpu_e500->tlb1_max_eaddr = 0; | ||
598 | offset = vcpu_e500->gtlb_offset[1]; | ||
599 | |||
600 | for (i = 0; i < size; i++) { | ||
601 | struct kvm_book3e_206_tlb_entry *tlbe = | ||
602 | &vcpu_e500->gtlb_arch[offset + i]; | ||
603 | |||
604 | if (!get_tlb_v(tlbe)) | ||
605 | continue; | ||
606 | |||
607 | eaddr = get_tlb_eaddr(tlbe); | ||
608 | vcpu_e500->tlb1_min_eaddr = | ||
609 | min(vcpu_e500->tlb1_min_eaddr, eaddr); | ||
610 | |||
611 | eaddr = get_tlb_end(tlbe); | ||
612 | vcpu_e500->tlb1_max_eaddr = | ||
613 | max(vcpu_e500->tlb1_max_eaddr, eaddr); | ||
614 | } | ||
615 | } | ||
616 | |||
617 | static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
618 | struct kvm_book3e_206_tlb_entry *gtlbe) | ||
619 | { | ||
620 | unsigned long start, end, size; | ||
621 | |||
622 | size = get_tlb_bytes(gtlbe); | ||
623 | start = get_tlb_eaddr(gtlbe) & ~(size - 1); | ||
624 | end = start + size - 1; | ||
625 | |||
626 | return vcpu_e500->tlb1_min_eaddr == start || | ||
627 | vcpu_e500->tlb1_max_eaddr == end; | ||
628 | } | ||
629 | |||
630 | /* This function is supposed to be called for adding a new valid tlb entry */ | ||
631 | static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu, | ||
632 | struct kvm_book3e_206_tlb_entry *gtlbe) | ||
633 | { | ||
634 | unsigned long start, end, size; | ||
635 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
636 | |||
637 | if (!get_tlb_v(gtlbe)) | ||
638 | return; | ||
639 | |||
640 | size = get_tlb_bytes(gtlbe); | ||
641 | start = get_tlb_eaddr(gtlbe) & ~(size - 1); | ||
642 | end = start + size - 1; | ||
643 | |||
644 | vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start); | ||
645 | vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end); | ||
646 | } | ||
647 | |||
586 | static inline int kvmppc_e500_gtlbe_invalidate( | 648 | static inline int kvmppc_e500_gtlbe_invalidate( |
587 | struct kvmppc_vcpu_e500 *vcpu_e500, | 649 | struct kvmppc_vcpu_e500 *vcpu_e500, |
588 | int tlbsel, int esel) | 650 | int tlbsel, int esel) |
@@ -593,6 +655,9 @@ static inline int kvmppc_e500_gtlbe_invalidate( | |||
593 | if (unlikely(get_tlb_iprot(gtlbe))) | 655 | if (unlikely(get_tlb_iprot(gtlbe))) |
594 | return -1; | 656 | return -1; |
595 | 657 | ||
658 | if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe)) | ||
659 | kvmppc_recalc_tlb1map_range(vcpu_e500); | ||
660 | |||
596 | gtlbe->mas1 = 0; | 661 | gtlbe->mas1 = 0; |
597 | 662 | ||
598 | return 0; | 663 | return 0; |
@@ -792,14 +857,19 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | |||
792 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 857 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
793 | struct kvm_book3e_206_tlb_entry *gtlbe, stlbe; | 858 | struct kvm_book3e_206_tlb_entry *gtlbe, stlbe; |
794 | int tlbsel, esel, stlbsel, sesel; | 859 | int tlbsel, esel, stlbsel, sesel; |
860 | int recal = 0; | ||
795 | 861 | ||
796 | tlbsel = get_tlb_tlbsel(vcpu); | 862 | tlbsel = get_tlb_tlbsel(vcpu); |
797 | esel = get_tlb_esel(vcpu, tlbsel); | 863 | esel = get_tlb_esel(vcpu, tlbsel); |
798 | 864 | ||
799 | gtlbe = get_entry(vcpu_e500, tlbsel, esel); | 865 | gtlbe = get_entry(vcpu_e500, tlbsel, esel); |
800 | 866 | ||
801 | if (get_tlb_v(gtlbe)) | 867 | if (get_tlb_v(gtlbe)) { |
802 | inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); | 868 | inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); |
869 | if ((tlbsel == 1) && | ||
870 | kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe)) | ||
871 | recal = 1; | ||
872 | } | ||
803 | 873 | ||
804 | gtlbe->mas1 = vcpu->arch.shared->mas1; | 874 | gtlbe->mas1 = vcpu->arch.shared->mas1; |
805 | gtlbe->mas2 = vcpu->arch.shared->mas2; | 875 | gtlbe->mas2 = vcpu->arch.shared->mas2; |
@@ -808,6 +878,18 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | |||
808 | trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, | 878 | trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, |
809 | gtlbe->mas2, gtlbe->mas7_3); | 879 | gtlbe->mas2, gtlbe->mas7_3); |
810 | 880 | ||
881 | if (tlbsel == 1) { | ||
882 | /* | ||
883 | * If a valid tlb1 entry is overwritten then recalculate the | ||
884 | * min/max TLB1 map address range, otherwise no need to look | ||
885 | * in tlb1 array. | ||
886 | */ | ||
887 | if (recal) | ||
888 | kvmppc_recalc_tlb1map_range(vcpu_e500); | ||
889 | else | ||
890 | kvmppc_set_tlb1map_range(vcpu, gtlbe); | ||
891 | } | ||
892 | |||
811 | /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ | 893 | /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ |
812 | if (tlbe_is_host_safe(vcpu, gtlbe)) { | 894 | if (tlbe_is_host_safe(vcpu, gtlbe)) { |
813 | u64 eaddr; | 895 | u64 eaddr; |
@@ -1145,6 +1227,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, | |||
1145 | vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1]; | 1227 | vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1]; |
1146 | vcpu_e500->gtlb_params[1].sets = 1; | 1228 | vcpu_e500->gtlb_params[1].sets = 1; |
1147 | 1229 | ||
1230 | kvmppc_recalc_tlb1map_range(vcpu_e500); | ||
1148 | return 0; | 1231 | return 0; |
1149 | 1232 | ||
1150 | err_put_page: | 1233 | err_put_page: |
@@ -1163,7 +1246,7 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu, | |||
1163 | struct kvm_dirty_tlb *dirty) | 1246 | struct kvm_dirty_tlb *dirty) |
1164 | { | 1247 | { |
1165 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 1248 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
1166 | 1249 | kvmppc_recalc_tlb1map_range(vcpu_e500); | |
1167 | clear_tlb_refs(vcpu_e500); | 1250 | clear_tlb_refs(vcpu_e500); |
1168 | return 0; | 1251 | return 0; |
1169 | } | 1252 | } |
@@ -1272,6 +1355,7 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) | |||
1272 | vcpu->arch.tlbcfg[1] |= | 1355 | vcpu->arch.tlbcfg[1] |= |
1273 | vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT; | 1356 | vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT; |
1274 | 1357 | ||
1358 | kvmppc_recalc_tlb1map_range(vcpu_e500); | ||
1275 | return 0; | 1359 | return 0; |
1276 | 1360 | ||
1277 | err: | 1361 | err: |
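The new tlb1_min_eaddr/tlb1_max_eaddr pair lets kvmppc_e500_tlb_index() reject a TLB1 lookup with two compares instead of scanning every entry: the range widens in O(1) on insert and is fully recalculated only when an entry at one of the boundaries is invalidated or overwritten. A condensed, self-contained C model of that policy (simplified entry type, not the kernel structures; the caller is assumed to mark the entry invalid before note_invalidate()):

#include <stdbool.h>
#include <limits.h>

struct entry { bool valid; unsigned long start, end; }; /* [start, end] */

struct range {
	unsigned long min, max;
	struct entry *tlb;
	int entries;
};

/* Full rescan, as in kvmppc_recalc_tlb1map_range(): O(entries). */
static void recalc(struct range *r)
{
	r->min = ULONG_MAX;
	r->max = 0;
	for (int i = 0; i < r->entries; i++) {
		if (!r->tlb[i].valid)
			continue;
		if (r->tlb[i].start < r->min) r->min = r->tlb[i].start;
		if (r->tlb[i].end   > r->max) r->max = r->tlb[i].end;
	}
}

/* Insert: O(1) widening, mirroring kvmppc_set_tlb1map_range(). */
static void note_insert(struct range *r, const struct entry *e)
{
	if (!e->valid)
		return;
	if (e->start < r->min) r->min = e->start;
	if (e->end   > r->max) r->max = e->end;
}

/* Invalidate: rescan only if the entry defined a boundary, as in
 * kvmppc_need_recalc_tlb1map_range(). */
static void note_invalidate(struct range *r, const struct entry *e)
{
	if (e->start == r->min || e->end == r->max)
		recalc(r);
}

/* Lookup fast path, as in kvmppc_e500_tlb_index(): miss without a scan. */
static bool may_hit(const struct range *r, unsigned long eaddr)
{
	return eaddr >= r->min && eaddr <= r->max;
}

The full recalculation is also triggered from kvm_vcpu_ioctl_config_tlb(), kvm_vcpu_ioctl_dirty_tlb() and kvmppc_e500_tlb_init() above, i.e. everywhere the guest TLB1 contents can change wholesale.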
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index afc9154f1aef..f90e86dea7a2 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
24 | #include <linux/string.h> | 24 | #include <linux/string.h> |
25 | #include <linux/kvm_host.h> | 25 | #include <linux/kvm_host.h> |
26 | #include <linux/clockchips.h> | ||
26 | 27 | ||
27 | #include <asm/reg.h> | 28 | #include <asm/reg.h> |
28 | #include <asm/time.h> | 29 | #include <asm/time.h> |
@@ -104,8 +105,12 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) | |||
104 | */ | 105 | */ |
105 | 106 | ||
106 | dec_time = vcpu->arch.dec; | 107 | dec_time = vcpu->arch.dec; |
107 | dec_time *= 1000; | 108 | /* |
108 | do_div(dec_time, tb_ticks_per_usec); | 109 | * Guest timebase ticks at the same frequency as the host decrementer. |
110 | * So use the host decrementer calculations for decrementer emulation. | ||
111 | */ | ||
112 | dec_time = dec_time << decrementer_clockevent.shift; | ||
113 | do_div(dec_time, decrementer_clockevent.mult); | ||
109 | dec_nsec = do_div(dec_time, NSEC_PER_SEC); | 114 | dec_nsec = do_div(dec_time, NSEC_PER_SEC); |
110 | hrtimer_start(&vcpu->arch.dec_timer, | 115 | hrtimer_start(&vcpu->arch.dec_timer, |
111 | ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL); | 116 | ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL); |
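The clockevents framework converts nanoseconds to device ticks as ticks = (ns * mult) >> shift, so the replacement code above inverts that to turn the guest's decrementer count into nanoseconds, ns = (ticks << shift) / mult, which the second do_div() then splits into the seconds/nanoseconds pair for ktime_set(). A quick userspace check of the inverse conversion (the mult/shift values here are illustrative; the kernel derives the real ones when registering decrementer_clockevent):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mult = 214748365;      /* ~50 MHz timebase with shift 32 */
	uint32_t shift = 32;
	uint64_t ticks = 50000000;      /* one second worth of ticks */

	uint64_t ns = (ticks << shift) / mult;  /* inverse of ns->ticks */
	printf("%llu ticks -> %llu ns\n",
	       (unsigned long long)ticks, (unsigned long long)ns);
	return 0;
}

Using the clockevent's own mult/shift keeps the emulated decrementer consistent with whatever timebase frequency the host actually calibrated, instead of the fixed tb_ticks_per_usec arithmetic it replaces.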
@@ -143,13 +148,13 @@ u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb) | |||
143 | int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | 148 | int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) |
144 | { | 149 | { |
145 | u32 inst = kvmppc_get_last_inst(vcpu); | 150 | u32 inst = kvmppc_get_last_inst(vcpu); |
146 | int ra; | 151 | int ra = get_ra(inst); |
147 | int rb; | 152 | int rs = get_rs(inst); |
148 | int rs; | 153 | int rt = get_rt(inst); |
149 | int rt; | 154 | int sprn = get_sprn(inst); |
150 | int sprn; | ||
151 | enum emulation_result emulated = EMULATE_DONE; | 155 | enum emulation_result emulated = EMULATE_DONE; |
152 | int advance = 1; | 156 | int advance = 1; |
157 | ulong spr_val = 0; | ||
153 | 158 | ||
154 | /* this default type might be overwritten by subcategories */ | 159 | /* this default type might be overwritten by subcategories */ |
155 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); | 160 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); |
@@ -184,141 +189,116 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
184 | advance = 0; | 189 | advance = 0; |
185 | break; | 190 | break; |
186 | case OP_31_XOP_LWZX: | 191 | case OP_31_XOP_LWZX: |
187 | rt = get_rt(inst); | ||
188 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 192 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); |
189 | break; | 193 | break; |
190 | 194 | ||
191 | case OP_31_XOP_LBZX: | 195 | case OP_31_XOP_LBZX: |
192 | rt = get_rt(inst); | ||
193 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 196 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
194 | break; | 197 | break; |
195 | 198 | ||
196 | case OP_31_XOP_LBZUX: | 199 | case OP_31_XOP_LBZUX: |
197 | rt = get_rt(inst); | ||
198 | ra = get_ra(inst); | ||
199 | rb = get_rb(inst); | ||
200 | |||
201 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 200 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
202 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | 201 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
203 | break; | 202 | break; |
204 | 203 | ||
205 | case OP_31_XOP_STWX: | 204 | case OP_31_XOP_STWX: |
206 | rs = get_rs(inst); | ||
207 | emulated = kvmppc_handle_store(run, vcpu, | 205 | emulated = kvmppc_handle_store(run, vcpu, |
208 | kvmppc_get_gpr(vcpu, rs), | 206 | kvmppc_get_gpr(vcpu, rs), |
209 | 4, 1); | 207 | 4, 1); |
210 | break; | 208 | break; |
211 | 209 | ||
212 | case OP_31_XOP_STBX: | 210 | case OP_31_XOP_STBX: |
213 | rs = get_rs(inst); | ||
214 | emulated = kvmppc_handle_store(run, vcpu, | 211 | emulated = kvmppc_handle_store(run, vcpu, |
215 | kvmppc_get_gpr(vcpu, rs), | 212 | kvmppc_get_gpr(vcpu, rs), |
216 | 1, 1); | 213 | 1, 1); |
217 | break; | 214 | break; |
218 | 215 | ||
219 | case OP_31_XOP_STBUX: | 216 | case OP_31_XOP_STBUX: |
220 | rs = get_rs(inst); | ||
221 | ra = get_ra(inst); | ||
222 | rb = get_rb(inst); | ||
223 | |||
224 | emulated = kvmppc_handle_store(run, vcpu, | 217 | emulated = kvmppc_handle_store(run, vcpu, |
225 | kvmppc_get_gpr(vcpu, rs), | 218 | kvmppc_get_gpr(vcpu, rs), |
226 | 1, 1); | 219 | 1, 1); |
227 | kvmppc_set_gpr(vcpu, rs, vcpu->arch.vaddr_accessed); | 220 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
228 | break; | 221 | break; |
229 | 222 | ||
230 | case OP_31_XOP_LHAX: | 223 | case OP_31_XOP_LHAX: |
231 | rt = get_rt(inst); | ||
232 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | 224 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); |
233 | break; | 225 | break; |
234 | 226 | ||
235 | case OP_31_XOP_LHZX: | 227 | case OP_31_XOP_LHZX: |
236 | rt = get_rt(inst); | ||
237 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 228 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
238 | break; | 229 | break; |
239 | 230 | ||
240 | case OP_31_XOP_LHZUX: | 231 | case OP_31_XOP_LHZUX: |
241 | rt = get_rt(inst); | ||
242 | ra = get_ra(inst); | ||
243 | rb = get_rb(inst); | ||
244 | |||
245 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 232 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
246 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | 233 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
247 | break; | 234 | break; |
248 | 235 | ||
249 | case OP_31_XOP_MFSPR: | 236 | case OP_31_XOP_MFSPR: |
250 | sprn = get_sprn(inst); | ||
251 | rt = get_rt(inst); | ||
252 | |||
253 | switch (sprn) { | 237 | switch (sprn) { |
254 | case SPRN_SRR0: | 238 | case SPRN_SRR0: |
255 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0); | 239 | spr_val = vcpu->arch.shared->srr0; |
256 | break; | 240 | break; |
257 | case SPRN_SRR1: | 241 | case SPRN_SRR1: |
258 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1); | 242 | spr_val = vcpu->arch.shared->srr1; |
259 | break; | 243 | break; |
260 | case SPRN_PVR: | 244 | case SPRN_PVR: |
261 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break; | 245 | spr_val = vcpu->arch.pvr; |
246 | break; | ||
262 | case SPRN_PIR: | 247 | case SPRN_PIR: |
263 | kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break; | 248 | spr_val = vcpu->vcpu_id; |
249 | break; | ||
264 | case SPRN_MSSSR0: | 250 | case SPRN_MSSSR0: |
265 | kvmppc_set_gpr(vcpu, rt, 0); break; | 251 | spr_val = 0; |
252 | break; | ||
266 | 253 | ||
267 | /* Note: mftb and TBRL/TBWL are user-accessible, so | 254 | /* Note: mftb and TBRL/TBWL are user-accessible, so |
268 | * the guest can always access the real TB anyways. | 255 | * the guest can always access the real TB anyways. |
269 | * In fact, we probably will never see these traps. */ | 256 | * In fact, we probably will never see these traps. */ |
270 | case SPRN_TBWL: | 257 | case SPRN_TBWL: |
271 | kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break; | 258 | spr_val = get_tb() >> 32; |
259 | break; | ||
272 | case SPRN_TBWU: | 260 | case SPRN_TBWU: |
273 | kvmppc_set_gpr(vcpu, rt, get_tb()); break; | 261 | spr_val = get_tb(); |
262 | break; | ||
274 | 263 | ||
275 | case SPRN_SPRG0: | 264 | case SPRN_SPRG0: |
276 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0); | 265 | spr_val = vcpu->arch.shared->sprg0; |
277 | break; | 266 | break; |
278 | case SPRN_SPRG1: | 267 | case SPRN_SPRG1: |
279 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1); | 268 | spr_val = vcpu->arch.shared->sprg1; |
280 | break; | 269 | break; |
281 | case SPRN_SPRG2: | 270 | case SPRN_SPRG2: |
282 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2); | 271 | spr_val = vcpu->arch.shared->sprg2; |
283 | break; | 272 | break; |
284 | case SPRN_SPRG3: | 273 | case SPRN_SPRG3: |
285 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3); | 274 | spr_val = vcpu->arch.shared->sprg3; |
286 | break; | 275 | break; |
287 | /* Note: SPRG4-7 are user-readable, so we don't get | 276 | /* Note: SPRG4-7 are user-readable, so we don't get |
288 | * a trap. */ | 277 | * a trap. */ |
289 | 278 | ||
290 | case SPRN_DEC: | 279 | case SPRN_DEC: |
291 | { | 280 | spr_val = kvmppc_get_dec(vcpu, get_tb()); |
292 | kvmppc_set_gpr(vcpu, rt, | ||
293 | kvmppc_get_dec(vcpu, get_tb())); | ||
294 | break; | 281 | break; |
295 | } | ||
296 | default: | 282 | default: |
297 | emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); | 283 | emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, |
298 | if (emulated == EMULATE_FAIL) { | 284 | &spr_val); |
299 | printk("mfspr: unknown spr %x\n", sprn); | 285 | if (unlikely(emulated == EMULATE_FAIL)) { |
300 | kvmppc_set_gpr(vcpu, rt, 0); | 286 | printk(KERN_INFO "mfspr: unknown spr " |
287 | "0x%x\n", sprn); | ||
301 | } | 288 | } |
302 | break; | 289 | break; |
303 | } | 290 | } |
291 | kvmppc_set_gpr(vcpu, rt, spr_val); | ||
304 | kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); | 292 | kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); |
305 | break; | 293 | break; |
306 | 294 | ||
307 | case OP_31_XOP_STHX: | 295 | case OP_31_XOP_STHX: |
308 | rs = get_rs(inst); | ||
309 | ra = get_ra(inst); | ||
310 | rb = get_rb(inst); | ||
311 | |||
312 | emulated = kvmppc_handle_store(run, vcpu, | 296 | emulated = kvmppc_handle_store(run, vcpu, |
313 | kvmppc_get_gpr(vcpu, rs), | 297 | kvmppc_get_gpr(vcpu, rs), |
314 | 2, 1); | 298 | 2, 1); |
315 | break; | 299 | break; |
316 | 300 | ||
317 | case OP_31_XOP_STHUX: | 301 | case OP_31_XOP_STHUX: |
318 | rs = get_rs(inst); | ||
319 | ra = get_ra(inst); | ||
320 | rb = get_rb(inst); | ||
321 | |||
322 | emulated = kvmppc_handle_store(run, vcpu, | 302 | emulated = kvmppc_handle_store(run, vcpu, |
323 | kvmppc_get_gpr(vcpu, rs), | 303 | kvmppc_get_gpr(vcpu, rs), |
324 | 2, 1); | 304 | 2, 1); |
@@ -326,14 +306,13 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
326 | break; | 306 | break; |
327 | 307 | ||
328 | case OP_31_XOP_MTSPR: | 308 | case OP_31_XOP_MTSPR: |
329 | sprn = get_sprn(inst); | 309 | spr_val = kvmppc_get_gpr(vcpu, rs); |
330 | rs = get_rs(inst); | ||
331 | switch (sprn) { | 310 | switch (sprn) { |
332 | case SPRN_SRR0: | 311 | case SPRN_SRR0: |
333 | vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs); | 312 | vcpu->arch.shared->srr0 = spr_val; |
334 | break; | 313 | break; |
335 | case SPRN_SRR1: | 314 | case SPRN_SRR1: |
336 | vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs); | 315 | vcpu->arch.shared->srr1 = spr_val; |
337 | break; | 316 | break; |
338 | 317 | ||
339 | /* XXX We need to context-switch the timebase for | 318 | /* XXX We need to context-switch the timebase for |
@@ -344,27 +323,29 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
344 | case SPRN_MSSSR0: break; | 323 | case SPRN_MSSSR0: break; |
345 | 324 | ||
346 | case SPRN_DEC: | 325 | case SPRN_DEC: |
347 | vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs); | 326 | vcpu->arch.dec = spr_val; |
348 | kvmppc_emulate_dec(vcpu); | 327 | kvmppc_emulate_dec(vcpu); |
349 | break; | 328 | break; |
350 | 329 | ||
351 | case SPRN_SPRG0: | 330 | case SPRN_SPRG0: |
352 | vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs); | 331 | vcpu->arch.shared->sprg0 = spr_val; |
353 | break; | 332 | break; |
354 | case SPRN_SPRG1: | 333 | case SPRN_SPRG1: |
355 | vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs); | 334 | vcpu->arch.shared->sprg1 = spr_val; |
356 | break; | 335 | break; |
357 | case SPRN_SPRG2: | 336 | case SPRN_SPRG2: |
358 | vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs); | 337 | vcpu->arch.shared->sprg2 = spr_val; |
359 | break; | 338 | break; |
360 | case SPRN_SPRG3: | 339 | case SPRN_SPRG3: |
361 | vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs); | 340 | vcpu->arch.shared->sprg3 = spr_val; |
362 | break; | 341 | break; |
363 | 342 | ||
364 | default: | 343 | default: |
365 | emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); | 344 | emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, |
345 | spr_val); | ||
366 | if (emulated == EMULATE_FAIL) | 346 | if (emulated == EMULATE_FAIL) |
367 | printk("mtspr: unknown spr %x\n", sprn); | 347 | printk(KERN_INFO "mtspr: unknown spr " |
348 | "0x%x\n", sprn); | ||
368 | break; | 349 | break; |
369 | } | 350 | } |
370 | kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); | 351 | kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); |
@@ -379,7 +360,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
379 | break; | 360 | break; |
380 | 361 | ||
381 | case OP_31_XOP_LWBRX: | 362 | case OP_31_XOP_LWBRX: |
382 | rt = get_rt(inst); | ||
383 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); | 363 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); |
384 | break; | 364 | break; |
385 | 365 | ||
@@ -387,25 +367,16 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
387 | break; | 367 | break; |
388 | 368 | ||
389 | case OP_31_XOP_STWBRX: | 369 | case OP_31_XOP_STWBRX: |
390 | rs = get_rs(inst); | ||
391 | ra = get_ra(inst); | ||
392 | rb = get_rb(inst); | ||
393 | |||
394 | emulated = kvmppc_handle_store(run, vcpu, | 370 | emulated = kvmppc_handle_store(run, vcpu, |
395 | kvmppc_get_gpr(vcpu, rs), | 371 | kvmppc_get_gpr(vcpu, rs), |
396 | 4, 0); | 372 | 4, 0); |
397 | break; | 373 | break; |
398 | 374 | ||
399 | case OP_31_XOP_LHBRX: | 375 | case OP_31_XOP_LHBRX: |
400 | rt = get_rt(inst); | ||
401 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); | 376 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); |
402 | break; | 377 | break; |
403 | 378 | ||
404 | case OP_31_XOP_STHBRX: | 379 | case OP_31_XOP_STHBRX: |
405 | rs = get_rs(inst); | ||
406 | ra = get_ra(inst); | ||
407 | rb = get_rb(inst); | ||
408 | |||
409 | emulated = kvmppc_handle_store(run, vcpu, | 380 | emulated = kvmppc_handle_store(run, vcpu, |
410 | kvmppc_get_gpr(vcpu, rs), | 381 | kvmppc_get_gpr(vcpu, rs), |
411 | 2, 0); | 382 | 2, 0); |
@@ -418,39 +389,30 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
418 | break; | 389 | break; |
419 | 390 | ||
420 | case OP_LWZ: | 391 | case OP_LWZ: |
421 | rt = get_rt(inst); | ||
422 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 392 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); |
423 | break; | 393 | break; |
424 | 394 | ||
425 | case OP_LWZU: | 395 | case OP_LWZU: |
426 | ra = get_ra(inst); | ||
427 | rt = get_rt(inst); | ||
428 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 396 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); |
429 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | 397 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
430 | break; | 398 | break; |
431 | 399 | ||
432 | case OP_LBZ: | 400 | case OP_LBZ: |
433 | rt = get_rt(inst); | ||
434 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 401 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
435 | break; | 402 | break; |
436 | 403 | ||
437 | case OP_LBZU: | 404 | case OP_LBZU: |
438 | ra = get_ra(inst); | ||
439 | rt = get_rt(inst); | ||
440 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 405 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
441 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | 406 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
442 | break; | 407 | break; |
443 | 408 | ||
444 | case OP_STW: | 409 | case OP_STW: |
445 | rs = get_rs(inst); | ||
446 | emulated = kvmppc_handle_store(run, vcpu, | 410 | emulated = kvmppc_handle_store(run, vcpu, |
447 | kvmppc_get_gpr(vcpu, rs), | 411 | kvmppc_get_gpr(vcpu, rs), |
448 | 4, 1); | 412 | 4, 1); |
449 | break; | 413 | break; |
450 | 414 | ||
451 | case OP_STWU: | 415 | case OP_STWU: |
452 | ra = get_ra(inst); | ||
453 | rs = get_rs(inst); | ||
454 | emulated = kvmppc_handle_store(run, vcpu, | 416 | emulated = kvmppc_handle_store(run, vcpu, |
455 | kvmppc_get_gpr(vcpu, rs), | 417 | kvmppc_get_gpr(vcpu, rs), |
456 | 4, 1); | 418 | 4, 1); |
@@ -458,15 +420,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
458 | break; | 420 | break; |
459 | 421 | ||
460 | case OP_STB: | 422 | case OP_STB: |
461 | rs = get_rs(inst); | ||
462 | emulated = kvmppc_handle_store(run, vcpu, | 423 | emulated = kvmppc_handle_store(run, vcpu, |
463 | kvmppc_get_gpr(vcpu, rs), | 424 | kvmppc_get_gpr(vcpu, rs), |
464 | 1, 1); | 425 | 1, 1); |
465 | break; | 426 | break; |
466 | 427 | ||
467 | case OP_STBU: | 428 | case OP_STBU: |
468 | ra = get_ra(inst); | ||
469 | rs = get_rs(inst); | ||
470 | emulated = kvmppc_handle_store(run, vcpu, | 429 | emulated = kvmppc_handle_store(run, vcpu, |
471 | kvmppc_get_gpr(vcpu, rs), | 430 | kvmppc_get_gpr(vcpu, rs), |
472 | 1, 1); | 431 | 1, 1); |
@@ -474,39 +433,30 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
474 | break; | 433 | break; |
475 | 434 | ||
476 | case OP_LHZ: | 435 | case OP_LHZ: |
477 | rt = get_rt(inst); | ||
478 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 436 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
479 | break; | 437 | break; |
480 | 438 | ||
481 | case OP_LHZU: | 439 | case OP_LHZU: |
482 | ra = get_ra(inst); | ||
483 | rt = get_rt(inst); | ||
484 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 440 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
485 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | 441 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
486 | break; | 442 | break; |
487 | 443 | ||
488 | case OP_LHA: | 444 | case OP_LHA: |
489 | rt = get_rt(inst); | ||
490 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | 445 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); |
491 | break; | 446 | break; |
492 | 447 | ||
493 | case OP_LHAU: | 448 | case OP_LHAU: |
494 | ra = get_ra(inst); | ||
495 | rt = get_rt(inst); | ||
496 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | 449 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); |
497 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | 450 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
498 | break; | 451 | break; |
499 | 452 | ||
500 | case OP_STH: | 453 | case OP_STH: |
501 | rs = get_rs(inst); | ||
502 | emulated = kvmppc_handle_store(run, vcpu, | 454 | emulated = kvmppc_handle_store(run, vcpu, |
503 | kvmppc_get_gpr(vcpu, rs), | 455 | kvmppc_get_gpr(vcpu, rs), |
504 | 2, 1); | 456 | 2, 1); |
505 | break; | 457 | break; |
506 | 458 | ||
507 | case OP_STHU: | 459 | case OP_STHU: |
508 | ra = get_ra(inst); | ||
509 | rs = get_rs(inst); | ||
510 | emulated = kvmppc_handle_store(run, vcpu, | 460 | emulated = kvmppc_handle_store(run, vcpu, |
511 | kvmppc_get_gpr(vcpu, rs), | 461 | kvmppc_get_gpr(vcpu, rs), |
512 | 2, 1); | 462 | 2, 1); |
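Hoisting get_ra()/get_rs()/get_rt()/get_sprn() to the top of kvmppc_emulate_instruction() is safe because the instruction forms handled here keep those fields at fixed bit positions, so decoding them once up front costs nothing and deletes the per-case boilerplate. The same set of hunks also fixes stbux: an update-form store writes the computed effective address back into RA, the base register, not RS. For reference, a sketch of the field extraction (hedged: the kernel has its own accessors in its disassemble helpers; these mirror the conventional PowerPC layout, counting bit 0 as the most significant bit):

#include <stdint.h>

/* D/X-form layout: opcode in bits 0-5, RT/RS in 6-10, RA in 11-15,
 * RB in 16-20.  The SPR number is encoded with its two 5-bit halves
 * swapped in the instruction image. */
static inline unsigned int get_op(uint32_t inst) { return inst >> 26; }
static inline unsigned int get_rt(uint32_t inst) { return (inst >> 21) & 0x1f; }
static inline unsigned int get_rs(uint32_t inst) { return (inst >> 21) & 0x1f; } /* same field as RT */
static inline unsigned int get_ra(uint32_t inst) { return (inst >> 16) & 0x1f; }
static inline unsigned int get_rb(uint32_t inst) { return (inst >> 11) & 0x1f; }
static inline unsigned int get_sprn(uint32_t inst)
{
	return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
}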
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 58ad8609bb43..1493c8de947b 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -244,10 +244,12 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
244 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; | 244 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; |
245 | break; | 245 | break; |
246 | #endif | 246 | #endif |
247 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 247 | #ifdef CONFIG_PPC_BOOK3S_64 |
248 | case KVM_CAP_SPAPR_TCE: | 248 | case KVM_CAP_SPAPR_TCE: |
249 | r = 1; | 249 | r = 1; |
250 | break; | 250 | break; |
251 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
252 | #ifdef CONFIG_KVM_BOOK3S_64_HV | ||
251 | case KVM_CAP_PPC_SMT: | 253 | case KVM_CAP_PPC_SMT: |
252 | r = threads_per_core; | 254 | r = threads_per_core; |
253 | break; | 255 | break; |
@@ -277,6 +279,11 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
277 | case KVM_CAP_MAX_VCPUS: | 279 | case KVM_CAP_MAX_VCPUS: |
278 | r = KVM_MAX_VCPUS; | 280 | r = KVM_MAX_VCPUS; |
279 | break; | 281 | break; |
282 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
283 | case KVM_CAP_PPC_GET_SMMU_INFO: | ||
284 | r = 1; | ||
285 | break; | ||
286 | #endif | ||
280 | default: | 287 | default: |
281 | r = 0; | 288 | r = 0; |
282 | break; | 289 | break; |
@@ -716,7 +723,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
716 | break; | 723 | break; |
717 | } | 724 | } |
718 | #endif | 725 | #endif |
719 | |||
720 | default: | 726 | default: |
721 | r = -EINVAL; | 727 | r = -EINVAL; |
722 | } | 728 | } |
@@ -773,7 +779,7 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
773 | 779 | ||
774 | break; | 780 | break; |
775 | } | 781 | } |
776 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 782 | #ifdef CONFIG_PPC_BOOK3S_64 |
777 | case KVM_CREATE_SPAPR_TCE: { | 783 | case KVM_CREATE_SPAPR_TCE: { |
778 | struct kvm_create_spapr_tce create_tce; | 784 | struct kvm_create_spapr_tce create_tce; |
779 | struct kvm *kvm = filp->private_data; | 785 | struct kvm *kvm = filp->private_data; |
@@ -784,7 +790,9 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
784 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce); | 790 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce); |
785 | goto out; | 791 | goto out; |
786 | } | 792 | } |
793 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
787 | 794 | ||
795 | #ifdef CONFIG_KVM_BOOK3S_64_HV | ||
788 | case KVM_ALLOCATE_RMA: { | 796 | case KVM_ALLOCATE_RMA: { |
789 | struct kvm *kvm = filp->private_data; | 797 | struct kvm *kvm = filp->private_data; |
790 | struct kvm_allocate_rma rma; | 798 | struct kvm_allocate_rma rma; |
@@ -796,6 +804,18 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
796 | } | 804 | } |
797 | #endif /* CONFIG_KVM_BOOK3S_64_HV */ | 805 | #endif /* CONFIG_KVM_BOOK3S_64_HV */ |
798 | 806 | ||
807 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
808 | case KVM_PPC_GET_SMMU_INFO: { | ||
809 | struct kvm *kvm = filp->private_data; | ||
810 | struct kvm_ppc_smmu_info info; | ||
811 | |||
812 | memset(&info, 0, sizeof(info)); | ||
813 | r = kvm_vm_ioctl_get_smmu_info(kvm, &info); | ||
814 | if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) | ||
815 | r = -EFAULT; | ||
816 | break; | ||
817 | } | ||
818 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
799 | default: | 819 | default: |
800 | r = -ENOTTY; | 820 | r = -ENOTTY; |
801 | } | 821 | } |
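With the powerpc.c plumbing above, userspace discovers the new interface via KVM_CHECK_EXTENSION with KVM_CAP_PPC_GET_SMMU_INFO and then issues KVM_PPC_GET_SMMU_INFO on the VM fd to retrieve the emulated MMU geometry. A hedged usage sketch (error handling on open/create trimmed; assumes the uapi additions from this series are present in linux/kvm.h):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	struct kvm_ppc_smmu_info info;

	/* Capability advertised by the new check_extension case above. */
	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_GET_SMMU_INFO) <= 0) {
		fprintf(stderr, "KVM_PPC_GET_SMMU_INFO not supported\n");
		return 1;
	}
	/* VM-scoped ioctl: the kernel zeroes and fills kvm_ppc_smmu_info. */
	if (ioctl(vm, KVM_PPC_GET_SMMU_INFO, &info) < 0) {
		perror("KVM_PPC_GET_SMMU_INFO");
		return 1;
	}
	printf("slb_size=%u flags=0x%x\n", info.slb_size, info.flags);
	return 0;
}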