author		Paolo Bonzini <pbonzini@redhat.com>	2014-08-05 03:55:22 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2014-08-05 03:58:11 -0400
commit		cc568ead3ce8e0284e7e2cc77bd1dafb03ba4ca1 (patch)
tree		6525ab90e70f0e0736e9bc050f66645ca373c802 /arch/powerpc/kvm
parent		5d5768660539b6d0da0d46113ffb0676540579a6 (diff)
parent		8e6afa36e754be84b468d7df9e5aa71cf4003f3b (diff)
Merge tag 'signed-kvm-ppc-next' of git://github.com/agraf/linux-2.6 into kvm
Patch queue for ppc - 2014-08-01
Highlights in this release include:
- BookE: Rework instruction fetch so it is no longer racy
- BookE HV: Fix ONE_REG accessors for some in-hardware registers
- Book3S: Good number of LE host fixes, enable HV on LE
- Book3S: Some misc bug fixes
- Book3S HV: Add in-guest debug support
- Book3S HV: Preload cache lines on context switch
- Remove 440 support
Alexander Graf (31):
KVM: PPC: Book3s PR: Disable AIL mode with OPAL
KVM: PPC: Book3s HV: Fix tlbie compile error
KVM: PPC: Book3S PR: Handle hyp doorbell exits
KVM: PPC: Book3S PR: Fix ABIv2 on LE
KVM: PPC: Book3S PR: Fix sparse endian checks
PPC: Add asm helpers for BE 32bit load/store
KVM: PPC: Book3S HV: Make HTAB code LE host aware
KVM: PPC: Book3S HV: Access guest VPA in BE
KVM: PPC: Book3S HV: Access host lppaca and shadow slb in BE
KVM: PPC: Book3S HV: Access XICS in BE
KVM: PPC: Book3S HV: Fix ABIv2 on LE
KVM: PPC: Book3S HV: Enable for little endian hosts
KVM: PPC: Book3S: Move vcore definition to end of kvm_arch struct
KVM: PPC: Deflect page write faults properly in kvmppc_st
KVM: PPC: Book3S: Stop PTE lookup on write errors
KVM: PPC: Book3S: Add hack for split real mode
KVM: PPC: Book3S: Make magic page properly 4k mappable
KVM: PPC: Remove 440 support
KVM: Rename and add argument to check_extension
KVM: Allow KVM_CHECK_EXTENSION on the vm fd
KVM: PPC: Book3S: Provide different CAPs based on HV or PR mode
KVM: PPC: Implement kvmppc_xlate for all targets
KVM: PPC: Move kvmppc_ld/st to common code
KVM: PPC: Remove kvmppc_bad_hva()
KVM: PPC: Use kvm_read_guest in kvmppc_ld
KVM: PPC: Handle magic page in kvmppc_ld/st
KVM: PPC: Separate loadstore emulation from priv emulation
KVM: PPC: Expose helper functions for data/inst faults
KVM: PPC: Remove DCR handling
KVM: PPC: HV: Remove generic instruction emulation
KVM: PPC: PR: Handle FSCR feature deselects
Alexey Kardashevskiy (1):
KVM: PPC: Book3S: Fix LPCR one_reg interface
Aneesh Kumar K.V (4):
KVM: PPC: BOOK3S: PR: Fix PURR and SPURR emulation
KVM: PPC: BOOK3S: PR: Emulate virtual timebase register
KVM: PPC: BOOK3S: PR: Emulate instruction counter
KVM: PPC: BOOK3S: HV: Update compute_tlbie_rb to handle 16MB base page
Anton Blanchard (2):
KVM: PPC: Book3S HV: Fix ABIv2 indirect branch issue
KVM: PPC: Assembly functions exported to modules need _GLOBAL_TOC()
Bharat Bhushan (10):
kvm: ppc: bookehv: Added wrapper macros for shadow registers
kvm: ppc: booke: Use the shared struct helpers of SRR0 and SRR1
kvm: ppc: booke: Use the shared struct helpers of SPRN_DEAR
kvm: ppc: booke: Add shared struct helpers of SPRN_ESR
kvm: ppc: booke: Use the shared struct helpers for SPRN_SPRG0-7
kvm: ppc: Add SPRN_EPR get helper function
kvm: ppc: bookehv: Save restore SPRN_SPRG9 on guest entry exit
KVM: PPC: Booke-hv: Add one reg interface for SPRG9
KVM: PPC: Remove comment saying SPRG1 is used for vcpu pointer
KVM: PPC: BOOKEHV: rename e500hv_spr to bookehv_spr
Michael Neuling (1):
KVM: PPC: Book3S HV: Add H_SET_MODE hcall handling
Mihai Caraman (8):
KVM: PPC: e500mc: Enhance tlb invalidation condition on vcpu schedule
KVM: PPC: e500: Fix default tlb for victim hint
KVM: PPC: e500: Emulate power management control SPR
KVM: PPC: e500mc: Revert "add load inst fixup"
KVM: PPC: Book3e: Add TLBSEL/TSIZE defines for MAS0/1
KVM: PPC: Book3s: Remove kvmppc_read_inst() function
KVM: PPC: Allow kvmppc_get_last_inst() to fail
KVM: PPC: Bookehv: Get vcpu's last instruction for emulation
Paul Mackerras (4):
KVM: PPC: Book3S: Controls for in-kernel sPAPR hypercall handling
KVM: PPC: Book3S: Allow only implemented hcalls to be enabled or disabled
KVM: PPC: Book3S PR: Take SRCU read lock around RTAS kvm_read_guest() call
KVM: PPC: Book3S: Make kvmppc_ld return a more accurate error indication
Stewart Smith (2):
Split out struct kvmppc_vcore creation to separate function
Use the POWER8 Micro Partition Prefetch Engine in KVM HV on POWER8
Conflicts:
Documentation/virtual/kvm/api.txt
Diffstat (limited to 'arch/powerpc/kvm')
34 files changed, 1565 insertions, 1845 deletions
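A brief editorial aside before the diff (not part of the pull request): the capability rework listed in the shortlog above ("Rename and add argument to check_extension", "Allow KVM_CHECK_EXTENSION on the vm fd", "Provide different CAPs based on HV or PR mode") lets userspace query capabilities on the VM file descriptor, so the answer can differ between HV and PR guests. A minimal, hedged sketch of how a VMM might use this:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Query a capability on the VM fd rather than the global /dev/kvm fd, so the
 * result reflects this VM's flavour (HV vs. PR).  Returns 0 if unsupported,
 * otherwise the (positive) value reported by KVM. */
static int vm_has_cap(int vm_fd, unsigned long cap)
{
	int ret = ioctl(vm_fd, KVM_CHECK_EXTENSION, cap);

	return ret > 0 ? ret : 0;
}

Before this series, KVM_CHECK_EXTENSION was only accepted on the /dev/kvm fd, where a single answer had to cover both modes.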
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
deleted file mode 100644
index 9cb4b0a36031..000000000000
--- a/arch/powerpc/kvm/44x.c
+++ /dev/null
@@ -1,237 +0,0 @@
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #include <linux/kvm_host.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/err.h> | ||
23 | #include <linux/export.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/miscdevice.h> | ||
26 | |||
27 | #include <asm/reg.h> | ||
28 | #include <asm/cputable.h> | ||
29 | #include <asm/tlbflush.h> | ||
30 | #include <asm/kvm_44x.h> | ||
31 | #include <asm/kvm_ppc.h> | ||
32 | |||
33 | #include "44x_tlb.h" | ||
34 | #include "booke.h" | ||
35 | |||
36 | static void kvmppc_core_vcpu_load_44x(struct kvm_vcpu *vcpu, int cpu) | ||
37 | { | ||
38 | kvmppc_booke_vcpu_load(vcpu, cpu); | ||
39 | kvmppc_44x_tlb_load(vcpu); | ||
40 | } | ||
41 | |||
42 | static void kvmppc_core_vcpu_put_44x(struct kvm_vcpu *vcpu) | ||
43 | { | ||
44 | kvmppc_44x_tlb_put(vcpu); | ||
45 | kvmppc_booke_vcpu_put(vcpu); | ||
46 | } | ||
47 | |||
48 | int kvmppc_core_check_processor_compat(void) | ||
49 | { | ||
50 | int r; | ||
51 | |||
52 | if (strncmp(cur_cpu_spec->platform, "ppc440", 6) == 0) | ||
53 | r = 0; | ||
54 | else | ||
55 | r = -ENOTSUPP; | ||
56 | |||
57 | return r; | ||
58 | } | ||
59 | |||
60 | int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) | ||
61 | { | ||
62 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
63 | struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[0]; | ||
64 | int i; | ||
65 | |||
66 | tlbe->tid = 0; | ||
67 | tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID; | ||
68 | tlbe->word1 = 0; | ||
69 | tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR; | ||
70 | |||
71 | tlbe++; | ||
72 | tlbe->tid = 0; | ||
73 | tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID; | ||
74 | tlbe->word1 = 0xef600000; | ||
75 | tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR | ||
76 | | PPC44x_TLB_I | PPC44x_TLB_G; | ||
77 | |||
78 | /* Since the guest can directly access the timebase, it must know the | ||
79 | * real timebase frequency. Accordingly, it must see the state of | ||
80 | * CCR1[TCS]. */ | ||
81 | /* XXX CCR1 doesn't exist on all 440 SoCs. */ | ||
82 | vcpu->arch.ccr1 = mfspr(SPRN_CCR1); | ||
83 | |||
84 | for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) | ||
85 | vcpu_44x->shadow_refs[i].gtlb_index = -1; | ||
86 | |||
87 | vcpu->arch.cpu_type = KVM_CPU_440; | ||
88 | vcpu->arch.pvr = mfspr(SPRN_PVR); | ||
89 | |||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | /* 'linear_address' is actually an encoding of AS|PID|EADDR . */ | ||
94 | int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, | ||
95 | struct kvm_translation *tr) | ||
96 | { | ||
97 | int index; | ||
98 | gva_t eaddr; | ||
99 | u8 pid; | ||
100 | u8 as; | ||
101 | |||
102 | eaddr = tr->linear_address; | ||
103 | pid = (tr->linear_address >> 32) & 0xff; | ||
104 | as = (tr->linear_address >> 40) & 0x1; | ||
105 | |||
106 | index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as); | ||
107 | if (index == -1) { | ||
108 | tr->valid = 0; | ||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr); | ||
113 | /* XXX what does "writeable" and "usermode" even mean? */ | ||
114 | tr->valid = 1; | ||
115 | |||
116 | return 0; | ||
117 | } | ||
118 | |||
119 | static int kvmppc_core_get_sregs_44x(struct kvm_vcpu *vcpu, | ||
120 | struct kvm_sregs *sregs) | ||
121 | { | ||
122 | return kvmppc_get_sregs_ivor(vcpu, sregs); | ||
123 | } | ||
124 | |||
125 | static int kvmppc_core_set_sregs_44x(struct kvm_vcpu *vcpu, | ||
126 | struct kvm_sregs *sregs) | ||
127 | { | ||
128 | return kvmppc_set_sregs_ivor(vcpu, sregs); | ||
129 | } | ||
130 | |||
131 | static int kvmppc_get_one_reg_44x(struct kvm_vcpu *vcpu, u64 id, | ||
132 | union kvmppc_one_reg *val) | ||
133 | { | ||
134 | return -EINVAL; | ||
135 | } | ||
136 | |||
137 | static int kvmppc_set_one_reg_44x(struct kvm_vcpu *vcpu, u64 id, | ||
138 | union kvmppc_one_reg *val) | ||
139 | { | ||
140 | return -EINVAL; | ||
141 | } | ||
142 | |||
143 | static struct kvm_vcpu *kvmppc_core_vcpu_create_44x(struct kvm *kvm, | ||
144 | unsigned int id) | ||
145 | { | ||
146 | struct kvmppc_vcpu_44x *vcpu_44x; | ||
147 | struct kvm_vcpu *vcpu; | ||
148 | int err; | ||
149 | |||
150 | vcpu_44x = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); | ||
151 | if (!vcpu_44x) { | ||
152 | err = -ENOMEM; | ||
153 | goto out; | ||
154 | } | ||
155 | |||
156 | vcpu = &vcpu_44x->vcpu; | ||
157 | err = kvm_vcpu_init(vcpu, kvm, id); | ||
158 | if (err) | ||
159 | goto free_vcpu; | ||
160 | |||
161 | vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO); | ||
162 | if (!vcpu->arch.shared) | ||
163 | goto uninit_vcpu; | ||
164 | |||
165 | return vcpu; | ||
166 | |||
167 | uninit_vcpu: | ||
168 | kvm_vcpu_uninit(vcpu); | ||
169 | free_vcpu: | ||
170 | kmem_cache_free(kvm_vcpu_cache, vcpu_44x); | ||
171 | out: | ||
172 | return ERR_PTR(err); | ||
173 | } | ||
174 | |||
175 | static void kvmppc_core_vcpu_free_44x(struct kvm_vcpu *vcpu) | ||
176 | { | ||
177 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
178 | |||
179 | free_page((unsigned long)vcpu->arch.shared); | ||
180 | kvm_vcpu_uninit(vcpu); | ||
181 | kmem_cache_free(kvm_vcpu_cache, vcpu_44x); | ||
182 | } | ||
183 | |||
184 | static int kvmppc_core_init_vm_44x(struct kvm *kvm) | ||
185 | { | ||
186 | return 0; | ||
187 | } | ||
188 | |||
189 | static void kvmppc_core_destroy_vm_44x(struct kvm *kvm) | ||
190 | { | ||
191 | } | ||
192 | |||
193 | static struct kvmppc_ops kvm_ops_44x = { | ||
194 | .get_sregs = kvmppc_core_get_sregs_44x, | ||
195 | .set_sregs = kvmppc_core_set_sregs_44x, | ||
196 | .get_one_reg = kvmppc_get_one_reg_44x, | ||
197 | .set_one_reg = kvmppc_set_one_reg_44x, | ||
198 | .vcpu_load = kvmppc_core_vcpu_load_44x, | ||
199 | .vcpu_put = kvmppc_core_vcpu_put_44x, | ||
200 | .vcpu_create = kvmppc_core_vcpu_create_44x, | ||
201 | .vcpu_free = kvmppc_core_vcpu_free_44x, | ||
202 | .mmu_destroy = kvmppc_mmu_destroy_44x, | ||
203 | .init_vm = kvmppc_core_init_vm_44x, | ||
204 | .destroy_vm = kvmppc_core_destroy_vm_44x, | ||
205 | .emulate_op = kvmppc_core_emulate_op_44x, | ||
206 | .emulate_mtspr = kvmppc_core_emulate_mtspr_44x, | ||
207 | .emulate_mfspr = kvmppc_core_emulate_mfspr_44x, | ||
208 | }; | ||
209 | |||
210 | static int __init kvmppc_44x_init(void) | ||
211 | { | ||
212 | int r; | ||
213 | |||
214 | r = kvmppc_booke_init(); | ||
215 | if (r) | ||
216 | goto err_out; | ||
217 | |||
218 | r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE); | ||
219 | if (r) | ||
220 | goto err_out; | ||
221 | kvm_ops_44x.owner = THIS_MODULE; | ||
222 | kvmppc_pr_ops = &kvm_ops_44x; | ||
223 | |||
224 | err_out: | ||
225 | return r; | ||
226 | } | ||
227 | |||
228 | static void __exit kvmppc_44x_exit(void) | ||
229 | { | ||
230 | kvmppc_pr_ops = NULL; | ||
231 | kvmppc_booke_exit(); | ||
232 | } | ||
233 | |||
234 | module_init(kvmppc_44x_init); | ||
235 | module_exit(kvmppc_44x_exit); | ||
236 | MODULE_ALIAS_MISCDEV(KVM_MINOR); | ||
237 | MODULE_ALIAS("devname:kvm"); | ||
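Editorial note on the KVM_TRANSLATE handler deleted above: as the comment in kvmppc_core_vcpu_translate() says, 'linear_address' is an encoding of AS|PID|EADDR (effective address in bits 0-31, PID in bits 32-39, AS in bit 40). A hedged userspace sketch of packing that encoding follows; the helper name is illustrative, not an API from this tree:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Pack eaddr, PID and AS the way the deleted 44x translate handler unpacked
 * them, then ask KVM to walk the guest TLB for us. */
static int translate_44x(int vcpu_fd, uint32_t eaddr, uint8_t pid, unsigned int as)
{
	struct kvm_translation tr = {
		.linear_address = (uint64_t)eaddr |
				  ((uint64_t)pid << 32) |
				  ((uint64_t)(as & 1) << 40),
	};

	if (ioctl(vcpu_fd, KVM_TRANSLATE, &tr) < 0)
		return -1;

	/* tr.physical_address is meaningful only when tr.valid is set. */
	return tr.valid;
}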
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
deleted file mode 100644
index 92c9ab4bcfec..000000000000
--- a/arch/powerpc/kvm/44x_emulate.c
+++ /dev/null
@@ -1,194 +0,0 @@
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #include <asm/kvm_ppc.h> | ||
21 | #include <asm/dcr.h> | ||
22 | #include <asm/dcr-regs.h> | ||
23 | #include <asm/disassemble.h> | ||
24 | #include <asm/kvm_44x.h> | ||
25 | #include "timing.h" | ||
26 | |||
27 | #include "booke.h" | ||
28 | #include "44x_tlb.h" | ||
29 | |||
30 | #define XOP_MFDCRX 259 | ||
31 | #define XOP_MFDCR 323 | ||
32 | #define XOP_MTDCRX 387 | ||
33 | #define XOP_MTDCR 451 | ||
34 | #define XOP_TLBSX 914 | ||
35 | #define XOP_ICCCI 966 | ||
36 | #define XOP_TLBWE 978 | ||
37 | |||
38 | static int emulate_mtdcr(struct kvm_vcpu *vcpu, int rs, int dcrn) | ||
39 | { | ||
40 | /* emulate some access in kernel */ | ||
41 | switch (dcrn) { | ||
42 | case DCRN_CPR0_CONFIG_ADDR: | ||
43 | vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs); | ||
44 | return EMULATE_DONE; | ||
45 | default: | ||
46 | vcpu->run->dcr.dcrn = dcrn; | ||
47 | vcpu->run->dcr.data = kvmppc_get_gpr(vcpu, rs); | ||
48 | vcpu->run->dcr.is_write = 1; | ||
49 | vcpu->arch.dcr_is_write = 1; | ||
50 | vcpu->arch.dcr_needed = 1; | ||
51 | kvmppc_account_exit(vcpu, DCR_EXITS); | ||
52 | return EMULATE_DO_DCR; | ||
53 | } | ||
54 | } | ||
55 | |||
56 | static int emulate_mfdcr(struct kvm_vcpu *vcpu, int rt, int dcrn) | ||
57 | { | ||
58 | /* The guest may access CPR0 registers to determine the timebase | ||
59 | * frequency, and it must know the real host frequency because it | ||
60 | * can directly access the timebase registers. | ||
61 | * | ||
62 | * It would be possible to emulate those accesses in userspace, | ||
63 | * but userspace can really only figure out the end frequency. | ||
64 | * We could decompose that into the factors that compute it, but | ||
65 | * that's tricky math, and it's easier to just report the real | ||
66 | * CPR0 values. | ||
67 | */ | ||
68 | switch (dcrn) { | ||
69 | case DCRN_CPR0_CONFIG_ADDR: | ||
70 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr); | ||
71 | break; | ||
72 | case DCRN_CPR0_CONFIG_DATA: | ||
73 | local_irq_disable(); | ||
74 | mtdcr(DCRN_CPR0_CONFIG_ADDR, | ||
75 | vcpu->arch.cpr0_cfgaddr); | ||
76 | kvmppc_set_gpr(vcpu, rt, | ||
77 | mfdcr(DCRN_CPR0_CONFIG_DATA)); | ||
78 | local_irq_enable(); | ||
79 | break; | ||
80 | default: | ||
81 | vcpu->run->dcr.dcrn = dcrn; | ||
82 | vcpu->run->dcr.data = 0; | ||
83 | vcpu->run->dcr.is_write = 0; | ||
84 | vcpu->arch.dcr_is_write = 0; | ||
85 | vcpu->arch.io_gpr = rt; | ||
86 | vcpu->arch.dcr_needed = 1; | ||
87 | kvmppc_account_exit(vcpu, DCR_EXITS); | ||
88 | return EMULATE_DO_DCR; | ||
89 | } | ||
90 | |||
91 | return EMULATE_DONE; | ||
92 | } | ||
93 | |||
94 | int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||
95 | unsigned int inst, int *advance) | ||
96 | { | ||
97 | int emulated = EMULATE_DONE; | ||
98 | int dcrn = get_dcrn(inst); | ||
99 | int ra = get_ra(inst); | ||
100 | int rb = get_rb(inst); | ||
101 | int rc = get_rc(inst); | ||
102 | int rs = get_rs(inst); | ||
103 | int rt = get_rt(inst); | ||
104 | int ws = get_ws(inst); | ||
105 | |||
106 | switch (get_op(inst)) { | ||
107 | case 31: | ||
108 | switch (get_xop(inst)) { | ||
109 | |||
110 | case XOP_MFDCR: | ||
111 | emulated = emulate_mfdcr(vcpu, rt, dcrn); | ||
112 | break; | ||
113 | |||
114 | case XOP_MFDCRX: | ||
115 | emulated = emulate_mfdcr(vcpu, rt, | ||
116 | kvmppc_get_gpr(vcpu, ra)); | ||
117 | break; | ||
118 | |||
119 | case XOP_MTDCR: | ||
120 | emulated = emulate_mtdcr(vcpu, rs, dcrn); | ||
121 | break; | ||
122 | |||
123 | case XOP_MTDCRX: | ||
124 | emulated = emulate_mtdcr(vcpu, rs, | ||
125 | kvmppc_get_gpr(vcpu, ra)); | ||
126 | break; | ||
127 | |||
128 | case XOP_TLBWE: | ||
129 | emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws); | ||
130 | break; | ||
131 | |||
132 | case XOP_TLBSX: | ||
133 | emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc); | ||
134 | break; | ||
135 | |||
136 | case XOP_ICCCI: | ||
137 | break; | ||
138 | |||
139 | default: | ||
140 | emulated = EMULATE_FAIL; | ||
141 | } | ||
142 | |||
143 | break; | ||
144 | |||
145 | default: | ||
146 | emulated = EMULATE_FAIL; | ||
147 | } | ||
148 | |||
149 | if (emulated == EMULATE_FAIL) | ||
150 | emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance); | ||
151 | |||
152 | return emulated; | ||
153 | } | ||
154 | |||
155 | int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) | ||
156 | { | ||
157 | int emulated = EMULATE_DONE; | ||
158 | |||
159 | switch (sprn) { | ||
160 | case SPRN_PID: | ||
161 | kvmppc_set_pid(vcpu, spr_val); break; | ||
162 | case SPRN_MMUCR: | ||
163 | vcpu->arch.mmucr = spr_val; break; | ||
164 | case SPRN_CCR0: | ||
165 | vcpu->arch.ccr0 = spr_val; break; | ||
166 | case SPRN_CCR1: | ||
167 | vcpu->arch.ccr1 = spr_val; break; | ||
168 | default: | ||
169 | emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val); | ||
170 | } | ||
171 | |||
172 | return emulated; | ||
173 | } | ||
174 | |||
175 | int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) | ||
176 | { | ||
177 | int emulated = EMULATE_DONE; | ||
178 | |||
179 | switch (sprn) { | ||
180 | case SPRN_PID: | ||
181 | *spr_val = vcpu->arch.pid; break; | ||
182 | case SPRN_MMUCR: | ||
183 | *spr_val = vcpu->arch.mmucr; break; | ||
184 | case SPRN_CCR0: | ||
185 | *spr_val = vcpu->arch.ccr0; break; | ||
186 | case SPRN_CCR1: | ||
187 | *spr_val = vcpu->arch.ccr1; break; | ||
188 | default: | ||
189 | emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val); | ||
190 | } | ||
191 | |||
192 | return emulated; | ||
193 | } | ||
194 | |||
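For context on the DCR plumbing deleted above (and retired ABI-wide by the "Remove DCR handling" patch in this series): DCR accesses the kernel did not emulate returned EMULATE_DO_DCR and surfaced in userspace as a KVM_EXIT_DCR exit carrying run->dcr.dcrn, run->dcr.data and run->dcr.is_write. A rough sketch of the userspace side against that old interface; model_dcr_read()/model_dcr_write() are hypothetical device-model helpers, not a real API:

#include <linux/kvm.h>

extern void model_dcr_write(unsigned int dcrn, unsigned int data);	/* hypothetical */
extern unsigned int model_dcr_read(unsigned int dcrn);			/* hypothetical */

/* Complete a DCR access that the kernel deferred with EMULATE_DO_DCR. */
static void handle_dcr_exit(struct kvm_run *run)
{
	if (run->dcr.is_write)
		model_dcr_write(run->dcr.dcrn, run->dcr.data);
	else
		run->dcr.data = model_dcr_read(run->dcr.dcrn);
}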
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
deleted file mode 100644
index 0deef1082e02..000000000000
--- a/arch/powerpc/kvm/44x_tlb.c
+++ /dev/null
@@ -1,528 +0,0 @@
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2007 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #include <linux/types.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/kvm.h> | ||
23 | #include <linux/kvm_host.h> | ||
24 | #include <linux/highmem.h> | ||
25 | |||
26 | #include <asm/tlbflush.h> | ||
27 | #include <asm/mmu-44x.h> | ||
28 | #include <asm/kvm_ppc.h> | ||
29 | #include <asm/kvm_44x.h> | ||
30 | #include "timing.h" | ||
31 | |||
32 | #include "44x_tlb.h" | ||
33 | #include "trace.h" | ||
34 | |||
35 | #ifndef PPC44x_TLBE_SIZE | ||
36 | #define PPC44x_TLBE_SIZE PPC44x_TLB_4K | ||
37 | #endif | ||
38 | |||
39 | #define PAGE_SIZE_4K (1<<12) | ||
40 | #define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1)) | ||
41 | |||
42 | #define PPC44x_TLB_UATTR_MASK \ | ||
43 | (PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3) | ||
44 | #define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW) | ||
45 | #define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW) | ||
46 | |||
47 | #ifdef DEBUG | ||
48 | void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu) | ||
49 | { | ||
50 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
51 | struct kvmppc_44x_tlbe *tlbe; | ||
52 | int i; | ||
53 | |||
54 | printk("vcpu %d TLB dump:\n", vcpu->vcpu_id); | ||
55 | printk("| %2s | %3s | %8s | %8s | %8s |\n", | ||
56 | "nr", "tid", "word0", "word1", "word2"); | ||
57 | |||
58 | for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) { | ||
59 | tlbe = &vcpu_44x->guest_tlb[i]; | ||
60 | if (tlbe->word0 & PPC44x_TLB_VALID) | ||
61 | printk(" G%2d | %02X | %08X | %08X | %08X |\n", | ||
62 | i, tlbe->tid, tlbe->word0, tlbe->word1, | ||
63 | tlbe->word2); | ||
64 | } | ||
65 | } | ||
66 | #endif | ||
67 | |||
68 | static inline void kvmppc_44x_tlbie(unsigned int index) | ||
69 | { | ||
70 | /* 0 <= index < 64, so the V bit is clear and we can use the index as | ||
71 | * word0. */ | ||
72 | asm volatile( | ||
73 | "tlbwe %[index], %[index], 0\n" | ||
74 | : | ||
75 | : [index] "r"(index) | ||
76 | ); | ||
77 | } | ||
78 | |||
79 | static inline void kvmppc_44x_tlbre(unsigned int index, | ||
80 | struct kvmppc_44x_tlbe *tlbe) | ||
81 | { | ||
82 | asm volatile( | ||
83 | "tlbre %[word0], %[index], 0\n" | ||
84 | "mfspr %[tid], %[sprn_mmucr]\n" | ||
85 | "andi. %[tid], %[tid], 0xff\n" | ||
86 | "tlbre %[word1], %[index], 1\n" | ||
87 | "tlbre %[word2], %[index], 2\n" | ||
88 | : [word0] "=r"(tlbe->word0), | ||
89 | [word1] "=r"(tlbe->word1), | ||
90 | [word2] "=r"(tlbe->word2), | ||
91 | [tid] "=r"(tlbe->tid) | ||
92 | : [index] "r"(index), | ||
93 | [sprn_mmucr] "i"(SPRN_MMUCR) | ||
94 | : "cc" | ||
95 | ); | ||
96 | } | ||
97 | |||
98 | static inline void kvmppc_44x_tlbwe(unsigned int index, | ||
99 | struct kvmppc_44x_tlbe *stlbe) | ||
100 | { | ||
101 | unsigned long tmp; | ||
102 | |||
103 | asm volatile( | ||
104 | "mfspr %[tmp], %[sprn_mmucr]\n" | ||
105 | "rlwimi %[tmp], %[tid], 0, 0xff\n" | ||
106 | "mtspr %[sprn_mmucr], %[tmp]\n" | ||
107 | "tlbwe %[word0], %[index], 0\n" | ||
108 | "tlbwe %[word1], %[index], 1\n" | ||
109 | "tlbwe %[word2], %[index], 2\n" | ||
110 | : [tmp] "=&r"(tmp) | ||
111 | : [word0] "r"(stlbe->word0), | ||
112 | [word1] "r"(stlbe->word1), | ||
113 | [word2] "r"(stlbe->word2), | ||
114 | [tid] "r"(stlbe->tid), | ||
115 | [index] "r"(index), | ||
116 | [sprn_mmucr] "i"(SPRN_MMUCR) | ||
117 | ); | ||
118 | } | ||
119 | |||
120 | static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode) | ||
121 | { | ||
122 | /* We only care about the guest's permission and user bits. */ | ||
123 | attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK; | ||
124 | |||
125 | if (!usermode) { | ||
126 | /* Guest is in supervisor mode, so we need to translate guest | ||
127 | * supervisor permissions into user permissions. */ | ||
128 | attrib &= ~PPC44x_TLB_USER_PERM_MASK; | ||
129 | attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3; | ||
130 | } | ||
131 | |||
132 | /* Make sure host can always access this memory. */ | ||
133 | attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW; | ||
134 | |||
135 | /* WIMGE = 0b00100 */ | ||
136 | attrib |= PPC44x_TLB_M; | ||
137 | |||
138 | return attrib; | ||
139 | } | ||
140 | |||
141 | /* Load shadow TLB back into hardware. */ | ||
142 | void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu) | ||
143 | { | ||
144 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
145 | int i; | ||
146 | |||
147 | for (i = 0; i <= tlb_44x_hwater; i++) { | ||
148 | struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i]; | ||
149 | |||
150 | if (get_tlb_v(stlbe) && get_tlb_ts(stlbe)) | ||
151 | kvmppc_44x_tlbwe(i, stlbe); | ||
152 | } | ||
153 | } | ||
154 | |||
155 | static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x, | ||
156 | unsigned int i) | ||
157 | { | ||
158 | vcpu_44x->shadow_tlb_mod[i] = 1; | ||
159 | } | ||
160 | |||
161 | /* Save hardware TLB to the vcpu, and invalidate all guest mappings. */ | ||
162 | void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu) | ||
163 | { | ||
164 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
165 | int i; | ||
166 | |||
167 | for (i = 0; i <= tlb_44x_hwater; i++) { | ||
168 | struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i]; | ||
169 | |||
170 | if (vcpu_44x->shadow_tlb_mod[i]) | ||
171 | kvmppc_44x_tlbre(i, stlbe); | ||
172 | |||
173 | if (get_tlb_v(stlbe) && get_tlb_ts(stlbe)) | ||
174 | kvmppc_44x_tlbie(i); | ||
175 | } | ||
176 | } | ||
177 | |||
178 | |||
179 | /* Search the guest TLB for a matching entry. */ | ||
180 | int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid, | ||
181 | unsigned int as) | ||
182 | { | ||
183 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
184 | int i; | ||
185 | |||
186 | /* XXX Replace loop with fancy data structures. */ | ||
187 | for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) { | ||
188 | struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i]; | ||
189 | unsigned int tid; | ||
190 | |||
191 | if (eaddr < get_tlb_eaddr(tlbe)) | ||
192 | continue; | ||
193 | |||
194 | if (eaddr > get_tlb_end(tlbe)) | ||
195 | continue; | ||
196 | |||
197 | tid = get_tlb_tid(tlbe); | ||
198 | if (tid && (tid != pid)) | ||
199 | continue; | ||
200 | |||
201 | if (!get_tlb_v(tlbe)) | ||
202 | continue; | ||
203 | |||
204 | if (get_tlb_ts(tlbe) != as) | ||
205 | continue; | ||
206 | |||
207 | return i; | ||
208 | } | ||
209 | |||
210 | return -1; | ||
211 | } | ||
212 | |||
213 | gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index, | ||
214 | gva_t eaddr) | ||
215 | { | ||
216 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
217 | struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; | ||
218 | unsigned int pgmask = get_tlb_bytes(gtlbe) - 1; | ||
219 | |||
220 | return get_tlb_raddr(gtlbe) | (eaddr & pgmask); | ||
221 | } | ||
222 | |||
223 | int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) | ||
224 | { | ||
225 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); | ||
226 | |||
227 | return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); | ||
228 | } | ||
229 | |||
230 | int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) | ||
231 | { | ||
232 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS); | ||
233 | |||
234 | return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); | ||
235 | } | ||
236 | |||
237 | void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu) | ||
238 | { | ||
239 | } | ||
240 | |||
241 | void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu) | ||
242 | { | ||
243 | } | ||
244 | |||
245 | static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x, | ||
246 | unsigned int stlb_index) | ||
247 | { | ||
248 | struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index]; | ||
249 | |||
250 | if (!ref->page) | ||
251 | return; | ||
252 | |||
253 | /* Discard from the TLB. */ | ||
254 | /* Note: we could actually invalidate a host mapping, if the host overwrote | ||
255 | * this TLB entry since we inserted a guest mapping. */ | ||
256 | kvmppc_44x_tlbie(stlb_index); | ||
257 | |||
258 | /* Now release the page. */ | ||
259 | if (ref->writeable) | ||
260 | kvm_release_page_dirty(ref->page); | ||
261 | else | ||
262 | kvm_release_page_clean(ref->page); | ||
263 | |||
264 | ref->page = NULL; | ||
265 | |||
266 | /* XXX set tlb_44x_index to stlb_index? */ | ||
267 | |||
268 | trace_kvm_stlb_inval(stlb_index); | ||
269 | } | ||
270 | |||
271 | void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu) | ||
272 | { | ||
273 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
274 | int i; | ||
275 | |||
276 | for (i = 0; i <= tlb_44x_hwater; i++) | ||
277 | kvmppc_44x_shadow_release(vcpu_44x, i); | ||
278 | } | ||
279 | |||
280 | /** | ||
281 | * kvmppc_mmu_map -- create a host mapping for guest memory | ||
282 | * | ||
283 | * If the guest wanted a larger page than the host supports, only the first | ||
284 | * host page is mapped here and the rest are demand faulted. | ||
285 | * | ||
286 | * If the guest wanted a smaller page than the host page size, we map only the | ||
287 | * guest-size page (i.e. not a full host page mapping). | ||
288 | * | ||
289 | * Caller must ensure that the specified guest TLB entry is safe to insert into | ||
290 | * the shadow TLB. | ||
291 | */ | ||
292 | void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, | ||
293 | unsigned int gtlb_index) | ||
294 | { | ||
295 | struct kvmppc_44x_tlbe stlbe; | ||
296 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
297 | struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; | ||
298 | struct kvmppc_44x_shadow_ref *ref; | ||
299 | struct page *new_page; | ||
300 | hpa_t hpaddr; | ||
301 | gfn_t gfn; | ||
302 | u32 asid = gtlbe->tid; | ||
303 | u32 flags = gtlbe->word2; | ||
304 | u32 max_bytes = get_tlb_bytes(gtlbe); | ||
305 | unsigned int victim; | ||
306 | |||
307 | /* Select TLB entry to clobber. Indirectly guard against races with the TLB | ||
308 | * miss handler by disabling interrupts. */ | ||
309 | local_irq_disable(); | ||
310 | victim = ++tlb_44x_index; | ||
311 | if (victim > tlb_44x_hwater) | ||
312 | victim = 0; | ||
313 | tlb_44x_index = victim; | ||
314 | local_irq_enable(); | ||
315 | |||
316 | /* Get reference to new page. */ | ||
317 | gfn = gpaddr >> PAGE_SHIFT; | ||
318 | new_page = gfn_to_page(vcpu->kvm, gfn); | ||
319 | if (is_error_page(new_page)) { | ||
320 | printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n", | ||
321 | (unsigned long long)gfn); | ||
322 | return; | ||
323 | } | ||
324 | hpaddr = page_to_phys(new_page); | ||
325 | |||
326 | /* Invalidate any previous shadow mappings. */ | ||
327 | kvmppc_44x_shadow_release(vcpu_44x, victim); | ||
328 | |||
329 | /* XXX Make sure (va, size) doesn't overlap any other | ||
330 | * entries. 440x6 user manual says the result would be | ||
331 | * "undefined." */ | ||
332 | |||
333 | /* XXX what about AS? */ | ||
334 | |||
335 | /* Force TS=1 for all guest mappings. */ | ||
336 | stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS; | ||
337 | |||
338 | if (max_bytes >= PAGE_SIZE) { | ||
339 | /* Guest mapping is larger than or equal to host page size. We can use | ||
340 | * a "native" host mapping. */ | ||
341 | stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE; | ||
342 | } else { | ||
343 | /* Guest mapping is smaller than host page size. We must restrict the | ||
344 | * size of the mapping to be at most the smaller of the two, but for | ||
345 | * simplicity we fall back to a 4K mapping (this is probably what the | ||
346 | * guest is using anyways). */ | ||
347 | stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K; | ||
348 | |||
349 | /* 'hpaddr' is a host page, which is larger than the mapping we're | ||
350 | * inserting here. To compensate, we must add the in-page offset to the | ||
351 | * sub-page. */ | ||
352 | hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K); | ||
353 | } | ||
354 | |||
355 | stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf); | ||
356 | stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags, | ||
357 | vcpu->arch.shared->msr & MSR_PR); | ||
358 | stlbe.tid = !(asid & 0xff); | ||
359 | |||
360 | /* Keep track of the reference so we can properly release it later. */ | ||
361 | ref = &vcpu_44x->shadow_refs[victim]; | ||
362 | ref->page = new_page; | ||
363 | ref->gtlb_index = gtlb_index; | ||
364 | ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW); | ||
365 | ref->tid = stlbe.tid; | ||
366 | |||
367 | /* Insert shadow mapping into hardware TLB. */ | ||
368 | kvmppc_44x_tlbe_set_modified(vcpu_44x, victim); | ||
369 | kvmppc_44x_tlbwe(victim, &stlbe); | ||
370 | trace_kvm_stlb_write(victim, stlbe.tid, stlbe.word0, stlbe.word1, | ||
371 | stlbe.word2); | ||
372 | } | ||
373 | |||
374 | /* For a particular guest TLB entry, invalidate the corresponding host TLB | ||
375 | * mappings and release the host pages. */ | ||
376 | static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu, | ||
377 | unsigned int gtlb_index) | ||
378 | { | ||
379 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
380 | int i; | ||
381 | |||
382 | for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) { | ||
383 | struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i]; | ||
384 | if (ref->gtlb_index == gtlb_index) | ||
385 | kvmppc_44x_shadow_release(vcpu_44x, i); | ||
386 | } | ||
387 | } | ||
388 | |||
389 | void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) | ||
390 | { | ||
391 | int usermode = vcpu->arch.shared->msr & MSR_PR; | ||
392 | |||
393 | vcpu->arch.shadow_pid = !usermode; | ||
394 | } | ||
395 | |||
396 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid) | ||
397 | { | ||
398 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
399 | int i; | ||
400 | |||
401 | if (unlikely(vcpu->arch.pid == new_pid)) | ||
402 | return; | ||
403 | |||
404 | vcpu->arch.pid = new_pid; | ||
405 | |||
406 | /* Guest userspace runs with TID=0 mappings and PID=0, to make sure it | ||
407 | * can't access guest kernel mappings (TID=1). When we switch to a new | ||
408 | * guest PID, which will also use host PID=0, we must discard the old guest | ||
409 | * userspace mappings. */ | ||
410 | for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) { | ||
411 | struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i]; | ||
412 | |||
413 | if (ref->tid == 0) | ||
414 | kvmppc_44x_shadow_release(vcpu_44x, i); | ||
415 | } | ||
416 | } | ||
417 | |||
418 | static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, | ||
419 | const struct kvmppc_44x_tlbe *tlbe) | ||
420 | { | ||
421 | gpa_t gpa; | ||
422 | |||
423 | if (!get_tlb_v(tlbe)) | ||
424 | return 0; | ||
425 | |||
426 | /* Does it match current guest AS? */ | ||
427 | /* XXX what about IS != DS? */ | ||
428 | if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS)) | ||
429 | return 0; | ||
430 | |||
431 | gpa = get_tlb_raddr(tlbe); | ||
432 | if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT)) | ||
433 | /* Mapping is not for RAM. */ | ||
434 | return 0; | ||
435 | |||
436 | return 1; | ||
437 | } | ||
438 | |||
439 | int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) | ||
440 | { | ||
441 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
442 | struct kvmppc_44x_tlbe *tlbe; | ||
443 | unsigned int gtlb_index; | ||
444 | int idx; | ||
445 | |||
446 | gtlb_index = kvmppc_get_gpr(vcpu, ra); | ||
447 | if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) { | ||
448 | printk("%s: index %d\n", __func__, gtlb_index); | ||
449 | kvmppc_dump_vcpu(vcpu); | ||
450 | return EMULATE_FAIL; | ||
451 | } | ||
452 | |||
453 | tlbe = &vcpu_44x->guest_tlb[gtlb_index]; | ||
454 | |||
455 | /* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */ | ||
456 | if (tlbe->word0 & PPC44x_TLB_VALID) | ||
457 | kvmppc_44x_invalidate(vcpu, gtlb_index); | ||
458 | |||
459 | switch (ws) { | ||
460 | case PPC44x_TLB_PAGEID: | ||
461 | tlbe->tid = get_mmucr_stid(vcpu); | ||
462 | tlbe->word0 = kvmppc_get_gpr(vcpu, rs); | ||
463 | break; | ||
464 | |||
465 | case PPC44x_TLB_XLAT: | ||
466 | tlbe->word1 = kvmppc_get_gpr(vcpu, rs); | ||
467 | break; | ||
468 | |||
469 | case PPC44x_TLB_ATTRIB: | ||
470 | tlbe->word2 = kvmppc_get_gpr(vcpu, rs); | ||
471 | break; | ||
472 | |||
473 | default: | ||
474 | return EMULATE_FAIL; | ||
475 | } | ||
476 | |||
477 | idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
478 | |||
479 | if (tlbe_is_host_safe(vcpu, tlbe)) { | ||
480 | gva_t eaddr; | ||
481 | gpa_t gpaddr; | ||
482 | u32 bytes; | ||
483 | |||
484 | eaddr = get_tlb_eaddr(tlbe); | ||
485 | gpaddr = get_tlb_raddr(tlbe); | ||
486 | |||
487 | /* Use the advertised page size to mask effective and real addrs. */ | ||
488 | bytes = get_tlb_bytes(tlbe); | ||
489 | eaddr &= ~(bytes - 1); | ||
490 | gpaddr &= ~(bytes - 1); | ||
491 | |||
492 | kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); | ||
493 | } | ||
494 | |||
495 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
496 | |||
497 | trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1, | ||
498 | tlbe->word2); | ||
499 | |||
500 | kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); | ||
501 | return EMULATE_DONE; | ||
502 | } | ||
503 | |||
504 | int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc) | ||
505 | { | ||
506 | u32 ea; | ||
507 | int gtlb_index; | ||
508 | unsigned int as = get_mmucr_sts(vcpu); | ||
509 | unsigned int pid = get_mmucr_stid(vcpu); | ||
510 | |||
511 | ea = kvmppc_get_gpr(vcpu, rb); | ||
512 | if (ra) | ||
513 | ea += kvmppc_get_gpr(vcpu, ra); | ||
514 | |||
515 | gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as); | ||
516 | if (rc) { | ||
517 | u32 cr = kvmppc_get_cr(vcpu); | ||
518 | |||
519 | if (gtlb_index < 0) | ||
520 | kvmppc_set_cr(vcpu, cr & ~0x20000000); | ||
521 | else | ||
522 | kvmppc_set_cr(vcpu, cr | 0x20000000); | ||
523 | } | ||
524 | kvmppc_set_gpr(vcpu, rt, gtlb_index); | ||
525 | |||
526 | kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS); | ||
527 | return EMULATE_DONE; | ||
528 | } | ||
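A small editorial note on kvmppc_44x_emul_tlbsx() above: the 0x20000000 mask applied to CR is the EQ bit of CR field 0, which tlbsx. (Rc=1) sets on a TLB hit and clears on a miss. For reference, the generic CR0 bit masks (not defined in this file):

/* Bits of CR field 0, the most-significant nibble of the condition register. */
#define CR0_LT	0x80000000
#define CR0_GT	0x40000000
#define CR0_EQ	0x20000000	/* what the tlbsx emulation sets on a hit */
#define CR0_SO	0x10000000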
diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h
deleted file mode 100644
index a9ff80e51526..000000000000
--- a/arch/powerpc/kvm/44x_tlb.h
+++ /dev/null
@@ -1,86 +0,0 @@
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2007 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #ifndef __KVM_POWERPC_TLB_H__ | ||
21 | #define __KVM_POWERPC_TLB_H__ | ||
22 | |||
23 | #include <linux/kvm_host.h> | ||
24 | #include <asm/mmu-44x.h> | ||
25 | |||
26 | extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, | ||
27 | unsigned int pid, unsigned int as); | ||
28 | |||
29 | extern int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, | ||
30 | u8 rc); | ||
31 | extern int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws); | ||
32 | |||
33 | /* TLB helper functions */ | ||
34 | static inline unsigned int get_tlb_size(const struct kvmppc_44x_tlbe *tlbe) | ||
35 | { | ||
36 | return (tlbe->word0 >> 4) & 0xf; | ||
37 | } | ||
38 | |||
39 | static inline gva_t get_tlb_eaddr(const struct kvmppc_44x_tlbe *tlbe) | ||
40 | { | ||
41 | return tlbe->word0 & 0xfffffc00; | ||
42 | } | ||
43 | |||
44 | static inline gva_t get_tlb_bytes(const struct kvmppc_44x_tlbe *tlbe) | ||
45 | { | ||
46 | unsigned int pgsize = get_tlb_size(tlbe); | ||
47 | return 1 << 10 << (pgsize << 1); | ||
48 | } | ||
49 | |||
50 | static inline gva_t get_tlb_end(const struct kvmppc_44x_tlbe *tlbe) | ||
51 | { | ||
52 | return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1; | ||
53 | } | ||
54 | |||
55 | static inline u64 get_tlb_raddr(const struct kvmppc_44x_tlbe *tlbe) | ||
56 | { | ||
57 | u64 word1 = tlbe->word1; | ||
58 | return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00); | ||
59 | } | ||
60 | |||
61 | static inline unsigned int get_tlb_tid(const struct kvmppc_44x_tlbe *tlbe) | ||
62 | { | ||
63 | return tlbe->tid & 0xff; | ||
64 | } | ||
65 | |||
66 | static inline unsigned int get_tlb_ts(const struct kvmppc_44x_tlbe *tlbe) | ||
67 | { | ||
68 | return (tlbe->word0 >> 8) & 0x1; | ||
69 | } | ||
70 | |||
71 | static inline unsigned int get_tlb_v(const struct kvmppc_44x_tlbe *tlbe) | ||
72 | { | ||
73 | return (tlbe->word0 >> 9) & 0x1; | ||
74 | } | ||
75 | |||
76 | static inline unsigned int get_mmucr_stid(const struct kvm_vcpu *vcpu) | ||
77 | { | ||
78 | return vcpu->arch.mmucr & 0xff; | ||
79 | } | ||
80 | |||
81 | static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu) | ||
82 | { | ||
83 | return (vcpu->arch.mmucr >> 16) & 0x1; | ||
84 | } | ||
85 | |||
86 | #endif /* __KVM_POWERPC_TLB_H__ */ | ||
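Editorial aside on the helpers above: get_tlb_bytes() decodes the 4-bit SIZE field as 1 KiB * 4^SIZE. A tiny self-contained check of that formula; the SIZE values implied by PPC44x_TLB_4K and PPC44x_TLB_16M (1 and 7) are quoted from memory of mmu-44x.h:

#include <assert.h>

/* Mirror of get_tlb_bytes(): bytes = 1 KiB << (2 * SIZE) = 1 KiB * 4^SIZE. */
static unsigned long tlb_bytes(unsigned int size_field)
{
	return 1UL << 10 << (size_field << 1);
}

int main(void)
{
	assert(tlb_bytes(1) == 4096UL);               /* SIZE=1 -> 4 KiB  (PPC44x_TLB_4K)  */
	assert(tlb_bytes(7) == 16UL * 1024 * 1024);   /* SIZE=7 -> 16 MiB (PPC44x_TLB_16M) */
	return 0;
}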
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index d6a53b95de94..8f104a6879f0 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -75,7 +75,6 @@ config KVM_BOOK3S_64
75 | config KVM_BOOK3S_64_HV | 75 | config KVM_BOOK3S_64_HV |
76 | tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host" | 76 | tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host" |
77 | depends on KVM_BOOK3S_64 | 77 | depends on KVM_BOOK3S_64 |
78 | depends on !CPU_LITTLE_ENDIAN | ||
79 | select KVM_BOOK3S_HV_POSSIBLE | 78 | select KVM_BOOK3S_HV_POSSIBLE |
80 | select MMU_NOTIFIER | 79 | select MMU_NOTIFIER |
81 | select CMA | 80 | select CMA |
@@ -113,23 +112,9 @@ config KVM_BOOK3S_64_PR
113 | config KVM_BOOKE_HV | 112 | config KVM_BOOKE_HV |
114 | bool | 113 | bool |
115 | 114 | ||
116 | config KVM_440 | ||
117 | bool "KVM support for PowerPC 440 processors" | ||
118 | depends on 44x | ||
119 | select KVM | ||
120 | select KVM_MMIO | ||
121 | ---help--- | ||
122 | Support running unmodified 440 guest kernels in virtual machines on | ||
123 | 440 host processors. | ||
124 | |||
125 | This module provides access to the hardware capabilities through | ||
126 | a character device node named /dev/kvm. | ||
127 | |||
128 | If unsure, say N. | ||
129 | |||
130 | config KVM_EXIT_TIMING | 115 | config KVM_EXIT_TIMING |
131 | bool "Detailed exit timing" | 116 | bool "Detailed exit timing" |
132 | depends on KVM_440 || KVM_E500V2 || KVM_E500MC | 117 | depends on KVM_E500V2 || KVM_E500MC |
133 | ---help--- | 118 | ---help--- |
134 | Calculate elapsed time for every exit/enter cycle. A per-vcpu | 119 | Calculate elapsed time for every exit/enter cycle. A per-vcpu |
135 | report is available in debugfs kvm/vm#_vcpu#_timing. | 120 | report is available in debugfs kvm/vm#_vcpu#_timing. |
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index ce569b6bf4d8..2d590dea5482 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -10,27 +10,17 @@ KVM := ../../../virt/kvm
10 | common-objs-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \ | 10 | common-objs-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \ |
11 | $(KVM)/eventfd.o | 11 | $(KVM)/eventfd.o |
12 | 12 | ||
13 | CFLAGS_44x_tlb.o := -I. | ||
14 | CFLAGS_e500_mmu.o := -I. | 13 | CFLAGS_e500_mmu.o := -I. |
15 | CFLAGS_e500_mmu_host.o := -I. | 14 | CFLAGS_e500_mmu_host.o := -I. |
16 | CFLAGS_emulate.o := -I. | 15 | CFLAGS_emulate.o := -I. |
16 | CFLAGS_emulate_loadstore.o := -I. | ||
17 | 17 | ||
18 | common-objs-y += powerpc.o emulate.o | 18 | common-objs-y += powerpc.o emulate.o emulate_loadstore.o |
19 | obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o | 19 | obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o |
20 | obj-$(CONFIG_KVM_BOOK3S_HANDLER) += book3s_exports.o | 20 | obj-$(CONFIG_KVM_BOOK3S_HANDLER) += book3s_exports.o |
21 | 21 | ||
22 | AFLAGS_booke_interrupts.o := -I$(obj) | 22 | AFLAGS_booke_interrupts.o := -I$(obj) |
23 | 23 | ||
24 | kvm-440-objs := \ | ||
25 | $(common-objs-y) \ | ||
26 | booke.o \ | ||
27 | booke_emulate.o \ | ||
28 | booke_interrupts.o \ | ||
29 | 44x.o \ | ||
30 | 44x_tlb.o \ | ||
31 | 44x_emulate.o | ||
32 | kvm-objs-$(CONFIG_KVM_440) := $(kvm-440-objs) | ||
33 | |||
34 | kvm-e500-objs := \ | 24 | kvm-e500-objs := \ |
35 | $(common-objs-y) \ | 25 | $(common-objs-y) \ |
36 | booke.o \ | 26 | booke.o \ |
@@ -58,6 +48,7 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) := \
58 | 48 | ||
59 | kvm-pr-y := \ | 49 | kvm-pr-y := \ |
60 | fpu.o \ | 50 | fpu.o \ |
51 | emulate.o \ | ||
61 | book3s_paired_singles.o \ | 52 | book3s_paired_singles.o \ |
62 | book3s_pr.o \ | 53 | book3s_pr.o \ |
63 | book3s_pr_papr.o \ | 54 | book3s_pr_papr.o \ |
@@ -101,7 +92,7 @@ kvm-book3s_64-module-objs += \
101 | $(KVM)/kvm_main.o \ | 92 | $(KVM)/kvm_main.o \ |
102 | $(KVM)/eventfd.o \ | 93 | $(KVM)/eventfd.o \ |
103 | powerpc.o \ | 94 | powerpc.o \ |
104 | emulate.o \ | 95 | emulate_loadstore.o \ |
105 | book3s.o \ | 96 | book3s.o \ |
106 | book3s_64_vio.o \ | 97 | book3s_64_vio.o \ |
107 | book3s_rtas.o \ | 98 | book3s_rtas.o \ |
@@ -127,7 +118,6 @@ kvm-objs-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o
127 | 118 | ||
128 | kvm-objs := $(kvm-objs-m) $(kvm-objs-y) | 119 | kvm-objs := $(kvm-objs-m) $(kvm-objs-y) |
129 | 120 | ||
130 | obj-$(CONFIG_KVM_440) += kvm.o | ||
131 | obj-$(CONFIG_KVM_E500V2) += kvm.o | 121 | obj-$(CONFIG_KVM_E500V2) += kvm.o |
132 | obj-$(CONFIG_KVM_E500MC) += kvm.o | 122 | obj-$(CONFIG_KVM_E500MC) += kvm.o |
133 | obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o | 123 | obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o |
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index c254c27f240e..dd03f6b299ba 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -72,6 +72,17 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
72 | { | 72 | { |
73 | } | 73 | } |
74 | 74 | ||
75 | void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu) | ||
76 | { | ||
77 | if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) { | ||
78 | ulong pc = kvmppc_get_pc(vcpu); | ||
79 | if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) | ||
80 | kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK); | ||
81 | vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK; | ||
82 | } | ||
83 | } | ||
84 | EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real); | ||
85 | |||
75 | static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu) | 86 | static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu) |
76 | { | 87 | { |
77 | if (!is_kvmppc_hv_enabled(vcpu->kvm)) | 88 | if (!is_kvmppc_hv_enabled(vcpu->kvm)) |
@@ -118,6 +129,7 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
118 | 129 | ||
119 | void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) | 130 | void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) |
120 | { | 131 | { |
132 | kvmppc_unfixup_split_real(vcpu); | ||
121 | kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu)); | 133 | kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu)); |
122 | kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags); | 134 | kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags); |
123 | kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec); | 135 | kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec); |
@@ -218,6 +230,23 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
218 | kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL); | 230 | kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL); |
219 | } | 231 | } |
220 | 232 | ||
233 | void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar, | ||
234 | ulong flags) | ||
235 | { | ||
236 | kvmppc_set_dar(vcpu, dar); | ||
237 | kvmppc_set_dsisr(vcpu, flags); | ||
238 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); | ||
239 | } | ||
240 | |||
241 | void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags) | ||
242 | { | ||
243 | u64 msr = kvmppc_get_msr(vcpu); | ||
244 | msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT); | ||
245 | msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT); | ||
246 | kvmppc_set_msr_fast(vcpu, msr); | ||
247 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE); | ||
248 | } | ||
249 | |||
221 | int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) | 250 | int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) |
222 | { | 251 | { |
223 | int deliver = 1; | 252 | int deliver = 1; |
@@ -342,18 +371,18 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
342 | } | 371 | } |
343 | EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter); | 372 | EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter); |
344 | 373 | ||
345 | pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing, | 374 | pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing, |
346 | bool *writable) | 375 | bool *writable) |
347 | { | 376 | { |
348 | ulong mp_pa = vcpu->arch.magic_page_pa; | 377 | ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM; |
378 | gfn_t gfn = gpa >> PAGE_SHIFT; | ||
349 | 379 | ||
350 | if (!(kvmppc_get_msr(vcpu) & MSR_SF)) | 380 | if (!(kvmppc_get_msr(vcpu) & MSR_SF)) |
351 | mp_pa = (uint32_t)mp_pa; | 381 | mp_pa = (uint32_t)mp_pa; |
352 | 382 | ||
353 | /* Magic page override */ | 383 | /* Magic page override */ |
354 | if (unlikely(mp_pa) && | 384 | gpa &= ~0xFFFULL; |
355 | unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) == | 385 | if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) { |
356 | ((mp_pa & PAGE_MASK) & KVM_PAM))) { | ||
357 | ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; | 386 | ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; |
358 | pfn_t pfn; | 387 | pfn_t pfn; |
359 | 388 | ||
@@ -366,11 +395,13 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
366 | 395 | ||
367 | return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable); | 396 | return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable); |
368 | } | 397 | } |
369 | EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn); | 398 | EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn); |
370 | 399 | ||
371 | static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, | 400 | int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid, |
372 | bool iswrite, struct kvmppc_pte *pte) | 401 | enum xlate_readwrite xlrw, struct kvmppc_pte *pte) |
373 | { | 402 | { |
403 | bool data = (xlid == XLATE_DATA); | ||
404 | bool iswrite = (xlrw == XLATE_WRITE); | ||
374 | int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR)); | 405 | int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR)); |
375 | int r; | 406 | int r; |
376 | 407 | ||
@@ -384,88 +415,34 @@ static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
384 | pte->may_write = true; | 415 | pte->may_write = true; |
385 | pte->may_execute = true; | 416 | pte->may_execute = true; |
386 | r = 0; | 417 | r = 0; |
418 | |||
419 | if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR && | ||
420 | !data) { | ||
421 | if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) && | ||
422 | ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)) | ||
423 | pte->raddr &= ~SPLIT_HACK_MASK; | ||
424 | } | ||
387 | } | 425 | } |
388 | 426 | ||
389 | return r; | 427 | return r; |
390 | } | 428 | } |
391 | 429 | ||
392 | static hva_t kvmppc_bad_hva(void) | 430 | int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, |
393 | { | 431 | u32 *inst) |
394 | return PAGE_OFFSET; | ||
395 | } | ||
396 | |||
397 | static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte, | ||
398 | bool read) | ||
399 | { | ||
400 | hva_t hpage; | ||
401 | |||
402 | if (read && !pte->may_read) | ||
403 | goto err; | ||
404 | |||
405 | if (!read && !pte->may_write) | ||
406 | goto err; | ||
407 | |||
408 | hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT); | ||
409 | if (kvm_is_error_hva(hpage)) | ||
410 | goto err; | ||
411 | |||
412 | return hpage | (pte->raddr & ~PAGE_MASK); | ||
413 | err: | ||
414 | return kvmppc_bad_hva(); | ||
415 | } | ||
416 | |||
417 | int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, | ||
418 | bool data) | ||
419 | { | ||
420 | struct kvmppc_pte pte; | ||
421 | |||
422 | vcpu->stat.st++; | ||
423 | |||
424 | if (kvmppc_xlate(vcpu, *eaddr, data, true, &pte)) | ||
425 | return -ENOENT; | ||
426 | |||
427 | *eaddr = pte.raddr; | ||
428 | |||
429 | if (!pte.may_write) | ||
430 | return -EPERM; | ||
431 | |||
432 | if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size)) | ||
433 | return EMULATE_DO_MMIO; | ||
434 | |||
435 | return EMULATE_DONE; | ||
436 | } | ||
437 | EXPORT_SYMBOL_GPL(kvmppc_st); | ||
438 | |||
439 | int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, | ||
440 | bool data) | ||
441 | { | 432 | { |
442 | struct kvmppc_pte pte; | 433 | ulong pc = kvmppc_get_pc(vcpu); |
443 | hva_t hva = *eaddr; | 434 | int r; |
444 | |||
445 | vcpu->stat.ld++; | ||
446 | |||
447 | if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte)) | ||
448 | goto nopte; | ||
449 | |||
450 | *eaddr = pte.raddr; | ||
451 | |||
452 | hva = kvmppc_pte_to_hva(vcpu, &pte, true); | ||
453 | if (kvm_is_error_hva(hva)) | ||
454 | goto mmio; | ||
455 | |||
456 | if (copy_from_user(ptr, (void __user *)hva, size)) { | ||
457 | printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva); | ||
458 | goto mmio; | ||
459 | } | ||
460 | 435 | ||
461 | return EMULATE_DONE; | 436 | if (type == INST_SC) |
437 | pc -= 4; | ||
462 | 438 | ||
463 | nopte: | 439 | r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false); |
464 | return -ENOENT; | 440 | if (r == EMULATE_DONE) |
465 | mmio: | 441 | return r; |
466 | return EMULATE_DO_MMIO; | 442 | else |
443 | return EMULATE_AGAIN; | ||
467 | } | 444 | } |
468 | EXPORT_SYMBOL_GPL(kvmppc_ld); | 445 | EXPORT_SYMBOL_GPL(kvmppc_load_last_inst); |
469 | 446 | ||
470 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | 447 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) |
471 | { | 448 | { |
@@ -646,6 +623,12 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
646 | case KVM_REG_PPC_BESCR: | 623 | case KVM_REG_PPC_BESCR: |
647 | val = get_reg_val(reg->id, vcpu->arch.bescr); | 624 | val = get_reg_val(reg->id, vcpu->arch.bescr); |
648 | break; | 625 | break; |
626 | case KVM_REG_PPC_VTB: | ||
627 | val = get_reg_val(reg->id, vcpu->arch.vtb); | ||
628 | break; | ||
629 | case KVM_REG_PPC_IC: | ||
630 | val = get_reg_val(reg->id, vcpu->arch.ic); | ||
631 | break; | ||
649 | default: | 632 | default: |
650 | r = -EINVAL; | 633 | r = -EINVAL; |
651 | break; | 634 | break; |
@@ -750,6 +733,12 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
750 | case KVM_REG_PPC_BESCR: | 733 | case KVM_REG_PPC_BESCR: |
751 | vcpu->arch.bescr = set_reg_val(reg->id, val); | 734 | vcpu->arch.bescr = set_reg_val(reg->id, val); |
752 | break; | 735 | break; |
736 | case KVM_REG_PPC_VTB: | ||
737 | vcpu->arch.vtb = set_reg_val(reg->id, val); | ||
738 | break; | ||
739 | case KVM_REG_PPC_IC: | ||
740 | vcpu->arch.ic = set_reg_val(reg->id, val); | ||
741 | break; | ||
753 | default: | 742 | default: |
754 | r = -EINVAL; | 743 | r = -EINVAL; |
755 | break; | 744 | break; |
@@ -913,6 +902,11 @@ int kvmppc_core_check_processor_compat(void)
913 | return 0; | 902 | return 0; |
914 | } | 903 | } |
915 | 904 | ||
905 | int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall) | ||
906 | { | ||
907 | return kvm->arch.kvm_ops->hcall_implemented(hcall); | ||
908 | } | ||
909 | |||
916 | static int kvmppc_book3s_init(void) | 910 | static int kvmppc_book3s_init(void) |
917 | { | 911 | { |
918 | int r; | 912 | int r; |
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 93503bbdae43..cd0b0730e29e 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -335,7 +335,7 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
335 | if (r < 0) | 335 | if (r < 0) |
336 | r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, | 336 | r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, |
337 | data, iswrite, true); | 337 | data, iswrite, true); |
338 | if (r < 0) | 338 | if (r == -ENOENT) |
339 | r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, | 339 | r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, |
340 | data, iswrite, false); | 340 | data, iswrite, false); |
341 | 341 | ||
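The retry above is narrowed on purpose: the secondary hash is now consulted only when the primary lookup reports a miss (-ENOENT), so a write-permission error from the primary PTE is no longer overwritten by a "not found" result from the second lookup. The pattern in isolation (a sketch; the final boolean selects the primary (true) or secondary (false) hash):

r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, iswrite, true);
if (r == -ENOENT)	/* miss only; -EPERM and friends are returned as-is */
	r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, iswrite, false);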
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 678e75370495..2035d16a9262 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -156,11 +156,10 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, | |||
156 | bool writable; | 156 | bool writable; |
157 | 157 | ||
158 | /* Get host physical address for gpa */ | 158 | /* Get host physical address for gpa */ |
159 | hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT, | 159 | hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable); |
160 | iswrite, &writable); | ||
161 | if (is_error_noslot_pfn(hpaddr)) { | 160 | if (is_error_noslot_pfn(hpaddr)) { |
162 | printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", | 161 | printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n", |
163 | orig_pte->eaddr); | 162 | orig_pte->raddr); |
164 | r = -EINVAL; | 163 | r = -EINVAL; |
165 | goto out; | 164 | goto out; |
166 | } | 165 | } |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 0ac98392f363..b982d925c710 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -104,9 +104,10 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, | |||
104 | smp_rmb(); | 104 | smp_rmb(); |
105 | 105 | ||
106 | /* Get host physical address for gpa */ | 106 | /* Get host physical address for gpa */ |
107 | pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable); | 107 | pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable); |
108 | if (is_error_noslot_pfn(pfn)) { | 108 | if (is_error_noslot_pfn(pfn)) { |
109 | printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", gfn); | 109 | printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n", |
110 | orig_pte->raddr); | ||
110 | r = -EINVAL; | 111 | r = -EINVAL; |
111 | goto out; | 112 | goto out; |
112 | } | 113 | } |
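Both kvmppc_mmu_map_page() variants now pass a guest physical address instead of a pre-shifted frame number. The replacement helper itself is not part of this diff; a plausible shape, assuming it simply folds the shift into the existing gfn-based lookup (the magic-page handling added elsewhere in this series is omitted):

static inline pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
				      bool writing, bool *writable)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;

	/* Same slot lookup as before; only the callers changed units. */
	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}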
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 68468d695f12..e3d17f571085 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -450,7 +450,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
450 | unsigned long slb_v; | 450 | unsigned long slb_v; |
451 | unsigned long pp, key; | 451 | unsigned long pp, key; |
452 | unsigned long v, gr; | 452 | unsigned long v, gr; |
453 | unsigned long *hptep; | 453 | __be64 *hptep; |
454 | int index; | 454 | int index; |
455 | int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR); | 455 | int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR); |
456 | 456 | ||
@@ -473,13 +473,13 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
473 | preempt_enable(); | 473 | preempt_enable(); |
474 | return -ENOENT; | 474 | return -ENOENT; |
475 | } | 475 | } |
476 | hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); | 476 | hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4)); |
477 | v = hptep[0] & ~HPTE_V_HVLOCK; | 477 | v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK; |
478 | gr = kvm->arch.revmap[index].guest_rpte; | 478 | gr = kvm->arch.revmap[index].guest_rpte; |
479 | 479 | ||
480 | /* Unlock the HPTE */ | 480 | /* Unlock the HPTE */ |
481 | asm volatile("lwsync" : : : "memory"); | 481 | asm volatile("lwsync" : : : "memory"); |
482 | hptep[0] = v; | 482 | hptep[0] = cpu_to_be64(v); |
483 | preempt_enable(); | 483 | preempt_enable(); |
484 | 484 | ||
485 | gpte->eaddr = eaddr; | 485 | gpte->eaddr = eaddr; |
@@ -530,21 +530,14 @@ static int instruction_is_store(unsigned int instr) | |||
530 | static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, | 530 | static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, |
531 | unsigned long gpa, gva_t ea, int is_store) | 531 | unsigned long gpa, gva_t ea, int is_store) |
532 | { | 532 | { |
533 | int ret; | ||
534 | u32 last_inst; | 533 | u32 last_inst; |
535 | unsigned long srr0 = kvmppc_get_pc(vcpu); | ||
536 | 534 | ||
537 | /* We try to load the last instruction. We don't let | 535 | /* |
538 | * emulate_instruction do it as it doesn't check what | ||
539 | * kvmppc_ld returns. | ||
540 | * If we fail, we just return to the guest and try executing it again. | 536 | * If we fail, we just return to the guest and try executing it again. |
541 | */ | 537 | */ |
542 | if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) { | 538 | if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) != |
543 | ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false); | 539 | EMULATE_DONE) |
544 | if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED) | 540 | return RESUME_GUEST; |
545 | return RESUME_GUEST; | ||
546 | vcpu->arch.last_inst = last_inst; | ||
547 | } | ||
548 | 541 | ||
549 | /* | 542 | /* |
550 | * WARNING: We do not know for sure whether the instruction we just | 543 | * WARNING: We do not know for sure whether the instruction we just |
@@ -558,7 +551,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
558 | * we just return and retry the instruction. | 551 | * we just return and retry the instruction. |
559 | */ | 552 | */ |
560 | 553 | ||
561 | if (instruction_is_store(kvmppc_get_last_inst(vcpu)) != !!is_store) | 554 | if (instruction_is_store(last_inst) != !!is_store) |
562 | return RESUME_GUEST; | 555 | return RESUME_GUEST; |
563 | 556 | ||
564 | /* | 557 | /* |
@@ -583,7 +576,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
583 | unsigned long ea, unsigned long dsisr) | 576 | unsigned long ea, unsigned long dsisr) |
584 | { | 577 | { |
585 | struct kvm *kvm = vcpu->kvm; | 578 | struct kvm *kvm = vcpu->kvm; |
586 | unsigned long *hptep, hpte[3], r; | 579 | unsigned long hpte[3], r; |
580 | __be64 *hptep; | ||
587 | unsigned long mmu_seq, psize, pte_size; | 581 | unsigned long mmu_seq, psize, pte_size; |
588 | unsigned long gpa_base, gfn_base; | 582 | unsigned long gpa_base, gfn_base; |
589 | unsigned long gpa, gfn, hva, pfn; | 583 | unsigned long gpa, gfn, hva, pfn; |
@@ -606,16 +600,16 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
606 | if (ea != vcpu->arch.pgfault_addr) | 600 | if (ea != vcpu->arch.pgfault_addr) |
607 | return RESUME_GUEST; | 601 | return RESUME_GUEST; |
608 | index = vcpu->arch.pgfault_index; | 602 | index = vcpu->arch.pgfault_index; |
609 | hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); | 603 | hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4)); |
610 | rev = &kvm->arch.revmap[index]; | 604 | rev = &kvm->arch.revmap[index]; |
611 | preempt_disable(); | 605 | preempt_disable(); |
612 | while (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) | 606 | while (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) |
613 | cpu_relax(); | 607 | cpu_relax(); |
614 | hpte[0] = hptep[0] & ~HPTE_V_HVLOCK; | 608 | hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK; |
615 | hpte[1] = hptep[1]; | 609 | hpte[1] = be64_to_cpu(hptep[1]); |
616 | hpte[2] = r = rev->guest_rpte; | 610 | hpte[2] = r = rev->guest_rpte; |
617 | asm volatile("lwsync" : : : "memory"); | 611 | asm volatile("lwsync" : : : "memory"); |
618 | hptep[0] = hpte[0]; | 612 | hptep[0] = cpu_to_be64(hpte[0]); |
619 | preempt_enable(); | 613 | preempt_enable(); |
620 | 614 | ||
621 | if (hpte[0] != vcpu->arch.pgfault_hpte[0] || | 615 | if (hpte[0] != vcpu->arch.pgfault_hpte[0] || |
@@ -731,8 +725,9 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
731 | preempt_disable(); | 725 | preempt_disable(); |
732 | while (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) | 726 | while (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) |
733 | cpu_relax(); | 727 | cpu_relax(); |
734 | if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] || | 728 | if ((be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK) != hpte[0] || |
735 | rev->guest_rpte != hpte[2]) | 729 | be64_to_cpu(hptep[1]) != hpte[1] || |
730 | rev->guest_rpte != hpte[2]) | ||
736 | /* HPTE has been changed under us; let the guest retry */ | 731 | /* HPTE has been changed under us; let the guest retry */ |
737 | goto out_unlock; | 732 | goto out_unlock; |
738 | hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID; | 733 | hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID; |
@@ -752,20 +747,20 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
752 | rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT; | 747 | rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT; |
753 | r &= rcbits | ~(HPTE_R_R | HPTE_R_C); | 748 | r &= rcbits | ~(HPTE_R_R | HPTE_R_C); |
754 | 749 | ||
755 | if (hptep[0] & HPTE_V_VALID) { | 750 | if (be64_to_cpu(hptep[0]) & HPTE_V_VALID) { |
756 | /* HPTE was previously valid, so we need to invalidate it */ | 751 | /* HPTE was previously valid, so we need to invalidate it */ |
757 | unlock_rmap(rmap); | 752 | unlock_rmap(rmap); |
758 | hptep[0] |= HPTE_V_ABSENT; | 753 | hptep[0] |= cpu_to_be64(HPTE_V_ABSENT); |
759 | kvmppc_invalidate_hpte(kvm, hptep, index); | 754 | kvmppc_invalidate_hpte(kvm, hptep, index); |
760 | /* don't lose previous R and C bits */ | 755 | /* don't lose previous R and C bits */ |
761 | r |= hptep[1] & (HPTE_R_R | HPTE_R_C); | 756 | r |= be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C); |
762 | } else { | 757 | } else { |
763 | kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0); | 758 | kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0); |
764 | } | 759 | } |
765 | 760 | ||
766 | hptep[1] = r; | 761 | hptep[1] = cpu_to_be64(r); |
767 | eieio(); | 762 | eieio(); |
768 | hptep[0] = hpte[0]; | 763 | hptep[0] = cpu_to_be64(hpte[0]); |
769 | asm volatile("ptesync" : : : "memory"); | 764 | asm volatile("ptesync" : : : "memory"); |
770 | preempt_enable(); | 765 | preempt_enable(); |
771 | if (page && hpte_is_writable(r)) | 766 | if (page && hpte_is_writable(r)) |
@@ -784,7 +779,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
784 | return ret; | 779 | return ret; |
785 | 780 | ||
786 | out_unlock: | 781 | out_unlock: |
787 | hptep[0] &= ~HPTE_V_HVLOCK; | 782 | hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK); |
788 | preempt_enable(); | 783 | preempt_enable(); |
789 | goto out_put; | 784 | goto out_put; |
790 | } | 785 | } |
@@ -860,7 +855,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, | |||
860 | { | 855 | { |
861 | struct revmap_entry *rev = kvm->arch.revmap; | 856 | struct revmap_entry *rev = kvm->arch.revmap; |
862 | unsigned long h, i, j; | 857 | unsigned long h, i, j; |
863 | unsigned long *hptep; | 858 | __be64 *hptep; |
864 | unsigned long ptel, psize, rcbits; | 859 | unsigned long ptel, psize, rcbits; |
865 | 860 | ||
866 | for (;;) { | 861 | for (;;) { |
@@ -876,11 +871,11 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, | |||
876 | * rmap chain lock. | 871 | * rmap chain lock. |
877 | */ | 872 | */ |
878 | i = *rmapp & KVMPPC_RMAP_INDEX; | 873 | i = *rmapp & KVMPPC_RMAP_INDEX; |
879 | hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4)); | 874 | hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4)); |
880 | if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { | 875 | if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { |
881 | /* unlock rmap before spinning on the HPTE lock */ | 876 | /* unlock rmap before spinning on the HPTE lock */ |
882 | unlock_rmap(rmapp); | 877 | unlock_rmap(rmapp); |
883 | while (hptep[0] & HPTE_V_HVLOCK) | 878 | while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK) |
884 | cpu_relax(); | 879 | cpu_relax(); |
885 | continue; | 880 | continue; |
886 | } | 881 | } |
@@ -899,14 +894,14 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, | |||
899 | 894 | ||
900 | /* Now check and modify the HPTE */ | 895 | /* Now check and modify the HPTE */ |
901 | ptel = rev[i].guest_rpte; | 896 | ptel = rev[i].guest_rpte; |
902 | psize = hpte_page_size(hptep[0], ptel); | 897 | psize = hpte_page_size(be64_to_cpu(hptep[0]), ptel); |
903 | if ((hptep[0] & HPTE_V_VALID) && | 898 | if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) && |
904 | hpte_rpn(ptel, psize) == gfn) { | 899 | hpte_rpn(ptel, psize) == gfn) { |
905 | if (kvm->arch.using_mmu_notifiers) | 900 | if (kvm->arch.using_mmu_notifiers) |
906 | hptep[0] |= HPTE_V_ABSENT; | 901 | hptep[0] |= cpu_to_be64(HPTE_V_ABSENT); |
907 | kvmppc_invalidate_hpte(kvm, hptep, i); | 902 | kvmppc_invalidate_hpte(kvm, hptep, i); |
908 | /* Harvest R and C */ | 903 | /* Harvest R and C */ |
909 | rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C); | 904 | rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C); |
910 | *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT; | 905 | *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT; |
911 | if (rcbits & ~rev[i].guest_rpte) { | 906 | if (rcbits & ~rev[i].guest_rpte) { |
912 | rev[i].guest_rpte = ptel | rcbits; | 907 | rev[i].guest_rpte = ptel | rcbits; |
@@ -914,7 +909,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, | |||
914 | } | 909 | } |
915 | } | 910 | } |
916 | unlock_rmap(rmapp); | 911 | unlock_rmap(rmapp); |
917 | hptep[0] &= ~HPTE_V_HVLOCK; | 912 | hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK); |
918 | } | 913 | } |
919 | return 0; | 914 | return 0; |
920 | } | 915 | } |
@@ -961,7 +956,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, | |||
961 | { | 956 | { |
962 | struct revmap_entry *rev = kvm->arch.revmap; | 957 | struct revmap_entry *rev = kvm->arch.revmap; |
963 | unsigned long head, i, j; | 958 | unsigned long head, i, j; |
964 | unsigned long *hptep; | 959 | __be64 *hptep; |
965 | int ret = 0; | 960 | int ret = 0; |
966 | 961 | ||
967 | retry: | 962 | retry: |
@@ -977,23 +972,24 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, | |||
977 | 972 | ||
978 | i = head = *rmapp & KVMPPC_RMAP_INDEX; | 973 | i = head = *rmapp & KVMPPC_RMAP_INDEX; |
979 | do { | 974 | do { |
980 | hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4)); | 975 | hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4)); |
981 | j = rev[i].forw; | 976 | j = rev[i].forw; |
982 | 977 | ||
983 | /* If this HPTE isn't referenced, ignore it */ | 978 | /* If this HPTE isn't referenced, ignore it */ |
984 | if (!(hptep[1] & HPTE_R_R)) | 979 | if (!(be64_to_cpu(hptep[1]) & HPTE_R_R)) |
985 | continue; | 980 | continue; |
986 | 981 | ||
987 | if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { | 982 | if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { |
988 | /* unlock rmap before spinning on the HPTE lock */ | 983 | /* unlock rmap before spinning on the HPTE lock */ |
989 | unlock_rmap(rmapp); | 984 | unlock_rmap(rmapp); |
990 | while (hptep[0] & HPTE_V_HVLOCK) | 985 | while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK) |
991 | cpu_relax(); | 986 | cpu_relax(); |
992 | goto retry; | 987 | goto retry; |
993 | } | 988 | } |
994 | 989 | ||
995 | /* Now check and modify the HPTE */ | 990 | /* Now check and modify the HPTE */ |
996 | if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) { | 991 | if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) && |
992 | (be64_to_cpu(hptep[1]) & HPTE_R_R)) { | ||
997 | kvmppc_clear_ref_hpte(kvm, hptep, i); | 993 | kvmppc_clear_ref_hpte(kvm, hptep, i); |
998 | if (!(rev[i].guest_rpte & HPTE_R_R)) { | 994 | if (!(rev[i].guest_rpte & HPTE_R_R)) { |
999 | rev[i].guest_rpte |= HPTE_R_R; | 995 | rev[i].guest_rpte |= HPTE_R_R; |
@@ -1001,7 +997,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, | |||
1001 | } | 997 | } |
1002 | ret = 1; | 998 | ret = 1; |
1003 | } | 999 | } |
1004 | hptep[0] &= ~HPTE_V_HVLOCK; | 1000 | hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK); |
1005 | } while ((i = j) != head); | 1001 | } while ((i = j) != head); |
1006 | 1002 | ||
1007 | unlock_rmap(rmapp); | 1003 | unlock_rmap(rmapp); |
@@ -1035,7 +1031,7 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, | |||
1035 | do { | 1031 | do { |
1036 | hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4)); | 1032 | hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4)); |
1037 | j = rev[i].forw; | 1033 | j = rev[i].forw; |
1038 | if (hp[1] & HPTE_R_R) | 1034 | if (be64_to_cpu(hp[1]) & HPTE_R_R) |
1039 | goto out; | 1035 | goto out; |
1040 | } while ((i = j) != head); | 1036 | } while ((i = j) != head); |
1041 | } | 1037 | } |
@@ -1075,7 +1071,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp) | |||
1075 | unsigned long head, i, j; | 1071 | unsigned long head, i, j; |
1076 | unsigned long n; | 1072 | unsigned long n; |
1077 | unsigned long v, r; | 1073 | unsigned long v, r; |
1078 | unsigned long *hptep; | 1074 | __be64 *hptep; |
1079 | int npages_dirty = 0; | 1075 | int npages_dirty = 0; |
1080 | 1076 | ||
1081 | retry: | 1077 | retry: |
@@ -1091,7 +1087,8 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp) | |||
1091 | 1087 | ||
1092 | i = head = *rmapp & KVMPPC_RMAP_INDEX; | 1088 | i = head = *rmapp & KVMPPC_RMAP_INDEX; |
1093 | do { | 1089 | do { |
1094 | hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4)); | 1090 | unsigned long hptep1; |
1091 | hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4)); | ||
1095 | j = rev[i].forw; | 1092 | j = rev[i].forw; |
1096 | 1093 | ||
1097 | /* | 1094 | /* |
@@ -1108,29 +1105,30 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp) | |||
1108 | * Otherwise we need to do the tlbie even if C==0 in | 1105 | * Otherwise we need to do the tlbie even if C==0 in |
1109 | * order to pick up any delayed writeback of C. | 1106 | * order to pick up any delayed writeback of C. |
1110 | */ | 1107 | */ |
1111 | if (!(hptep[1] & HPTE_R_C) && | 1108 | hptep1 = be64_to_cpu(hptep[1]); |
1112 | (!hpte_is_writable(hptep[1]) || vcpus_running(kvm))) | 1109 | if (!(hptep1 & HPTE_R_C) && |
1110 | (!hpte_is_writable(hptep1) || vcpus_running(kvm))) | ||
1113 | continue; | 1111 | continue; |
1114 | 1112 | ||
1115 | if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { | 1113 | if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { |
1116 | /* unlock rmap before spinning on the HPTE lock */ | 1114 | /* unlock rmap before spinning on the HPTE lock */ |
1117 | unlock_rmap(rmapp); | 1115 | unlock_rmap(rmapp); |
1118 | while (hptep[0] & HPTE_V_HVLOCK) | 1116 | while (hptep[0] & cpu_to_be64(HPTE_V_HVLOCK)) |
1119 | cpu_relax(); | 1117 | cpu_relax(); |
1120 | goto retry; | 1118 | goto retry; |
1121 | } | 1119 | } |
1122 | 1120 | ||
1123 | /* Now check and modify the HPTE */ | 1121 | /* Now check and modify the HPTE */ |
1124 | if (!(hptep[0] & HPTE_V_VALID)) | 1122 | if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID))) |
1125 | continue; | 1123 | continue; |
1126 | 1124 | ||
1127 | /* need to make it temporarily absent so C is stable */ | 1125 | /* need to make it temporarily absent so C is stable */ |
1128 | hptep[0] |= HPTE_V_ABSENT; | 1126 | hptep[0] |= cpu_to_be64(HPTE_V_ABSENT); |
1129 | kvmppc_invalidate_hpte(kvm, hptep, i); | 1127 | kvmppc_invalidate_hpte(kvm, hptep, i); |
1130 | v = hptep[0]; | 1128 | v = be64_to_cpu(hptep[0]); |
1131 | r = hptep[1]; | 1129 | r = be64_to_cpu(hptep[1]); |
1132 | if (r & HPTE_R_C) { | 1130 | if (r & HPTE_R_C) { |
1133 | hptep[1] = r & ~HPTE_R_C; | 1131 | hptep[1] = cpu_to_be64(r & ~HPTE_R_C); |
1134 | if (!(rev[i].guest_rpte & HPTE_R_C)) { | 1132 | if (!(rev[i].guest_rpte & HPTE_R_C)) { |
1135 | rev[i].guest_rpte |= HPTE_R_C; | 1133 | rev[i].guest_rpte |= HPTE_R_C; |
1136 | note_hpte_modification(kvm, &rev[i]); | 1134 | note_hpte_modification(kvm, &rev[i]); |
@@ -1143,7 +1141,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp) | |||
1143 | } | 1141 | } |
1144 | v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK); | 1142 | v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK); |
1145 | v |= HPTE_V_VALID; | 1143 | v |= HPTE_V_VALID; |
1146 | hptep[0] = v; | 1144 | hptep[0] = cpu_to_be64(v); |
1147 | } while ((i = j) != head); | 1145 | } while ((i = j) != head); |
1148 | 1146 | ||
1149 | unlock_rmap(rmapp); | 1147 | unlock_rmap(rmapp); |
@@ -1307,7 +1305,7 @@ struct kvm_htab_ctx { | |||
1307 | * Returns 1 if this HPT entry has been modified or has pending | 1305 | * Returns 1 if this HPT entry has been modified or has pending |
1308 | * R/C bit changes. | 1306 | * R/C bit changes. |
1309 | */ | 1307 | */ |
1310 | static int hpte_dirty(struct revmap_entry *revp, unsigned long *hptp) | 1308 | static int hpte_dirty(struct revmap_entry *revp, __be64 *hptp) |
1311 | { | 1309 | { |
1312 | unsigned long rcbits_unset; | 1310 | unsigned long rcbits_unset; |
1313 | 1311 | ||
@@ -1316,13 +1314,14 @@ static int hpte_dirty(struct revmap_entry *revp, unsigned long *hptp) | |||
1316 | 1314 | ||
1317 | /* Also need to consider changes in reference and changed bits */ | 1315 | /* Also need to consider changes in reference and changed bits */ |
1318 | rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C); | 1316 | rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C); |
1319 | if ((hptp[0] & HPTE_V_VALID) && (hptp[1] & rcbits_unset)) | 1317 | if ((be64_to_cpu(hptp[0]) & HPTE_V_VALID) && |
1318 | (be64_to_cpu(hptp[1]) & rcbits_unset)) | ||
1320 | return 1; | 1319 | return 1; |
1321 | 1320 | ||
1322 | return 0; | 1321 | return 0; |
1323 | } | 1322 | } |
1324 | 1323 | ||
1325 | static long record_hpte(unsigned long flags, unsigned long *hptp, | 1324 | static long record_hpte(unsigned long flags, __be64 *hptp, |
1326 | unsigned long *hpte, struct revmap_entry *revp, | 1325 | unsigned long *hpte, struct revmap_entry *revp, |
1327 | int want_valid, int first_pass) | 1326 | int want_valid, int first_pass) |
1328 | { | 1327 | { |
@@ -1337,10 +1336,10 @@ static long record_hpte(unsigned long flags, unsigned long *hptp, | |||
1337 | return 0; | 1336 | return 0; |
1338 | 1337 | ||
1339 | valid = 0; | 1338 | valid = 0; |
1340 | if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) { | 1339 | if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) { |
1341 | valid = 1; | 1340 | valid = 1; |
1342 | if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && | 1341 | if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && |
1343 | !(hptp[0] & HPTE_V_BOLTED)) | 1342 | !(be64_to_cpu(hptp[0]) & HPTE_V_BOLTED)) |
1344 | valid = 0; | 1343 | valid = 0; |
1345 | } | 1344 | } |
1346 | if (valid != want_valid) | 1345 | if (valid != want_valid) |
@@ -1352,7 +1351,7 @@ static long record_hpte(unsigned long flags, unsigned long *hptp, | |||
1352 | preempt_disable(); | 1351 | preempt_disable(); |
1353 | while (!try_lock_hpte(hptp, HPTE_V_HVLOCK)) | 1352 | while (!try_lock_hpte(hptp, HPTE_V_HVLOCK)) |
1354 | cpu_relax(); | 1353 | cpu_relax(); |
1355 | v = hptp[0]; | 1354 | v = be64_to_cpu(hptp[0]); |
1356 | 1355 | ||
1357 | /* re-evaluate valid and dirty from synchronized HPTE value */ | 1356 | /* re-evaluate valid and dirty from synchronized HPTE value */ |
1358 | valid = !!(v & HPTE_V_VALID); | 1357 | valid = !!(v & HPTE_V_VALID); |
@@ -1360,9 +1359,9 @@ static long record_hpte(unsigned long flags, unsigned long *hptp, | |||
1360 | 1359 | ||
1361 | /* Harvest R and C into guest view if necessary */ | 1360 | /* Harvest R and C into guest view if necessary */ |
1362 | rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C); | 1361 | rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C); |
1363 | if (valid && (rcbits_unset & hptp[1])) { | 1362 | if (valid && (rcbits_unset & be64_to_cpu(hptp[1]))) { |
1364 | revp->guest_rpte |= (hptp[1] & (HPTE_R_R | HPTE_R_C)) | | 1363 | revp->guest_rpte |= (be64_to_cpu(hptp[1]) & |
1365 | HPTE_GR_MODIFIED; | 1364 | (HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED; |
1366 | dirty = 1; | 1365 | dirty = 1; |
1367 | } | 1366 | } |
1368 | 1367 | ||
@@ -1381,13 +1380,13 @@ static long record_hpte(unsigned long flags, unsigned long *hptp, | |||
1381 | revp->guest_rpte = r; | 1380 | revp->guest_rpte = r; |
1382 | } | 1381 | } |
1383 | asm volatile(PPC_RELEASE_BARRIER "" : : : "memory"); | 1382 | asm volatile(PPC_RELEASE_BARRIER "" : : : "memory"); |
1384 | hptp[0] &= ~HPTE_V_HVLOCK; | 1383 | hptp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK); |
1385 | preempt_enable(); | 1384 | preempt_enable(); |
1386 | if (!(valid == want_valid && (first_pass || dirty))) | 1385 | if (!(valid == want_valid && (first_pass || dirty))) |
1387 | ok = 0; | 1386 | ok = 0; |
1388 | } | 1387 | } |
1389 | hpte[0] = v; | 1388 | hpte[0] = cpu_to_be64(v); |
1390 | hpte[1] = r; | 1389 | hpte[1] = cpu_to_be64(r); |
1391 | return ok; | 1390 | return ok; |
1392 | } | 1391 | } |
1393 | 1392 | ||
@@ -1397,7 +1396,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf, | |||
1397 | struct kvm_htab_ctx *ctx = file->private_data; | 1396 | struct kvm_htab_ctx *ctx = file->private_data; |
1398 | struct kvm *kvm = ctx->kvm; | 1397 | struct kvm *kvm = ctx->kvm; |
1399 | struct kvm_get_htab_header hdr; | 1398 | struct kvm_get_htab_header hdr; |
1400 | unsigned long *hptp; | 1399 | __be64 *hptp; |
1401 | struct revmap_entry *revp; | 1400 | struct revmap_entry *revp; |
1402 | unsigned long i, nb, nw; | 1401 | unsigned long i, nb, nw; |
1403 | unsigned long __user *lbuf; | 1402 | unsigned long __user *lbuf; |
@@ -1413,7 +1412,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf, | |||
1413 | flags = ctx->flags; | 1412 | flags = ctx->flags; |
1414 | 1413 | ||
1415 | i = ctx->index; | 1414 | i = ctx->index; |
1416 | hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); | 1415 | hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); |
1417 | revp = kvm->arch.revmap + i; | 1416 | revp = kvm->arch.revmap + i; |
1418 | lbuf = (unsigned long __user *)buf; | 1417 | lbuf = (unsigned long __user *)buf; |
1419 | 1418 | ||
@@ -1497,7 +1496,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf, | |||
1497 | unsigned long i, j; | 1496 | unsigned long i, j; |
1498 | unsigned long v, r; | 1497 | unsigned long v, r; |
1499 | unsigned long __user *lbuf; | 1498 | unsigned long __user *lbuf; |
1500 | unsigned long *hptp; | 1499 | __be64 *hptp; |
1501 | unsigned long tmp[2]; | 1500 | unsigned long tmp[2]; |
1502 | ssize_t nb; | 1501 | ssize_t nb; |
1503 | long int err, ret; | 1502 | long int err, ret; |
@@ -1539,7 +1538,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf, | |||
1539 | i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte) | 1538 | i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte) |
1540 | break; | 1539 | break; |
1541 | 1540 | ||
1542 | hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); | 1541 | hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); |
1543 | lbuf = (unsigned long __user *)buf; | 1542 | lbuf = (unsigned long __user *)buf; |
1544 | for (j = 0; j < hdr.n_valid; ++j) { | 1543 | for (j = 0; j < hdr.n_valid; ++j) { |
1545 | err = -EFAULT; | 1544 | err = -EFAULT; |
@@ -1551,7 +1550,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf, | |||
1551 | lbuf += 2; | 1550 | lbuf += 2; |
1552 | nb += HPTE_SIZE; | 1551 | nb += HPTE_SIZE; |
1553 | 1552 | ||
1554 | if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) | 1553 | if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) |
1555 | kvmppc_do_h_remove(kvm, 0, i, 0, tmp); | 1554 | kvmppc_do_h_remove(kvm, 0, i, 0, tmp); |
1556 | err = -EIO; | 1555 | err = -EIO; |
1557 | ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r, | 1556 | ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r, |
@@ -1577,7 +1576,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf, | |||
1577 | } | 1576 | } |
1578 | 1577 | ||
1579 | for (j = 0; j < hdr.n_invalid; ++j) { | 1578 | for (j = 0; j < hdr.n_invalid; ++j) { |
1580 | if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) | 1579 | if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) |
1581 | kvmppc_do_h_remove(kvm, 0, i, 0, tmp); | 1580 | kvmppc_do_h_remove(kvm, 0, i, 0, tmp); |
1582 | ++i; | 1581 | ++i; |
1583 | hptp += 2; | 1582 | hptp += 2; |
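Every change in this file follows one discipline: HPTEs are kept in memory in big-endian format regardless of host endianness, so the pointers become __be64 * and each load/store is byte-swapped, while pure flag masking can use a constant swapped once. A condensed, illustrative sketch of the pattern (not a function from the patch):

static void example_hpte_update(__be64 *hptep, unsigned long extra_r_bits)
{
	/* Loads: convert from HPT byte order to host order. */
	unsigned long v = be64_to_cpu(hptep[0]);
	unsigned long r = be64_to_cpu(hptep[1]) | extra_r_bits;

	/* Stores: convert back; second dword before the first, as above. */
	hptep[1] = cpu_to_be64(r);
	eieio();
	hptep[0] = cpu_to_be64(v);
}

/* Pure flag masking avoids the round trip by swapping the constant once: */
static void example_hpte_unlock(__be64 *hptep)
{
	hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
}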
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 3f295269af37..5a2bc4b0dfe5 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -439,12 +439,6 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) | |||
439 | (mfmsr() & MSR_HV)) | 439 | (mfmsr() & MSR_HV)) |
440 | vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; | 440 | vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; |
441 | break; | 441 | break; |
442 | case SPRN_PURR: | ||
443 | to_book3s(vcpu)->purr_offset = spr_val - get_tb(); | ||
444 | break; | ||
445 | case SPRN_SPURR: | ||
446 | to_book3s(vcpu)->spurr_offset = spr_val - get_tb(); | ||
447 | break; | ||
448 | case SPRN_GQR0: | 442 | case SPRN_GQR0: |
449 | case SPRN_GQR1: | 443 | case SPRN_GQR1: |
450 | case SPRN_GQR2: | 444 | case SPRN_GQR2: |
@@ -455,10 +449,10 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) | |||
455 | case SPRN_GQR7: | 449 | case SPRN_GQR7: |
456 | to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val; | 450 | to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val; |
457 | break; | 451 | break; |
452 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
458 | case SPRN_FSCR: | 453 | case SPRN_FSCR: |
459 | vcpu->arch.fscr = spr_val; | 454 | kvmppc_set_fscr(vcpu, spr_val); |
460 | break; | 455 | break; |
461 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
462 | case SPRN_BESCR: | 456 | case SPRN_BESCR: |
463 | vcpu->arch.bescr = spr_val; | 457 | vcpu->arch.bescr = spr_val; |
464 | break; | 458 | break; |
@@ -572,10 +566,22 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val | |||
572 | *spr_val = 0; | 566 | *spr_val = 0; |
573 | break; | 567 | break; |
574 | case SPRN_PURR: | 568 | case SPRN_PURR: |
575 | *spr_val = get_tb() + to_book3s(vcpu)->purr_offset; | 569 | /* |
570 | * On exit we would have updated purr | ||
571 | */ | ||
572 | *spr_val = vcpu->arch.purr; | ||
576 | break; | 573 | break; |
577 | case SPRN_SPURR: | 574 | case SPRN_SPURR: |
578 | *spr_val = get_tb() + to_book3s(vcpu)->purr_offset; | 575 | /* |
576 | * On exit we would have updated spurr | ||
577 | */ | ||
578 | *spr_val = vcpu->arch.spurr; | ||
579 | break; | ||
580 | case SPRN_VTB: | ||
581 | *spr_val = vcpu->arch.vtb; | ||
582 | break; | ||
583 | case SPRN_IC: | ||
584 | *spr_val = vcpu->arch.ic; | ||
579 | break; | 585 | break; |
580 | case SPRN_GQR0: | 586 | case SPRN_GQR0: |
581 | case SPRN_GQR1: | 587 | case SPRN_GQR1: |
@@ -587,10 +593,10 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val | |||
587 | case SPRN_GQR7: | 593 | case SPRN_GQR7: |
588 | *spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]; | 594 | *spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]; |
589 | break; | 595 | break; |
596 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
590 | case SPRN_FSCR: | 597 | case SPRN_FSCR: |
591 | *spr_val = vcpu->arch.fscr; | 598 | *spr_val = vcpu->arch.fscr; |
592 | break; | 599 | break; |
593 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
594 | case SPRN_BESCR: | 600 | case SPRN_BESCR: |
595 | *spr_val = vcpu->arch.bescr; | 601 | *spr_val = vcpu->arch.bescr; |
596 | break; | 602 | break; |
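With the purr_offset/spurr_offset bookkeeping removed, the PR mfspr emulation above reads vcpu->arch.purr, spurr, vtb and ic directly, which only works if those fields are refreshed on every guest exit. Roughly how such accumulation might look; the entry_* field names are assumptions, since the exit-path change is not part of this hunk:

static void example_account_timing_regs(struct kvm_vcpu *vcpu)
{
	/* Credit the guest with the time it actually ran since entry. */
	vcpu->arch.purr  += mfspr(SPRN_PURR)  - vcpu->arch.entry_purr;
	vcpu->arch.spurr += mfspr(SPRN_SPURR) - vcpu->arch.entry_spurr;
	vcpu->arch.vtb   += mfspr(SPRN_VTB)   - vcpu->arch.entry_vtb;
}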
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 7a12edbb61e7..27cced9c7249 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -35,6 +35,7 @@ | |||
35 | 35 | ||
36 | #include <asm/reg.h> | 36 | #include <asm/reg.h> |
37 | #include <asm/cputable.h> | 37 | #include <asm/cputable.h> |
38 | #include <asm/cache.h> | ||
38 | #include <asm/cacheflush.h> | 39 | #include <asm/cacheflush.h> |
39 | #include <asm/tlbflush.h> | 40 | #include <asm/tlbflush.h> |
40 | #include <asm/uaccess.h> | 41 | #include <asm/uaccess.h> |
@@ -67,6 +68,15 @@ | |||
67 | /* Used as a "null" value for timebase values */ | 68 | /* Used as a "null" value for timebase values */ |
68 | #define TB_NIL (~(u64)0) | 69 | #define TB_NIL (~(u64)0) |
69 | 70 | ||
71 | static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1); | ||
72 | |||
73 | #if defined(CONFIG_PPC_64K_PAGES) | ||
74 | #define MPP_BUFFER_ORDER 0 | ||
75 | #elif defined(CONFIG_PPC_4K_PAGES) | ||
76 | #define MPP_BUFFER_ORDER 3 | ||
77 | #endif | ||
78 | |||
79 | |||
70 | static void kvmppc_end_cede(struct kvm_vcpu *vcpu); | 80 | static void kvmppc_end_cede(struct kvm_vcpu *vcpu); |
71 | static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); | 81 | static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); |
72 | 82 | ||
@@ -270,7 +280,7 @@ struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id) | |||
270 | static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) | 280 | static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) |
271 | { | 281 | { |
272 | vpa->__old_status |= LPPACA_OLD_SHARED_PROC; | 282 | vpa->__old_status |= LPPACA_OLD_SHARED_PROC; |
273 | vpa->yield_count = 1; | 283 | vpa->yield_count = cpu_to_be32(1); |
274 | } | 284 | } |
275 | 285 | ||
276 | static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v, | 286 | static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v, |
@@ -293,8 +303,8 @@ static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v, | |||
293 | struct reg_vpa { | 303 | struct reg_vpa { |
294 | u32 dummy; | 304 | u32 dummy; |
295 | union { | 305 | union { |
296 | u16 hword; | 306 | __be16 hword; |
297 | u32 word; | 307 | __be32 word; |
298 | } length; | 308 | } length; |
299 | }; | 309 | }; |
300 | 310 | ||
@@ -333,9 +343,9 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, | |||
333 | if (va == NULL) | 343 | if (va == NULL) |
334 | return H_PARAMETER; | 344 | return H_PARAMETER; |
335 | if (subfunc == H_VPA_REG_VPA) | 345 | if (subfunc == H_VPA_REG_VPA) |
336 | len = ((struct reg_vpa *)va)->length.hword; | 346 | len = be16_to_cpu(((struct reg_vpa *)va)->length.hword); |
337 | else | 347 | else |
338 | len = ((struct reg_vpa *)va)->length.word; | 348 | len = be32_to_cpu(((struct reg_vpa *)va)->length.word); |
339 | kvmppc_unpin_guest_page(kvm, va, vpa, false); | 349 | kvmppc_unpin_guest_page(kvm, va, vpa, false); |
340 | 350 | ||
341 | /* Check length */ | 351 | /* Check length */ |
@@ -540,21 +550,63 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, | |||
540 | return; | 550 | return; |
541 | memset(dt, 0, sizeof(struct dtl_entry)); | 551 | memset(dt, 0, sizeof(struct dtl_entry)); |
542 | dt->dispatch_reason = 7; | 552 | dt->dispatch_reason = 7; |
543 | dt->processor_id = vc->pcpu + vcpu->arch.ptid; | 553 | dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid); |
544 | dt->timebase = now + vc->tb_offset; | 554 | dt->timebase = cpu_to_be64(now + vc->tb_offset); |
545 | dt->enqueue_to_dispatch_time = stolen; | 555 | dt->enqueue_to_dispatch_time = cpu_to_be32(stolen); |
546 | dt->srr0 = kvmppc_get_pc(vcpu); | 556 | dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu)); |
547 | dt->srr1 = vcpu->arch.shregs.msr; | 557 | dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); |
548 | ++dt; | 558 | ++dt; |
549 | if (dt == vcpu->arch.dtl.pinned_end) | 559 | if (dt == vcpu->arch.dtl.pinned_end) |
550 | dt = vcpu->arch.dtl.pinned_addr; | 560 | dt = vcpu->arch.dtl.pinned_addr; |
551 | vcpu->arch.dtl_ptr = dt; | 561 | vcpu->arch.dtl_ptr = dt; |
552 | /* order writing *dt vs. writing vpa->dtl_idx */ | 562 | /* order writing *dt vs. writing vpa->dtl_idx */ |
553 | smp_wmb(); | 563 | smp_wmb(); |
554 | vpa->dtl_idx = ++vcpu->arch.dtl_index; | 564 | vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); |
555 | vcpu->arch.dtl.dirty = true; | 565 | vcpu->arch.dtl.dirty = true; |
556 | } | 566 | } |
557 | 567 | ||
568 | static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu) | ||
569 | { | ||
570 | if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207) | ||
571 | return true; | ||
572 | if ((!vcpu->arch.vcore->arch_compat) && | ||
573 | cpu_has_feature(CPU_FTR_ARCH_207S)) | ||
574 | return true; | ||
575 | return false; | ||
576 | } | ||
577 | |||
578 | static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags, | ||
579 | unsigned long resource, unsigned long value1, | ||
580 | unsigned long value2) | ||
581 | { | ||
582 | switch (resource) { | ||
583 | case H_SET_MODE_RESOURCE_SET_CIABR: | ||
584 | if (!kvmppc_power8_compatible(vcpu)) | ||
585 | return H_P2; | ||
586 | if (value2) | ||
587 | return H_P4; | ||
588 | if (mflags) | ||
589 | return H_UNSUPPORTED_FLAG_START; | ||
590 | /* Guests can't breakpoint the hypervisor */ | ||
591 | if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER) | ||
592 | return H_P3; | ||
593 | vcpu->arch.ciabr = value1; | ||
594 | return H_SUCCESS; | ||
595 | case H_SET_MODE_RESOURCE_SET_DAWR: | ||
596 | if (!kvmppc_power8_compatible(vcpu)) | ||
597 | return H_P2; | ||
598 | if (mflags) | ||
599 | return H_UNSUPPORTED_FLAG_START; | ||
600 | if (value2 & DABRX_HYP) | ||
601 | return H_P4; | ||
602 | vcpu->arch.dawr = value1; | ||
603 | vcpu->arch.dawrx = value2; | ||
604 | return H_SUCCESS; | ||
605 | default: | ||
606 | return H_TOO_HARD; | ||
607 | } | ||
608 | } | ||
609 | |||
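kvmppc_h_set_mode() receives (mflags, resource, value1, value2) from GPRs 4-7 via the dispatch added further down. A hypothetical guest-side caller for the CIABR resource, matching the constraints the handler enforces (mflags and value2 must both be zero); the actual pseries wrapper may differ:

static long example_guest_set_ciabr(unsigned long ciabr)
{
	return plpar_hcall_norets(H_SET_MODE, 0UL /* mflags */,
				  H_SET_MODE_RESOURCE_SET_CIABR,
				  ciabr, 0UL /* value2 */);
}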
558 | int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) | 610 | int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) |
559 | { | 611 | { |
560 | unsigned long req = kvmppc_get_gpr(vcpu, 3); | 612 | unsigned long req = kvmppc_get_gpr(vcpu, 3); |
@@ -562,6 +614,10 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) | |||
562 | struct kvm_vcpu *tvcpu; | 614 | struct kvm_vcpu *tvcpu; |
563 | int idx, rc; | 615 | int idx, rc; |
564 | 616 | ||
617 | if (req <= MAX_HCALL_OPCODE && | ||
618 | !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) | ||
619 | return RESUME_HOST; | ||
620 | |||
565 | switch (req) { | 621 | switch (req) { |
566 | case H_ENTER: | 622 | case H_ENTER: |
567 | idx = srcu_read_lock(&vcpu->kvm->srcu); | 623 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
@@ -620,7 +676,14 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) | |||
620 | 676 | ||
621 | /* Send the error out to userspace via KVM_RUN */ | 677 | /* Send the error out to userspace via KVM_RUN */ |
622 | return rc; | 678 | return rc; |
623 | 679 | case H_SET_MODE: | |
680 | ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4), | ||
681 | kvmppc_get_gpr(vcpu, 5), | ||
682 | kvmppc_get_gpr(vcpu, 6), | ||
683 | kvmppc_get_gpr(vcpu, 7)); | ||
684 | if (ret == H_TOO_HARD) | ||
685 | return RESUME_HOST; | ||
686 | break; | ||
624 | case H_XIRR: | 687 | case H_XIRR: |
625 | case H_CPPR: | 688 | case H_CPPR: |
626 | case H_EOI: | 689 | case H_EOI: |
@@ -639,6 +702,29 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) | |||
639 | return RESUME_GUEST; | 702 | return RESUME_GUEST; |
640 | } | 703 | } |
641 | 704 | ||
705 | static int kvmppc_hcall_impl_hv(unsigned long cmd) | ||
706 | { | ||
707 | switch (cmd) { | ||
708 | case H_CEDE: | ||
709 | case H_PROD: | ||
710 | case H_CONFER: | ||
711 | case H_REGISTER_VPA: | ||
712 | case H_SET_MODE: | ||
713 | #ifdef CONFIG_KVM_XICS | ||
714 | case H_XIRR: | ||
715 | case H_CPPR: | ||
716 | case H_EOI: | ||
717 | case H_IPI: | ||
718 | case H_IPOLL: | ||
719 | case H_XIRR_X: | ||
720 | #endif | ||
721 | return 1; | ||
722 | } | ||
723 | |||
724 | /* See if it's in the real-mode table */ | ||
725 | return kvmppc_hcall_impl_hv_realmode(cmd); | ||
726 | } | ||
727 | |||
642 | static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, | 728 | static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, |
643 | struct task_struct *tsk) | 729 | struct task_struct *tsk) |
644 | { | 730 | { |
@@ -785,7 +871,8 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu, | |||
785 | return 0; | 871 | return 0; |
786 | } | 872 | } |
787 | 873 | ||
788 | static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr) | 874 | static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, |
875 | bool preserve_top32) | ||
789 | { | 876 | { |
790 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | 877 | struct kvmppc_vcore *vc = vcpu->arch.vcore; |
791 | u64 mask; | 878 | u64 mask; |
@@ -820,6 +907,10 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr) | |||
820 | mask = LPCR_DPFD | LPCR_ILE | LPCR_TC; | 907 | mask = LPCR_DPFD | LPCR_ILE | LPCR_TC; |
821 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) | 908 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) |
822 | mask |= LPCR_AIL; | 909 | mask |= LPCR_AIL; |
910 | |||
911 | /* Broken 32-bit version of LPCR must not clear top bits */ | ||
912 | if (preserve_top32) | ||
913 | mask &= 0xFFFFFFFF; | ||
823 | vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); | 914 | vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); |
824 | spin_unlock(&vc->lock); | 915 | spin_unlock(&vc->lock); |
825 | } | 916 | } |
@@ -894,12 +985,6 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, | |||
894 | case KVM_REG_PPC_CIABR: | 985 | case KVM_REG_PPC_CIABR: |
895 | *val = get_reg_val(id, vcpu->arch.ciabr); | 986 | *val = get_reg_val(id, vcpu->arch.ciabr); |
896 | break; | 987 | break; |
897 | case KVM_REG_PPC_IC: | ||
898 | *val = get_reg_val(id, vcpu->arch.ic); | ||
899 | break; | ||
900 | case KVM_REG_PPC_VTB: | ||
901 | *val = get_reg_val(id, vcpu->arch.vtb); | ||
902 | break; | ||
903 | case KVM_REG_PPC_CSIGR: | 988 | case KVM_REG_PPC_CSIGR: |
904 | *val = get_reg_val(id, vcpu->arch.csigr); | 989 | *val = get_reg_val(id, vcpu->arch.csigr); |
905 | break; | 990 | break; |
@@ -939,6 +1024,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, | |||
939 | *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); | 1024 | *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); |
940 | break; | 1025 | break; |
941 | case KVM_REG_PPC_LPCR: | 1026 | case KVM_REG_PPC_LPCR: |
1027 | case KVM_REG_PPC_LPCR_64: | ||
942 | *val = get_reg_val(id, vcpu->arch.vcore->lpcr); | 1028 | *val = get_reg_val(id, vcpu->arch.vcore->lpcr); |
943 | break; | 1029 | break; |
944 | case KVM_REG_PPC_PPR: | 1030 | case KVM_REG_PPC_PPR: |
@@ -1094,12 +1180,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, | |||
1094 | if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) | 1180 | if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) |
1095 | vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ | 1181 | vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ |
1096 | break; | 1182 | break; |
1097 | case KVM_REG_PPC_IC: | ||
1098 | vcpu->arch.ic = set_reg_val(id, *val); | ||
1099 | break; | ||
1100 | case KVM_REG_PPC_VTB: | ||
1101 | vcpu->arch.vtb = set_reg_val(id, *val); | ||
1102 | break; | ||
1103 | case KVM_REG_PPC_CSIGR: | 1183 | case KVM_REG_PPC_CSIGR: |
1104 | vcpu->arch.csigr = set_reg_val(id, *val); | 1184 | vcpu->arch.csigr = set_reg_val(id, *val); |
1105 | break; | 1185 | break; |
@@ -1150,7 +1230,10 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, | |||
1150 | ALIGN(set_reg_val(id, *val), 1UL << 24); | 1230 | ALIGN(set_reg_val(id, *val), 1UL << 24); |
1151 | break; | 1231 | break; |
1152 | case KVM_REG_PPC_LPCR: | 1232 | case KVM_REG_PPC_LPCR: |
1153 | kvmppc_set_lpcr(vcpu, set_reg_val(id, *val)); | 1233 | kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true); |
1234 | break; | ||
1235 | case KVM_REG_PPC_LPCR_64: | ||
1236 | kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false); | ||
1154 | break; | 1237 | break; |
1155 | case KVM_REG_PPC_PPR: | 1238 | case KVM_REG_PPC_PPR: |
1156 | vcpu->arch.ppr = set_reg_val(id, *val); | 1239 | vcpu->arch.ppr = set_reg_val(id, *val); |
@@ -1228,6 +1311,33 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, | |||
1228 | return r; | 1311 | return r; |
1229 | } | 1312 | } |
1230 | 1313 | ||
1314 | static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core) | ||
1315 | { | ||
1316 | struct kvmppc_vcore *vcore; | ||
1317 | |||
1318 | vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL); | ||
1319 | |||
1320 | if (vcore == NULL) | ||
1321 | return NULL; | ||
1322 | |||
1323 | INIT_LIST_HEAD(&vcore->runnable_threads); | ||
1324 | spin_lock_init(&vcore->lock); | ||
1325 | init_waitqueue_head(&vcore->wq); | ||
1326 | vcore->preempt_tb = TB_NIL; | ||
1327 | vcore->lpcr = kvm->arch.lpcr; | ||
1328 | vcore->first_vcpuid = core * threads_per_subcore; | ||
1329 | vcore->kvm = kvm; | ||
1330 | |||
1331 | vcore->mpp_buffer_is_valid = false; | ||
1332 | |||
1333 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) | ||
1334 | vcore->mpp_buffer = (void *)__get_free_pages( | ||
1335 | GFP_KERNEL|__GFP_ZERO, | ||
1336 | MPP_BUFFER_ORDER); | ||
1337 | |||
1338 | return vcore; | ||
1339 | } | ||
1340 | |||
1231 | static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, | 1341 | static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, |
1232 | unsigned int id) | 1342 | unsigned int id) |
1233 | { | 1343 | { |
@@ -1279,16 +1389,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, | |||
1279 | mutex_lock(&kvm->lock); | 1389 | mutex_lock(&kvm->lock); |
1280 | vcore = kvm->arch.vcores[core]; | 1390 | vcore = kvm->arch.vcores[core]; |
1281 | if (!vcore) { | 1391 | if (!vcore) { |
1282 | vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL); | 1392 | vcore = kvmppc_vcore_create(kvm, core); |
1283 | if (vcore) { | ||
1284 | INIT_LIST_HEAD(&vcore->runnable_threads); | ||
1285 | spin_lock_init(&vcore->lock); | ||
1286 | init_waitqueue_head(&vcore->wq); | ||
1287 | vcore->preempt_tb = TB_NIL; | ||
1288 | vcore->lpcr = kvm->arch.lpcr; | ||
1289 | vcore->first_vcpuid = core * threads_per_subcore; | ||
1290 | vcore->kvm = kvm; | ||
1291 | } | ||
1292 | kvm->arch.vcores[core] = vcore; | 1393 | kvm->arch.vcores[core] = vcore; |
1293 | kvm->arch.online_vcores++; | 1394 | kvm->arch.online_vcores++; |
1294 | } | 1395 | } |
@@ -1500,6 +1601,33 @@ static int on_primary_thread(void) | |||
1500 | return 1; | 1601 | return 1; |
1501 | } | 1602 | } |
1502 | 1603 | ||
1604 | static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc) | ||
1605 | { | ||
1606 | phys_addr_t phy_addr, mpp_addr; | ||
1607 | |||
1608 | phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer); | ||
1609 | mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK; | ||
1610 | |||
1611 | mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT); | ||
1612 | logmpp(mpp_addr | PPC_LOGMPP_LOG_L2); | ||
1613 | |||
1614 | vc->mpp_buffer_is_valid = true; | ||
1615 | } | ||
1616 | |||
1617 | static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc) | ||
1618 | { | ||
1619 | phys_addr_t phy_addr, mpp_addr; | ||
1620 | |||
1621 | phy_addr = virt_to_phys(vc->mpp_buffer); | ||
1622 | mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK; | ||
1623 | |||
1624 | /* We must abort any in-progress save operations to ensure | ||
1625 | * the table is valid so that prefetch engine knows when to | ||
1626 | * stop prefetching. */ | ||
1627 | logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT); | ||
1628 | mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE); | ||
1629 | } | ||
1630 | |||
1503 | /* | 1631 | /* |
1504 | * Run a set of guest threads on a physical core. | 1632 | * Run a set of guest threads on a physical core. |
1505 | * Called with vc->lock held. | 1633 | * Called with vc->lock held. |
@@ -1577,9 +1705,16 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc) | |||
1577 | 1705 | ||
1578 | srcu_idx = srcu_read_lock(&vc->kvm->srcu); | 1706 | srcu_idx = srcu_read_lock(&vc->kvm->srcu); |
1579 | 1707 | ||
1708 | if (vc->mpp_buffer_is_valid) | ||
1709 | kvmppc_start_restoring_l2_cache(vc); | ||
1710 | |||
1580 | __kvmppc_vcore_entry(); | 1711 | __kvmppc_vcore_entry(); |
1581 | 1712 | ||
1582 | spin_lock(&vc->lock); | 1713 | spin_lock(&vc->lock); |
1714 | |||
1715 | if (vc->mpp_buffer) | ||
1716 | kvmppc_start_saving_l2_cache(vc); | ||
1717 | |||
1583 | /* disable sending of IPIs on virtual external irqs */ | 1718 | /* disable sending of IPIs on virtual external irqs */ |
1584 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) | 1719 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) |
1585 | vcpu->cpu = -1; | 1720 | vcpu->cpu = -1; |
@@ -1929,12 +2064,6 @@ static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps, | |||
1929 | (*sps)->page_shift = def->shift; | 2064 | (*sps)->page_shift = def->shift; |
1930 | (*sps)->slb_enc = def->sllp; | 2065 | (*sps)->slb_enc = def->sllp; |
1931 | (*sps)->enc[0].page_shift = def->shift; | 2066 | (*sps)->enc[0].page_shift = def->shift; |
1932 | /* | ||
1933 | * Only return base page encoding. We don't want to return | ||
1934 | * all the supporting pte_enc, because our H_ENTER doesn't | ||
1935 | * support MPSS yet. Once they do, we can start passing all | ||
1936 | * support pte_enc here | ||
1937 | */ | ||
1938 | (*sps)->enc[0].pte_enc = def->penc[linux_psize]; | 2067 | (*sps)->enc[0].pte_enc = def->penc[linux_psize]; |
1939 | /* | 2068 | /* |
1940 | * Add 16MB MPSS support if host supports it | 2069 | * Add 16MB MPSS support if host supports it |
@@ -2281,6 +2410,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) | |||
2281 | */ | 2410 | */ |
2282 | cpumask_setall(&kvm->arch.need_tlb_flush); | 2411 | cpumask_setall(&kvm->arch.need_tlb_flush); |
2283 | 2412 | ||
2413 | /* Start out with the default set of hcalls enabled */ | ||
2414 | memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, | ||
2415 | sizeof(kvm->arch.enabled_hcalls)); | ||
2416 | |||
2284 | kvm->arch.rma = NULL; | 2417 | kvm->arch.rma = NULL; |
2285 | 2418 | ||
2286 | kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); | 2419 | kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); |
@@ -2323,8 +2456,14 @@ static void kvmppc_free_vcores(struct kvm *kvm) | |||
2323 | { | 2456 | { |
2324 | long int i; | 2457 | long int i; |
2325 | 2458 | ||
2326 | for (i = 0; i < KVM_MAX_VCORES; ++i) | 2459 | for (i = 0; i < KVM_MAX_VCORES; ++i) { |
2460 | if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) { | ||
2461 | struct kvmppc_vcore *vc = kvm->arch.vcores[i]; | ||
2462 | free_pages((unsigned long)vc->mpp_buffer, | ||
2463 | MPP_BUFFER_ORDER); | ||
2464 | } | ||
2327 | kfree(kvm->arch.vcores[i]); | 2465 | kfree(kvm->arch.vcores[i]); |
2466 | } | ||
2328 | kvm->arch.online_vcores = 0; | 2467 | kvm->arch.online_vcores = 0; |
2329 | } | 2468 | } |
2330 | 2469 | ||
@@ -2419,6 +2558,49 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp, | |||
2419 | return r; | 2558 | return r; |
2420 | } | 2559 | } |
2421 | 2560 | ||
2561 | /* | ||
2562 | * List of hcall numbers to enable by default. | ||
2563 | * For compatibility with old userspace, we enable by default | ||
2564 | * all hcalls that were implemented before the hcall-enabling | ||
2565 | * facility was added. Note this list should not include H_RTAS. | ||
2566 | */ | ||
2567 | static unsigned int default_hcall_list[] = { | ||
2568 | H_REMOVE, | ||
2569 | H_ENTER, | ||
2570 | H_READ, | ||
2571 | H_PROTECT, | ||
2572 | H_BULK_REMOVE, | ||
2573 | H_GET_TCE, | ||
2574 | H_PUT_TCE, | ||
2575 | H_SET_DABR, | ||
2576 | H_SET_XDABR, | ||
2577 | H_CEDE, | ||
2578 | H_PROD, | ||
2579 | H_CONFER, | ||
2580 | H_REGISTER_VPA, | ||
2581 | #ifdef CONFIG_KVM_XICS | ||
2582 | H_EOI, | ||
2583 | H_CPPR, | ||
2584 | H_IPI, | ||
2585 | H_IPOLL, | ||
2586 | H_XIRR, | ||
2587 | H_XIRR_X, | ||
2588 | #endif | ||
2589 | 0 | ||
2590 | }; | ||
2591 | |||
2592 | static void init_default_hcalls(void) | ||
2593 | { | ||
2594 | int i; | ||
2595 | unsigned int hcall; | ||
2596 | |||
2597 | for (i = 0; default_hcall_list[i]; ++i) { | ||
2598 | hcall = default_hcall_list[i]; | ||
2599 | WARN_ON(!kvmppc_hcall_impl_hv(hcall)); | ||
2600 | __set_bit(hcall / 4, default_enabled_hcalls); | ||
2601 | } | ||
2602 | } | ||
2603 | |||
2422 | static struct kvmppc_ops kvm_ops_hv = { | 2604 | static struct kvmppc_ops kvm_ops_hv = { |
2423 | .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, | 2605 | .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, |
2424 | .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, | 2606 | .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, |
@@ -2451,6 +2633,7 @@ static struct kvmppc_ops kvm_ops_hv = { | |||
2451 | .emulate_mfspr = kvmppc_core_emulate_mfspr_hv, | 2633 | .emulate_mfspr = kvmppc_core_emulate_mfspr_hv, |
2452 | .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv, | 2634 | .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv, |
2453 | .arch_vm_ioctl = kvm_arch_vm_ioctl_hv, | 2635 | .arch_vm_ioctl = kvm_arch_vm_ioctl_hv, |
2636 | .hcall_implemented = kvmppc_hcall_impl_hv, | ||
2454 | }; | 2637 | }; |
2455 | 2638 | ||
2456 | static int kvmppc_book3s_init_hv(void) | 2639 | static int kvmppc_book3s_init_hv(void) |
@@ -2466,6 +2649,8 @@ static int kvmppc_book3s_init_hv(void) | |||
2466 | kvm_ops_hv.owner = THIS_MODULE; | 2649 | kvm_ops_hv.owner = THIS_MODULE; |
2467 | kvmppc_hv_ops = &kvm_ops_hv; | 2650 | kvmppc_hv_ops = &kvm_ops_hv; |
2468 | 2651 | ||
2652 | init_default_hcalls(); | ||
2653 | |||
2469 | r = kvmppc_mmu_hv_init(); | 2654 | r = kvmppc_mmu_hv_init(); |
2470 | return r; | 2655 | return r; |
2471 | } | 2656 | } |
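The per-VM hcall enablement added in this file ties together three pieces: the early test_bit() check in kvmppc_pseries_do_hcall(), the default_hcall_list[] copied into each new VM, and kvmppc_hcall_impl_hv() exported through the new .hcall_implemented op. sPAPR hcall numbers are multiples of 4, hence the opcode/4 bitmap index. A compact restatement of the gating logic (a sketch mirroring the code above):

/* Should this hcall be handled in the kernel for this VM? */
static bool example_hcall_allowed(struct kvm *kvm, unsigned long req)
{
	if (req > MAX_HCALL_OPCODE)
		return true;	/* out-of-range requests fall through to the switch as before */
	return test_bit(req / 4, kvm->arch.enabled_hcalls);
}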
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 7cde8a665205..3b41447482e5 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -212,3 +212,16 @@ bool kvm_hv_mode_active(void) | |||
212 | { | 212 | { |
213 | return atomic_read(&hv_vm_count) != 0; | 213 | return atomic_read(&hv_vm_count) != 0; |
214 | } | 214 | } |
215 | |||
216 | extern int hcall_real_table[], hcall_real_table_end[]; | ||
217 | |||
218 | int kvmppc_hcall_impl_hv_realmode(unsigned long cmd) | ||
219 | { | ||
220 | cmd /= 4; | ||
221 | if (cmd < hcall_real_table_end - hcall_real_table && | ||
222 | hcall_real_table[cmd]) | ||
223 | return 1; | ||
224 | |||
225 | return 0; | ||
226 | } | ||
227 | EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode); | ||
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 3a5c568b1e89..d562c8e2bc30 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -45,14 +45,14 @@ static void reload_slb(struct kvm_vcpu *vcpu) | |||
45 | return; | 45 | return; |
46 | 46 | ||
47 | /* Sanity check */ | 47 | /* Sanity check */ |
48 | n = min_t(u32, slb->persistent, SLB_MIN_SIZE); | 48 | n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE); |
49 | if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end) | 49 | if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end) |
50 | return; | 50 | return; |
51 | 51 | ||
52 | /* Load up the SLB from that */ | 52 | /* Load up the SLB from that */ |
53 | for (i = 0; i < n; ++i) { | 53 | for (i = 0; i < n; ++i) { |
54 | unsigned long rb = slb->save_area[i].esid; | 54 | unsigned long rb = be64_to_cpu(slb->save_area[i].esid); |
55 | unsigned long rs = slb->save_area[i].vsid; | 55 | unsigned long rs = be64_to_cpu(slb->save_area[i].vsid); |
56 | 56 | ||
57 | rb = (rb & ~0xFFFul) | i; /* insert entry number */ | 57 | rb = (rb & ~0xFFFul) | i; /* insert entry number */ |
58 | asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb)); | 58 | asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb)); |
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 5a24d3c2b6b8..084ad54c73cd 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -154,10 +154,10 @@ static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva, | |||
154 | return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift); | 154 | return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift); |
155 | } | 155 | } |
156 | 156 | ||
157 | static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v) | 157 | static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v) |
158 | { | 158 | { |
159 | asm volatile(PPC_RELEASE_BARRIER "" : : : "memory"); | 159 | asm volatile(PPC_RELEASE_BARRIER "" : : : "memory"); |
160 | hpte[0] = hpte_v; | 160 | hpte[0] = cpu_to_be64(hpte_v); |
161 | } | 161 | } |
162 | 162 | ||
163 | long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, | 163 | long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, |
@@ -166,7 +166,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, | |||
166 | { | 166 | { |
167 | unsigned long i, pa, gpa, gfn, psize; | 167 | unsigned long i, pa, gpa, gfn, psize; |
168 | unsigned long slot_fn, hva; | 168 | unsigned long slot_fn, hva; |
169 | unsigned long *hpte; | 169 | __be64 *hpte; |
170 | struct revmap_entry *rev; | 170 | struct revmap_entry *rev; |
171 | unsigned long g_ptel; | 171 | unsigned long g_ptel; |
172 | struct kvm_memory_slot *memslot; | 172 | struct kvm_memory_slot *memslot; |
@@ -275,9 +275,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, | |||
275 | return H_PARAMETER; | 275 | return H_PARAMETER; |
276 | if (likely((flags & H_EXACT) == 0)) { | 276 | if (likely((flags & H_EXACT) == 0)) { |
277 | pte_index &= ~7UL; | 277 | pte_index &= ~7UL; |
278 | hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); | 278 | hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4)); |
279 | for (i = 0; i < 8; ++i) { | 279 | for (i = 0; i < 8; ++i) { |
280 | if ((*hpte & HPTE_V_VALID) == 0 && | 280 | if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 && |
281 | try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID | | 281 | try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID | |
282 | HPTE_V_ABSENT)) | 282 | HPTE_V_ABSENT)) |
283 | break; | 283 | break; |
@@ -292,11 +292,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, | |||
292 | */ | 292 | */ |
293 | hpte -= 16; | 293 | hpte -= 16; |
294 | for (i = 0; i < 8; ++i) { | 294 | for (i = 0; i < 8; ++i) { |
295 | u64 pte; | ||
295 | while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) | 296 | while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) |
296 | cpu_relax(); | 297 | cpu_relax(); |
297 | if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT))) | 298 | pte = be64_to_cpu(*hpte); |
299 | if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT))) | ||
298 | break; | 300 | break; |
299 | *hpte &= ~HPTE_V_HVLOCK; | 301 | *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK); |
300 | hpte += 2; | 302 | hpte += 2; |
301 | } | 303 | } |
302 | if (i == 8) | 304 | if (i == 8) |
@@ -304,14 +306,17 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, | |||
304 | } | 306 | } |
305 | pte_index += i; | 307 | pte_index += i; |
306 | } else { | 308 | } else { |
307 | hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); | 309 | hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4)); |
308 | if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID | | 310 | if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID | |
309 | HPTE_V_ABSENT)) { | 311 | HPTE_V_ABSENT)) { |
310 | /* Lock the slot and check again */ | 312 | /* Lock the slot and check again */ |
313 | u64 pte; | ||
314 | |||
311 | while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) | 315 | while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) |
312 | cpu_relax(); | 316 | cpu_relax(); |
313 | if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) { | 317 | pte = be64_to_cpu(*hpte); |
314 | *hpte &= ~HPTE_V_HVLOCK; | 318 | if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) { |
319 | *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK); | ||
315 | return H_PTEG_FULL; | 320 | return H_PTEG_FULL; |
316 | } | 321 | } |
317 | } | 322 | } |
@@ -347,11 +352,11 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, | |||
347 | } | 352 | } |
348 | } | 353 | } |
349 | 354 | ||
350 | hpte[1] = ptel; | 355 | hpte[1] = cpu_to_be64(ptel); |
351 | 356 | ||
352 | /* Write the first HPTE dword, unlocking the HPTE and making it valid */ | 357 | /* Write the first HPTE dword, unlocking the HPTE and making it valid */ |
353 | eieio(); | 358 | eieio(); |
354 | hpte[0] = pteh; | 359 | hpte[0] = cpu_to_be64(pteh); |
355 | asm volatile("ptesync" : : : "memory"); | 360 | asm volatile("ptesync" : : : "memory"); |
356 | 361 | ||
357 | *pte_idx_ret = pte_index; | 362 | *pte_idx_ret = pte_index; |
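As background for the pointer arithmetic in this function: each HPTE is 16 bytes, so pte_index << 4 is its byte offset from kvm->arch.hpt_virt, and a PTEG is a group of eight consecutive HPTEs, which is why the non-H_EXACT path masks off the low three bits of pte_index before probing all eight slots. A tiny sketch of that addressing (addresses made up):

#include <stdio.h>

int main(void)
{
	unsigned long hpt_virt = 0x100000;	/* pretend HPT base address */
	unsigned long pte_index = 0x2b;

	/* one HPTE = 16 bytes; a PTEG = 8 consecutive HPTEs */
	printf("exact slot at   %#lx\n", hpt_virt + (pte_index << 4));
	printf("PTEG begins at  %#lx\n", hpt_virt + ((pte_index & ~7UL) << 4));
	return 0;
}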
@@ -468,30 +473,35 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags, | |||
468 | unsigned long pte_index, unsigned long avpn, | 473 | unsigned long pte_index, unsigned long avpn, |
469 | unsigned long *hpret) | 474 | unsigned long *hpret) |
470 | { | 475 | { |
471 | unsigned long *hpte; | 476 | __be64 *hpte; |
472 | unsigned long v, r, rb; | 477 | unsigned long v, r, rb; |
473 | struct revmap_entry *rev; | 478 | struct revmap_entry *rev; |
479 | u64 pte; | ||
474 | 480 | ||
475 | if (pte_index >= kvm->arch.hpt_npte) | 481 | if (pte_index >= kvm->arch.hpt_npte) |
476 | return H_PARAMETER; | 482 | return H_PARAMETER; |
477 | hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); | 483 | hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4)); |
478 | while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) | 484 | while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) |
479 | cpu_relax(); | 485 | cpu_relax(); |
480 | if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 || | 486 | pte = be64_to_cpu(hpte[0]); |
481 | ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) || | 487 | if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 || |
482 | ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) { | 488 | ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) || |
483 | hpte[0] &= ~HPTE_V_HVLOCK; | 489 | ((flags & H_ANDCOND) && (pte & avpn) != 0)) { |
490 | hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK); | ||
484 | return H_NOT_FOUND; | 491 | return H_NOT_FOUND; |
485 | } | 492 | } |
486 | 493 | ||
487 | rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); | 494 | rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); |
488 | v = hpte[0] & ~HPTE_V_HVLOCK; | 495 | v = pte & ~HPTE_V_HVLOCK; |
489 | if (v & HPTE_V_VALID) { | 496 | if (v & HPTE_V_VALID) { |
490 | hpte[0] &= ~HPTE_V_VALID; | 497 | u64 pte1; |
491 | rb = compute_tlbie_rb(v, hpte[1], pte_index); | 498 | |
499 | pte1 = be64_to_cpu(hpte[1]); | ||
500 | hpte[0] &= ~cpu_to_be64(HPTE_V_VALID); | ||
501 | rb = compute_tlbie_rb(v, pte1, pte_index); | ||
492 | do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true); | 502 | do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true); |
493 | /* Read PTE low word after tlbie to get final R/C values */ | 503 | /* Read PTE low word after tlbie to get final R/C values */ |
494 | remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]); | 504 | remove_revmap_chain(kvm, pte_index, rev, v, pte1); |
495 | } | 505 | } |
496 | r = rev->guest_rpte & ~HPTE_GR_RESERVED; | 506 | r = rev->guest_rpte & ~HPTE_GR_RESERVED; |
497 | note_hpte_modification(kvm, rev); | 507 | note_hpte_modification(kvm, rev); |
@@ -514,12 +524,14 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) | |||
514 | { | 524 | { |
515 | struct kvm *kvm = vcpu->kvm; | 525 | struct kvm *kvm = vcpu->kvm; |
516 | unsigned long *args = &vcpu->arch.gpr[4]; | 526 | unsigned long *args = &vcpu->arch.gpr[4]; |
517 | unsigned long *hp, *hptes[4], tlbrb[4]; | 527 | __be64 *hp, *hptes[4]; |
528 | unsigned long tlbrb[4]; | ||
518 | long int i, j, k, n, found, indexes[4]; | 529 | long int i, j, k, n, found, indexes[4]; |
519 | unsigned long flags, req, pte_index, rcbits; | 530 | unsigned long flags, req, pte_index, rcbits; |
520 | int global; | 531 | int global; |
521 | long int ret = H_SUCCESS; | 532 | long int ret = H_SUCCESS; |
522 | struct revmap_entry *rev, *revs[4]; | 533 | struct revmap_entry *rev, *revs[4]; |
534 | u64 hp0; | ||
523 | 535 | ||
524 | global = global_invalidates(kvm, 0); | 536 | global = global_invalidates(kvm, 0); |
525 | for (i = 0; i < 4 && ret == H_SUCCESS; ) { | 537 | for (i = 0; i < 4 && ret == H_SUCCESS; ) { |
@@ -542,8 +554,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) | |||
542 | ret = H_PARAMETER; | 554 | ret = H_PARAMETER; |
543 | break; | 555 | break; |
544 | } | 556 | } |
545 | hp = (unsigned long *) | 557 | hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4)); |
546 | (kvm->arch.hpt_virt + (pte_index << 4)); | ||
547 | /* to avoid deadlock, don't spin except for first */ | 558 | /* to avoid deadlock, don't spin except for first */ |
548 | if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) { | 559 | if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) { |
549 | if (n) | 560 | if (n) |
@@ -552,23 +563,24 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) | |||
552 | cpu_relax(); | 563 | cpu_relax(); |
553 | } | 564 | } |
554 | found = 0; | 565 | found = 0; |
555 | if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) { | 566 | hp0 = be64_to_cpu(hp[0]); |
567 | if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) { | ||
556 | switch (flags & 3) { | 568 | switch (flags & 3) { |
557 | case 0: /* absolute */ | 569 | case 0: /* absolute */ |
558 | found = 1; | 570 | found = 1; |
559 | break; | 571 | break; |
560 | case 1: /* andcond */ | 572 | case 1: /* andcond */ |
561 | if (!(hp[0] & args[j + 1])) | 573 | if (!(hp0 & args[j + 1])) |
562 | found = 1; | 574 | found = 1; |
563 | break; | 575 | break; |
564 | case 2: /* AVPN */ | 576 | case 2: /* AVPN */ |
565 | if ((hp[0] & ~0x7fUL) == args[j + 1]) | 577 | if ((hp0 & ~0x7fUL) == args[j + 1]) |
566 | found = 1; | 578 | found = 1; |
567 | break; | 579 | break; |
568 | } | 580 | } |
569 | } | 581 | } |
570 | if (!found) { | 582 | if (!found) { |
571 | hp[0] &= ~HPTE_V_HVLOCK; | 583 | hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK); |
572 | args[j] = ((0x90 | flags) << 56) + pte_index; | 584 | args[j] = ((0x90 | flags) << 56) + pte_index; |
573 | continue; | 585 | continue; |
574 | } | 586 | } |
@@ -577,7 +589,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) | |||
577 | rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); | 589 | rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); |
578 | note_hpte_modification(kvm, rev); | 590 | note_hpte_modification(kvm, rev); |
579 | 591 | ||
580 | if (!(hp[0] & HPTE_V_VALID)) { | 592 | if (!(hp0 & HPTE_V_VALID)) { |
581 | /* insert R and C bits from PTE */ | 593 | /* insert R and C bits from PTE */ |
582 | rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); | 594 | rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); |
583 | args[j] |= rcbits << (56 - 5); | 595 | args[j] |= rcbits << (56 - 5); |
@@ -585,8 +597,10 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) | |||
585 | continue; | 597 | continue; |
586 | } | 598 | } |
587 | 599 | ||
588 | hp[0] &= ~HPTE_V_VALID; /* leave it locked */ | 600 | /* leave it locked */ |
589 | tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index); | 601 | hp[0] &= ~cpu_to_be64(HPTE_V_VALID); |
602 | tlbrb[n] = compute_tlbie_rb(be64_to_cpu(hp[0]), | ||
603 | be64_to_cpu(hp[1]), pte_index); | ||
590 | indexes[n] = j; | 604 | indexes[n] = j; |
591 | hptes[n] = hp; | 605 | hptes[n] = hp; |
592 | revs[n] = rev; | 606 | revs[n] = rev; |
@@ -605,7 +619,8 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) | |||
605 | pte_index = args[j] & ((1ul << 56) - 1); | 619 | pte_index = args[j] & ((1ul << 56) - 1); |
606 | hp = hptes[k]; | 620 | hp = hptes[k]; |
607 | rev = revs[k]; | 621 | rev = revs[k]; |
608 | remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]); | 622 | remove_revmap_chain(kvm, pte_index, rev, |
623 | be64_to_cpu(hp[0]), be64_to_cpu(hp[1])); | ||
609 | rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); | 624 | rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); |
610 | args[j] |= rcbits << (56 - 5); | 625 | args[j] |= rcbits << (56 - 5); |
611 | hp[0] = 0; | 626 | hp[0] = 0; |
@@ -620,23 +635,25 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, | |||
620 | unsigned long va) | 635 | unsigned long va) |
621 | { | 636 | { |
622 | struct kvm *kvm = vcpu->kvm; | 637 | struct kvm *kvm = vcpu->kvm; |
623 | unsigned long *hpte; | 638 | __be64 *hpte; |
624 | struct revmap_entry *rev; | 639 | struct revmap_entry *rev; |
625 | unsigned long v, r, rb, mask, bits; | 640 | unsigned long v, r, rb, mask, bits; |
641 | u64 pte; | ||
626 | 642 | ||
627 | if (pte_index >= kvm->arch.hpt_npte) | 643 | if (pte_index >= kvm->arch.hpt_npte) |
628 | return H_PARAMETER; | 644 | return H_PARAMETER; |
629 | 645 | ||
630 | hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); | 646 | hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4)); |
631 | while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) | 647 | while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) |
632 | cpu_relax(); | 648 | cpu_relax(); |
633 | if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 || | 649 | pte = be64_to_cpu(hpte[0]); |
634 | ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) { | 650 | if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 || |
635 | hpte[0] &= ~HPTE_V_HVLOCK; | 651 | ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) { |
652 | hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK); | ||
636 | return H_NOT_FOUND; | 653 | return H_NOT_FOUND; |
637 | } | 654 | } |
638 | 655 | ||
639 | v = hpte[0]; | 656 | v = pte; |
640 | bits = (flags << 55) & HPTE_R_PP0; | 657 | bits = (flags << 55) & HPTE_R_PP0; |
641 | bits |= (flags << 48) & HPTE_R_KEY_HI; | 658 | bits |= (flags << 48) & HPTE_R_KEY_HI; |
642 | bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO); | 659 | bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO); |
@@ -650,12 +667,12 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, | |||
650 | rev->guest_rpte = r; | 667 | rev->guest_rpte = r; |
651 | note_hpte_modification(kvm, rev); | 668 | note_hpte_modification(kvm, rev); |
652 | } | 669 | } |
653 | r = (hpte[1] & ~mask) | bits; | 670 | r = (be64_to_cpu(hpte[1]) & ~mask) | bits; |
654 | 671 | ||
655 | /* Update HPTE */ | 672 | /* Update HPTE */ |
656 | if (v & HPTE_V_VALID) { | 673 | if (v & HPTE_V_VALID) { |
657 | rb = compute_tlbie_rb(v, r, pte_index); | 674 | rb = compute_tlbie_rb(v, r, pte_index); |
658 | hpte[0] = v & ~HPTE_V_VALID; | 675 | hpte[0] = cpu_to_be64(v & ~HPTE_V_VALID); |
659 | do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true); | 676 | do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true); |
660 | /* | 677 | /* |
661 | * If the host has this page as readonly but the guest | 678 | * If the host has this page as readonly but the guest |
@@ -681,9 +698,9 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, | |||
681 | } | 698 | } |
682 | } | 699 | } |
683 | } | 700 | } |
684 | hpte[1] = r; | 701 | hpte[1] = cpu_to_be64(r); |
685 | eieio(); | 702 | eieio(); |
686 | hpte[0] = v & ~HPTE_V_HVLOCK; | 703 | hpte[0] = cpu_to_be64(v & ~HPTE_V_HVLOCK); |
687 | asm volatile("ptesync" : : : "memory"); | 704 | asm volatile("ptesync" : : : "memory"); |
688 | return H_SUCCESS; | 705 | return H_SUCCESS; |
689 | } | 706 | } |
@@ -692,7 +709,8 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags, | |||
692 | unsigned long pte_index) | 709 | unsigned long pte_index) |
693 | { | 710 | { |
694 | struct kvm *kvm = vcpu->kvm; | 711 | struct kvm *kvm = vcpu->kvm; |
695 | unsigned long *hpte, v, r; | 712 | __be64 *hpte; |
713 | unsigned long v, r; | ||
696 | int i, n = 1; | 714 | int i, n = 1; |
697 | struct revmap_entry *rev = NULL; | 715 | struct revmap_entry *rev = NULL; |
698 | 716 | ||
@@ -704,9 +722,9 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags, | |||
704 | } | 722 | } |
705 | rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); | 723 | rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); |
706 | for (i = 0; i < n; ++i, ++pte_index) { | 724 | for (i = 0; i < n; ++i, ++pte_index) { |
707 | hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); | 725 | hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4)); |
708 | v = hpte[0] & ~HPTE_V_HVLOCK; | 726 | v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK; |
709 | r = hpte[1]; | 727 | r = be64_to_cpu(hpte[1]); |
710 | if (v & HPTE_V_ABSENT) { | 728 | if (v & HPTE_V_ABSENT) { |
711 | v &= ~HPTE_V_ABSENT; | 729 | v &= ~HPTE_V_ABSENT; |
712 | v |= HPTE_V_VALID; | 730 | v |= HPTE_V_VALID; |
@@ -721,25 +739,27 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags, | |||
721 | return H_SUCCESS; | 739 | return H_SUCCESS; |
722 | } | 740 | } |
723 | 741 | ||
724 | void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep, | 742 | void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep, |
725 | unsigned long pte_index) | 743 | unsigned long pte_index) |
726 | { | 744 | { |
727 | unsigned long rb; | 745 | unsigned long rb; |
728 | 746 | ||
729 | hptep[0] &= ~HPTE_V_VALID; | 747 | hptep[0] &= ~cpu_to_be64(HPTE_V_VALID); |
730 | rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index); | 748 | rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]), |
749 | pte_index); | ||
731 | do_tlbies(kvm, &rb, 1, 1, true); | 750 | do_tlbies(kvm, &rb, 1, 1, true); |
732 | } | 751 | } |
733 | EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte); | 752 | EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte); |
734 | 753 | ||
735 | void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep, | 754 | void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep, |
736 | unsigned long pte_index) | 755 | unsigned long pte_index) |
737 | { | 756 | { |
738 | unsigned long rb; | 757 | unsigned long rb; |
739 | unsigned char rbyte; | 758 | unsigned char rbyte; |
740 | 759 | ||
741 | rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index); | 760 | rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]), |
742 | rbyte = (hptep[1] & ~HPTE_R_R) >> 8; | 761 | pte_index); |
762 | rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8; | ||
743 | /* modify only the second-last byte, which contains the ref bit */ | 763 | /* modify only the second-last byte, which contains the ref bit */ |
744 | *((char *)hptep + 14) = rbyte; | 764 | *((char *)hptep + 14) = rbyte; |
745 | do_tlbies(kvm, &rb, 1, 1, false); | 765 | do_tlbies(kvm, &rb, 1, 1, false); |
@@ -765,7 +785,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, | |||
765 | unsigned long somask; | 785 | unsigned long somask; |
766 | unsigned long vsid, hash; | 786 | unsigned long vsid, hash; |
767 | unsigned long avpn; | 787 | unsigned long avpn; |
768 | unsigned long *hpte; | 788 | __be64 *hpte; |
769 | unsigned long mask, val; | 789 | unsigned long mask, val; |
770 | unsigned long v, r; | 790 | unsigned long v, r; |
771 | 791 | ||
@@ -797,11 +817,11 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, | |||
797 | val |= avpn; | 817 | val |= avpn; |
798 | 818 | ||
799 | for (;;) { | 819 | for (;;) { |
800 | hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7)); | 820 | hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7)); |
801 | 821 | ||
802 | for (i = 0; i < 16; i += 2) { | 822 | for (i = 0; i < 16; i += 2) { |
803 | /* Read the PTE racily */ | 823 | /* Read the PTE racily */ |
804 | v = hpte[i] & ~HPTE_V_HVLOCK; | 824 | v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK; |
805 | 825 | ||
806 | /* Check valid/absent, hash, segment size and AVPN */ | 826 | /* Check valid/absent, hash, segment size and AVPN */ |
807 | if (!(v & valid) || (v & mask) != val) | 827 | if (!(v & valid) || (v & mask) != val) |
@@ -810,8 +830,8 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, | |||
810 | /* Lock the PTE and read it under the lock */ | 830 | /* Lock the PTE and read it under the lock */ |
811 | while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK)) | 831 | while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK)) |
812 | cpu_relax(); | 832 | cpu_relax(); |
813 | v = hpte[i] & ~HPTE_V_HVLOCK; | 833 | v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK; |
814 | r = hpte[i+1]; | 834 | r = be64_to_cpu(hpte[i+1]); |
815 | 835 | ||
816 | /* | 836 | /* |
817 | * Check the HPTE again, including base page size | 837 | * Check the HPTE again, including base page size |
@@ -822,7 +842,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, | |||
822 | return (hash << 3) + (i >> 1); | 842 | return (hash << 3) + (i >> 1); |
823 | 843 | ||
824 | /* Unlock and move on */ | 844 | /* Unlock and move on */ |
825 | hpte[i] = v; | 845 | hpte[i] = cpu_to_be64(v); |
826 | } | 846 | } |
827 | 847 | ||
828 | if (val & HPTE_V_SECONDARY) | 848 | if (val & HPTE_V_SECONDARY) |
@@ -851,7 +871,7 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr, | |||
851 | struct kvm *kvm = vcpu->kvm; | 871 | struct kvm *kvm = vcpu->kvm; |
852 | long int index; | 872 | long int index; |
853 | unsigned long v, r, gr; | 873 | unsigned long v, r, gr; |
854 | unsigned long *hpte; | 874 | __be64 *hpte; |
855 | unsigned long valid; | 875 | unsigned long valid; |
856 | struct revmap_entry *rev; | 876 | struct revmap_entry *rev; |
857 | unsigned long pp, key; | 877 | unsigned long pp, key; |
@@ -867,9 +887,9 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr, | |||
867 | return status; /* there really was no HPTE */ | 887 | return status; /* there really was no HPTE */ |
868 | return 0; /* for prot fault, HPTE disappeared */ | 888 | return 0; /* for prot fault, HPTE disappeared */ |
869 | } | 889 | } |
870 | hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); | 890 | hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4)); |
871 | v = hpte[0] & ~HPTE_V_HVLOCK; | 891 | v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK; |
872 | r = hpte[1]; | 892 | r = be64_to_cpu(hpte[1]); |
873 | rev = real_vmalloc_addr(&kvm->arch.revmap[index]); | 893 | rev = real_vmalloc_addr(&kvm->arch.revmap[index]); |
874 | gr = rev->guest_rpte; | 894 | gr = rev->guest_rpte; |
875 | 895 | ||
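One detail worth calling out in the conversions above: HPTE_V_HVLOCK is cleared directly on the big-endian doubleword with hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK), i.e. the constant is byte-swapped at compile time instead of swapping the loaded value. Clearing a bit this way commutes with the byte swap, as this small demonstration shows (the bit value here is illustrative only):

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

#define HPTE_V_HVLOCK 0x40ULL		/* illustrative software lock bit */

int main(void)
{
	uint64_t v  = 0x1234000000000001ULL | HPTE_V_HVLOCK;	/* host-endian view */
	uint64_t be = htobe64(v);				/* as stored in the HPT */

	be &= ~htobe64(HPTE_V_HVLOCK);	/* clear the bit on the BE image */

	printf("match: %d\n", be64toh(be) == (v & ~HPTE_V_HVLOCK));
	return 0;
}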
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 558a67df8126..855521ef04e8 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -32,10 +32,6 @@ | |||
32 | 32 | ||
33 | #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) | 33 | #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) |
34 | 34 | ||
35 | #ifdef __LITTLE_ENDIAN__ | ||
36 | #error Need to fix lppaca and SLB shadow accesses in little endian mode | ||
37 | #endif | ||
38 | |||
39 | /* Values in HSTATE_NAPPING(r13) */ | 35 | /* Values in HSTATE_NAPPING(r13) */ |
40 | #define NAPPING_CEDE 1 | 36 | #define NAPPING_CEDE 1 |
41 | #define NAPPING_NOVCPU 2 | 37 | #define NAPPING_NOVCPU 2 |
@@ -595,9 +591,10 @@ kvmppc_got_guest: | |||
595 | ld r3, VCPU_VPA(r4) | 591 | ld r3, VCPU_VPA(r4) |
596 | cmpdi r3, 0 | 592 | cmpdi r3, 0 |
597 | beq 25f | 593 | beq 25f |
598 | lwz r5, LPPACA_YIELDCOUNT(r3) | 594 | li r6, LPPACA_YIELDCOUNT |
595 | LWZX_BE r5, r3, r6 | ||
599 | addi r5, r5, 1 | 596 | addi r5, r5, 1 |
600 | stw r5, LPPACA_YIELDCOUNT(r3) | 597 | STWX_BE r5, r3, r6 |
601 | li r6, 1 | 598 | li r6, 1 |
602 | stb r6, VCPU_VPA_DIRTY(r4) | 599 | stb r6, VCPU_VPA_DIRTY(r4) |
603 | 25: | 600 | 25: |
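The lwz/stw pair is replaced with the LWZX_BE/STWX_BE helpers so that the yield count in the guest's VPA, which stays big-endian, is incremented correctly on a little-endian host as well. A self-contained sketch of what the load/increment/store sequence amounts to, assuming the field is kept big-endian in memory:

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

int main(void)
{
	uint32_t yield_count = htobe32(41);	/* big-endian field in the VPA */

	/* what LWZX_BE / addi / STWX_BE amount to on LE: load byte-reversed,
	 * increment in host order, store byte-reversed again */
	yield_count = htobe32(be32toh(yield_count) + 1);

	printf("yield count is now %u\n", be32toh(yield_count));
	return 0;
}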
@@ -671,9 +668,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM) | |||
671 | 668 | ||
672 | mr r31, r4 | 669 | mr r31, r4 |
673 | addi r3, r31, VCPU_FPRS_TM | 670 | addi r3, r31, VCPU_FPRS_TM |
674 | bl .load_fp_state | 671 | bl load_fp_state |
675 | addi r3, r31, VCPU_VRS_TM | 672 | addi r3, r31, VCPU_VRS_TM |
676 | bl .load_vr_state | 673 | bl load_vr_state |
677 | mr r4, r31 | 674 | mr r4, r31 |
678 | lwz r7, VCPU_VRSAVE_TM(r4) | 675 | lwz r7, VCPU_VRSAVE_TM(r4) |
679 | mtspr SPRN_VRSAVE, r7 | 676 | mtspr SPRN_VRSAVE, r7 |
@@ -1417,9 +1414,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM) | |||
1417 | 1414 | ||
1418 | /* Save FP/VSX. */ | 1415 | /* Save FP/VSX. */ |
1419 | addi r3, r9, VCPU_FPRS_TM | 1416 | addi r3, r9, VCPU_FPRS_TM |
1420 | bl .store_fp_state | 1417 | bl store_fp_state |
1421 | addi r3, r9, VCPU_VRS_TM | 1418 | addi r3, r9, VCPU_VRS_TM |
1422 | bl .store_vr_state | 1419 | bl store_vr_state |
1423 | mfspr r6, SPRN_VRSAVE | 1420 | mfspr r6, SPRN_VRSAVE |
1424 | stw r6, VCPU_VRSAVE_TM(r9) | 1421 | stw r6, VCPU_VRSAVE_TM(r9) |
1425 | 1: | 1422 | 1: |
@@ -1442,9 +1439,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM) | |||
1442 | ld r8, VCPU_VPA(r9) /* do they have a VPA? */ | 1439 | ld r8, VCPU_VPA(r9) /* do they have a VPA? */ |
1443 | cmpdi r8, 0 | 1440 | cmpdi r8, 0 |
1444 | beq 25f | 1441 | beq 25f |
1445 | lwz r3, LPPACA_YIELDCOUNT(r8) | 1442 | li r4, LPPACA_YIELDCOUNT |
1443 | LWZX_BE r3, r8, r4 | ||
1446 | addi r3, r3, 1 | 1444 | addi r3, r3, 1 |
1447 | stw r3, LPPACA_YIELDCOUNT(r8) | 1445 | STWX_BE r3, r8, r4 |
1448 | li r3, 1 | 1446 | li r3, 1 |
1449 | stb r3, VCPU_VPA_DIRTY(r9) | 1447 | stb r3, VCPU_VPA_DIRTY(r9) |
1450 | 25: | 1448 | 25: |
@@ -1757,8 +1755,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |||
1757 | 33: ld r8,PACA_SLBSHADOWPTR(r13) | 1755 | 33: ld r8,PACA_SLBSHADOWPTR(r13) |
1758 | 1756 | ||
1759 | .rept SLB_NUM_BOLTED | 1757 | .rept SLB_NUM_BOLTED |
1760 | ld r5,SLBSHADOW_SAVEAREA(r8) | 1758 | li r3, SLBSHADOW_SAVEAREA |
1761 | ld r6,SLBSHADOW_SAVEAREA+8(r8) | 1759 | LDX_BE r5, r8, r3 |
1760 | addi r3, r3, 8 | ||
1761 | LDX_BE r6, r8, r3 | ||
1762 | andis. r7,r5,SLB_ESID_V@h | 1762 | andis. r7,r5,SLB_ESID_V@h |
1763 | beq 1f | 1763 | beq 1f |
1764 | slbmte r6,r5 | 1764 | slbmte r6,r5 |
@@ -1909,12 +1909,23 @@ hcall_try_real_mode: | |||
1909 | clrrdi r3,r3,2 | 1909 | clrrdi r3,r3,2 |
1910 | cmpldi r3,hcall_real_table_end - hcall_real_table | 1910 | cmpldi r3,hcall_real_table_end - hcall_real_table |
1911 | bge guest_exit_cont | 1911 | bge guest_exit_cont |
1912 | /* See if this hcall is enabled for in-kernel handling */ | ||
1913 | ld r4, VCPU_KVM(r9) | ||
1914 | srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */ | ||
1915 | sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */ | ||
1916 | add r4, r4, r0 | ||
1917 | ld r0, KVM_ENABLED_HCALLS(r4) | ||
1918 | rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */ | ||
1919 | srd r0, r0, r4 | ||
1920 | andi. r0, r0, 1 | ||
1921 | beq guest_exit_cont | ||
1922 | /* Get pointer to handler, if any, and call it */ | ||
1912 | LOAD_REG_ADDR(r4, hcall_real_table) | 1923 | LOAD_REG_ADDR(r4, hcall_real_table) |
1913 | lwax r3,r3,r4 | 1924 | lwax r3,r3,r4 |
1914 | cmpwi r3,0 | 1925 | cmpwi r3,0 |
1915 | beq guest_exit_cont | 1926 | beq guest_exit_cont |
1916 | add r3,r3,r4 | 1927 | add r12,r3,r4 |
1917 | mtctr r3 | 1928 | mtctr r12 |
1918 | mr r3,r9 /* get vcpu pointer */ | 1929 | mr r3,r9 /* get vcpu pointer */ |
1919 | ld r4,VCPU_GPR(R4)(r9) | 1930 | ld r4,VCPU_GPR(R4)(r9) |
1920 | bctrl | 1931 | bctrl |
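The new instructions above implement, in real mode, the same check the PR path does in C further down in this diff: bit cmd/4 of kvm->arch.enabled_hcalls must be set before the hcall is handled in the kernel. The C form, essentially as it appears in book3s_pr_papr.c below, is:

	if (cmd <= MAX_HCALL_OPCODE &&
	    !test_bit(cmd / 4, vcpu->kvm->arch.enabled_hcalls))
		return EMULATE_FAIL;	/* not enabled for in-kernel handling */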
@@ -2031,6 +2042,7 @@ hcall_real_table: | |||
2031 | .long 0 /* 0x12c */ | 2042 | .long 0 /* 0x12c */ |
2032 | .long 0 /* 0x130 */ | 2043 | .long 0 /* 0x130 */ |
2033 | .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table | 2044 | .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table |
2045 | .globl hcall_real_table_end | ||
2034 | hcall_real_table_end: | 2046 | hcall_real_table_end: |
2035 | 2047 | ||
2036 | ignore_hdec: | 2048 | ignore_hdec: |
@@ -2338,7 +2350,18 @@ kvmppc_read_intr: | |||
2338 | cmpdi r6, 0 | 2350 | cmpdi r6, 0 |
2339 | beq- 1f | 2351 | beq- 1f |
2340 | lwzcix r0, r6, r7 | 2352 | lwzcix r0, r6, r7 |
2341 | rlwinm. r3, r0, 0, 0xffffff | 2353 | /* |
2354 | * Save XIRR for later. Since we get it in reverse endian on LE | ||
2355 | * systems, save it byte reversed and fetch it back in host endian. | ||
2356 | */ | ||
2357 | li r3, HSTATE_SAVED_XIRR | ||
2358 | STWX_BE r0, r3, r13 | ||
2359 | #ifdef __LITTLE_ENDIAN__ | ||
2360 | lwz r3, HSTATE_SAVED_XIRR(r13) | ||
2361 | #else | ||
2362 | mr r3, r0 | ||
2363 | #endif | ||
2364 | rlwinm. r3, r3, 0, 0xffffff | ||
2342 | sync | 2365 | sync |
2343 | beq 1f /* if nothing pending in the ICP */ | 2366 | beq 1f /* if nothing pending in the ICP */ |
2344 | 2367 | ||
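The XICS presents the XIRR big-endian, and the cache-inhibited lwzcix does not byte-reverse, so on a little-endian host the register value arrives byte-swapped; storing it with STWX_BE and reloading it with a plain lwz is what leaves a host-endian copy in HSTATE_SAVED_XIRR. The net effect is a single byte swap, as in this sketch (value made up; the low 24 bits of the XIRR are the pending interrupt source):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t raw  = 0x0100ff00;	/* straight LE load of a big-endian XIRR */
	uint32_t xirr = __builtin_bswap32(raw);

	printf("xirr = %#x, pending source = %#x\n", xirr, xirr & 0xffffff);
	return 0;
}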
@@ -2370,10 +2393,9 @@ kvmppc_read_intr: | |||
2370 | li r3, -1 | 2393 | li r3, -1 |
2371 | 1: blr | 2394 | 1: blr |
2372 | 2395 | ||
2373 | 42: /* It's not an IPI and it's for the host, stash it in the PACA | 2396 | 42: /* It's not an IPI and it's for the host. We saved a copy of XIRR in |
2374 | * before exit, it will be picked up by the host ICP driver | 2397 | * the PACA earlier, it will be picked up by the host ICP driver |
2375 | */ | 2398 | */ |
2376 | stw r0, HSTATE_SAVED_XIRR(r13) | ||
2377 | li r3, 1 | 2399 | li r3, 1 |
2378 | b 1b | 2400 | b 1b |
2379 | 2401 | ||
@@ -2408,11 +2430,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) | |||
2408 | mtmsrd r8 | 2430 | mtmsrd r8 |
2409 | isync | 2431 | isync |
2410 | addi r3,r3,VCPU_FPRS | 2432 | addi r3,r3,VCPU_FPRS |
2411 | bl .store_fp_state | 2433 | bl store_fp_state |
2412 | #ifdef CONFIG_ALTIVEC | 2434 | #ifdef CONFIG_ALTIVEC |
2413 | BEGIN_FTR_SECTION | 2435 | BEGIN_FTR_SECTION |
2414 | addi r3,r31,VCPU_VRS | 2436 | addi r3,r31,VCPU_VRS |
2415 | bl .store_vr_state | 2437 | bl store_vr_state |
2416 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | 2438 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
2417 | #endif | 2439 | #endif |
2418 | mfspr r6,SPRN_VRSAVE | 2440 | mfspr r6,SPRN_VRSAVE |
@@ -2444,11 +2466,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) | |||
2444 | mtmsrd r8 | 2466 | mtmsrd r8 |
2445 | isync | 2467 | isync |
2446 | addi r3,r4,VCPU_FPRS | 2468 | addi r3,r4,VCPU_FPRS |
2447 | bl .load_fp_state | 2469 | bl load_fp_state |
2448 | #ifdef CONFIG_ALTIVEC | 2470 | #ifdef CONFIG_ALTIVEC |
2449 | BEGIN_FTR_SECTION | 2471 | BEGIN_FTR_SECTION |
2450 | addi r3,r31,VCPU_VRS | 2472 | addi r3,r31,VCPU_VRS |
2451 | bl .load_vr_state | 2473 | bl load_vr_state |
2452 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | 2474 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
2453 | #endif | 2475 | #endif |
2454 | lwz r7,VCPU_VRSAVE(r31) | 2476 | lwz r7,VCPU_VRSAVE(r31) |
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index 6c8011fd57e6..bfb8035314e3 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -639,26 +639,36 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc, | |||
639 | 639 | ||
640 | int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | 640 | int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) |
641 | { | 641 | { |
642 | u32 inst = kvmppc_get_last_inst(vcpu); | 642 | u32 inst; |
643 | enum emulation_result emulated = EMULATE_DONE; | 643 | enum emulation_result emulated = EMULATE_DONE; |
644 | int ax_rd, ax_ra, ax_rb, ax_rc; | ||
645 | short full_d; | ||
646 | u64 *fpr_d, *fpr_a, *fpr_b, *fpr_c; | ||
644 | 647 | ||
645 | int ax_rd = inst_get_field(inst, 6, 10); | 648 | bool rcomp; |
646 | int ax_ra = inst_get_field(inst, 11, 15); | 649 | u32 cr; |
647 | int ax_rb = inst_get_field(inst, 16, 20); | ||
648 | int ax_rc = inst_get_field(inst, 21, 25); | ||
649 | short full_d = inst_get_field(inst, 16, 31); | ||
650 | |||
651 | u64 *fpr_d = &VCPU_FPR(vcpu, ax_rd); | ||
652 | u64 *fpr_a = &VCPU_FPR(vcpu, ax_ra); | ||
653 | u64 *fpr_b = &VCPU_FPR(vcpu, ax_rb); | ||
654 | u64 *fpr_c = &VCPU_FPR(vcpu, ax_rc); | ||
655 | |||
656 | bool rcomp = (inst & 1) ? true : false; | ||
657 | u32 cr = kvmppc_get_cr(vcpu); | ||
658 | #ifdef DEBUG | 650 | #ifdef DEBUG |
659 | int i; | 651 | int i; |
660 | #endif | 652 | #endif |
661 | 653 | ||
654 | emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst); | ||
655 | if (emulated != EMULATE_DONE) | ||
656 | return emulated; | ||
657 | |||
658 | ax_rd = inst_get_field(inst, 6, 10); | ||
659 | ax_ra = inst_get_field(inst, 11, 15); | ||
660 | ax_rb = inst_get_field(inst, 16, 20); | ||
661 | ax_rc = inst_get_field(inst, 21, 25); | ||
662 | full_d = inst_get_field(inst, 16, 31); | ||
663 | |||
664 | fpr_d = &VCPU_FPR(vcpu, ax_rd); | ||
665 | fpr_a = &VCPU_FPR(vcpu, ax_ra); | ||
666 | fpr_b = &VCPU_FPR(vcpu, ax_rb); | ||
667 | fpr_c = &VCPU_FPR(vcpu, ax_rc); | ||
668 | |||
669 | rcomp = (inst & 1) ? true : false; | ||
670 | cr = kvmppc_get_cr(vcpu); | ||
671 | |||
662 | if (!kvmppc_inst_is_paired_single(vcpu, inst)) | 672 | if (!kvmppc_inst_is_paired_single(vcpu, inst)) |
663 | return EMULATE_FAIL; | 673 | return EMULATE_FAIL; |
664 | 674 | ||
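The change above reflects the reworked instruction fetch in this series: kvmppc_get_last_inst() now returns an emulation_result and hands the instruction back through a pointer, so every caller has to bail out before decoding anything if the fetch did not complete. The calling pattern, as used here and again in book3s_pr.c below, is:

	u32 inst;
	enum emulation_result er;

	er = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (er != EMULATE_DONE)
		return er;	/* fetch failed or must be retried; don't decode */

	/* only now is it safe to pull fields out of inst */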
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 8eef1e519077..faffb27badd9 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -62,6 +62,35 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); | |||
62 | #define HW_PAGE_SIZE PAGE_SIZE | 62 | #define HW_PAGE_SIZE PAGE_SIZE |
63 | #endif | 63 | #endif |
64 | 64 | ||
65 | static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) | ||
66 | { | ||
67 | ulong msr = kvmppc_get_msr(vcpu); | ||
68 | return (msr & (MSR_IR|MSR_DR)) == MSR_DR; | ||
69 | } | ||
70 | |||
71 | static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu) | ||
72 | { | ||
73 | ulong msr = kvmppc_get_msr(vcpu); | ||
74 | ulong pc = kvmppc_get_pc(vcpu); | ||
75 | |||
76 | /* We are in DR only split real mode */ | ||
77 | if ((msr & (MSR_IR|MSR_DR)) != MSR_DR) | ||
78 | return; | ||
79 | |||
80 | /* We have not fixed up the guest already */ | ||
81 | if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) | ||
82 | return; | ||
83 | |||
84 | /* The code is in fixupable address space */ | ||
85 | if (pc & SPLIT_HACK_MASK) | ||
86 | return; | ||
87 | |||
88 | vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK; | ||
89 | kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS); | ||
90 | } | ||
91 | |||
92 | void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu); | ||
93 | |||
65 | static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) | 94 | static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) |
66 | { | 95 | { |
67 | #ifdef CONFIG_PPC_BOOK3S_64 | 96 | #ifdef CONFIG_PPC_BOOK3S_64 |
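kvmppc_is_split_real() tests for the one MSR combination PR KVM cannot run directly: instruction relocation off while data relocation is on. A stand-alone illustration of that test (the MSR_IR/MSR_DR values here are shown only for the demo):

#include <stdio.h>

#define MSR_IR 0x20UL	/* instruction relocate */
#define MSR_DR 0x10UL	/* data relocate */

static int is_split_real(unsigned long msr)
{
	/* "split real": fetches untranslated, data accesses translated */
	return (msr & (MSR_IR | MSR_DR)) == MSR_DR;
}

int main(void)
{
	printf("real=%d  split=%d  virtual=%d\n",
	       is_split_real(0), is_split_real(MSR_DR),
	       is_split_real(MSR_IR | MSR_DR));
	return 0;
}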
@@ -71,10 +100,19 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) | |||
71 | svcpu->in_use = 0; | 100 | svcpu->in_use = 0; |
72 | svcpu_put(svcpu); | 101 | svcpu_put(svcpu); |
73 | #endif | 102 | #endif |
103 | |||
104 | /* Disable AIL if supported */ | ||
105 | if (cpu_has_feature(CPU_FTR_HVMODE) && | ||
106 | cpu_has_feature(CPU_FTR_ARCH_207S)) | ||
107 | mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL); | ||
108 | |||
74 | vcpu->cpu = smp_processor_id(); | 109 | vcpu->cpu = smp_processor_id(); |
75 | #ifdef CONFIG_PPC_BOOK3S_32 | 110 | #ifdef CONFIG_PPC_BOOK3S_32 |
76 | current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu; | 111 | current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu; |
77 | #endif | 112 | #endif |
113 | |||
114 | if (kvmppc_is_split_real(vcpu)) | ||
115 | kvmppc_fixup_split_real(vcpu); | ||
78 | } | 116 | } |
79 | 117 | ||
80 | static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) | 118 | static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) |
@@ -89,8 +127,17 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) | |||
89 | svcpu_put(svcpu); | 127 | svcpu_put(svcpu); |
90 | #endif | 128 | #endif |
91 | 129 | ||
130 | if (kvmppc_is_split_real(vcpu)) | ||
131 | kvmppc_unfixup_split_real(vcpu); | ||
132 | |||
92 | kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); | 133 | kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); |
93 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); | 134 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); |
135 | |||
136 | /* Enable AIL if supported */ | ||
137 | if (cpu_has_feature(CPU_FTR_HVMODE) && | ||
138 | cpu_has_feature(CPU_FTR_ARCH_207S)) | ||
139 | mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3); | ||
140 | |||
94 | vcpu->cpu = -1; | 141 | vcpu->cpu = -1; |
95 | } | 142 | } |
96 | 143 | ||
@@ -120,6 +167,14 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, | |||
120 | #ifdef CONFIG_PPC_BOOK3S_64 | 167 | #ifdef CONFIG_PPC_BOOK3S_64 |
121 | svcpu->shadow_fscr = vcpu->arch.shadow_fscr; | 168 | svcpu->shadow_fscr = vcpu->arch.shadow_fscr; |
122 | #endif | 169 | #endif |
170 | /* | ||
171 | * Now also save the current time base value. We use this | ||
172 | * to find the guest purr and spurr value. | ||
173 | */ | ||
174 | vcpu->arch.entry_tb = get_tb(); | ||
175 | vcpu->arch.entry_vtb = get_vtb(); | ||
176 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) | ||
177 | vcpu->arch.entry_ic = mfspr(SPRN_IC); | ||
123 | svcpu->in_use = true; | 178 | svcpu->in_use = true; |
124 | } | 179 | } |
125 | 180 | ||
@@ -166,6 +221,14 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, | |||
166 | #ifdef CONFIG_PPC_BOOK3S_64 | 221 | #ifdef CONFIG_PPC_BOOK3S_64 |
167 | vcpu->arch.shadow_fscr = svcpu->shadow_fscr; | 222 | vcpu->arch.shadow_fscr = svcpu->shadow_fscr; |
168 | #endif | 223 | #endif |
224 | /* | ||
225 | * Update purr and spurr using time base on exit. | ||
226 | */ | ||
227 | vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb; | ||
228 | vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb; | ||
229 | vcpu->arch.vtb += get_vtb() - vcpu->arch.entry_vtb; | ||
230 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) | ||
231 | vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic; | ||
169 | svcpu->in_use = false; | 232 | svcpu->in_use = false; |
170 | 233 | ||
171 | out: | 234 | out: |
@@ -294,6 +357,11 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) | |||
294 | } | 357 | } |
295 | } | 358 | } |
296 | 359 | ||
360 | if (kvmppc_is_split_real(vcpu)) | ||
361 | kvmppc_fixup_split_real(vcpu); | ||
362 | else | ||
363 | kvmppc_unfixup_split_real(vcpu); | ||
364 | |||
297 | if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) != | 365 | if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) != |
298 | (old_msr & (MSR_PR|MSR_IR|MSR_DR))) { | 366 | (old_msr & (MSR_PR|MSR_IR|MSR_DR))) { |
299 | kvmppc_mmu_flush_segments(vcpu); | 367 | kvmppc_mmu_flush_segments(vcpu); |
@@ -443,19 +511,19 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) | |||
443 | put_page(hpage); | 511 | put_page(hpage); |
444 | } | 512 | } |
445 | 513 | ||
446 | static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) | 514 | static int kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) |
447 | { | 515 | { |
448 | ulong mp_pa = vcpu->arch.magic_page_pa; | 516 | ulong mp_pa = vcpu->arch.magic_page_pa; |
449 | 517 | ||
450 | if (!(kvmppc_get_msr(vcpu) & MSR_SF)) | 518 | if (!(kvmppc_get_msr(vcpu) & MSR_SF)) |
451 | mp_pa = (uint32_t)mp_pa; | 519 | mp_pa = (uint32_t)mp_pa; |
452 | 520 | ||
453 | if (unlikely(mp_pa) && | 521 | gpa &= ~0xFFFULL; |
454 | unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) { | 522 | if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) { |
455 | return 1; | 523 | return 1; |
456 | } | 524 | } |
457 | 525 | ||
458 | return kvm_is_visible_gfn(vcpu->kvm, gfn); | 526 | return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT); |
459 | } | 527 | } |
460 | 528 | ||
461 | int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | 529 | int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, |
@@ -494,6 +562,11 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
494 | pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12)); | 562 | pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12)); |
495 | break; | 563 | break; |
496 | case MSR_DR: | 564 | case MSR_DR: |
565 | if (!data && | ||
566 | (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) && | ||
567 | ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)) | ||
568 | pte.raddr &= ~SPLIT_HACK_MASK; | ||
569 | /* fall through */ | ||
497 | case MSR_IR: | 570 | case MSR_IR: |
498 | vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); | 571 | vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); |
499 | 572 | ||
@@ -541,7 +614,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
541 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); | 614 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); |
542 | kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); | 615 | kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); |
543 | } else if (!is_mmio && | 616 | } else if (!is_mmio && |
544 | kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { | 617 | kvmppc_visible_gpa(vcpu, pte.raddr)) { |
545 | if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) { | 618 | if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) { |
546 | /* | 619 | /* |
547 | * There is already a host HPTE there, presumably | 620 | * There is already a host HPTE there, presumably |
@@ -637,42 +710,6 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac) | |||
637 | #endif | 710 | #endif |
638 | } | 711 | } |
639 | 712 | ||
640 | static int kvmppc_read_inst(struct kvm_vcpu *vcpu) | ||
641 | { | ||
642 | ulong srr0 = kvmppc_get_pc(vcpu); | ||
643 | u32 last_inst = kvmppc_get_last_inst(vcpu); | ||
644 | int ret; | ||
645 | |||
646 | ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false); | ||
647 | if (ret == -ENOENT) { | ||
648 | ulong msr = kvmppc_get_msr(vcpu); | ||
649 | |||
650 | msr = kvmppc_set_field(msr, 33, 33, 1); | ||
651 | msr = kvmppc_set_field(msr, 34, 36, 0); | ||
652 | msr = kvmppc_set_field(msr, 42, 47, 0); | ||
653 | kvmppc_set_msr_fast(vcpu, msr); | ||
654 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE); | ||
655 | return EMULATE_AGAIN; | ||
656 | } | ||
657 | |||
658 | return EMULATE_DONE; | ||
659 | } | ||
660 | |||
661 | static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr) | ||
662 | { | ||
663 | |||
664 | /* Need to do paired single emulation? */ | ||
665 | if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)) | ||
666 | return EMULATE_DONE; | ||
667 | |||
668 | /* Read out the instruction */ | ||
669 | if (kvmppc_read_inst(vcpu) == EMULATE_DONE) | ||
670 | /* Need to emulate */ | ||
671 | return EMULATE_FAIL; | ||
672 | |||
673 | return EMULATE_AGAIN; | ||
674 | } | ||
675 | |||
676 | /* Handle external providers (FPU, Altivec, VSX) */ | 713 | /* Handle external providers (FPU, Altivec, VSX) */ |
677 | static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, | 714 | static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, |
678 | ulong msr) | 715 | ulong msr) |
@@ -834,6 +871,15 @@ static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac) | |||
834 | 871 | ||
835 | return RESUME_GUEST; | 872 | return RESUME_GUEST; |
836 | } | 873 | } |
874 | |||
875 | void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr) | ||
876 | { | ||
877 | if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) { | ||
878 | /* TAR got dropped, drop it in shadow too */ | ||
879 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); | ||
880 | } | ||
881 | vcpu->arch.fscr = fscr; | ||
882 | } | ||
837 | #endif | 883 | #endif |
838 | 884 | ||
839 | int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | 885 | int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, |
@@ -858,6 +904,9 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
858 | ulong shadow_srr1 = vcpu->arch.shadow_srr1; | 904 | ulong shadow_srr1 = vcpu->arch.shadow_srr1; |
859 | vcpu->stat.pf_instruc++; | 905 | vcpu->stat.pf_instruc++; |
860 | 906 | ||
907 | if (kvmppc_is_split_real(vcpu)) | ||
908 | kvmppc_fixup_split_real(vcpu); | ||
909 | |||
861 | #ifdef CONFIG_PPC_BOOK3S_32 | 910 | #ifdef CONFIG_PPC_BOOK3S_32 |
862 | /* We set segments as unused segments when invalidating them. So | 911 | /* We set segments as unused segments when invalidating them. So |
863 | * treat the respective fault as segment fault. */ | 912 | * treat the respective fault as segment fault. */ |
@@ -960,6 +1009,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
960 | case BOOK3S_INTERRUPT_DECREMENTER: | 1009 | case BOOK3S_INTERRUPT_DECREMENTER: |
961 | case BOOK3S_INTERRUPT_HV_DECREMENTER: | 1010 | case BOOK3S_INTERRUPT_HV_DECREMENTER: |
962 | case BOOK3S_INTERRUPT_DOORBELL: | 1011 | case BOOK3S_INTERRUPT_DOORBELL: |
1012 | case BOOK3S_INTERRUPT_H_DOORBELL: | ||
963 | vcpu->stat.dec_exits++; | 1013 | vcpu->stat.dec_exits++; |
964 | r = RESUME_GUEST; | 1014 | r = RESUME_GUEST; |
965 | break; | 1015 | break; |
@@ -977,15 +1027,24 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
977 | { | 1027 | { |
978 | enum emulation_result er; | 1028 | enum emulation_result er; |
979 | ulong flags; | 1029 | ulong flags; |
1030 | u32 last_inst; | ||
1031 | int emul; | ||
980 | 1032 | ||
981 | program_interrupt: | 1033 | program_interrupt: |
982 | flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; | 1034 | flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; |
983 | 1035 | ||
1036 | emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); | ||
1037 | if (emul != EMULATE_DONE) { | ||
1038 | r = RESUME_GUEST; | ||
1039 | break; | ||
1040 | } | ||
1041 | |||
984 | if (kvmppc_get_msr(vcpu) & MSR_PR) { | 1042 | if (kvmppc_get_msr(vcpu) & MSR_PR) { |
985 | #ifdef EXIT_DEBUG | 1043 | #ifdef EXIT_DEBUG |
986 | printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); | 1044 | pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n", |
1045 | kvmppc_get_pc(vcpu), last_inst); | ||
987 | #endif | 1046 | #endif |
988 | if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) != | 1047 | if ((last_inst & 0xff0007ff) != |
989 | (INS_DCBZ & 0xfffffff7)) { | 1048 | (INS_DCBZ & 0xfffffff7)) { |
990 | kvmppc_core_queue_program(vcpu, flags); | 1049 | kvmppc_core_queue_program(vcpu, flags); |
991 | r = RESUME_GUEST; | 1050 | r = RESUME_GUEST; |
@@ -1004,7 +1063,7 @@ program_interrupt: | |||
1004 | break; | 1063 | break; |
1005 | case EMULATE_FAIL: | 1064 | case EMULATE_FAIL: |
1006 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", | 1065 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", |
1007 | __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); | 1066 | __func__, kvmppc_get_pc(vcpu), last_inst); |
1008 | kvmppc_core_queue_program(vcpu, flags); | 1067 | kvmppc_core_queue_program(vcpu, flags); |
1009 | r = RESUME_GUEST; | 1068 | r = RESUME_GUEST; |
1010 | break; | 1069 | break; |
@@ -1021,8 +1080,23 @@ program_interrupt: | |||
1021 | break; | 1080 | break; |
1022 | } | 1081 | } |
1023 | case BOOK3S_INTERRUPT_SYSCALL: | 1082 | case BOOK3S_INTERRUPT_SYSCALL: |
1083 | { | ||
1084 | u32 last_sc; | ||
1085 | int emul; | ||
1086 | |||
1087 | /* Get last sc for papr */ | ||
1088 | if (vcpu->arch.papr_enabled) { | ||
1089 | /* The sc instruction points SRR0 to the next inst */ | ||
1090 | emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc); | ||
1091 | if (emul != EMULATE_DONE) { | ||
1092 | kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4); | ||
1093 | r = RESUME_GUEST; | ||
1094 | break; | ||
1095 | } | ||
1096 | } | ||
1097 | |||
1024 | if (vcpu->arch.papr_enabled && | 1098 | if (vcpu->arch.papr_enabled && |
1025 | (kvmppc_get_last_sc(vcpu) == 0x44000022) && | 1099 | (last_sc == 0x44000022) && |
1026 | !(kvmppc_get_msr(vcpu) & MSR_PR)) { | 1100 | !(kvmppc_get_msr(vcpu) & MSR_PR)) { |
1027 | /* SC 1 papr hypercalls */ | 1101 | /* SC 1 papr hypercalls */ |
1028 | ulong cmd = kvmppc_get_gpr(vcpu, 3); | 1102 | ulong cmd = kvmppc_get_gpr(vcpu, 3); |
@@ -1067,36 +1141,51 @@ program_interrupt: | |||
1067 | r = RESUME_GUEST; | 1141 | r = RESUME_GUEST; |
1068 | } | 1142 | } |
1069 | break; | 1143 | break; |
1144 | } | ||
1070 | case BOOK3S_INTERRUPT_FP_UNAVAIL: | 1145 | case BOOK3S_INTERRUPT_FP_UNAVAIL: |
1071 | case BOOK3S_INTERRUPT_ALTIVEC: | 1146 | case BOOK3S_INTERRUPT_ALTIVEC: |
1072 | case BOOK3S_INTERRUPT_VSX: | 1147 | case BOOK3S_INTERRUPT_VSX: |
1073 | { | 1148 | { |
1074 | int ext_msr = 0; | 1149 | int ext_msr = 0; |
1150 | int emul; | ||
1151 | u32 last_inst; | ||
1152 | |||
1153 | if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) { | ||
1154 | /* Do paired single instruction emulation */ | ||
1155 | emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, | ||
1156 | &last_inst); | ||
1157 | if (emul == EMULATE_DONE) | ||
1158 | goto program_interrupt; | ||
1159 | else | ||
1160 | r = RESUME_GUEST; | ||
1075 | 1161 | ||
1076 | switch (exit_nr) { | 1162 | break; |
1077 | case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP; break; | ||
1078 | case BOOK3S_INTERRUPT_ALTIVEC: ext_msr = MSR_VEC; break; | ||
1079 | case BOOK3S_INTERRUPT_VSX: ext_msr = MSR_VSX; break; | ||
1080 | } | 1163 | } |
1081 | 1164 | ||
1082 | switch (kvmppc_check_ext(vcpu, exit_nr)) { | 1165 | /* Enable external provider */ |
1083 | case EMULATE_DONE: | 1166 | switch (exit_nr) { |
1084 | /* everything ok - let's enable the ext */ | 1167 | case BOOK3S_INTERRUPT_FP_UNAVAIL: |
1085 | r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr); | 1168 | ext_msr = MSR_FP; |
1086 | break; | 1169 | break; |
1087 | case EMULATE_FAIL: | 1170 | |
1088 | /* we need to emulate this instruction */ | 1171 | case BOOK3S_INTERRUPT_ALTIVEC: |
1089 | goto program_interrupt; | 1172 | ext_msr = MSR_VEC; |
1090 | break; | 1173 | break; |
1091 | default: | 1174 | |
1092 | /* nothing to worry about - go again */ | 1175 | case BOOK3S_INTERRUPT_VSX: |
1176 | ext_msr = MSR_VSX; | ||
1093 | break; | 1177 | break; |
1094 | } | 1178 | } |
1179 | |||
1180 | r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr); | ||
1095 | break; | 1181 | break; |
1096 | } | 1182 | } |
1097 | case BOOK3S_INTERRUPT_ALIGNMENT: | 1183 | case BOOK3S_INTERRUPT_ALIGNMENT: |
1098 | if (kvmppc_read_inst(vcpu) == EMULATE_DONE) { | 1184 | { |
1099 | u32 last_inst = kvmppc_get_last_inst(vcpu); | 1185 | u32 last_inst; |
1186 | int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); | ||
1187 | |||
1188 | if (emul == EMULATE_DONE) { | ||
1100 | u32 dsisr; | 1189 | u32 dsisr; |
1101 | u64 dar; | 1190 | u64 dar; |
1102 | 1191 | ||
@@ -1110,6 +1199,7 @@ program_interrupt: | |||
1110 | } | 1199 | } |
1111 | r = RESUME_GUEST; | 1200 | r = RESUME_GUEST; |
1112 | break; | 1201 | break; |
1202 | } | ||
1113 | #ifdef CONFIG_PPC_BOOK3S_64 | 1203 | #ifdef CONFIG_PPC_BOOK3S_64 |
1114 | case BOOK3S_INTERRUPT_FAC_UNAVAIL: | 1204 | case BOOK3S_INTERRUPT_FAC_UNAVAIL: |
1115 | kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); | 1205 | kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); |
@@ -1233,6 +1323,7 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, | |||
1233 | *val = get_reg_val(id, to_book3s(vcpu)->hior); | 1323 | *val = get_reg_val(id, to_book3s(vcpu)->hior); |
1234 | break; | 1324 | break; |
1235 | case KVM_REG_PPC_LPCR: | 1325 | case KVM_REG_PPC_LPCR: |
1326 | case KVM_REG_PPC_LPCR_64: | ||
1236 | /* | 1327 | /* |
1237 | * We are only interested in the LPCR_ILE bit | 1328 | * We are only interested in the LPCR_ILE bit |
1238 | */ | 1329 | */ |
@@ -1268,6 +1359,7 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, | |||
1268 | to_book3s(vcpu)->hior_explicit = true; | 1359 | to_book3s(vcpu)->hior_explicit = true; |
1269 | break; | 1360 | break; |
1270 | case KVM_REG_PPC_LPCR: | 1361 | case KVM_REG_PPC_LPCR: |
1362 | case KVM_REG_PPC_LPCR_64: | ||
1271 | kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); | 1363 | kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); |
1272 | break; | 1364 | break; |
1273 | default: | 1365 | default: |
@@ -1310,8 +1402,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm, | |||
1310 | p = __get_free_page(GFP_KERNEL|__GFP_ZERO); | 1402 | p = __get_free_page(GFP_KERNEL|__GFP_ZERO); |
1311 | if (!p) | 1403 | if (!p) |
1312 | goto uninit_vcpu; | 1404 | goto uninit_vcpu; |
1313 | /* the real shared page fills the last 4k of our page */ | 1405 | vcpu->arch.shared = (void *)p; |
1314 | vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096); | ||
1315 | #ifdef CONFIG_PPC_BOOK3S_64 | 1406 | #ifdef CONFIG_PPC_BOOK3S_64 |
1316 | /* Always start the shared struct in native endian mode */ | 1407 | /* Always start the shared struct in native endian mode */ |
1317 | #ifdef __BIG_ENDIAN__ | 1408 | #ifdef __BIG_ENDIAN__ |
@@ -1568,6 +1659,11 @@ static int kvmppc_core_init_vm_pr(struct kvm *kvm) | |||
1568 | { | 1659 | { |
1569 | mutex_init(&kvm->arch.hpt_mutex); | 1660 | mutex_init(&kvm->arch.hpt_mutex); |
1570 | 1661 | ||
1662 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
1663 | /* Start out with the default set of hcalls enabled */ | ||
1664 | kvmppc_pr_init_default_hcalls(kvm); | ||
1665 | #endif | ||
1666 | |||
1571 | if (firmware_has_feature(FW_FEATURE_SET_MODE)) { | 1667 | if (firmware_has_feature(FW_FEATURE_SET_MODE)) { |
1572 | spin_lock(&kvm_global_user_count_lock); | 1668 | spin_lock(&kvm_global_user_count_lock); |
1573 | if (++kvm_global_user_count == 1) | 1669 | if (++kvm_global_user_count == 1) |
@@ -1636,6 +1732,9 @@ static struct kvmppc_ops kvm_ops_pr = { | |||
1636 | .emulate_mfspr = kvmppc_core_emulate_mfspr_pr, | 1732 | .emulate_mfspr = kvmppc_core_emulate_mfspr_pr, |
1637 | .fast_vcpu_kick = kvm_vcpu_kick, | 1733 | .fast_vcpu_kick = kvm_vcpu_kick, |
1638 | .arch_vm_ioctl = kvm_arch_vm_ioctl_pr, | 1734 | .arch_vm_ioctl = kvm_arch_vm_ioctl_pr, |
1735 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
1736 | .hcall_implemented = kvmppc_hcall_impl_pr, | ||
1737 | #endif | ||
1639 | }; | 1738 | }; |
1640 | 1739 | ||
1641 | 1740 | ||
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 52a63bfe3f07..ce3c893d509b 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -40,8 +40,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) | |||
40 | { | 40 | { |
41 | long flags = kvmppc_get_gpr(vcpu, 4); | 41 | long flags = kvmppc_get_gpr(vcpu, 4); |
42 | long pte_index = kvmppc_get_gpr(vcpu, 5); | 42 | long pte_index = kvmppc_get_gpr(vcpu, 5); |
43 | unsigned long pteg[2 * 8]; | 43 | __be64 pteg[2 * 8]; |
44 | unsigned long pteg_addr, i, *hpte; | 44 | __be64 *hpte; |
45 | unsigned long pteg_addr, i; | ||
45 | long int ret; | 46 | long int ret; |
46 | 47 | ||
47 | i = pte_index & 7; | 48 | i = pte_index & 7; |
@@ -93,8 +94,8 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu) | |||
93 | pteg = get_pteg_addr(vcpu, pte_index); | 94 | pteg = get_pteg_addr(vcpu, pte_index); |
94 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); | 95 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); |
95 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); | 96 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); |
96 | pte[0] = be64_to_cpu(pte[0]); | 97 | pte[0] = be64_to_cpu((__force __be64)pte[0]); |
97 | pte[1] = be64_to_cpu(pte[1]); | 98 | pte[1] = be64_to_cpu((__force __be64)pte[1]); |
98 | 99 | ||
99 | ret = H_NOT_FOUND; | 100 | ret = H_NOT_FOUND; |
100 | if ((pte[0] & HPTE_V_VALID) == 0 || | 101 | if ((pte[0] & HPTE_V_VALID) == 0 || |
@@ -171,8 +172,8 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu) | |||
171 | 172 | ||
172 | pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX); | 173 | pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX); |
173 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); | 174 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); |
174 | pte[0] = be64_to_cpu(pte[0]); | 175 | pte[0] = be64_to_cpu((__force __be64)pte[0]); |
175 | pte[1] = be64_to_cpu(pte[1]); | 176 | pte[1] = be64_to_cpu((__force __be64)pte[1]); |
176 | 177 | ||
177 | /* tsl = AVPN */ | 178 | /* tsl = AVPN */ |
178 | flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26; | 179 | flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26; |
@@ -211,8 +212,8 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) | |||
211 | pteg = get_pteg_addr(vcpu, pte_index); | 212 | pteg = get_pteg_addr(vcpu, pte_index); |
212 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); | 213 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); |
213 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); | 214 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); |
214 | pte[0] = be64_to_cpu(pte[0]); | 215 | pte[0] = be64_to_cpu((__force __be64)pte[0]); |
215 | pte[1] = be64_to_cpu(pte[1]); | 216 | pte[1] = be64_to_cpu((__force __be64)pte[1]); |
216 | 217 | ||
217 | ret = H_NOT_FOUND; | 218 | ret = H_NOT_FOUND; |
218 | if ((pte[0] & HPTE_V_VALID) == 0 || | 219 | if ((pte[0] & HPTE_V_VALID) == 0 || |
@@ -231,8 +232,8 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) | |||
231 | 232 | ||
232 | rb = compute_tlbie_rb(v, r, pte_index); | 233 | rb = compute_tlbie_rb(v, r, pte_index); |
233 | vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); | 234 | vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); |
234 | pte[0] = cpu_to_be64(pte[0]); | 235 | pte[0] = (__force u64)cpu_to_be64(pte[0]); |
235 | pte[1] = cpu_to_be64(pte[1]); | 236 | pte[1] = (__force u64)cpu_to_be64(pte[1]); |
236 | copy_to_user((void __user *)pteg, pte, sizeof(pte)); | 237 | copy_to_user((void __user *)pteg, pte, sizeof(pte)); |
237 | ret = H_SUCCESS; | 238 | ret = H_SUCCESS; |
238 | 239 | ||
@@ -266,6 +267,12 @@ static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd) | |||
266 | 267 | ||
267 | int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) | 268 | int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) |
268 | { | 269 | { |
270 | int rc, idx; | ||
271 | |||
272 | if (cmd <= MAX_HCALL_OPCODE && | ||
273 | !test_bit(cmd/4, vcpu->kvm->arch.enabled_hcalls)) | ||
274 | return EMULATE_FAIL; | ||
275 | |||
269 | switch (cmd) { | 276 | switch (cmd) { |
270 | case H_ENTER: | 277 | case H_ENTER: |
271 | return kvmppc_h_pr_enter(vcpu); | 278 | return kvmppc_h_pr_enter(vcpu); |
@@ -294,8 +301,11 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) | |||
294 | break; | 301 | break; |
295 | case H_RTAS: | 302 | case H_RTAS: |
296 | if (list_empty(&vcpu->kvm->arch.rtas_tokens)) | 303 | if (list_empty(&vcpu->kvm->arch.rtas_tokens)) |
297 | return RESUME_HOST; | 304 | break; |
298 | if (kvmppc_rtas_hcall(vcpu)) | 305 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
306 | rc = kvmppc_rtas_hcall(vcpu); | ||
307 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
308 | if (rc) | ||
299 | break; | 309 | break; |
300 | kvmppc_set_gpr(vcpu, 3, 0); | 310 | kvmppc_set_gpr(vcpu, 3, 0); |
301 | return EMULATE_DONE; | 311 | return EMULATE_DONE; |
@@ -303,3 +313,61 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) | |||
303 | 313 | ||
304 | return EMULATE_FAIL; | 314 | return EMULATE_FAIL; |
305 | } | 315 | } |
316 | |||
317 | int kvmppc_hcall_impl_pr(unsigned long cmd) | ||
318 | { | ||
319 | switch (cmd) { | ||
320 | case H_ENTER: | ||
321 | case H_REMOVE: | ||
322 | case H_PROTECT: | ||
323 | case H_BULK_REMOVE: | ||
324 | case H_PUT_TCE: | ||
325 | case H_CEDE: | ||
326 | #ifdef CONFIG_KVM_XICS | ||
327 | case H_XIRR: | ||
328 | case H_CPPR: | ||
329 | case H_EOI: | ||
330 | case H_IPI: | ||
331 | case H_IPOLL: | ||
332 | case H_XIRR_X: | ||
333 | #endif | ||
334 | return 1; | ||
335 | } | ||
336 | return 0; | ||
337 | } | ||
338 | |||
339 | /* | ||
340 | * List of hcall numbers to enable by default. | ||
341 | * For compatibility with old userspace, we enable by default | ||
342 | * all hcalls that were implemented before the hcall-enabling | ||
343 | * facility was added. Note this list should not include H_RTAS. | ||
344 | */ | ||
345 | static unsigned int default_hcall_list[] = { | ||
346 | H_ENTER, | ||
347 | H_REMOVE, | ||
348 | H_PROTECT, | ||
349 | H_BULK_REMOVE, | ||
350 | H_PUT_TCE, | ||
351 | H_CEDE, | ||
352 | #ifdef CONFIG_KVM_XICS | ||
353 | H_XIRR, | ||
354 | H_CPPR, | ||
355 | H_EOI, | ||
356 | H_IPI, | ||
357 | H_IPOLL, | ||
358 | H_XIRR_X, | ||
359 | #endif | ||
360 | 0 | ||
361 | }; | ||
362 | |||
363 | void kvmppc_pr_init_default_hcalls(struct kvm *kvm) | ||
364 | { | ||
365 | int i; | ||
366 | unsigned int hcall; | ||
367 | |||
368 | for (i = 0; default_hcall_list[i]; ++i) { | ||
369 | hcall = default_hcall_list[i]; | ||
370 | WARN_ON(!kvmppc_hcall_impl_pr(hcall)); | ||
371 | __set_bit(hcall / 4, kvm->arch.enabled_hcalls); | ||
372 | } | ||
373 | } | ||
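
For illustration, the hcall gating added above works on a plain bitmap: sPAPR hcall numbers are multiples of 4, so cmd/4 gives a dense bit index into kvm->arch.enabled_hcalls, which kvmppc_pr_init_default_hcalls() pre-populates from default_hcall_list[]. A minimal standalone sketch of that arithmetic follows; the array size, helper names and the 0x08/0x0c values are illustrative stand-ins (the kernel uses __set_bit()/test_bit() on the real bitmap):

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for kvm->arch.enabled_hcalls. */
static uint64_t enabled_hcalls[4];

static void enable_hcall(unsigned int hcall)
{
        /* hcall numbers are multiples of 4, so bit index = hcall / 4 */
        enabled_hcalls[(hcall / 4) / 64] |= 1ULL << ((hcall / 4) % 64);
}

static int hcall_enabled(unsigned int hcall)
{
        return (enabled_hcalls[(hcall / 4) / 64] >> ((hcall / 4) % 64)) & 1;
}

int main(void)
{
        enable_hcall(0x08);                             /* H_ENTER's usual sPAPR number */
        printf("0x08: %d\n", hcall_enabled(0x08));      /* 1 -> dispatched */
        printf("0x0c: %d\n", hcall_enabled(0x0c));      /* 0 -> EMULATE_FAIL */
        return 0;
}

Any hcall whose bit is clear drops out of kvmppc_h_pr() with EMULATE_FAIL before the switch statement is reached.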
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index ab62109fdfa3..b4c89fa6f109 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -51,7 +51,6 @@ unsigned long kvmppc_booke_handlers; | |||
51 | 51 | ||
52 | struct kvm_stats_debugfs_item debugfs_entries[] = { | 52 | struct kvm_stats_debugfs_item debugfs_entries[] = { |
53 | { "mmio", VCPU_STAT(mmio_exits) }, | 53 | { "mmio", VCPU_STAT(mmio_exits) }, |
54 | { "dcr", VCPU_STAT(dcr_exits) }, | ||
55 | { "sig", VCPU_STAT(signal_exits) }, | 54 | { "sig", VCPU_STAT(signal_exits) }, |
56 | { "itlb_r", VCPU_STAT(itlb_real_miss_exits) }, | 55 | { "itlb_r", VCPU_STAT(itlb_real_miss_exits) }, |
57 | { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) }, | 56 | { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) }, |
@@ -185,24 +184,28 @@ static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, | |||
185 | set_bit(priority, &vcpu->arch.pending_exceptions); | 184 | set_bit(priority, &vcpu->arch.pending_exceptions); |
186 | } | 185 | } |
187 | 186 | ||
188 | static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, | 187 | void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, |
189 | ulong dear_flags, ulong esr_flags) | 188 | ulong dear_flags, ulong esr_flags) |
190 | { | 189 | { |
191 | vcpu->arch.queued_dear = dear_flags; | 190 | vcpu->arch.queued_dear = dear_flags; |
192 | vcpu->arch.queued_esr = esr_flags; | 191 | vcpu->arch.queued_esr = esr_flags; |
193 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); | 192 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); |
194 | } | 193 | } |
195 | 194 | ||
196 | static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, | 195 | void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, |
197 | ulong dear_flags, ulong esr_flags) | 196 | ulong dear_flags, ulong esr_flags) |
198 | { | 197 | { |
199 | vcpu->arch.queued_dear = dear_flags; | 198 | vcpu->arch.queued_dear = dear_flags; |
200 | vcpu->arch.queued_esr = esr_flags; | 199 | vcpu->arch.queued_esr = esr_flags; |
201 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); | 200 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); |
202 | } | 201 | } |
203 | 202 | ||
204 | static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, | 203 | void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu) |
205 | ulong esr_flags) | 204 | { |
205 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS); | ||
206 | } | ||
207 | |||
208 | void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags) | ||
206 | { | 209 | { |
207 | vcpu->arch.queued_esr = esr_flags; | 210 | vcpu->arch.queued_esr = esr_flags; |
208 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); | 211 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); |
@@ -266,13 +269,8 @@ static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu) | |||
266 | 269 | ||
267 | static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) | 270 | static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) |
268 | { | 271 | { |
269 | #ifdef CONFIG_KVM_BOOKE_HV | 272 | kvmppc_set_srr0(vcpu, srr0); |
270 | mtspr(SPRN_GSRR0, srr0); | 273 | kvmppc_set_srr1(vcpu, srr1); |
271 | mtspr(SPRN_GSRR1, srr1); | ||
272 | #else | ||
273 | vcpu->arch.shared->srr0 = srr0; | ||
274 | vcpu->arch.shared->srr1 = srr1; | ||
275 | #endif | ||
276 | } | 274 | } |
277 | 275 | ||
278 | static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) | 276 | static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) |
@@ -297,51 +295,6 @@ static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) | |||
297 | vcpu->arch.mcsrr1 = srr1; | 295 | vcpu->arch.mcsrr1 = srr1; |
298 | } | 296 | } |
299 | 297 | ||
300 | static unsigned long get_guest_dear(struct kvm_vcpu *vcpu) | ||
301 | { | ||
302 | #ifdef CONFIG_KVM_BOOKE_HV | ||
303 | return mfspr(SPRN_GDEAR); | ||
304 | #else | ||
305 | return vcpu->arch.shared->dar; | ||
306 | #endif | ||
307 | } | ||
308 | |||
309 | static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear) | ||
310 | { | ||
311 | #ifdef CONFIG_KVM_BOOKE_HV | ||
312 | mtspr(SPRN_GDEAR, dear); | ||
313 | #else | ||
314 | vcpu->arch.shared->dar = dear; | ||
315 | #endif | ||
316 | } | ||
317 | |||
318 | static unsigned long get_guest_esr(struct kvm_vcpu *vcpu) | ||
319 | { | ||
320 | #ifdef CONFIG_KVM_BOOKE_HV | ||
321 | return mfspr(SPRN_GESR); | ||
322 | #else | ||
323 | return vcpu->arch.shared->esr; | ||
324 | #endif | ||
325 | } | ||
326 | |||
327 | static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr) | ||
328 | { | ||
329 | #ifdef CONFIG_KVM_BOOKE_HV | ||
330 | mtspr(SPRN_GESR, esr); | ||
331 | #else | ||
332 | vcpu->arch.shared->esr = esr; | ||
333 | #endif | ||
334 | } | ||
335 | |||
336 | static unsigned long get_guest_epr(struct kvm_vcpu *vcpu) | ||
337 | { | ||
338 | #ifdef CONFIG_KVM_BOOKE_HV | ||
339 | return mfspr(SPRN_GEPR); | ||
340 | #else | ||
341 | return vcpu->arch.epr; | ||
342 | #endif | ||
343 | } | ||
344 | |||
345 | /* Deliver the interrupt of the corresponding priority, if possible. */ | 298 | /* Deliver the interrupt of the corresponding priority, if possible. */ |
346 | static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | 299 | static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, |
347 | unsigned int priority) | 300 | unsigned int priority) |
@@ -450,9 +403,9 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
450 | 403 | ||
451 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; | 404 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; |
452 | if (update_esr == true) | 405 | if (update_esr == true) |
453 | set_guest_esr(vcpu, vcpu->arch.queued_esr); | 406 | kvmppc_set_esr(vcpu, vcpu->arch.queued_esr); |
454 | if (update_dear == true) | 407 | if (update_dear == true) |
455 | set_guest_dear(vcpu, vcpu->arch.queued_dear); | 408 | kvmppc_set_dar(vcpu, vcpu->arch.queued_dear); |
456 | if (update_epr == true) { | 409 | if (update_epr == true) { |
457 | if (vcpu->arch.epr_flags & KVMPPC_EPR_USER) | 410 | if (vcpu->arch.epr_flags & KVMPPC_EPR_USER) |
458 | kvm_make_request(KVM_REQ_EPR_EXIT, vcpu); | 411 | kvm_make_request(KVM_REQ_EPR_EXIT, vcpu); |
@@ -752,9 +705,8 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
752 | * they were actually modified by emulation. */ | 705 | * they were actually modified by emulation. */ |
753 | return RESUME_GUEST_NV; | 706 | return RESUME_GUEST_NV; |
754 | 707 | ||
755 | case EMULATE_DO_DCR: | 708 | case EMULATE_AGAIN: |
756 | run->exit_reason = KVM_EXIT_DCR; | 709 | return RESUME_GUEST; |
757 | return RESUME_HOST; | ||
758 | 710 | ||
759 | case EMULATE_FAIL: | 711 | case EMULATE_FAIL: |
760 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", | 712 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", |
@@ -866,6 +818,28 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu, | |||
866 | } | 818 | } |
867 | } | 819 | } |
868 | 820 | ||
821 | static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||
822 | enum emulation_result emulated, u32 last_inst) | ||
823 | { | ||
824 | switch (emulated) { | ||
825 | case EMULATE_AGAIN: | ||
826 | return RESUME_GUEST; | ||
827 | |||
828 | case EMULATE_FAIL: | ||
829 | pr_debug("%s: load instruction from guest address %lx failed\n", | ||
830 | __func__, vcpu->arch.pc); | ||
831 | /* For debugging, encode the failing instruction and | ||
832 | * report it to userspace. */ | ||
833 | run->hw.hardware_exit_reason = ~0ULL << 32; | ||
834 | run->hw.hardware_exit_reason |= last_inst; | ||
835 | kvmppc_core_queue_program(vcpu, ESR_PIL); | ||
836 | return RESUME_HOST; | ||
837 | |||
838 | default: | ||
839 | BUG(); | ||
840 | } | ||
841 | } | ||
842 | |||
869 | /** | 843 | /** |
870 | * kvmppc_handle_exit | 844 | * kvmppc_handle_exit |
871 | * | 845 | * |
@@ -877,6 +851,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
877 | int r = RESUME_HOST; | 851 | int r = RESUME_HOST; |
878 | int s; | 852 | int s; |
879 | int idx; | 853 | int idx; |
854 | u32 last_inst = KVM_INST_FETCH_FAILED; | ||
855 | enum emulation_result emulated = EMULATE_DONE; | ||
880 | 856 | ||
881 | /* update before a new last_exit_type is rewritten */ | 857 | /* update before a new last_exit_type is rewritten */ |
882 | kvmppc_update_timing_stats(vcpu); | 858 | kvmppc_update_timing_stats(vcpu); |
@@ -884,6 +860,20 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
884 | /* restart interrupts if they were meant for the host */ | 860 | /* restart interrupts if they were meant for the host */ |
885 | kvmppc_restart_interrupt(vcpu, exit_nr); | 861 | kvmppc_restart_interrupt(vcpu, exit_nr); |
886 | 862 | ||
863 | /* | ||
864 | * get last instruction before being preempted | ||
865 | * TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR & ESR_DATA | ||
866 | */ | ||
867 | switch (exit_nr) { | ||
868 | case BOOKE_INTERRUPT_DATA_STORAGE: | ||
869 | case BOOKE_INTERRUPT_DTLB_MISS: | ||
870 | case BOOKE_INTERRUPT_HV_PRIV: | ||
871 | emulated = kvmppc_get_last_inst(vcpu, false, &last_inst); | ||
872 | break; | ||
873 | default: | ||
874 | break; | ||
875 | } | ||
876 | |||
887 | local_irq_enable(); | 877 | local_irq_enable(); |
888 | 878 | ||
889 | trace_kvm_exit(exit_nr, vcpu); | 879 | trace_kvm_exit(exit_nr, vcpu); |
@@ -892,6 +882,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
892 | run->exit_reason = KVM_EXIT_UNKNOWN; | 882 | run->exit_reason = KVM_EXIT_UNKNOWN; |
893 | run->ready_for_interrupt_injection = 1; | 883 | run->ready_for_interrupt_injection = 1; |
894 | 884 | ||
885 | if (emulated != EMULATE_DONE) { | ||
886 | r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst); | ||
887 | goto out; | ||
888 | } | ||
889 | |||
895 | switch (exit_nr) { | 890 | switch (exit_nr) { |
896 | case BOOKE_INTERRUPT_MACHINE_CHECK: | 891 | case BOOKE_INTERRUPT_MACHINE_CHECK: |
897 | printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR)); | 892 | printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR)); |
@@ -1181,6 +1176,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
1181 | BUG(); | 1176 | BUG(); |
1182 | } | 1177 | } |
1183 | 1178 | ||
1179 | out: | ||
1184 | /* | 1180 | /* |
1185 | * To avoid clobbering exit_reason, only check for signals if we | 1181 | * To avoid clobbering exit_reason, only check for signals if we |
1186 | * aren't already exiting to userspace for some other reason. | 1182 | * aren't already exiting to userspace for some other reason. |
@@ -1265,17 +1261,17 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
1265 | regs->lr = vcpu->arch.lr; | 1261 | regs->lr = vcpu->arch.lr; |
1266 | regs->xer = kvmppc_get_xer(vcpu); | 1262 | regs->xer = kvmppc_get_xer(vcpu); |
1267 | regs->msr = vcpu->arch.shared->msr; | 1263 | regs->msr = vcpu->arch.shared->msr; |
1268 | regs->srr0 = vcpu->arch.shared->srr0; | 1264 | regs->srr0 = kvmppc_get_srr0(vcpu); |
1269 | regs->srr1 = vcpu->arch.shared->srr1; | 1265 | regs->srr1 = kvmppc_get_srr1(vcpu); |
1270 | regs->pid = vcpu->arch.pid; | 1266 | regs->pid = vcpu->arch.pid; |
1271 | regs->sprg0 = vcpu->arch.shared->sprg0; | 1267 | regs->sprg0 = kvmppc_get_sprg0(vcpu); |
1272 | regs->sprg1 = vcpu->arch.shared->sprg1; | 1268 | regs->sprg1 = kvmppc_get_sprg1(vcpu); |
1273 | regs->sprg2 = vcpu->arch.shared->sprg2; | 1269 | regs->sprg2 = kvmppc_get_sprg2(vcpu); |
1274 | regs->sprg3 = vcpu->arch.shared->sprg3; | 1270 | regs->sprg3 = kvmppc_get_sprg3(vcpu); |
1275 | regs->sprg4 = vcpu->arch.shared->sprg4; | 1271 | regs->sprg4 = kvmppc_get_sprg4(vcpu); |
1276 | regs->sprg5 = vcpu->arch.shared->sprg5; | 1272 | regs->sprg5 = kvmppc_get_sprg5(vcpu); |
1277 | regs->sprg6 = vcpu->arch.shared->sprg6; | 1273 | regs->sprg6 = kvmppc_get_sprg6(vcpu); |
1278 | regs->sprg7 = vcpu->arch.shared->sprg7; | 1274 | regs->sprg7 = kvmppc_get_sprg7(vcpu); |
1279 | 1275 | ||
1280 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | 1276 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
1281 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); | 1277 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); |
@@ -1293,17 +1289,17 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
1293 | vcpu->arch.lr = regs->lr; | 1289 | vcpu->arch.lr = regs->lr; |
1294 | kvmppc_set_xer(vcpu, regs->xer); | 1290 | kvmppc_set_xer(vcpu, regs->xer); |
1295 | kvmppc_set_msr(vcpu, regs->msr); | 1291 | kvmppc_set_msr(vcpu, regs->msr); |
1296 | vcpu->arch.shared->srr0 = regs->srr0; | 1292 | kvmppc_set_srr0(vcpu, regs->srr0); |
1297 | vcpu->arch.shared->srr1 = regs->srr1; | 1293 | kvmppc_set_srr1(vcpu, regs->srr1); |
1298 | kvmppc_set_pid(vcpu, regs->pid); | 1294 | kvmppc_set_pid(vcpu, regs->pid); |
1299 | vcpu->arch.shared->sprg0 = regs->sprg0; | 1295 | kvmppc_set_sprg0(vcpu, regs->sprg0); |
1300 | vcpu->arch.shared->sprg1 = regs->sprg1; | 1296 | kvmppc_set_sprg1(vcpu, regs->sprg1); |
1301 | vcpu->arch.shared->sprg2 = regs->sprg2; | 1297 | kvmppc_set_sprg2(vcpu, regs->sprg2); |
1302 | vcpu->arch.shared->sprg3 = regs->sprg3; | 1298 | kvmppc_set_sprg3(vcpu, regs->sprg3); |
1303 | vcpu->arch.shared->sprg4 = regs->sprg4; | 1299 | kvmppc_set_sprg4(vcpu, regs->sprg4); |
1304 | vcpu->arch.shared->sprg5 = regs->sprg5; | 1300 | kvmppc_set_sprg5(vcpu, regs->sprg5); |
1305 | vcpu->arch.shared->sprg6 = regs->sprg6; | 1301 | kvmppc_set_sprg6(vcpu, regs->sprg6); |
1306 | vcpu->arch.shared->sprg7 = regs->sprg7; | 1302 | kvmppc_set_sprg7(vcpu, regs->sprg7); |
1307 | 1303 | ||
1308 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | 1304 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
1309 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); | 1305 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); |
@@ -1321,8 +1317,8 @@ static void get_sregs_base(struct kvm_vcpu *vcpu, | |||
1321 | sregs->u.e.csrr0 = vcpu->arch.csrr0; | 1317 | sregs->u.e.csrr0 = vcpu->arch.csrr0; |
1322 | sregs->u.e.csrr1 = vcpu->arch.csrr1; | 1318 | sregs->u.e.csrr1 = vcpu->arch.csrr1; |
1323 | sregs->u.e.mcsr = vcpu->arch.mcsr; | 1319 | sregs->u.e.mcsr = vcpu->arch.mcsr; |
1324 | sregs->u.e.esr = get_guest_esr(vcpu); | 1320 | sregs->u.e.esr = kvmppc_get_esr(vcpu); |
1325 | sregs->u.e.dear = get_guest_dear(vcpu); | 1321 | sregs->u.e.dear = kvmppc_get_dar(vcpu); |
1326 | sregs->u.e.tsr = vcpu->arch.tsr; | 1322 | sregs->u.e.tsr = vcpu->arch.tsr; |
1327 | sregs->u.e.tcr = vcpu->arch.tcr; | 1323 | sregs->u.e.tcr = vcpu->arch.tcr; |
1328 | sregs->u.e.dec = kvmppc_get_dec(vcpu, tb); | 1324 | sregs->u.e.dec = kvmppc_get_dec(vcpu, tb); |
@@ -1339,8 +1335,8 @@ static int set_sregs_base(struct kvm_vcpu *vcpu, | |||
1339 | vcpu->arch.csrr0 = sregs->u.e.csrr0; | 1335 | vcpu->arch.csrr0 = sregs->u.e.csrr0; |
1340 | vcpu->arch.csrr1 = sregs->u.e.csrr1; | 1336 | vcpu->arch.csrr1 = sregs->u.e.csrr1; |
1341 | vcpu->arch.mcsr = sregs->u.e.mcsr; | 1337 | vcpu->arch.mcsr = sregs->u.e.mcsr; |
1342 | set_guest_esr(vcpu, sregs->u.e.esr); | 1338 | kvmppc_set_esr(vcpu, sregs->u.e.esr); |
1343 | set_guest_dear(vcpu, sregs->u.e.dear); | 1339 | kvmppc_set_dar(vcpu, sregs->u.e.dear); |
1344 | vcpu->arch.vrsave = sregs->u.e.vrsave; | 1340 | vcpu->arch.vrsave = sregs->u.e.vrsave; |
1345 | kvmppc_set_tcr(vcpu, sregs->u.e.tcr); | 1341 | kvmppc_set_tcr(vcpu, sregs->u.e.tcr); |
1346 | 1342 | ||
@@ -1493,7 +1489,7 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) | |||
1493 | val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2); | 1489 | val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2); |
1494 | break; | 1490 | break; |
1495 | case KVM_REG_PPC_EPR: { | 1491 | case KVM_REG_PPC_EPR: { |
1496 | u32 epr = get_guest_epr(vcpu); | 1492 | u32 epr = kvmppc_get_epr(vcpu); |
1497 | val = get_reg_val(reg->id, epr); | 1493 | val = get_reg_val(reg->id, epr); |
1498 | break; | 1494 | break; |
1499 | } | 1495 | } |
@@ -1788,6 +1784,57 @@ void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set) | |||
1788 | #endif | 1784 | #endif |
1789 | } | 1785 | } |
1790 | 1786 | ||
1787 | int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid, | ||
1788 | enum xlate_readwrite xlrw, struct kvmppc_pte *pte) | ||
1789 | { | ||
1790 | int gtlb_index; | ||
1791 | gpa_t gpaddr; | ||
1792 | |||
1793 | #ifdef CONFIG_KVM_E500V2 | ||
1794 | if (!(vcpu->arch.shared->msr & MSR_PR) && | ||
1795 | (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) { | ||
1796 | pte->eaddr = eaddr; | ||
1797 | pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) | | ||
1798 | (eaddr & ~PAGE_MASK); | ||
1799 | pte->vpage = eaddr >> PAGE_SHIFT; | ||
1800 | pte->may_read = true; | ||
1801 | pte->may_write = true; | ||
1802 | pte->may_execute = true; | ||
1803 | |||
1804 | return 0; | ||
1805 | } | ||
1806 | #endif | ||
1807 | |||
1808 | /* Check the guest TLB. */ | ||
1809 | switch (xlid) { | ||
1810 | case XLATE_INST: | ||
1811 | gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr); | ||
1812 | break; | ||
1813 | case XLATE_DATA: | ||
1814 | gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr); | ||
1815 | break; | ||
1816 | default: | ||
1817 | BUG(); | ||
1818 | } | ||
1819 | |||
1820 | /* Do we have a TLB entry at all? */ | ||
1821 | if (gtlb_index < 0) | ||
1822 | return -ENOENT; | ||
1823 | |||
1824 | gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); | ||
1825 | |||
1826 | pte->eaddr = eaddr; | ||
1827 | pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK); | ||
1828 | pte->vpage = eaddr >> PAGE_SHIFT; | ||
1829 | |||
1830 | /* XXX read permissions from the guest TLB */ | ||
1831 | pte->may_read = true; | ||
1832 | pte->may_write = true; | ||
1833 | pte->may_execute = true; | ||
1834 | |||
1835 | return 0; | ||
1836 | } | ||
1837 | |||
1791 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | 1838 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
1792 | struct kvm_guest_debug *dbg) | 1839 | struct kvm_guest_debug *dbg) |
1793 | { | 1840 | { |
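
kvmppc_xlate() above resolves a guest effective address to a guest-physical one via the guest TLB, so common code can check permissions before touching memory. A hedged sketch of a possible caller, in the spirit of the shared kvmppc_ld()/kvmppc_st() this series introduces; the helper name load_guest_u32, the XLATE_READ enumerator and the exact error handling are assumptions for illustration, not kernel code:

/* Hypothetical caller, sketched in kernel style; not a drop-in function. */
static int load_guest_u32(struct kvm_vcpu *vcpu, ulong eaddr, u32 *val)
{
        struct kvmppc_pte pte;
        int r;

        r = kvmppc_xlate(vcpu, eaddr, XLATE_DATA, XLATE_READ, &pte);
        if (r < 0)
                return r;       /* e.g. -ENOENT: no guest TLB entry for eaddr */

        if (!pte.may_read)
                return -EPERM;

        /* pte.raddr is the guest-physical address backing eaddr */
        return kvm_read_guest(vcpu->kvm, pte.raddr, val, sizeof(*val));
}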
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h index b632cd35919b..f753543c56fa 100644 --- a/arch/powerpc/kvm/booke.h +++ b/arch/powerpc/kvm/booke.h | |||
@@ -99,13 +99,6 @@ enum int_class { | |||
99 | 99 | ||
100 | void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type); | 100 | void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type); |
101 | 101 | ||
102 | extern void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu); | ||
103 | extern int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||
104 | unsigned int inst, int *advance); | ||
105 | extern int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, | ||
106 | ulong spr_val); | ||
107 | extern int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn, | ||
108 | ulong *spr_val); | ||
109 | extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu); | 102 | extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu); |
110 | extern int kvmppc_core_emulate_op_e500(struct kvm_run *run, | 103 | extern int kvmppc_core_emulate_op_e500(struct kvm_run *run, |
111 | struct kvm_vcpu *vcpu, | 104 | struct kvm_vcpu *vcpu, |
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c index 27a4b2877c10..28c158881d23 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c | |||
@@ -165,16 +165,16 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) | |||
165 | * guest (PR-mode only). | 165 | * guest (PR-mode only). |
166 | */ | 166 | */ |
167 | case SPRN_SPRG4: | 167 | case SPRN_SPRG4: |
168 | vcpu->arch.shared->sprg4 = spr_val; | 168 | kvmppc_set_sprg4(vcpu, spr_val); |
169 | break; | 169 | break; |
170 | case SPRN_SPRG5: | 170 | case SPRN_SPRG5: |
171 | vcpu->arch.shared->sprg5 = spr_val; | 171 | kvmppc_set_sprg5(vcpu, spr_val); |
172 | break; | 172 | break; |
173 | case SPRN_SPRG6: | 173 | case SPRN_SPRG6: |
174 | vcpu->arch.shared->sprg6 = spr_val; | 174 | kvmppc_set_sprg6(vcpu, spr_val); |
175 | break; | 175 | break; |
176 | case SPRN_SPRG7: | 176 | case SPRN_SPRG7: |
177 | vcpu->arch.shared->sprg7 = spr_val; | 177 | kvmppc_set_sprg7(vcpu, spr_val); |
178 | break; | 178 | break; |
179 | 179 | ||
180 | case SPRN_IVPR: | 180 | case SPRN_IVPR: |
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index 2c6deb5ef2fe..84c308a9a371 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <asm/ppc_asm.h> | 21 | #include <asm/ppc_asm.h> |
22 | #include <asm/kvm_asm.h> | 22 | #include <asm/kvm_asm.h> |
23 | #include <asm/reg.h> | 23 | #include <asm/reg.h> |
24 | #include <asm/mmu-44x.h> | ||
25 | #include <asm/page.h> | 24 | #include <asm/page.h> |
26 | #include <asm/asm-offsets.h> | 25 | #include <asm/asm-offsets.h> |
27 | 26 | ||
@@ -424,10 +423,6 @@ lightweight_exit: | |||
424 | mtspr SPRN_PID1, r3 | 423 | mtspr SPRN_PID1, r3 |
425 | #endif | 424 | #endif |
426 | 425 | ||
427 | #ifdef CONFIG_44x | ||
428 | iccci 0, 0 /* XXX hack */ | ||
429 | #endif | ||
430 | |||
431 | /* Load some guest volatiles. */ | 426 | /* Load some guest volatiles. */ |
432 | lwz r0, VCPU_GPR(R0)(r4) | 427 | lwz r0, VCPU_GPR(R0)(r4) |
433 | lwz r2, VCPU_GPR(R2)(r4) | 428 | lwz r2, VCPU_GPR(R2)(r4) |
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S index a1712b818a5f..e9fa56a911fd 100644 --- a/arch/powerpc/kvm/bookehv_interrupts.S +++ b/arch/powerpc/kvm/bookehv_interrupts.S | |||
@@ -24,12 +24,10 @@ | |||
24 | #include <asm/ppc_asm.h> | 24 | #include <asm/ppc_asm.h> |
25 | #include <asm/kvm_asm.h> | 25 | #include <asm/kvm_asm.h> |
26 | #include <asm/reg.h> | 26 | #include <asm/reg.h> |
27 | #include <asm/mmu-44x.h> | ||
28 | #include <asm/page.h> | 27 | #include <asm/page.h> |
29 | #include <asm/asm-compat.h> | 28 | #include <asm/asm-compat.h> |
30 | #include <asm/asm-offsets.h> | 29 | #include <asm/asm-offsets.h> |
31 | #include <asm/bitsperlong.h> | 30 | #include <asm/bitsperlong.h> |
32 | #include <asm/thread_info.h> | ||
33 | 31 | ||
34 | #ifdef CONFIG_64BIT | 32 | #ifdef CONFIG_64BIT |
35 | #include <asm/exception-64e.h> | 33 | #include <asm/exception-64e.h> |
@@ -122,38 +120,14 @@ | |||
122 | 1: | 120 | 1: |
123 | 121 | ||
124 | .if \flags & NEED_EMU | 122 | .if \flags & NEED_EMU |
125 | /* | ||
126 | * This assumes you have external PID support. | ||
127 | * To support a bookehv CPU without external PID, you'll | ||
128 | * need to look up the TLB entry and create a temporary mapping. | ||
129 | * | ||
130 | * FIXME: we don't currently handle if the lwepx faults. PR-mode | ||
131 | * booke doesn't handle it either. Since Linux doesn't use | ||
132 | * broadcast tlbivax anymore, the only way this should happen is | ||
133 | * if the guest maps its memory execute-but-not-read, or if we | ||
134 | * somehow take a TLB miss in the middle of this entry code and | ||
135 | * evict the relevant entry. On e500mc, all kernel lowmem is | ||
136 | * bolted into TLB1 large page mappings, and we don't use | ||
137 | * broadcast invalidates, so we should not take a TLB miss here. | ||
138 | * | ||
139 | * Later we'll need to deal with faults here. Disallowing guest | ||
140 | * mappings that are execute-but-not-read could be an option on | ||
141 | * e500mc, but not on chips with an LRAT if it is used. | ||
142 | */ | ||
143 | |||
144 | mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */ | ||
145 | PPC_STL r15, VCPU_GPR(R15)(r4) | 123 | PPC_STL r15, VCPU_GPR(R15)(r4) |
146 | PPC_STL r16, VCPU_GPR(R16)(r4) | 124 | PPC_STL r16, VCPU_GPR(R16)(r4) |
147 | PPC_STL r17, VCPU_GPR(R17)(r4) | 125 | PPC_STL r17, VCPU_GPR(R17)(r4) |
148 | PPC_STL r18, VCPU_GPR(R18)(r4) | 126 | PPC_STL r18, VCPU_GPR(R18)(r4) |
149 | PPC_STL r19, VCPU_GPR(R19)(r4) | 127 | PPC_STL r19, VCPU_GPR(R19)(r4) |
150 | mr r8, r3 | ||
151 | PPC_STL r20, VCPU_GPR(R20)(r4) | 128 | PPC_STL r20, VCPU_GPR(R20)(r4) |
152 | rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS | ||
153 | PPC_STL r21, VCPU_GPR(R21)(r4) | 129 | PPC_STL r21, VCPU_GPR(R21)(r4) |
154 | rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR | ||
155 | PPC_STL r22, VCPU_GPR(R22)(r4) | 130 | PPC_STL r22, VCPU_GPR(R22)(r4) |
156 | rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID | ||
157 | PPC_STL r23, VCPU_GPR(R23)(r4) | 131 | PPC_STL r23, VCPU_GPR(R23)(r4) |
158 | PPC_STL r24, VCPU_GPR(R24)(r4) | 132 | PPC_STL r24, VCPU_GPR(R24)(r4) |
159 | PPC_STL r25, VCPU_GPR(R25)(r4) | 133 | PPC_STL r25, VCPU_GPR(R25)(r4) |
@@ -163,33 +137,15 @@ | |||
163 | PPC_STL r29, VCPU_GPR(R29)(r4) | 137 | PPC_STL r29, VCPU_GPR(R29)(r4) |
164 | PPC_STL r30, VCPU_GPR(R30)(r4) | 138 | PPC_STL r30, VCPU_GPR(R30)(r4) |
165 | PPC_STL r31, VCPU_GPR(R31)(r4) | 139 | PPC_STL r31, VCPU_GPR(R31)(r4) |
166 | mtspr SPRN_EPLC, r8 | ||
167 | |||
168 | /* disable preemption, so we are sure we hit the fixup handler */ | ||
169 | CURRENT_THREAD_INFO(r8, r1) | ||
170 | li r7, 1 | ||
171 | stw r7, TI_PREEMPT(r8) | ||
172 | |||
173 | isync | ||
174 | 140 | ||
175 | /* | 141 | /* |
176 | * In case the read goes wrong, we catch it and write an invalid value | 142 | * We don't use external PID support. lwepx faults would need to be |
177 | * in LAST_INST instead. | 143 | * handled by KVM and this implies additional code in DO_KVM (for |
144 | * DTB_MISS, DSI and LRAT) to check ESR[EPID] and EPLC[EGS] which | ||
145 | * is too intrusive for the host. Get last instruction in | ||
146 | * kvmppc_get_last_inst(). | ||
178 | */ | 147 | */ |
179 | 1: lwepx r9, 0, r5 | 148 | li r9, KVM_INST_FETCH_FAILED |
180 | 2: | ||
181 | .section .fixup, "ax" | ||
182 | 3: li r9, KVM_INST_FETCH_FAILED | ||
183 | b 2b | ||
184 | .previous | ||
185 | .section __ex_table,"a" | ||
186 | PPC_LONG_ALIGN | ||
187 | PPC_LONG 1b,3b | ||
188 | .previous | ||
189 | |||
190 | mtspr SPRN_EPLC, r3 | ||
191 | li r7, 0 | ||
192 | stw r7, TI_PREEMPT(r8) | ||
193 | stw r9, VCPU_LAST_INST(r4) | 149 | stw r9, VCPU_LAST_INST(r4) |
194 | .endif | 150 | .endif |
195 | 151 | ||
@@ -441,6 +397,7 @@ _GLOBAL(kvmppc_resume_host) | |||
441 | #ifdef CONFIG_64BIT | 397 | #ifdef CONFIG_64BIT |
442 | PPC_LL r3, PACA_SPRG_VDSO(r13) | 398 | PPC_LL r3, PACA_SPRG_VDSO(r13) |
443 | #endif | 399 | #endif |
400 | mfspr r5, SPRN_SPRG9 | ||
444 | PPC_STD(r6, VCPU_SHARED_SPRG4, r11) | 401 | PPC_STD(r6, VCPU_SHARED_SPRG4, r11) |
445 | mfspr r8, SPRN_SPRG6 | 402 | mfspr r8, SPRN_SPRG6 |
446 | PPC_STD(r7, VCPU_SHARED_SPRG5, r11) | 403 | PPC_STD(r7, VCPU_SHARED_SPRG5, r11) |
@@ -448,6 +405,7 @@ _GLOBAL(kvmppc_resume_host) | |||
448 | #ifdef CONFIG_64BIT | 405 | #ifdef CONFIG_64BIT |
449 | mtspr SPRN_SPRG_VDSO_WRITE, r3 | 406 | mtspr SPRN_SPRG_VDSO_WRITE, r3 |
450 | #endif | 407 | #endif |
408 | PPC_STD(r5, VCPU_SPRG9, r4) | ||
451 | PPC_STD(r8, VCPU_SHARED_SPRG6, r11) | 409 | PPC_STD(r8, VCPU_SHARED_SPRG6, r11) |
452 | mfxer r3 | 410 | mfxer r3 |
453 | PPC_STD(r9, VCPU_SHARED_SPRG7, r11) | 411 | PPC_STD(r9, VCPU_SHARED_SPRG7, r11) |
@@ -682,7 +640,9 @@ lightweight_exit: | |||
682 | mtspr SPRN_SPRG5W, r6 | 640 | mtspr SPRN_SPRG5W, r6 |
683 | PPC_LD(r8, VCPU_SHARED_SPRG7, r11) | 641 | PPC_LD(r8, VCPU_SHARED_SPRG7, r11) |
684 | mtspr SPRN_SPRG6W, r7 | 642 | mtspr SPRN_SPRG6W, r7 |
643 | PPC_LD(r5, VCPU_SPRG9, r4) | ||
685 | mtspr SPRN_SPRG7W, r8 | 644 | mtspr SPRN_SPRG7W, r8 |
645 | mtspr SPRN_SPRG9, r5 | ||
686 | 646 | ||
687 | /* Load some guest volatiles. */ | 647 | /* Load some guest volatiles. */ |
688 | PPC_LL r3, VCPU_LR(r4) | 648 | PPC_LL r3, VCPU_LR(r4) |
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 002d51764143..c99c40e9182a 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c | |||
@@ -250,6 +250,14 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va | |||
250 | spr_val); | 250 | spr_val); |
251 | break; | 251 | break; |
252 | 252 | ||
253 | case SPRN_PWRMGTCR0: | ||
254 | /* | ||
255 | * Guest relies on host power management configurations | ||
256 | * Treat the request as a general store | ||
257 | */ | ||
258 | vcpu->arch.pwrmgtcr0 = spr_val; | ||
259 | break; | ||
260 | |||
253 | /* extra exceptions */ | 261 | /* extra exceptions */ |
254 | case SPRN_IVOR32: | 262 | case SPRN_IVOR32: |
255 | vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val; | 263 | vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val; |
@@ -368,6 +376,10 @@ int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_v | |||
368 | *spr_val = vcpu->arch.eptcfg; | 376 | *spr_val = vcpu->arch.eptcfg; |
369 | break; | 377 | break; |
370 | 378 | ||
379 | case SPRN_PWRMGTCR0: | ||
380 | *spr_val = vcpu->arch.pwrmgtcr0; | ||
381 | break; | ||
382 | |||
371 | /* extra exceptions */ | 383 | /* extra exceptions */ |
372 | case SPRN_IVOR32: | 384 | case SPRN_IVOR32: |
373 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; | 385 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; |
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index 86903d3f5a03..08f14bb57897 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c | |||
@@ -107,11 +107,15 @@ static u32 get_host_mas0(unsigned long eaddr) | |||
107 | { | 107 | { |
108 | unsigned long flags; | 108 | unsigned long flags; |
109 | u32 mas0; | 109 | u32 mas0; |
110 | u32 mas4; | ||
110 | 111 | ||
111 | local_irq_save(flags); | 112 | local_irq_save(flags); |
112 | mtspr(SPRN_MAS6, 0); | 113 | mtspr(SPRN_MAS6, 0); |
114 | mas4 = mfspr(SPRN_MAS4); | ||
115 | mtspr(SPRN_MAS4, mas4 & ~MAS4_TLBSEL_MASK); | ||
113 | asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET)); | 116 | asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET)); |
114 | mas0 = mfspr(SPRN_MAS0); | 117 | mas0 = mfspr(SPRN_MAS0); |
118 | mtspr(SPRN_MAS4, mas4); | ||
115 | local_irq_restore(flags); | 119 | local_irq_restore(flags); |
116 | 120 | ||
117 | return mas0; | 121 | return mas0; |
@@ -607,6 +611,104 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, | |||
607 | } | 611 | } |
608 | } | 612 | } |
609 | 613 | ||
614 | #ifdef CONFIG_KVM_BOOKE_HV | ||
615 | int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, | ||
616 | u32 *instr) | ||
617 | { | ||
618 | gva_t geaddr; | ||
619 | hpa_t addr; | ||
620 | hfn_t pfn; | ||
621 | hva_t eaddr; | ||
622 | u32 mas1, mas2, mas3; | ||
623 | u64 mas7_mas3; | ||
624 | struct page *page; | ||
625 | unsigned int addr_space, psize_shift; | ||
626 | bool pr; | ||
627 | unsigned long flags; | ||
628 | |||
629 | /* Search TLB for guest pc to get the real address */ | ||
630 | geaddr = kvmppc_get_pc(vcpu); | ||
631 | |||
632 | addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG; | ||
633 | |||
634 | local_irq_save(flags); | ||
635 | mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space); | ||
636 | mtspr(SPRN_MAS5, MAS5_SGS | vcpu->kvm->arch.lpid); | ||
637 | asm volatile("tlbsx 0, %[geaddr]\n" : : | ||
638 | [geaddr] "r" (geaddr)); | ||
639 | mtspr(SPRN_MAS5, 0); | ||
640 | mtspr(SPRN_MAS8, 0); | ||
641 | mas1 = mfspr(SPRN_MAS1); | ||
642 | mas2 = mfspr(SPRN_MAS2); | ||
643 | mas3 = mfspr(SPRN_MAS3); | ||
644 | #ifdef CONFIG_64BIT | ||
645 | mas7_mas3 = mfspr(SPRN_MAS7_MAS3); | ||
646 | #else | ||
647 | mas7_mas3 = ((u64)mfspr(SPRN_MAS7) << 32) | mas3; | ||
648 | #endif | ||
649 | local_irq_restore(flags); | ||
650 | |||
651 | /* | ||
652 | * If the TLB entry for guest pc was evicted, return to the guest. | ||
653 | * A valid TLB entry is likely to be found next time. | ||
654 | */ | ||
655 | if (!(mas1 & MAS1_VALID)) | ||
656 | return EMULATE_AGAIN; | ||
657 | |||
658 | /* | ||
659 | * Another thread may rewrite the TLB entry in parallel, don't | ||
660 | * execute from the address if the execute permission is not set | ||
661 | */ | ||
662 | pr = vcpu->arch.shared->msr & MSR_PR; | ||
663 | if (unlikely((pr && !(mas3 & MAS3_UX)) || | ||
664 | (!pr && !(mas3 & MAS3_SX)))) { | ||
665 | pr_err_ratelimited( | ||
666 | "%s: Instuction emulation from guest addres %08lx without execute permission\n", | ||
667 | __func__, geaddr); | ||
668 | return EMULATE_AGAIN; | ||
669 | } | ||
670 | |||
671 | /* | ||
672 | * The real address will be mapped by a cacheable, memory coherent, | ||
673 | * write-back page. Check for mismatches when LRAT is used. | ||
674 | */ | ||
675 | if (has_feature(vcpu, VCPU_FTR_MMU_V2) && | ||
676 | unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) { | ||
677 | pr_err_ratelimited( | ||
678 | "%s: Instuction emulation from guest addres %08lx mismatches storage attributes\n", | ||
679 | __func__, geaddr); | ||
680 | return EMULATE_AGAIN; | ||
681 | } | ||
682 | |||
683 | /* Get pfn */ | ||
684 | psize_shift = MAS1_GET_TSIZE(mas1) + 10; | ||
685 | addr = (mas7_mas3 & (~0ULL << psize_shift)) | | ||
686 | (geaddr & ((1ULL << psize_shift) - 1ULL)); | ||
687 | pfn = addr >> PAGE_SHIFT; | ||
688 | |||
689 | /* Guard against emulation from devices area */ | ||
690 | if (unlikely(!page_is_ram(pfn))) { | ||
691 | pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n", | ||
692 | __func__, addr); | ||
693 | return EMULATE_AGAIN; | ||
694 | } | ||
695 | |||
696 | /* Map a page and get guest's instruction */ | ||
697 | page = pfn_to_page(pfn); | ||
698 | eaddr = (unsigned long)kmap_atomic(page); | ||
699 | *instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK)); | ||
700 | kunmap_atomic((u32 *)eaddr); | ||
701 | |||
702 | return EMULATE_DONE; | ||
703 | } | ||
704 | #else | ||
705 | int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, | ||
706 | u32 *instr) | ||
707 | { | ||
708 | return EMULATE_AGAIN; | ||
709 | } | ||
710 | #endif | ||
711 | |||
610 | /************* MMU Notifiers *************/ | 712 | /************* MMU Notifiers *************/ |
611 | 713 | ||
612 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | 714 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) |
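
The pfn derivation in kvmppc_load_last_inst() splices the page-frame bits of MAS7_MAS3 with the in-page offset of the guest PC, where the page size is 1 << (tsize + 10) bytes (so a TSIZE of 2 means 4 KB). A standalone worked example with invented register values; only the masking mirrors the code above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int tsize = 2;                 /* 1 << (2 + 10) = 4 KB page */
        unsigned int psize_shift = tsize + 10;

        uint64_t mas7_mas3 = 0x2f5a6abcULL;     /* RPN plus permission bits */
        uint64_t geaddr    = 0xc0001234ULL;     /* guest PC */

        uint64_t addr = (mas7_mas3 & (~0ULL << psize_shift)) |
                        (geaddr & ((1ULL << psize_shift) - 1ULL));

        /* prints: addr=0x2f5a6234 pfn=0x2f5a6 (PAGE_SHIFT assumed to be 12) */
        printf("addr=0x%llx pfn=0x%llx\n",
               (unsigned long long)addr,
               (unsigned long long)(addr >> 12));
        return 0;
}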
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index 17e456279224..164bad2a19bf 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c | |||
@@ -110,7 +110,7 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) | |||
110 | { | 110 | { |
111 | } | 111 | } |
112 | 112 | ||
113 | static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu); | 113 | static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid); |
114 | 114 | ||
115 | static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu) | 115 | static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu) |
116 | { | 116 | { |
@@ -141,9 +141,9 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu) | |||
141 | mtspr(SPRN_GESR, vcpu->arch.shared->esr); | 141 | mtspr(SPRN_GESR, vcpu->arch.shared->esr); |
142 | 142 | ||
143 | if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || | 143 | if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || |
144 | __get_cpu_var(last_vcpu_on_cpu) != vcpu) { | 144 | __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] != vcpu) { |
145 | kvmppc_e500_tlbil_all(vcpu_e500); | 145 | kvmppc_e500_tlbil_all(vcpu_e500); |
146 | __get_cpu_var(last_vcpu_on_cpu) = vcpu; | 146 | __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] = vcpu; |
147 | } | 147 | } |
148 | 148 | ||
149 | kvmppc_load_guest_fp(vcpu); | 149 | kvmppc_load_guest_fp(vcpu); |
@@ -267,14 +267,32 @@ static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu, | |||
267 | static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id, | 267 | static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id, |
268 | union kvmppc_one_reg *val) | 268 | union kvmppc_one_reg *val) |
269 | { | 269 | { |
270 | int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); | 270 | int r = 0; |
271 | |||
272 | switch (id) { | ||
273 | case KVM_REG_PPC_SPRG9: | ||
274 | *val = get_reg_val(id, vcpu->arch.sprg9); | ||
275 | break; | ||
276 | default: | ||
277 | r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); | ||
278 | } | ||
279 | |||
271 | return r; | 280 | return r; |
272 | } | 281 | } |
273 | 282 | ||
274 | static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id, | 283 | static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id, |
275 | union kvmppc_one_reg *val) | 284 | union kvmppc_one_reg *val) |
276 | { | 285 | { |
277 | int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val); | 286 | int r = 0; |
287 | |||
288 | switch (id) { | ||
289 | case KVM_REG_PPC_SPRG9: | ||
290 | vcpu->arch.sprg9 = set_reg_val(id, *val); | ||
291 | break; | ||
292 | default: | ||
293 | r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val); | ||
294 | } | ||
295 | |||
278 | return r; | 296 | return r; |
279 | } | 297 | } |
280 | 298 | ||
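
This file also routes the new SPRG9 state through the one_reg interface (KVM_REG_PPC_SPRG9 above). A hedged userspace sketch of reading it with KVM_GET_ONE_REG; it assumes a vcpu fd obtained from KVM_CREATE_VCPU and headers new enough to define the register id, and it omits error handling:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* vcpu_fd is assumed to come from KVM_CREATE_VCPU; errors are ignored. */
static uint64_t get_sprg9(int vcpu_fd)
{
        uint64_t val = 0;
        struct kvm_one_reg reg = {
                .id   = KVM_REG_PPC_SPRG9,
                .addr = (uintptr_t)&val,
        };

        ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
        return val;
}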
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index da86d9ba3476..e96b50d0bdab 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c | |||
@@ -207,36 +207,28 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
207 | return emulated; | 207 | return emulated; |
208 | } | 208 | } |
209 | 209 | ||
210 | /* XXX to do: | ||
211 | * lhax | ||
212 | * lhaux | ||
213 | * lswx | ||
214 | * lswi | ||
215 | * stswx | ||
216 | * stswi | ||
217 | * lha | ||
218 | * lhau | ||
219 | * lmw | ||
220 | * stmw | ||
221 | * | ||
222 | */ | ||
223 | /* XXX Should probably auto-generate instruction decoding for a particular core | 210 | /* XXX Should probably auto-generate instruction decoding for a particular core |
224 | * from opcode tables in the future. */ | 211 | * from opcode tables in the future. */ |
225 | int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | 212 | int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) |
226 | { | 213 | { |
227 | u32 inst = kvmppc_get_last_inst(vcpu); | 214 | u32 inst; |
228 | int ra = get_ra(inst); | 215 | int rs, rt, sprn; |
229 | int rs = get_rs(inst); | 216 | enum emulation_result emulated; |
230 | int rt = get_rt(inst); | ||
231 | int sprn = get_sprn(inst); | ||
232 | enum emulation_result emulated = EMULATE_DONE; | ||
233 | int advance = 1; | 217 | int advance = 1; |
234 | 218 | ||
235 | /* this default type might be overwritten by subcategories */ | 219 | /* this default type might be overwritten by subcategories */ |
236 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); | 220 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); |
237 | 221 | ||
222 | emulated = kvmppc_get_last_inst(vcpu, false, &inst); | ||
223 | if (emulated != EMULATE_DONE) | ||
224 | return emulated; | ||
225 | |||
238 | pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); | 226 | pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); |
239 | 227 | ||
228 | rs = get_rs(inst); | ||
229 | rt = get_rt(inst); | ||
230 | sprn = get_sprn(inst); | ||
231 | |||
240 | switch (get_op(inst)) { | 232 | switch (get_op(inst)) { |
241 | case OP_TRAP: | 233 | case OP_TRAP: |
242 | #ifdef CONFIG_PPC_BOOK3S | 234 | #ifdef CONFIG_PPC_BOOK3S |
@@ -264,200 +256,24 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
264 | #endif | 256 | #endif |
265 | advance = 0; | 257 | advance = 0; |
266 | break; | 258 | break; |
267 | case OP_31_XOP_LWZX: | ||
268 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | ||
269 | break; | ||
270 | |||
271 | case OP_31_XOP_LBZX: | ||
272 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | ||
273 | break; | ||
274 | |||
275 | case OP_31_XOP_LBZUX: | ||
276 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | ||
277 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
278 | break; | ||
279 | |||
280 | case OP_31_XOP_STWX: | ||
281 | emulated = kvmppc_handle_store(run, vcpu, | ||
282 | kvmppc_get_gpr(vcpu, rs), | ||
283 | 4, 1); | ||
284 | break; | ||
285 | |||
286 | case OP_31_XOP_STBX: | ||
287 | emulated = kvmppc_handle_store(run, vcpu, | ||
288 | kvmppc_get_gpr(vcpu, rs), | ||
289 | 1, 1); | ||
290 | break; | ||
291 | |||
292 | case OP_31_XOP_STBUX: | ||
293 | emulated = kvmppc_handle_store(run, vcpu, | ||
294 | kvmppc_get_gpr(vcpu, rs), | ||
295 | 1, 1); | ||
296 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
297 | break; | ||
298 | |||
299 | case OP_31_XOP_LHAX: | ||
300 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | ||
301 | break; | ||
302 | |||
303 | case OP_31_XOP_LHZX: | ||
304 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
305 | break; | ||
306 | |||
307 | case OP_31_XOP_LHZUX: | ||
308 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
309 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
310 | break; | ||
311 | 259 | ||
312 | case OP_31_XOP_MFSPR: | 260 | case OP_31_XOP_MFSPR: |
313 | emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt); | 261 | emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt); |
314 | break; | 262 | break; |
315 | 263 | ||
316 | case OP_31_XOP_STHX: | ||
317 | emulated = kvmppc_handle_store(run, vcpu, | ||
318 | kvmppc_get_gpr(vcpu, rs), | ||
319 | 2, 1); | ||
320 | break; | ||
321 | |||
322 | case OP_31_XOP_STHUX: | ||
323 | emulated = kvmppc_handle_store(run, vcpu, | ||
324 | kvmppc_get_gpr(vcpu, rs), | ||
325 | 2, 1); | ||
326 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
327 | break; | ||
328 | |||
329 | case OP_31_XOP_MTSPR: | 264 | case OP_31_XOP_MTSPR: |
330 | emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs); | 265 | emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs); |
331 | break; | 266 | break; |
332 | 267 | ||
333 | case OP_31_XOP_DCBST: | ||
334 | case OP_31_XOP_DCBF: | ||
335 | case OP_31_XOP_DCBI: | ||
336 | /* Do nothing. The guest is performing dcbi because | ||
337 | * hardware DMA is not snooped by the dcache, but | ||
338 | * emulated DMA either goes through the dcache as | ||
339 | * normal writes, or the host kernel has handled dcache | ||
340 | * coherence. */ | ||
341 | break; | ||
342 | |||
343 | case OP_31_XOP_LWBRX: | ||
344 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); | ||
345 | break; | ||
346 | |||
347 | case OP_31_XOP_TLBSYNC: | 268 | case OP_31_XOP_TLBSYNC: |
348 | break; | 269 | break; |
349 | 270 | ||
350 | case OP_31_XOP_STWBRX: | ||
351 | emulated = kvmppc_handle_store(run, vcpu, | ||
352 | kvmppc_get_gpr(vcpu, rs), | ||
353 | 4, 0); | ||
354 | break; | ||
355 | |||
356 | case OP_31_XOP_LHBRX: | ||
357 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); | ||
358 | break; | ||
359 | |||
360 | case OP_31_XOP_STHBRX: | ||
361 | emulated = kvmppc_handle_store(run, vcpu, | ||
362 | kvmppc_get_gpr(vcpu, rs), | ||
363 | 2, 0); | ||
364 | break; | ||
365 | |||
366 | default: | 271 | default: |
367 | /* Attempt core-specific emulation below. */ | 272 | /* Attempt core-specific emulation below. */ |
368 | emulated = EMULATE_FAIL; | 273 | emulated = EMULATE_FAIL; |
369 | } | 274 | } |
370 | break; | 275 | break; |
371 | 276 | ||
372 | case OP_LWZ: | ||
373 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | ||
374 | break; | ||
375 | |||
376 | /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */ | ||
377 | case OP_LD: | ||
378 | rt = get_rt(inst); | ||
379 | emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); | ||
380 | break; | ||
381 | |||
382 | case OP_LWZU: | ||
383 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | ||
384 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
385 | break; | ||
386 | |||
387 | case OP_LBZ: | ||
388 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | ||
389 | break; | ||
390 | |||
391 | case OP_LBZU: | ||
392 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | ||
393 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
394 | break; | ||
395 | |||
396 | case OP_STW: | ||
397 | emulated = kvmppc_handle_store(run, vcpu, | ||
398 | kvmppc_get_gpr(vcpu, rs), | ||
399 | 4, 1); | ||
400 | break; | ||
401 | |||
402 | /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */ | ||
403 | case OP_STD: | ||
404 | rs = get_rs(inst); | ||
405 | emulated = kvmppc_handle_store(run, vcpu, | ||
406 | kvmppc_get_gpr(vcpu, rs), | ||
407 | 8, 1); | ||
408 | break; | ||
409 | |||
410 | case OP_STWU: | ||
411 | emulated = kvmppc_handle_store(run, vcpu, | ||
412 | kvmppc_get_gpr(vcpu, rs), | ||
413 | 4, 1); | ||
414 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
415 | break; | ||
416 | |||
417 | case OP_STB: | ||
418 | emulated = kvmppc_handle_store(run, vcpu, | ||
419 | kvmppc_get_gpr(vcpu, rs), | ||
420 | 1, 1); | ||
421 | break; | ||
422 | |||
423 | case OP_STBU: | ||
424 | emulated = kvmppc_handle_store(run, vcpu, | ||
425 | kvmppc_get_gpr(vcpu, rs), | ||
426 | 1, 1); | ||
427 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
428 | break; | ||
429 | |||
430 | case OP_LHZ: | ||
431 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
432 | break; | ||
433 | |||
434 | case OP_LHZU: | ||
435 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
436 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
437 | break; | ||
438 | |||
439 | case OP_LHA: | ||
440 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | ||
441 | break; | ||
442 | |||
443 | case OP_LHAU: | ||
444 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | ||
445 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
446 | break; | ||
447 | |||
448 | case OP_STH: | ||
449 | emulated = kvmppc_handle_store(run, vcpu, | ||
450 | kvmppc_get_gpr(vcpu, rs), | ||
451 | 2, 1); | ||
452 | break; | ||
453 | |||
454 | case OP_STHU: | ||
455 | emulated = kvmppc_handle_store(run, vcpu, | ||
456 | kvmppc_get_gpr(vcpu, rs), | ||
457 | 2, 1); | ||
458 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
459 | break; | ||
460 | |||
461 | default: | 277 | default: |
462 | emulated = EMULATE_FAIL; | 278 | emulated = EMULATE_FAIL; |
463 | } | 279 | } |
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c new file mode 100644 index 000000000000..0de4ffa175a9 --- /dev/null +++ b/arch/powerpc/kvm/emulate_loadstore.c | |||
@@ -0,0 +1,272 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2007 | ||
16 | * Copyright 2011 Freescale Semiconductor, Inc. | ||
17 | * | ||
18 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
19 | */ | ||
20 | |||
21 | #include <linux/jiffies.h> | ||
22 | #include <linux/hrtimer.h> | ||
23 | #include <linux/types.h> | ||
24 | #include <linux/string.h> | ||
25 | #include <linux/kvm_host.h> | ||
26 | #include <linux/clockchips.h> | ||
27 | |||
28 | #include <asm/reg.h> | ||
29 | #include <asm/time.h> | ||
30 | #include <asm/byteorder.h> | ||
31 | #include <asm/kvm_ppc.h> | ||
32 | #include <asm/disassemble.h> | ||
33 | #include <asm/ppc-opcode.h> | ||
34 | #include "timing.h" | ||
35 | #include "trace.h" | ||
36 | |||
37 | /* XXX to do: | ||
38 | * lhax | ||
39 | * lhaux | ||
40 | * lswx | ||
41 | * lswi | ||
42 | * stswx | ||
43 | * stswi | ||
44 | * lha | ||
45 | * lhau | ||
46 | * lmw | ||
47 | * stmw | ||
48 | * | ||
49 | */ | ||
50 | int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) | ||
51 | { | ||
52 | struct kvm_run *run = vcpu->run; | ||
53 | u32 inst; | ||
54 | int ra, rs, rt; | ||
55 | enum emulation_result emulated; | ||
56 | int advance = 1; | ||
57 | |||
58 | /* this default type might be overwritten by subcategories */ | ||
59 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); | ||
60 | |||
61 | emulated = kvmppc_get_last_inst(vcpu, false, &inst); | ||
62 | if (emulated != EMULATE_DONE) | ||
63 | return emulated; | ||
64 | |||
65 | ra = get_ra(inst); | ||
66 | rs = get_rs(inst); | ||
67 | rt = get_rt(inst); | ||
68 | |||
69 | switch (get_op(inst)) { | ||
70 | case 31: | ||
71 | switch (get_xop(inst)) { | ||
72 | case OP_31_XOP_LWZX: | ||
73 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | ||
74 | break; | ||
75 | |||
76 | case OP_31_XOP_LBZX: | ||
77 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | ||
78 | break; | ||
79 | |||
80 | case OP_31_XOP_LBZUX: | ||
81 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | ||
82 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
83 | break; | ||
84 | |||
85 | case OP_31_XOP_STWX: | ||
86 | emulated = kvmppc_handle_store(run, vcpu, | ||
87 | kvmppc_get_gpr(vcpu, rs), | ||
88 | 4, 1); | ||
89 | break; | ||
90 | |||
91 | case OP_31_XOP_STBX: | ||
92 | emulated = kvmppc_handle_store(run, vcpu, | ||
93 | kvmppc_get_gpr(vcpu, rs), | ||
94 | 1, 1); | ||
95 | break; | ||
96 | |||
97 | case OP_31_XOP_STBUX: | ||
98 | emulated = kvmppc_handle_store(run, vcpu, | ||
99 | kvmppc_get_gpr(vcpu, rs), | ||
100 | 1, 1); | ||
101 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
102 | break; | ||
103 | |||
104 | case OP_31_XOP_LHAX: | ||
105 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | ||
106 | break; | ||
107 | |||
108 | case OP_31_XOP_LHZX: | ||
109 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
110 | break; | ||
111 | |||
112 | case OP_31_XOP_LHZUX: | ||
113 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
114 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
115 | break; | ||
116 | |||
117 | case OP_31_XOP_STHX: | ||
118 | emulated = kvmppc_handle_store(run, vcpu, | ||
119 | kvmppc_get_gpr(vcpu, rs), | ||
120 | 2, 1); | ||
121 | break; | ||
122 | |||
123 | case OP_31_XOP_STHUX: | ||
124 | emulated = kvmppc_handle_store(run, vcpu, | ||
125 | kvmppc_get_gpr(vcpu, rs), | ||
126 | 2, 1); | ||
127 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
128 | break; | ||
129 | |||
130 | case OP_31_XOP_DCBST: | ||
131 | case OP_31_XOP_DCBF: | ||
132 | case OP_31_XOP_DCBI: | ||
133 | /* Do nothing. The guest is performing dcbi because | ||
134 | * hardware DMA is not snooped by the dcache, but | ||
135 | * emulated DMA either goes through the dcache as | ||
136 | * normal writes, or the host kernel has handled dcache | ||
137 | * coherence. */ | ||
138 | break; | ||
139 | |||
140 | case OP_31_XOP_LWBRX: | ||
141 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); | ||
142 | break; | ||
143 | |||
144 | case OP_31_XOP_STWBRX: | ||
145 | emulated = kvmppc_handle_store(run, vcpu, | ||
146 | kvmppc_get_gpr(vcpu, rs), | ||
147 | 4, 0); | ||
148 | break; | ||
149 | |||
150 | case OP_31_XOP_LHBRX: | ||
151 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); | ||
152 | break; | ||
153 | |||
154 | case OP_31_XOP_STHBRX: | ||
155 | emulated = kvmppc_handle_store(run, vcpu, | ||
156 | kvmppc_get_gpr(vcpu, rs), | ||
157 | 2, 0); | ||
158 | break; | ||
159 | |||
160 | default: | ||
161 | emulated = EMULATE_FAIL; | ||
162 | break; | ||
163 | } | ||
164 | break; | ||
165 | |||
166 | case OP_LWZ: | ||
167 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | ||
168 | break; | ||
169 | |||
170 | /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */ | ||
171 | case OP_LD: | ||
172 | rt = get_rt(inst); | ||
173 | emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); | ||
174 | break; | ||
175 | |||
176 | case OP_LWZU: | ||
177 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | ||
178 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
179 | break; | ||
180 | |||
181 | case OP_LBZ: | ||
182 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | ||
183 | break; | ||
184 | |||
185 | case OP_LBZU: | ||
186 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | ||
187 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
188 | break; | ||
189 | |||
190 | case OP_STW: | ||
191 | emulated = kvmppc_handle_store(run, vcpu, | ||
192 | kvmppc_get_gpr(vcpu, rs), | ||
193 | 4, 1); | ||
194 | break; | ||
195 | |||
196 | /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */ | ||
197 | case OP_STD: | ||
198 | rs = get_rs(inst); | ||
199 | emulated = kvmppc_handle_store(run, vcpu, | ||
200 | kvmppc_get_gpr(vcpu, rs), | ||
201 | 8, 1); | ||
202 | break; | ||
203 | |||
204 | case OP_STWU: | ||
205 | emulated = kvmppc_handle_store(run, vcpu, | ||
206 | kvmppc_get_gpr(vcpu, rs), | ||
207 | 4, 1); | ||
208 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
209 | break; | ||
210 | |||
211 | case OP_STB: | ||
212 | emulated = kvmppc_handle_store(run, vcpu, | ||
213 | kvmppc_get_gpr(vcpu, rs), | ||
214 | 1, 1); | ||
215 | break; | ||
216 | |||
217 | case OP_STBU: | ||
218 | emulated = kvmppc_handle_store(run, vcpu, | ||
219 | kvmppc_get_gpr(vcpu, rs), | ||
220 | 1, 1); | ||
221 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
222 | break; | ||
223 | |||
224 | case OP_LHZ: | ||
225 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
226 | break; | ||
227 | |||
228 | case OP_LHZU: | ||
229 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
230 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
231 | break; | ||
232 | |||
233 | case OP_LHA: | ||
234 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | ||
235 | break; | ||
236 | |||
237 | case OP_LHAU: | ||
238 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | ||
239 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
240 | break; | ||
241 | |||
242 | case OP_STH: | ||
243 | emulated = kvmppc_handle_store(run, vcpu, | ||
244 | kvmppc_get_gpr(vcpu, rs), | ||
245 | 2, 1); | ||
246 | break; | ||
247 | |||
248 | case OP_STHU: | ||
249 | emulated = kvmppc_handle_store(run, vcpu, | ||
250 | kvmppc_get_gpr(vcpu, rs), | ||
251 | 2, 1); | ||
252 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
253 | break; | ||
254 | |||
255 | default: | ||
256 | emulated = EMULATE_FAIL; | ||
257 | break; | ||
258 | } | ||
259 | |||
260 | if (emulated == EMULATE_FAIL) { | ||
261 | advance = 0; | ||
262 | kvmppc_core_queue_program(vcpu, 0); | ||
263 | } | ||
264 | |||
265 | trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated); | ||
266 | |||
267 | /* Advance past emulated instruction. */ | ||
268 | if (advance) | ||
269 | kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); | ||
270 | |||
271 | return emulated; | ||
272 | } | ||
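
The dispatch in kvmppc_emulate_loadstore() is driven by the get_op()/get_xop()/get_rt()/get_rs()/get_ra() field accessors. A standalone worked example of that extraction on a single encoding; the accessors below are re-implementations for illustration (the kernel's live in asm/disassemble.h):

#include <stdio.h>
#include <stdint.h>

/* Re-implemented field accessors: PowerPC puts the primary opcode in the
 * top 6 bits, RT in the next 5, and RA in the 5 bits after that. */
static unsigned int get_op(uint32_t inst) { return inst >> 26; }
static unsigned int get_rt(uint32_t inst) { return (inst >> 21) & 0x1f; }
static unsigned int get_ra(uint32_t inst) { return (inst >> 16) & 0x1f; }

int main(void)
{
        uint32_t inst = 0x80690004;     /* lwz r3, 4(r9) */

        /* prints: op=32 rt=3 ra=9 d=4 -- primary opcode 32 is the OP_LWZ case */
        printf("op=%u rt=%u ra=%u d=%d\n",
               get_op(inst), get_rt(inst), get_ra(inst),
               (int16_t)(inst & 0xffff));
        return 0;
}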
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 61c738ab1283..288b4bb05cbd 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -190,6 +190,25 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
 		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
 
+#ifdef CONFIG_PPC_64K_PAGES
+		/*
+		 * Make sure our 4k magic page is in the same window of a 64k
+		 * page within the guest and within the host's page.
+		 */
+		if ((vcpu->arch.magic_page_pa & 0xf000) !=
+		    ((ulong)vcpu->arch.shared & 0xf000)) {
+			void *old_shared = vcpu->arch.shared;
+			ulong shared = (ulong)vcpu->arch.shared;
+			void *new_shared;
+
+			shared &= PAGE_MASK;
+			shared |= vcpu->arch.magic_page_pa & 0xf000;
+			new_shared = (void*)shared;
+			memcpy(new_shared, old_shared, 0x1000);
+			vcpu->arch.shared = new_shared;
+		}
+#endif
+
 		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
 
 		r = EV_SUCCESS;
@@ -198,7 +217,6 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
 		r = EV_SUCCESS;
 #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
-		/* XXX Missing magic page on 44x */
 		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
 #endif
 
@@ -254,13 +272,16 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	enum emulation_result er;
 	int r;
 
-	er = kvmppc_emulate_instruction(run, vcpu);
+	er = kvmppc_emulate_loadstore(vcpu);
 	switch (er) {
 	case EMULATE_DONE:
 		/* Future optimization: only reload non-volatiles if they were
 		 * actually modified. */
 		r = RESUME_GUEST_NV;
 		break;
+	case EMULATE_AGAIN:
+		r = RESUME_GUEST;
+		break;
 	case EMULATE_DO_MMIO:
 		run->exit_reason = KVM_EXIT_MMIO;
 		/* We must reload nonvolatiles because "update" load/store
@@ -270,11 +291,15 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		r = RESUME_HOST_NV;
 		break;
 	case EMULATE_FAIL:
+	{
+		u32 last_inst;
+
+		kvmppc_get_last_inst(vcpu, false, &last_inst);
 		/* XXX Deliver Program interrupt to guest. */
-		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
-		       kvmppc_get_last_inst(vcpu));
+		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
 		r = RESUME_HOST;
 		break;
+	}
 	default:
 		WARN_ON(1);
 		r = RESUME_GUEST;
@@ -284,6 +309,81 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
 
+int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
+	      bool data)
+{
+	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
+	struct kvmppc_pte pte;
+	int r;
+
+	vcpu->stat.st++;
+
+	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
+			 XLATE_WRITE, &pte);
+	if (r < 0)
+		return r;
+
+	*eaddr = pte.raddr;
+
+	if (!pte.may_write)
+		return -EPERM;
+
+	/* Magic page override */
+	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
+	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
+	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
+		void *magic = vcpu->arch.shared;
+		magic += pte.eaddr & 0xfff;
+		memcpy(magic, ptr, size);
+		return EMULATE_DONE;
+	}
+
+	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
+		return EMULATE_DO_MMIO;
+
+	return EMULATE_DONE;
+}
+EXPORT_SYMBOL_GPL(kvmppc_st);
+
+int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
+	      bool data)
+{
+	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
+	struct kvmppc_pte pte;
+	int rc;
+
+	vcpu->stat.ld++;
+
+	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
+			  XLATE_READ, &pte);
+	if (rc)
+		return rc;
+
+	*eaddr = pte.raddr;
+
+	if (!pte.may_read)
+		return -EPERM;
+
+	if (!data && !pte.may_execute)
+		return -ENOEXEC;
+
+	/* Magic page override */
+	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
+	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
+	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
+		void *magic = vcpu->arch.shared;
+		magic += pte.eaddr & 0xfff;
+		memcpy(ptr, magic, size);
+		return EMULATE_DONE;
+	}
+
+	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
+		return EMULATE_DO_MMIO;
+
+	return EMULATE_DONE;
+}
+EXPORT_SYMBOL_GPL(kvmppc_ld);
+
 int kvm_arch_hardware_enable(void *garbage)
 {
 	return 0;
@@ -366,14 +466,20 @@ void kvm_arch_sync_events(struct kvm *kvm)
 {
 }
 
-int kvm_dev_ioctl_check_extension(long ext)
+int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 {
 	int r;
-	/* FIXME!!
-	 * Should some of this be vm ioctl ? is it possible now ?
-	 */
+	/* Assume we're using HV mode when the HV module is loaded */
 	int hv_enabled = kvmppc_hv_ops ? 1 : 0;
 
+	if (kvm) {
+		/*
+		 * Hooray - we know which VM type we're running on. Depend on
+		 * that rather than the guess above.
+		 */
+		hv_enabled = is_kvmppc_hv_enabled(kvm);
+	}
+
 	switch (ext) {
 #ifdef CONFIG_BOOKE
 	case KVM_CAP_PPC_BOOKE_SREGS:
@@ -387,6 +493,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_PPC_UNSET_IRQ:
 	case KVM_CAP_PPC_IRQ_LEVEL:
 	case KVM_CAP_ENABLE_CAP:
+	case KVM_CAP_ENABLE_CAP_VM:
 	case KVM_CAP_ONE_REG:
 	case KVM_CAP_IOEVENTFD:
 	case KVM_CAP_DEVICE_CTRL:
@@ -417,6 +524,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_PPC_ALLOC_HTAB:
 	case KVM_CAP_PPC_RTAS:
 	case KVM_CAP_PPC_FIXUP_HCALL:
+	case KVM_CAP_PPC_ENABLE_HCALL:
 #ifdef CONFIG_KVM_XICS
 	case KVM_CAP_IRQ_XICS:
 #endif
@@ -635,12 +743,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 #endif
 }
 
-static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
-				     struct kvm_run *run)
-{
-	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
-}
-
 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 				      struct kvm_run *run)
 {
@@ -837,10 +939,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		if (!vcpu->mmio_is_write)
 			kvmppc_complete_mmio_load(vcpu, run);
 		vcpu->mmio_needed = 0;
-	} else if (vcpu->arch.dcr_needed) {
-		if (!vcpu->arch.dcr_is_write)
-			kvmppc_complete_dcr_load(vcpu, run);
-		vcpu->arch.dcr_needed = 0;
 	} else if (vcpu->arch.osi_needed) {
 		u64 *gprs = run->osi.gprs;
 		int i;
@@ -1099,6 +1197,42 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
 	return 0;
 }
 
+
+static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+				   struct kvm_enable_cap *cap)
+{
+	int r;
+
+	if (cap->flags)
+		return -EINVAL;
+
+	switch (cap->cap) {
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+	case KVM_CAP_PPC_ENABLE_HCALL: {
+		unsigned long hcall = cap->args[0];
+
+		r = -EINVAL;
+		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
+		    cap->args[1] > 1)
+			break;
+		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
+			break;
+		if (cap->args[1])
+			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
+		else
+			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
+		r = 0;
+		break;
+	}
+#endif
+	default:
+		r = -EINVAL;
+		break;
+	}
+
+	return r;
+}
+
 long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg)
 {
@@ -1118,6 +1252,15 @@ long kvm_arch_vm_ioctl(struct file *filp,
 
 		break;
 	}
+	case KVM_ENABLE_CAP:
+	{
+		struct kvm_enable_cap cap;
+		r = -EFAULT;
+		if (copy_from_user(&cap, argp, sizeof(cap)))
+			goto out;
+		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
+		break;
+	}
 #ifdef CONFIG_PPC_BOOK3S_64
 	case KVM_CREATE_SPAPR_TCE: {
 		struct kvm_create_spapr_tce create_tce;
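The KVM_CAP_PPC_ENABLE_HCALL handling added above is driven from userspace through the KVM_ENABLE_CAP ioctl on the VM file descriptor. A minimal, illustrative userspace sketch follows; it is not part of the series, and it assumes vm_fd is an already-created VM fd and that the hypercall token (e.g. a value from asm/hvcall.h) is known to the caller:

/*
 * Illustrative sketch: toggle in-kernel handling of one sPAPR hypercall.
 * vm_fd and hcall are assumed to come from the usual KVM/hvcall setup.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int ppc_enable_hcall(int vm_fd, unsigned long hcall, int enable)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));		/* flags must be zero */
	cap.cap = KVM_CAP_PPC_ENABLE_HCALL;
	cap.args[0] = hcall;			/* which hypercall token */
	cap.args[1] = enable ? 1 : 0;		/* 1 = handle in kernel */

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}

Per the kernel handler above, the token must be a multiple of 4, no larger than MAX_HCALL_OPCODE, and implemented by the in-kernel Book3S code; anything else returns -EINVAL.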
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index 07b6110a4bb7..e44d2b2ea97e 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -110,7 +110,6 @@ void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu)
 
 static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = {
 	[MMIO_EXITS] = "MMIO",
-	[DCR_EXITS] = "DCR",
 	[SIGNAL_EXITS] = "SIGNAL",
 	[ITLB_REAL_MISS_EXITS] = "ITLBREAL",
 	[ITLB_VIRT_MISS_EXITS] = "ITLBVIRT",
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h
index bf191e72b2d8..3123690c82dc 100644
--- a/arch/powerpc/kvm/timing.h
+++ b/arch/powerpc/kvm/timing.h
@@ -63,9 +63,6 @@ static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type)
 	case EMULATED_INST_EXITS:
 		vcpu->stat.emulated_inst_exits++;
 		break;
-	case DCR_EXITS:
-		vcpu->stat.dcr_exits++;
-		break;
 	case DSI_EXITS:
 		vcpu->stat.dsi_exits++;
 		break;
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
index e1357cd8dc1f..a674f090dfb8 100644
--- a/arch/powerpc/kvm/trace_pr.h
+++ b/arch/powerpc/kvm/trace_pr.h
@@ -291,6 +291,26 @@ TRACE_EVENT(kvm_unmap_hva,
 	TP_printk("unmap hva 0x%lx\n", __entry->hva)
 );
 
+TRACE_EVENT(kvm_ppc_instr,
+	TP_PROTO(unsigned int inst, unsigned long _pc, unsigned int emulate),
+	TP_ARGS(inst, _pc, emulate),
+
+	TP_STRUCT__entry(
+		__field(	unsigned int,	inst		)
+		__field(	unsigned long,	pc		)
+		__field(	unsigned int,	emulate		)
+	),
+
+	TP_fast_assign(
+		__entry->inst		= inst;
+		__entry->pc		= _pc;
+		__entry->emulate	= emulate;
+	),
+
+	TP_printk("inst %u pc 0x%lx emulate %u\n",
+		  __entry->inst, __entry->pc, __entry->emulate)
+);
+
 #endif /* _TRACE_KVM_H */
 
 /* This part must be outside protection */