aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-08-07 14:35:30 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-08-07 14:35:30 -0400
commit66bb0aa077978dbb76e6283531eb3cc7a878de38 (patch)
tree62a28a96cb43df2d8f7c6eb14d4676a1e2ce3887 /arch/powerpc
parente306e3be1cbe5b11d0f8a53a557c205cf27e4979 (diff)
parentc77dcacb397519b6ade8f08201a4a90a7f4f751e (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull second round of KVM changes from Paolo Bonzini: "Here are the PPC and ARM changes for KVM, which I separated because they had small conflicts (respectively within KVM documentation, and with 3.16-rc changes). Since they were all within the subsystem, I took care of them. Stephen Rothwell reported some snags in PPC builds, but they are all fixed now; the latest linux-next report was clean. New features for ARM include: - KVM VGIC v2 emulation on GICv3 hardware - Big-Endian support for arm/arm64 (guest and host) - Debug Architecture support for arm64 (arm32 is on Christoffer's todo list) And for PPC: - Book3S: Good number of LE host fixes, enable HV on LE - Book3S HV: Add in-guest debug support This release drops support for KVM on the PPC440. As a result, the PPC merge removes more lines than it adds. :) I also included an x86 change, since Davidlohr tied it to an independent bug report and the reporter quickly provided a Tested-by; there was no reason to wait for -rc2" * tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (122 commits) KVM: Move more code under CONFIG_HAVE_KVM_IRQFD KVM: nVMX: fix "acknowledge interrupt on exit" when APICv is in use KVM: nVMX: Fix nested vmexit ack intr before load vmcs01 KVM: PPC: Enable IRQFD support for the XICS interrupt controller KVM: Give IRQFD its own separate enabling Kconfig option KVM: Move irq notifier implementation into eventfd.c KVM: Move all accesses to kvm::irq_routing into irqchip.c KVM: irqchip: Provide and use accessors for irq routing table KVM: Don't keep reference to irq routing table in irqfd struct KVM: PPC: drop duplicate tracepoint arm64: KVM: fix 64bit CP15 VM access for 32bit guests KVM: arm64: GICv3: mandate page-aligned GICV region arm64: KVM: GICv3: move system register access to msr_s/mrs_s KVM: PPC: PR: Handle FSCR feature deselects KVM: PPC: HV: Remove generic instruction emulation KVM: PPC: BOOKEHV: rename e500hv_spr to bookehv_spr KVM: PPC: Remove DCR handling KVM: PPC: Expose helper functions for data/inst faults KVM: PPC: Separate loadstore emulation from priv emulation KVM: PPC: Handle magic page in kvmppc_ld/st ...
Diffstat (limited to 'arch/powerpc')
-rw-r--r--arch/powerpc/Kconfig.debug4
-rw-r--r--arch/powerpc/configs/ppc44x_defconfig1
-rw-r--r--arch/powerpc/include/asm/asm-compat.h4
-rw-r--r--arch/powerpc/include/asm/cache.h7
-rw-r--r--arch/powerpc/include/asm/hvcall.h6
-rw-r--r--arch/powerpc/include/asm/kvm_44x.h67
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h2
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h51
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h29
-rw-r--r--arch/powerpc/include/asm/kvm_booke.h15
-rw-r--r--arch/powerpc/include/asm/kvm_host.h28
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h116
-rw-r--r--arch/powerpc/include/asm/mmu-book3e.h8
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h17
-rw-r--r--arch/powerpc/include/asm/reg.h13
-rw-r--r--arch/powerpc/include/asm/time.h9
-rw-r--r--arch/powerpc/include/uapi/asm/kvm.h2
-rw-r--r--arch/powerpc/kernel/asm-offsets.c2
-rw-r--r--arch/powerpc/kvm/44x.c237
-rw-r--r--arch/powerpc/kvm/44x_emulate.c194
-rw-r--r--arch/powerpc/kvm/44x_tlb.c528
-rw-r--r--arch/powerpc/kvm/44x_tlb.h86
-rw-r--r--arch/powerpc/kvm/Kconfig20
-rw-r--r--arch/powerpc/kvm/Makefile18
-rw-r--r--arch/powerpc/kvm/book3s.c156
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu.c2
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c7
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c5
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c145
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c28
-rw-r--r--arch/powerpc/kvm/book3s_hv.c271
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c13
-rw-r--r--arch/powerpc/kvm/book3s_hv_ras.c6
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c146
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c5
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S70
-rw-r--r--arch/powerpc/kvm/book3s_paired_singles.c38
-rw-r--r--arch/powerpc/kvm/book3s_pr.c223
-rw-r--r--arch/powerpc/kvm/book3s_pr_papr.c92
-rw-r--r--arch/powerpc/kvm/book3s_xics.c55
-rw-r--r--arch/powerpc/kvm/book3s_xics.h2
-rw-r--r--arch/powerpc/kvm/booke.c225
-rw-r--r--arch/powerpc/kvm/booke.h7
-rw-r--r--arch/powerpc/kvm/booke_emulate.c8
-rw-r--r--arch/powerpc/kvm/booke_interrupts.S5
-rw-r--r--arch/powerpc/kvm/bookehv_interrupts.S60
-rw-r--r--arch/powerpc/kvm/e500_emulate.c12
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c102
-rw-r--r--arch/powerpc/kvm/e500mc.c28
-rw-r--r--arch/powerpc/kvm/emulate.c206
-rw-r--r--arch/powerpc/kvm/emulate_loadstore.c272
-rw-r--r--arch/powerpc/kvm/mpic.c4
-rw-r--r--arch/powerpc/kvm/powerpc.c181
-rw-r--r--arch/powerpc/kvm/timing.c1
-rw-r--r--arch/powerpc/kvm/timing.h3
55 files changed, 1839 insertions, 2003 deletions
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 35d16bd2760b..ec2e40f2cc11 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -202,9 +202,7 @@ config PPC_EARLY_DEBUG_BEAT
202 202
203config PPC_EARLY_DEBUG_44x 203config PPC_EARLY_DEBUG_44x
204 bool "Early serial debugging for IBM/AMCC 44x CPUs" 204 bool "Early serial debugging for IBM/AMCC 44x CPUs"
205 # PPC_EARLY_DEBUG on 440 leaves AS=1 mappings above the TLB high water 205 depends on 44x
206 # mark, which doesn't work with current 440 KVM.
207 depends on 44x && !KVM
208 help 206 help
209 Select this to enable early debugging for IBM 44x chips via the 207 Select this to enable early debugging for IBM 44x chips via the
210 inbuilt serial port. If you enable this, ensure you set 208 inbuilt serial port. If you enable this, ensure you set
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index ccf66b9060a6..924e10df1844 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -127,4 +127,3 @@ CONFIG_CRYPTO_PCBC=y
127# CONFIG_CRYPTO_ANSI_CPRNG is not set 127# CONFIG_CRYPTO_ANSI_CPRNG is not set
128# CONFIG_CRYPTO_HW is not set 128# CONFIG_CRYPTO_HW is not set
129CONFIG_VIRTUALIZATION=y 129CONFIG_VIRTUALIZATION=y
130CONFIG_KVM_440=y
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index 4b237aa35660..21be8ae8f809 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -34,10 +34,14 @@
34#define PPC_MIN_STKFRM 112 34#define PPC_MIN_STKFRM 112
35 35
36#ifdef __BIG_ENDIAN__ 36#ifdef __BIG_ENDIAN__
37#define LWZX_BE stringify_in_c(lwzx)
37#define LDX_BE stringify_in_c(ldx) 38#define LDX_BE stringify_in_c(ldx)
39#define STWX_BE stringify_in_c(stwx)
38#define STDX_BE stringify_in_c(stdx) 40#define STDX_BE stringify_in_c(stdx)
39#else 41#else
42#define LWZX_BE stringify_in_c(lwbrx)
40#define LDX_BE stringify_in_c(ldbrx) 43#define LDX_BE stringify_in_c(ldbrx)
44#define STWX_BE stringify_in_c(stwbrx)
41#define STDX_BE stringify_in_c(stdbrx) 45#define STDX_BE stringify_in_c(stdbrx)
42#endif 46#endif
43 47
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index ed0afc1e44a4..34a05a1a990b 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -3,6 +3,7 @@
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
6#include <asm/reg.h>
6 7
7/* bytes per L1 cache line */ 8/* bytes per L1 cache line */
8#if defined(CONFIG_8xx) || defined(CONFIG_403GCX) 9#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
@@ -39,6 +40,12 @@ struct ppc64_caches {
39}; 40};
40 41
41extern struct ppc64_caches ppc64_caches; 42extern struct ppc64_caches ppc64_caches;
43
44static inline void logmpp(u64 x)
45{
46 asm volatile(PPC_LOGMPP(R1) : : "r" (x));
47}
48
42#endif /* __powerpc64__ && ! __ASSEMBLY__ */ 49#endif /* __powerpc64__ && ! __ASSEMBLY__ */
43 50
44#if defined(__ASSEMBLY__) 51#if defined(__ASSEMBLY__)
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 5dbbb29f5c3e..85bc8c0d257b 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -279,6 +279,12 @@
279#define H_GET_24X7_DATA 0xF07C 279#define H_GET_24X7_DATA 0xF07C
280#define H_GET_PERF_COUNTER_INFO 0xF080 280#define H_GET_PERF_COUNTER_INFO 0xF080
281 281
282/* Values for 2nd argument to H_SET_MODE */
283#define H_SET_MODE_RESOURCE_SET_CIABR 1
284#define H_SET_MODE_RESOURCE_SET_DAWR 2
285#define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3
286#define H_SET_MODE_RESOURCE_LE 4
287
282#ifndef __ASSEMBLY__ 288#ifndef __ASSEMBLY__
283 289
284/** 290/**
diff --git a/arch/powerpc/include/asm/kvm_44x.h b/arch/powerpc/include/asm/kvm_44x.h
deleted file mode 100644
index a0e57618ff33..000000000000
--- a/arch/powerpc/include/asm/kvm_44x.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __ASM_44X_H__
21#define __ASM_44X_H__
22
23#include <linux/kvm_host.h>
24
25#define PPC44x_TLB_SIZE 64
26
27/* If the guest is expecting it, this can be as large as we like; we'd just
28 * need to find some way of advertising it. */
29#define KVM44x_GUEST_TLB_SIZE 64
30
31struct kvmppc_44x_tlbe {
32 u32 tid; /* Only the low 8 bits are used. */
33 u32 word0;
34 u32 word1;
35 u32 word2;
36};
37
38struct kvmppc_44x_shadow_ref {
39 struct page *page;
40 u16 gtlb_index;
41 u8 writeable;
42 u8 tid;
43};
44
45struct kvmppc_vcpu_44x {
46 /* Unmodified copy of the guest's TLB. */
47 struct kvmppc_44x_tlbe guest_tlb[KVM44x_GUEST_TLB_SIZE];
48
49 /* References to guest pages in the hardware TLB. */
50 struct kvmppc_44x_shadow_ref shadow_refs[PPC44x_TLB_SIZE];
51
52 /* State of the shadow TLB at guest context switch time. */
53 struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE];
54 u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
55
56 struct kvm_vcpu vcpu;
57};
58
59static inline struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu)
60{
61 return container_of(vcpu, struct kvmppc_vcpu_44x, vcpu);
62}
63
64void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu);
65void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu);
66
67#endif /* __ASM_44X_H__ */
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index ecf7e133a4f2..465dfcb82c92 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -33,7 +33,6 @@
33/* IVPR must be 64KiB-aligned. */ 33/* IVPR must be 64KiB-aligned. */
34#define VCPU_SIZE_ORDER 4 34#define VCPU_SIZE_ORDER 4
35#define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12) 35#define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12)
36#define VCPU_TLB_PGSZ PPC44x_TLB_64K
37#define VCPU_SIZE_BYTES (1<<VCPU_SIZE_LOG) 36#define VCPU_SIZE_BYTES (1<<VCPU_SIZE_LOG)
38 37
39#define BOOKE_INTERRUPT_CRITICAL 0 38#define BOOKE_INTERRUPT_CRITICAL 0
@@ -132,6 +131,7 @@
132#define BOOK3S_HFLAG_NATIVE_PS 0x8 131#define BOOK3S_HFLAG_NATIVE_PS 0x8
133#define BOOK3S_HFLAG_MULTI_PGSIZE 0x10 132#define BOOK3S_HFLAG_MULTI_PGSIZE 0x10
134#define BOOK3S_HFLAG_NEW_TLBIE 0x20 133#define BOOK3S_HFLAG_NEW_TLBIE 0x20
134#define BOOK3S_HFLAG_SPLIT_HACK 0x40
135 135
136#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */ 136#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
137#define RESUME_FLAG_HOST (1<<1) /* Resume host? */ 137#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index f52f65694527..6acf0c2a0f99 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -83,8 +83,6 @@ struct kvmppc_vcpu_book3s {
83 u64 sdr1; 83 u64 sdr1;
84 u64 hior; 84 u64 hior;
85 u64 msr_mask; 85 u64 msr_mask;
86 u64 purr_offset;
87 u64 spurr_offset;
88#ifdef CONFIG_PPC_BOOK3S_32 86#ifdef CONFIG_PPC_BOOK3S_32
89 u32 vsid_pool[VSID_POOL_SIZE]; 87 u32 vsid_pool[VSID_POOL_SIZE];
90 u32 vsid_next; 88 u32 vsid_next;
@@ -148,9 +146,10 @@ extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *
148extern int kvmppc_mmu_hpte_sysinit(void); 146extern int kvmppc_mmu_hpte_sysinit(void);
149extern void kvmppc_mmu_hpte_sysexit(void); 147extern void kvmppc_mmu_hpte_sysexit(void);
150extern int kvmppc_mmu_hv_init(void); 148extern int kvmppc_mmu_hv_init(void);
149extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
151 150
151/* XXX remove this export when load_last_inst() is generic */
152extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data); 152extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
153extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
154extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec); 153extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
155extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu, 154extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
156 unsigned int vec); 155 unsigned int vec);
@@ -159,13 +158,13 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
159 bool upper, u32 val); 158 bool upper, u32 val);
160extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); 159extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
161extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu); 160extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
162extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing, 161extern pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
163 bool *writable); 162 bool *writable);
164extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev, 163extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
165 unsigned long *rmap, long pte_index, int realmode); 164 unsigned long *rmap, long pte_index, int realmode);
166extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep, 165extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
167 unsigned long pte_index); 166 unsigned long pte_index);
168void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep, 167void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
169 unsigned long pte_index); 168 unsigned long pte_index);
170extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr, 169extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
171 unsigned long *nb_ret); 170 unsigned long *nb_ret);
@@ -183,12 +182,16 @@ extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
183 struct kvm_memory_slot *memslot, unsigned long *map); 182 struct kvm_memory_slot *memslot, unsigned long *map);
184extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, 183extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
185 unsigned long mask); 184 unsigned long mask);
185extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);
186 186
187extern void kvmppc_entry_trampoline(void); 187extern void kvmppc_entry_trampoline(void);
188extern void kvmppc_hv_entry_trampoline(void); 188extern void kvmppc_hv_entry_trampoline(void);
189extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst); 189extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
190extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst); 190extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
191extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd); 191extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
192extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
193extern int kvmppc_hcall_impl_pr(unsigned long cmd);
194extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
192extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, 195extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
193 struct kvm_vcpu *vcpu); 196 struct kvm_vcpu *vcpu);
194extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, 197extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
@@ -274,32 +277,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
274 return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE); 277 return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
275} 278}
276 279
277static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
278{
279 /* Load the instruction manually if it failed to do so in the
280 * exit path */
281 if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
282 kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
283
284 return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) :
285 vcpu->arch.last_inst;
286}
287
288static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
289{
290 return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));
291}
292
293/*
294 * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
295 * Because the sc instruction sets SRR0 to point to the following
296 * instruction, we have to fetch from pc - 4.
297 */
298static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
299{
300 return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);
301}
302
303static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) 280static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
304{ 281{
305 return vcpu->arch.fault_dar; 282 return vcpu->arch.fault_dar;
@@ -310,6 +287,13 @@ static inline bool is_kvmppc_resume_guest(int r)
310 return (r == RESUME_GUEST || r == RESUME_GUEST_NV); 287 return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
311} 288}
312 289
290static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
291static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
292{
293 /* Only PR KVM supports the magic page */
294 return !is_kvmppc_hv_enabled(vcpu->kvm);
295}
296
313/* Magic register values loaded into r3 and r4 before the 'sc' assembly 297/* Magic register values loaded into r3 and r4 before the 'sc' assembly
314 * instruction for the OSI hypercalls */ 298 * instruction for the OSI hypercalls */
315#define OSI_SC_MAGIC_R3 0x113724FA 299#define OSI_SC_MAGIC_R3 0x113724FA
@@ -322,4 +306,7 @@ static inline bool is_kvmppc_resume_guest(int r)
322/* LPIDs we support with this build -- runtime limit may be lower */ 306/* LPIDs we support with this build -- runtime limit may be lower */
323#define KVMPPC_NR_LPIDS (LPID_RSVD + 1) 307#define KVMPPC_NR_LPIDS (LPID_RSVD + 1)
324 308
309#define SPLIT_HACK_MASK 0xff000000
310#define SPLIT_HACK_OFFS 0xfb000000
311
325#endif /* __ASM_KVM_BOOK3S_H__ */ 312#endif /* __ASM_KVM_BOOK3S_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index d645428a65a4..0aa817933e6a 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -59,20 +59,29 @@ extern unsigned long kvm_rma_pages;
59/* These bits are reserved in the guest view of the HPTE */ 59/* These bits are reserved in the guest view of the HPTE */
60#define HPTE_GR_RESERVED HPTE_GR_MODIFIED 60#define HPTE_GR_RESERVED HPTE_GR_MODIFIED
61 61
62static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits) 62static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
63{ 63{
64 unsigned long tmp, old; 64 unsigned long tmp, old;
65 __be64 be_lockbit, be_bits;
66
67 /*
68 * We load/store in native endian, but the HTAB is in big endian. If
69 * we byte swap all data we apply on the PTE we're implicitly correct
70 * again.
71 */
72 be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
73 be_bits = cpu_to_be64(bits);
65 74
66 asm volatile(" ldarx %0,0,%2\n" 75 asm volatile(" ldarx %0,0,%2\n"
67 " and. %1,%0,%3\n" 76 " and. %1,%0,%3\n"
68 " bne 2f\n" 77 " bne 2f\n"
69 " ori %0,%0,%4\n" 78 " or %0,%0,%4\n"
70 " stdcx. %0,0,%2\n" 79 " stdcx. %0,0,%2\n"
71 " beq+ 2f\n" 80 " beq+ 2f\n"
72 " mr %1,%3\n" 81 " mr %1,%3\n"
73 "2: isync" 82 "2: isync"
74 : "=&r" (tmp), "=&r" (old) 83 : "=&r" (tmp), "=&r" (old)
75 : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK) 84 : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
76 : "cc", "memory"); 85 : "cc", "memory");
77 return old == 0; 86 return old == 0;
78} 87}
@@ -110,16 +119,12 @@ static inline int __hpte_actual_psize(unsigned int lp, int psize)
110static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, 119static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
111 unsigned long pte_index) 120 unsigned long pte_index)
112{ 121{
113 int b_psize, a_psize; 122 int b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
114 unsigned int penc; 123 unsigned int penc;
115 unsigned long rb = 0, va_low, sllp; 124 unsigned long rb = 0, va_low, sllp;
116 unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1); 125 unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
117 126
118 if (!(v & HPTE_V_LARGE)) { 127 if (v & HPTE_V_LARGE) {
119 /* both base and actual psize is 4k */
120 b_psize = MMU_PAGE_4K;
121 a_psize = MMU_PAGE_4K;
122 } else {
123 for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) { 128 for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {
124 129
125 /* valid entries have a shift value */ 130 /* valid entries have a shift value */
@@ -142,6 +147,8 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
142 */ 147 */
143 /* This covers 14..54 bits of va*/ 148 /* This covers 14..54 bits of va*/
144 rb = (v & ~0x7fUL) << 16; /* AVA field */ 149 rb = (v & ~0x7fUL) << 16; /* AVA field */
150
151 rb |= v >> (62 - 8); /* B field */
145 /* 152 /*
146 * AVA in v had cleared lower 23 bits. We need to derive 153 * AVA in v had cleared lower 23 bits. We need to derive
147 * that from pteg index 154 * that from pteg index
@@ -172,10 +179,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
172 { 179 {
173 int aval_shift; 180 int aval_shift;
174 /* 181 /*
175 * remaining 7bits of AVA/LP fields 182 * remaining bits of AVA/LP fields
176 * Also contain the rr bits of LP 183 * Also contain the rr bits of LP
177 */ 184 */
178 rb |= (va_low & 0x7f) << 16; 185 rb |= (va_low << mmu_psize_defs[b_psize].shift) & 0x7ff000;
179 /* 186 /*
180 * Now clear not needed LP bits based on actual psize 187 * Now clear not needed LP bits based on actual psize
181 */ 188 */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index c7aed6105ff9..f7aa5cc395c4 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -69,11 +69,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
69 return false; 69 return false;
70} 70}
71 71
72static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
73{
74 return vcpu->arch.last_inst;
75}
76
77static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) 72static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
78{ 73{
79 vcpu->arch.ctr = val; 74 vcpu->arch.ctr = val;
@@ -108,4 +103,14 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
108{ 103{
109 return vcpu->arch.fault_dear; 104 return vcpu->arch.fault_dear;
110} 105}
106
107static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
108{
109 /* Magic page is only supported on e500v2 */
110#ifdef CONFIG_KVM_E500V2
111 return true;
112#else
113 return false;
114#endif
115}
111#endif /* __ASM_KVM_BOOKE_H__ */ 116#endif /* __ASM_KVM_BOOKE_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index bb66d8b8efdf..98d9dd50d063 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -34,6 +34,7 @@
34#include <asm/processor.h> 34#include <asm/processor.h>
35#include <asm/page.h> 35#include <asm/page.h>
36#include <asm/cacheflush.h> 36#include <asm/cacheflush.h>
37#include <asm/hvcall.h>
37 38
38#define KVM_MAX_VCPUS NR_CPUS 39#define KVM_MAX_VCPUS NR_CPUS
39#define KVM_MAX_VCORES NR_CPUS 40#define KVM_MAX_VCORES NR_CPUS
@@ -48,7 +49,6 @@
48#define KVM_NR_IRQCHIPS 1 49#define KVM_NR_IRQCHIPS 1
49#define KVM_IRQCHIP_NUM_PINS 256 50#define KVM_IRQCHIP_NUM_PINS 256
50 51
51#if !defined(CONFIG_KVM_440)
52#include <linux/mmu_notifier.h> 52#include <linux/mmu_notifier.h>
53 53
54#define KVM_ARCH_WANT_MMU_NOTIFIER 54#define KVM_ARCH_WANT_MMU_NOTIFIER
@@ -61,8 +61,6 @@ extern int kvm_age_hva(struct kvm *kvm, unsigned long hva);
61extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); 61extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
62extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); 62extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
63 63
64#endif
65
66#define HPTEG_CACHE_NUM (1 << 15) 64#define HPTEG_CACHE_NUM (1 << 15)
67#define HPTEG_HASH_BITS_PTE 13 65#define HPTEG_HASH_BITS_PTE 13
68#define HPTEG_HASH_BITS_PTE_LONG 12 66#define HPTEG_HASH_BITS_PTE_LONG 12
@@ -96,7 +94,6 @@ struct kvm_vm_stat {
96struct kvm_vcpu_stat { 94struct kvm_vcpu_stat {
97 u32 sum_exits; 95 u32 sum_exits;
98 u32 mmio_exits; 96 u32 mmio_exits;
99 u32 dcr_exits;
100 u32 signal_exits; 97 u32 signal_exits;
101 u32 light_exits; 98 u32 light_exits;
102 /* Account for special types of light exits: */ 99 /* Account for special types of light exits: */
@@ -113,22 +110,21 @@ struct kvm_vcpu_stat {
113 u32 halt_wakeup; 110 u32 halt_wakeup;
114 u32 dbell_exits; 111 u32 dbell_exits;
115 u32 gdbell_exits; 112 u32 gdbell_exits;
113 u32 ld;
114 u32 st;
116#ifdef CONFIG_PPC_BOOK3S 115#ifdef CONFIG_PPC_BOOK3S
117 u32 pf_storage; 116 u32 pf_storage;
118 u32 pf_instruc; 117 u32 pf_instruc;
119 u32 sp_storage; 118 u32 sp_storage;
120 u32 sp_instruc; 119 u32 sp_instruc;
121 u32 queue_intr; 120 u32 queue_intr;
122 u32 ld;
123 u32 ld_slow; 121 u32 ld_slow;
124 u32 st;
125 u32 st_slow; 122 u32 st_slow;
126#endif 123#endif
127}; 124};
128 125
129enum kvm_exit_types { 126enum kvm_exit_types {
130 MMIO_EXITS, 127 MMIO_EXITS,
131 DCR_EXITS,
132 SIGNAL_EXITS, 128 SIGNAL_EXITS,
133 ITLB_REAL_MISS_EXITS, 129 ITLB_REAL_MISS_EXITS,
134 ITLB_VIRT_MISS_EXITS, 130 ITLB_VIRT_MISS_EXITS,
@@ -254,7 +250,6 @@ struct kvm_arch {
254 atomic_t hpte_mod_interest; 250 atomic_t hpte_mod_interest;
255 spinlock_t slot_phys_lock; 251 spinlock_t slot_phys_lock;
256 cpumask_t need_tlb_flush; 252 cpumask_t need_tlb_flush;
257 struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
258 int hpt_cma_alloc; 253 int hpt_cma_alloc;
259#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ 254#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
260#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 255#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -263,6 +258,7 @@ struct kvm_arch {
263#ifdef CONFIG_PPC_BOOK3S_64 258#ifdef CONFIG_PPC_BOOK3S_64
264 struct list_head spapr_tce_tables; 259 struct list_head spapr_tce_tables;
265 struct list_head rtas_tokens; 260 struct list_head rtas_tokens;
261 DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
266#endif 262#endif
267#ifdef CONFIG_KVM_MPIC 263#ifdef CONFIG_KVM_MPIC
268 struct openpic *mpic; 264 struct openpic *mpic;
@@ -271,6 +267,10 @@ struct kvm_arch {
271 struct kvmppc_xics *xics; 267 struct kvmppc_xics *xics;
272#endif 268#endif
273 struct kvmppc_ops *kvm_ops; 269 struct kvmppc_ops *kvm_ops;
270#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
271 /* This array can grow quite large, keep it at the end */
272 struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
273#endif
274}; 274};
275 275
276/* 276/*
@@ -305,6 +305,8 @@ struct kvmppc_vcore {
305 u32 arch_compat; 305 u32 arch_compat;
306 ulong pcr; 306 ulong pcr;
307 ulong dpdes; /* doorbell state (POWER8) */ 307 ulong dpdes; /* doorbell state (POWER8) */
308 void *mpp_buffer; /* Micro Partition Prefetch buffer */
309 bool mpp_buffer_is_valid;
308}; 310};
309 311
310#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff) 312#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
@@ -503,8 +505,10 @@ struct kvm_vcpu_arch {
503#ifdef CONFIG_BOOKE 505#ifdef CONFIG_BOOKE
504 u32 decar; 506 u32 decar;
505#endif 507#endif
506 u32 tbl; 508 /* Time base value when we entered the guest */
507 u32 tbu; 509 u64 entry_tb;
510 u64 entry_vtb;
511 u64 entry_ic;
508 u32 tcr; 512 u32 tcr;
509 ulong tsr; /* we need to perform set/clr_bits() which requires ulong */ 513 ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
510 u32 ivor[64]; 514 u32 ivor[64];
@@ -580,6 +584,8 @@ struct kvm_vcpu_arch {
580 u32 mmucfg; 584 u32 mmucfg;
581 u32 eptcfg; 585 u32 eptcfg;
582 u32 epr; 586 u32 epr;
587 u64 sprg9;
588 u32 pwrmgtcr0;
583 u32 crit_save; 589 u32 crit_save;
584 /* guest debug registers*/ 590 /* guest debug registers*/
585 struct debug_reg dbg_reg; 591 struct debug_reg dbg_reg;
@@ -593,8 +599,6 @@ struct kvm_vcpu_arch {
593 u8 io_gpr; /* GPR used as IO source/target */ 599 u8 io_gpr; /* GPR used as IO source/target */
594 u8 mmio_is_bigendian; 600 u8 mmio_is_bigendian;
595 u8 mmio_sign_extend; 601 u8 mmio_sign_extend;
596 u8 dcr_needed;
597 u8 dcr_is_write;
598 u8 osi_needed; 602 u8 osi_needed;
599 u8 osi_enabled; 603 u8 osi_enabled;
600 u8 papr_enabled; 604 u8 papr_enabled;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9c89cdd067a6..fb86a2299d8a 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -41,12 +41,26 @@
41enum emulation_result { 41enum emulation_result {
42 EMULATE_DONE, /* no further processing */ 42 EMULATE_DONE, /* no further processing */
43 EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ 43 EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
44 EMULATE_DO_DCR, /* kvm_run filled with DCR request */
45 EMULATE_FAIL, /* can't emulate this instruction */ 44 EMULATE_FAIL, /* can't emulate this instruction */
46 EMULATE_AGAIN, /* something went wrong. go again */ 45 EMULATE_AGAIN, /* something went wrong. go again */
47 EMULATE_EXIT_USER, /* emulation requires exit to user-space */ 46 EMULATE_EXIT_USER, /* emulation requires exit to user-space */
48}; 47};
49 48
49enum instruction_type {
50 INST_GENERIC,
51 INST_SC, /* system call */
52};
53
54enum xlate_instdata {
55 XLATE_INST, /* translate instruction address */
56 XLATE_DATA /* translate data address */
57};
58
59enum xlate_readwrite {
60 XLATE_READ, /* check for read permissions */
61 XLATE_WRITE /* check for write permissions */
62};
63
50extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); 64extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
51extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); 65extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
52extern void kvmppc_handler_highmem(void); 66extern void kvmppc_handler_highmem(void);
@@ -62,8 +76,16 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
62 u64 val, unsigned int bytes, 76 u64 val, unsigned int bytes,
63 int is_default_endian); 77 int is_default_endian);
64 78
79extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
80 enum instruction_type type, u32 *inst);
81
82extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
83 bool data);
84extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
85 bool data);
65extern int kvmppc_emulate_instruction(struct kvm_run *run, 86extern int kvmppc_emulate_instruction(struct kvm_run *run,
66 struct kvm_vcpu *vcpu); 87 struct kvm_vcpu *vcpu);
88extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
67extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); 89extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
68extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu); 90extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
69extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb); 91extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
@@ -86,6 +108,9 @@ extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
86 gva_t eaddr); 108 gva_t eaddr);
87extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu); 109extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
88extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu); 110extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
111extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
112 enum xlate_instdata xlid, enum xlate_readwrite xlrw,
113 struct kvmppc_pte *pte);
89 114
90extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, 115extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
91 unsigned int id); 116 unsigned int id);
@@ -106,6 +131,14 @@ extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
106extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, 131extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
107 struct kvm_interrupt *irq); 132 struct kvm_interrupt *irq);
108extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu); 133extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
134extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
135 ulong esr_flags);
136extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
137 ulong dear_flags,
138 ulong esr_flags);
139extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
140extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
141 ulong esr_flags);
109extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu); 142extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
110extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu); 143extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
111 144
@@ -228,12 +261,35 @@ struct kvmppc_ops {
228 void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu); 261 void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
229 long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl, 262 long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
230 unsigned long arg); 263 unsigned long arg);
231 264 int (*hcall_implemented)(unsigned long hcall);
232}; 265};
233 266
234extern struct kvmppc_ops *kvmppc_hv_ops; 267extern struct kvmppc_ops *kvmppc_hv_ops;
235extern struct kvmppc_ops *kvmppc_pr_ops; 268extern struct kvmppc_ops *kvmppc_pr_ops;
236 269
270static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
271 enum instruction_type type, u32 *inst)
272{
273 int ret = EMULATE_DONE;
274 u32 fetched_inst;
275
276 /* Load the instruction manually if it failed to do so in the
277 * exit path */
278 if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
279 ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);
280
281 /* Write fetch_failed unswapped if the fetch failed */
282 if (ret == EMULATE_DONE)
283 fetched_inst = kvmppc_need_byteswap(vcpu) ?
284 swab32(vcpu->arch.last_inst) :
285 vcpu->arch.last_inst;
286 else
287 fetched_inst = vcpu->arch.last_inst;
288
289 *inst = fetched_inst;
290 return ret;
291}
292
237static inline bool is_kvmppc_hv_enabled(struct kvm *kvm) 293static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
238{ 294{
239 return kvm->arch.kvm_ops == kvmppc_hv_ops; 295 return kvm->arch.kvm_ops == kvmppc_hv_ops;
@@ -392,6 +448,17 @@ static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
392 { return 0; } 448 { return 0; }
393#endif 449#endif
394 450
451static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
452{
453#ifdef CONFIG_KVM_BOOKE_HV
454 return mfspr(SPRN_GEPR);
455#elif defined(CONFIG_BOOKE)
456 return vcpu->arch.epr;
457#else
458 return 0;
459#endif
460}
461
395static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr) 462static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
396{ 463{
397#ifdef CONFIG_KVM_BOOKE_HV 464#ifdef CONFIG_KVM_BOOKE_HV
@@ -472,8 +539,20 @@ static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
472#endif 539#endif
473} 540}
474 541
542#define SPRNG_WRAPPER_GET(reg, bookehv_spr) \
543static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
544{ \
545 return mfspr(bookehv_spr); \
546} \
547
548#define SPRNG_WRAPPER_SET(reg, bookehv_spr) \
549static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val) \
550{ \
551 mtspr(bookehv_spr, val); \
552} \
553
475#define SHARED_WRAPPER_GET(reg, size) \ 554#define SHARED_WRAPPER_GET(reg, size) \
476static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \ 555static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
477{ \ 556{ \
478 if (kvmppc_shared_big_endian(vcpu)) \ 557 if (kvmppc_shared_big_endian(vcpu)) \
479 return be##size##_to_cpu(vcpu->arch.shared->reg); \ 558 return be##size##_to_cpu(vcpu->arch.shared->reg); \
@@ -494,14 +573,31 @@ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
494 SHARED_WRAPPER_GET(reg, size) \ 573 SHARED_WRAPPER_GET(reg, size) \
495 SHARED_WRAPPER_SET(reg, size) \ 574 SHARED_WRAPPER_SET(reg, size) \
496 575
576#define SPRNG_WRAPPER(reg, bookehv_spr) \
577 SPRNG_WRAPPER_GET(reg, bookehv_spr) \
578 SPRNG_WRAPPER_SET(reg, bookehv_spr) \
579
580#ifdef CONFIG_KVM_BOOKE_HV
581
582#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \
583 SPRNG_WRAPPER(reg, bookehv_spr) \
584
585#else
586
587#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \
588 SHARED_WRAPPER(reg, size) \
589
590#endif
591
497SHARED_WRAPPER(critical, 64) 592SHARED_WRAPPER(critical, 64)
498SHARED_WRAPPER(sprg0, 64) 593SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
499SHARED_WRAPPER(sprg1, 64) 594SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
500SHARED_WRAPPER(sprg2, 64) 595SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
501SHARED_WRAPPER(sprg3, 64) 596SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
502SHARED_WRAPPER(srr0, 64) 597SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
503SHARED_WRAPPER(srr1, 64) 598SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
504SHARED_WRAPPER(dar, 64) 599SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
600SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
505SHARED_WRAPPER_GET(msr, 64) 601SHARED_WRAPPER_GET(msr, 64)
506static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val) 602static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
507{ 603{
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index d0918e09557f..cd4f04a74802 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,7 +40,11 @@
40 40
41/* MAS registers bit definitions */ 41/* MAS registers bit definitions */
42 42
43#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000) 43#define MAS0_TLBSEL_MASK 0x30000000
44#define MAS0_TLBSEL_SHIFT 28
45#define MAS0_TLBSEL(x) (((x) << MAS0_TLBSEL_SHIFT) & MAS0_TLBSEL_MASK)
46#define MAS0_GET_TLBSEL(mas0) (((mas0) & MAS0_TLBSEL_MASK) >> \
47 MAS0_TLBSEL_SHIFT)
44#define MAS0_ESEL_MASK 0x0FFF0000 48#define MAS0_ESEL_MASK 0x0FFF0000
45#define MAS0_ESEL_SHIFT 16 49#define MAS0_ESEL_SHIFT 16
46#define MAS0_ESEL(x) (((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK) 50#define MAS0_ESEL(x) (((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK)
@@ -58,6 +62,7 @@
58#define MAS1_TSIZE_MASK 0x00000f80 62#define MAS1_TSIZE_MASK 0x00000f80
59#define MAS1_TSIZE_SHIFT 7 63#define MAS1_TSIZE_SHIFT 7
60#define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK) 64#define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
65#define MAS1_GET_TSIZE(mas1) (((mas1) & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT)
61 66
62#define MAS2_EPN (~0xFFFUL) 67#define MAS2_EPN (~0xFFFUL)
63#define MAS2_X0 0x00000040 68#define MAS2_X0 0x00000040
@@ -86,6 +91,7 @@
86#define MAS3_SPSIZE 0x0000003e 91#define MAS3_SPSIZE 0x0000003e
87#define MAS3_SPSIZE_SHIFT 1 92#define MAS3_SPSIZE_SHIFT 1
88 93
94#define MAS4_TLBSEL_MASK MAS0_TLBSEL_MASK
89#define MAS4_TLBSELD(x) MAS0_TLBSEL(x) 95#define MAS4_TLBSELD(x) MAS0_TLBSEL(x)
90#define MAS4_INDD 0x00008000 /* Default IND */ 96#define MAS4_INDD 0x00008000 /* Default IND */
91#define MAS4_TSIZED(x) MAS1_TSIZE(x) 97#define MAS4_TSIZED(x) MAS1_TSIZE(x)
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index e316dad6ba76..6f8536208049 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -139,6 +139,7 @@
139#define PPC_INST_ISEL 0x7c00001e 139#define PPC_INST_ISEL 0x7c00001e
140#define PPC_INST_ISEL_MASK 0xfc00003e 140#define PPC_INST_ISEL_MASK 0xfc00003e
141#define PPC_INST_LDARX 0x7c0000a8 141#define PPC_INST_LDARX 0x7c0000a8
142#define PPC_INST_LOGMPP 0x7c0007e4
142#define PPC_INST_LSWI 0x7c0004aa 143#define PPC_INST_LSWI 0x7c0004aa
143#define PPC_INST_LSWX 0x7c00042a 144#define PPC_INST_LSWX 0x7c00042a
144#define PPC_INST_LWARX 0x7c000028 145#define PPC_INST_LWARX 0x7c000028
@@ -277,6 +278,20 @@
277#define __PPC_EH(eh) 0 278#define __PPC_EH(eh) 0
278#endif 279#endif
279 280
281/* POWER8 Micro Partition Prefetch (MPP) parameters */
282/* Address mask is common for LOGMPP instruction and MPPR SPR */
283#define PPC_MPPE_ADDRESS_MASK 0xffffffffc000
284
285/* Bits 60 and 61 of MPP SPR should be set to one of the following */
286/* Aborting the fetch is indeed setting 00 in the table size bits */
287#define PPC_MPPR_FETCH_ABORT (0x0ULL << 60)
288#define PPC_MPPR_FETCH_WHOLE_TABLE (0x2ULL << 60)
289
290/* Bits 54 and 55 of register for LOGMPP instruction should be set to: */
291#define PPC_LOGMPP_LOG_L2 (0x02ULL << 54)
292#define PPC_LOGMPP_LOG_L2L3 (0x01ULL << 54)
293#define PPC_LOGMPP_LOG_ABORT (0x03ULL << 54)
294
280/* Deal with instructions that older assemblers aren't aware of */ 295/* Deal with instructions that older assemblers aren't aware of */
281#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \ 296#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \
282 __PPC_RA(a) | __PPC_RB(b)) 297 __PPC_RA(a) | __PPC_RB(b))
@@ -285,6 +300,8 @@
285#define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \ 300#define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \
286 ___PPC_RT(t) | ___PPC_RA(a) | \ 301 ___PPC_RT(t) | ___PPC_RA(a) | \
287 ___PPC_RB(b) | __PPC_EH(eh)) 302 ___PPC_RB(b) | __PPC_EH(eh))
303#define PPC_LOGMPP(b) stringify_in_c(.long PPC_INST_LOGMPP | \
304 __PPC_RB(b))
288#define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \ 305#define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \
289 ___PPC_RT(t) | ___PPC_RA(a) | \ 306 ___PPC_RT(t) | ___PPC_RA(a) | \
290 ___PPC_RB(b) | __PPC_EH(eh)) 307 ___PPC_RB(b) | __PPC_EH(eh))
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index f7b97b895708..1c987bf794ef 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -225,6 +225,7 @@
225#define CTRL_TE 0x00c00000 /* thread enable */ 225#define CTRL_TE 0x00c00000 /* thread enable */
226#define CTRL_RUNLATCH 0x1 226#define CTRL_RUNLATCH 0x1
227#define SPRN_DAWR 0xB4 227#define SPRN_DAWR 0xB4
228#define SPRN_MPPR 0xB8 /* Micro Partition Prefetch Register */
228#define SPRN_RPR 0xBA /* Relative Priority Register */ 229#define SPRN_RPR 0xBA /* Relative Priority Register */
229#define SPRN_CIABR 0xBB 230#define SPRN_CIABR 0xBB
230#define CIABR_PRIV 0x3 231#define CIABR_PRIV 0x3
@@ -944,9 +945,6 @@
944 * readable variant for reads, which can avoid a fault 945 * readable variant for reads, which can avoid a fault
945 * with KVM type virtualization. 946 * with KVM type virtualization.
946 * 947 *
947 * (*) Under KVM, the host SPRG1 is used to point to
948 * the current VCPU data structure
949 *
950 * 32-bit 8xx: 948 * 32-bit 8xx:
951 * - SPRG0 scratch for exception vectors 949 * - SPRG0 scratch for exception vectors
952 * - SPRG1 scratch for exception vectors 950 * - SPRG1 scratch for exception vectors
@@ -1203,6 +1201,15 @@
1203 : "r" ((unsigned long)(v)) \ 1201 : "r" ((unsigned long)(v)) \
1204 : "memory") 1202 : "memory")
1205 1203
1204static inline unsigned long mfvtb (void)
1205{
1206#ifdef CONFIG_PPC_BOOK3S_64
1207 if (cpu_has_feature(CPU_FTR_ARCH_207S))
1208 return mfspr(SPRN_VTB);
1209#endif
1210 return 0;
1211}
1212
1206#ifdef __powerpc64__ 1213#ifdef __powerpc64__
1207#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E) 1214#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
1208#define mftb() ({unsigned long rval; \ 1215#define mftb() ({unsigned long rval; \
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 1d428e6007ca..03cbada59d3a 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -102,6 +102,15 @@ static inline u64 get_rtc(void)
102 return (u64)hi * 1000000000 + lo; 102 return (u64)hi * 1000000000 + lo;
103} 103}
104 104
105static inline u64 get_vtb(void)
106{
107#ifdef CONFIG_PPC_BOOK3S_64
108 if (cpu_has_feature(CPU_FTR_ARCH_207S))
109 return mfvtb();
110#endif
111 return 0;
112}
113
105#ifdef CONFIG_PPC64 114#ifdef CONFIG_PPC64
106static inline u64 get_tb(void) 115static inline u64 get_tb(void)
107{ 116{
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 2bc4a9409a93..e0e49dbb145d 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -548,6 +548,7 @@ struct kvm_get_htab_header {
548 548
549#define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4) 549#define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
550#define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5) 550#define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
551#define KVM_REG_PPC_LPCR_64 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb5)
551#define KVM_REG_PPC_PPR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6) 552#define KVM_REG_PPC_PPR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6)
552 553
553/* Architecture compatibility level */ 554/* Architecture compatibility level */
@@ -555,6 +556,7 @@ struct kvm_get_htab_header {
555 556
556#define KVM_REG_PPC_DABRX (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8) 557#define KVM_REG_PPC_DABRX (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8)
557#define KVM_REG_PPC_WORT (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb9) 558#define KVM_REG_PPC_WORT (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb9)
559#define KVM_REG_PPC_SPRG9 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xba)
558 560
559/* Transactional Memory checkpointed state: 561/* Transactional Memory checkpointed state:
560 * This is all GPRs, all VSX regs and a subset of SPRs 562 * This is all GPRs, all VSX regs and a subset of SPRs
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index e35054054c32..9d7dede2847c 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -491,6 +491,7 @@ int main(void)
491 DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1)); 491 DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
492 DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock)); 492 DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
493 DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits)); 493 DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
494 DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls));
494 DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr)); 495 DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
495 DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor)); 496 DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
496 DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v)); 497 DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
@@ -665,6 +666,7 @@ int main(void)
665 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); 666 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
666 DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); 667 DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
667 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); 668 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
669 DEFINE(VCPU_SPRG9, offsetof(struct kvm_vcpu, arch.sprg9));
668 DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); 670 DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
669 DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear)); 671 DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
670 DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr)); 672 DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
deleted file mode 100644
index 9cb4b0a36031..000000000000
--- a/arch/powerpc/kvm/44x.c
+++ /dev/null
@@ -1,237 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#include <linux/kvm_host.h>
21#include <linux/slab.h>
22#include <linux/err.h>
23#include <linux/export.h>
24#include <linux/module.h>
25#include <linux/miscdevice.h>
26
27#include <asm/reg.h>
28#include <asm/cputable.h>
29#include <asm/tlbflush.h>
30#include <asm/kvm_44x.h>
31#include <asm/kvm_ppc.h>
32
33#include "44x_tlb.h"
34#include "booke.h"
35
36static void kvmppc_core_vcpu_load_44x(struct kvm_vcpu *vcpu, int cpu)
37{
38 kvmppc_booke_vcpu_load(vcpu, cpu);
39 kvmppc_44x_tlb_load(vcpu);
40}
41
42static void kvmppc_core_vcpu_put_44x(struct kvm_vcpu *vcpu)
43{
44 kvmppc_44x_tlb_put(vcpu);
45 kvmppc_booke_vcpu_put(vcpu);
46}
47
48int kvmppc_core_check_processor_compat(void)
49{
50 int r;
51
52 if (strncmp(cur_cpu_spec->platform, "ppc440", 6) == 0)
53 r = 0;
54 else
55 r = -ENOTSUPP;
56
57 return r;
58}
59
60int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
61{
62 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
63 struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[0];
64 int i;
65
66 tlbe->tid = 0;
67 tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID;
68 tlbe->word1 = 0;
69 tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR;
70
71 tlbe++;
72 tlbe->tid = 0;
73 tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID;
74 tlbe->word1 = 0xef600000;
75 tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR
76 | PPC44x_TLB_I | PPC44x_TLB_G;
77
78 /* Since the guest can directly access the timebase, it must know the
79 * real timebase frequency. Accordingly, it must see the state of
80 * CCR1[TCS]. */
81 /* XXX CCR1 doesn't exist on all 440 SoCs. */
82 vcpu->arch.ccr1 = mfspr(SPRN_CCR1);
83
84 for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++)
85 vcpu_44x->shadow_refs[i].gtlb_index = -1;
86
87 vcpu->arch.cpu_type = KVM_CPU_440;
88 vcpu->arch.pvr = mfspr(SPRN_PVR);
89
90 return 0;
91}
92
93/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
94int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
95 struct kvm_translation *tr)
96{
97 int index;
98 gva_t eaddr;
99 u8 pid;
100 u8 as;
101
102 eaddr = tr->linear_address;
103 pid = (tr->linear_address >> 32) & 0xff;
104 as = (tr->linear_address >> 40) & 0x1;
105
106 index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as);
107 if (index == -1) {
108 tr->valid = 0;
109 return 0;
110 }
111
112 tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
113 /* XXX what does "writeable" and "usermode" even mean? */
114 tr->valid = 1;
115
116 return 0;
117}
118
119static int kvmppc_core_get_sregs_44x(struct kvm_vcpu *vcpu,
120 struct kvm_sregs *sregs)
121{
122 return kvmppc_get_sregs_ivor(vcpu, sregs);
123}
124
125static int kvmppc_core_set_sregs_44x(struct kvm_vcpu *vcpu,
126 struct kvm_sregs *sregs)
127{
128 return kvmppc_set_sregs_ivor(vcpu, sregs);
129}
130
131static int kvmppc_get_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
132 union kvmppc_one_reg *val)
133{
134 return -EINVAL;
135}
136
137static int kvmppc_set_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
138 union kvmppc_one_reg *val)
139{
140 return -EINVAL;
141}
142
143static struct kvm_vcpu *kvmppc_core_vcpu_create_44x(struct kvm *kvm,
144 unsigned int id)
145{
146 struct kvmppc_vcpu_44x *vcpu_44x;
147 struct kvm_vcpu *vcpu;
148 int err;
149
150 vcpu_44x = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
151 if (!vcpu_44x) {
152 err = -ENOMEM;
153 goto out;
154 }
155
156 vcpu = &vcpu_44x->vcpu;
157 err = kvm_vcpu_init(vcpu, kvm, id);
158 if (err)
159 goto free_vcpu;
160
161 vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
162 if (!vcpu->arch.shared)
163 goto uninit_vcpu;
164
165 return vcpu;
166
167uninit_vcpu:
168 kvm_vcpu_uninit(vcpu);
169free_vcpu:
170 kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
171out:
172 return ERR_PTR(err);
173}
174
175static void kvmppc_core_vcpu_free_44x(struct kvm_vcpu *vcpu)
176{
177 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
178
179 free_page((unsigned long)vcpu->arch.shared);
180 kvm_vcpu_uninit(vcpu);
181 kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
182}
183
184static int kvmppc_core_init_vm_44x(struct kvm *kvm)
185{
186 return 0;
187}
188
189static void kvmppc_core_destroy_vm_44x(struct kvm *kvm)
190{
191}
192
193static struct kvmppc_ops kvm_ops_44x = {
194 .get_sregs = kvmppc_core_get_sregs_44x,
195 .set_sregs = kvmppc_core_set_sregs_44x,
196 .get_one_reg = kvmppc_get_one_reg_44x,
197 .set_one_reg = kvmppc_set_one_reg_44x,
198 .vcpu_load = kvmppc_core_vcpu_load_44x,
199 .vcpu_put = kvmppc_core_vcpu_put_44x,
200 .vcpu_create = kvmppc_core_vcpu_create_44x,
201 .vcpu_free = kvmppc_core_vcpu_free_44x,
202 .mmu_destroy = kvmppc_mmu_destroy_44x,
203 .init_vm = kvmppc_core_init_vm_44x,
204 .destroy_vm = kvmppc_core_destroy_vm_44x,
205 .emulate_op = kvmppc_core_emulate_op_44x,
206 .emulate_mtspr = kvmppc_core_emulate_mtspr_44x,
207 .emulate_mfspr = kvmppc_core_emulate_mfspr_44x,
208};
209
210static int __init kvmppc_44x_init(void)
211{
212 int r;
213
214 r = kvmppc_booke_init();
215 if (r)
216 goto err_out;
217
218 r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
219 if (r)
220 goto err_out;
221 kvm_ops_44x.owner = THIS_MODULE;
222 kvmppc_pr_ops = &kvm_ops_44x;
223
224err_out:
225 return r;
226}
227
228static void __exit kvmppc_44x_exit(void)
229{
230 kvmppc_pr_ops = NULL;
231 kvmppc_booke_exit();
232}
233
234module_init(kvmppc_44x_init);
235module_exit(kvmppc_44x_exit);
236MODULE_ALIAS_MISCDEV(KVM_MINOR);
237MODULE_ALIAS("devname:kvm");
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
deleted file mode 100644
index 92c9ab4bcfec..000000000000
--- a/arch/powerpc/kvm/44x_emulate.c
+++ /dev/null
@@ -1,194 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#include <asm/kvm_ppc.h>
21#include <asm/dcr.h>
22#include <asm/dcr-regs.h>
23#include <asm/disassemble.h>
24#include <asm/kvm_44x.h>
25#include "timing.h"
26
27#include "booke.h"
28#include "44x_tlb.h"
29
30#define XOP_MFDCRX 259
31#define XOP_MFDCR 323
32#define XOP_MTDCRX 387
33#define XOP_MTDCR 451
34#define XOP_TLBSX 914
35#define XOP_ICCCI 966
36#define XOP_TLBWE 978
37
38static int emulate_mtdcr(struct kvm_vcpu *vcpu, int rs, int dcrn)
39{
40 /* emulate some access in kernel */
41 switch (dcrn) {
42 case DCRN_CPR0_CONFIG_ADDR:
43 vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs);
44 return EMULATE_DONE;
45 default:
46 vcpu->run->dcr.dcrn = dcrn;
47 vcpu->run->dcr.data = kvmppc_get_gpr(vcpu, rs);
48 vcpu->run->dcr.is_write = 1;
49 vcpu->arch.dcr_is_write = 1;
50 vcpu->arch.dcr_needed = 1;
51 kvmppc_account_exit(vcpu, DCR_EXITS);
52 return EMULATE_DO_DCR;
53 }
54}
55
56static int emulate_mfdcr(struct kvm_vcpu *vcpu, int rt, int dcrn)
57{
58 /* The guest may access CPR0 registers to determine the timebase
59 * frequency, and it must know the real host frequency because it
60 * can directly access the timebase registers.
61 *
62 * It would be possible to emulate those accesses in userspace,
63 * but userspace can really only figure out the end frequency.
64 * We could decompose that into the factors that compute it, but
65 * that's tricky math, and it's easier to just report the real
66 * CPR0 values.
67 */
68 switch (dcrn) {
69 case DCRN_CPR0_CONFIG_ADDR:
70 kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr);
71 break;
72 case DCRN_CPR0_CONFIG_DATA:
73 local_irq_disable();
74 mtdcr(DCRN_CPR0_CONFIG_ADDR,
75 vcpu->arch.cpr0_cfgaddr);
76 kvmppc_set_gpr(vcpu, rt,
77 mfdcr(DCRN_CPR0_CONFIG_DATA));
78 local_irq_enable();
79 break;
80 default:
81 vcpu->run->dcr.dcrn = dcrn;
82 vcpu->run->dcr.data = 0;
83 vcpu->run->dcr.is_write = 0;
84 vcpu->arch.dcr_is_write = 0;
85 vcpu->arch.io_gpr = rt;
86 vcpu->arch.dcr_needed = 1;
87 kvmppc_account_exit(vcpu, DCR_EXITS);
88 return EMULATE_DO_DCR;
89 }
90
91 return EMULATE_DONE;
92}
93
94int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
95 unsigned int inst, int *advance)
96{
97 int emulated = EMULATE_DONE;
98 int dcrn = get_dcrn(inst);
99 int ra = get_ra(inst);
100 int rb = get_rb(inst);
101 int rc = get_rc(inst);
102 int rs = get_rs(inst);
103 int rt = get_rt(inst);
104 int ws = get_ws(inst);
105
106 switch (get_op(inst)) {
107 case 31:
108 switch (get_xop(inst)) {
109
110 case XOP_MFDCR:
111 emulated = emulate_mfdcr(vcpu, rt, dcrn);
112 break;
113
114 case XOP_MFDCRX:
115 emulated = emulate_mfdcr(vcpu, rt,
116 kvmppc_get_gpr(vcpu, ra));
117 break;
118
119 case XOP_MTDCR:
120 emulated = emulate_mtdcr(vcpu, rs, dcrn);
121 break;
122
123 case XOP_MTDCRX:
124 emulated = emulate_mtdcr(vcpu, rs,
125 kvmppc_get_gpr(vcpu, ra));
126 break;
127
128 case XOP_TLBWE:
129 emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
130 break;
131
132 case XOP_TLBSX:
133 emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
134 break;
135
136 case XOP_ICCCI:
137 break;
138
139 default:
140 emulated = EMULATE_FAIL;
141 }
142
143 break;
144
145 default:
146 emulated = EMULATE_FAIL;
147 }
148
149 if (emulated == EMULATE_FAIL)
150 emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);
151
152 return emulated;
153}
154
155int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
156{
157 int emulated = EMULATE_DONE;
158
159 switch (sprn) {
160 case SPRN_PID:
161 kvmppc_set_pid(vcpu, spr_val); break;
162 case SPRN_MMUCR:
163 vcpu->arch.mmucr = spr_val; break;
164 case SPRN_CCR0:
165 vcpu->arch.ccr0 = spr_val; break;
166 case SPRN_CCR1:
167 vcpu->arch.ccr1 = spr_val; break;
168 default:
169 emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
170 }
171
172 return emulated;
173}
174
175int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
176{
177 int emulated = EMULATE_DONE;
178
179 switch (sprn) {
180 case SPRN_PID:
181 *spr_val = vcpu->arch.pid; break;
182 case SPRN_MMUCR:
183 *spr_val = vcpu->arch.mmucr; break;
184 case SPRN_CCR0:
185 *spr_val = vcpu->arch.ccr0; break;
186 case SPRN_CCR1:
187 *spr_val = vcpu->arch.ccr1; break;
188 default:
189 emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
190 }
191
192 return emulated;
193}
194
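The dispatcher in 44x_emulate.c keys off get_op() and get_xop() from asm/disassemble.h; the XOP_* constants (259, 323, 387, 451, ...) are the 10-bit X-form extended opcodes. Below is a self-contained sketch of that field extraction using the standard PowerPC instruction layout; the helper bodies are written here only to match how the dispatcher uses them.

#include <stdint.h>
#include <stdio.h>

/* 32-bit PowerPC instruction word: primary opcode in the top 6 bits,
 * X-form extended opcode in bits 21-30 (IBM numbering), register fields
 * in 5-bit slots below the primary opcode. */
static unsigned int get_op(uint32_t inst)  { return inst >> 26; }
static unsigned int get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }
static unsigned int get_rt(uint32_t inst)  { return (inst >> 21) & 0x1f; }
static unsigned int get_ra(uint32_t inst)  { return (inst >> 16) & 0x1f; }
static unsigned int get_rb(uint32_t inst)  { return (inst >> 11) & 0x1f; }

int main(void)
{
	/* Skeleton mfdcr-style encoding: primary opcode 31, extended opcode
	 * 323 (XOP_MFDCR above), RT = 5; the DCRN field is left at zero. */
	uint32_t inst = (31u << 26) | (5u << 21) | (323u << 1);

	printf("op=%u xop=%u rt=%u ra=%u rb=%u\n",
	       get_op(inst), get_xop(inst), get_rt(inst),
	       get_ra(inst), get_rb(inst));
	return 0;
}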
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
deleted file mode 100644
index 0deef1082e02..000000000000
--- a/arch/powerpc/kvm/44x_tlb.c
+++ /dev/null
@@ -1,528 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#include <linux/types.h>
21#include <linux/string.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/highmem.h>
25
26#include <asm/tlbflush.h>
27#include <asm/mmu-44x.h>
28#include <asm/kvm_ppc.h>
29#include <asm/kvm_44x.h>
30#include "timing.h"
31
32#include "44x_tlb.h"
33#include "trace.h"
34
35#ifndef PPC44x_TLBE_SIZE
36#define PPC44x_TLBE_SIZE PPC44x_TLB_4K
37#endif
38
39#define PAGE_SIZE_4K (1<<12)
40#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1))
41
42#define PPC44x_TLB_UATTR_MASK \
43 (PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3)
44#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
45#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
46
47#ifdef DEBUG
48void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
49{
50 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
51 struct kvmppc_44x_tlbe *tlbe;
52 int i;
53
54 printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
55 printk("| %2s | %3s | %8s | %8s | %8s |\n",
56 "nr", "tid", "word0", "word1", "word2");
57
58 for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
59 tlbe = &vcpu_44x->guest_tlb[i];
60 if (tlbe->word0 & PPC44x_TLB_VALID)
61 printk(" G%2d | %02X | %08X | %08X | %08X |\n",
62 i, tlbe->tid, tlbe->word0, tlbe->word1,
63 tlbe->word2);
64 }
65}
66#endif
67
68static inline void kvmppc_44x_tlbie(unsigned int index)
69{
70 /* 0 <= index < 64, so the V bit is clear and we can use the index as
71 * word0. */
72 asm volatile(
73 "tlbwe %[index], %[index], 0\n"
74 :
75 : [index] "r"(index)
76 );
77}
78
79static inline void kvmppc_44x_tlbre(unsigned int index,
80 struct kvmppc_44x_tlbe *tlbe)
81{
82 asm volatile(
83 "tlbre %[word0], %[index], 0\n"
84 "mfspr %[tid], %[sprn_mmucr]\n"
85 "andi. %[tid], %[tid], 0xff\n"
86 "tlbre %[word1], %[index], 1\n"
87 "tlbre %[word2], %[index], 2\n"
88 : [word0] "=r"(tlbe->word0),
89 [word1] "=r"(tlbe->word1),
90 [word2] "=r"(tlbe->word2),
91 [tid] "=r"(tlbe->tid)
92 : [index] "r"(index),
93 [sprn_mmucr] "i"(SPRN_MMUCR)
94 : "cc"
95 );
96}
97
98static inline void kvmppc_44x_tlbwe(unsigned int index,
99 struct kvmppc_44x_tlbe *stlbe)
100{
101 unsigned long tmp;
102
103 asm volatile(
104 "mfspr %[tmp], %[sprn_mmucr]\n"
105 "rlwimi %[tmp], %[tid], 0, 0xff\n"
106 "mtspr %[sprn_mmucr], %[tmp]\n"
107 "tlbwe %[word0], %[index], 0\n"
108 "tlbwe %[word1], %[index], 1\n"
109 "tlbwe %[word2], %[index], 2\n"
110 : [tmp] "=&r"(tmp)
111 : [word0] "r"(stlbe->word0),
112 [word1] "r"(stlbe->word1),
113 [word2] "r"(stlbe->word2),
114 [tid] "r"(stlbe->tid),
115 [index] "r"(index),
116 [sprn_mmucr] "i"(SPRN_MMUCR)
117 );
118}
119
120static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
121{
122 /* We only care about the guest's permission and user bits. */
123 attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK;
124
125 if (!usermode) {
126 /* Guest is in supervisor mode, so we need to translate guest
127 * supervisor permissions into user permissions. */
128 attrib &= ~PPC44x_TLB_USER_PERM_MASK;
129 attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
130 }
131
132 /* Make sure host can always access this memory. */
133 attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;
134
135 /* WIMGE = 0b00100 */
136 attrib |= PPC44x_TLB_M;
137
138 return attrib;
139}
140
141/* Load shadow TLB back into hardware. */
142void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu)
143{
144 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
145 int i;
146
147 for (i = 0; i <= tlb_44x_hwater; i++) {
148 struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
149
150 if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
151 kvmppc_44x_tlbwe(i, stlbe);
152 }
153}
154
155static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x,
156 unsigned int i)
157{
158 vcpu_44x->shadow_tlb_mod[i] = 1;
159}
160
161/* Save hardware TLB to the vcpu, and invalidate all guest mappings. */
162void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu)
163{
164 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
165 int i;
166
167 for (i = 0; i <= tlb_44x_hwater; i++) {
168 struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
169
170 if (vcpu_44x->shadow_tlb_mod[i])
171 kvmppc_44x_tlbre(i, stlbe);
172
173 if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
174 kvmppc_44x_tlbie(i);
175 }
176}
177
178
179/* Search the guest TLB for a matching entry. */
180int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
181 unsigned int as)
182{
183 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
184 int i;
185
186 /* XXX Replace loop with fancy data structures. */
187 for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
188 struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
189 unsigned int tid;
190
191 if (eaddr < get_tlb_eaddr(tlbe))
192 continue;
193
194 if (eaddr > get_tlb_end(tlbe))
195 continue;
196
197 tid = get_tlb_tid(tlbe);
198 if (tid && (tid != pid))
199 continue;
200
201 if (!get_tlb_v(tlbe))
202 continue;
203
204 if (get_tlb_ts(tlbe) != as)
205 continue;
206
207 return i;
208 }
209
210 return -1;
211}
212
213gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
214 gva_t eaddr)
215{
216 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
217 struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
218 unsigned int pgmask = get_tlb_bytes(gtlbe) - 1;
219
220 return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
221}
222
223int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
224{
225 unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
226
227 return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
228}
229
230int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
231{
232 unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
233
234 return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
235}
236
237void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
238{
239}
240
241void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
242{
243}
244
245static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
246 unsigned int stlb_index)
247{
248 struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index];
249
250 if (!ref->page)
251 return;
252
253 /* Discard from the TLB. */
254 /* Note: we could actually invalidate a host mapping, if the host overwrote
255 * this TLB entry since we inserted a guest mapping. */
256 kvmppc_44x_tlbie(stlb_index);
257
258 /* Now release the page. */
259 if (ref->writeable)
260 kvm_release_page_dirty(ref->page);
261 else
262 kvm_release_page_clean(ref->page);
263
264 ref->page = NULL;
265
266 /* XXX set tlb_44x_index to stlb_index? */
267
268 trace_kvm_stlb_inval(stlb_index);
269}
270
271void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu)
272{
273 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
274 int i;
275
276 for (i = 0; i <= tlb_44x_hwater; i++)
277 kvmppc_44x_shadow_release(vcpu_44x, i);
278}
279
280/**
281 * kvmppc_mmu_map -- create a host mapping for guest memory
282 *
283 * If the guest wanted a larger page than the host supports, only the first
284 * host page is mapped here and the rest are demand faulted.
285 *
286 * If the guest wanted a smaller page than the host page size, we map only the
287 * guest-size page (i.e. not a full host page mapping).
288 *
289 * Caller must ensure that the specified guest TLB entry is safe to insert into
290 * the shadow TLB.
291 */
292void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
293 unsigned int gtlb_index)
294{
295 struct kvmppc_44x_tlbe stlbe;
296 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
297 struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
298 struct kvmppc_44x_shadow_ref *ref;
299 struct page *new_page;
300 hpa_t hpaddr;
301 gfn_t gfn;
302 u32 asid = gtlbe->tid;
303 u32 flags = gtlbe->word2;
304 u32 max_bytes = get_tlb_bytes(gtlbe);
305 unsigned int victim;
306
307 /* Select TLB entry to clobber. Indirectly guard against races with the TLB
308 * miss handler by disabling interrupts. */
309 local_irq_disable();
310 victim = ++tlb_44x_index;
311 if (victim > tlb_44x_hwater)
312 victim = 0;
313 tlb_44x_index = victim;
314 local_irq_enable();
315
316 /* Get reference to new page. */
317 gfn = gpaddr >> PAGE_SHIFT;
318 new_page = gfn_to_page(vcpu->kvm, gfn);
319 if (is_error_page(new_page)) {
320 printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n",
321 (unsigned long long)gfn);
322 return;
323 }
324 hpaddr = page_to_phys(new_page);
325
326 /* Invalidate any previous shadow mappings. */
327 kvmppc_44x_shadow_release(vcpu_44x, victim);
328
329 /* XXX Make sure (va, size) doesn't overlap any other
330 * entries. 440x6 user manual says the result would be
331 * "undefined." */
332
333 /* XXX what about AS? */
334
335 /* Force TS=1 for all guest mappings. */
336 stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;
337
338 if (max_bytes >= PAGE_SIZE) {
339 /* Guest mapping is larger than or equal to host page size. We can use
340 * a "native" host mapping. */
341 stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
342 } else {
343 /* Guest mapping is smaller than host page size. We must restrict the
344 * size of the mapping to be at most the smaller of the two, but for
345 * simplicity we fall back to a 4K mapping (this is probably what the
346 * guest is using anyways). */
347 stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;
348
349 /* 'hpaddr' is a host page, which is larger than the mapping we're
350 * inserting here. To compensate, we must add the in-page offset to the
351 * sub-page. */
352 hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
353 }
354
355 stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
356 stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
357 vcpu->arch.shared->msr & MSR_PR);
358 stlbe.tid = !(asid & 0xff);
359
360 /* Keep track of the reference so we can properly release it later. */
361 ref = &vcpu_44x->shadow_refs[victim];
362 ref->page = new_page;
363 ref->gtlb_index = gtlb_index;
364 ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW);
365 ref->tid = stlbe.tid;
366
367 /* Insert shadow mapping into hardware TLB. */
368 kvmppc_44x_tlbe_set_modified(vcpu_44x, victim);
369 kvmppc_44x_tlbwe(victim, &stlbe);
370 trace_kvm_stlb_write(victim, stlbe.tid, stlbe.word0, stlbe.word1,
371 stlbe.word2);
372}
373
374/* For a particular guest TLB entry, invalidate the corresponding host TLB
375 * mappings and release the host pages. */
376static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
377 unsigned int gtlb_index)
378{
379 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
380 int i;
381
382 for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
383 struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
384 if (ref->gtlb_index == gtlb_index)
385 kvmppc_44x_shadow_release(vcpu_44x, i);
386 }
387}
388
389void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
390{
391 int usermode = vcpu->arch.shared->msr & MSR_PR;
392
393 vcpu->arch.shadow_pid = !usermode;
394}
395
396void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
397{
398 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
399 int i;
400
401 if (unlikely(vcpu->arch.pid == new_pid))
402 return;
403
404 vcpu->arch.pid = new_pid;
405
406 /* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
407 * can't access guest kernel mappings (TID=1). When we switch to a new
408 * guest PID, which will also use host PID=0, we must discard the old guest
409 * userspace mappings. */
410 for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
411 struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
412
413 if (ref->tid == 0)
414 kvmppc_44x_shadow_release(vcpu_44x, i);
415 }
416}
417
418static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
419 const struct kvmppc_44x_tlbe *tlbe)
420{
421 gpa_t gpa;
422
423 if (!get_tlb_v(tlbe))
424 return 0;
425
426 /* Does it match current guest AS? */
427 /* XXX what about IS != DS? */
428 if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
429 return 0;
430
431 gpa = get_tlb_raddr(tlbe);
432 if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
433 /* Mapping is not for RAM. */
434 return 0;
435
436 return 1;
437}
438
439int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
440{
441 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
442 struct kvmppc_44x_tlbe *tlbe;
443 unsigned int gtlb_index;
444 int idx;
445
446 gtlb_index = kvmppc_get_gpr(vcpu, ra);
447 if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
448 printk("%s: index %d\n", __func__, gtlb_index);
449 kvmppc_dump_vcpu(vcpu);
450 return EMULATE_FAIL;
451 }
452
453 tlbe = &vcpu_44x->guest_tlb[gtlb_index];
454
455 /* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */
456 if (tlbe->word0 & PPC44x_TLB_VALID)
457 kvmppc_44x_invalidate(vcpu, gtlb_index);
458
459 switch (ws) {
460 case PPC44x_TLB_PAGEID:
461 tlbe->tid = get_mmucr_stid(vcpu);
462 tlbe->word0 = kvmppc_get_gpr(vcpu, rs);
463 break;
464
465 case PPC44x_TLB_XLAT:
466 tlbe->word1 = kvmppc_get_gpr(vcpu, rs);
467 break;
468
469 case PPC44x_TLB_ATTRIB:
470 tlbe->word2 = kvmppc_get_gpr(vcpu, rs);
471 break;
472
473 default:
474 return EMULATE_FAIL;
475 }
476
477 idx = srcu_read_lock(&vcpu->kvm->srcu);
478
479 if (tlbe_is_host_safe(vcpu, tlbe)) {
480 gva_t eaddr;
481 gpa_t gpaddr;
482 u32 bytes;
483
484 eaddr = get_tlb_eaddr(tlbe);
485 gpaddr = get_tlb_raddr(tlbe);
486
487 /* Use the advertised page size to mask effective and real addrs. */
488 bytes = get_tlb_bytes(tlbe);
489 eaddr &= ~(bytes - 1);
490 gpaddr &= ~(bytes - 1);
491
492 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
493 }
494
495 srcu_read_unlock(&vcpu->kvm->srcu, idx);
496
497 trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
498 tlbe->word2);
499
500 kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
501 return EMULATE_DONE;
502}
503
504int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
505{
506 u32 ea;
507 int gtlb_index;
508 unsigned int as = get_mmucr_sts(vcpu);
509 unsigned int pid = get_mmucr_stid(vcpu);
510
511 ea = kvmppc_get_gpr(vcpu, rb);
512 if (ra)
513 ea += kvmppc_get_gpr(vcpu, ra);
514
515 gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
516 if (rc) {
517 u32 cr = kvmppc_get_cr(vcpu);
518
519 if (gtlb_index < 0)
520 kvmppc_set_cr(vcpu, cr & ~0x20000000);
521 else
522 kvmppc_set_cr(vcpu, cr | 0x20000000);
523 }
524 kvmppc_set_gpr(vcpu, rt, gtlb_index);
525
526 kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
527 return EMULATE_DONE;
528}
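A recurring trick in 44x_tlb.c is kvmppc_44x_tlb_shadow_attrib(): when the guest is in supervisor mode, its supervisor permission bits are copied into the user permission slots of the shadow entry (user = supervisor << 3), and the host always keeps full supervisor access. A simplified standalone version of just that bit manipulation is below; the TLB_* values are placeholders chosen only to preserve the << 3 relationship, not the real asm/mmu-44x.h definitions, and the U0-U3 attribute and WIMG handling is omitted.

#include <stdint.h>
#include <stdio.h>

/* Placeholder permission bits; only the "user = supervisor << 3" layout
 * matters for this sketch. */
#define TLB_SX 0x001
#define TLB_SW 0x002
#define TLB_SR 0x004
#define TLB_UX (TLB_SX << 3)
#define TLB_UW (TLB_SW << 3)
#define TLB_UR (TLB_SR << 3)

#define TLB_SUPER_PERM_MASK (TLB_SX | TLB_SR | TLB_SW)
#define TLB_USER_PERM_MASK  (TLB_UX | TLB_UR | TLB_UW)

static uint32_t shadow_attrib(uint32_t attrib, int guest_usermode)
{
	/* Keep only the guest's permission bits. */
	attrib &= TLB_SUPER_PERM_MASK | TLB_USER_PERM_MASK;

	if (!guest_usermode) {
		/* Guest supervisor permissions become user permissions
		 * in the shadow entry. */
		attrib &= ~TLB_USER_PERM_MASK;
		attrib |= (attrib & TLB_SUPER_PERM_MASK) << 3;
	}

	/* The host must always be able to reach the page. */
	attrib |= TLB_SUPER_PERM_MASK;

	return attrib;
}

int main(void)
{
	/* Guest kernel mapping: supervisor read/write, no execute. */
	uint32_t guest = TLB_SR | TLB_SW;

	printf("shadow attrib = 0x%03x\n", shadow_attrib(guest, 0));
	return 0;
}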
diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h
deleted file mode 100644
index a9ff80e51526..000000000000
--- a/arch/powerpc/kvm/44x_tlb.h
+++ /dev/null
@@ -1,86 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __KVM_POWERPC_TLB_H__
21#define __KVM_POWERPC_TLB_H__
22
23#include <linux/kvm_host.h>
24#include <asm/mmu-44x.h>
25
26extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr,
27 unsigned int pid, unsigned int as);
28
29extern int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb,
30 u8 rc);
31extern int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws);
32
33/* TLB helper functions */
34static inline unsigned int get_tlb_size(const struct kvmppc_44x_tlbe *tlbe)
35{
36 return (tlbe->word0 >> 4) & 0xf;
37}
38
39static inline gva_t get_tlb_eaddr(const struct kvmppc_44x_tlbe *tlbe)
40{
41 return tlbe->word0 & 0xfffffc00;
42}
43
44static inline gva_t get_tlb_bytes(const struct kvmppc_44x_tlbe *tlbe)
45{
46 unsigned int pgsize = get_tlb_size(tlbe);
47 return 1 << 10 << (pgsize << 1);
48}
49
50static inline gva_t get_tlb_end(const struct kvmppc_44x_tlbe *tlbe)
51{
52 return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1;
53}
54
55static inline u64 get_tlb_raddr(const struct kvmppc_44x_tlbe *tlbe)
56{
57 u64 word1 = tlbe->word1;
58 return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00);
59}
60
61static inline unsigned int get_tlb_tid(const struct kvmppc_44x_tlbe *tlbe)
62{
63 return tlbe->tid & 0xff;
64}
65
66static inline unsigned int get_tlb_ts(const struct kvmppc_44x_tlbe *tlbe)
67{
68 return (tlbe->word0 >> 8) & 0x1;
69}
70
71static inline unsigned int get_tlb_v(const struct kvmppc_44x_tlbe *tlbe)
72{
73 return (tlbe->word0 >> 9) & 0x1;
74}
75
76static inline unsigned int get_mmucr_stid(const struct kvm_vcpu *vcpu)
77{
78 return vcpu->arch.mmucr & 0xff;
79}
80
81static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu)
82{
83 return (vcpu->arch.mmucr >> 16) & 0x1;
84}
85
86#endif /* __KVM_POWERPC_TLB_H__ */
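The get_tlb_bytes() helper above decodes the 440 TLB SIZE field as 1 KiB scaled by a factor of four per step, which is what "1 << 10 << (pgsize << 1)" computes. A quick standalone check of that arithmetic:

#include <stdio.h>

/* Same expression as get_tlb_bytes() above: 1 KiB * 4^SIZE. */
static unsigned long tlb_bytes(unsigned int size_field)
{
	return 1ul << 10 << (size_field << 1);
}

int main(void)
{
	unsigned int s;

	/* Prints 1, 4, 16, 64, 256 and 1024 KiB for SIZE = 0..5. */
	for (s = 0; s <= 5; s++)
		printf("SIZE=%u -> %lu KiB\n", s, tlb_bytes(s) >> 10);
	return 0;
}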
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index d6a53b95de94..602eb51d20bc 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -75,7 +75,6 @@ config KVM_BOOK3S_64
75config KVM_BOOK3S_64_HV 75config KVM_BOOK3S_64_HV
76 tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host" 76 tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host"
77 depends on KVM_BOOK3S_64 77 depends on KVM_BOOK3S_64
78 depends on !CPU_LITTLE_ENDIAN
79 select KVM_BOOK3S_HV_POSSIBLE 78 select KVM_BOOK3S_HV_POSSIBLE
80 select MMU_NOTIFIER 79 select MMU_NOTIFIER
81 select CMA 80 select CMA
@@ -113,23 +112,9 @@ config KVM_BOOK3S_64_PR
113config KVM_BOOKE_HV 112config KVM_BOOKE_HV
114 bool 113 bool
115 114
116config KVM_440
117 bool "KVM support for PowerPC 440 processors"
118 depends on 44x
119 select KVM
120 select KVM_MMIO
121 ---help---
122 Support running unmodified 440 guest kernels in virtual machines on
123 440 host processors.
124
125 This module provides access to the hardware capabilities through
126 a character device node named /dev/kvm.
127
128 If unsure, say N.
129
130config KVM_EXIT_TIMING 115config KVM_EXIT_TIMING
131 bool "Detailed exit timing" 116 bool "Detailed exit timing"
132 depends on KVM_440 || KVM_E500V2 || KVM_E500MC 117 depends on KVM_E500V2 || KVM_E500MC
133 ---help--- 118 ---help---
134 Calculate elapsed time for every exit/enter cycle. A per-vcpu 119 Calculate elapsed time for every exit/enter cycle. A per-vcpu
135 report is available in debugfs kvm/vm#_vcpu#_timing. 120 report is available in debugfs kvm/vm#_vcpu#_timing.
@@ -173,6 +158,7 @@ config KVM_MPIC
173 bool "KVM in-kernel MPIC emulation" 158 bool "KVM in-kernel MPIC emulation"
174 depends on KVM && E500 159 depends on KVM && E500
175 select HAVE_KVM_IRQCHIP 160 select HAVE_KVM_IRQCHIP
161 select HAVE_KVM_IRQFD
176 select HAVE_KVM_IRQ_ROUTING 162 select HAVE_KVM_IRQ_ROUTING
177 select HAVE_KVM_MSI 163 select HAVE_KVM_MSI
178 help 164 help
@@ -184,6 +170,8 @@ config KVM_MPIC
184config KVM_XICS 170config KVM_XICS
185 bool "KVM in-kernel XICS emulation" 171 bool "KVM in-kernel XICS emulation"
186 depends on KVM_BOOK3S_64 && !KVM_MPIC 172 depends on KVM_BOOK3S_64 && !KVM_MPIC
173 select HAVE_KVM_IRQCHIP
174 select HAVE_KVM_IRQFD
187 ---help--- 175 ---help---
188 Include support for the XICS (eXternal Interrupt Controller 176 Include support for the XICS (eXternal Interrupt Controller
189 Specification) interrupt controller architecture used on 177 Specification) interrupt controller architecture used on
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 72905c30082e..0570eef83fba 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -10,27 +10,17 @@ KVM := ../../../virt/kvm
10common-objs-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \ 10common-objs-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
11 $(KVM)/eventfd.o 11 $(KVM)/eventfd.o
12 12
13CFLAGS_44x_tlb.o := -I.
14CFLAGS_e500_mmu.o := -I. 13CFLAGS_e500_mmu.o := -I.
15CFLAGS_e500_mmu_host.o := -I. 14CFLAGS_e500_mmu_host.o := -I.
16CFLAGS_emulate.o := -I. 15CFLAGS_emulate.o := -I.
16CFLAGS_emulate_loadstore.o := -I.
17 17
18common-objs-y += powerpc.o emulate.o 18common-objs-y += powerpc.o emulate.o emulate_loadstore.o
19obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o 19obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o
20obj-$(CONFIG_KVM_BOOK3S_HANDLER) += book3s_exports.o 20obj-$(CONFIG_KVM_BOOK3S_HANDLER) += book3s_exports.o
21 21
22AFLAGS_booke_interrupts.o := -I$(obj) 22AFLAGS_booke_interrupts.o := -I$(obj)
23 23
24kvm-440-objs := \
25 $(common-objs-y) \
26 booke.o \
27 booke_emulate.o \
28 booke_interrupts.o \
29 44x.o \
30 44x_tlb.o \
31 44x_emulate.o
32kvm-objs-$(CONFIG_KVM_440) := $(kvm-440-objs)
33
34kvm-e500-objs := \ 24kvm-e500-objs := \
35 $(common-objs-y) \ 25 $(common-objs-y) \
36 booke.o \ 26 booke.o \
@@ -58,6 +48,7 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) := \
58 48
59kvm-pr-y := \ 49kvm-pr-y := \
60 fpu.o \ 50 fpu.o \
51 emulate.o \
61 book3s_paired_singles.o \ 52 book3s_paired_singles.o \
62 book3s_pr.o \ 53 book3s_pr.o \
63 book3s_pr_papr.o \ 54 book3s_pr_papr.o \
@@ -100,7 +91,7 @@ kvm-book3s_64-module-objs += \
100 $(KVM)/kvm_main.o \ 91 $(KVM)/kvm_main.o \
101 $(KVM)/eventfd.o \ 92 $(KVM)/eventfd.o \
102 powerpc.o \ 93 powerpc.o \
103 emulate.o \ 94 emulate_loadstore.o \
104 book3s.o \ 95 book3s.o \
105 book3s_64_vio.o \ 96 book3s_64_vio.o \
106 book3s_rtas.o \ 97 book3s_rtas.o \
@@ -126,7 +117,6 @@ kvm-objs-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o
126 117
127kvm-objs := $(kvm-objs-m) $(kvm-objs-y) 118kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
128 119
129obj-$(CONFIG_KVM_440) += kvm.o
130obj-$(CONFIG_KVM_E500V2) += kvm.o 120obj-$(CONFIG_KVM_E500V2) += kvm.o
131obj-$(CONFIG_KVM_E500MC) += kvm.o 121obj-$(CONFIG_KVM_E500MC) += kvm.o
132obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o 122obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index c254c27f240e..dd03f6b299ba 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -72,6 +72,17 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
72{ 72{
73} 73}
74 74
75void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
76{
77 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
78 ulong pc = kvmppc_get_pc(vcpu);
79 if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
80 kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
81 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
82 }
83}
84EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);
85
75static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu) 86static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
76{ 87{
77 if (!is_kvmppc_hv_enabled(vcpu->kvm)) 88 if (!is_kvmppc_hv_enabled(vcpu->kvm))
@@ -118,6 +129,7 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
118 129
119void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) 130void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
120{ 131{
132 kvmppc_unfixup_split_real(vcpu);
121 kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu)); 133 kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
122 kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags); 134 kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
123 kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec); 135 kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
@@ -218,6 +230,23 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
218 kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL); 230 kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
219} 231}
220 232
233void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
234 ulong flags)
235{
236 kvmppc_set_dar(vcpu, dar);
237 kvmppc_set_dsisr(vcpu, flags);
238 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
239}
240
241void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
242{
243 u64 msr = kvmppc_get_msr(vcpu);
244 msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
245 msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
246 kvmppc_set_msr_fast(vcpu, msr);
247 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
248}
249
221int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) 250int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
222{ 251{
223 int deliver = 1; 252 int deliver = 1;
@@ -342,18 +371,18 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
342} 371}
343EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter); 372EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
344 373
345pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing, 374pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
346 bool *writable) 375 bool *writable)
347{ 376{
348 ulong mp_pa = vcpu->arch.magic_page_pa; 377 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
378 gfn_t gfn = gpa >> PAGE_SHIFT;
349 379
350 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) 380 if (!(kvmppc_get_msr(vcpu) & MSR_SF))
351 mp_pa = (uint32_t)mp_pa; 381 mp_pa = (uint32_t)mp_pa;
352 382
353 /* Magic page override */ 383 /* Magic page override */
354 if (unlikely(mp_pa) && 384 gpa &= ~0xFFFULL;
355 unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) == 385 if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
356 ((mp_pa & PAGE_MASK) & KVM_PAM))) {
357 ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; 386 ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
358 pfn_t pfn; 387 pfn_t pfn;
359 388
@@ -366,11 +395,13 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
366 395
367 return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable); 396 return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
368} 397}
369EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn); 398EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
370 399
371static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, 400int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
372 bool iswrite, struct kvmppc_pte *pte) 401 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
373{ 402{
403 bool data = (xlid == XLATE_DATA);
404 bool iswrite = (xlrw == XLATE_WRITE);
374 int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR)); 405 int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
375 int r; 406 int r;
376 407
@@ -384,88 +415,34 @@ static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
384 pte->may_write = true; 415 pte->may_write = true;
385 pte->may_execute = true; 416 pte->may_execute = true;
386 r = 0; 417 r = 0;
418
419 if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
420 !data) {
421 if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
422 ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
423 pte->raddr &= ~SPLIT_HACK_MASK;
424 }
387 } 425 }
388 426
389 return r; 427 return r;
390} 428}
391 429
392static hva_t kvmppc_bad_hva(void) 430int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
393{ 431 u32 *inst)
394 return PAGE_OFFSET;
395}
396
397static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
398 bool read)
399{
400 hva_t hpage;
401
402 if (read && !pte->may_read)
403 goto err;
404
405 if (!read && !pte->may_write)
406 goto err;
407
408 hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
409 if (kvm_is_error_hva(hpage))
410 goto err;
411
412 return hpage | (pte->raddr & ~PAGE_MASK);
413err:
414 return kvmppc_bad_hva();
415}
416
417int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
418 bool data)
419{
420 struct kvmppc_pte pte;
421
422 vcpu->stat.st++;
423
424 if (kvmppc_xlate(vcpu, *eaddr, data, true, &pte))
425 return -ENOENT;
426
427 *eaddr = pte.raddr;
428
429 if (!pte.may_write)
430 return -EPERM;
431
432 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
433 return EMULATE_DO_MMIO;
434
435 return EMULATE_DONE;
436}
437EXPORT_SYMBOL_GPL(kvmppc_st);
438
439int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
440 bool data)
441{ 432{
442 struct kvmppc_pte pte; 433 ulong pc = kvmppc_get_pc(vcpu);
443 hva_t hva = *eaddr; 434 int r;
444
445 vcpu->stat.ld++;
446
447 if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte))
448 goto nopte;
449
450 *eaddr = pte.raddr;
451
452 hva = kvmppc_pte_to_hva(vcpu, &pte, true);
453 if (kvm_is_error_hva(hva))
454 goto mmio;
455
456 if (copy_from_user(ptr, (void __user *)hva, size)) {
457 printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
458 goto mmio;
459 }
460 435
461 return EMULATE_DONE; 436 if (type == INST_SC)
437 pc -= 4;
462 438
463nopte: 439 r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
464 return -ENOENT; 440 if (r == EMULATE_DONE)
465mmio: 441 return r;
466 return EMULATE_DO_MMIO; 442 else
443 return EMULATE_AGAIN;
467} 444}
468EXPORT_SYMBOL_GPL(kvmppc_ld); 445EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
469 446
470int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 447int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
471{ 448{
@@ -646,6 +623,12 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
646 case KVM_REG_PPC_BESCR: 623 case KVM_REG_PPC_BESCR:
647 val = get_reg_val(reg->id, vcpu->arch.bescr); 624 val = get_reg_val(reg->id, vcpu->arch.bescr);
648 break; 625 break;
626 case KVM_REG_PPC_VTB:
627 val = get_reg_val(reg->id, vcpu->arch.vtb);
628 break;
629 case KVM_REG_PPC_IC:
630 val = get_reg_val(reg->id, vcpu->arch.ic);
631 break;
649 default: 632 default:
650 r = -EINVAL; 633 r = -EINVAL;
651 break; 634 break;
@@ -750,6 +733,12 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
750 case KVM_REG_PPC_BESCR: 733 case KVM_REG_PPC_BESCR:
751 vcpu->arch.bescr = set_reg_val(reg->id, val); 734 vcpu->arch.bescr = set_reg_val(reg->id, val);
752 break; 735 break;
736 case KVM_REG_PPC_VTB:
737 vcpu->arch.vtb = set_reg_val(reg->id, val);
738 break;
739 case KVM_REG_PPC_IC:
740 vcpu->arch.ic = set_reg_val(reg->id, val);
741 break;
753 default: 742 default:
754 r = -EINVAL; 743 r = -EINVAL;
755 break; 744 break;
@@ -913,6 +902,11 @@ int kvmppc_core_check_processor_compat(void)
913 return 0; 902 return 0;
914} 903}
915 904
905int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
906{
907 return kvm->arch.kvm_ops->hcall_implemented(hcall);
908}
909
916static int kvmppc_book3s_init(void) 910static int kvmppc_book3s_init(void)
917{ 911{
918 int r; 912 int r;
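One detail from the book3s.c changes above: kvmppc_gpa_to_pfn() now takes a guest physical address directly and applies the magic-page override by comparing page frames, masking the low 12 bits of the gpa before the comparison. A rough user-space rendering of just that check follows; DEMO_PAM, the 4 KiB mask and the function name are placeholders, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAM	0x0fffffffffffffffULL	/* stand-in for KVM_PAM */
#define PAGE_MASK_4K	(~0xfffULL)

/* Reduce both the faulting gpa and the configured magic-page address to
 * their 4 KiB page frame, so any offset inside the magic page matches. */
static int is_magic_page(uint64_t gpa, uint64_t magic_page_pa)
{
	uint64_t mp_pa = magic_page_pa & DEMO_PAM;

	gpa &= PAGE_MASK_4K;
	return mp_pa && (gpa & DEMO_PAM) == mp_pa;
}

int main(void)
{
	uint64_t magic = 0x3000;	/* example magic-page address */

	printf("%d\n", is_magic_page(0x3abc, magic));	/* 1: same page */
	printf("%d\n", is_magic_page(0x5000, magic));	/* 0: different page */
	return 0;
}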
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 93503bbdae43..cd0b0730e29e 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -335,7 +335,7 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
335 if (r < 0) 335 if (r < 0)
336 r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, 336 r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
337 data, iswrite, true); 337 data, iswrite, true);
338 if (r < 0) 338 if (r == -ENOENT)
339 r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, 339 r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
340 data, iswrite, false); 340 data, iswrite, false);
341 341
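The one-line change above narrows the fallback in the 32-bit Book3S xlate path: the secondary-hash lookup is tried only when the primary lookup reported "not found" (-ENOENT), not on every negative return. A stub-level sketch of that control flow, with the two lookups faked:

#include <errno.h>
#include <stdio.h>

/* Fake lookups standing in for kvmppc_mmu_book3s_32_xlate_pte(); the
 * primary probe "misses", the secondary probe "hits". */
static int lookup(int primary)
{
	return primary ? -ENOENT : 0;
}

static int xlate(void)
{
	int r = lookup(1);		/* primary hash first */

	if (r == -ENOENT)		/* fall back only when nothing was found */
		r = lookup(0);		/* secondary hash */

	return r;
}

int main(void)
{
	printf("xlate() = %d\n", xlate());	/* 0: resolved by the fallback */
	return 0;
}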
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 678e75370495..2035d16a9262 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -156,11 +156,10 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
156 bool writable; 156 bool writable;
157 157
158 /* Get host physical address for gpa */ 158 /* Get host physical address for gpa */
159 hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT, 159 hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
160 iswrite, &writable);
161 if (is_error_noslot_pfn(hpaddr)) { 160 if (is_error_noslot_pfn(hpaddr)) {
162 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", 161 printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
163 orig_pte->eaddr); 162 orig_pte->raddr);
164 r = -EINVAL; 163 r = -EINVAL;
165 goto out; 164 goto out;
166 } 165 }
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 0ac98392f363..b982d925c710 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -104,9 +104,10 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
104 smp_rmb(); 104 smp_rmb();
105 105
106 /* Get host physical address for gpa */ 106 /* Get host physical address for gpa */
107 pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable); 107 pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
108 if (is_error_noslot_pfn(pfn)) { 108 if (is_error_noslot_pfn(pfn)) {
109 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", gfn); 109 printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
110 orig_pte->raddr);
110 r = -EINVAL; 111 r = -EINVAL;
111 goto out; 112 goto out;
112 } 113 }
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index a01744fc3483..72c20bb16d26 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -448,7 +448,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
448 unsigned long slb_v; 448 unsigned long slb_v;
449 unsigned long pp, key; 449 unsigned long pp, key;
450 unsigned long v, gr; 450 unsigned long v, gr;
451 unsigned long *hptep; 451 __be64 *hptep;
452 int index; 452 int index;
453 int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR); 453 int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
454 454
@@ -471,13 +471,13 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
471 preempt_enable(); 471 preempt_enable();
472 return -ENOENT; 472 return -ENOENT;
473 } 473 }
474 hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); 474 hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
475 v = hptep[0] & ~HPTE_V_HVLOCK; 475 v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
476 gr = kvm->arch.revmap[index].guest_rpte; 476 gr = kvm->arch.revmap[index].guest_rpte;
477 477
478 /* Unlock the HPTE */ 478 /* Unlock the HPTE */
479 asm volatile("lwsync" : : : "memory"); 479 asm volatile("lwsync" : : : "memory");
480 hptep[0] = v; 480 hptep[0] = cpu_to_be64(v);
481 preempt_enable(); 481 preempt_enable();
482 482
483 gpte->eaddr = eaddr; 483 gpte->eaddr = eaddr;
@@ -528,21 +528,14 @@ static int instruction_is_store(unsigned int instr)
528static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, 528static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
529 unsigned long gpa, gva_t ea, int is_store) 529 unsigned long gpa, gva_t ea, int is_store)
530{ 530{
531 int ret;
532 u32 last_inst; 531 u32 last_inst;
533 unsigned long srr0 = kvmppc_get_pc(vcpu);
534 532
535 /* We try to load the last instruction. We don't let 533 /*
536 * emulate_instruction do it as it doesn't check what
537 * kvmppc_ld returns.
538 * If we fail, we just return to the guest and try executing it again. 534 * If we fail, we just return to the guest and try executing it again.
539 */ 535 */
540 if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) { 536 if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
541 ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false); 537 EMULATE_DONE)
542 if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED) 538 return RESUME_GUEST;
543 return RESUME_GUEST;
544 vcpu->arch.last_inst = last_inst;
545 }
546 539
547 /* 540 /*
548 * WARNING: We do not know for sure whether the instruction we just 541 * WARNING: We do not know for sure whether the instruction we just
@@ -556,7 +549,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
556 * we just return and retry the instruction. 549 * we just return and retry the instruction.
557 */ 550 */
558 551
559 if (instruction_is_store(kvmppc_get_last_inst(vcpu)) != !!is_store) 552 if (instruction_is_store(last_inst) != !!is_store)
560 return RESUME_GUEST; 553 return RESUME_GUEST;
561 554
562 /* 555 /*
@@ -581,7 +574,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
581 unsigned long ea, unsigned long dsisr) 574 unsigned long ea, unsigned long dsisr)
582{ 575{
583 struct kvm *kvm = vcpu->kvm; 576 struct kvm *kvm = vcpu->kvm;
584 unsigned long *hptep, hpte[3], r; 577 unsigned long hpte[3], r;
578 __be64 *hptep;
585 unsigned long mmu_seq, psize, pte_size; 579 unsigned long mmu_seq, psize, pte_size;
586 unsigned long gpa_base, gfn_base; 580 unsigned long gpa_base, gfn_base;
587 unsigned long gpa, gfn, hva, pfn; 581 unsigned long gpa, gfn, hva, pfn;
@@ -604,16 +598,16 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
604 if (ea != vcpu->arch.pgfault_addr) 598 if (ea != vcpu->arch.pgfault_addr)
605 return RESUME_GUEST; 599 return RESUME_GUEST;
606 index = vcpu->arch.pgfault_index; 600 index = vcpu->arch.pgfault_index;
607 hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); 601 hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
608 rev = &kvm->arch.revmap[index]; 602 rev = &kvm->arch.revmap[index];
609 preempt_disable(); 603 preempt_disable();
610 while (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) 604 while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
611 cpu_relax(); 605 cpu_relax();
612 hpte[0] = hptep[0] & ~HPTE_V_HVLOCK; 606 hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
613 hpte[1] = hptep[1]; 607 hpte[1] = be64_to_cpu(hptep[1]);
614 hpte[2] = r = rev->guest_rpte; 608 hpte[2] = r = rev->guest_rpte;
615 asm volatile("lwsync" : : : "memory"); 609 asm volatile("lwsync" : : : "memory");
616 hptep[0] = hpte[0]; 610 hptep[0] = cpu_to_be64(hpte[0]);
617 preempt_enable(); 611 preempt_enable();
618 612
619 if (hpte[0] != vcpu->arch.pgfault_hpte[0] || 613 if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
@@ -729,8 +723,9 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
729 preempt_disable(); 723 preempt_disable();
730 while (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) 724 while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
731 cpu_relax(); 725 cpu_relax();
732 if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] || 726 if ((be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK) != hpte[0] ||
733 rev->guest_rpte != hpte[2]) 727 be64_to_cpu(hptep[1]) != hpte[1] ||
728 rev->guest_rpte != hpte[2])
734 /* HPTE has been changed under us; let the guest retry */ 729 /* HPTE has been changed under us; let the guest retry */
735 goto out_unlock; 730 goto out_unlock;
736 hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID; 731 hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
@@ -750,20 +745,20 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
750 rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT; 745 rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
751 r &= rcbits | ~(HPTE_R_R | HPTE_R_C); 746 r &= rcbits | ~(HPTE_R_R | HPTE_R_C);
752 747
753 if (hptep[0] & HPTE_V_VALID) { 748 if (be64_to_cpu(hptep[0]) & HPTE_V_VALID) {
754 /* HPTE was previously valid, so we need to invalidate it */ 749 /* HPTE was previously valid, so we need to invalidate it */
755 unlock_rmap(rmap); 750 unlock_rmap(rmap);
756 hptep[0] |= HPTE_V_ABSENT; 751 hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
757 kvmppc_invalidate_hpte(kvm, hptep, index); 752 kvmppc_invalidate_hpte(kvm, hptep, index);
758 /* don't lose previous R and C bits */ 753 /* don't lose previous R and C bits */
759 r |= hptep[1] & (HPTE_R_R | HPTE_R_C); 754 r |= be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
760 } else { 755 } else {
761 kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0); 756 kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
762 } 757 }
763 758
764 hptep[1] = r; 759 hptep[1] = cpu_to_be64(r);
765 eieio(); 760 eieio();
766 hptep[0] = hpte[0]; 761 hptep[0] = cpu_to_be64(hpte[0]);
767 asm volatile("ptesync" : : : "memory"); 762 asm volatile("ptesync" : : : "memory");
768 preempt_enable(); 763 preempt_enable();
769 if (page && hpte_is_writable(r)) 764 if (page && hpte_is_writable(r))
@@ -782,7 +777,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
782 return ret; 777 return ret;
783 778
784 out_unlock: 779 out_unlock:
785 hptep[0] &= ~HPTE_V_HVLOCK; 780 hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
786 preempt_enable(); 781 preempt_enable();
787 goto out_put; 782 goto out_put;
788} 783}
@@ -858,7 +853,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
858{ 853{
859 struct revmap_entry *rev = kvm->arch.revmap; 854 struct revmap_entry *rev = kvm->arch.revmap;
860 unsigned long h, i, j; 855 unsigned long h, i, j;
861 unsigned long *hptep; 856 __be64 *hptep;
862 unsigned long ptel, psize, rcbits; 857 unsigned long ptel, psize, rcbits;
863 858
864 for (;;) { 859 for (;;) {
@@ -874,11 +869,11 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
874 * rmap chain lock. 869 * rmap chain lock.
875 */ 870 */
876 i = *rmapp & KVMPPC_RMAP_INDEX; 871 i = *rmapp & KVMPPC_RMAP_INDEX;
877 hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4)); 872 hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
878 if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { 873 if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
879 /* unlock rmap before spinning on the HPTE lock */ 874 /* unlock rmap before spinning on the HPTE lock */
880 unlock_rmap(rmapp); 875 unlock_rmap(rmapp);
881 while (hptep[0] & HPTE_V_HVLOCK) 876 while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
882 cpu_relax(); 877 cpu_relax();
883 continue; 878 continue;
884 } 879 }
@@ -897,14 +892,14 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
897 892
898 /* Now check and modify the HPTE */ 893 /* Now check and modify the HPTE */
899 ptel = rev[i].guest_rpte; 894 ptel = rev[i].guest_rpte;
900 psize = hpte_page_size(hptep[0], ptel); 895 psize = hpte_page_size(be64_to_cpu(hptep[0]), ptel);
901 if ((hptep[0] & HPTE_V_VALID) && 896 if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
902 hpte_rpn(ptel, psize) == gfn) { 897 hpte_rpn(ptel, psize) == gfn) {
903 if (kvm->arch.using_mmu_notifiers) 898 if (kvm->arch.using_mmu_notifiers)
904 hptep[0] |= HPTE_V_ABSENT; 899 hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
905 kvmppc_invalidate_hpte(kvm, hptep, i); 900 kvmppc_invalidate_hpte(kvm, hptep, i);
906 /* Harvest R and C */ 901 /* Harvest R and C */
907 rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C); 902 rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
908 *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT; 903 *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
909 if (rcbits & ~rev[i].guest_rpte) { 904 if (rcbits & ~rev[i].guest_rpte) {
910 rev[i].guest_rpte = ptel | rcbits; 905 rev[i].guest_rpte = ptel | rcbits;
@@ -912,7 +907,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
912 } 907 }
913 } 908 }
914 unlock_rmap(rmapp); 909 unlock_rmap(rmapp);
915 hptep[0] &= ~HPTE_V_HVLOCK; 910 hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
916 } 911 }
917 return 0; 912 return 0;
918} 913}
@@ -959,7 +954,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
959{ 954{
960 struct revmap_entry *rev = kvm->arch.revmap; 955 struct revmap_entry *rev = kvm->arch.revmap;
961 unsigned long head, i, j; 956 unsigned long head, i, j;
962 unsigned long *hptep; 957 __be64 *hptep;
963 int ret = 0; 958 int ret = 0;
964 959
965 retry: 960 retry:
@@ -975,23 +970,24 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
975 970
976 i = head = *rmapp & KVMPPC_RMAP_INDEX; 971 i = head = *rmapp & KVMPPC_RMAP_INDEX;
977 do { 972 do {
978 hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4)); 973 hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
979 j = rev[i].forw; 974 j = rev[i].forw;
980 975
981 /* If this HPTE isn't referenced, ignore it */ 976 /* If this HPTE isn't referenced, ignore it */
982 if (!(hptep[1] & HPTE_R_R)) 977 if (!(be64_to_cpu(hptep[1]) & HPTE_R_R))
983 continue; 978 continue;
984 979
985 if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { 980 if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
986 /* unlock rmap before spinning on the HPTE lock */ 981 /* unlock rmap before spinning on the HPTE lock */
987 unlock_rmap(rmapp); 982 unlock_rmap(rmapp);
988 while (hptep[0] & HPTE_V_HVLOCK) 983 while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
989 cpu_relax(); 984 cpu_relax();
990 goto retry; 985 goto retry;
991 } 986 }
992 987
993 /* Now check and modify the HPTE */ 988 /* Now check and modify the HPTE */
994 if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) { 989 if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
990 (be64_to_cpu(hptep[1]) & HPTE_R_R)) {
995 kvmppc_clear_ref_hpte(kvm, hptep, i); 991 kvmppc_clear_ref_hpte(kvm, hptep, i);
996 if (!(rev[i].guest_rpte & HPTE_R_R)) { 992 if (!(rev[i].guest_rpte & HPTE_R_R)) {
997 rev[i].guest_rpte |= HPTE_R_R; 993 rev[i].guest_rpte |= HPTE_R_R;
@@ -999,7 +995,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
999 } 995 }
1000 ret = 1; 996 ret = 1;
1001 } 997 }
1002 hptep[0] &= ~HPTE_V_HVLOCK; 998 hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
1003 } while ((i = j) != head); 999 } while ((i = j) != head);
1004 1000
1005 unlock_rmap(rmapp); 1001 unlock_rmap(rmapp);
@@ -1033,7 +1029,7 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
1033 do { 1029 do {
1034 hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4)); 1030 hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
1035 j = rev[i].forw; 1031 j = rev[i].forw;
1036 if (hp[1] & HPTE_R_R) 1032 if (be64_to_cpu(hp[1]) & HPTE_R_R)
1037 goto out; 1033 goto out;
1038 } while ((i = j) != head); 1034 } while ((i = j) != head);
1039 } 1035 }
@@ -1073,7 +1069,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
1073 unsigned long head, i, j; 1069 unsigned long head, i, j;
1074 unsigned long n; 1070 unsigned long n;
1075 unsigned long v, r; 1071 unsigned long v, r;
1076 unsigned long *hptep; 1072 __be64 *hptep;
1077 int npages_dirty = 0; 1073 int npages_dirty = 0;
1078 1074
1079 retry: 1075 retry:
@@ -1089,7 +1085,8 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
1089 1085
1090 i = head = *rmapp & KVMPPC_RMAP_INDEX; 1086 i = head = *rmapp & KVMPPC_RMAP_INDEX;
1091 do { 1087 do {
1092 hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4)); 1088 unsigned long hptep1;
1089 hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
1093 j = rev[i].forw; 1090 j = rev[i].forw;
1094 1091
1095 /* 1092 /*
@@ -1106,29 +1103,30 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
1106 * Otherwise we need to do the tlbie even if C==0 in 1103 * Otherwise we need to do the tlbie even if C==0 in
1107 * order to pick up any delayed writeback of C. 1104 * order to pick up any delayed writeback of C.
1108 */ 1105 */
1109 if (!(hptep[1] & HPTE_R_C) && 1106 hptep1 = be64_to_cpu(hptep[1]);
1110 (!hpte_is_writable(hptep[1]) || vcpus_running(kvm))) 1107 if (!(hptep1 & HPTE_R_C) &&
1108 (!hpte_is_writable(hptep1) || vcpus_running(kvm)))
1111 continue; 1109 continue;
1112 1110
1113 if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { 1111 if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
1114 /* unlock rmap before spinning on the HPTE lock */ 1112 /* unlock rmap before spinning on the HPTE lock */
1115 unlock_rmap(rmapp); 1113 unlock_rmap(rmapp);
1116 while (hptep[0] & HPTE_V_HVLOCK) 1114 while (hptep[0] & cpu_to_be64(HPTE_V_HVLOCK))
1117 cpu_relax(); 1115 cpu_relax();
1118 goto retry; 1116 goto retry;
1119 } 1117 }
1120 1118
1121 /* Now check and modify the HPTE */ 1119 /* Now check and modify the HPTE */
1122 if (!(hptep[0] & HPTE_V_VALID)) 1120 if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID)))
1123 continue; 1121 continue;
1124 1122
1125 /* need to make it temporarily absent so C is stable */ 1123 /* need to make it temporarily absent so C is stable */
1126 hptep[0] |= HPTE_V_ABSENT; 1124 hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
1127 kvmppc_invalidate_hpte(kvm, hptep, i); 1125 kvmppc_invalidate_hpte(kvm, hptep, i);
1128 v = hptep[0]; 1126 v = be64_to_cpu(hptep[0]);
1129 r = hptep[1]; 1127 r = be64_to_cpu(hptep[1]);
1130 if (r & HPTE_R_C) { 1128 if (r & HPTE_R_C) {
1131 hptep[1] = r & ~HPTE_R_C; 1129 hptep[1] = cpu_to_be64(r & ~HPTE_R_C);
1132 if (!(rev[i].guest_rpte & HPTE_R_C)) { 1130 if (!(rev[i].guest_rpte & HPTE_R_C)) {
1133 rev[i].guest_rpte |= HPTE_R_C; 1131 rev[i].guest_rpte |= HPTE_R_C;
1134 note_hpte_modification(kvm, &rev[i]); 1132 note_hpte_modification(kvm, &rev[i]);
@@ -1141,7 +1139,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
1141 } 1139 }
1142 v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK); 1140 v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK);
1143 v |= HPTE_V_VALID; 1141 v |= HPTE_V_VALID;
1144 hptep[0] = v; 1142 hptep[0] = cpu_to_be64(v);
1145 } while ((i = j) != head); 1143 } while ((i = j) != head);
1146 1144
1147 unlock_rmap(rmapp); 1145 unlock_rmap(rmapp);
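Nearly every book3s_64_mmu_hv.c hunk above is the same transformation: hashed-page-table entries become __be64, reads go through be64_to_cpu(), writes through cpu_to_be64(), so the code also works on a little-endian host. Below is a compile-anywhere sketch of that accessor pattern, using glibc's endian.h in place of the kernel helpers; the flag values and function names are invented for the example.

#include <endian.h>	/* be64toh()/htobe64(), standing in for be64_to_cpu()/cpu_to_be64() */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t be64;			/* stand-in for the kernel's __be64 */

#define DEMO_V_VALID	(1ULL << 0)	/* placeholder flag bits, not the */
#define DEMO_V_HVLOCK	(1ULL << 1)	/* real HPTE_V_* values           */

/* Read path: convert the big-endian word to CPU order before testing bits. */
static int hpte_is_valid(const be64 *hptep)
{
	return (be64toh(hptep[0]) & DEMO_V_VALID) != 0;
}

/* Write path: a single flag can be cleared on the raw word by converting
 * the mask instead, as the diff does with
 * "hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK)". */
static void hpte_unlock(be64 *hptep)
{
	hptep[0] &= ~htobe64(DEMO_V_HVLOCK);
}

int main(void)
{
	be64 hpte[2];

	hpte[0] = htobe64(DEMO_V_VALID | DEMO_V_HVLOCK);
	hpte[1] = htobe64(0);

	printf("valid=%d\n", hpte_is_valid(hpte));
	hpte_unlock(hpte);
	printf("word0 after unlock = 0x%016llx\n",
	       (unsigned long long)be64toh(hpte[0]));
	return 0;
}

Converting the constant mask lets the byte swap fold at compile time, which is presumably why the diff uses that form on the unlock paths.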
@@ -1305,7 +1303,7 @@ struct kvm_htab_ctx {
1305 * Returns 1 if this HPT entry has been modified or has pending 1303 * Returns 1 if this HPT entry has been modified or has pending
1306 * R/C bit changes. 1304 * R/C bit changes.
1307 */ 1305 */
1308static int hpte_dirty(struct revmap_entry *revp, unsigned long *hptp) 1306static int hpte_dirty(struct revmap_entry *revp, __be64 *hptp)
1309{ 1307{
1310 unsigned long rcbits_unset; 1308 unsigned long rcbits_unset;
1311 1309
@@ -1314,13 +1312,14 @@ static int hpte_dirty(struct revmap_entry *revp, unsigned long *hptp)
1314 1312
1315 /* Also need to consider changes in reference and changed bits */ 1313 /* Also need to consider changes in reference and changed bits */
1316 rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C); 1314 rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
1317 if ((hptp[0] & HPTE_V_VALID) && (hptp[1] & rcbits_unset)) 1315 if ((be64_to_cpu(hptp[0]) & HPTE_V_VALID) &&
1316 (be64_to_cpu(hptp[1]) & rcbits_unset))
1318 return 1; 1317 return 1;
1319 1318
1320 return 0; 1319 return 0;
1321} 1320}
1322 1321
1323static long record_hpte(unsigned long flags, unsigned long *hptp, 1322static long record_hpte(unsigned long flags, __be64 *hptp,
1324 unsigned long *hpte, struct revmap_entry *revp, 1323 unsigned long *hpte, struct revmap_entry *revp,
1325 int want_valid, int first_pass) 1324 int want_valid, int first_pass)
1326{ 1325{
@@ -1335,10 +1334,10 @@ static long record_hpte(unsigned long flags, unsigned long *hptp,
1335 return 0; 1334 return 0;
1336 1335
1337 valid = 0; 1336 valid = 0;
1338 if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) { 1337 if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) {
1339 valid = 1; 1338 valid = 1;
1340 if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && 1339 if ((flags & KVM_GET_HTAB_BOLTED_ONLY) &&
1341 !(hptp[0] & HPTE_V_BOLTED)) 1340 !(be64_to_cpu(hptp[0]) & HPTE_V_BOLTED))
1342 valid = 0; 1341 valid = 0;
1343 } 1342 }
1344 if (valid != want_valid) 1343 if (valid != want_valid)
@@ -1350,7 +1349,7 @@ static long record_hpte(unsigned long flags, unsigned long *hptp,
1350 preempt_disable(); 1349 preempt_disable();
1351 while (!try_lock_hpte(hptp, HPTE_V_HVLOCK)) 1350 while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
1352 cpu_relax(); 1351 cpu_relax();
1353 v = hptp[0]; 1352 v = be64_to_cpu(hptp[0]);
1354 1353
1355 /* re-evaluate valid and dirty from synchronized HPTE value */ 1354 /* re-evaluate valid and dirty from synchronized HPTE value */
1356 valid = !!(v & HPTE_V_VALID); 1355 valid = !!(v & HPTE_V_VALID);
@@ -1358,9 +1357,9 @@ static long record_hpte(unsigned long flags, unsigned long *hptp,
1358 1357
1359 /* Harvest R and C into guest view if necessary */ 1358 /* Harvest R and C into guest view if necessary */
1360 rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C); 1359 rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
1361 if (valid && (rcbits_unset & hptp[1])) { 1360 if (valid && (rcbits_unset & be64_to_cpu(hptp[1]))) {
1362 revp->guest_rpte |= (hptp[1] & (HPTE_R_R | HPTE_R_C)) | 1361 revp->guest_rpte |= (be64_to_cpu(hptp[1]) &
1363 HPTE_GR_MODIFIED; 1362 (HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED;
1364 dirty = 1; 1363 dirty = 1;
1365 } 1364 }
1366 1365
@@ -1379,13 +1378,13 @@ static long record_hpte(unsigned long flags, unsigned long *hptp,
1379 revp->guest_rpte = r; 1378 revp->guest_rpte = r;
1380 } 1379 }
1381 asm volatile(PPC_RELEASE_BARRIER "" : : : "memory"); 1380 asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
1382 hptp[0] &= ~HPTE_V_HVLOCK; 1381 hptp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
1383 preempt_enable(); 1382 preempt_enable();
1384 if (!(valid == want_valid && (first_pass || dirty))) 1383 if (!(valid == want_valid && (first_pass || dirty)))
1385 ok = 0; 1384 ok = 0;
1386 } 1385 }
1387 hpte[0] = v; 1386 hpte[0] = cpu_to_be64(v);
1388 hpte[1] = r; 1387 hpte[1] = cpu_to_be64(r);
1389 return ok; 1388 return ok;
1390} 1389}
1391 1390
@@ -1395,7 +1394,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
1395 struct kvm_htab_ctx *ctx = file->private_data; 1394 struct kvm_htab_ctx *ctx = file->private_data;
1396 struct kvm *kvm = ctx->kvm; 1395 struct kvm *kvm = ctx->kvm;
1397 struct kvm_get_htab_header hdr; 1396 struct kvm_get_htab_header hdr;
1398 unsigned long *hptp; 1397 __be64 *hptp;
1399 struct revmap_entry *revp; 1398 struct revmap_entry *revp;
1400 unsigned long i, nb, nw; 1399 unsigned long i, nb, nw;
1401 unsigned long __user *lbuf; 1400 unsigned long __user *lbuf;
@@ -1411,7 +1410,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
1411 flags = ctx->flags; 1410 flags = ctx->flags;
1412 1411
1413 i = ctx->index; 1412 i = ctx->index;
1414 hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); 1413 hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
1415 revp = kvm->arch.revmap + i; 1414 revp = kvm->arch.revmap + i;
1416 lbuf = (unsigned long __user *)buf; 1415 lbuf = (unsigned long __user *)buf;
1417 1416
@@ -1495,7 +1494,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
1495 unsigned long i, j; 1494 unsigned long i, j;
1496 unsigned long v, r; 1495 unsigned long v, r;
1497 unsigned long __user *lbuf; 1496 unsigned long __user *lbuf;
1498 unsigned long *hptp; 1497 __be64 *hptp;
1499 unsigned long tmp[2]; 1498 unsigned long tmp[2];
1500 ssize_t nb; 1499 ssize_t nb;
1501 long int err, ret; 1500 long int err, ret;
@@ -1537,7 +1536,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
1537 i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte) 1536 i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte)
1538 break; 1537 break;
1539 1538
1540 hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); 1539 hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
1541 lbuf = (unsigned long __user *)buf; 1540 lbuf = (unsigned long __user *)buf;
1542 for (j = 0; j < hdr.n_valid; ++j) { 1541 for (j = 0; j < hdr.n_valid; ++j) {
1543 err = -EFAULT; 1542 err = -EFAULT;
@@ -1549,7 +1548,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
1549 lbuf += 2; 1548 lbuf += 2;
1550 nb += HPTE_SIZE; 1549 nb += HPTE_SIZE;
1551 1550
1552 if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) 1551 if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
1553 kvmppc_do_h_remove(kvm, 0, i, 0, tmp); 1552 kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
1554 err = -EIO; 1553 err = -EIO;
1555 ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r, 1554 ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
@@ -1575,7 +1574,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
1575 } 1574 }
1576 1575
1577 for (j = 0; j < hdr.n_invalid; ++j) { 1576 for (j = 0; j < hdr.n_invalid; ++j) {
1578 if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) 1577 if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
1579 kvmppc_do_h_remove(kvm, 0, i, 0, tmp); 1578 kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
1580 ++i; 1579 ++i;
1581 hptp += 2; 1580 hptp += 2;
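
The hunks above convert every access to the guest hashed page table in book3s_64_mmu_hv.c to treat the entries as __be64 and to wrap reads and writes in be64_to_cpu()/cpu_to_be64(), so the HPT stays big-endian even on a little-endian host. A minimal user-space sketch of the same read-modify-write pattern, using glibc's be64toh()/htobe64() in place of the kernel helpers (the flag bits are placeholders, not the real HPTE layout):

#include <endian.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define HPTE_V_VALID  (1ULL << 0)       /* placeholder bit, not the real layout */
#define HPTE_V_LOCK   (1ULL << 1)       /* placeholder software-lock bit        */

int main(void)
{
        uint64_t hpte[2];                       /* kept big-endian in memory     */

        hpte[0] = htobe64(HPTE_V_VALID | HPTE_V_LOCK);

        uint64_t v = be64toh(hpte[0]);          /* convert before testing bits   */
        if (v & HPTE_V_VALID)
                v &= ~HPTE_V_LOCK;              /* manipulate in host byte order */
        hpte[0] = htobe64(v);                   /* convert back when storing     */

        printf("hpte[0] = 0x%016" PRIx64 "\n", be64toh(hpte[0]));
        return 0;
}
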
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 3f295269af37..5a2bc4b0dfe5 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -439,12 +439,6 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
439 (mfmsr() & MSR_HV)) 439 (mfmsr() & MSR_HV))
440 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; 440 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
441 break; 441 break;
442 case SPRN_PURR:
443 to_book3s(vcpu)->purr_offset = spr_val - get_tb();
444 break;
445 case SPRN_SPURR:
446 to_book3s(vcpu)->spurr_offset = spr_val - get_tb();
447 break;
448 case SPRN_GQR0: 442 case SPRN_GQR0:
449 case SPRN_GQR1: 443 case SPRN_GQR1:
450 case SPRN_GQR2: 444 case SPRN_GQR2:
@@ -455,10 +449,10 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
455 case SPRN_GQR7: 449 case SPRN_GQR7:
456 to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val; 450 to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
457 break; 451 break;
452#ifdef CONFIG_PPC_BOOK3S_64
458 case SPRN_FSCR: 453 case SPRN_FSCR:
459 vcpu->arch.fscr = spr_val; 454 kvmppc_set_fscr(vcpu, spr_val);
460 break; 455 break;
461#ifdef CONFIG_PPC_BOOK3S_64
462 case SPRN_BESCR: 456 case SPRN_BESCR:
463 vcpu->arch.bescr = spr_val; 457 vcpu->arch.bescr = spr_val;
464 break; 458 break;
@@ -572,10 +566,22 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
572 *spr_val = 0; 566 *spr_val = 0;
573 break; 567 break;
574 case SPRN_PURR: 568 case SPRN_PURR:
575 *spr_val = get_tb() + to_book3s(vcpu)->purr_offset; 569 /*
570 * On exit we would have updated purr
571 */
572 *spr_val = vcpu->arch.purr;
576 break; 573 break;
577 case SPRN_SPURR: 574 case SPRN_SPURR:
578 *spr_val = get_tb() + to_book3s(vcpu)->purr_offset; 575 /*
576 * On exit we would have updated spurr
577 */
578 *spr_val = vcpu->arch.spurr;
579 break;
580 case SPRN_VTB:
581 *spr_val = vcpu->arch.vtb;
582 break;
583 case SPRN_IC:
584 *spr_val = vcpu->arch.ic;
579 break; 585 break;
580 case SPRN_GQR0: 586 case SPRN_GQR0:
581 case SPRN_GQR1: 587 case SPRN_GQR1:
@@ -587,10 +593,10 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
587 case SPRN_GQR7: 593 case SPRN_GQR7:
588 *spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]; 594 *spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
589 break; 595 break;
596#ifdef CONFIG_PPC_BOOK3S_64
590 case SPRN_FSCR: 597 case SPRN_FSCR:
591 *spr_val = vcpu->arch.fscr; 598 *spr_val = vcpu->arch.fscr;
592 break; 599 break;
593#ifdef CONFIG_PPC_BOOK3S_64
594 case SPRN_BESCR: 600 case SPRN_BESCR:
595 *spr_val = vcpu->arch.bescr; 601 *spr_val = vcpu->arch.bescr;
596 break; 602 break;
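
In the book3s_emulate.c hunk above, mfspr emulation of PURR and SPURR stops deriving a value from a timebase offset and instead returns vcpu->arch.purr/spurr, which the PR-KVM entry/exit path (book3s_pr.c, later in this patch) keeps up to date by snapshotting the timebase on entry and adding the delta on exit. A user-space sketch of that snapshot-and-accumulate pattern, with clock_gettime() standing in for the timebase and an invented vcpu struct:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct toy_vcpu {
        uint64_t purr;          /* what the guest sees via mfspr(SPRN_PURR) */
        uint64_t entry_ns;      /* snapshot taken when entering the guest   */
};

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
        struct toy_vcpu vcpu = { 0, 0 };

        vcpu.entry_ns = now_ns();                       /* "guest entry" */
        usleep(10 * 1000);                              /* guest runs    */
        vcpu.purr += now_ns() - vcpu.entry_ns;          /* "guest exit"  */

        printf("emulated PURR advanced by %" PRIu64 " ns\n", vcpu.purr);
        return 0;
}
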
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 7a12edbb61e7..27cced9c7249 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -35,6 +35,7 @@
35 35
36#include <asm/reg.h> 36#include <asm/reg.h>
37#include <asm/cputable.h> 37#include <asm/cputable.h>
38#include <asm/cache.h>
38#include <asm/cacheflush.h> 39#include <asm/cacheflush.h>
39#include <asm/tlbflush.h> 40#include <asm/tlbflush.h>
40#include <asm/uaccess.h> 41#include <asm/uaccess.h>
@@ -67,6 +68,15 @@
67/* Used as a "null" value for timebase values */ 68/* Used as a "null" value for timebase values */
68#define TB_NIL (~(u64)0) 69#define TB_NIL (~(u64)0)
69 70
71static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
72
73#if defined(CONFIG_PPC_64K_PAGES)
74#define MPP_BUFFER_ORDER 0
75#elif defined(CONFIG_PPC_4K_PAGES)
76#define MPP_BUFFER_ORDER 3
77#endif
78
79
70static void kvmppc_end_cede(struct kvm_vcpu *vcpu); 80static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
71static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); 81static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
72 82
@@ -270,7 +280,7 @@ struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
270static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) 280static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
271{ 281{
272 vpa->__old_status |= LPPACA_OLD_SHARED_PROC; 282 vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
273 vpa->yield_count = 1; 283 vpa->yield_count = cpu_to_be32(1);
274} 284}
275 285
276static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v, 286static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
@@ -293,8 +303,8 @@ static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
293struct reg_vpa { 303struct reg_vpa {
294 u32 dummy; 304 u32 dummy;
295 union { 305 union {
296 u16 hword; 306 __be16 hword;
297 u32 word; 307 __be32 word;
298 } length; 308 } length;
299}; 309};
300 310
@@ -333,9 +343,9 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
333 if (va == NULL) 343 if (va == NULL)
334 return H_PARAMETER; 344 return H_PARAMETER;
335 if (subfunc == H_VPA_REG_VPA) 345 if (subfunc == H_VPA_REG_VPA)
336 len = ((struct reg_vpa *)va)->length.hword; 346 len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
337 else 347 else
338 len = ((struct reg_vpa *)va)->length.word; 348 len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
339 kvmppc_unpin_guest_page(kvm, va, vpa, false); 349 kvmppc_unpin_guest_page(kvm, va, vpa, false);
340 350
341 /* Check length */ 351 /* Check length */
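
The reg_vpa change above declares the guest-supplied length field as __be16/__be32 and reads it with the matching be*_to_cpu() helper, since the VPA descriptor is big-endian regardless of host endianness. A sketch of the same union in user space (glibc byte-order helpers; the value and the subfunction choice are invented):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct reg_vpa {
        uint32_t dummy;
        union {
                uint16_t hword;         /* stored big-endian by the guest */
                uint32_t word;          /* stored big-endian by the guest */
        } length;
};

int main(void)
{
        struct reg_vpa v = { 0 };
        int reg_vpa_subfunc = 1;        /* pretend subfunc == H_VPA_REG_VPA */
        unsigned long len;

        v.length.hword = htobe16(640);  /* pretend the guest wrote 640 */

        if (reg_vpa_subfunc)
                len = be16toh(v.length.hword);
        else
                len = be32toh(v.length.word);

        printf("len = %lu\n", len);
        return 0;
}
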
@@ -540,21 +550,63 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
540 return; 550 return;
541 memset(dt, 0, sizeof(struct dtl_entry)); 551 memset(dt, 0, sizeof(struct dtl_entry));
542 dt->dispatch_reason = 7; 552 dt->dispatch_reason = 7;
543 dt->processor_id = vc->pcpu + vcpu->arch.ptid; 553 dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
544 dt->timebase = now + vc->tb_offset; 554 dt->timebase = cpu_to_be64(now + vc->tb_offset);
545 dt->enqueue_to_dispatch_time = stolen; 555 dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
546 dt->srr0 = kvmppc_get_pc(vcpu); 556 dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
547 dt->srr1 = vcpu->arch.shregs.msr; 557 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
548 ++dt; 558 ++dt;
549 if (dt == vcpu->arch.dtl.pinned_end) 559 if (dt == vcpu->arch.dtl.pinned_end)
550 dt = vcpu->arch.dtl.pinned_addr; 560 dt = vcpu->arch.dtl.pinned_addr;
551 vcpu->arch.dtl_ptr = dt; 561 vcpu->arch.dtl_ptr = dt;
552 /* order writing *dt vs. writing vpa->dtl_idx */ 562 /* order writing *dt vs. writing vpa->dtl_idx */
553 smp_wmb(); 563 smp_wmb();
554 vpa->dtl_idx = ++vcpu->arch.dtl_index; 564 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
555 vcpu->arch.dtl.dirty = true; 565 vcpu->arch.dtl.dirty = true;
556} 566}
557 567
568static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
569{
570 if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
571 return true;
572 if ((!vcpu->arch.vcore->arch_compat) &&
573 cpu_has_feature(CPU_FTR_ARCH_207S))
574 return true;
575 return false;
576}
577
578static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
579 unsigned long resource, unsigned long value1,
580 unsigned long value2)
581{
582 switch (resource) {
583 case H_SET_MODE_RESOURCE_SET_CIABR:
584 if (!kvmppc_power8_compatible(vcpu))
585 return H_P2;
586 if (value2)
587 return H_P4;
588 if (mflags)
589 return H_UNSUPPORTED_FLAG_START;
590 /* Guests can't breakpoint the hypervisor */
591 if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
592 return H_P3;
593 vcpu->arch.ciabr = value1;
594 return H_SUCCESS;
595 case H_SET_MODE_RESOURCE_SET_DAWR:
596 if (!kvmppc_power8_compatible(vcpu))
597 return H_P2;
598 if (mflags)
599 return H_UNSUPPORTED_FLAG_START;
600 if (value2 & DABRX_HYP)
601 return H_P4;
602 vcpu->arch.dawr = value1;
603 vcpu->arch.dawrx = value2;
604 return H_SUCCESS;
605 default:
606 return H_TOO_HARD;
607 }
608}
609
558int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) 610int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
559{ 611{
560 unsigned long req = kvmppc_get_gpr(vcpu, 3); 612 unsigned long req = kvmppc_get_gpr(vcpu, 3);
@@ -562,6 +614,10 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
562 struct kvm_vcpu *tvcpu; 614 struct kvm_vcpu *tvcpu;
563 int idx, rc; 615 int idx, rc;
564 616
617 if (req <= MAX_HCALL_OPCODE &&
618 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
619 return RESUME_HOST;
620
565 switch (req) { 621 switch (req) {
566 case H_ENTER: 622 case H_ENTER:
567 idx = srcu_read_lock(&vcpu->kvm->srcu); 623 idx = srcu_read_lock(&vcpu->kvm->srcu);
@@ -620,7 +676,14 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
620 676
621 /* Send the error out to userspace via KVM_RUN */ 677 /* Send the error out to userspace via KVM_RUN */
622 return rc; 678 return rc;
623 679 case H_SET_MODE:
680 ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
681 kvmppc_get_gpr(vcpu, 5),
682 kvmppc_get_gpr(vcpu, 6),
683 kvmppc_get_gpr(vcpu, 7));
684 if (ret == H_TOO_HARD)
685 return RESUME_HOST;
686 break;
624 case H_XIRR: 687 case H_XIRR:
625 case H_CPPR: 688 case H_CPPR:
626 case H_EOI: 689 case H_EOI:
@@ -639,6 +702,29 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
639 return RESUME_GUEST; 702 return RESUME_GUEST;
640} 703}
641 704
705static int kvmppc_hcall_impl_hv(unsigned long cmd)
706{
707 switch (cmd) {
708 case H_CEDE:
709 case H_PROD:
710 case H_CONFER:
711 case H_REGISTER_VPA:
712 case H_SET_MODE:
713#ifdef CONFIG_KVM_XICS
714 case H_XIRR:
715 case H_CPPR:
716 case H_EOI:
717 case H_IPI:
718 case H_IPOLL:
719 case H_XIRR_X:
720#endif
721 return 1;
722 }
723
724 /* See if it's in the real-mode table */
725 return kvmppc_hcall_impl_hv_realmode(cmd);
726}
727
642static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, 728static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
643 struct task_struct *tsk) 729 struct task_struct *tsk)
644{ 730{
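
The two additions above introduce per-VM hcall enabling: kvmppc_pseries_do_hcall() bails out to the host if the requested hcall's bit is clear in kvm->arch.enabled_hcalls, and kvmppc_hcall_impl_hv() reports which hcalls the HV module can handle at all. Because PAPR hcall numbers are multiples of 4, the bitmap is indexed by opcode/4. A toy user-space version of that bitmap (plain uint64_t words instead of the kernel's DECLARE_BITMAP(); the opcode values are placeholders):

#include <stdint.h>
#include <stdio.h>

#define MAX_OPCODE    0x300
#define BITS_PER_WORD 64
#define BITMAP_WORDS  (MAX_OPCODE / 4 / BITS_PER_WORD + 1)

static uint64_t enabled_hcalls[BITMAP_WORDS];

static void enable_hcall(unsigned int op)
{
        unsigned int bit = op / 4;              /* opcodes are multiples of 4 */

        enabled_hcalls[bit / BITS_PER_WORD] |= 1ULL << (bit % BITS_PER_WORD);
}

static int hcall_enabled(unsigned int op)
{
        unsigned int bit = op / 4;

        return (enabled_hcalls[bit / BITS_PER_WORD] >> (bit % BITS_PER_WORD)) & 1;
}

int main(void)
{
        enable_hcall(0x2c);                     /* placeholder opcode */
        printf("0x2c: %d  0x30: %d\n", hcall_enabled(0x2c), hcall_enabled(0x30));
        return 0;
}

Later in this file, init_default_hcalls() walks a zero-terminated table of opcodes and sets the corresponding bits, so old userspace that never heard of hcall enabling keeps working.
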
@@ -785,7 +871,8 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
785 return 0; 871 return 0;
786} 872}
787 873
788static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr) 874static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
875 bool preserve_top32)
789{ 876{
790 struct kvmppc_vcore *vc = vcpu->arch.vcore; 877 struct kvmppc_vcore *vc = vcpu->arch.vcore;
791 u64 mask; 878 u64 mask;
@@ -820,6 +907,10 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
820 mask = LPCR_DPFD | LPCR_ILE | LPCR_TC; 907 mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
821 if (cpu_has_feature(CPU_FTR_ARCH_207S)) 908 if (cpu_has_feature(CPU_FTR_ARCH_207S))
822 mask |= LPCR_AIL; 909 mask |= LPCR_AIL;
910
911 /* Broken 32-bit version of LPCR must not clear top bits */
912 if (preserve_top32)
913 mask &= 0xFFFFFFFF;
823 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); 914 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
824 spin_unlock(&vc->lock); 915 spin_unlock(&vc->lock);
825} 916}
@@ -894,12 +985,6 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
894 case KVM_REG_PPC_CIABR: 985 case KVM_REG_PPC_CIABR:
895 *val = get_reg_val(id, vcpu->arch.ciabr); 986 *val = get_reg_val(id, vcpu->arch.ciabr);
896 break; 987 break;
897 case KVM_REG_PPC_IC:
898 *val = get_reg_val(id, vcpu->arch.ic);
899 break;
900 case KVM_REG_PPC_VTB:
901 *val = get_reg_val(id, vcpu->arch.vtb);
902 break;
903 case KVM_REG_PPC_CSIGR: 988 case KVM_REG_PPC_CSIGR:
904 *val = get_reg_val(id, vcpu->arch.csigr); 989 *val = get_reg_val(id, vcpu->arch.csigr);
905 break; 990 break;
@@ -939,6 +1024,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
939 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); 1024 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
940 break; 1025 break;
941 case KVM_REG_PPC_LPCR: 1026 case KVM_REG_PPC_LPCR:
1027 case KVM_REG_PPC_LPCR_64:
942 *val = get_reg_val(id, vcpu->arch.vcore->lpcr); 1028 *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
943 break; 1029 break;
944 case KVM_REG_PPC_PPR: 1030 case KVM_REG_PPC_PPR:
@@ -1094,12 +1180,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1094 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) 1180 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
1095 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ 1181 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */
1096 break; 1182 break;
1097 case KVM_REG_PPC_IC:
1098 vcpu->arch.ic = set_reg_val(id, *val);
1099 break;
1100 case KVM_REG_PPC_VTB:
1101 vcpu->arch.vtb = set_reg_val(id, *val);
1102 break;
1103 case KVM_REG_PPC_CSIGR: 1183 case KVM_REG_PPC_CSIGR:
1104 vcpu->arch.csigr = set_reg_val(id, *val); 1184 vcpu->arch.csigr = set_reg_val(id, *val);
1105 break; 1185 break;
@@ -1150,7 +1230,10 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1150 ALIGN(set_reg_val(id, *val), 1UL << 24); 1230 ALIGN(set_reg_val(id, *val), 1UL << 24);
1151 break; 1231 break;
1152 case KVM_REG_PPC_LPCR: 1232 case KVM_REG_PPC_LPCR:
1153 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val)); 1233 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
1234 break;
1235 case KVM_REG_PPC_LPCR_64:
1236 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
1154 break; 1237 break;
1155 case KVM_REG_PPC_PPR: 1238 case KVM_REG_PPC_PPR:
1156 vcpu->arch.ppr = set_reg_val(id, *val); 1239 vcpu->arch.ppr = set_reg_val(id, *val);
@@ -1228,6 +1311,33 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1228 return r; 1311 return r;
1229} 1312}
1230 1313
1314static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
1315{
1316 struct kvmppc_vcore *vcore;
1317
1318 vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
1319
1320 if (vcore == NULL)
1321 return NULL;
1322
1323 INIT_LIST_HEAD(&vcore->runnable_threads);
1324 spin_lock_init(&vcore->lock);
1325 init_waitqueue_head(&vcore->wq);
1326 vcore->preempt_tb = TB_NIL;
1327 vcore->lpcr = kvm->arch.lpcr;
1328 vcore->first_vcpuid = core * threads_per_subcore;
1329 vcore->kvm = kvm;
1330
1331 vcore->mpp_buffer_is_valid = false;
1332
1333 if (cpu_has_feature(CPU_FTR_ARCH_207S))
1334 vcore->mpp_buffer = (void *)__get_free_pages(
1335 GFP_KERNEL|__GFP_ZERO,
1336 MPP_BUFFER_ORDER);
1337
1338 return vcore;
1339}
1340
1231static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, 1341static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
1232 unsigned int id) 1342 unsigned int id)
1233{ 1343{
@@ -1279,16 +1389,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
1279 mutex_lock(&kvm->lock); 1389 mutex_lock(&kvm->lock);
1280 vcore = kvm->arch.vcores[core]; 1390 vcore = kvm->arch.vcores[core];
1281 if (!vcore) { 1391 if (!vcore) {
1282 vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL); 1392 vcore = kvmppc_vcore_create(kvm, core);
1283 if (vcore) {
1284 INIT_LIST_HEAD(&vcore->runnable_threads);
1285 spin_lock_init(&vcore->lock);
1286 init_waitqueue_head(&vcore->wq);
1287 vcore->preempt_tb = TB_NIL;
1288 vcore->lpcr = kvm->arch.lpcr;
1289 vcore->first_vcpuid = core * threads_per_subcore;
1290 vcore->kvm = kvm;
1291 }
1292 kvm->arch.vcores[core] = vcore; 1393 kvm->arch.vcores[core] = vcore;
1293 kvm->arch.online_vcores++; 1394 kvm->arch.online_vcores++;
1294 } 1395 }
@@ -1500,6 +1601,33 @@ static int on_primary_thread(void)
1500 return 1; 1601 return 1;
1501} 1602}
1502 1603
1604static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc)
1605{
1606 phys_addr_t phy_addr, mpp_addr;
1607
1608 phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer);
1609 mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
1610
1611 mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT);
1612 logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);
1613
1614 vc->mpp_buffer_is_valid = true;
1615}
1616
1617static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
1618{
1619 phys_addr_t phy_addr, mpp_addr;
1620
1621 phy_addr = virt_to_phys(vc->mpp_buffer);
1622 mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
1623
1624 /* We must abort any in-progress save operations to ensure
1625 * the table is valid so that prefetch engine knows when to
1626 * stop prefetching. */
1627 logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT);
1628 mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
1629}
1630
1503/* 1631/*
1504 * Run a set of guest threads on a physical core. 1632 * Run a set of guest threads on a physical core.
1505 * Called with vc->lock held. 1633 * Called with vc->lock held.
@@ -1577,9 +1705,16 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
1577 1705
1578 srcu_idx = srcu_read_lock(&vc->kvm->srcu); 1706 srcu_idx = srcu_read_lock(&vc->kvm->srcu);
1579 1707
1708 if (vc->mpp_buffer_is_valid)
1709 kvmppc_start_restoring_l2_cache(vc);
1710
1580 __kvmppc_vcore_entry(); 1711 __kvmppc_vcore_entry();
1581 1712
1582 spin_lock(&vc->lock); 1713 spin_lock(&vc->lock);
1714
1715 if (vc->mpp_buffer)
1716 kvmppc_start_saving_l2_cache(vc);
1717
1583 /* disable sending of IPIs on virtual external irqs */ 1718 /* disable sending of IPIs on virtual external irqs */
1584 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) 1719 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
1585 vcpu->cpu = -1; 1720 vcpu->cpu = -1;
@@ -1929,12 +2064,6 @@ static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
1929 (*sps)->page_shift = def->shift; 2064 (*sps)->page_shift = def->shift;
1930 (*sps)->slb_enc = def->sllp; 2065 (*sps)->slb_enc = def->sllp;
1931 (*sps)->enc[0].page_shift = def->shift; 2066 (*sps)->enc[0].page_shift = def->shift;
1932 /*
1933 * Only return base page encoding. We don't want to return
1934 * all the supporting pte_enc, because our H_ENTER doesn't
1935 * support MPSS yet. Once they do, we can start passing all
1936 * support pte_enc here
1937 */
1938 (*sps)->enc[0].pte_enc = def->penc[linux_psize]; 2067 (*sps)->enc[0].pte_enc = def->penc[linux_psize];
1939 /* 2068 /*
1940 * Add 16MB MPSS support if host supports it 2069 * Add 16MB MPSS support if host supports it
@@ -2281,6 +2410,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
2281 */ 2410 */
2282 cpumask_setall(&kvm->arch.need_tlb_flush); 2411 cpumask_setall(&kvm->arch.need_tlb_flush);
2283 2412
2413 /* Start out with the default set of hcalls enabled */
2414 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
2415 sizeof(kvm->arch.enabled_hcalls));
2416
2284 kvm->arch.rma = NULL; 2417 kvm->arch.rma = NULL;
2285 2418
2286 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); 2419 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
@@ -2323,8 +2456,14 @@ static void kvmppc_free_vcores(struct kvm *kvm)
2323{ 2456{
2324 long int i; 2457 long int i;
2325 2458
2326 for (i = 0; i < KVM_MAX_VCORES; ++i) 2459 for (i = 0; i < KVM_MAX_VCORES; ++i) {
2460 if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) {
2461 struct kvmppc_vcore *vc = kvm->arch.vcores[i];
2462 free_pages((unsigned long)vc->mpp_buffer,
2463 MPP_BUFFER_ORDER);
2464 }
2327 kfree(kvm->arch.vcores[i]); 2465 kfree(kvm->arch.vcores[i]);
2466 }
2328 kvm->arch.online_vcores = 0; 2467 kvm->arch.online_vcores = 0;
2329} 2468}
2330 2469
@@ -2419,6 +2558,49 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp,
2419 return r; 2558 return r;
2420} 2559}
2421 2560
2561/*
2562 * List of hcall numbers to enable by default.
2563 * For compatibility with old userspace, we enable by default
2564 * all hcalls that were implemented before the hcall-enabling
2565 * facility was added. Note this list should not include H_RTAS.
2566 */
2567static unsigned int default_hcall_list[] = {
2568 H_REMOVE,
2569 H_ENTER,
2570 H_READ,
2571 H_PROTECT,
2572 H_BULK_REMOVE,
2573 H_GET_TCE,
2574 H_PUT_TCE,
2575 H_SET_DABR,
2576 H_SET_XDABR,
2577 H_CEDE,
2578 H_PROD,
2579 H_CONFER,
2580 H_REGISTER_VPA,
2581#ifdef CONFIG_KVM_XICS
2582 H_EOI,
2583 H_CPPR,
2584 H_IPI,
2585 H_IPOLL,
2586 H_XIRR,
2587 H_XIRR_X,
2588#endif
2589 0
2590};
2591
2592static void init_default_hcalls(void)
2593{
2594 int i;
2595 unsigned int hcall;
2596
2597 for (i = 0; default_hcall_list[i]; ++i) {
2598 hcall = default_hcall_list[i];
2599 WARN_ON(!kvmppc_hcall_impl_hv(hcall));
2600 __set_bit(hcall / 4, default_enabled_hcalls);
2601 }
2602}
2603
2422static struct kvmppc_ops kvm_ops_hv = { 2604static struct kvmppc_ops kvm_ops_hv = {
2423 .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, 2605 .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
2424 .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, 2606 .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
@@ -2451,6 +2633,7 @@ static struct kvmppc_ops kvm_ops_hv = {
2451 .emulate_mfspr = kvmppc_core_emulate_mfspr_hv, 2633 .emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
2452 .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv, 2634 .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
2453 .arch_vm_ioctl = kvm_arch_vm_ioctl_hv, 2635 .arch_vm_ioctl = kvm_arch_vm_ioctl_hv,
2636 .hcall_implemented = kvmppc_hcall_impl_hv,
2454}; 2637};
2455 2638
2456static int kvmppc_book3s_init_hv(void) 2639static int kvmppc_book3s_init_hv(void)
@@ -2466,6 +2649,8 @@ static int kvmppc_book3s_init_hv(void)
2466 kvm_ops_hv.owner = THIS_MODULE; 2649 kvm_ops_hv.owner = THIS_MODULE;
2467 kvmppc_hv_ops = &kvm_ops_hv; 2650 kvmppc_hv_ops = &kvm_ops_hv;
2468 2651
2652 init_default_hcalls();
2653
2469 r = kvmppc_mmu_hv_init(); 2654 r = kvmppc_mmu_hv_init();
2470 return r; 2655 return r;
2471} 2656}
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 6cf498a9bc98..329d7fdd0a6a 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -219,3 +219,16 @@ bool kvm_hv_mode_active(void)
219{ 219{
220 return atomic_read(&hv_vm_count) != 0; 220 return atomic_read(&hv_vm_count) != 0;
221} 221}
222
223extern int hcall_real_table[], hcall_real_table_end[];
224
225int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
226{
227 cmd /= 4;
228 if (cmd < hcall_real_table_end - hcall_real_table &&
229 hcall_real_table[cmd])
230 return 1;
231
232 return 0;
233}
234EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);
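
kvmppc_hcall_impl_hv_realmode(), added above, answers "is this hcall handled in real mode?" by indexing hcall_real_table (defined in book3s_hv_rmhandlers.S) with opcode/4 and treating a non-zero entry as implemented. A user-space analogue using a sparse table of function pointers (all names and opcode values are invented):

#include <stdio.h>

static int do_cede(void) { return 0; }
static int do_prod(void) { return 0; }

static int (*real_table[])(void) = {
        [0x2c / 4] = do_cede,   /* indices are illustrative, not real opcodes */
        [0x30 / 4] = do_prod,
};

static int hcall_impl_realmode(unsigned long cmd)
{
        cmd /= 4;
        if (cmd < sizeof(real_table) / sizeof(real_table[0]) && real_table[cmd])
                return 1;       /* non-zero slot: handled without exiting     */
        return 0;
}

int main(void)
{
        printf("0x2c: %d  0x100: %d\n",
               hcall_impl_realmode(0x2c), hcall_impl_realmode(0x100));
        return 0;
}
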
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 3a5c568b1e89..d562c8e2bc30 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -45,14 +45,14 @@ static void reload_slb(struct kvm_vcpu *vcpu)
45 return; 45 return;
46 46
47 /* Sanity check */ 47 /* Sanity check */
48 n = min_t(u32, slb->persistent, SLB_MIN_SIZE); 48 n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE);
49 if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end) 49 if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end)
50 return; 50 return;
51 51
52 /* Load up the SLB from that */ 52 /* Load up the SLB from that */
53 for (i = 0; i < n; ++i) { 53 for (i = 0; i < n; ++i) {
54 unsigned long rb = slb->save_area[i].esid; 54 unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
55 unsigned long rs = slb->save_area[i].vsid; 55 unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);
56 56
57 rb = (rb & ~0xFFFul) | i; /* insert entry number */ 57 rb = (rb & ~0xFFFul) | i; /* insert entry number */
58 asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb)); 58 asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 5a24d3c2b6b8..084ad54c73cd 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -154,10 +154,10 @@ static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
154 return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift); 154 return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
155} 155}
156 156
157static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v) 157static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
158{ 158{
159 asm volatile(PPC_RELEASE_BARRIER "" : : : "memory"); 159 asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
160 hpte[0] = hpte_v; 160 hpte[0] = cpu_to_be64(hpte_v);
161} 161}
162 162
163long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, 163long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
@@ -166,7 +166,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
166{ 166{
167 unsigned long i, pa, gpa, gfn, psize; 167 unsigned long i, pa, gpa, gfn, psize;
168 unsigned long slot_fn, hva; 168 unsigned long slot_fn, hva;
169 unsigned long *hpte; 169 __be64 *hpte;
170 struct revmap_entry *rev; 170 struct revmap_entry *rev;
171 unsigned long g_ptel; 171 unsigned long g_ptel;
172 struct kvm_memory_slot *memslot; 172 struct kvm_memory_slot *memslot;
@@ -275,9 +275,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
275 return H_PARAMETER; 275 return H_PARAMETER;
276 if (likely((flags & H_EXACT) == 0)) { 276 if (likely((flags & H_EXACT) == 0)) {
277 pte_index &= ~7UL; 277 pte_index &= ~7UL;
278 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); 278 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
279 for (i = 0; i < 8; ++i) { 279 for (i = 0; i < 8; ++i) {
280 if ((*hpte & HPTE_V_VALID) == 0 && 280 if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
281 try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID | 281 try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
282 HPTE_V_ABSENT)) 282 HPTE_V_ABSENT))
283 break; 283 break;
@@ -292,11 +292,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
292 */ 292 */
293 hpte -= 16; 293 hpte -= 16;
294 for (i = 0; i < 8; ++i) { 294 for (i = 0; i < 8; ++i) {
295 u64 pte;
295 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) 296 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
296 cpu_relax(); 297 cpu_relax();
297 if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT))) 298 pte = be64_to_cpu(*hpte);
299 if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
298 break; 300 break;
299 *hpte &= ~HPTE_V_HVLOCK; 301 *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
300 hpte += 2; 302 hpte += 2;
301 } 303 }
302 if (i == 8) 304 if (i == 8)
@@ -304,14 +306,17 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
304 } 306 }
305 pte_index += i; 307 pte_index += i;
306 } else { 308 } else {
307 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); 309 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
308 if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID | 310 if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
309 HPTE_V_ABSENT)) { 311 HPTE_V_ABSENT)) {
310 /* Lock the slot and check again */ 312 /* Lock the slot and check again */
313 u64 pte;
314
311 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) 315 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
312 cpu_relax(); 316 cpu_relax();
313 if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) { 317 pte = be64_to_cpu(*hpte);
314 *hpte &= ~HPTE_V_HVLOCK; 318 if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
319 *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
315 return H_PTEG_FULL; 320 return H_PTEG_FULL;
316 } 321 }
317 } 322 }
@@ -347,11 +352,11 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
347 } 352 }
348 } 353 }
349 354
350 hpte[1] = ptel; 355 hpte[1] = cpu_to_be64(ptel);
351 356
352 /* Write the first HPTE dword, unlocking the HPTE and making it valid */ 357 /* Write the first HPTE dword, unlocking the HPTE and making it valid */
353 eieio(); 358 eieio();
354 hpte[0] = pteh; 359 hpte[0] = cpu_to_be64(pteh);
355 asm volatile("ptesync" : : : "memory"); 360 asm volatile("ptesync" : : : "memory");
356 361
357 *pte_idx_ret = pte_index; 362 *pte_idx_ret = pte_index;
@@ -468,30 +473,35 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
468 unsigned long pte_index, unsigned long avpn, 473 unsigned long pte_index, unsigned long avpn,
469 unsigned long *hpret) 474 unsigned long *hpret)
470{ 475{
471 unsigned long *hpte; 476 __be64 *hpte;
472 unsigned long v, r, rb; 477 unsigned long v, r, rb;
473 struct revmap_entry *rev; 478 struct revmap_entry *rev;
479 u64 pte;
474 480
475 if (pte_index >= kvm->arch.hpt_npte) 481 if (pte_index >= kvm->arch.hpt_npte)
476 return H_PARAMETER; 482 return H_PARAMETER;
477 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); 483 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
478 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) 484 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
479 cpu_relax(); 485 cpu_relax();
480 if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 || 486 pte = be64_to_cpu(hpte[0]);
481 ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) || 487 if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
482 ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) { 488 ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
483 hpte[0] &= ~HPTE_V_HVLOCK; 489 ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
490 hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
484 return H_NOT_FOUND; 491 return H_NOT_FOUND;
485 } 492 }
486 493
487 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); 494 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
488 v = hpte[0] & ~HPTE_V_HVLOCK; 495 v = pte & ~HPTE_V_HVLOCK;
489 if (v & HPTE_V_VALID) { 496 if (v & HPTE_V_VALID) {
490 hpte[0] &= ~HPTE_V_VALID; 497 u64 pte1;
491 rb = compute_tlbie_rb(v, hpte[1], pte_index); 498
499 pte1 = be64_to_cpu(hpte[1]);
500 hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
501 rb = compute_tlbie_rb(v, pte1, pte_index);
492 do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true); 502 do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
493 /* Read PTE low word after tlbie to get final R/C values */ 503 /* Read PTE low word after tlbie to get final R/C values */
494 remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]); 504 remove_revmap_chain(kvm, pte_index, rev, v, pte1);
495 } 505 }
496 r = rev->guest_rpte & ~HPTE_GR_RESERVED; 506 r = rev->guest_rpte & ~HPTE_GR_RESERVED;
497 note_hpte_modification(kvm, rev); 507 note_hpte_modification(kvm, rev);
@@ -514,12 +524,14 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
514{ 524{
515 struct kvm *kvm = vcpu->kvm; 525 struct kvm *kvm = vcpu->kvm;
516 unsigned long *args = &vcpu->arch.gpr[4]; 526 unsigned long *args = &vcpu->arch.gpr[4];
517 unsigned long *hp, *hptes[4], tlbrb[4]; 527 __be64 *hp, *hptes[4];
528 unsigned long tlbrb[4];
518 long int i, j, k, n, found, indexes[4]; 529 long int i, j, k, n, found, indexes[4];
519 unsigned long flags, req, pte_index, rcbits; 530 unsigned long flags, req, pte_index, rcbits;
520 int global; 531 int global;
521 long int ret = H_SUCCESS; 532 long int ret = H_SUCCESS;
522 struct revmap_entry *rev, *revs[4]; 533 struct revmap_entry *rev, *revs[4];
534 u64 hp0;
523 535
524 global = global_invalidates(kvm, 0); 536 global = global_invalidates(kvm, 0);
525 for (i = 0; i < 4 && ret == H_SUCCESS; ) { 537 for (i = 0; i < 4 && ret == H_SUCCESS; ) {
@@ -542,8 +554,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
542 ret = H_PARAMETER; 554 ret = H_PARAMETER;
543 break; 555 break;
544 } 556 }
545 hp = (unsigned long *) 557 hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4));
546 (kvm->arch.hpt_virt + (pte_index << 4));
547 /* to avoid deadlock, don't spin except for first */ 558 /* to avoid deadlock, don't spin except for first */
548 if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) { 559 if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
549 if (n) 560 if (n)
@@ -552,23 +563,24 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
552 cpu_relax(); 563 cpu_relax();
553 } 564 }
554 found = 0; 565 found = 0;
555 if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) { 566 hp0 = be64_to_cpu(hp[0]);
567 if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
556 switch (flags & 3) { 568 switch (flags & 3) {
557 case 0: /* absolute */ 569 case 0: /* absolute */
558 found = 1; 570 found = 1;
559 break; 571 break;
560 case 1: /* andcond */ 572 case 1: /* andcond */
561 if (!(hp[0] & args[j + 1])) 573 if (!(hp0 & args[j + 1]))
562 found = 1; 574 found = 1;
563 break; 575 break;
564 case 2: /* AVPN */ 576 case 2: /* AVPN */
565 if ((hp[0] & ~0x7fUL) == args[j + 1]) 577 if ((hp0 & ~0x7fUL) == args[j + 1])
566 found = 1; 578 found = 1;
567 break; 579 break;
568 } 580 }
569 } 581 }
570 if (!found) { 582 if (!found) {
571 hp[0] &= ~HPTE_V_HVLOCK; 583 hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
572 args[j] = ((0x90 | flags) << 56) + pte_index; 584 args[j] = ((0x90 | flags) << 56) + pte_index;
573 continue; 585 continue;
574 } 586 }
@@ -577,7 +589,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
577 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); 589 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
578 note_hpte_modification(kvm, rev); 590 note_hpte_modification(kvm, rev);
579 591
580 if (!(hp[0] & HPTE_V_VALID)) { 592 if (!(hp0 & HPTE_V_VALID)) {
581 /* insert R and C bits from PTE */ 593 /* insert R and C bits from PTE */
582 rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); 594 rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
583 args[j] |= rcbits << (56 - 5); 595 args[j] |= rcbits << (56 - 5);
@@ -585,8 +597,10 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
585 continue; 597 continue;
586 } 598 }
587 599
588 hp[0] &= ~HPTE_V_VALID; /* leave it locked */ 600 /* leave it locked */
589 tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index); 601 hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
602 tlbrb[n] = compute_tlbie_rb(be64_to_cpu(hp[0]),
603 be64_to_cpu(hp[1]), pte_index);
590 indexes[n] = j; 604 indexes[n] = j;
591 hptes[n] = hp; 605 hptes[n] = hp;
592 revs[n] = rev; 606 revs[n] = rev;
@@ -605,7 +619,8 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
605 pte_index = args[j] & ((1ul << 56) - 1); 619 pte_index = args[j] & ((1ul << 56) - 1);
606 hp = hptes[k]; 620 hp = hptes[k];
607 rev = revs[k]; 621 rev = revs[k];
608 remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]); 622 remove_revmap_chain(kvm, pte_index, rev,
623 be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
609 rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); 624 rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
610 args[j] |= rcbits << (56 - 5); 625 args[j] |= rcbits << (56 - 5);
611 hp[0] = 0; 626 hp[0] = 0;
@@ -620,23 +635,25 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
620 unsigned long va) 635 unsigned long va)
621{ 636{
622 struct kvm *kvm = vcpu->kvm; 637 struct kvm *kvm = vcpu->kvm;
623 unsigned long *hpte; 638 __be64 *hpte;
624 struct revmap_entry *rev; 639 struct revmap_entry *rev;
625 unsigned long v, r, rb, mask, bits; 640 unsigned long v, r, rb, mask, bits;
641 u64 pte;
626 642
627 if (pte_index >= kvm->arch.hpt_npte) 643 if (pte_index >= kvm->arch.hpt_npte)
628 return H_PARAMETER; 644 return H_PARAMETER;
629 645
630 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); 646 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
631 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) 647 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
632 cpu_relax(); 648 cpu_relax();
633 if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 || 649 pte = be64_to_cpu(hpte[0]);
634 ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) { 650 if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
635 hpte[0] &= ~HPTE_V_HVLOCK; 651 ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
652 hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
636 return H_NOT_FOUND; 653 return H_NOT_FOUND;
637 } 654 }
638 655
639 v = hpte[0]; 656 v = pte;
640 bits = (flags << 55) & HPTE_R_PP0; 657 bits = (flags << 55) & HPTE_R_PP0;
641 bits |= (flags << 48) & HPTE_R_KEY_HI; 658 bits |= (flags << 48) & HPTE_R_KEY_HI;
642 bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO); 659 bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
@@ -650,12 +667,12 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
650 rev->guest_rpte = r; 667 rev->guest_rpte = r;
651 note_hpte_modification(kvm, rev); 668 note_hpte_modification(kvm, rev);
652 } 669 }
653 r = (hpte[1] & ~mask) | bits; 670 r = (be64_to_cpu(hpte[1]) & ~mask) | bits;
654 671
655 /* Update HPTE */ 672 /* Update HPTE */
656 if (v & HPTE_V_VALID) { 673 if (v & HPTE_V_VALID) {
657 rb = compute_tlbie_rb(v, r, pte_index); 674 rb = compute_tlbie_rb(v, r, pte_index);
658 hpte[0] = v & ~HPTE_V_VALID; 675 hpte[0] = cpu_to_be64(v & ~HPTE_V_VALID);
659 do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true); 676 do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
660 /* 677 /*
661 * If the host has this page as readonly but the guest 678 * If the host has this page as readonly but the guest
@@ -681,9 +698,9 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
681 } 698 }
682 } 699 }
683 } 700 }
684 hpte[1] = r; 701 hpte[1] = cpu_to_be64(r);
685 eieio(); 702 eieio();
686 hpte[0] = v & ~HPTE_V_HVLOCK; 703 hpte[0] = cpu_to_be64(v & ~HPTE_V_HVLOCK);
687 asm volatile("ptesync" : : : "memory"); 704 asm volatile("ptesync" : : : "memory");
688 return H_SUCCESS; 705 return H_SUCCESS;
689} 706}
@@ -692,7 +709,8 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
692 unsigned long pte_index) 709 unsigned long pte_index)
693{ 710{
694 struct kvm *kvm = vcpu->kvm; 711 struct kvm *kvm = vcpu->kvm;
695 unsigned long *hpte, v, r; 712 __be64 *hpte;
713 unsigned long v, r;
696 int i, n = 1; 714 int i, n = 1;
697 struct revmap_entry *rev = NULL; 715 struct revmap_entry *rev = NULL;
698 716
@@ -704,9 +722,9 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
704 } 722 }
705 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); 723 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
706 for (i = 0; i < n; ++i, ++pte_index) { 724 for (i = 0; i < n; ++i, ++pte_index) {
707 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); 725 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
708 v = hpte[0] & ~HPTE_V_HVLOCK; 726 v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
709 r = hpte[1]; 727 r = be64_to_cpu(hpte[1]);
710 if (v & HPTE_V_ABSENT) { 728 if (v & HPTE_V_ABSENT) {
711 v &= ~HPTE_V_ABSENT; 729 v &= ~HPTE_V_ABSENT;
712 v |= HPTE_V_VALID; 730 v |= HPTE_V_VALID;
@@ -721,25 +739,27 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
721 return H_SUCCESS; 739 return H_SUCCESS;
722} 740}
723 741
724void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep, 742void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
725 unsigned long pte_index) 743 unsigned long pte_index)
726{ 744{
727 unsigned long rb; 745 unsigned long rb;
728 746
729 hptep[0] &= ~HPTE_V_VALID; 747 hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
730 rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index); 748 rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
749 pte_index);
731 do_tlbies(kvm, &rb, 1, 1, true); 750 do_tlbies(kvm, &rb, 1, 1, true);
732} 751}
733EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte); 752EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
734 753
735void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep, 754void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
736 unsigned long pte_index) 755 unsigned long pte_index)
737{ 756{
738 unsigned long rb; 757 unsigned long rb;
739 unsigned char rbyte; 758 unsigned char rbyte;
740 759
741 rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index); 760 rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
742 rbyte = (hptep[1] & ~HPTE_R_R) >> 8; 761 pte_index);
762 rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
743 /* modify only the second-last byte, which contains the ref bit */ 763 /* modify only the second-last byte, which contains the ref bit */
744 *((char *)hptep + 14) = rbyte; 764 *((char *)hptep + 14) = rbyte;
745 do_tlbies(kvm, &rb, 1, 1, false); 765 do_tlbies(kvm, &rb, 1, 1, false);
@@ -765,7 +785,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
765 unsigned long somask; 785 unsigned long somask;
766 unsigned long vsid, hash; 786 unsigned long vsid, hash;
767 unsigned long avpn; 787 unsigned long avpn;
768 unsigned long *hpte; 788 __be64 *hpte;
769 unsigned long mask, val; 789 unsigned long mask, val;
770 unsigned long v, r; 790 unsigned long v, r;
771 791
@@ -797,11 +817,11 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
797 val |= avpn; 817 val |= avpn;
798 818
799 for (;;) { 819 for (;;) {
800 hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7)); 820 hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7));
801 821
802 for (i = 0; i < 16; i += 2) { 822 for (i = 0; i < 16; i += 2) {
803 /* Read the PTE racily */ 823 /* Read the PTE racily */
804 v = hpte[i] & ~HPTE_V_HVLOCK; 824 v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
805 825
806 /* Check valid/absent, hash, segment size and AVPN */ 826 /* Check valid/absent, hash, segment size and AVPN */
807 if (!(v & valid) || (v & mask) != val) 827 if (!(v & valid) || (v & mask) != val)
@@ -810,8 +830,8 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
810 /* Lock the PTE and read it under the lock */ 830 /* Lock the PTE and read it under the lock */
811 while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK)) 831 while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
812 cpu_relax(); 832 cpu_relax();
813 v = hpte[i] & ~HPTE_V_HVLOCK; 833 v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
814 r = hpte[i+1]; 834 r = be64_to_cpu(hpte[i+1]);
815 835
816 /* 836 /*
817 * Check the HPTE again, including base page size 837 * Check the HPTE again, including base page size
@@ -822,7 +842,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
822 return (hash << 3) + (i >> 1); 842 return (hash << 3) + (i >> 1);
823 843
824 /* Unlock and move on */ 844 /* Unlock and move on */
825 hpte[i] = v; 845 hpte[i] = cpu_to_be64(v);
826 } 846 }
827 847
828 if (val & HPTE_V_SECONDARY) 848 if (val & HPTE_V_SECONDARY)
@@ -851,7 +871,7 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
851 struct kvm *kvm = vcpu->kvm; 871 struct kvm *kvm = vcpu->kvm;
852 long int index; 872 long int index;
853 unsigned long v, r, gr; 873 unsigned long v, r, gr;
854 unsigned long *hpte; 874 __be64 *hpte;
855 unsigned long valid; 875 unsigned long valid;
856 struct revmap_entry *rev; 876 struct revmap_entry *rev;
857 unsigned long pp, key; 877 unsigned long pp, key;
@@ -867,9 +887,9 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
867 return status; /* there really was no HPTE */ 887 return status; /* there really was no HPTE */
868 return 0; /* for prot fault, HPTE disappeared */ 888 return 0; /* for prot fault, HPTE disappeared */
869 } 889 }
870 hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); 890 hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
871 v = hpte[0] & ~HPTE_V_HVLOCK; 891 v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
872 r = hpte[1]; 892 r = be64_to_cpu(hpte[1]);
873 rev = real_vmalloc_addr(&kvm->arch.revmap[index]); 893 rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
874 gr = rev->guest_rpte; 894 gr = rev->guest_rpte;
875 895
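
Throughout book3s_hv_rm_mmu.c the pattern is the same: spin on try_lock_hpte() until the HPTE_V_HVLOCK software lock bit in the first PTE doubleword is acquired, work on host-endian copies of the entry, then clear only the lock bit (now via ~cpu_to_be64(HPTE_V_HVLOCK)) on the way out. A user-space sketch of such an in-word lock using C11 atomics; the real code uses larx/stcx. sequences and keeps the doubleword big-endian:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define V_LOCK (1ULL << 3)                      /* placeholder lock bit */

static int try_lock_pte(_Atomic uint64_t *pte)
{
        uint64_t old = atomic_load(pte);

        if (old & V_LOCK)
                return 0;                       /* someone else holds it */
        return atomic_compare_exchange_strong(pte, &old, old | V_LOCK);
}

static void unlock_pte(_Atomic uint64_t *pte)
{
        atomic_fetch_and(pte, ~V_LOCK);         /* clear only the lock bit */
}

int main(void)
{
        _Atomic uint64_t pte = 0x1234;

        while (!try_lock_pte(&pte))
                ;                               /* cpu_relax() in the kernel */
        printf("locked:   0x%llx\n", (unsigned long long)atomic_load(&pte));

        unlock_pte(&pte);
        printf("unlocked: 0x%llx\n", (unsigned long long)atomic_load(&pte));
        return 0;
}
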
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index b4b0082f761c..3ee38e6e884f 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -401,6 +401,11 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
401 icp->rm_action |= XICS_RM_REJECT; 401 icp->rm_action |= XICS_RM_REJECT;
402 icp->rm_reject = irq; 402 icp->rm_reject = irq;
403 } 403 }
404
405 if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
406 icp->rm_action |= XICS_RM_NOTIFY_EOI;
407 icp->rm_eoied_irq = irq;
408 }
404 bail: 409 bail:
405 return check_too_hard(xics, icp); 410 return check_too_hard(xics, icp);
406} 411}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 7faf8fd05738..f0c4db7704c3 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -32,10 +32,6 @@
32 32
33#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) 33#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
34 34
35#ifdef __LITTLE_ENDIAN__
36#error Need to fix lppaca and SLB shadow accesses in little endian mode
37#endif
38
39/* Values in HSTATE_NAPPING(r13) */ 35/* Values in HSTATE_NAPPING(r13) */
40#define NAPPING_CEDE 1 36#define NAPPING_CEDE 1
41#define NAPPING_NOVCPU 2 37#define NAPPING_NOVCPU 2
@@ -601,9 +597,10 @@ kvmppc_got_guest:
601 ld r3, VCPU_VPA(r4) 597 ld r3, VCPU_VPA(r4)
602 cmpdi r3, 0 598 cmpdi r3, 0
603 beq 25f 599 beq 25f
604 lwz r5, LPPACA_YIELDCOUNT(r3) 600 li r6, LPPACA_YIELDCOUNT
601 LWZX_BE r5, r3, r6
605 addi r5, r5, 1 602 addi r5, r5, 1
606 stw r5, LPPACA_YIELDCOUNT(r3) 603 STWX_BE r5, r3, r6
607 li r6, 1 604 li r6, 1
608 stb r6, VCPU_VPA_DIRTY(r4) 605 stb r6, VCPU_VPA_DIRTY(r4)
60925: 60625:
@@ -677,9 +674,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM)
677 674
678 mr r31, r4 675 mr r31, r4
679 addi r3, r31, VCPU_FPRS_TM 676 addi r3, r31, VCPU_FPRS_TM
680 bl .load_fp_state 677 bl load_fp_state
681 addi r3, r31, VCPU_VRS_TM 678 addi r3, r31, VCPU_VRS_TM
682 bl .load_vr_state 679 bl load_vr_state
683 mr r4, r31 680 mr r4, r31
684 lwz r7, VCPU_VRSAVE_TM(r4) 681 lwz r7, VCPU_VRSAVE_TM(r4)
685 mtspr SPRN_VRSAVE, r7 682 mtspr SPRN_VRSAVE, r7
@@ -1423,9 +1420,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM)
1423 1420
1424 /* Save FP/VSX. */ 1421 /* Save FP/VSX. */
1425 addi r3, r9, VCPU_FPRS_TM 1422 addi r3, r9, VCPU_FPRS_TM
1426 bl .store_fp_state 1423 bl store_fp_state
1427 addi r3, r9, VCPU_VRS_TM 1424 addi r3, r9, VCPU_VRS_TM
1428 bl .store_vr_state 1425 bl store_vr_state
1429 mfspr r6, SPRN_VRSAVE 1426 mfspr r6, SPRN_VRSAVE
1430 stw r6, VCPU_VRSAVE_TM(r9) 1427 stw r6, VCPU_VRSAVE_TM(r9)
14311: 14281:
@@ -1448,9 +1445,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM)
1448 ld r8, VCPU_VPA(r9) /* do they have a VPA? */ 1445 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1449 cmpdi r8, 0 1446 cmpdi r8, 0
1450 beq 25f 1447 beq 25f
1451 lwz r3, LPPACA_YIELDCOUNT(r8) 1448 li r4, LPPACA_YIELDCOUNT
1449 LWZX_BE r3, r8, r4
1452 addi r3, r3, 1 1450 addi r3, r3, 1
1453 stw r3, LPPACA_YIELDCOUNT(r8) 1451 STWX_BE r3, r8, r4
1454 li r3, 1 1452 li r3, 1
1455 stb r3, VCPU_VPA_DIRTY(r9) 1453 stb r3, VCPU_VPA_DIRTY(r9)
145625: 145425:
@@ -1763,8 +1761,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
176333: ld r8,PACA_SLBSHADOWPTR(r13) 176133: ld r8,PACA_SLBSHADOWPTR(r13)
1764 1762
1765 .rept SLB_NUM_BOLTED 1763 .rept SLB_NUM_BOLTED
1766 ld r5,SLBSHADOW_SAVEAREA(r8) 1764 li r3, SLBSHADOW_SAVEAREA
1767 ld r6,SLBSHADOW_SAVEAREA+8(r8) 1765 LDX_BE r5, r8, r3
1766 addi r3, r3, 8
1767 LDX_BE r6, r8, r3
1768 andis. r7,r5,SLB_ESID_V@h 1768 andis. r7,r5,SLB_ESID_V@h
1769 beq 1f 1769 beq 1f
1770 slbmte r6,r5 1770 slbmte r6,r5
@@ -1915,12 +1915,23 @@ hcall_try_real_mode:
1915 clrrdi r3,r3,2 1915 clrrdi r3,r3,2
1916 cmpldi r3,hcall_real_table_end - hcall_real_table 1916 cmpldi r3,hcall_real_table_end - hcall_real_table
1917 bge guest_exit_cont 1917 bge guest_exit_cont
1918 /* See if this hcall is enabled for in-kernel handling */
1919 ld r4, VCPU_KVM(r9)
1920 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */
1921 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */
1922 add r4, r4, r0
1923 ld r0, KVM_ENABLED_HCALLS(r4)
1924 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */
1925 srd r0, r0, r4
1926 andi. r0, r0, 1
1927 beq guest_exit_cont
1928 /* Get pointer to handler, if any, and call it */
1918 LOAD_REG_ADDR(r4, hcall_real_table) 1929 LOAD_REG_ADDR(r4, hcall_real_table)
1919 lwax r3,r3,r4 1930 lwax r3,r3,r4
1920 cmpwi r3,0 1931 cmpwi r3,0
1921 beq guest_exit_cont 1932 beq guest_exit_cont
1922 add r3,r3,r4 1933 add r12,r3,r4
1923 mtctr r3 1934 mtctr r12
1924 mr r3,r9 /* get vcpu pointer */ 1935 mr r3,r9 /* get vcpu pointer */
1925 ld r4,VCPU_GPR(R4)(r9) 1936 ld r4,VCPU_GPR(R4)(r9)
1926 bctrl 1937 bctrl
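
The assembly added to hcall_try_real_mode mirrors the C-side check: with the hcall number in r3, it loads the right 64-bit word of kvm->arch.enabled_hcalls and tests the bit for opcode/4 before dispatching through hcall_real_table. A C rendering of that index arithmetic (names are illustrative):

#include <stdint.h>
#include <stdio.h>

static int hcall_allowed(const uint64_t *enabled_hcalls, unsigned long hcall)
{
        unsigned long word = hcall >> 8;            /* srdi r0,r3,8: (hcall/4) / 64 */
        unsigned long bit  = (hcall >> 2) & 0x3f;   /* rlwinm r4,r3,32-2,0x3f       */

        return (enabled_hcalls[word] >> bit) & 1;   /* ld; srd; andi.               */
}

int main(void)
{
        uint64_t map[4] = { 0 };

        map[0] |= 1ULL << ((0x2c >> 2) & 0x3f);     /* pretend opcode 0x2c is enabled */
        printf("0x2c: %d  0x30: %d\n",
               hcall_allowed(map, 0x2c), hcall_allowed(map, 0x30));
        return 0;
}
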
@@ -2037,6 +2048,7 @@ hcall_real_table:
2037 .long 0 /* 0x12c */ 2048 .long 0 /* 0x12c */
2038 .long 0 /* 0x130 */ 2049 .long 0 /* 0x130 */
2039 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table 2050 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
2051 .globl hcall_real_table_end
2040hcall_real_table_end: 2052hcall_real_table_end:
2041 2053
2042ignore_hdec: 2054ignore_hdec:
@@ -2344,7 +2356,18 @@ kvmppc_read_intr:
2344 cmpdi r6, 0 2356 cmpdi r6, 0
2345 beq- 1f 2357 beq- 1f
2346 lwzcix r0, r6, r7 2358 lwzcix r0, r6, r7
2347 rlwinm. r3, r0, 0, 0xffffff 2359 /*
2360 * Save XIRR for later. Since we get in in reverse endian on LE
2361 * systems, save it byte reversed and fetch it back in host endian.
2362 */
2363 li r3, HSTATE_SAVED_XIRR
2364 STWX_BE r0, r3, r13
2365#ifdef __LITTLE_ENDIAN__
2366 lwz r3, HSTATE_SAVED_XIRR(r13)
2367#else
2368 mr r3, r0
2369#endif
2370 rlwinm. r3, r3, 0, 0xffffff
2348 sync 2371 sync
2349 beq 1f /* if nothing pending in the ICP */ 2372 beq 1f /* if nothing pending in the ICP */
2350 2373
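
The kvmppc_read_intr change above deals with the XIRR register being big-endian while a little-endian host reads it with a non-byte-swapping lwzcix: the raw value is stored with a byte-reversing store (STWX_BE) and immediately loaded back with a plain lwz, leaving a host-endian copy in r3 and a saved copy in the PACA for the host ICP driver. Store-reversed-then-load-normal amounts to a 32-bit byte swap of the register value; in C terms (using the GCC/Clang builtin):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t raw = 0x000000a5;              /* as lwzcix leaves it on an LE host  */
        uint32_t host_view;

        host_view = __builtin_bswap32(raw);     /* STWX_BE to the PACA, then lwz back */
        printf("raw=0x%08x host_view=0x%08x\n", raw, host_view);
        return 0;
}
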
@@ -2376,10 +2399,9 @@ kvmppc_read_intr:
2376 li r3, -1 2399 li r3, -1
23771: blr 24001: blr
2378 2401
237942: /* It's not an IPI and it's for the host, stash it in the PACA 240242: /* It's not an IPI and it's for the host. We saved a copy of XIRR in
2380 * before exit, it will be picked up by the host ICP driver 2403 * the PACA earlier, it will be picked up by the host ICP driver
2381 */ 2404 */
2382 stw r0, HSTATE_SAVED_XIRR(r13)
2383 li r3, 1 2405 li r3, 1
2384 b 1b 2406 b 1b
2385 2407
@@ -2414,11 +2436,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2414 mtmsrd r8 2436 mtmsrd r8
2415 isync 2437 isync
2416 addi r3,r3,VCPU_FPRS 2438 addi r3,r3,VCPU_FPRS
2417 bl .store_fp_state 2439 bl store_fp_state
2418#ifdef CONFIG_ALTIVEC 2440#ifdef CONFIG_ALTIVEC
2419BEGIN_FTR_SECTION 2441BEGIN_FTR_SECTION
2420 addi r3,r31,VCPU_VRS 2442 addi r3,r31,VCPU_VRS
2421 bl .store_vr_state 2443 bl store_vr_state
2422END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2444END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2423#endif 2445#endif
2424 mfspr r6,SPRN_VRSAVE 2446 mfspr r6,SPRN_VRSAVE
@@ -2450,11 +2472,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2450 mtmsrd r8 2472 mtmsrd r8
2451 isync 2473 isync
2452 addi r3,r4,VCPU_FPRS 2474 addi r3,r4,VCPU_FPRS
2453 bl .load_fp_state 2475 bl load_fp_state
2454#ifdef CONFIG_ALTIVEC 2476#ifdef CONFIG_ALTIVEC
2455BEGIN_FTR_SECTION 2477BEGIN_FTR_SECTION
2456 addi r3,r31,VCPU_VRS 2478 addi r3,r31,VCPU_VRS
2457 bl .load_vr_state 2479 bl load_vr_state
2458END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2480END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2459#endif 2481#endif
2460 lwz r7,VCPU_VRSAVE(r31) 2482 lwz r7,VCPU_VRSAVE(r31)
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index 6c8011fd57e6..bfb8035314e3 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -639,26 +639,36 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
639 639
640int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) 640int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
641{ 641{
642 u32 inst = kvmppc_get_last_inst(vcpu); 642 u32 inst;
643 enum emulation_result emulated = EMULATE_DONE; 643 enum emulation_result emulated = EMULATE_DONE;
644 int ax_rd, ax_ra, ax_rb, ax_rc;
645 short full_d;
646 u64 *fpr_d, *fpr_a, *fpr_b, *fpr_c;
644 647
645 int ax_rd = inst_get_field(inst, 6, 10); 648 bool rcomp;
646 int ax_ra = inst_get_field(inst, 11, 15); 649 u32 cr;
647 int ax_rb = inst_get_field(inst, 16, 20);
648 int ax_rc = inst_get_field(inst, 21, 25);
649 short full_d = inst_get_field(inst, 16, 31);
650
651 u64 *fpr_d = &VCPU_FPR(vcpu, ax_rd);
652 u64 *fpr_a = &VCPU_FPR(vcpu, ax_ra);
653 u64 *fpr_b = &VCPU_FPR(vcpu, ax_rb);
654 u64 *fpr_c = &VCPU_FPR(vcpu, ax_rc);
655
656 bool rcomp = (inst & 1) ? true : false;
657 u32 cr = kvmppc_get_cr(vcpu);
658#ifdef DEBUG 650#ifdef DEBUG
659 int i; 651 int i;
660#endif 652#endif
661 653
654 emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
655 if (emulated != EMULATE_DONE)
656 return emulated;
657
658 ax_rd = inst_get_field(inst, 6, 10);
659 ax_ra = inst_get_field(inst, 11, 15);
660 ax_rb = inst_get_field(inst, 16, 20);
661 ax_rc = inst_get_field(inst, 21, 25);
662 full_d = inst_get_field(inst, 16, 31);
663
664 fpr_d = &VCPU_FPR(vcpu, ax_rd);
665 fpr_a = &VCPU_FPR(vcpu, ax_ra);
666 fpr_b = &VCPU_FPR(vcpu, ax_rb);
667 fpr_c = &VCPU_FPR(vcpu, ax_rc);
668
669 rcomp = (inst & 1) ? true : false;
670 cr = kvmppc_get_cr(vcpu);
671
662 if (!kvmppc_inst_is_paired_single(vcpu, inst)) 672 if (!kvmppc_inst_is_paired_single(vcpu, inst))
663 return EMULATE_FAIL; 673 return EMULATE_FAIL;
664 674
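
kvmppc_emulate_paired_single() used to decode operand fields straight out of kvmppc_get_last_inst()'s return value; with this patch the fetch itself can fail (it now returns an emulation result and the instruction through a pointer), so all decoding is deferred until the fetch reports EMULATE_DONE. The same fetch-then-decode control flow, sketched with invented types and a stub fetch:

#include <stdint.h>
#include <stdio.h>

enum emu_result { EMULATE_DONE, EMULATE_AGAIN };

static enum emu_result fetch_last_inst(uint32_t *inst)
{
        *inst = 0x10220420;             /* placeholder opcode            */
        return EMULATE_DONE;            /* a real fetch could fail here  */
}

int main(void)
{
        uint32_t inst;

        if (fetch_last_inst(&inst) != EMULATE_DONE)
                return 1;               /* bail out before decoding anything */

        unsigned int rd = (inst >> 21) & 0x1f;  /* IBM bits 6-10 of the word */
        printf("inst=0x%08x rd=%u\n", inst, rd);
        return 0;
}
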
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 8eef1e519077..faffb27badd9 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -62,6 +62,35 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
62#define HW_PAGE_SIZE PAGE_SIZE 62#define HW_PAGE_SIZE PAGE_SIZE
63#endif 63#endif
64 64
65static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
66{
67 ulong msr = kvmppc_get_msr(vcpu);
68 return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
69}
70
71static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
72{
73 ulong msr = kvmppc_get_msr(vcpu);
74 ulong pc = kvmppc_get_pc(vcpu);
75
76 /* We are in DR only split real mode */
77 if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
78 return;
79
 80 /* Nothing to do if we have already fixed up the guest */
81 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
82 return;
83
 84 /* Nothing to do unless the PC is in the fixupable (low) address range */
85 if (pc & SPLIT_HACK_MASK)
86 return;
87
88 vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
89 kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
90}
91
92void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu);
93
65static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) 94static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
66{ 95{
67#ifdef CONFIG_PPC_BOOK3S_64 96#ifdef CONFIG_PPC_BOOK3S_64
@@ -71,10 +100,19 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
71 svcpu->in_use = 0; 100 svcpu->in_use = 0;
72 svcpu_put(svcpu); 101 svcpu_put(svcpu);
73#endif 102#endif
103
104 /* Disable AIL if supported */
105 if (cpu_has_feature(CPU_FTR_HVMODE) &&
106 cpu_has_feature(CPU_FTR_ARCH_207S))
107 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);
108
74 vcpu->cpu = smp_processor_id(); 109 vcpu->cpu = smp_processor_id();
75#ifdef CONFIG_PPC_BOOK3S_32 110#ifdef CONFIG_PPC_BOOK3S_32
76 current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu; 111 current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
77#endif 112#endif
113
114 if (kvmppc_is_split_real(vcpu))
115 kvmppc_fixup_split_real(vcpu);
78} 116}
79 117
80static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) 118static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
@@ -89,8 +127,17 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
89 svcpu_put(svcpu); 127 svcpu_put(svcpu);
90#endif 128#endif
91 129
130 if (kvmppc_is_split_real(vcpu))
131 kvmppc_unfixup_split_real(vcpu);
132
92 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); 133 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
93 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); 134 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
135
136 /* Enable AIL if supported */
137 if (cpu_has_feature(CPU_FTR_HVMODE) &&
138 cpu_has_feature(CPU_FTR_ARCH_207S))
139 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);
140
94 vcpu->cpu = -1; 141 vcpu->cpu = -1;
95} 142}
96 143
@@ -120,6 +167,14 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
120#ifdef CONFIG_PPC_BOOK3S_64 167#ifdef CONFIG_PPC_BOOK3S_64
121 svcpu->shadow_fscr = vcpu->arch.shadow_fscr; 168 svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
122#endif 169#endif
170 /*
171 * Now also save the current time base value. We use this
172 * to find the guest purr and spurr value.
173 */
174 vcpu->arch.entry_tb = get_tb();
175 vcpu->arch.entry_vtb = get_vtb();
176 if (cpu_has_feature(CPU_FTR_ARCH_207S))
177 vcpu->arch.entry_ic = mfspr(SPRN_IC);
123 svcpu->in_use = true; 178 svcpu->in_use = true;
124} 179}
125 180
@@ -166,6 +221,14 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
166#ifdef CONFIG_PPC_BOOK3S_64 221#ifdef CONFIG_PPC_BOOK3S_64
167 vcpu->arch.shadow_fscr = svcpu->shadow_fscr; 222 vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
168#endif 223#endif
224 /*
225 * Update purr and spurr using time base on exit.
226 */
227 vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
228 vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
229 vcpu->arch.vtb += get_vtb() - vcpu->arch.entry_vtb;
230 if (cpu_has_feature(CPU_FTR_ARCH_207S))
231 vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;
169 svcpu->in_use = false; 232 svcpu->in_use = false;
170 233
171out: 234out:
@@ -294,6 +357,11 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
294 } 357 }
295 } 358 }
296 359
360 if (kvmppc_is_split_real(vcpu))
361 kvmppc_fixup_split_real(vcpu);
362 else
363 kvmppc_unfixup_split_real(vcpu);
364
297 if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) != 365 if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
298 (old_msr & (MSR_PR|MSR_IR|MSR_DR))) { 366 (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
299 kvmppc_mmu_flush_segments(vcpu); 367 kvmppc_mmu_flush_segments(vcpu);
@@ -443,19 +511,19 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
443 put_page(hpage); 511 put_page(hpage);
444} 512}
445 513
446static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 514static int kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
447{ 515{
448 ulong mp_pa = vcpu->arch.magic_page_pa; 516 ulong mp_pa = vcpu->arch.magic_page_pa;
449 517
450 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) 518 if (!(kvmppc_get_msr(vcpu) & MSR_SF))
451 mp_pa = (uint32_t)mp_pa; 519 mp_pa = (uint32_t)mp_pa;
452 520
453 if (unlikely(mp_pa) && 521 gpa &= ~0xFFFULL;
454 unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) { 522 if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
455 return 1; 523 return 1;
456 } 524 }
457 525
458 return kvm_is_visible_gfn(vcpu->kvm, gfn); 526 return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
459} 527}
460 528
461int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, 529int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
@@ -494,6 +562,11 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
494 pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12)); 562 pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
495 break; 563 break;
496 case MSR_DR: 564 case MSR_DR:
565 if (!data &&
566 (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
567 ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
568 pte.raddr &= ~SPLIT_HACK_MASK;
569 /* fall through */
497 case MSR_IR: 570 case MSR_IR:
498 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); 571 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
499 572
@@ -541,7 +614,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
541 kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); 614 kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
542 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); 615 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
543 } else if (!is_mmio && 616 } else if (!is_mmio &&
544 kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { 617 kvmppc_visible_gpa(vcpu, pte.raddr)) {
545 if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) { 618 if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
546 /* 619 /*
547 * There is already a host HPTE there, presumably 620 * There is already a host HPTE there, presumably
@@ -637,42 +710,6 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
637#endif 710#endif
638} 711}
639 712
640static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
641{
642 ulong srr0 = kvmppc_get_pc(vcpu);
643 u32 last_inst = kvmppc_get_last_inst(vcpu);
644 int ret;
645
646 ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
647 if (ret == -ENOENT) {
648 ulong msr = kvmppc_get_msr(vcpu);
649
650 msr = kvmppc_set_field(msr, 33, 33, 1);
651 msr = kvmppc_set_field(msr, 34, 36, 0);
652 msr = kvmppc_set_field(msr, 42, 47, 0);
653 kvmppc_set_msr_fast(vcpu, msr);
654 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
655 return EMULATE_AGAIN;
656 }
657
658 return EMULATE_DONE;
659}
660
661static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
662{
663
664 /* Need to do paired single emulation? */
665 if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
666 return EMULATE_DONE;
667
668 /* Read out the instruction */
669 if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
670 /* Need to emulate */
671 return EMULATE_FAIL;
672
673 return EMULATE_AGAIN;
674}
675
676/* Handle external providers (FPU, Altivec, VSX) */ 713/* Handle external providers (FPU, Altivec, VSX) */
677static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, 714static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
678 ulong msr) 715 ulong msr)
@@ -834,6 +871,15 @@ static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
834 871
835 return RESUME_GUEST; 872 return RESUME_GUEST;
836} 873}
874
875void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
876{
877 if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
878 /* TAR got dropped, drop it in shadow too */
879 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
880 }
881 vcpu->arch.fscr = fscr;
882}
837#endif 883#endif
838 884
839int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, 885int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
@@ -858,6 +904,9 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
858 ulong shadow_srr1 = vcpu->arch.shadow_srr1; 904 ulong shadow_srr1 = vcpu->arch.shadow_srr1;
859 vcpu->stat.pf_instruc++; 905 vcpu->stat.pf_instruc++;
860 906
907 if (kvmppc_is_split_real(vcpu))
908 kvmppc_fixup_split_real(vcpu);
909
861#ifdef CONFIG_PPC_BOOK3S_32 910#ifdef CONFIG_PPC_BOOK3S_32
862 /* We set segments as unused segments when invalidating them. So 911 /* We set segments as unused segments when invalidating them. So
863 * treat the respective fault as segment fault. */ 912 * treat the respective fault as segment fault. */
@@ -960,6 +1009,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
960 case BOOK3S_INTERRUPT_DECREMENTER: 1009 case BOOK3S_INTERRUPT_DECREMENTER:
961 case BOOK3S_INTERRUPT_HV_DECREMENTER: 1010 case BOOK3S_INTERRUPT_HV_DECREMENTER:
962 case BOOK3S_INTERRUPT_DOORBELL: 1011 case BOOK3S_INTERRUPT_DOORBELL:
1012 case BOOK3S_INTERRUPT_H_DOORBELL:
963 vcpu->stat.dec_exits++; 1013 vcpu->stat.dec_exits++;
964 r = RESUME_GUEST; 1014 r = RESUME_GUEST;
965 break; 1015 break;
@@ -977,15 +1027,24 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
977 { 1027 {
978 enum emulation_result er; 1028 enum emulation_result er;
979 ulong flags; 1029 ulong flags;
1030 u32 last_inst;
1031 int emul;
980 1032
981program_interrupt: 1033program_interrupt:
982 flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; 1034 flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
983 1035
1036 emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
1037 if (emul != EMULATE_DONE) {
1038 r = RESUME_GUEST;
1039 break;
1040 }
1041
984 if (kvmppc_get_msr(vcpu) & MSR_PR) { 1042 if (kvmppc_get_msr(vcpu) & MSR_PR) {
985#ifdef EXIT_DEBUG 1043#ifdef EXIT_DEBUG
986 printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); 1044 pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
1045 kvmppc_get_pc(vcpu), last_inst);
987#endif 1046#endif
988 if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) != 1047 if ((last_inst & 0xff0007ff) !=
989 (INS_DCBZ & 0xfffffff7)) { 1048 (INS_DCBZ & 0xfffffff7)) {
990 kvmppc_core_queue_program(vcpu, flags); 1049 kvmppc_core_queue_program(vcpu, flags);
991 r = RESUME_GUEST; 1050 r = RESUME_GUEST;
@@ -1004,7 +1063,7 @@ program_interrupt:
1004 break; 1063 break;
1005 case EMULATE_FAIL: 1064 case EMULATE_FAIL:
1006 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", 1065 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
1007 __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); 1066 __func__, kvmppc_get_pc(vcpu), last_inst);
1008 kvmppc_core_queue_program(vcpu, flags); 1067 kvmppc_core_queue_program(vcpu, flags);
1009 r = RESUME_GUEST; 1068 r = RESUME_GUEST;
1010 break; 1069 break;
@@ -1021,8 +1080,23 @@ program_interrupt:
1021 break; 1080 break;
1022 } 1081 }
1023 case BOOK3S_INTERRUPT_SYSCALL: 1082 case BOOK3S_INTERRUPT_SYSCALL:
1083 {
1084 u32 last_sc;
1085 int emul;
1086
1087 /* Get last sc for papr */
1088 if (vcpu->arch.papr_enabled) {
 1089 /* The sc instruction points SRR0 to the next inst */
1090 emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
1091 if (emul != EMULATE_DONE) {
1092 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
1093 r = RESUME_GUEST;
1094 break;
1095 }
1096 }
1097
1024 if (vcpu->arch.papr_enabled && 1098 if (vcpu->arch.papr_enabled &&
1025 (kvmppc_get_last_sc(vcpu) == 0x44000022) && 1099 (last_sc == 0x44000022) &&
1026 !(kvmppc_get_msr(vcpu) & MSR_PR)) { 1100 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
1027 /* SC 1 papr hypercalls */ 1101 /* SC 1 papr hypercalls */
1028 ulong cmd = kvmppc_get_gpr(vcpu, 3); 1102 ulong cmd = kvmppc_get_gpr(vcpu, 3);
@@ -1067,36 +1141,51 @@ program_interrupt:
1067 r = RESUME_GUEST; 1141 r = RESUME_GUEST;
1068 } 1142 }
1069 break; 1143 break;
1144 }
1070 case BOOK3S_INTERRUPT_FP_UNAVAIL: 1145 case BOOK3S_INTERRUPT_FP_UNAVAIL:
1071 case BOOK3S_INTERRUPT_ALTIVEC: 1146 case BOOK3S_INTERRUPT_ALTIVEC:
1072 case BOOK3S_INTERRUPT_VSX: 1147 case BOOK3S_INTERRUPT_VSX:
1073 { 1148 {
1074 int ext_msr = 0; 1149 int ext_msr = 0;
1150 int emul;
1151 u32 last_inst;
1152
1153 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
1154 /* Do paired single instruction emulation */
1155 emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
1156 &last_inst);
1157 if (emul == EMULATE_DONE)
1158 goto program_interrupt;
1159 else
1160 r = RESUME_GUEST;
1075 1161
1076 switch (exit_nr) { 1162 break;
1077 case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP; break;
1078 case BOOK3S_INTERRUPT_ALTIVEC: ext_msr = MSR_VEC; break;
1079 case BOOK3S_INTERRUPT_VSX: ext_msr = MSR_VSX; break;
1080 } 1163 }
1081 1164
1082 switch (kvmppc_check_ext(vcpu, exit_nr)) { 1165 /* Enable external provider */
1083 case EMULATE_DONE: 1166 switch (exit_nr) {
1084 /* everything ok - let's enable the ext */ 1167 case BOOK3S_INTERRUPT_FP_UNAVAIL:
1085 r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr); 1168 ext_msr = MSR_FP;
1086 break; 1169 break;
1087 case EMULATE_FAIL: 1170
1088 /* we need to emulate this instruction */ 1171 case BOOK3S_INTERRUPT_ALTIVEC:
1089 goto program_interrupt; 1172 ext_msr = MSR_VEC;
1090 break; 1173 break;
1091 default: 1174
1092 /* nothing to worry about - go again */ 1175 case BOOK3S_INTERRUPT_VSX:
1176 ext_msr = MSR_VSX;
1093 break; 1177 break;
1094 } 1178 }
1179
1180 r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
1095 break; 1181 break;
1096 } 1182 }
1097 case BOOK3S_INTERRUPT_ALIGNMENT: 1183 case BOOK3S_INTERRUPT_ALIGNMENT:
1098 if (kvmppc_read_inst(vcpu) == EMULATE_DONE) { 1184 {
1099 u32 last_inst = kvmppc_get_last_inst(vcpu); 1185 u32 last_inst;
1186 int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
1187
1188 if (emul == EMULATE_DONE) {
1100 u32 dsisr; 1189 u32 dsisr;
1101 u64 dar; 1190 u64 dar;
1102 1191
@@ -1110,6 +1199,7 @@ program_interrupt:
1110 } 1199 }
1111 r = RESUME_GUEST; 1200 r = RESUME_GUEST;
1112 break; 1201 break;
1202 }
1113#ifdef CONFIG_PPC_BOOK3S_64 1203#ifdef CONFIG_PPC_BOOK3S_64
1114 case BOOK3S_INTERRUPT_FAC_UNAVAIL: 1204 case BOOK3S_INTERRUPT_FAC_UNAVAIL:
1115 kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); 1205 kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
@@ -1233,6 +1323,7 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
1233 *val = get_reg_val(id, to_book3s(vcpu)->hior); 1323 *val = get_reg_val(id, to_book3s(vcpu)->hior);
1234 break; 1324 break;
1235 case KVM_REG_PPC_LPCR: 1325 case KVM_REG_PPC_LPCR:
1326 case KVM_REG_PPC_LPCR_64:
1236 /* 1327 /*
1237 * We are only interested in the LPCR_ILE bit 1328 * We are only interested in the LPCR_ILE bit
1238 */ 1329 */
@@ -1268,6 +1359,7 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
1268 to_book3s(vcpu)->hior_explicit = true; 1359 to_book3s(vcpu)->hior_explicit = true;
1269 break; 1360 break;
1270 case KVM_REG_PPC_LPCR: 1361 case KVM_REG_PPC_LPCR:
1362 case KVM_REG_PPC_LPCR_64:
1271 kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); 1363 kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
1272 break; 1364 break;
1273 default: 1365 default:
@@ -1310,8 +1402,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
1310 p = __get_free_page(GFP_KERNEL|__GFP_ZERO); 1402 p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
1311 if (!p) 1403 if (!p)
1312 goto uninit_vcpu; 1404 goto uninit_vcpu;
1313 /* the real shared page fills the last 4k of our page */ 1405 vcpu->arch.shared = (void *)p;
1314 vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
1315#ifdef CONFIG_PPC_BOOK3S_64 1406#ifdef CONFIG_PPC_BOOK3S_64
1316 /* Always start the shared struct in native endian mode */ 1407 /* Always start the shared struct in native endian mode */
1317#ifdef __BIG_ENDIAN__ 1408#ifdef __BIG_ENDIAN__
@@ -1568,6 +1659,11 @@ static int kvmppc_core_init_vm_pr(struct kvm *kvm)
1568{ 1659{
1569 mutex_init(&kvm->arch.hpt_mutex); 1660 mutex_init(&kvm->arch.hpt_mutex);
1570 1661
1662#ifdef CONFIG_PPC_BOOK3S_64
1663 /* Start out with the default set of hcalls enabled */
1664 kvmppc_pr_init_default_hcalls(kvm);
1665#endif
1666
1571 if (firmware_has_feature(FW_FEATURE_SET_MODE)) { 1667 if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
1572 spin_lock(&kvm_global_user_count_lock); 1668 spin_lock(&kvm_global_user_count_lock);
1573 if (++kvm_global_user_count == 1) 1669 if (++kvm_global_user_count == 1)
@@ -1636,6 +1732,9 @@ static struct kvmppc_ops kvm_ops_pr = {
1636 .emulate_mfspr = kvmppc_core_emulate_mfspr_pr, 1732 .emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
1637 .fast_vcpu_kick = kvm_vcpu_kick, 1733 .fast_vcpu_kick = kvm_vcpu_kick,
1638 .arch_vm_ioctl = kvm_arch_vm_ioctl_pr, 1734 .arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
1735#ifdef CONFIG_PPC_BOOK3S_64
1736 .hcall_implemented = kvmppc_hcall_impl_pr,
1737#endif
1639}; 1738};
1640 1739
1641 1740
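The split real mode handling above only kicks in when the guest runs with MSR_DR set and MSR_IR clear, and it offsets the guest PC into a range the shadow MMU can map, remembering the fix-up in an hflag. A rough user-space model of that flow; the mask, offset and flag values below are placeholders for illustration, not the kernel's definitions:

#include <stdio.h>
#include <stdbool.h>

#define MSR_IR           0x20UL
#define MSR_DR           0x10UL
#define HFLAG_SPLIT_HACK 0x1UL
#define SPLIT_HACK_MASK  0xff000000UL   /* placeholder value */
#define SPLIT_HACK_OFFS  0xfb000000UL   /* placeholder value */

struct vcpu_model {
        unsigned long msr;
        unsigned long pc;
        unsigned long hflags;
};

static bool is_split_real(const struct vcpu_model *v)
{
        /* data relocation on, instruction relocation off */
        return (v->msr & (MSR_IR | MSR_DR)) == MSR_DR;
}

static void fixup_split_real(struct vcpu_model *v)
{
        if (!is_split_real(v))
                return;
        if (v->hflags & HFLAG_SPLIT_HACK)       /* already fixed up */
                return;
        if (v->pc & SPLIT_HACK_MASK)            /* PC not in the low range */
                return;
        v->hflags |= HFLAG_SPLIT_HACK;
        v->pc |= SPLIT_HACK_OFFS;
}

int main(void)
{
        struct vcpu_model v = { .msr = MSR_DR, .pc = 0x3000, .hflags = 0 };

        fixup_split_real(&v);
        printf("pc after fixup: 0x%lx\n", v.pc); /* 0xfb003000 with these placeholders */
        return 0;
}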
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 52a63bfe3f07..ce3c893d509b 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -40,8 +40,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
40{ 40{
41 long flags = kvmppc_get_gpr(vcpu, 4); 41 long flags = kvmppc_get_gpr(vcpu, 4);
42 long pte_index = kvmppc_get_gpr(vcpu, 5); 42 long pte_index = kvmppc_get_gpr(vcpu, 5);
43 unsigned long pteg[2 * 8]; 43 __be64 pteg[2 * 8];
44 unsigned long pteg_addr, i, *hpte; 44 __be64 *hpte;
45 unsigned long pteg_addr, i;
45 long int ret; 46 long int ret;
46 47
47 i = pte_index & 7; 48 i = pte_index & 7;
@@ -93,8 +94,8 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
93 pteg = get_pteg_addr(vcpu, pte_index); 94 pteg = get_pteg_addr(vcpu, pte_index);
94 mutex_lock(&vcpu->kvm->arch.hpt_mutex); 95 mutex_lock(&vcpu->kvm->arch.hpt_mutex);
95 copy_from_user(pte, (void __user *)pteg, sizeof(pte)); 96 copy_from_user(pte, (void __user *)pteg, sizeof(pte));
96 pte[0] = be64_to_cpu(pte[0]); 97 pte[0] = be64_to_cpu((__force __be64)pte[0]);
97 pte[1] = be64_to_cpu(pte[1]); 98 pte[1] = be64_to_cpu((__force __be64)pte[1]);
98 99
99 ret = H_NOT_FOUND; 100 ret = H_NOT_FOUND;
100 if ((pte[0] & HPTE_V_VALID) == 0 || 101 if ((pte[0] & HPTE_V_VALID) == 0 ||
@@ -171,8 +172,8 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
171 172
172 pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX); 173 pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
173 copy_from_user(pte, (void __user *)pteg, sizeof(pte)); 174 copy_from_user(pte, (void __user *)pteg, sizeof(pte));
174 pte[0] = be64_to_cpu(pte[0]); 175 pte[0] = be64_to_cpu((__force __be64)pte[0]);
175 pte[1] = be64_to_cpu(pte[1]); 176 pte[1] = be64_to_cpu((__force __be64)pte[1]);
176 177
177 /* tsl = AVPN */ 178 /* tsl = AVPN */
178 flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26; 179 flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;
@@ -211,8 +212,8 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
211 pteg = get_pteg_addr(vcpu, pte_index); 212 pteg = get_pteg_addr(vcpu, pte_index);
212 mutex_lock(&vcpu->kvm->arch.hpt_mutex); 213 mutex_lock(&vcpu->kvm->arch.hpt_mutex);
213 copy_from_user(pte, (void __user *)pteg, sizeof(pte)); 214 copy_from_user(pte, (void __user *)pteg, sizeof(pte));
214 pte[0] = be64_to_cpu(pte[0]); 215 pte[0] = be64_to_cpu((__force __be64)pte[0]);
215 pte[1] = be64_to_cpu(pte[1]); 216 pte[1] = be64_to_cpu((__force __be64)pte[1]);
216 217
217 ret = H_NOT_FOUND; 218 ret = H_NOT_FOUND;
218 if ((pte[0] & HPTE_V_VALID) == 0 || 219 if ((pte[0] & HPTE_V_VALID) == 0 ||
@@ -231,8 +232,8 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
231 232
232 rb = compute_tlbie_rb(v, r, pte_index); 233 rb = compute_tlbie_rb(v, r, pte_index);
233 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); 234 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
234 pte[0] = cpu_to_be64(pte[0]); 235 pte[0] = (__force u64)cpu_to_be64(pte[0]);
235 pte[1] = cpu_to_be64(pte[1]); 236 pte[1] = (__force u64)cpu_to_be64(pte[1]);
236 copy_to_user((void __user *)pteg, pte, sizeof(pte)); 237 copy_to_user((void __user *)pteg, pte, sizeof(pte));
237 ret = H_SUCCESS; 238 ret = H_SUCCESS;
238 239
@@ -266,6 +267,12 @@ static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
266 267
267int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) 268int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
268{ 269{
270 int rc, idx;
271
272 if (cmd <= MAX_HCALL_OPCODE &&
273 !test_bit(cmd/4, vcpu->kvm->arch.enabled_hcalls))
274 return EMULATE_FAIL;
275
269 switch (cmd) { 276 switch (cmd) {
270 case H_ENTER: 277 case H_ENTER:
271 return kvmppc_h_pr_enter(vcpu); 278 return kvmppc_h_pr_enter(vcpu);
@@ -294,8 +301,11 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
294 break; 301 break;
295 case H_RTAS: 302 case H_RTAS:
296 if (list_empty(&vcpu->kvm->arch.rtas_tokens)) 303 if (list_empty(&vcpu->kvm->arch.rtas_tokens))
297 return RESUME_HOST; 304 break;
298 if (kvmppc_rtas_hcall(vcpu)) 305 idx = srcu_read_lock(&vcpu->kvm->srcu);
306 rc = kvmppc_rtas_hcall(vcpu);
307 srcu_read_unlock(&vcpu->kvm->srcu, idx);
308 if (rc)
299 break; 309 break;
300 kvmppc_set_gpr(vcpu, 3, 0); 310 kvmppc_set_gpr(vcpu, 3, 0);
301 return EMULATE_DONE; 311 return EMULATE_DONE;
@@ -303,3 +313,61 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
303 313
304 return EMULATE_FAIL; 314 return EMULATE_FAIL;
305} 315}
316
317int kvmppc_hcall_impl_pr(unsigned long cmd)
318{
319 switch (cmd) {
320 case H_ENTER:
321 case H_REMOVE:
322 case H_PROTECT:
323 case H_BULK_REMOVE:
324 case H_PUT_TCE:
325 case H_CEDE:
326#ifdef CONFIG_KVM_XICS
327 case H_XIRR:
328 case H_CPPR:
329 case H_EOI:
330 case H_IPI:
331 case H_IPOLL:
332 case H_XIRR_X:
333#endif
334 return 1;
335 }
336 return 0;
337}
338
339/*
340 * List of hcall numbers to enable by default.
341 * For compatibility with old userspace, we enable by default
342 * all hcalls that were implemented before the hcall-enabling
343 * facility was added. Note this list should not include H_RTAS.
344 */
345static unsigned int default_hcall_list[] = {
346 H_ENTER,
347 H_REMOVE,
348 H_PROTECT,
349 H_BULK_REMOVE,
350 H_PUT_TCE,
351 H_CEDE,
352#ifdef CONFIG_KVM_XICS
353 H_XIRR,
354 H_CPPR,
355 H_EOI,
356 H_IPI,
357 H_IPOLL,
358 H_XIRR_X,
359#endif
360 0
361};
362
363void kvmppc_pr_init_default_hcalls(struct kvm *kvm)
364{
365 int i;
366 unsigned int hcall;
367
368 for (i = 0; default_hcall_list[i]; ++i) {
369 hcall = default_hcall_list[i];
370 WARN_ON(!kvmppc_hcall_impl_pr(hcall));
371 __set_bit(hcall / 4, kvm->arch.enabled_hcalls);
372 }
373}
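kvmppc_h_pr() now refuses any hcall whose bit is clear in kvm->arch.enabled_hcalls, and kvmppc_pr_init_default_hcalls() seeds that bitmap from the default list. Because the code indexes with opcode/4 (PAPR hcall opcodes are multiples of 4), each hcall gets one bit. A user-space sketch of that bitmap bookkeeping; the bound and helper names are illustrative:

#include <stdio.h>
#include <stdbool.h>

#define MAX_HCALL_OPCODE   0x450        /* illustrative upper bound */
#define HCALL_BITMAP_WORDS ((MAX_HCALL_OPCODE / 4) / 64 + 1)

static unsigned long enabled_hcalls[HCALL_BITMAP_WORDS];

static void enable_hcall(unsigned int cmd)
{
        enabled_hcalls[(cmd / 4) / 64] |= 1UL << ((cmd / 4) % 64);
}

static bool hcall_enabled(unsigned int cmd)
{
        if (cmd > MAX_HCALL_OPCODE)
                return true;    /* out-of-range calls are not filtered by the bitmap */
        return enabled_hcalls[(cmd / 4) / 64] & (1UL << ((cmd / 4) % 64));
}

int main(void)
{
        enable_hcall(0x08);     /* e.g. H_ENTER */
        printf("0x08 enabled: %d\n", hcall_enabled(0x08));
        printf("0x0c enabled: %d\n", hcall_enabled(0x0c));
        return 0;
}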
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index d1acd32a64c0..eaeb78047fb8 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -64,8 +64,12 @@
64static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, 64static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
65 u32 new_irq); 65 u32 new_irq);
66 66
67static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level, 67/*
68 bool report_status) 68 * Return value ideally indicates how the interrupt was handled, but no
69 * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS),
70 * so just return 0.
71 */
72static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
69{ 73{
70 struct ics_irq_state *state; 74 struct ics_irq_state *state;
71 struct kvmppc_ics *ics; 75 struct kvmppc_ics *ics;
@@ -82,17 +86,14 @@ static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level,
82 if (!state->exists) 86 if (!state->exists)
83 return -EINVAL; 87 return -EINVAL;
84 88
85 if (report_status)
86 return state->asserted;
87
88 /* 89 /*
89 * We set state->asserted locklessly. This should be fine as 90 * We set state->asserted locklessly. This should be fine as
90 * we are the only setter, thus concurrent access is undefined 91 * we are the only setter, thus concurrent access is undefined
91 * to begin with. 92 * to begin with.
92 */ 93 */
93 if (level == KVM_INTERRUPT_SET_LEVEL) 94 if (level == 1 || level == KVM_INTERRUPT_SET_LEVEL)
94 state->asserted = 1; 95 state->asserted = 1;
95 else if (level == KVM_INTERRUPT_UNSET) { 96 else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
96 state->asserted = 0; 97 state->asserted = 0;
97 return 0; 98 return 0;
98 } 99 }
@@ -100,7 +101,7 @@ static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level,
100 /* Attempt delivery */ 101 /* Attempt delivery */
101 icp_deliver_irq(xics, NULL, irq); 102 icp_deliver_irq(xics, NULL, irq);
102 103
103 return state->asserted; 104 return 0;
104} 105}
105 106
106static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics, 107static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
@@ -772,6 +773,8 @@ static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
772 if (state->asserted) 773 if (state->asserted)
773 icp_deliver_irq(xics, icp, irq); 774 icp_deliver_irq(xics, icp, irq);
774 775
776 kvm_notify_acked_irq(vcpu->kvm, 0, irq);
777
775 return H_SUCCESS; 778 return H_SUCCESS;
776} 779}
777 780
@@ -789,6 +792,8 @@ static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
789 icp_check_resend(xics, icp); 792 icp_check_resend(xics, icp);
790 if (icp->rm_action & XICS_RM_REJECT) 793 if (icp->rm_action & XICS_RM_REJECT)
791 icp_deliver_irq(xics, icp, icp->rm_reject); 794 icp_deliver_irq(xics, icp, icp->rm_reject);
795 if (icp->rm_action & XICS_RM_NOTIFY_EOI)
796 kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
792 797
793 icp->rm_action = 0; 798 icp->rm_action = 0;
794 799
@@ -1170,7 +1175,16 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1170{ 1175{
1171 struct kvmppc_xics *xics = kvm->arch.xics; 1176 struct kvmppc_xics *xics = kvm->arch.xics;
1172 1177
1173 return ics_deliver_irq(xics, irq, level, line_status); 1178 return ics_deliver_irq(xics, irq, level);
1179}
1180
1181int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
1182 int irq_source_id, int level, bool line_status)
1183{
1184 if (!level)
1185 return -1;
1186 return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
1187 level, line_status);
1174} 1188}
1175 1189
1176static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) 1190static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
@@ -1301,3 +1315,26 @@ void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
1301 vcpu->arch.icp = NULL; 1315 vcpu->arch.icp = NULL;
1302 vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT; 1316 vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
1303} 1317}
1318
1319static int xics_set_irq(struct kvm_kernel_irq_routing_entry *e,
1320 struct kvm *kvm, int irq_source_id, int level,
1321 bool line_status)
1322{
1323 return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
1324}
1325
1326int kvm_irq_map_gsi(struct kvm *kvm,
1327 struct kvm_kernel_irq_routing_entry *entries, int gsi)
1328{
1329 entries->gsi = gsi;
1330 entries->type = KVM_IRQ_ROUTING_IRQCHIP;
1331 entries->set = xics_set_irq;
1332 entries->irqchip.irqchip = 0;
1333 entries->irqchip.pin = gsi;
1334 return 1;
1335}
1336
1337int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
1338{
1339 return pin;
1340}
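The new XICS_RM_NOTIFY_EOI flag follows the existing real-mode pattern: the real-mode ICP handler only records that an EOI happened, and kvmppc_xics_rm_complete() replays it later as a kvm_notify_acked_irq() call from virtual mode. A minimal user-space sketch of that deferred-action pattern, with illustrative names:

#include <stdint.h>
#include <stdio.h>

#define RM_NOTIFY_EOI 0x8

struct icp_model {
        uint32_t rm_action;
        uint32_t rm_eoied_irq;
};

/* "real mode": only record what still needs to be done */
static void rm_eoi(struct icp_model *icp, uint32_t irq)
{
        icp->rm_action |= RM_NOTIFY_EOI;
        icp->rm_eoied_irq = irq;
}

/* "virtual mode": safe to call back into the rest of the hypervisor */
static void rm_complete(struct icp_model *icp)
{
        if (icp->rm_action & RM_NOTIFY_EOI)
                printf("notify acked irq %u\n", icp->rm_eoied_irq);
        icp->rm_action = 0;
}

int main(void)
{
        struct icp_model icp = { 0 };

        rm_eoi(&icp, 23);
        rm_complete(&icp);
        return 0;
}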
diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h
index dd9326c5c19b..e8aaa7a3f209 100644
--- a/arch/powerpc/kvm/book3s_xics.h
+++ b/arch/powerpc/kvm/book3s_xics.h
@@ -71,9 +71,11 @@ struct kvmppc_icp {
71#define XICS_RM_KICK_VCPU 0x1 71#define XICS_RM_KICK_VCPU 0x1
72#define XICS_RM_CHECK_RESEND 0x2 72#define XICS_RM_CHECK_RESEND 0x2
73#define XICS_RM_REJECT 0x4 73#define XICS_RM_REJECT 0x4
74#define XICS_RM_NOTIFY_EOI 0x8
74 u32 rm_action; 75 u32 rm_action;
75 struct kvm_vcpu *rm_kick_target; 76 struct kvm_vcpu *rm_kick_target;
76 u32 rm_reject; 77 u32 rm_reject;
78 u32 rm_eoied_irq;
77 79
78 /* Debug stuff for real mode */ 80 /* Debug stuff for real mode */
79 union kvmppc_icp_state rm_dbgstate; 81 union kvmppc_icp_state rm_dbgstate;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index ab62109fdfa3..b4c89fa6f109 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -51,7 +51,6 @@ unsigned long kvmppc_booke_handlers;
51 51
52struct kvm_stats_debugfs_item debugfs_entries[] = { 52struct kvm_stats_debugfs_item debugfs_entries[] = {
53 { "mmio", VCPU_STAT(mmio_exits) }, 53 { "mmio", VCPU_STAT(mmio_exits) },
54 { "dcr", VCPU_STAT(dcr_exits) },
55 { "sig", VCPU_STAT(signal_exits) }, 54 { "sig", VCPU_STAT(signal_exits) },
56 { "itlb_r", VCPU_STAT(itlb_real_miss_exits) }, 55 { "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
57 { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) }, 56 { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
@@ -185,24 +184,28 @@ static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
185 set_bit(priority, &vcpu->arch.pending_exceptions); 184 set_bit(priority, &vcpu->arch.pending_exceptions);
186} 185}
187 186
188static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, 187void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
189 ulong dear_flags, ulong esr_flags) 188 ulong dear_flags, ulong esr_flags)
190{ 189{
191 vcpu->arch.queued_dear = dear_flags; 190 vcpu->arch.queued_dear = dear_flags;
192 vcpu->arch.queued_esr = esr_flags; 191 vcpu->arch.queued_esr = esr_flags;
193 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); 192 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
194} 193}
195 194
196static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, 195void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
197 ulong dear_flags, ulong esr_flags) 196 ulong dear_flags, ulong esr_flags)
198{ 197{
199 vcpu->arch.queued_dear = dear_flags; 198 vcpu->arch.queued_dear = dear_flags;
200 vcpu->arch.queued_esr = esr_flags; 199 vcpu->arch.queued_esr = esr_flags;
201 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); 200 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
202} 201}
203 202
204static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, 203void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
205 ulong esr_flags) 204{
205 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
206}
207
208void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
206{ 209{
207 vcpu->arch.queued_esr = esr_flags; 210 vcpu->arch.queued_esr = esr_flags;
208 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); 211 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
@@ -266,13 +269,8 @@ static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
266 269
267static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) 270static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
268{ 271{
269#ifdef CONFIG_KVM_BOOKE_HV 272 kvmppc_set_srr0(vcpu, srr0);
270 mtspr(SPRN_GSRR0, srr0); 273 kvmppc_set_srr1(vcpu, srr1);
271 mtspr(SPRN_GSRR1, srr1);
272#else
273 vcpu->arch.shared->srr0 = srr0;
274 vcpu->arch.shared->srr1 = srr1;
275#endif
276} 274}
277 275
278static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) 276static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
@@ -297,51 +295,6 @@ static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
297 vcpu->arch.mcsrr1 = srr1; 295 vcpu->arch.mcsrr1 = srr1;
298} 296}
299 297
300static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
301{
302#ifdef CONFIG_KVM_BOOKE_HV
303 return mfspr(SPRN_GDEAR);
304#else
305 return vcpu->arch.shared->dar;
306#endif
307}
308
309static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
310{
311#ifdef CONFIG_KVM_BOOKE_HV
312 mtspr(SPRN_GDEAR, dear);
313#else
314 vcpu->arch.shared->dar = dear;
315#endif
316}
317
318static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
319{
320#ifdef CONFIG_KVM_BOOKE_HV
321 return mfspr(SPRN_GESR);
322#else
323 return vcpu->arch.shared->esr;
324#endif
325}
326
327static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
328{
329#ifdef CONFIG_KVM_BOOKE_HV
330 mtspr(SPRN_GESR, esr);
331#else
332 vcpu->arch.shared->esr = esr;
333#endif
334}
335
336static unsigned long get_guest_epr(struct kvm_vcpu *vcpu)
337{
338#ifdef CONFIG_KVM_BOOKE_HV
339 return mfspr(SPRN_GEPR);
340#else
341 return vcpu->arch.epr;
342#endif
343}
344
345/* Deliver the interrupt of the corresponding priority, if possible. */ 298/* Deliver the interrupt of the corresponding priority, if possible. */
346static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, 299static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
347 unsigned int priority) 300 unsigned int priority)
@@ -450,9 +403,9 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
450 403
451 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; 404 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
452 if (update_esr == true) 405 if (update_esr == true)
453 set_guest_esr(vcpu, vcpu->arch.queued_esr); 406 kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
454 if (update_dear == true) 407 if (update_dear == true)
455 set_guest_dear(vcpu, vcpu->arch.queued_dear); 408 kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
456 if (update_epr == true) { 409 if (update_epr == true) {
457 if (vcpu->arch.epr_flags & KVMPPC_EPR_USER) 410 if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
458 kvm_make_request(KVM_REQ_EPR_EXIT, vcpu); 411 kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
@@ -752,9 +705,8 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
752 * they were actually modified by emulation. */ 705 * they were actually modified by emulation. */
753 return RESUME_GUEST_NV; 706 return RESUME_GUEST_NV;
754 707
755 case EMULATE_DO_DCR: 708 case EMULATE_AGAIN:
756 run->exit_reason = KVM_EXIT_DCR; 709 return RESUME_GUEST;
757 return RESUME_HOST;
758 710
759 case EMULATE_FAIL: 711 case EMULATE_FAIL:
760 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", 712 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
@@ -866,6 +818,28 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
866 } 818 }
867} 819}
868 820
821static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
822 enum emulation_result emulated, u32 last_inst)
823{
824 switch (emulated) {
825 case EMULATE_AGAIN:
826 return RESUME_GUEST;
827
828 case EMULATE_FAIL:
829 pr_debug("%s: load instruction from guest address %lx failed\n",
830 __func__, vcpu->arch.pc);
831 /* For debugging, encode the failing instruction and
832 * report it to userspace. */
833 run->hw.hardware_exit_reason = ~0ULL << 32;
834 run->hw.hardware_exit_reason |= last_inst;
835 kvmppc_core_queue_program(vcpu, ESR_PIL);
836 return RESUME_HOST;
837
838 default:
839 BUG();
840 }
841}
842
869/** 843/**
870 * kvmppc_handle_exit 844 * kvmppc_handle_exit
871 * 845 *
@@ -877,6 +851,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
877 int r = RESUME_HOST; 851 int r = RESUME_HOST;
878 int s; 852 int s;
879 int idx; 853 int idx;
854 u32 last_inst = KVM_INST_FETCH_FAILED;
855 enum emulation_result emulated = EMULATE_DONE;
880 856
881 /* update before a new last_exit_type is rewritten */ 857 /* update before a new last_exit_type is rewritten */
882 kvmppc_update_timing_stats(vcpu); 858 kvmppc_update_timing_stats(vcpu);
@@ -884,6 +860,20 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
884 /* restart interrupts if they were meant for the host */ 860 /* restart interrupts if they were meant for the host */
885 kvmppc_restart_interrupt(vcpu, exit_nr); 861 kvmppc_restart_interrupt(vcpu, exit_nr);
886 862
863 /*
 864 * get last instruction before being preempted
865 * TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR & ESR_DATA
866 */
867 switch (exit_nr) {
868 case BOOKE_INTERRUPT_DATA_STORAGE:
869 case BOOKE_INTERRUPT_DTLB_MISS:
870 case BOOKE_INTERRUPT_HV_PRIV:
871 emulated = kvmppc_get_last_inst(vcpu, false, &last_inst);
872 break;
873 default:
874 break;
875 }
876
887 local_irq_enable(); 877 local_irq_enable();
888 878
889 trace_kvm_exit(exit_nr, vcpu); 879 trace_kvm_exit(exit_nr, vcpu);
@@ -892,6 +882,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
892 run->exit_reason = KVM_EXIT_UNKNOWN; 882 run->exit_reason = KVM_EXIT_UNKNOWN;
893 run->ready_for_interrupt_injection = 1; 883 run->ready_for_interrupt_injection = 1;
894 884
885 if (emulated != EMULATE_DONE) {
886 r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst);
887 goto out;
888 }
889
895 switch (exit_nr) { 890 switch (exit_nr) {
896 case BOOKE_INTERRUPT_MACHINE_CHECK: 891 case BOOKE_INTERRUPT_MACHINE_CHECK:
897 printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR)); 892 printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
@@ -1181,6 +1176,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
1181 BUG(); 1176 BUG();
1182 } 1177 }
1183 1178
1179out:
1184 /* 1180 /*
1185 * To avoid clobbering exit_reason, only check for signals if we 1181 * To avoid clobbering exit_reason, only check for signals if we
1186 * aren't already exiting to userspace for some other reason. 1182 * aren't already exiting to userspace for some other reason.
@@ -1265,17 +1261,17 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1265 regs->lr = vcpu->arch.lr; 1261 regs->lr = vcpu->arch.lr;
1266 regs->xer = kvmppc_get_xer(vcpu); 1262 regs->xer = kvmppc_get_xer(vcpu);
1267 regs->msr = vcpu->arch.shared->msr; 1263 regs->msr = vcpu->arch.shared->msr;
1268 regs->srr0 = vcpu->arch.shared->srr0; 1264 regs->srr0 = kvmppc_get_srr0(vcpu);
1269 regs->srr1 = vcpu->arch.shared->srr1; 1265 regs->srr1 = kvmppc_get_srr1(vcpu);
1270 regs->pid = vcpu->arch.pid; 1266 regs->pid = vcpu->arch.pid;
1271 regs->sprg0 = vcpu->arch.shared->sprg0; 1267 regs->sprg0 = kvmppc_get_sprg0(vcpu);
1272 regs->sprg1 = vcpu->arch.shared->sprg1; 1268 regs->sprg1 = kvmppc_get_sprg1(vcpu);
1273 regs->sprg2 = vcpu->arch.shared->sprg2; 1269 regs->sprg2 = kvmppc_get_sprg2(vcpu);
1274 regs->sprg3 = vcpu->arch.shared->sprg3; 1270 regs->sprg3 = kvmppc_get_sprg3(vcpu);
1275 regs->sprg4 = vcpu->arch.shared->sprg4; 1271 regs->sprg4 = kvmppc_get_sprg4(vcpu);
1276 regs->sprg5 = vcpu->arch.shared->sprg5; 1272 regs->sprg5 = kvmppc_get_sprg5(vcpu);
1277 regs->sprg6 = vcpu->arch.shared->sprg6; 1273 regs->sprg6 = kvmppc_get_sprg6(vcpu);
1278 regs->sprg7 = vcpu->arch.shared->sprg7; 1274 regs->sprg7 = kvmppc_get_sprg7(vcpu);
1279 1275
1280 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) 1276 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
1281 regs->gpr[i] = kvmppc_get_gpr(vcpu, i); 1277 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -1293,17 +1289,17 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1293 vcpu->arch.lr = regs->lr; 1289 vcpu->arch.lr = regs->lr;
1294 kvmppc_set_xer(vcpu, regs->xer); 1290 kvmppc_set_xer(vcpu, regs->xer);
1295 kvmppc_set_msr(vcpu, regs->msr); 1291 kvmppc_set_msr(vcpu, regs->msr);
1296 vcpu->arch.shared->srr0 = regs->srr0; 1292 kvmppc_set_srr0(vcpu, regs->srr0);
1297 vcpu->arch.shared->srr1 = regs->srr1; 1293 kvmppc_set_srr1(vcpu, regs->srr1);
1298 kvmppc_set_pid(vcpu, regs->pid); 1294 kvmppc_set_pid(vcpu, regs->pid);
1299 vcpu->arch.shared->sprg0 = regs->sprg0; 1295 kvmppc_set_sprg0(vcpu, regs->sprg0);
1300 vcpu->arch.shared->sprg1 = regs->sprg1; 1296 kvmppc_set_sprg1(vcpu, regs->sprg1);
1301 vcpu->arch.shared->sprg2 = regs->sprg2; 1297 kvmppc_set_sprg2(vcpu, regs->sprg2);
1302 vcpu->arch.shared->sprg3 = regs->sprg3; 1298 kvmppc_set_sprg3(vcpu, regs->sprg3);
1303 vcpu->arch.shared->sprg4 = regs->sprg4; 1299 kvmppc_set_sprg4(vcpu, regs->sprg4);
1304 vcpu->arch.shared->sprg5 = regs->sprg5; 1300 kvmppc_set_sprg5(vcpu, regs->sprg5);
1305 vcpu->arch.shared->sprg6 = regs->sprg6; 1301 kvmppc_set_sprg6(vcpu, regs->sprg6);
1306 vcpu->arch.shared->sprg7 = regs->sprg7; 1302 kvmppc_set_sprg7(vcpu, regs->sprg7);
1307 1303
1308 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) 1304 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
1309 kvmppc_set_gpr(vcpu, i, regs->gpr[i]); 1305 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
@@ -1321,8 +1317,8 @@ static void get_sregs_base(struct kvm_vcpu *vcpu,
1321 sregs->u.e.csrr0 = vcpu->arch.csrr0; 1317 sregs->u.e.csrr0 = vcpu->arch.csrr0;
1322 sregs->u.e.csrr1 = vcpu->arch.csrr1; 1318 sregs->u.e.csrr1 = vcpu->arch.csrr1;
1323 sregs->u.e.mcsr = vcpu->arch.mcsr; 1319 sregs->u.e.mcsr = vcpu->arch.mcsr;
1324 sregs->u.e.esr = get_guest_esr(vcpu); 1320 sregs->u.e.esr = kvmppc_get_esr(vcpu);
1325 sregs->u.e.dear = get_guest_dear(vcpu); 1321 sregs->u.e.dear = kvmppc_get_dar(vcpu);
1326 sregs->u.e.tsr = vcpu->arch.tsr; 1322 sregs->u.e.tsr = vcpu->arch.tsr;
1327 sregs->u.e.tcr = vcpu->arch.tcr; 1323 sregs->u.e.tcr = vcpu->arch.tcr;
1328 sregs->u.e.dec = kvmppc_get_dec(vcpu, tb); 1324 sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
@@ -1339,8 +1335,8 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
1339 vcpu->arch.csrr0 = sregs->u.e.csrr0; 1335 vcpu->arch.csrr0 = sregs->u.e.csrr0;
1340 vcpu->arch.csrr1 = sregs->u.e.csrr1; 1336 vcpu->arch.csrr1 = sregs->u.e.csrr1;
1341 vcpu->arch.mcsr = sregs->u.e.mcsr; 1337 vcpu->arch.mcsr = sregs->u.e.mcsr;
1342 set_guest_esr(vcpu, sregs->u.e.esr); 1338 kvmppc_set_esr(vcpu, sregs->u.e.esr);
1343 set_guest_dear(vcpu, sregs->u.e.dear); 1339 kvmppc_set_dar(vcpu, sregs->u.e.dear);
1344 vcpu->arch.vrsave = sregs->u.e.vrsave; 1340 vcpu->arch.vrsave = sregs->u.e.vrsave;
1345 kvmppc_set_tcr(vcpu, sregs->u.e.tcr); 1341 kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
1346 1342
@@ -1493,7 +1489,7 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1493 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2); 1489 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2);
1494 break; 1490 break;
1495 case KVM_REG_PPC_EPR: { 1491 case KVM_REG_PPC_EPR: {
1496 u32 epr = get_guest_epr(vcpu); 1492 u32 epr = kvmppc_get_epr(vcpu);
1497 val = get_reg_val(reg->id, epr); 1493 val = get_reg_val(reg->id, epr);
1498 break; 1494 break;
1499 } 1495 }
@@ -1788,6 +1784,57 @@ void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
1788#endif 1784#endif
1789} 1785}
1790 1786
1787int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
1788 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
1789{
1790 int gtlb_index;
1791 gpa_t gpaddr;
1792
1793#ifdef CONFIG_KVM_E500V2
1794 if (!(vcpu->arch.shared->msr & MSR_PR) &&
1795 (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
1796 pte->eaddr = eaddr;
1797 pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) |
1798 (eaddr & ~PAGE_MASK);
1799 pte->vpage = eaddr >> PAGE_SHIFT;
1800 pte->may_read = true;
1801 pte->may_write = true;
1802 pte->may_execute = true;
1803
1804 return 0;
1805 }
1806#endif
1807
1808 /* Check the guest TLB. */
1809 switch (xlid) {
1810 case XLATE_INST:
1811 gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
1812 break;
1813 case XLATE_DATA:
1814 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
1815 break;
1816 default:
1817 BUG();
1818 }
1819
1820 /* Do we have a TLB entry at all? */
1821 if (gtlb_index < 0)
1822 return -ENOENT;
1823
1824 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
1825
1826 pte->eaddr = eaddr;
1827 pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK);
1828 pte->vpage = eaddr >> PAGE_SHIFT;
1829
1830 /* XXX read permissions from the guest TLB */
1831 pte->may_read = true;
1832 pte->may_write = true;
1833 pte->may_execute = true;
1834
1835 return 0;
1836}
1837
1791int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 1838int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1792 struct kvm_guest_debug *dbg) 1839 struct kvm_guest_debug *dbg)
1793{ 1840{
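When an instruction load fails with EMULATE_FAIL, kvmppc_resume_inst_load() above packs the failing word into run->hw.hardware_exit_reason: the upper 32 bits are set to all ones as a marker and the lower 32 bits carry the instruction word (or KVM_INST_FETCH_FAILED). A short sketch of how a userspace consumer might decode that value; the decode itself is illustrative, not a defined KVM ABI:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t last_inst = 0xdeadbeef;        /* illustrative value */
        uint64_t reason = ~0ULL << 32;          /* marker in the upper half */
        reason |= last_inst;

        if ((reason >> 32) == 0xffffffffu)
                printf("inst load failed, inst = 0x%08x\n",
                       (unsigned int)reason);
        return 0;
}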
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index b632cd35919b..f753543c56fa 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -99,13 +99,6 @@ enum int_class {
99 99
100void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type); 100void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
101 101
102extern void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu);
103extern int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
104 unsigned int inst, int *advance);
105extern int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn,
106 ulong spr_val);
107extern int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn,
108 ulong *spr_val);
109extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu); 102extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
110extern int kvmppc_core_emulate_op_e500(struct kvm_run *run, 103extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
111 struct kvm_vcpu *vcpu, 104 struct kvm_vcpu *vcpu,
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 27a4b2877c10..28c158881d23 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -165,16 +165,16 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
165 * guest (PR-mode only). 165 * guest (PR-mode only).
166 */ 166 */
167 case SPRN_SPRG4: 167 case SPRN_SPRG4:
168 vcpu->arch.shared->sprg4 = spr_val; 168 kvmppc_set_sprg4(vcpu, spr_val);
169 break; 169 break;
170 case SPRN_SPRG5: 170 case SPRN_SPRG5:
171 vcpu->arch.shared->sprg5 = spr_val; 171 kvmppc_set_sprg5(vcpu, spr_val);
172 break; 172 break;
173 case SPRN_SPRG6: 173 case SPRN_SPRG6:
174 vcpu->arch.shared->sprg6 = spr_val; 174 kvmppc_set_sprg6(vcpu, spr_val);
175 break; 175 break;
176 case SPRN_SPRG7: 176 case SPRN_SPRG7:
177 vcpu->arch.shared->sprg7 = spr_val; 177 kvmppc_set_sprg7(vcpu, spr_val);
178 break; 178 break;
179 179
180 case SPRN_IVPR: 180 case SPRN_IVPR:
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 2c6deb5ef2fe..84c308a9a371 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -21,7 +21,6 @@
21#include <asm/ppc_asm.h> 21#include <asm/ppc_asm.h>
22#include <asm/kvm_asm.h> 22#include <asm/kvm_asm.h>
23#include <asm/reg.h> 23#include <asm/reg.h>
24#include <asm/mmu-44x.h>
25#include <asm/page.h> 24#include <asm/page.h>
26#include <asm/asm-offsets.h> 25#include <asm/asm-offsets.h>
27 26
@@ -424,10 +423,6 @@ lightweight_exit:
424 mtspr SPRN_PID1, r3 423 mtspr SPRN_PID1, r3
425#endif 424#endif
426 425
427#ifdef CONFIG_44x
428 iccci 0, 0 /* XXX hack */
429#endif
430
431 /* Load some guest volatiles. */ 426 /* Load some guest volatiles. */
432 lwz r0, VCPU_GPR(R0)(r4) 427 lwz r0, VCPU_GPR(R0)(r4)
433 lwz r2, VCPU_GPR(R2)(r4) 428 lwz r2, VCPU_GPR(R2)(r4)
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index a1712b818a5f..e9fa56a911fd 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -24,12 +24,10 @@
24#include <asm/ppc_asm.h> 24#include <asm/ppc_asm.h>
25#include <asm/kvm_asm.h> 25#include <asm/kvm_asm.h>
26#include <asm/reg.h> 26#include <asm/reg.h>
27#include <asm/mmu-44x.h>
28#include <asm/page.h> 27#include <asm/page.h>
29#include <asm/asm-compat.h> 28#include <asm/asm-compat.h>
30#include <asm/asm-offsets.h> 29#include <asm/asm-offsets.h>
31#include <asm/bitsperlong.h> 30#include <asm/bitsperlong.h>
32#include <asm/thread_info.h>
33 31
34#ifdef CONFIG_64BIT 32#ifdef CONFIG_64BIT
35#include <asm/exception-64e.h> 33#include <asm/exception-64e.h>
@@ -122,38 +120,14 @@
1221: 1201:
123 121
124 .if \flags & NEED_EMU 122 .if \flags & NEED_EMU
125 /*
126 * This assumes you have external PID support.
127 * To support a bookehv CPU without external PID, you'll
128 * need to look up the TLB entry and create a temporary mapping.
129 *
130 * FIXME: we don't currently handle if the lwepx faults. PR-mode
131 * booke doesn't handle it either. Since Linux doesn't use
132 * broadcast tlbivax anymore, the only way this should happen is
133 * if the guest maps its memory execute-but-not-read, or if we
134 * somehow take a TLB miss in the middle of this entry code and
135 * evict the relevant entry. On e500mc, all kernel lowmem is
136 * bolted into TLB1 large page mappings, and we don't use
137 * broadcast invalidates, so we should not take a TLB miss here.
138 *
139 * Later we'll need to deal with faults here. Disallowing guest
140 * mappings that are execute-but-not-read could be an option on
141 * e500mc, but not on chips with an LRAT if it is used.
142 */
143
144 mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */
145 PPC_STL r15, VCPU_GPR(R15)(r4) 123 PPC_STL r15, VCPU_GPR(R15)(r4)
146 PPC_STL r16, VCPU_GPR(R16)(r4) 124 PPC_STL r16, VCPU_GPR(R16)(r4)
147 PPC_STL r17, VCPU_GPR(R17)(r4) 125 PPC_STL r17, VCPU_GPR(R17)(r4)
148 PPC_STL r18, VCPU_GPR(R18)(r4) 126 PPC_STL r18, VCPU_GPR(R18)(r4)
149 PPC_STL r19, VCPU_GPR(R19)(r4) 127 PPC_STL r19, VCPU_GPR(R19)(r4)
150 mr r8, r3
151 PPC_STL r20, VCPU_GPR(R20)(r4) 128 PPC_STL r20, VCPU_GPR(R20)(r4)
152 rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS
153 PPC_STL r21, VCPU_GPR(R21)(r4) 129 PPC_STL r21, VCPU_GPR(R21)(r4)
154 rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR
155 PPC_STL r22, VCPU_GPR(R22)(r4) 130 PPC_STL r22, VCPU_GPR(R22)(r4)
156 rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID
157 PPC_STL r23, VCPU_GPR(R23)(r4) 131 PPC_STL r23, VCPU_GPR(R23)(r4)
158 PPC_STL r24, VCPU_GPR(R24)(r4) 132 PPC_STL r24, VCPU_GPR(R24)(r4)
159 PPC_STL r25, VCPU_GPR(R25)(r4) 133 PPC_STL r25, VCPU_GPR(R25)(r4)
@@ -163,33 +137,15 @@
163 PPC_STL r29, VCPU_GPR(R29)(r4) 137 PPC_STL r29, VCPU_GPR(R29)(r4)
164 PPC_STL r30, VCPU_GPR(R30)(r4) 138 PPC_STL r30, VCPU_GPR(R30)(r4)
165 PPC_STL r31, VCPU_GPR(R31)(r4) 139 PPC_STL r31, VCPU_GPR(R31)(r4)
166 mtspr SPRN_EPLC, r8
167
168 /* disable preemption, so we are sure we hit the fixup handler */
169 CURRENT_THREAD_INFO(r8, r1)
170 li r7, 1
171 stw r7, TI_PREEMPT(r8)
172
173 isync
174 140
175 /* 141 /*
176 * In case the read goes wrong, we catch it and write an invalid value 142 * We don't use external PID support. lwepx faults would need to be
177 * in LAST_INST instead. 143 * handled by KVM and this implies aditional code in DO_KVM (for
144 * DTB_MISS, DSI and LRAT) to check ESR[EPID] and EPLC[EGS] which
 145 * is too intrusive for the host. Get last instruction in
146 * kvmppc_get_last_inst().
178 */ 147 */
1791: lwepx r9, 0, r5 148 li r9, KVM_INST_FETCH_FAILED
1802:
181.section .fixup, "ax"
1823: li r9, KVM_INST_FETCH_FAILED
183 b 2b
184.previous
185.section __ex_table,"a"
186 PPC_LONG_ALIGN
187 PPC_LONG 1b,3b
188.previous
189
190 mtspr SPRN_EPLC, r3
191 li r7, 0
192 stw r7, TI_PREEMPT(r8)
193 stw r9, VCPU_LAST_INST(r4) 149 stw r9, VCPU_LAST_INST(r4)
194 .endif 150 .endif
195 151
@@ -441,6 +397,7 @@ _GLOBAL(kvmppc_resume_host)
441#ifdef CONFIG_64BIT 397#ifdef CONFIG_64BIT
442 PPC_LL r3, PACA_SPRG_VDSO(r13) 398 PPC_LL r3, PACA_SPRG_VDSO(r13)
443#endif 399#endif
400 mfspr r5, SPRN_SPRG9
444 PPC_STD(r6, VCPU_SHARED_SPRG4, r11) 401 PPC_STD(r6, VCPU_SHARED_SPRG4, r11)
445 mfspr r8, SPRN_SPRG6 402 mfspr r8, SPRN_SPRG6
446 PPC_STD(r7, VCPU_SHARED_SPRG5, r11) 403 PPC_STD(r7, VCPU_SHARED_SPRG5, r11)
@@ -448,6 +405,7 @@ _GLOBAL(kvmppc_resume_host)
448#ifdef CONFIG_64BIT 405#ifdef CONFIG_64BIT
449 mtspr SPRN_SPRG_VDSO_WRITE, r3 406 mtspr SPRN_SPRG_VDSO_WRITE, r3
450#endif 407#endif
408 PPC_STD(r5, VCPU_SPRG9, r4)
451 PPC_STD(r8, VCPU_SHARED_SPRG6, r11) 409 PPC_STD(r8, VCPU_SHARED_SPRG6, r11)
452 mfxer r3 410 mfxer r3
453 PPC_STD(r9, VCPU_SHARED_SPRG7, r11) 411 PPC_STD(r9, VCPU_SHARED_SPRG7, r11)
@@ -682,7 +640,9 @@ lightweight_exit:
682 mtspr SPRN_SPRG5W, r6 640 mtspr SPRN_SPRG5W, r6
683 PPC_LD(r8, VCPU_SHARED_SPRG7, r11) 641 PPC_LD(r8, VCPU_SHARED_SPRG7, r11)
684 mtspr SPRN_SPRG6W, r7 642 mtspr SPRN_SPRG6W, r7
643 PPC_LD(r5, VCPU_SPRG9, r4)
685 mtspr SPRN_SPRG7W, r8 644 mtspr SPRN_SPRG7W, r8
645 mtspr SPRN_SPRG9, r5
686 646
687 /* Load some guest volatiles. */ 647 /* Load some guest volatiles. */
688 PPC_LL r3, VCPU_LR(r4) 648 PPC_LL r3, VCPU_LR(r4)
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 002d51764143..c99c40e9182a 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -250,6 +250,14 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va
250 spr_val); 250 spr_val);
251 break; 251 break;
252 252
253 case SPRN_PWRMGTCR0:
254 /*
255 * Guest relies on host power management configurations
256 * Treat the request as a general store
257 */
258 vcpu->arch.pwrmgtcr0 = spr_val;
259 break;
260
253 /* extra exceptions */ 261 /* extra exceptions */
254 case SPRN_IVOR32: 262 case SPRN_IVOR32:
255 vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val; 263 vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
@@ -368,6 +376,10 @@ int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_v
368 *spr_val = vcpu->arch.eptcfg; 376 *spr_val = vcpu->arch.eptcfg;
369 break; 377 break;
370 378
379 case SPRN_PWRMGTCR0:
380 *spr_val = vcpu->arch.pwrmgtcr0;
381 break;
382
371 /* extra exceptions */ 383 /* extra exceptions */
372 case SPRN_IVOR32: 384 case SPRN_IVOR32:
373 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; 385 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 86903d3f5a03..08f14bb57897 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -107,11 +107,15 @@ static u32 get_host_mas0(unsigned long eaddr)
107{ 107{
108 unsigned long flags; 108 unsigned long flags;
109 u32 mas0; 109 u32 mas0;
110 u32 mas4;
110 111
111 local_irq_save(flags); 112 local_irq_save(flags);
112 mtspr(SPRN_MAS6, 0); 113 mtspr(SPRN_MAS6, 0);
114 mas4 = mfspr(SPRN_MAS4);
115 mtspr(SPRN_MAS4, mas4 & ~MAS4_TLBSEL_MASK);
113 asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET)); 116 asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
114 mas0 = mfspr(SPRN_MAS0); 117 mas0 = mfspr(SPRN_MAS0);
118 mtspr(SPRN_MAS4, mas4);
115 local_irq_restore(flags); 119 local_irq_restore(flags);
116 120
117 return mas0; 121 return mas0;
@@ -607,6 +611,104 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
607 } 611 }
608} 612}
609 613
614#ifdef CONFIG_KVM_BOOKE_HV
615int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
616 u32 *instr)
617{
618 gva_t geaddr;
619 hpa_t addr;
620 hfn_t pfn;
621 hva_t eaddr;
622 u32 mas1, mas2, mas3;
623 u64 mas7_mas3;
624 struct page *page;
625 unsigned int addr_space, psize_shift;
626 bool pr;
627 unsigned long flags;
628
629 /* Search TLB for guest pc to get the real address */
630 geaddr = kvmppc_get_pc(vcpu);
631
632 addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG;
633
634 local_irq_save(flags);
635 mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
636 mtspr(SPRN_MAS5, MAS5_SGS | vcpu->kvm->arch.lpid);
637 asm volatile("tlbsx 0, %[geaddr]\n" : :
638 [geaddr] "r" (geaddr));
639 mtspr(SPRN_MAS5, 0);
640 mtspr(SPRN_MAS8, 0);
641 mas1 = mfspr(SPRN_MAS1);
642 mas2 = mfspr(SPRN_MAS2);
643 mas3 = mfspr(SPRN_MAS3);
644#ifdef CONFIG_64BIT
645 mas7_mas3 = mfspr(SPRN_MAS7_MAS3);
646#else
647 mas7_mas3 = ((u64)mfspr(SPRN_MAS7) << 32) | mas3;
648#endif
649 local_irq_restore(flags);
650
651 /*
652 * If the TLB entry for guest pc was evicted, return to the guest.
 653 * A valid TLB entry will most likely be there on the next attempt.
654 */
655 if (!(mas1 & MAS1_VALID))
656 return EMULATE_AGAIN;
657
658 /*
 659 * Another thread may rewrite the TLB entry in parallel; don't
660 * execute from the address if the execute permission is not set
661 */
662 pr = vcpu->arch.shared->msr & MSR_PR;
663 if (unlikely((pr && !(mas3 & MAS3_UX)) ||
664 (!pr && !(mas3 & MAS3_SX)))) {
665 pr_err_ratelimited(
666 "%s: Instuction emulation from guest addres %08lx without execute permission\n",
667 __func__, geaddr);
668 return EMULATE_AGAIN;
669 }
670
671 /*
672 * The real address will be mapped by a cacheable, memory coherent,
673 * write-back page. Check for mismatches when LRAT is used.
674 */
675 if (has_feature(vcpu, VCPU_FTR_MMU_V2) &&
676 unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) {
677 pr_err_ratelimited(
 678			"%s: Instruction emulation from guest address %08lx mismatches storage attributes\n",
679 __func__, geaddr);
680 return EMULATE_AGAIN;
681 }
682
683 /* Get pfn */
684 psize_shift = MAS1_GET_TSIZE(mas1) + 10;
685 addr = (mas7_mas3 & (~0ULL << psize_shift)) |
686 (geaddr & ((1ULL << psize_shift) - 1ULL));
687 pfn = addr >> PAGE_SHIFT;
688
689 /* Guard against emulation from devices area */
690 if (unlikely(!page_is_ram(pfn))) {
 691		pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n",
692 __func__, addr);
693 return EMULATE_AGAIN;
694 }
695
696 /* Map a page and get guest's instruction */
697 page = pfn_to_page(pfn);
698 eaddr = (unsigned long)kmap_atomic(page);
699 *instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK));
700 kunmap_atomic((u32 *)eaddr);
701
702 return EMULATE_DONE;
703}
704#else
705int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
706 u32 *instr)
707{
708 return EMULATE_AGAIN;
709}
710#endif
711
610/************* MMU Notifiers *************/ 712/************* MMU Notifiers *************/
611 713
612int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) 714int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
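To make the address reassembly in kvmppc_load_last_inst() above concrete, here is a small self-contained sketch of the same arithmetic with made-up MAS values; psize_shift = 12 stands in for a 4 KB page, and all numbers are hypothetical:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical inputs, purely for illustration. */
	uint64_t mas7_mas3   = 0x000000012345f03fULL; /* RPN plus permission bits */
	uint64_t geaddr      = 0xc0000000000004a8ULL; /* guest pc */
	unsigned psize_shift = 12;                    /* 2^12 = 4 KB page */

	/* Same computation as the hunk above: take the page frame bits
	 * from MAS7_MAS3 and the in-page offset from the guest pc. */
	uint64_t addr = (mas7_mas3 & (~0ULL << psize_shift)) |
			(geaddr & ((1ULL << psize_shift) - 1ULL));

	printf("host real address = 0x%llx\n", (unsigned long long)addr);
	return 0;
}

The page_is_ram() and kmap_atomic() steps that follow in the real code are what turn this physical address into a host mapping the instruction can be read from.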
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 17e456279224..164bad2a19bf 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -110,7 +110,7 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
110{ 110{
111} 111}
112 112
113static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu); 113static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid);
114 114
115static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu) 115static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
116{ 116{
@@ -141,9 +141,9 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
141 mtspr(SPRN_GESR, vcpu->arch.shared->esr); 141 mtspr(SPRN_GESR, vcpu->arch.shared->esr);
142 142
143 if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || 143 if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
144 __get_cpu_var(last_vcpu_on_cpu) != vcpu) { 144 __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] != vcpu) {
145 kvmppc_e500_tlbil_all(vcpu_e500); 145 kvmppc_e500_tlbil_all(vcpu_e500);
146 __get_cpu_var(last_vcpu_on_cpu) = vcpu; 146 __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] = vcpu;
147 } 147 }
148 148
149 kvmppc_load_guest_fp(vcpu); 149 kvmppc_load_guest_fp(vcpu);
@@ -267,14 +267,32 @@ static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu,
267static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id, 267static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
268 union kvmppc_one_reg *val) 268 union kvmppc_one_reg *val)
269{ 269{
270 int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); 270 int r = 0;
271
272 switch (id) {
273 case KVM_REG_PPC_SPRG9:
274 *val = get_reg_val(id, vcpu->arch.sprg9);
275 break;
276 default:
277 r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
278 }
279
271 return r; 280 return r;
272} 281}
273 282
274static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id, 283static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
275 union kvmppc_one_reg *val) 284 union kvmppc_one_reg *val)
276{ 285{
277 int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val); 286 int r = 0;
287
288 switch (id) {
289 case KVM_REG_PPC_SPRG9:
290 vcpu->arch.sprg9 = set_reg_val(id, *val);
291 break;
292 default:
293 r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
294 }
295
278 return r; 296 return r;
279} 297}
280 298
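The SPRG9 cases added above hook into the generic ONE_REG interface, so userspace can read or write the register with the usual KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. A minimal userspace sketch, assuming a vcpu_fd from KVM_CREATE_VCPU and the KVM_REG_PPC_SPRG9 id exported by this series:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read SPRG9 from a vcpu through the ONE_REG interface. */
static int read_sprg9(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_SPRG9,
		.addr = (uint64_t)(uintptr_t)val,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}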
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index da86d9ba3476..e96b50d0bdab 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -207,36 +207,28 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
207 return emulated; 207 return emulated;
208} 208}
209 209
210/* XXX to do:
211 * lhax
212 * lhaux
213 * lswx
214 * lswi
215 * stswx
216 * stswi
217 * lha
218 * lhau
219 * lmw
220 * stmw
221 *
222 */
223/* XXX Should probably auto-generate instruction decoding for a particular core 210/* XXX Should probably auto-generate instruction decoding for a particular core
224 * from opcode tables in the future. */ 211 * from opcode tables in the future. */
225int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) 212int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
226{ 213{
227 u32 inst = kvmppc_get_last_inst(vcpu); 214 u32 inst;
228 int ra = get_ra(inst); 215 int rs, rt, sprn;
229 int rs = get_rs(inst); 216 enum emulation_result emulated;
230 int rt = get_rt(inst);
231 int sprn = get_sprn(inst);
232 enum emulation_result emulated = EMULATE_DONE;
233 int advance = 1; 217 int advance = 1;
234 218
235 /* this default type might be overwritten by subcategories */ 219 /* this default type might be overwritten by subcategories */
236 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); 220 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
237 221
222 emulated = kvmppc_get_last_inst(vcpu, false, &inst);
223 if (emulated != EMULATE_DONE)
224 return emulated;
225
238 pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); 226 pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
239 227
228 rs = get_rs(inst);
229 rt = get_rt(inst);
230 sprn = get_sprn(inst);
231
240 switch (get_op(inst)) { 232 switch (get_op(inst)) {
241 case OP_TRAP: 233 case OP_TRAP:
242#ifdef CONFIG_PPC_BOOK3S 234#ifdef CONFIG_PPC_BOOK3S
@@ -264,200 +256,24 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
264#endif 256#endif
265 advance = 0; 257 advance = 0;
266 break; 258 break;
267 case OP_31_XOP_LWZX:
268 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
269 break;
270
271 case OP_31_XOP_LBZX:
272 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
273 break;
274
275 case OP_31_XOP_LBZUX:
276 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
277 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
278 break;
279
280 case OP_31_XOP_STWX:
281 emulated = kvmppc_handle_store(run, vcpu,
282 kvmppc_get_gpr(vcpu, rs),
283 4, 1);
284 break;
285
286 case OP_31_XOP_STBX:
287 emulated = kvmppc_handle_store(run, vcpu,
288 kvmppc_get_gpr(vcpu, rs),
289 1, 1);
290 break;
291
292 case OP_31_XOP_STBUX:
293 emulated = kvmppc_handle_store(run, vcpu,
294 kvmppc_get_gpr(vcpu, rs),
295 1, 1);
296 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
297 break;
298
299 case OP_31_XOP_LHAX:
300 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
301 break;
302
303 case OP_31_XOP_LHZX:
304 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
305 break;
306
307 case OP_31_XOP_LHZUX:
308 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
309 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
310 break;
311 259
312 case OP_31_XOP_MFSPR: 260 case OP_31_XOP_MFSPR:
313 emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt); 261 emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
314 break; 262 break;
315 263
316 case OP_31_XOP_STHX:
317 emulated = kvmppc_handle_store(run, vcpu,
318 kvmppc_get_gpr(vcpu, rs),
319 2, 1);
320 break;
321
322 case OP_31_XOP_STHUX:
323 emulated = kvmppc_handle_store(run, vcpu,
324 kvmppc_get_gpr(vcpu, rs),
325 2, 1);
326 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
327 break;
328
329 case OP_31_XOP_MTSPR: 264 case OP_31_XOP_MTSPR:
330 emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs); 265 emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
331 break; 266 break;
332 267
333 case OP_31_XOP_DCBST:
334 case OP_31_XOP_DCBF:
335 case OP_31_XOP_DCBI:
336 /* Do nothing. The guest is performing dcbi because
337 * hardware DMA is not snooped by the dcache, but
338 * emulated DMA either goes through the dcache as
339 * normal writes, or the host kernel has handled dcache
340 * coherence. */
341 break;
342
343 case OP_31_XOP_LWBRX:
344 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
345 break;
346
347 case OP_31_XOP_TLBSYNC: 268 case OP_31_XOP_TLBSYNC:
348 break; 269 break;
349 270
350 case OP_31_XOP_STWBRX:
351 emulated = kvmppc_handle_store(run, vcpu,
352 kvmppc_get_gpr(vcpu, rs),
353 4, 0);
354 break;
355
356 case OP_31_XOP_LHBRX:
357 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
358 break;
359
360 case OP_31_XOP_STHBRX:
361 emulated = kvmppc_handle_store(run, vcpu,
362 kvmppc_get_gpr(vcpu, rs),
363 2, 0);
364 break;
365
366 default: 271 default:
367 /* Attempt core-specific emulation below. */ 272 /* Attempt core-specific emulation below. */
368 emulated = EMULATE_FAIL; 273 emulated = EMULATE_FAIL;
369 } 274 }
370 break; 275 break;
371 276
372 case OP_LWZ:
373 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
374 break;
375
376 /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
377 case OP_LD:
378 rt = get_rt(inst);
379 emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
380 break;
381
382 case OP_LWZU:
383 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
384 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
385 break;
386
387 case OP_LBZ:
388 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
389 break;
390
391 case OP_LBZU:
392 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
393 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
394 break;
395
396 case OP_STW:
397 emulated = kvmppc_handle_store(run, vcpu,
398 kvmppc_get_gpr(vcpu, rs),
399 4, 1);
400 break;
401
402 /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
403 case OP_STD:
404 rs = get_rs(inst);
405 emulated = kvmppc_handle_store(run, vcpu,
406 kvmppc_get_gpr(vcpu, rs),
407 8, 1);
408 break;
409
410 case OP_STWU:
411 emulated = kvmppc_handle_store(run, vcpu,
412 kvmppc_get_gpr(vcpu, rs),
413 4, 1);
414 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
415 break;
416
417 case OP_STB:
418 emulated = kvmppc_handle_store(run, vcpu,
419 kvmppc_get_gpr(vcpu, rs),
420 1, 1);
421 break;
422
423 case OP_STBU:
424 emulated = kvmppc_handle_store(run, vcpu,
425 kvmppc_get_gpr(vcpu, rs),
426 1, 1);
427 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
428 break;
429
430 case OP_LHZ:
431 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
432 break;
433
434 case OP_LHZU:
435 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
436 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
437 break;
438
439 case OP_LHA:
440 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
441 break;
442
443 case OP_LHAU:
444 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
445 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
446 break;
447
448 case OP_STH:
449 emulated = kvmppc_handle_store(run, vcpu,
450 kvmppc_get_gpr(vcpu, rs),
451 2, 1);
452 break;
453
454 case OP_STHU:
455 emulated = kvmppc_handle_store(run, vcpu,
456 kvmppc_get_gpr(vcpu, rs),
457 2, 1);
458 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
459 break;
460
461 default: 277 default:
462 emulated = EMULATE_FAIL; 278 emulated = EMULATE_FAIL;
463 } 279 }
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
new file mode 100644
index 000000000000..0de4ffa175a9
--- /dev/null
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -0,0 +1,272 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 * Copyright 2011 Freescale Semiconductor, Inc.
17 *
18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19 */
20
21#include <linux/jiffies.h>
22#include <linux/hrtimer.h>
23#include <linux/types.h>
24#include <linux/string.h>
25#include <linux/kvm_host.h>
26#include <linux/clockchips.h>
27
28#include <asm/reg.h>
29#include <asm/time.h>
30#include <asm/byteorder.h>
31#include <asm/kvm_ppc.h>
32#include <asm/disassemble.h>
33#include <asm/ppc-opcode.h>
34#include "timing.h"
35#include "trace.h"
36
37/* XXX to do:
38 * lhax
39 * lhaux
40 * lswx
41 * lswi
42 * stswx
43 * stswi
44 * lha
45 * lhau
46 * lmw
47 * stmw
48 *
49 */
50int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
51{
52 struct kvm_run *run = vcpu->run;
53 u32 inst;
54 int ra, rs, rt;
55 enum emulation_result emulated;
56 int advance = 1;
57
58 /* this default type might be overwritten by subcategories */
59 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
60
61 emulated = kvmppc_get_last_inst(vcpu, false, &inst);
62 if (emulated != EMULATE_DONE)
63 return emulated;
64
65 ra = get_ra(inst);
66 rs = get_rs(inst);
67 rt = get_rt(inst);
68
69 switch (get_op(inst)) {
70 case 31:
71 switch (get_xop(inst)) {
72 case OP_31_XOP_LWZX:
73 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
74 break;
75
76 case OP_31_XOP_LBZX:
77 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
78 break;
79
80 case OP_31_XOP_LBZUX:
81 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
82 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
83 break;
84
85 case OP_31_XOP_STWX:
86 emulated = kvmppc_handle_store(run, vcpu,
87 kvmppc_get_gpr(vcpu, rs),
88 4, 1);
89 break;
90
91 case OP_31_XOP_STBX:
92 emulated = kvmppc_handle_store(run, vcpu,
93 kvmppc_get_gpr(vcpu, rs),
94 1, 1);
95 break;
96
97 case OP_31_XOP_STBUX:
98 emulated = kvmppc_handle_store(run, vcpu,
99 kvmppc_get_gpr(vcpu, rs),
100 1, 1);
101 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
102 break;
103
104 case OP_31_XOP_LHAX:
105 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
106 break;
107
108 case OP_31_XOP_LHZX:
109 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
110 break;
111
112 case OP_31_XOP_LHZUX:
113 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
114 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
115 break;
116
117 case OP_31_XOP_STHX:
118 emulated = kvmppc_handle_store(run, vcpu,
119 kvmppc_get_gpr(vcpu, rs),
120 2, 1);
121 break;
122
123 case OP_31_XOP_STHUX:
124 emulated = kvmppc_handle_store(run, vcpu,
125 kvmppc_get_gpr(vcpu, rs),
126 2, 1);
127 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
128 break;
129
130 case OP_31_XOP_DCBST:
131 case OP_31_XOP_DCBF:
132 case OP_31_XOP_DCBI:
133 /* Do nothing. The guest is performing dcbi because
134 * hardware DMA is not snooped by the dcache, but
135 * emulated DMA either goes through the dcache as
136 * normal writes, or the host kernel has handled dcache
137 * coherence. */
138 break;
139
140 case OP_31_XOP_LWBRX:
141 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
142 break;
143
144 case OP_31_XOP_STWBRX:
145 emulated = kvmppc_handle_store(run, vcpu,
146 kvmppc_get_gpr(vcpu, rs),
147 4, 0);
148 break;
149
150 case OP_31_XOP_LHBRX:
151 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
152 break;
153
154 case OP_31_XOP_STHBRX:
155 emulated = kvmppc_handle_store(run, vcpu,
156 kvmppc_get_gpr(vcpu, rs),
157 2, 0);
158 break;
159
160 default:
161 emulated = EMULATE_FAIL;
162 break;
163 }
164 break;
165
166 case OP_LWZ:
167 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
168 break;
169
170 /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
171 case OP_LD:
172 rt = get_rt(inst);
173 emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
174 break;
175
176 case OP_LWZU:
177 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
178 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
179 break;
180
181 case OP_LBZ:
182 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
183 break;
184
185 case OP_LBZU:
186 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
187 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
188 break;
189
190 case OP_STW:
191 emulated = kvmppc_handle_store(run, vcpu,
192 kvmppc_get_gpr(vcpu, rs),
193 4, 1);
194 break;
195
196 /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
197 case OP_STD:
198 rs = get_rs(inst);
199 emulated = kvmppc_handle_store(run, vcpu,
200 kvmppc_get_gpr(vcpu, rs),
201 8, 1);
202 break;
203
204 case OP_STWU:
205 emulated = kvmppc_handle_store(run, vcpu,
206 kvmppc_get_gpr(vcpu, rs),
207 4, 1);
208 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
209 break;
210
211 case OP_STB:
212 emulated = kvmppc_handle_store(run, vcpu,
213 kvmppc_get_gpr(vcpu, rs),
214 1, 1);
215 break;
216
217 case OP_STBU:
218 emulated = kvmppc_handle_store(run, vcpu,
219 kvmppc_get_gpr(vcpu, rs),
220 1, 1);
221 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
222 break;
223
224 case OP_LHZ:
225 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
226 break;
227
228 case OP_LHZU:
229 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
230 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
231 break;
232
233 case OP_LHA:
234 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
235 break;
236
237 case OP_LHAU:
238 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
239 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
240 break;
241
242 case OP_STH:
243 emulated = kvmppc_handle_store(run, vcpu,
244 kvmppc_get_gpr(vcpu, rs),
245 2, 1);
246 break;
247
248 case OP_STHU:
249 emulated = kvmppc_handle_store(run, vcpu,
250 kvmppc_get_gpr(vcpu, rs),
251 2, 1);
252 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
253 break;
254
255 default:
256 emulated = EMULATE_FAIL;
257 break;
258 }
259
260 if (emulated == EMULATE_FAIL) {
261 advance = 0;
262 kvmppc_core_queue_program(vcpu, 0);
263 }
264
265 trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
266
267 /* Advance past emulated instruction. */
268 if (advance)
269 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
270
271 return emulated;
272}
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
index b68d0dc9479a..39b3a8f816f2 100644
--- a/arch/powerpc/kvm/mpic.c
+++ b/arch/powerpc/kvm/mpic.c
@@ -1826,8 +1826,7 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
1826 return 0; 1826 return 0;
1827} 1827}
1828 1828
1829int kvm_set_routing_entry(struct kvm_irq_routing_table *rt, 1829int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
1830 struct kvm_kernel_irq_routing_entry *e,
1831 const struct kvm_irq_routing_entry *ue) 1830 const struct kvm_irq_routing_entry *ue)
1832{ 1831{
1833 int r = -EINVAL; 1832 int r = -EINVAL;
@@ -1839,7 +1838,6 @@ int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
1839 e->irqchip.pin = ue->u.irqchip.pin; 1838 e->irqchip.pin = ue->u.irqchip.pin;
1840 if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS) 1839 if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS)
1841 goto out; 1840 goto out;
1842 rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi;
1843 break; 1841 break;
1844 case KVM_IRQ_ROUTING_MSI: 1842 case KVM_IRQ_ROUTING_MSI:
1845 e->set = kvm_set_msi; 1843 e->set = kvm_set_msi;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 61c738ab1283..4c79284b58be 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -190,6 +190,25 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
190 vcpu->arch.magic_page_pa = param1 & ~0xfffULL; 190 vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
191 vcpu->arch.magic_page_ea = param2 & ~0xfffULL; 191 vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
192 192
193#ifdef CONFIG_PPC_64K_PAGES
194 /*
195 * Make sure our 4k magic page is in the same window of a 64k
196 * page within the guest and within the host's page.
197 */
198 if ((vcpu->arch.magic_page_pa & 0xf000) !=
199 ((ulong)vcpu->arch.shared & 0xf000)) {
200 void *old_shared = vcpu->arch.shared;
201 ulong shared = (ulong)vcpu->arch.shared;
202 void *new_shared;
203
204 shared &= PAGE_MASK;
205 shared |= vcpu->arch.magic_page_pa & 0xf000;
206 new_shared = (void*)shared;
207 memcpy(new_shared, old_shared, 0x1000);
208 vcpu->arch.shared = new_shared;
209 }
210#endif
211
193 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7; 212 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
194 213
195 r = EV_SUCCESS; 214 r = EV_SUCCESS;
@@ -198,7 +217,6 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
198 case KVM_HCALL_TOKEN(KVM_HC_FEATURES): 217 case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
199 r = EV_SUCCESS; 218 r = EV_SUCCESS;
200#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2) 219#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
201 /* XXX Missing magic page on 44x */
202 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); 220 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
203#endif 221#endif
204 222
@@ -254,13 +272,16 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
254 enum emulation_result er; 272 enum emulation_result er;
255 int r; 273 int r;
256 274
257 er = kvmppc_emulate_instruction(run, vcpu); 275 er = kvmppc_emulate_loadstore(vcpu);
258 switch (er) { 276 switch (er) {
259 case EMULATE_DONE: 277 case EMULATE_DONE:
260 /* Future optimization: only reload non-volatiles if they were 278 /* Future optimization: only reload non-volatiles if they were
261 * actually modified. */ 279 * actually modified. */
262 r = RESUME_GUEST_NV; 280 r = RESUME_GUEST_NV;
263 break; 281 break;
282 case EMULATE_AGAIN:
283 r = RESUME_GUEST;
284 break;
264 case EMULATE_DO_MMIO: 285 case EMULATE_DO_MMIO:
265 run->exit_reason = KVM_EXIT_MMIO; 286 run->exit_reason = KVM_EXIT_MMIO;
266 /* We must reload nonvolatiles because "update" load/store 287 /* We must reload nonvolatiles because "update" load/store
@@ -270,11 +291,15 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
270 r = RESUME_HOST_NV; 291 r = RESUME_HOST_NV;
271 break; 292 break;
272 case EMULATE_FAIL: 293 case EMULATE_FAIL:
294 {
295 u32 last_inst;
296
297 kvmppc_get_last_inst(vcpu, false, &last_inst);
273 /* XXX Deliver Program interrupt to guest. */ 298 /* XXX Deliver Program interrupt to guest. */
274 printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__, 299 pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
275 kvmppc_get_last_inst(vcpu));
276 r = RESUME_HOST; 300 r = RESUME_HOST;
277 break; 301 break;
302 }
278 default: 303 default:
279 WARN_ON(1); 304 WARN_ON(1);
280 r = RESUME_GUEST; 305 r = RESUME_GUEST;
@@ -284,6 +309,81 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
284} 309}
285EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio); 310EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
286 311
312int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
313 bool data)
314{
315 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
316 struct kvmppc_pte pte;
317 int r;
318
319 vcpu->stat.st++;
320
321 r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
322 XLATE_WRITE, &pte);
323 if (r < 0)
324 return r;
325
326 *eaddr = pte.raddr;
327
328 if (!pte.may_write)
329 return -EPERM;
330
331 /* Magic page override */
332 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
333 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
334 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
335 void *magic = vcpu->arch.shared;
336 magic += pte.eaddr & 0xfff;
337 memcpy(magic, ptr, size);
338 return EMULATE_DONE;
339 }
340
341 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
342 return EMULATE_DO_MMIO;
343
344 return EMULATE_DONE;
345}
346EXPORT_SYMBOL_GPL(kvmppc_st);
347
348int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
349 bool data)
350{
351 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
352 struct kvmppc_pte pte;
353 int rc;
354
355 vcpu->stat.ld++;
356
357 rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
358 XLATE_READ, &pte);
359 if (rc)
360 return rc;
361
362 *eaddr = pte.raddr;
363
364 if (!pte.may_read)
365 return -EPERM;
366
367 if (!data && !pte.may_execute)
368 return -ENOEXEC;
369
370 /* Magic page override */
371 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
372 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
373 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
374 void *magic = vcpu->arch.shared;
375 magic += pte.eaddr & 0xfff;
376 memcpy(ptr, magic, size);
377 return EMULATE_DONE;
378 }
379
380 if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
381 return EMULATE_DO_MMIO;
382
383 return EMULATE_DONE;
384}
385EXPORT_SYMBOL_GPL(kvmppc_ld);
386
287int kvm_arch_hardware_enable(void *garbage) 387int kvm_arch_hardware_enable(void *garbage)
288{ 388{
289 return 0; 389 return 0;
@@ -366,14 +466,20 @@ void kvm_arch_sync_events(struct kvm *kvm)
366{ 466{
367} 467}
368 468
369int kvm_dev_ioctl_check_extension(long ext) 469int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
370{ 470{
371 int r; 471 int r;
372 /* FIXME!! 472 /* Assume we're using HV mode when the HV module is loaded */
373 * Should some of this be vm ioctl ? is it possible now ?
374 */
375 int hv_enabled = kvmppc_hv_ops ? 1 : 0; 473 int hv_enabled = kvmppc_hv_ops ? 1 : 0;
376 474
475 if (kvm) {
476 /*
477 * Hooray - we know which VM type we're running on. Depend on
478 * that rather than the guess above.
479 */
480 hv_enabled = is_kvmppc_hv_enabled(kvm);
481 }
482
377 switch (ext) { 483 switch (ext) {
378#ifdef CONFIG_BOOKE 484#ifdef CONFIG_BOOKE
379 case KVM_CAP_PPC_BOOKE_SREGS: 485 case KVM_CAP_PPC_BOOKE_SREGS:
@@ -387,6 +493,7 @@ int kvm_dev_ioctl_check_extension(long ext)
387 case KVM_CAP_PPC_UNSET_IRQ: 493 case KVM_CAP_PPC_UNSET_IRQ:
388 case KVM_CAP_PPC_IRQ_LEVEL: 494 case KVM_CAP_PPC_IRQ_LEVEL:
389 case KVM_CAP_ENABLE_CAP: 495 case KVM_CAP_ENABLE_CAP:
496 case KVM_CAP_ENABLE_CAP_VM:
390 case KVM_CAP_ONE_REG: 497 case KVM_CAP_ONE_REG:
391 case KVM_CAP_IOEVENTFD: 498 case KVM_CAP_IOEVENTFD:
392 case KVM_CAP_DEVICE_CTRL: 499 case KVM_CAP_DEVICE_CTRL:
@@ -417,6 +524,7 @@ int kvm_dev_ioctl_check_extension(long ext)
417 case KVM_CAP_PPC_ALLOC_HTAB: 524 case KVM_CAP_PPC_ALLOC_HTAB:
418 case KVM_CAP_PPC_RTAS: 525 case KVM_CAP_PPC_RTAS:
419 case KVM_CAP_PPC_FIXUP_HCALL: 526 case KVM_CAP_PPC_FIXUP_HCALL:
527 case KVM_CAP_PPC_ENABLE_HCALL:
420#ifdef CONFIG_KVM_XICS 528#ifdef CONFIG_KVM_XICS
421 case KVM_CAP_IRQ_XICS: 529 case KVM_CAP_IRQ_XICS:
422#endif 530#endif
@@ -635,12 +743,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
635#endif 743#endif
636} 744}
637 745
638static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
639 struct kvm_run *run)
640{
641 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
642}
643
644static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, 746static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
645 struct kvm_run *run) 747 struct kvm_run *run)
646{ 748{
@@ -837,10 +939,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
837 if (!vcpu->mmio_is_write) 939 if (!vcpu->mmio_is_write)
838 kvmppc_complete_mmio_load(vcpu, run); 940 kvmppc_complete_mmio_load(vcpu, run);
839 vcpu->mmio_needed = 0; 941 vcpu->mmio_needed = 0;
840 } else if (vcpu->arch.dcr_needed) {
841 if (!vcpu->arch.dcr_is_write)
842 kvmppc_complete_dcr_load(vcpu, run);
843 vcpu->arch.dcr_needed = 0;
844 } else if (vcpu->arch.osi_needed) { 942 } else if (vcpu->arch.osi_needed) {
845 u64 *gprs = run->osi.gprs; 943 u64 *gprs = run->osi.gprs;
846 int i; 944 int i;
@@ -1099,6 +1197,42 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
1099 return 0; 1197 return 0;
1100} 1198}
1101 1199
1200
1201static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
1202 struct kvm_enable_cap *cap)
1203{
1204 int r;
1205
1206 if (cap->flags)
1207 return -EINVAL;
1208
1209 switch (cap->cap) {
1210#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1211 case KVM_CAP_PPC_ENABLE_HCALL: {
1212 unsigned long hcall = cap->args[0];
1213
1214 r = -EINVAL;
1215 if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
1216 cap->args[1] > 1)
1217 break;
1218 if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
1219 break;
1220 if (cap->args[1])
1221 set_bit(hcall / 4, kvm->arch.enabled_hcalls);
1222 else
1223 clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
1224 r = 0;
1225 break;
1226 }
1227#endif
1228 default:
1229 r = -EINVAL;
1230 break;
1231 }
1232
1233 return r;
1234}
1235
1102long kvm_arch_vm_ioctl(struct file *filp, 1236long kvm_arch_vm_ioctl(struct file *filp,
1103 unsigned int ioctl, unsigned long arg) 1237 unsigned int ioctl, unsigned long arg)
1104{ 1238{
@@ -1118,6 +1252,15 @@ long kvm_arch_vm_ioctl(struct file *filp,
1118 1252
1119 break; 1253 break;
1120 } 1254 }
1255 case KVM_ENABLE_CAP:
1256 {
1257 struct kvm_enable_cap cap;
1258 r = -EFAULT;
1259 if (copy_from_user(&cap, argp, sizeof(cap)))
1260 goto out;
1261 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
1262 break;
1263 }
1121#ifdef CONFIG_PPC_BOOK3S_64 1264#ifdef CONFIG_PPC_BOOK3S_64
1122 case KVM_CREATE_SPAPR_TCE: { 1265 case KVM_CREATE_SPAPR_TCE: {
1123 struct kvm_create_spapr_tce create_tce; 1266 struct kvm_create_spapr_tce create_tce;
@@ -1204,3 +1347,5 @@ void kvm_arch_exit(void)
1204{ 1347{
1205 1348
1206} 1349}
1350
1351EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);
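The kvm_vm_ioctl_enable_cap() path added above is driven from userspace through the vm-level KVM_ENABLE_CAP ioctl (advertised here via KVM_CAP_ENABLE_CAP_VM). A minimal sketch of how a VMM might toggle one hypercall, assuming a vm_fd from KVM_CREATE_VM and a hypothetical hcall_token that satisfies the checks above (a multiple of 4, no larger than MAX_HCALL_OPCODE):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Set (enable != 0) or clear one hcall in the VM's enabled_hcalls bitmap. */
static int set_hcall_enabled(int vm_fd, unsigned long hcall_token, int enable)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap     = KVM_CAP_PPC_ENABLE_HCALL;
	cap.args[0] = hcall_token;      /* which hypercall */
	cap.args[1] = enable ? 1 : 0;   /* must be 0 or 1 per the check above */

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}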
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index 07b6110a4bb7..e44d2b2ea97e 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -110,7 +110,6 @@ void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu)
110 110
111static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = { 111static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = {
112 [MMIO_EXITS] = "MMIO", 112 [MMIO_EXITS] = "MMIO",
113 [DCR_EXITS] = "DCR",
114 [SIGNAL_EXITS] = "SIGNAL", 113 [SIGNAL_EXITS] = "SIGNAL",
115 [ITLB_REAL_MISS_EXITS] = "ITLBREAL", 114 [ITLB_REAL_MISS_EXITS] = "ITLBREAL",
116 [ITLB_VIRT_MISS_EXITS] = "ITLBVIRT", 115 [ITLB_VIRT_MISS_EXITS] = "ITLBVIRT",
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h
index bf191e72b2d8..3123690c82dc 100644
--- a/arch/powerpc/kvm/timing.h
+++ b/arch/powerpc/kvm/timing.h
@@ -63,9 +63,6 @@ static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type)
63 case EMULATED_INST_EXITS: 63 case EMULATED_INST_EXITS:
64 vcpu->stat.emulated_inst_exits++; 64 vcpu->stat.emulated_inst_exits++;
65 break; 65 break;
66 case DCR_EXITS:
67 vcpu->stat.dcr_exits++;
68 break;
69 case DSI_EXITS: 66 case DSI_EXITS:
70 vcpu->stat.dsi_exits++; 67 vcpu->stat.dsi_exits++;
71 break; 68 break;