Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--  arch/powerpc/include/asm/asm-compat.h    |   4
-rw-r--r--  arch/powerpc/include/asm/cache.h         |   7
-rw-r--r--  arch/powerpc/include/asm/hvcall.h        |   6
-rw-r--r--  arch/powerpc/include/asm/kvm_44x.h       |  67
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h       |   2
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h    |  51
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h |  29
-rw-r--r--  arch/powerpc/include/asm/kvm_booke.h     |  15
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h      |  28
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h       | 116
-rw-r--r--  arch/powerpc/include/asm/mmu-book3e.h    |   8
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h    |  17
-rw-r--r--  arch/powerpc/include/asm/reg.h           |  13
-rw-r--r--  arch/powerpc/include/asm/time.h          |   9
14 files changed, 230 insertions(+), 142 deletions(-)
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index 4b237aa35660..21be8ae8f809 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -34,10 +34,14 @@
 #define PPC_MIN_STKFRM	112
 
 #ifdef __BIG_ENDIAN__
+#define LWZX_BE	stringify_in_c(lwzx)
 #define LDX_BE	stringify_in_c(ldx)
+#define STWX_BE	stringify_in_c(stwx)
 #define STDX_BE	stringify_in_c(stdx)
 #else
+#define LWZX_BE	stringify_in_c(lwbrx)
 #define LDX_BE	stringify_in_c(ldbrx)
+#define STWX_BE	stringify_in_c(stwbrx)
 #define STDX_BE	stringify_in_c(stdbrx)
 #endif
 
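
On big-endian kernels the *X_BE macros emit plain indexed loads/stores; on little-endian builds they switch to the byte-reversed forms (lwbrx/stwbrx and friends), so inline asm can touch big-endian in-memory structures uniformly. A minimal sketch of how the new 32-bit variant might be used from C (load_be32 is a hypothetical helper, not part of this patch):

#include <asm/asm-compat.h>

/* Hypothetical helper: read a big-endian u32 regardless of the
 * kernel's endianness. LWZX_BE resolves to lwzx or lwbrx at build
 * time, so no runtime check is needed. */
static inline u32 load_be32(const u32 *p)
{
	u32 val;

	asm volatile(LWZX_BE " %0,0,%1" : "=r" (val) : "r" (p));
	return val;
}
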
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index ed0afc1e44a4..34a05a1a990b 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -3,6 +3,7 @@
 
 #ifdef __KERNEL__
 
+#include <asm/reg.h>
 
 /* bytes per L1 cache line */
 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
@@ -39,6 +40,12 @@ struct ppc64_caches {
 };
 
 extern struct ppc64_caches ppc64_caches;
+
+static inline void logmpp(u64 x)
+{
+	asm volatile(PPC_LOGMPP(R1) : : "r" (x));
+}
+
 #endif /* __powerpc64__ && ! __ASSEMBLY__ */
 
 #if defined(__ASSEMBLY__)
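
logmpp() wraps the POWER8 LOGMPP instruction, which dumps a core's L2 cache contents into a memory buffer so they can be prefetched back after a partition switch. A rough sketch of the call, assuming a suitably sized and aligned buffer (the PPC_MPPE_ADDRESS_MASK and PPC_LOGMPP_LOG_L2 bits come from ppc-opcode.h later in this patch):

/* Sketch only: start logging this core's L2 contents into buf. */
static void start_l2_log(void *buf)
{
	u64 mpp_addr;

	mpp_addr = virt_to_phys(buf) & PPC_MPPE_ADDRESS_MASK;
	logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);
}
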
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 5dbbb29f5c3e..85bc8c0d257b 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -279,6 +279,12 @@
 #define H_GET_24X7_DATA		0xF07C
 #define H_GET_PERF_COUNTER_INFO	0xF080
 
+/* Values for 2nd argument to H_SET_MODE */
+#define H_SET_MODE_RESOURCE_SET_CIABR		1
+#define H_SET_MODE_RESOURCE_SET_DAWR		2
+#define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE	3
+#define H_SET_MODE_RESOURCE_LE			4
+
 #ifndef __ASSEMBLY__
 
 /**
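
H_SET_MODE takes a resource selector as its second argument; resource 4 (H_SET_MODE_RESOURCE_LE) switches the partition's exception endianness. A hedged sketch of the guest-side call using plpar_hcall_norets(), assuming (per PAPR) that mflags = 1 selects little-endian mode:

/* Sketch: ask the hypervisor to deliver exceptions little-endian. */
static long set_le_exceptions(void)
{
	return plpar_hcall_norets(H_SET_MODE, 1UL,
				  H_SET_MODE_RESOURCE_LE, 0, 0);
}
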
diff --git a/arch/powerpc/include/asm/kvm_44x.h b/arch/powerpc/include/asm/kvm_44x.h
deleted file mode 100644
index a0e57618ff33..000000000000
--- a/arch/powerpc/include/asm/kvm_44x.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- *
- * Copyright IBM Corp. 2008
- *
- * Authors: Hollis Blanchard <hollisb@us.ibm.com>
- */
-
-#ifndef __ASM_44X_H__
-#define __ASM_44X_H__
-
-#include <linux/kvm_host.h>
-
-#define PPC44x_TLB_SIZE 64
-
-/* If the guest is expecting it, this can be as large as we like; we'd just
- * need to find some way of advertising it. */
-#define KVM44x_GUEST_TLB_SIZE 64
-
-struct kvmppc_44x_tlbe {
-	u32 tid; /* Only the low 8 bits are used. */
-	u32 word0;
-	u32 word1;
-	u32 word2;
-};
-
-struct kvmppc_44x_shadow_ref {
-	struct page *page;
-	u16 gtlb_index;
-	u8 writeable;
-	u8 tid;
-};
-
-struct kvmppc_vcpu_44x {
-	/* Unmodified copy of the guest's TLB. */
-	struct kvmppc_44x_tlbe guest_tlb[KVM44x_GUEST_TLB_SIZE];
-
-	/* References to guest pages in the hardware TLB. */
-	struct kvmppc_44x_shadow_ref shadow_refs[PPC44x_TLB_SIZE];
-
-	/* State of the shadow TLB at guest context switch time. */
-	struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE];
-	u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
-
-	struct kvm_vcpu vcpu;
-};
-
-static inline struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu)
-{
-	return container_of(vcpu, struct kvmppc_vcpu_44x, vcpu);
-}
-
-void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu);
-void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu);
-
-#endif /* __ASM_44X_H__ */
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 9601741080e5..b8901c4a4922 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -33,7 +33,6 @@
 /* IVPR must be 64KiB-aligned. */
 #define VCPU_SIZE_ORDER	4
 #define VCPU_SIZE_LOG	(VCPU_SIZE_ORDER + 12)
-#define VCPU_TLB_PGSZ	PPC44x_TLB_64K
 #define VCPU_SIZE_BYTES	(1<<VCPU_SIZE_LOG)
 
 #define BOOKE_INTERRUPT_CRITICAL 0
@@ -131,6 +130,7 @@
 #define BOOK3S_HFLAG_NATIVE_PS		0x8
 #define BOOK3S_HFLAG_MULTI_PGSIZE	0x10
 #define BOOK3S_HFLAG_NEW_TLBIE		0x20
+#define BOOK3S_HFLAG_SPLIT_HACK		0x40
 
 #define RESUME_FLAG_NV          (1<<0)  /* Reload guest nonvolatile state? */
 #define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index f52f65694527..6acf0c2a0f99 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -83,8 +83,6 @@ struct kvmppc_vcpu_book3s {
 	u64 sdr1;
 	u64 hior;
 	u64 msr_mask;
-	u64 purr_offset;
-	u64 spurr_offset;
 #ifdef CONFIG_PPC_BOOK3S_32
 	u32 vsid_pool[VSID_POOL_SIZE];
 	u32 vsid_next;
@@ -148,9 +146,10 @@ extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *
 extern int kvmppc_mmu_hpte_sysinit(void);
 extern void kvmppc_mmu_hpte_sysexit(void);
 extern int kvmppc_mmu_hv_init(void);
+extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
 
+/* XXX remove this export when load_last_inst() is generic */
 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
-extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
 extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
 extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
 					  unsigned int vec);
@@ -159,13 +158,13 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 			   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
-extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+extern pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
 			bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
 			unsigned long *rmap, long pte_index, int realmode);
-extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
+extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
 			unsigned long pte_index);
-void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
+void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
 			unsigned long pte_index);
 extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
 			unsigned long *nb_ret);
@@ -183,12 +182,16 @@ extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
 			struct kvm_memory_slot *memslot, unsigned long *map);
 extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
 			unsigned long mask);
+extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);
 
 extern void kvmppc_entry_trampoline(void);
 extern void kvmppc_hv_entry_trampoline(void);
 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
 extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
+extern int kvmppc_hcall_impl_pr(unsigned long cmd);
+extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
 extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
 				 struct kvm_vcpu *vcpu);
 extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
@@ -274,32 +277,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
 	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
 }
 
-static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
-{
-	/* Load the instruction manually if it failed to do so in the
-	 * exit path */
-	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
-		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
-
-	return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) :
-		vcpu->arch.last_inst;
-}
-
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-	return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));
-}
-
-/*
- * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
- * Because the sc instruction sets SRR0 to point to the following
- * instruction, we have to fetch from pc - 4.
- */
-static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
-{
-	return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);
-}
-
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault_dar;
@@ -310,6 +287,13 @@ static inline bool is_kvmppc_resume_guest(int r)
 	return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
 }
 
+static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
+static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
+{
+	/* Only PR KVM supports the magic page */
+	return !is_kvmppc_hv_enabled(vcpu->kvm);
+}
+
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
 #define OSI_SC_MAGIC_R3			0x113724FA
@@ -322,4 +306,7 @@ static inline bool is_kvmppc_resume_guest(int r)
 /* LPIDs we support with this build -- runtime limit may be lower */
 #define KVMPPC_NR_LPIDS	(LPID_RSVD + 1)
 
+#define SPLIT_HACK_MASK			0xff000000
+#define SPLIT_HACK_OFFS			0xfb000000
+
 #endif /* __ASM_KVM_BOOK3S_H__ */
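
kvmppc_supports_magic_page() lets shared code ask whether the magic-page hypercall should be honoured. An illustrative, hypothetical handler showing the intended gating (not part of this patch):

/* Hypothetical handler sketch: refuse the magic page under HV KVM. */
static int handle_map_magic_page(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_supports_magic_page(vcpu))
		return -EINVAL;

	/* map the shared page at the guest-supplied address here */
	return 0;
}
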
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index d645428a65a4..0aa817933e6a 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -59,20 +59,29 @@ extern unsigned long kvm_rma_pages;
 /* These bits are reserved in the guest view of the HPTE */
 #define HPTE_GR_RESERVED	HPTE_GR_MODIFIED
 
-static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
+static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
 {
 	unsigned long tmp, old;
+	__be64 be_lockbit, be_bits;
+
+	/*
+	 * We load/store in native endian, but the HTAB is in big endian. If
+	 * we byte swap all data we apply on the PTE we're implicitly correct
+	 * again.
+	 */
+	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
+	be_bits = cpu_to_be64(bits);
 
 	asm volatile("	ldarx	%0,0,%2\n"
 		     "	and.	%1,%0,%3\n"
 		     "	bne	2f\n"
-		     "	ori	%0,%0,%4\n"
+		     "	or	%0,%0,%4\n"
 		     "	stdcx.	%0,0,%2\n"
 		     "	beq+	2f\n"
 		     "	mr	%1,%3\n"
 		     "2:	isync"
 		     : "=&r" (tmp), "=&r" (old)
-		     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
+		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
 		     : "cc", "memory");
 	return old == 0;
 }
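
Callers typically spin on try_lock_hpte() and operate on the entry once the HVLOCK bit is won, along these lines:

/* Usage pattern (as seen elsewhere in HV KVM): busy-wait for the
 * per-entry lock bit before touching the HPTE. */
static void lock_hpte(__be64 *hptep)
{
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
}
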
@@ -110,16 +119,12 @@ static inline int __hpte_actual_psize(unsigned int lp, int psize)
 static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 					     unsigned long pte_index)
 {
-	int b_psize, a_psize;
+	int b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
 	unsigned int penc;
 	unsigned long rb = 0, va_low, sllp;
 	unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
 
-	if (!(v & HPTE_V_LARGE)) {
-		/* both base and actual psize is 4k */
-		b_psize = MMU_PAGE_4K;
-		a_psize = MMU_PAGE_4K;
-	} else {
+	if (v & HPTE_V_LARGE) {
 		for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {
 
 			/* valid entries have a shift value */
@@ -142,6 +147,8 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 	 */
 	/* This covers 14..54 bits of va*/
 	rb = (v & ~0x7fUL) << 16;		/* AVA field */
+
+	rb |= v >> (62 - 8);			/*  B field */
 	/*
 	 * AVA in v had cleared lower 23 bits. We need to derive
 	 * that from pteg index
@@ -172,10 +179,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 	{
 		int aval_shift;
 		/*
-		 * remaining 7bits of AVA/LP fields
+		 * remaining bits of AVA/LP fields
 		 * Also contain the rr bits of LP
 		 */
-		rb |= (va_low & 0x7f) << 16;
+		rb |= (va_low << mmu_psize_defs[b_psize].shift) & 0x7ff000;
 		/*
 		 * Now clear not needed LP bits based on actual psize
 		 */
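
For context, the rb value computed here is fed straight to tlbie. A hedged sketch of a caller (the real-mode code in book3s_hv_rm_mmu.c also serializes tlbie between cores, which is omitted here):

/* Sketch, assumptions flagged: hptep holds the big-endian HPTE, and
 * kvm->arch.lpid identifies the guest for the tlbie. */
static void invalidate_one(struct kvm *kvm, __be64 *hptep,
			   unsigned long pte_index)
{
	unsigned long rb;

	rb = compute_tlbie_rb(be64_to_cpu(hptep[0]),
			      be64_to_cpu(hptep[1]), pte_index);
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE(%1,%0) : : "r" (rb), "r" (kvm->arch.lpid));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
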
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index c7aed6105ff9..f7aa5cc395c4 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -69,11 +69,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
 	return false;
 }
 
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.last_inst;
-}
-
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
 	vcpu->arch.ctr = val;
@@ -108,4 +103,14 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault_dear;
 }
+
+static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
+{
+	/* Magic page is only supported on e500v2 */
+#ifdef CONFIG_KVM_E500V2
+	return true;
+#else
+	return false;
+#endif
+}
 #endif /* __ASM_KVM_BOOKE_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index bb66d8b8efdf..98d9dd50d063 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -34,6 +34,7 @@
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
+#include <asm/hvcall.h>
 
 #define KVM_MAX_VCPUS		NR_CPUS
 #define KVM_MAX_VCORES		NR_CPUS
@@ -48,7 +49,6 @@
 #define KVM_NR_IRQCHIPS          1
 #define KVM_IRQCHIP_NUM_PINS     256
 
-#if !defined(CONFIG_KVM_440)
 #include <linux/mmu_notifier.h>
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
@@ -61,8 +61,6 @@ extern int kvm_age_hva(struct kvm *kvm, unsigned long hva);
 extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 
-#endif
-
 #define HPTEG_CACHE_NUM			(1 << 15)
 #define HPTEG_HASH_BITS_PTE		13
 #define HPTEG_HASH_BITS_PTE_LONG	12
@@ -96,7 +94,6 @@ struct kvm_vm_stat {
 struct kvm_vcpu_stat {
 	u32 sum_exits;
 	u32 mmio_exits;
-	u32 dcr_exits;
 	u32 signal_exits;
 	u32 light_exits;
 	/* Account for special types of light exits: */
@@ -113,22 +110,21 @@ struct kvm_vcpu_stat {
 	u32 halt_wakeup;
 	u32 dbell_exits;
 	u32 gdbell_exits;
+	u32 ld;
+	u32 st;
 #ifdef CONFIG_PPC_BOOK3S
 	u32 pf_storage;
 	u32 pf_instruc;
 	u32 sp_storage;
 	u32 sp_instruc;
 	u32 queue_intr;
-	u32 ld;
 	u32 ld_slow;
-	u32 st;
 	u32 st_slow;
 #endif
 };
 
 enum kvm_exit_types {
 	MMIO_EXITS,
-	DCR_EXITS,
 	SIGNAL_EXITS,
 	ITLB_REAL_MISS_EXITS,
 	ITLB_VIRT_MISS_EXITS,
@@ -254,7 +250,6 @@ struct kvm_arch {
 	atomic_t hpte_mod_interest;
 	spinlock_t slot_phys_lock;
 	cpumask_t need_tlb_flush;
-	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
 	int hpt_cma_alloc;
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -263,6 +258,7 @@ struct kvm_arch {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct list_head spapr_tce_tables;
 	struct list_head rtas_tokens;
+	DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
 #endif
 #ifdef CONFIG_KVM_MPIC
 	struct openpic *mpic;
@@ -271,6 +267,10 @@ struct kvm_arch {
 	struct kvmppc_xics *xics;
 #endif
 	struct kvmppc_ops *kvm_ops;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	/* This array can grow quite large, keep it at the end */
+	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
+#endif
 };
 
 /*
@@ -305,6 +305,8 @@ struct kvmppc_vcore {
 	u32 arch_compat;
 	ulong pcr;
 	ulong dpdes;		/* doorbell state (POWER8) */
+	void *mpp_buffer;	/* Micro Partition Prefetch buffer */
+	bool mpp_buffer_is_valid;
 };
 
 #define VCORE_ENTRY_COUNT(vc)	((vc)->entry_exit_count & 0xff)
@@ -503,8 +505,10 @@ struct kvm_vcpu_arch {
 #ifdef CONFIG_BOOKE
 	u32 decar;
 #endif
-	u32 tbl;
-	u32 tbu;
+	/* Time base value when we entered the guest */
+	u64 entry_tb;
+	u64 entry_vtb;
+	u64 entry_ic;
 	u32 tcr;
 	ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
 	u32 ivor[64];
@@ -580,6 +584,8 @@ struct kvm_vcpu_arch {
 	u32 mmucfg;
 	u32 eptcfg;
 	u32 epr;
+	u64 sprg9;
+	u32 pwrmgtcr0;
 	u32 crit_save;
 	/* guest debug registers*/
 	struct debug_reg dbg_reg;
@@ -593,8 +599,6 @@ struct kvm_vcpu_arch {
 	u8 io_gpr; /* GPR used as IO source/target */
 	u8 mmio_is_bigendian;
 	u8 mmio_sign_extend;
-	u8 dcr_needed;
-	u8 dcr_is_write;
 	u8 osi_needed;
 	u8 osi_enabled;
 	u8 papr_enabled;
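
The new entry_tb/entry_vtb/entry_ic fields replace the old 32-bit tbu/tbl pair and let exit paths account elapsed time base, virtual time base and instruction counts. A sketch of the entry-side snapshot (the exit-side consumers live in the booke/book3s C files, not shown):

/* Sketch: record per-entry timestamps; exit code subtracts these. */
static void guest_enter_snapshot(struct kvm_vcpu *vcpu)
{
	vcpu->arch.entry_tb = get_tb();
	vcpu->arch.entry_vtb = get_vtb();
}
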
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9c89cdd067a6..fb86a2299d8a 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -41,12 +41,26 @@
 enum emulation_result {
 	EMULATE_DONE,         /* no further processing */
 	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
-	EMULATE_DO_DCR,       /* kvm_run filled with DCR request */
 	EMULATE_FAIL,         /* can't emulate this instruction */
 	EMULATE_AGAIN,        /* something went wrong. go again */
 	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
 };
 
+enum instruction_type {
+	INST_GENERIC,
+	INST_SC,		/* system call */
+};
+
+enum xlate_instdata {
+	XLATE_INST,		/* translate instruction address */
+	XLATE_DATA		/* translate data address */
+};
+
+enum xlate_readwrite {
+	XLATE_READ,		/* check for read permissions */
+	XLATE_WRITE		/* check for write permissions */
+};
+
 extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern void kvmppc_handler_highmem(void);
@@ -62,8 +76,16 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			       u64 val, unsigned int bytes,
 			       int is_default_endian);
 
+extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
+				 enum instruction_type type, u32 *inst);
+
+extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
+		     bool data);
+extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
+		     bool data);
 extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                       struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
 extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
 extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
 extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
@@ -86,6 +108,9 @@ extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                               gva_t eaddr);
 extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
+extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
+			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
+			struct kvmppc_pte *pte);
 
 extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
                                                 unsigned int id);
@@ -106,6 +131,14 @@ extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                        struct kvm_interrupt *irq);
 extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
+					ulong esr_flags);
+extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
+					   ulong dear_flags,
+					   ulong esr_flags);
+extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
+					   ulong esr_flags);
 extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
 
@@ -228,12 +261,35 @@ struct kvmppc_ops {
 	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
 	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
 			      unsigned long arg);
-
+	int (*hcall_implemented)(unsigned long hcall);
 };
 
 extern struct kvmppc_ops *kvmppc_hv_ops;
 extern struct kvmppc_ops *kvmppc_pr_ops;
 
+static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
+					enum instruction_type type, u32 *inst)
+{
+	int ret = EMULATE_DONE;
+	u32 fetched_inst;
+
+	/* Load the instruction manually if it failed to do so in the
+	 * exit path */
+	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);
+
+	/* Write fetch_failed unswapped if the fetch failed */
+	if (ret == EMULATE_DONE)
+		fetched_inst = kvmppc_need_byteswap(vcpu) ?
+				swab32(vcpu->arch.last_inst) :
+				vcpu->arch.last_inst;
+	else
+		fetched_inst = vcpu->arch.last_inst;
+
+	*inst = fetched_inst;
+	return ret;
+}
+
 static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
 {
 	return kvm->arch.kvm_ops == kvmppc_hv_ops;
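
Unlike the helpers it replaces, kvmppc_get_last_inst() can fail (e.g. when the guest page is no longer mapped), so emulation paths must check the return value. A sketch of a caller:

/* Sketch: fetch then emulate, propagating EMULATE_AGAIN etc. */
static int emulate_one(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst;
	int ret;

	ret = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (ret != EMULATE_DONE)
		return ret;

	/* decode and emulate inst here */
	return EMULATE_DONE;
}
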
@@ -392,6 +448,17 @@ static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
 { return 0; }
 #endif
 
+static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+	return mfspr(SPRN_GEPR);
+#elif defined(CONFIG_BOOKE)
+	return vcpu->arch.epr;
+#else
+	return 0;
+#endif
+}
+
 static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
 {
 #ifdef CONFIG_KVM_BOOKE_HV
@@ -472,8 +539,20 @@ static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
 #endif
 }
 
+#define SPRNG_WRAPPER_GET(reg, bookehv_spr)			\
+static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)	\
+{								\
+	return mfspr(bookehv_spr);				\
+}								\
+
+#define SPRNG_WRAPPER_SET(reg, bookehv_spr)			\
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
+{								\
+	mtspr(bookehv_spr, val);				\
+}								\
+
 #define SHARED_WRAPPER_GET(reg, size)				\
 static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)	\
 {								\
 	if (kvmppc_shared_big_endian(vcpu))			\
 	       return be##size##_to_cpu(vcpu->arch.shared->reg);	\
@@ -494,14 +573,31 @@ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
 	SHARED_WRAPPER_GET(reg, size)				\
 	SHARED_WRAPPER_SET(reg, size)				\
 
+#define SPRNG_WRAPPER(reg, bookehv_spr)				\
+	SPRNG_WRAPPER_GET(reg, bookehv_spr)			\
+	SPRNG_WRAPPER_SET(reg, bookehv_spr)			\
+
+#ifdef CONFIG_KVM_BOOKE_HV
+
+#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)		\
+	SPRNG_WRAPPER(reg, bookehv_spr)				\
+
+#else
+
+#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)		\
+	SHARED_WRAPPER(reg, size)				\
+
+#endif
+
 SHARED_WRAPPER(critical, 64)
-SHARED_WRAPPER(sprg0, 64)
-SHARED_WRAPPER(sprg1, 64)
-SHARED_WRAPPER(sprg2, 64)
-SHARED_WRAPPER(sprg3, 64)
-SHARED_WRAPPER(srr0, 64)
-SHARED_WRAPPER(srr1, 64)
-SHARED_WRAPPER(dar, 64)
+SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
+SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
+SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
+SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
+SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
+SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
+SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
+SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
 SHARED_WRAPPER_GET(msr, 64)
 static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
 {
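
For reference, here is roughly what SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0) expands to when CONFIG_KVM_BOOKE_HV is set; the guest SRR0 is then accessed through a real SPR instead of the shared (magic) page:

/* Expansion sketch under CONFIG_KVM_BOOKE_HV: */
static inline ulong kvmppc_get_srr0(struct kvm_vcpu *vcpu)
{
	return mfspr(SPRN_GSRR0);
}

static inline void kvmppc_set_srr0(struct kvm_vcpu *vcpu, ulong val)
{
	mtspr(SPRN_GSRR0, val);
}

Without CONFIG_KVM_BOOKE_HV the same macro falls back to SHARED_WRAPPER(srr0, 64), i.e. the byte-swapping shared-page accessors above.
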
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index d0918e09557f..cd4f04a74802 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,7 +40,11 @@
 
 /* MAS registers bit definitions */
 
-#define MAS0_TLBSEL(x)		(((x) << 28) & 0x30000000)
+#define MAS0_TLBSEL_MASK	0x30000000
+#define MAS0_TLBSEL_SHIFT	28
+#define MAS0_TLBSEL(x)		(((x) << MAS0_TLBSEL_SHIFT) & MAS0_TLBSEL_MASK)
+#define MAS0_GET_TLBSEL(mas0)	(((mas0) & MAS0_TLBSEL_MASK) >> \
+			MAS0_TLBSEL_SHIFT)
 #define MAS0_ESEL_MASK		0x0FFF0000
 #define MAS0_ESEL_SHIFT		16
 #define MAS0_ESEL(x)		(((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK)
@@ -58,6 +62,7 @@
 #define MAS1_TSIZE_MASK		0x00000f80
 #define MAS1_TSIZE_SHIFT	7
 #define MAS1_TSIZE(x)		(((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
+#define MAS1_GET_TSIZE(mas1)	(((mas1) & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT)
 
 #define MAS2_EPN		(~0xFFFUL)
 #define MAS2_X0			0x00000040
@@ -86,6 +91,7 @@
 #define MAS3_SPSIZE		0x0000003e
 #define MAS3_SPSIZE_SHIFT	1
 
+#define MAS4_TLBSEL_MASK	MAS0_TLBSEL_MASK
 #define MAS4_TLBSELD(x) 	MAS0_TLBSEL(x)
 #define MAS4_INDD		0x00008000	/* Default IND */
 #define MAS4_TSIZED(x)		MAS1_TSIZE(x)
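
The new MAS0_GET_TLBSEL()/MAS1_GET_TSIZE() getters are the inverse of the existing setters. A small decoding sketch (the 4^TSIZE KB size rule below is the e500 convention and is an assumption here):

/* Sketch: recover TLB array and page size from saved MAS values. */
static unsigned long tlbe_size_bytes(u32 mas0, u32 mas1)
{
	int tlbsel = MAS0_GET_TLBSEL(mas0);	/* which TLB array */
	int tsize = MAS1_GET_TSIZE(mas1);

	(void)tlbsel;
	return 1UL << (10 + 2 * tsize);		/* 4^tsize KB */
}
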
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 3132bb9365f3..c636841fc772 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -139,6 +139,7 @@
 #define PPC_INST_ISEL			0x7c00001e
 #define PPC_INST_ISEL_MASK		0xfc00003e
 #define PPC_INST_LDARX			0x7c0000a8
+#define PPC_INST_LOGMPP			0x7c0007e4
 #define PPC_INST_LSWI			0x7c0004aa
 #define PPC_INST_LSWX			0x7c00042a
 #define PPC_INST_LWARX			0x7c000028
@@ -275,6 +276,20 @@
 #define __PPC_EH(eh)	0
 #endif
 
+/* POWER8 Micro Partition Prefetch (MPP) parameters */
+/* Address mask is common for LOGMPP instruction and MPPR SPR */
+#define PPC_MPPE_ADDRESS_MASK 0xffffffffc000
+
+/* Bits 60 and 61 of MPP SPR should be set to one of the following */
+/* Aborting the fetch is indeed setting 00 in the table size bits */
+#define PPC_MPPR_FETCH_ABORT (0x0ULL << 60)
+#define PPC_MPPR_FETCH_WHOLE_TABLE (0x2ULL << 60)
+
+/* Bits 54 and 55 of register for LOGMPP instruction should be set to: */
+#define PPC_LOGMPP_LOG_L2 (0x02ULL << 54)
+#define PPC_LOGMPP_LOG_L2L3 (0x01ULL << 54)
+#define PPC_LOGMPP_LOG_ABORT (0x03ULL << 54)
+
 /* Deal with instructions that older assemblers aren't aware of */
 #define	PPC_DCBAL(a, b)		stringify_in_c(.long PPC_INST_DCBAL | \
 					__PPC_RA(a) | __PPC_RB(b))
@@ -283,6 +298,8 @@
 #define PPC_LDARX(t, a, b, eh)	stringify_in_c(.long PPC_INST_LDARX | \
 					___PPC_RT(t) | ___PPC_RA(a) | \
 					___PPC_RB(b) | __PPC_EH(eh))
+#define PPC_LOGMPP(b)		stringify_in_c(.long PPC_INST_LOGMPP | \
+					__PPC_RB(b))
 #define PPC_LWARX(t, a, b, eh)	stringify_in_c(.long PPC_INST_LWARX | \
 					___PPC_RT(t) | ___PPC_RA(a) | \
 					___PPC_RB(b) | __PPC_EH(eh))
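
PPC_LOGMPP() pairs with the MPPR SPR added in reg.h below: LOGMPP writes the cache log out, and a store to MPPR asks the hardware to prefetch it back. A hedged sketch of the restore side, mirroring how the HV KVM series uses these bits:

/* Sketch: abort any in-flight fetch, then prefetch the whole log. */
static void mpp_restore(void *buf)
{
	u64 mpp_addr = virt_to_phys(buf) & PPC_MPPE_ADDRESS_MASK;

	mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT);
	mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
}
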
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index bffd89d27301..c547b26371b8 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -225,6 +225,7 @@
 #define   CTRL_TE	0x00c00000	/* thread enable */
 #define   CTRL_RUNLATCH	0x1
 #define SPRN_DAWR	0xB4
+#define SPRN_MPPR	0xB8	/* Micro Partition Prefetch Register */
 #define SPRN_RPR	0xBA	/* Relative Priority Register */
 #define SPRN_CIABR	0xBB
 #define   CIABR_PRIV	0x3
@@ -944,9 +945,6 @@
  *	readable variant for reads, which can avoid a fault
  *	with KVM type virtualization.
  *
- * (*) Under KVM, the host SPRG1 is used to point to
- * the current VCPU data structure
- *
  * 32-bit 8xx:
  *	- SPRG0 scratch for exception vectors
  *	- SPRG1 scratch for exception vectors
@@ -1203,6 +1201,15 @@
1203 : "r" ((unsigned long)(v)) \ 1201 : "r" ((unsigned long)(v)) \
1204 : "memory") 1202 : "memory")
1205 1203
1204static inline unsigned long mfvtb (void)
1205{
1206#ifdef CONFIG_PPC_BOOK3S_64
1207 if (cpu_has_feature(CPU_FTR_ARCH_207S))
1208 return mfspr(SPRN_VTB);
1209#endif
1210 return 0;
1211}
1212
1206#ifdef __powerpc64__ 1213#ifdef __powerpc64__
1207#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E) 1214#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
1208#define mftb() ({unsigned long rval; \ 1215#define mftb() ({unsigned long rval; \
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 1d428e6007ca..03cbada59d3a 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -102,6 +102,15 @@ static inline u64 get_rtc(void)
 	return (u64)hi * 1000000000 + lo;
 }
 
+static inline u64 get_vtb(void)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		return mfvtb();
+#endif
+	return 0;
+}
+
 #ifdef CONFIG_PPC64
 static inline u64 get_tb(void)
 {