author     Paul Mackerras <paulus@samba.org>   2011-06-28 20:21:34 -0400
committer  Avi Kivity <avi@redhat.com>         2011-07-12 06:16:54 -0400
commit     de56a948b9182fbcf92cb8212f114de096c2d574 (patch)
tree       633ab73672aa2543b683686fc8fb023629c5f8f8 /arch/powerpc/include/asm
parent     3c42bf8a717cb636e0ed2ed77194669e2ac3ed56 (diff)
KVM: PPC: Add support for Book3S processors in hypervisor mode
This adds support for KVM running on 64-bit Book 3S processors, specifically POWER7, in hypervisor mode. Using hypervisor mode means that the guest can use the processor's supervisor mode. That means that the guest can execute privileged instructions and access privileged registers itself without trapping to the host. This gives excellent performance, but does mean that KVM cannot emulate a processor architecture other than the one that the hardware implements.

This code assumes that the guest is running paravirtualized using the PAPR (Power Architecture Platform Requirements) interface, which is the interface that IBM's PowerVM hypervisor uses. That means that existing Linux distributions that run on IBM pSeries machines will also run under KVM without modification. In order to communicate the PAPR hypercalls to qemu, this adds a new KVM_EXIT_PAPR_HCALL exit code to include/linux/kvm.h.

Currently the choice between book3s_hv support and book3s_pr support (i.e. the existing code, which runs the guest in user mode) has to be made at kernel configuration time, so a given kernel binary can only do one or the other.

This new book3s_hv code doesn't support MMIO emulation at present. Since we are running paravirtualized guests, this isn't a serious restriction.

With the guest running in supervisor mode, most exceptions go straight to the guest. We will never get data or instruction storage or segment interrupts, alignment interrupts, decrementer interrupts, program interrupts, single-step interrupts, etc., coming to the hypervisor from the guest. Therefore this introduces a new KVMTEST_NONHV macro for the exception entry path so that we don't have to do the KVM test on entry to those exception handlers. We do however get hypervisor decrementer, hypervisor data storage, hypervisor instruction storage, and hypervisor emulation assist interrupts, so we have to handle those.

In hypervisor mode, real-mode accesses can access all of RAM, not just a limited amount. Therefore we put all the guest state in the vcpu.arch and use the shadow_vcpu in the PACA only for temporary scratch space. We allocate the vcpu with kzalloc rather than vzalloc, and we don't use anything in the kvmppc_vcpu_book3s struct, so we don't allocate it. We don't have a shared page with the guest, but we still need a kvm_vcpu_arch_shared struct to store the values of various registers, so we include one in the vcpu_arch struct.

The POWER7 processor has a restriction that all threads in a core have to be in the same partition. MMU-on kernel code counts as a partition (partition 0), so we have to do a partition switch on every entry to and exit from the guest. At present we require the host and guest to run in single-thread mode because of this hardware restriction.

This code allocates a hashed page table for the guest and initializes it with HPTEs for the guest's Virtual Real Memory Area (VRMA). We require that the guest memory is allocated using 16MB huge pages, in order to simplify the low-level memory management. This also means that we can get away without tracking paging activity in the host for now, since huge pages can't be paged or swapped.

This also adds a few new exports needed by the book3s_hv code.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
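For context, here is a minimal userspace sketch (not part of this patch) of how a VMM such as qemu might consume the new KVM_EXIT_PAPR_HCALL exit reason; the papr_hcall layout (nr, ret, args[9]) is assumed from the kvm_run definition this series adds to include/linux/kvm.h, and vmm_emulate_hcall() is a hypothetical helper:

/*
 * Sketch only: dispatch fragment for the new KVM_EXIT_PAPR_HCALL exit.
 * vmm_emulate_hcall() is a hypothetical VMM-side helper; the papr_hcall
 * fields are assumptions taken from the kvm_run layout added by this
 * series, not guaranteed by this header-only diff.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* hypothetical PAPR hypercall emulator supplied by the VMM */
extern __u64 vmm_emulate_hcall(__u64 nr, __u64 *args);

static int run_vcpu_once(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;

	if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
		/* Emulate the hypercall; the value placed in papr_hcall.ret
		 * is intended to be handed back to the guest (r3) when the
		 * vcpu re-enters on the next KVM_RUN. */
		run->papr_hcall.ret = vmm_emulate_hcall(run->papr_hcall.nr,
							run->papr_hcall.args);
	}
	return 0;
}

On the kernel side, the new hcall_needed flag added to kvm_vcpu_arch below is what records that such a hypercall result from userspace is still pending for the vcpu.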
Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h    19
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h            4
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h       137
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h      2
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h    12
-rw-r--r--  arch/powerpc/include/asm/kvm_booke.h          4
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h          53
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h            6
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h        10
-rw-r--r--  arch/powerpc/include/asm/paca.h               2
-rw-r--r--  arch/powerpc/include/asm/reg.h                4
11 files changed, 224 insertions, 29 deletions
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 296c9b66c04a..69435da8f2ba 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -134,6 +134,17 @@ do_kvm_##n: \
 #define KVM_HANDLER_SKIP(area, h, n)
 #endif
 
+#ifdef CONFIG_KVM_BOOK3S_PR
+#define KVMTEST_PR(n)			__KVMTEST(n)
+#define KVM_HANDLER_PR(area, h, n)	__KVM_HANDLER(area, h, n)
+#define KVM_HANDLER_PR_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n)
+
+#else
+#define KVMTEST_PR(n)
+#define KVM_HANDLER_PR(area, h, n)
+#define KVM_HANDLER_PR_SKIP(area, h, n)
+#endif
+
 #define NOTEST(n)
 
 /*
@@ -210,7 +221,7 @@ label##_pSeries: \
 	HMT_MEDIUM;						\
 	SET_SCRATCH0(r13);	/* save r13 */			\
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common,	\
-				 EXC_STD, KVMTEST, vec)
+				 EXC_STD, KVMTEST_PR, vec)
 
 #define STD_EXCEPTION_HV(loc, vec, label)			\
 	. = loc;						\
@@ -227,8 +238,8 @@ label##_hv: \
 	beq	masked_##h##interrupt
 #define _SOFTEN_TEST(h)	__SOFTEN_TEST(h)
 
-#define SOFTEN_TEST(vec)					\
-	KVMTEST(vec);						\
+#define SOFTEN_TEST_PR(vec)					\
+	KVMTEST_PR(vec);					\
 	_SOFTEN_TEST(EXC_STD)
 
 #define SOFTEN_TEST_HV(vec)					\
@@ -248,7 +259,7 @@ label##_hv: \
 	.globl label##_pSeries;					\
 label##_pSeries:						\
 	_MASKABLE_EXCEPTION_PSERIES(vec, label,			\
-				    EXC_STD, SOFTEN_TEST)
+				    EXC_STD, SOFTEN_TEST_PR)
 
 #define MASKABLE_EXCEPTION_HV(loc, vec, label)			\
 	. = loc;						\
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 0951b17f4eb5..7b1f0e0fc653 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -64,8 +64,12 @@
 #define BOOK3S_INTERRUPT_PROGRAM	0x700
 #define BOOK3S_INTERRUPT_FP_UNAVAIL	0x800
 #define BOOK3S_INTERRUPT_DECREMENTER	0x900
+#define BOOK3S_INTERRUPT_HV_DECREMENTER	0x980
 #define BOOK3S_INTERRUPT_SYSCALL	0xc00
 #define BOOK3S_INTERRUPT_TRACE		0xd00
+#define BOOK3S_INTERRUPT_H_DATA_STORAGE	0xe00
+#define BOOK3S_INTERRUPT_H_INST_STORAGE	0xe20
+#define BOOK3S_INTERRUPT_H_EMUL_ASSIST	0xe40
 #define BOOK3S_INTERRUPT_PERFMON	0xf00
 #define BOOK3S_INTERRUPT_ALTIVEC	0xf20
 #define BOOK3S_INTERRUPT_VSX		0xf40
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 480fff6090db..5537c45d626c 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -116,6 +116,7 @@ extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
 extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
 extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
@@ -127,10 +128,12 @@ extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
 extern int kvmppc_mmu_hpte_sysinit(void);
 extern void kvmppc_mmu_hpte_sysexit(void);
+extern int kvmppc_mmu_hv_init(void);
 
 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
 extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
 extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
+extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
 extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 			   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
@@ -140,6 +143,7 @@ extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 extern void kvmppc_handler_lowmem_trampoline(void);
 extern void kvmppc_handler_trampoline_enter(void);
 extern void kvmppc_rmcall(ulong srr0, ulong srr1);
+extern void kvmppc_hv_entry_trampoline(void);
 extern void kvmppc_load_up_fpu(void);
 extern void kvmppc_load_up_altivec(void);
 extern void kvmppc_load_up_vsx(void);
@@ -151,6 +155,19 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 	return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
 }
 
+extern void kvm_return_point(void);
+
+/* Also add subarch specific defines */
+
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+#include <asm/kvm_book3s_32.h>
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/kvm_book3s_64.h>
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_PR
+
 static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
 {
 	return to_book3s(vcpu)->hior;
@@ -165,16 +182,6 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
 	vcpu->arch.shared->int_pending = 0;
 }
 
-static inline ulong dsisr(void)
-{
-	ulong r;
-	asm ( "mfdsisr %0 " : "=r" (r) );
-	return r;
-}
-
-extern void kvm_return_point(void);
-static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu);
-
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
 	if ( num < 14 ) {
@@ -281,6 +288,108 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
 
 	return crit;
 }
+#else /* CONFIG_KVM_BOOK3S_PR */
+
+static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
+			unsigned long pending_now, unsigned long old_pending)
+{
+	/* Recalculate LPCR:MER based on the presence of
+	 * a pending external interrupt
+	 */
+	if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &pending_now) ||
+	    test_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &pending_now))
+		vcpu->arch.lpcr |= LPCR_MER;
+	else
+		vcpu->arch.lpcr &= ~((u64)LPCR_MER);
+}
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+	vcpu->arch.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+	return vcpu->arch.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+	vcpu->arch.cr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.cr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+{
+	vcpu->arch.xer = val;
+}
+
+static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.xer;
+}
+
+static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.ctr = val;
+}
+
+static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.ctr;
+}
+
+static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.lr = val;
+}
+
+static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.lr;
+}
+
+static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.pc = val;
+}
+
+static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.pc;
+}
+
+static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+{
+	ulong pc = kvmppc_get_pc(vcpu);
+
+	/* Load the instruction manually if it failed to do so in the
+	 * exit path */
+	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
+
+	return vcpu->arch.last_inst;
+}
+
+static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault_dar;
+}
+
+static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+#endif
 
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
@@ -289,12 +398,4 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
 
 #define INS_DCBZ			0x7c0007ec
 
-/* Also add subarch specific defines */
-
-#ifdef CONFIG_PPC_BOOK3S_32
-#include <asm/kvm_book3s_32.h>
-#else
-#include <asm/kvm_book3s_64.h>
-#endif
-
 #endif /* __ASM_KVM_BOOK3S_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 4cadd612d575..5f73388ea0af 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -20,9 +20,11 @@
 #ifndef __ASM_KVM_BOOK3S_64_H__
 #define __ASM_KVM_BOOK3S_64_H__
 
+#ifdef CONFIG_KVM_BOOK3S_PR
 static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
 {
 	return &get_paca()->shadow_vcpu;
 }
+#endif
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 312617529864..b7b039532fbc 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -70,10 +70,22 @@ kvmppc_resume_\intno:
 struct kvmppc_host_state {
 	ulong host_r1;
 	ulong host_r2;
+	ulong host_msr;
 	ulong vmhandler;
 	ulong scratch0;
 	ulong scratch1;
 	u8 in_guest;
+
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	struct kvm_vcpu *kvm_vcpu;
+	u64 dabr;
+	u64 host_mmcr[3];
+	u32 host_pmc[6];
+	u64 host_purr;
+	u64 host_spurr;
+	u64 host_dscr;
+	u64 dec_expires;
+#endif
 };
 
 struct kvmppc_book3s_shadow_vcpu {
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index 9c9ba3d59b1b..a90e09188777 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -93,4 +93,8 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 	return vcpu->arch.fault_dear;
 }
 
+static inline ulong kvmppc_get_msr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.shared->msr;
+}
 #endif /* __ASM_KVM_BOOKE_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 069eb9fc6c41..4a3f790d5fc4 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -33,7 +33,9 @@
 /* memory slots that does not exposed to userspace */
 #define KVM_PRIVATE_MEM_SLOTS 4
 
+#ifdef CONFIG_KVM_MMIO
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#endif
 
 /* We don't currently support large pages. */
 #define KVM_HPAGE_GFN_SHIFT(x)	0
@@ -133,7 +135,26 @@ struct kvmppc_exit_timing {
 	};
 };
 
+struct kvmppc_pginfo {
+	unsigned long pfn;
+	atomic_t refcnt;
+};
+
 struct kvm_arch {
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	unsigned long hpt_virt;
+	unsigned long ram_npages;
+	unsigned long ram_psize;
+	unsigned long ram_porder;
+	struct kvmppc_pginfo *ram_pginfo;
+	unsigned int lpid;
+	unsigned int host_lpid;
+	unsigned long host_lpcr;
+	unsigned long sdr1;
+	unsigned long host_sdr1;
+	int tlbie_lock;
+	unsigned short last_vcpu[NR_CPUS];
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
 };
 
 struct kvmppc_pte {
@@ -190,7 +211,7 @@ struct kvm_vcpu_arch {
 	ulong rmcall;
 	ulong host_paca_phys;
 	struct kvmppc_slb slb[64];
-	int slb_max;		/* # valid entries in slb[] */
+	int slb_max;		/* 1 + index of last valid entry in slb[] */
 	int slb_nr;		/* total number of entries in SLB */
 	struct kvmppc_mmu mmu;
 #endif
@@ -212,7 +233,7 @@ struct kvm_vcpu_arch {
 #endif
 
 #ifdef CONFIG_VSX
-	u64 vsr[32];
+	u64 vsr[64];
 #endif
 
 #ifdef CONFIG_PPC_BOOK3S
@@ -220,18 +241,24 @@ struct kvm_vcpu_arch {
 	u32 qpr[32];
 #endif
 
-#ifdef CONFIG_BOOKE
 	ulong pc;
 	ulong ctr;
 	ulong lr;
 
 	ulong xer;
 	u32 cr;
-#endif
 
 #ifdef CONFIG_PPC_BOOK3S
 	ulong hflags;
 	ulong guest_owned_ext;
+	ulong purr;
+	ulong spurr;
+	ulong lpcr;
+	ulong dscr;
+	ulong amr;
+	ulong uamor;
+	u32 ctrl;
+	ulong dabr;
 #endif
 	u32 vrsave; /* also USPRG0 */
 	u32 mmucr;
@@ -270,6 +297,9 @@ struct kvm_vcpu_arch {
 	u32 dbcr1;
 	u32 dbsr;
 
+	u64 mmcr[3];
+	u32 pmc[6];
+
 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct mutex exit_timing_lock;
 	struct kvmppc_exit_timing timing_exit;
@@ -284,8 +314,12 @@ struct kvm_vcpu_arch {
 	struct dentry *debugfs_exit_timing;
 #endif
 
+#ifdef CONFIG_PPC_BOOK3S
+	ulong fault_dar;
+	u32 fault_dsisr;
+#endif
+
 #ifdef CONFIG_BOOKE
-	u32 last_inst;
 	ulong fault_dear;
 	ulong fault_esr;
 	ulong queued_dear;
@@ -300,16 +334,25 @@ struct kvm_vcpu_arch {
 	u8 dcr_is_write;
 	u8 osi_needed;
 	u8 osi_enabled;
+	u8 hcall_needed;
 
 	u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
 
 	struct hrtimer dec_timer;
 	struct tasklet_struct tasklet;
 	u64 dec_jiffies;
+	u64 dec_expires;
 	unsigned long pending_exceptions;
+	u16 last_cpu;
+	u32 last_inst;
+	int trap;
 	struct kvm_vcpu_arch_shared *shared;
 	unsigned long magic_page_pa; /* phys addr to map the magic page to */
 	unsigned long magic_page_ea; /* effect. addr to map the magic page to */
+
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	struct kvm_vcpu_arch_shared shregs;
+#endif
 };
 
 #endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 48b7ab76de2d..0dafd53c30ed 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -112,6 +112,12 @@ extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
 extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
 extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
 
+extern long kvmppc_alloc_hpt(struct kvm *kvm);
+extern void kvmppc_free_hpt(struct kvm *kvm);
+extern long kvmppc_prepare_vrma(struct kvm *kvm,
+				struct kvm_userspace_memory_region *mem);
+extern void kvmppc_map_vrma(struct kvm *kvm,
+			    struct kvm_userspace_memory_region *mem);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
 extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index d865bd909c7d..b445e0af4c2b 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -90,13 +90,19 @@ extern char initial_stab[];
 
 #define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
 #define HPTE_R_TS		ASM_CONST(0x4000000000000000)
+#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
 #define HPTE_R_RPN_SHIFT	12
-#define HPTE_R_RPN		ASM_CONST(0x3ffffffffffff000)
-#define HPTE_R_FLAGS		ASM_CONST(0x00000000000003ff)
+#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
 #define HPTE_R_PP		ASM_CONST(0x0000000000000003)
 #define HPTE_R_N		ASM_CONST(0x0000000000000004)
+#define HPTE_R_G		ASM_CONST(0x0000000000000008)
+#define HPTE_R_M		ASM_CONST(0x0000000000000010)
+#define HPTE_R_I		ASM_CONST(0x0000000000000020)
+#define HPTE_R_W		ASM_CONST(0x0000000000000040)
+#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
 #define HPTE_R_C		ASM_CONST(0x0000000000000080)
 #define HPTE_R_R		ASM_CONST(0x0000000000000100)
+#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)
 
 #define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
 #define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 58f4a18ef60c..a6da12859959 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -147,8 +147,10 @@ struct paca_struct {
 	struct dtl_entry *dtl_curr;	/* pointer corresponding to dtl_ridx */
 
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_PR
 	/* We use this to store guest state in */
 	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
+#endif
 	struct kvmppc_host_state kvm_hstate;
 #endif
 };
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index d879a6b91635..36a611b398c5 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -189,6 +189,9 @@
 #define SPRN_CTR	0x009	/* Count Register */
 #define SPRN_DSCR	0x11
 #define SPRN_CFAR	0x1c	/* Come From Address Register */
+#define SPRN_AMR	0x1d	/* Authority Mask Register */
+#define SPRN_UAMOR	0x9d	/* User Authority Mask Override Register */
+#define SPRN_AMOR	0x15d	/* Authority Mask Override Register */
 #define SPRN_ACOP	0x1F	/* Available Coprocessor Register */
 #define SPRN_CTRLF	0x088
 #define SPRN_CTRLT	0x098
@@ -252,6 +255,7 @@
 #define LPCR_RMI	0x00000002	/* real mode is cache inhibit */
 #define LPCR_HDICE	0x00000001	/* Hyp Decr enable (HV,PR,EE) */
 #define SPRN_LPID	0x13F	/* Logical Partition Identifier */
+#define LPID_RSVD	0x3ff	/* Reserved LPID for partn switching */
 #define SPRN_HMER	0x150	/* Hardware m? error recovery */
 #define SPRN_HMEER	0x151	/* Hardware m? enable error recovery */
 #define SPRN_HEIR	0x153	/* Hypervisor Emulated Instruction Register */