author	Scott Wood <scottwood@freescale.com>	2011-12-20 10:34:43 -0500
committer	Avi Kivity <avi@redhat.com>	2012-04-08 05:51:19 -0400
commit	d30f6e480055e5be12e7a03fd11ea912a451daa5 (patch)
tree	e6c367e6f1da4da67b3a395a1a735a09e52067c0 /arch
parent	cfac57847a67c4903f34a77e971521531bbc7c77 (diff)
KVM: PPC: booke: category E.HV (GS-mode) support
Chips such as e500mc that implement category E.HV in Power ISA 2.06
provide hardware virtualization features, including a new MSR mode for
guest state.  The guest OS can perform many operations without trapping
into the hypervisor, including transitions to and from guest userspace.

Since we can use SRR1[GS] to reliably tell whether an exception came from
guest state, instead of messing around with IVPR, we use DO_KVM similarly
to book3s.

Current issues include:
 - Machine checks from guest state are not routed to the host handler.
 - The guest can cause a host oops by executing an emulated instruction
   in a page that lacks read permission.  Existing e500/4xx support has
   the same problem.

Includes work by Ashish Kalra <Ashish.Kalra@freescale.com>,
Varun Sethi <Varun.Sethi@freescale.com>, and
Liu Yu <yu.liu@freescale.com>.

Signed-off-by: Scott Wood <scottwood@freescale.com>
[agraf: remove pt_regs usage]
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
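Every host exception vector in GS-mode begins with the DO_KVM hook added below
(head_booke.h / kvm_booke_hv_asm.h).  As a rough C rendering of the decision it
makes -- illustrative only; the two handler names are hypothetical stand-ins, and
the real code is the mtocrf/bf assembly in this patch, which tests MSR[GS] in the
saved SRR1 variant without clobbering a GPR:

    /* Sketch only: what DO_KVM decides at each exception vector. */
    static void exception_entry(unsigned long srr1)
    {
            if (srr1 & MSR_GS)
                    kvm_guest_exception();  /* hypothetical: trap came from guest state */
            else
                    host_exception();       /* hypothetical: normal host handler */
    }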
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/dbell.h            |   1
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h          |   8
-rw-r--r--  arch/powerpc/include/asm/kvm_booke_hv_asm.h |  49
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h         |  19
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h          |   3
-rw-r--r--  arch/powerpc/include/asm/mmu-book3e.h       |   6
-rw-r--r--  arch/powerpc/include/asm/processor.h        |   3
-rw-r--r--  arch/powerpc/include/asm/reg.h              |   2
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h        |  34
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c           |  15
-rw-r--r--  arch/powerpc/kernel/head_booke.h            |  28
-rw-r--r--  arch/powerpc/kvm/Kconfig                    |   3
-rw-r--r--  arch/powerpc/kvm/booke.c                    | 309
-rw-r--r--  arch/powerpc/kvm/booke.h                    |  24
-rw-r--r--  arch/powerpc/kvm/booke_emulate.c            |  23
-rw-r--r--  arch/powerpc/kvm/bookehv_interrupts.S       | 587
-rw-r--r--  arch/powerpc/kvm/powerpc.c                  |   5
-rw-r--r--  arch/powerpc/kvm/timing.h                   |   6
18 files changed, 1058 insertions(+), 67 deletions(-)
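Before the diff itself, one orientation note: several of the new bit definitions
(MAS5_SGS, MAS8_TGS, MAS8_TLPID, SPRN_LPID) exist so that TLB entries can be
tagged with the owning guest's logical partition ID.  A minimal sketch of how a
host might write such an entry, assuming the MAS8 bits from mmu-book3e.h and the
kvm_arch.lpid field this patch adds; the function itself is illustrative, not
code from the patch:

    /* Tag the TLB entry being written as guest space, owned by this
     * guest's LPID; MAS0-MAS7 (EPN, RPN, permissions) are assumed to
     * have been loaded elsewhere. */
    static void write_guest_tlb_entry(struct kvm_vcpu *vcpu)
    {
            u32 mas8 = MAS8_TGS | (vcpu->kvm->arch.lpid & MAS8_TLPID);

            mtspr(SPRN_MAS8, mas8);
            asm volatile("isync; tlbwe; isync" : : : "memory");
    }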
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index efa74ac44a35..d7365b01f0c4 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -19,6 +19,7 @@
 
 #define PPC_DBELL_MSG_BRDCAST	(0x04000000)
 #define PPC_DBELL_TYPE(x)	(((x) & 0xf) << (63-36))
+#define PPC_DBELL_LPID(x)	((x) << (63 - 49))
 enum ppc_dbell {
 	PPC_DBELL = 0,		/* doorbell */
 	PPC_DBELL_CRIT = 1,	/* critical doorbell */
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 7b1f0e0fc653..097815233284 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -48,6 +48,14 @@
 #define BOOKE_INTERRUPT_SPE_FP_DATA 33
 #define BOOKE_INTERRUPT_SPE_FP_ROUND 34
 #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
+#define BOOKE_INTERRUPT_DOORBELL 36
+#define BOOKE_INTERRUPT_DOORBELL_CRITICAL 37
+
+/* booke_hv */
+#define BOOKE_INTERRUPT_GUEST_DBELL 38
+#define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39
+#define BOOKE_INTERRUPT_HV_SYSCALL 40
+#define BOOKE_INTERRUPT_HV_PRIV 41
 
 /* book3s */
 
diff --git a/arch/powerpc/include/asm/kvm_booke_hv_asm.h b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
new file mode 100644
index 000000000000..30a600fa1b6a
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_KVM_BOOKE_HV_ASM_H
+#define ASM_KVM_BOOKE_HV_ASM_H
+
+#ifdef __ASSEMBLY__
+
+/*
+ * All exceptions from guest state must go through KVM
+ * (except for those which are delivered directly to the guest) --
+ * there are no exceptions for which we fall through directly to
+ * the normal host handler.
+ *
+ * Expected inputs (normal exceptions):
+ *   SCRATCH0 = saved r10
+ *   r10 = thread struct
+ *   r11 = appropriate SRR1 variant (currently used as scratch)
+ *   r13 = saved CR
+ *   *(r10 + THREAD_NORMSAVE(0)) = saved r11
+ *   *(r10 + THREAD_NORMSAVE(2)) = saved r13
+ *
+ * Expected inputs (crit/mcheck/debug exceptions):
+ *   appropriate SCRATCH = saved r8
+ *   r8 = exception level stack frame
+ *   r9 = *(r8 + _CCR) = saved CR
+ *   r11 = appropriate SRR1 variant (currently used as scratch)
+ *   *(r8 + GPR9) = saved r9
+ *   *(r8 + GPR10) = saved r10 (r10 not yet clobbered)
+ *   *(r8 + GPR11) = saved r11
+ */
+.macro DO_KVM intno srr1
+#ifdef CONFIG_KVM_BOOKE_HV
+BEGIN_FTR_SECTION
+	mtocrf	0x80, r11	/* check MSR[GS] without clobbering reg */
+	bf	3, kvmppc_resume_\intno\()_\srr1
+	b	kvmppc_handler_\intno\()_\srr1
+kvmppc_resume_\intno\()_\srr1:
+END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+#endif
+.endm
+
+#endif /* __ASSEMBLY__ */
+#endif /* ASM_KVM_BOOKE_HV_ASM_H */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 5b81cbc43a42..e645623728fc 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -106,6 +106,8 @@ struct kvm_vcpu_stat {
 	u32 dec_exits;
 	u32 ext_intr_exits;
 	u32 halt_wakeup;
+	u32 dbell_exits;
+	u32 gdbell_exits;
 #ifdef CONFIG_PPC_BOOK3S
 	u32 pf_storage;
 	u32 pf_instruc;
@@ -140,6 +142,7 @@ enum kvm_exit_types {
 	EMULATED_TLBSX_EXITS,
 	EMULATED_TLBWE_EXITS,
 	EMULATED_RFI_EXITS,
+	EMULATED_RFCI_EXITS,
 	DEC_EXITS,
 	EXT_INTR_EXITS,
 	HALT_WAKEUP,
@@ -147,6 +150,8 @@ enum kvm_exit_types {
 	FP_UNAVAIL,
 	DEBUG_EXITS,
 	TIMEINGUEST,
+	DBELL_EXITS,
+	GDBELL_EXITS,
 	__NUMBER_OF_KVM_EXIT_TYPES
 };
 
@@ -217,10 +222,10 @@ struct kvm_arch_memory_slot {
 };
 
 struct kvm_arch {
+	unsigned int lpid;
 #ifdef CONFIG_KVM_BOOK3S_64_HV
 	unsigned long hpt_virt;
 	struct revmap_entry *revmap;
-	unsigned int lpid;
 	unsigned int host_lpid;
 	unsigned long host_lpcr;
 	unsigned long sdr1;
@@ -345,6 +350,17 @@ struct kvm_vcpu_arch {
 	u64 vsr[64];
 #endif
 
+#ifdef CONFIG_KVM_BOOKE_HV
+	u32 host_mas4;
+	u32 host_mas6;
+	u32 shadow_epcr;
+	u32 epcr;
+	u32 shadow_msrp;
+	u32 eplc;
+	u32 epsc;
+	u32 oldpir;
+#endif
+
 #ifdef CONFIG_PPC_BOOK3S
 	/* For Gekko paired singles */
 	u32 qpr[32];
@@ -428,6 +444,7 @@ struct kvm_vcpu_arch {
 	ulong queued_esr;
 	u32 tlbcfg[4];
 	u32 mmucfg;
+	u32 epr;
 #endif
 	gpa_t paddr_accessed;
 
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 731e920eda1e..e709975702a6 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -139,6 +139,9 @@ extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem);
 
+extern int kvmppc_bookehv_init(void);
+extern void kvmppc_bookehv_exit(void);
+
 /*
  * Cuts out inst bits with ordering according to spec.
  * That means the leftmost bit is zero. All given bits are included.
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index cdb5421877e2..eeabcdbc30f7 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -104,6 +104,8 @@
 #define MAS4_TSIZED_MASK	0x00000f80	/* Default TSIZE */
 #define MAS4_TSIZED_SHIFT	7
 
+#define MAS5_SGS		0x80000000
+
 #define MAS6_SPID0		0x3FFF0000
 #define MAS6_SPID1		0x00007FFE
 #define MAS6_ISIZE(x)		MAS1_TSIZE(x)
@@ -118,6 +120,10 @@
 
 #define MAS7_RPN		0xFFFFFFFF
 
+#define MAS8_TGS		0x80000000 /* Guest space */
+#define MAS8_VF			0x40000000 /* Virtualization Fault */
+#define MAS8_TLPID		0x000000ff
+
 /* Bit definitions for MMUCFG */
 #define MMUCFG_MAVN	0x00000003	/* MMU Architecture Version Number */
 #define MMUCFG_MAVN_V1	0x00000000	/* v1.0 */
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 8e2d0371fe1e..2a25ab0f5896 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -243,6 +243,9 @@ struct thread_struct {
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	void*		kvm_shadow_vcpu; /* KVM internal data */
 #endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
+#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
+	struct kvm_vcpu	*kvm_vcpu;
+#endif
 #ifdef CONFIG_PPC64
 	unsigned long	dscr;
 	int		dscr_inherit;
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 9d7f0fb69028..f0cb7f461b9d 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -257,7 +257,9 @@
 #define   LPCR_LPES_SH	2
 #define   LPCR_RMI	0x00000002	/* real mode is cache inhibit */
 #define   LPCR_HDICE	0x00000001	/* Hyp Decr enable (HV,PR,EE) */
+#ifndef SPRN_LPID
 #define SPRN_LPID	0x13F	/* Logical Partition Identifier */
+#endif
 #define   LPID_RSVD	0x3ff	/* Reserved LPID for partn switching */
 #define	SPRN_HMER	0x150	/* Hardware m? error recovery */
 #define	SPRN_HMEER	0x151	/* Hardware m? enable error recovery */
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index b86faa9107da..815e404f8c18 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -61,18 +61,30 @@ extern u32 booke_wdt_period;
 #define SPRN_SPRG7W	0x117	/* Special Purpose Register General 7 Write */
 #define SPRN_EPCR	0x133	/* Embedded Processor Control Register */
 #define SPRN_DBCR2	0x136	/* Debug Control Register 2 */
+#define SPRN_MSRP	0x137	/* MSR Protect Register */
 #define SPRN_IAC3	0x13A	/* Instruction Address Compare 3 */
 #define SPRN_IAC4	0x13B	/* Instruction Address Compare 4 */
 #define SPRN_DVC1	0x13E	/* Data Value Compare Register 1 */
 #define SPRN_DVC2	0x13F	/* Data Value Compare Register 2 */
+#define SPRN_LPID	0x152	/* Logical Partition ID */
 #define SPRN_MAS8	0x155	/* MMU Assist Register 8 */
 #define SPRN_TLB0PS	0x158	/* TLB 0 Page Size Register */
 #define SPRN_TLB1PS	0x159	/* TLB 1 Page Size Register */
 #define SPRN_MAS5_MAS6	0x15c	/* MMU Assist Register 5 || 6 */
 #define SPRN_MAS8_MAS1	0x15d	/* MMU Assist Register 8 || 1 */
 #define SPRN_EPTCFG	0x15e	/* Embedded Page Table Config */
+#define SPRN_GSPRG0	0x170	/* Guest SPRG0 */
+#define SPRN_GSPRG1	0x171	/* Guest SPRG1 */
+#define SPRN_GSPRG2	0x172	/* Guest SPRG2 */
+#define SPRN_GSPRG3	0x173	/* Guest SPRG3 */
 #define SPRN_MAS7_MAS3	0x174	/* MMU Assist Register 7 || 3 */
 #define SPRN_MAS0_MAS1	0x175	/* MMU Assist Register 0 || 1 */
+#define SPRN_GSRR0	0x17A	/* Guest SRR0 */
+#define SPRN_GSRR1	0x17B	/* Guest SRR1 */
+#define SPRN_GEPR	0x17C	/* Guest EPR */
+#define SPRN_GDEAR	0x17D	/* Guest DEAR */
+#define SPRN_GPIR	0x17E	/* Guest PIR */
+#define SPRN_GESR	0x17F	/* Guest Exception Syndrome Register */
 #define SPRN_IVOR0	0x190	/* Interrupt Vector Offset Register 0 */
 #define SPRN_IVOR1	0x191	/* Interrupt Vector Offset Register 1 */
 #define SPRN_IVOR2	0x192	/* Interrupt Vector Offset Register 2 */
@@ -93,6 +105,13 @@ extern u32 booke_wdt_period;
 #define SPRN_IVOR39	0x1B1	/* Interrupt Vector Offset Register 39 */
 #define SPRN_IVOR40	0x1B2	/* Interrupt Vector Offset Register 40 */
 #define SPRN_IVOR41	0x1B3	/* Interrupt Vector Offset Register 41 */
+#define SPRN_GIVOR2	0x1B8	/* Guest IVOR2 */
+#define SPRN_GIVOR3	0x1B9	/* Guest IVOR3 */
+#define SPRN_GIVOR4	0x1BA	/* Guest IVOR4 */
+#define SPRN_GIVOR8	0x1BB	/* Guest IVOR8 */
+#define SPRN_GIVOR13	0x1BC	/* Guest IVOR13 */
+#define SPRN_GIVOR14	0x1BD	/* Guest IVOR14 */
+#define SPRN_GIVPR	0x1BF	/* Guest IVPR */
 #define SPRN_SPEFSCR	0x200	/* SPE & Embedded FP Status & Control */
 #define SPRN_BBEAR	0x201	/* Branch Buffer Entry Address Register */
 #define SPRN_BBTAR	0x202	/* Branch Buffer Target Address Register */
@@ -245,6 +264,10 @@ extern u32 booke_wdt_period;
 #define MCSR_LDG	0x00002000UL	/* Guarded Load */
 #define MCSR_TLBSYNC	0x00000002UL	/* Multiple tlbsyncs detected */
 #define MCSR_BSL2_ERR	0x00000001UL	/* Backside L2 cache error */
+
+#define MSRP_UCLEP	0x04000000	/* Protect MSR[UCLE] */
+#define MSRP_DEP	0x00000200	/* Protect MSR[DE] */
+#define MSRP_PMMP	0x00000004	/* Protect MSR[PMM] */
 #endif
 
 #ifdef CONFIG_E200
@@ -599,6 +622,17 @@ extern u32 booke_wdt_period;
 #define SPRN_EPCR_DMIUH		0x00400000	/* Disable MAS Interrupt updates
 						 * for hypervisor */
 
+/* Bit definitions for EPLC/EPSC */
+#define EPC_EPR		0x80000000 /* 1 = user, 0 = kernel */
+#define EPC_EPR_SHIFT	31
+#define EPC_EAS		0x40000000 /* Address Space */
+#define EPC_EAS_SHIFT	30
+#define EPC_EGS		0x20000000 /* 1 = guest, 0 = hypervisor */
+#define EPC_EGS_SHIFT	29
+#define EPC_ELPID	0x00ff0000
+#define EPC_ELPID_SHIFT	16
+#define EPC_EPID	0x00003fff
+#define EPC_EPID_SHIFT	0
 
 /*
  * The IBM-403 is an even more odd special case, as it is much
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 34b8afe94a50..bbede5882c5b 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -116,6 +116,9 @@ int main(void)
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
 #endif
+#ifdef CONFIG_KVM_BOOKE_HV
+	DEFINE(THREAD_KVM_VCPU, offsetof(struct thread_struct, kvm_vcpu));
+#endif
 
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
 	DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
@@ -387,6 +390,7 @@ int main(void)
 #ifdef CONFIG_KVM
 	DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
 	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
+	DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
 	DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
 	DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
@@ -429,9 +433,11 @@ int main(void)
 	DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4));
 	DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6));
 
+	DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+	DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
+
 	/* book3s */
 #ifdef CONFIG_KVM_BOOK3S_64_HV
-	DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
 	DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
 	DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
 	DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
@@ -446,7 +452,6 @@ int main(void)
 	DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
 #endif
 #ifdef CONFIG_PPC_BOOK3S
-	DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
 	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
 	DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
 	DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
@@ -597,6 +602,12 @@ int main(void)
 	DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
 #endif
 
+#ifdef CONFIG_KVM_BOOKE_HV
+	DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4));
+	DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6));
+	DEFINE(VCPU_EPLC, offsetof(struct kvm_vcpu, arch.eplc));
+#endif
+
 #ifdef CONFIG_KVM_EXIT_TIMING
 	DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
 						arch.timing_exit.tv32.tbu));
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 51fd0724e095..5f051eeb93a2 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -3,6 +3,7 @@
 
 #include <asm/ptrace.h>	/* for STACK_FRAME_REGS_MARKER */
 #include <asm/kvm_asm.h>
+#include <asm/kvm_booke_hv_asm.h>
 
 /*
  * Macros used for common Book-e exception handling
@@ -36,8 +37,9 @@
 	stw	r11, THREAD_NORMSAVE(0)(r10);				\
 	stw	r13, THREAD_NORMSAVE(2)(r10);				\
 	mfcr	r13;			/* save CR in r13 for now */	\
-	mfspr	r11,SPRN_SRR1;		/* check whether user or kernel */\
-	andi.	r11,r11,MSR_PR;						\
+	mfspr	r11, SPRN_SRR1;						\
+	DO_KVM	BOOKE_INTERRUPT_##intno SPRN_SRR1;			\
+	andi.	r11, r11, MSR_PR;	/* check whether user or kernel */\
 	mr	r11, r1;						\
 	beq	1f;							\
 	/* if from user, start at top of this thread's kernel stack */	\
@@ -123,8 +125,9 @@
 	stw	r10,GPR10(r8);						\
 	stw	r11,GPR11(r8);						\
 	stw	r9,_CCR(r8);		/* save CR on stack */		\
-	mfspr	r10,exc_level_srr1;	/* check whether user or kernel */\
-	andi.	r10,r10,MSR_PR;						\
+	mfspr	r11,exc_level_srr1;	/* check whether user or kernel */\
+	DO_KVM	BOOKE_INTERRUPT_##intno exc_level_srr1;			\
+	andi.	r11,r11,MSR_PR;						\
 	mfspr	r11,SPRN_SPRG_THREAD;	/* if from user, start at top of */\
 	lwz	r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
 	addi	r11,r11,EXC_LVL_FRAME_OVERHEAD;	/* allocate stack frame */\
@@ -173,6 +176,23 @@
 			SPRN_MCSRR0, SPRN_MCSRR1)
 
 /*
+ * Guest Doorbell -- this is a bit odd in that it uses GSRR0/1 despite
+ * being delivered to the host.  This exception can only happen
+ * inside a KVM guest -- so we just handle up to the DO_KVM rather
+ * than try to fit this into one of the existing prolog macros.
+ */
+#define GUEST_DOORBELL_EXCEPTION \
+	START_EXCEPTION(GuestDoorbell); \
+	mtspr	SPRN_SPRG_WSCRATCH0, r10;	/* save one register */ \
+	mfspr	r10, SPRN_SPRG_THREAD; \
+	stw	r11, THREAD_NORMSAVE(0)(r10); \
+	mfspr	r11, SPRN_SRR1; \
+	stw	r13, THREAD_NORMSAVE(2)(r10); \
+	mfcr	r13; /* save CR in r13 for now */ \
+	DO_KVM	BOOKE_INTERRUPT_GUEST_DBELL SPRN_GSRR1; \
+	trap
+
+/*
  * Exception vectors.
  */
 #define START_EXCEPTION(label) \
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 8f64709ae331..2c33cd336434 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -90,6 +90,9 @@ config KVM_BOOK3S_64_PR
 	depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV
 	select KVM_BOOK3S_PR
 
+config KVM_BOOKE_HV
+	bool
+
 config KVM_440
 	bool "KVM support for PowerPC 440 processors"
 	depends on EXPERIMENTAL && 44x
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 2ee9bae38328..75dbaeb2efa3 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -17,6 +17,8 @@
  *
  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
  *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ *          Scott Wood <scottwood@freescale.com>
+ *          Varun Sethi <varun.sethi@freescale.com>
  */
 
 #include <linux/errno.h>
@@ -30,9 +32,12 @@
 #include <asm/cputable.h>
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
-#include "timing.h"
 #include <asm/cacheflush.h>
+#include <asm/dbell.h>
+#include <asm/hw_irq.h>
+#include <asm/irq.h>
 
+#include "timing.h"
 #include "booke.h"
 
 unsigned long kvmppc_booke_handlers;
@@ -55,6 +60,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
55 { "dec", VCPU_STAT(dec_exits) }, 60 { "dec", VCPU_STAT(dec_exits) },
56 { "ext_intr", VCPU_STAT(ext_intr_exits) }, 61 { "ext_intr", VCPU_STAT(ext_intr_exits) },
57 { "halt_wakeup", VCPU_STAT(halt_wakeup) }, 62 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
63 { "doorbell", VCPU_STAT(dbell_exits) },
64 { "guest doorbell", VCPU_STAT(gdbell_exits) },
58 { NULL } 65 { NULL }
59}; 66};
60 67
@@ -121,6 +128,10 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
 {
 	u32 old_msr = vcpu->arch.shared->msr;
 
+#ifdef CONFIG_KVM_BOOKE_HV
+	new_msr |= MSR_GS;
+#endif
+
 	vcpu->arch.shared->msr = new_msr;
 
 	kvmppc_mmu_msr_notify(vcpu, old_msr);
@@ -195,6 +206,75 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
 	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
 }
 
+static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+	mtspr(SPRN_GSRR0, srr0);
+	mtspr(SPRN_GSRR1, srr1);
+#else
+	vcpu->arch.shared->srr0 = srr0;
+	vcpu->arch.shared->srr1 = srr1;
+#endif
+}
+
+static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+	vcpu->arch.csrr0 = srr0;
+	vcpu->arch.csrr1 = srr1;
+}
+
+static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
+		vcpu->arch.dsrr0 = srr0;
+		vcpu->arch.dsrr1 = srr1;
+	} else {
+		set_guest_csrr(vcpu, srr0, srr1);
+	}
+}
+
+static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+	vcpu->arch.mcsrr0 = srr0;
+	vcpu->arch.mcsrr1 = srr1;
+}
+
+static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+	return mfspr(SPRN_GDEAR);
+#else
+	return vcpu->arch.shared->dar;
+#endif
+}
+
+static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+	mtspr(SPRN_GDEAR, dear);
+#else
+	vcpu->arch.shared->dar = dear;
+#endif
+}
+
+static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+	return mfspr(SPRN_GESR);
+#else
+	return vcpu->arch.shared->esr;
+#endif
+}
+
+static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+	mtspr(SPRN_GESR, esr);
+#else
+	vcpu->arch.shared->esr = esr;
+#endif
+}
+
 /* Deliver the interrupt of the corresponding priority, if possible. */
 static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                         unsigned int priority)
@@ -206,6 +286,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
 	bool crit;
 	bool keep_irq = false;
+	enum int_class int_class;
 
 	/* Truncate crit indicators in 32 bit mode */
 	if (!(vcpu->arch.shared->msr & MSR_SF)) {
@@ -241,16 +322,20 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 	case BOOKE_IRQPRIO_AP_UNAVAIL:
 	case BOOKE_IRQPRIO_ALIGNMENT:
 		allowed = 1;
-		msr_mask = MSR_CE|MSR_ME|MSR_DE;
+		msr_mask = MSR_GS | MSR_CE | MSR_ME | MSR_DE;
+		int_class = INT_CLASS_NONCRIT;
 		break;
 	case BOOKE_IRQPRIO_CRITICAL:
-	case BOOKE_IRQPRIO_WATCHDOG:
 		allowed = vcpu->arch.shared->msr & MSR_CE;
-		msr_mask = MSR_ME;
+		allowed = allowed && !crit;
+		msr_mask = MSR_GS | MSR_ME;
+		int_class = INT_CLASS_CRIT;
 		break;
 	case BOOKE_IRQPRIO_MACHINE_CHECK:
 		allowed = vcpu->arch.shared->msr & MSR_ME;
-		msr_mask = 0;
+		allowed = allowed && !crit;
+		msr_mask = MSR_GS;
+		int_class = INT_CLASS_MC;
 		break;
 	case BOOKE_IRQPRIO_DECREMENTER:
 	case BOOKE_IRQPRIO_FIT:
@@ -259,28 +344,62 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 	case BOOKE_IRQPRIO_EXTERNAL:
 		allowed = vcpu->arch.shared->msr & MSR_EE;
 		allowed = allowed && !crit;
-		msr_mask = MSR_CE|MSR_ME|MSR_DE;
+		msr_mask = MSR_GS | MSR_CE | MSR_ME | MSR_DE;
+		int_class = INT_CLASS_NONCRIT;
 		break;
 	case BOOKE_IRQPRIO_DEBUG:
 		allowed = vcpu->arch.shared->msr & MSR_DE;
-		msr_mask = MSR_ME;
+		allowed = allowed && !crit;
+		msr_mask = MSR_GS | MSR_ME;
+		int_class = INT_CLASS_CRIT;
 		break;
 	}
 
 	if (allowed) {
-		vcpu->arch.shared->srr0 = vcpu->arch.pc;
-		vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
+		switch (int_class) {
+		case INT_CLASS_NONCRIT:
+			set_guest_srr(vcpu, vcpu->arch.pc,
+				      vcpu->arch.shared->msr);
+			break;
+		case INT_CLASS_CRIT:
+			set_guest_csrr(vcpu, vcpu->arch.pc,
+				       vcpu->arch.shared->msr);
+			break;
+		case INT_CLASS_DBG:
+			set_guest_dsrr(vcpu, vcpu->arch.pc,
+				       vcpu->arch.shared->msr);
+			break;
+		case INT_CLASS_MC:
+			set_guest_mcsrr(vcpu, vcpu->arch.pc,
+					vcpu->arch.shared->msr);
+			break;
+		}
+
 		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
 		if (update_esr == true)
-			vcpu->arch.shared->esr = vcpu->arch.queued_esr;
+			set_guest_esr(vcpu, vcpu->arch.queued_esr);
 		if (update_dear == true)
-			vcpu->arch.shared->dar = vcpu->arch.queued_dear;
+			set_guest_dear(vcpu, vcpu->arch.queued_dear);
 		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
 
 		if (!keep_irq)
 			clear_bit(priority, &vcpu->arch.pending_exceptions);
 	}
 
+#ifdef CONFIG_KVM_BOOKE_HV
+	/*
+	 * If an interrupt is pending but masked, raise a guest doorbell
+	 * so that we are notified when the guest enables the relevant
+	 * MSR bit.
+	 */
+	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
+		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
+	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
+		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
+	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
+		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
+#endif
+
 	return allowed;
 }
 
@@ -344,6 +463,11 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
+	if (!current->thread.kvm_vcpu) {
+		WARN(1, "no vcpu\n");
+		return -EPERM;
+	}
+
 	local_irq_disable();
 
 	kvmppc_core_prepare_to_enter(vcpu);
@@ -363,6 +487,38 @@ out:
 	return ret;
 }
 
+static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	enum emulation_result er;
+
+	er = kvmppc_emulate_instruction(run, vcpu);
+	switch (er) {
+	case EMULATE_DONE:
+		/* don't overwrite subtypes, just account kvm_stats */
+		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
+		/* Future optimization: only reload non-volatiles if
+		 * they were actually modified by emulation. */
+		return RESUME_GUEST_NV;
+
+	case EMULATE_DO_DCR:
+		run->exit_reason = KVM_EXIT_DCR;
+		return RESUME_HOST;
+
+	case EMULATE_FAIL:
+		/* XXX Deliver Program interrupt to guest. */
+		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
+		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
+		/* For debugging, encode the failing instruction and
+		 * report it to userspace. */
+		run->hw.hardware_exit_reason = ~0ULL << 32;
+		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
+		return RESUME_HOST;
+
+	default:
+		BUG();
+	}
+}
+
 /**
  * kvmppc_handle_exit
  *
@@ -371,12 +527,30 @@ out:
 int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int exit_nr)
 {
-	enum emulation_result er;
 	int r = RESUME_HOST;
 
 	/* update before a new last_exit_type is rewritten */
 	kvmppc_update_timing_stats(vcpu);
 
+	switch (exit_nr) {
+	case BOOKE_INTERRUPT_EXTERNAL:
+		do_IRQ(current->thread.regs);
+		break;
+
+	case BOOKE_INTERRUPT_DECREMENTER:
+		timer_interrupt(current->thread.regs);
+		break;
+
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
+	case BOOKE_INTERRUPT_DOORBELL:
+		doorbell_exception(current->thread.regs);
+		break;
+#endif
+	case BOOKE_INTERRUPT_MACHINE_CHECK:
+		/* FIXME */
+		break;
+	}
+
 	local_irq_enable();
 
 	run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -384,30 +558,56 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	switch (exit_nr) {
 	case BOOKE_INTERRUPT_MACHINE_CHECK:
-		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
-		kvmppc_dump_vcpu(vcpu);
-		r = RESUME_HOST;
+		kvm_resched(vcpu);
+		r = RESUME_GUEST;
 		break;
 
 	case BOOKE_INTERRUPT_EXTERNAL:
 		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
-		if (need_resched())
-			cond_resched();
+		kvm_resched(vcpu);
 		r = RESUME_GUEST;
 		break;
 
 	case BOOKE_INTERRUPT_DECREMENTER:
-		/* Since we switched IVPR back to the host's value, the host
-		 * handled this interrupt the moment we enabled interrupts.
-		 * Now we just offer it a chance to reschedule the guest. */
 		kvmppc_account_exit(vcpu, DEC_EXITS);
-		if (need_resched())
-			cond_resched();
+		kvm_resched(vcpu);
 		r = RESUME_GUEST;
 		break;
 
+	case BOOKE_INTERRUPT_DOORBELL:
+		kvmppc_account_exit(vcpu, DBELL_EXITS);
+		kvm_resched(vcpu);
+		r = RESUME_GUEST;
+		break;
+
+	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
+		kvmppc_account_exit(vcpu, GDBELL_EXITS);
+
+		/*
+		 * We are here because there is a pending guest interrupt
+		 * which could not be delivered as MSR_CE or MSR_ME was not
+		 * set.  Once we break from here we will retry delivery.
+		 */
+		r = RESUME_GUEST;
+		break;
+
+	case BOOKE_INTERRUPT_GUEST_DBELL:
+		kvmppc_account_exit(vcpu, GDBELL_EXITS);
+
+		/*
+		 * We are here because there is a pending guest interrupt
+		 * which could not be delivered as MSR_EE was not set.  Once
+		 * we break from here we will retry delivery.
+		 */
+		r = RESUME_GUEST;
+		break;
+
+	case BOOKE_INTERRUPT_HV_PRIV:
+		r = emulation_exit(run, vcpu);
+		break;
+
 	case BOOKE_INTERRUPT_PROGRAM:
-		if (vcpu->arch.shared->msr & MSR_PR) {
+		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
 			/* Program traps generated by user-level software must be handled
 			 * by the guest kernel. */
 			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
@@ -416,32 +616,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			break;
 		}
 
-		er = kvmppc_emulate_instruction(run, vcpu);
-		switch (er) {
-		case EMULATE_DONE:
-			/* don't overwrite subtypes, just account kvm_stats */
-			kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
-			/* Future optimization: only reload non-volatiles if
-			 * they were actually modified by emulation. */
-			r = RESUME_GUEST_NV;
-			break;
-		case EMULATE_DO_DCR:
-			run->exit_reason = KVM_EXIT_DCR;
-			r = RESUME_HOST;
-			break;
-		case EMULATE_FAIL:
-			/* XXX Deliver Program interrupt to guest. */
-			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
-			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
-			/* For debugging, encode the failing instruction and
-			 * report it to userspace. */
-			run->hw.hardware_exit_reason = ~0ULL << 32;
-			run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
-			r = RESUME_HOST;
-			break;
-		default:
-			BUG();
-		}
+		r = emulation_exit(run, vcpu);
 		break;
 
 	case BOOKE_INTERRUPT_FP_UNAVAIL:
@@ -506,6 +681,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = RESUME_GUEST;
 		break;
 
+#ifdef CONFIG_KVM_BOOKE_HV
+	case BOOKE_INTERRUPT_HV_SYSCALL:
+		if (!(vcpu->arch.shared->msr & MSR_PR)) {
+			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
+		} else {
+			/*
+			 * hcall from guest userspace -- send privileged
+			 * instruction program check.
+			 */
+			kvmppc_core_queue_program(vcpu, ESR_PPR);
+		}
+
+		r = RESUME_GUEST;
+		break;
+#else
 	case BOOKE_INTERRUPT_SYSCALL:
 		if (!(vcpu->arch.shared->msr & MSR_PR) &&
 		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
@@ -519,6 +709,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
 		r = RESUME_GUEST;
 		break;
+#endif
 
 	case BOOKE_INTERRUPT_DTLB_MISS: {
 		unsigned long eaddr = vcpu->arch.fault_dear;
@@ -659,12 +850,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	int r;
 
 	vcpu->arch.pc = 0;
-	vcpu->arch.shared->msr = 0;
-	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
 	vcpu->arch.shared->pir = vcpu->vcpu_id;
 	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
+	kvmppc_set_msr(vcpu, 0);
 
+#ifndef CONFIG_KVM_BOOKE_HV
+	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
 	vcpu->arch.shadow_pid = 1;
+	vcpu->arch.shared->msr = 0;
+#endif
 
 	/* Eye-catching numbers so we know if the guest takes an interrupt
 	 * before it's programmed its own IVPR/IVORs. */
@@ -745,8 +939,8 @@ static void get_sregs_base(struct kvm_vcpu *vcpu,
 	sregs->u.e.csrr0 = vcpu->arch.csrr0;
 	sregs->u.e.csrr1 = vcpu->arch.csrr1;
 	sregs->u.e.mcsr = vcpu->arch.mcsr;
-	sregs->u.e.esr = vcpu->arch.shared->esr;
-	sregs->u.e.dear = vcpu->arch.shared->dar;
+	sregs->u.e.esr = get_guest_esr(vcpu);
+	sregs->u.e.dear = get_guest_dear(vcpu);
 	sregs->u.e.tsr = vcpu->arch.tsr;
 	sregs->u.e.tcr = vcpu->arch.tcr;
 	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
@@ -763,8 +957,8 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
 	vcpu->arch.csrr0 = sregs->u.e.csrr0;
 	vcpu->arch.csrr1 = sregs->u.e.csrr1;
 	vcpu->arch.mcsr = sregs->u.e.mcsr;
-	vcpu->arch.shared->esr = sregs->u.e.esr;
-	vcpu->arch.shared->dar = sregs->u.e.dear;
+	set_guest_esr(vcpu, sregs->u.e.esr);
+	set_guest_dear(vcpu, sregs->u.e.dear);
 	vcpu->arch.vrsave = sregs->u.e.vrsave;
 	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
 
@@ -961,14 +1155,17 @@ void kvmppc_decrementer_func(unsigned long data)
 
 void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	current->thread.kvm_vcpu = vcpu;
 }
 
 void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	current->thread.kvm_vcpu = NULL;
 }
 
 int __init kvmppc_booke_init(void)
 {
+#ifndef CONFIG_KVM_BOOKE_HV
 	unsigned long ivor[16];
 	unsigned long max_ivor = 0;
 	int i;
@@ -1011,7 +1208,7 @@ int __init kvmppc_booke_init(void)
 	}
 	flush_icache_range(kvmppc_booke_handlers,
 			kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
-
+#endif /* !BOOKE_HV */
 	return 0;
 }
 
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 05d1d99428ce..d53bcf2558f5 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -48,7 +48,20 @@
 #define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19
 /* Internal pseudo-irqprio for level triggered externals */
 #define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20
-#define BOOKE_IRQPRIO_MAX 20
+#define BOOKE_IRQPRIO_DBELL 21
+#define BOOKE_IRQPRIO_DBELL_CRIT 22
+#define BOOKE_IRQPRIO_MAX 23
+
+#define BOOKE_IRQMASK_EE ((1 << BOOKE_IRQPRIO_EXTERNAL_LEVEL) | \
+			  (1 << BOOKE_IRQPRIO_PERFORMANCE_MONITOR) | \
+			  (1 << BOOKE_IRQPRIO_DBELL) | \
+			  (1 << BOOKE_IRQPRIO_DECREMENTER) | \
+			  (1 << BOOKE_IRQPRIO_FIT) | \
+			  (1 << BOOKE_IRQPRIO_EXTERNAL))
+
+#define BOOKE_IRQMASK_CE ((1 << BOOKE_IRQPRIO_DBELL_CRIT) | \
+			  (1 << BOOKE_IRQPRIO_WATCHDOG) | \
+			  (1 << BOOKE_IRQPRIO_CRITICAL))
 
 extern unsigned long kvmppc_booke_handlers;
 
@@ -74,4 +87,13 @@ void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu);
 void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu);
 
+enum int_class {
+	INT_CLASS_NONCRIT,
+	INT_CLASS_CRIT,
+	INT_CLASS_MC,
+	INT_CLASS_DBG,
+};
+
+void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
+
 #endif /* __KVM_BOOKE_H__ */
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 3e652da36534..904412bbea40 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -99,6 +99,12 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return emulated;
 }
 
+/*
+ * NOTE: some of these registers are not emulated on BOOKE_HV (GS-mode).
+ * Their backing store is in real registers, and these functions
+ * will return the wrong result if called for them in another context
+ * (such as debugging).
+ */
 int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 {
 	int emulated = EMULATE_DONE;
@@ -122,9 +128,11 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 		kvmppc_set_tcr(vcpu, spr_val);
 		break;
 
-	/* Note: SPRG4-7 are user-readable. These values are
-	 * loaded into the real SPRGs when resuming the
-	 * guest. */
+	/*
+	 * Note: SPRG4-7 are user-readable.
+	 * These values are loaded into the real SPRGs when resuming the
+	 * guest (PR-mode only).
+	 */
 	case SPRN_SPRG4:
 		vcpu->arch.shared->sprg4 = spr_val; break;
 	case SPRN_SPRG5:
@@ -136,6 +144,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 
 	case SPRN_IVPR:
 		vcpu->arch.ivpr = spr_val;
+#ifdef CONFIG_KVM_BOOKE_HV
+		mtspr(SPRN_GIVPR, spr_val);
+#endif
 		break;
 	case SPRN_IVOR0:
 		vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val;
@@ -145,6 +156,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 		break;
 	case SPRN_IVOR2:
 		vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val;
+#ifdef CONFIG_KVM_BOOKE_HV
+		mtspr(SPRN_GIVOR2, spr_val);
+#endif
 		break;
 	case SPRN_IVOR3:
 		vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val;
@@ -163,6 +177,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 		break;
 	case SPRN_IVOR8:
 		vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val;
+#ifdef CONFIG_KVM_BOOKE_HV
+		mtspr(SPRN_GIVOR8, spr_val);
+#endif
 		break;
 	case SPRN_IVOR9:
 		vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val;
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
new file mode 100644
index 000000000000..9eaeebd86e44
--- /dev/null
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -0,0 +1,587 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
16 *
17 * Author: Varun Sethi <varun.sethi@freescale.com>
18 * Author: Scott Wood <scotwood@freescale.com>
19 *
20 * This file is derived from arch/powerpc/kvm/booke_interrupts.S
21 */
22
23#include <asm/ppc_asm.h>
24#include <asm/kvm_asm.h>
25#include <asm/reg.h>
26#include <asm/mmu-44x.h>
27#include <asm/page.h>
28#include <asm/asm-compat.h>
29#include <asm/asm-offsets.h>
30#include <asm/bitsperlong.h>
31
32#include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */
33
34#define GET_VCPU(vcpu, thread) \
35 PPC_LL vcpu, THREAD_KVM_VCPU(thread)
36
37#define SET_VCPU(vcpu) \
38 PPC_STL vcpu, (THREAD + THREAD_KVM_VCPU)(r2)
39
40#define LONGBYTES (BITS_PER_LONG / 8)
41
42#define VCPU_GPR(n) (VCPU_GPRS + (n * LONGBYTES))
43#define VCPU_GUEST_SPRG(n) (VCPU_GUEST_SPRGS + (n * LONGBYTES))
44
45/* The host stack layout: */
46#define HOST_R1 (0 * LONGBYTES) /* Implied by stwu. */
47#define HOST_CALLEE_LR (1 * LONGBYTES)
48#define HOST_RUN (2 * LONGBYTES) /* struct kvm_run */
49/*
50 * r2 is special: it holds 'current', and it made nonvolatile in the
51 * kernel with the -ffixed-r2 gcc option.
52 */
53#define HOST_R2 (3 * LONGBYTES)
54#define HOST_NV_GPRS (4 * LONGBYTES)
55#define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * LONGBYTES))
56#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + LONGBYTES)
57#define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */
58#define HOST_STACK_LR (HOST_STACK_SIZE + LONGBYTES) /* In caller stack frame. */
59
60#define NEED_EMU 0x00000001 /* emulation -- save nv regs */
61#define NEED_DEAR 0x00000002 /* save faulting DEAR */
62#define NEED_ESR 0x00000004 /* save faulting ESR */
63
64/*
65 * On entry:
66 * r4 = vcpu, r5 = srr0, r6 = srr1
67 * saved in vcpu: cr, ctr, r3-r13
68 */
69.macro kvm_handler_common intno, srr0, flags
70 mfspr r10, SPRN_PID
71 lwz r8, VCPU_HOST_PID(r4)
72 PPC_LL r11, VCPU_SHARED(r4)
73 PPC_STL r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */
74 li r14, \intno
75
76 stw r10, VCPU_GUEST_PID(r4)
77 mtspr SPRN_PID, r8
78
79 .if \flags & NEED_EMU
80 lwz r9, VCPU_KVM(r4)
81 .endif
82
83#ifdef CONFIG_KVM_EXIT_TIMING
84 /* save exit time */
851: mfspr r7, SPRN_TBRU
86 mfspr r8, SPRN_TBRL
87 mfspr r9, SPRN_TBRU
88 cmpw r9, r7
89 PPC_STL r8, VCPU_TIMING_EXIT_TBL(r4)
90 bne- 1b
91 PPC_STL r9, VCPU_TIMING_EXIT_TBU(r4)
92#endif
93
94 oris r8, r6, MSR_CE@h
95#ifndef CONFIG_64BIT
96 stw r6, (VCPU_SHARED_MSR + 4)(r11)
97#else
98 std r6, (VCPU_SHARED_MSR)(r11)
99#endif
100 ori r8, r8, MSR_ME | MSR_RI
101 PPC_STL r5, VCPU_PC(r4)
102
103 /*
104 * Make sure CE/ME/RI are set (if appropriate for exception type)
105 * whether or not the guest had it set. Since mfmsr/mtmsr are
106 * somewhat expensive, skip in the common case where the guest
107 * had all these bits set (and thus they're still set if
108 * appropriate for the exception type).
109 */
110 cmpw r6, r8
111 .if \flags & NEED_EMU
112 lwz r9, KVM_LPID(r9)
113 .endif
114 beq 1f
115 mfmsr r7
116 .if \srr0 != SPRN_MCSRR0 && \srr0 != SPRN_CSRR0
117 oris r7, r7, MSR_CE@h
118 .endif
119 .if \srr0 != SPRN_MCSRR0
120 ori r7, r7, MSR_ME | MSR_RI
121 .endif
122 mtmsr r7
1231:
124
125 .if \flags & NEED_EMU
126 /*
127 * This assumes you have external PID support.
128 * To support a bookehv CPU without external PID, you'll
129 * need to look up the TLB entry and create a temporary mapping.
130 *
131 * FIXME: we don't currently handle if the lwepx faults. PR-mode
132 * booke doesn't handle it either. Since Linux doesn't use
133 * broadcast tlbivax anymore, the only way this should happen is
134 * if the guest maps its memory execute-but-not-read, or if we
135 * somehow take a TLB miss in the middle of this entry code and
136 * evict the relevant entry. On e500mc, all kernel lowmem is
137 * bolted into TLB1 large page mappings, and we don't use
138 * broadcast invalidates, so we should not take a TLB miss here.
139 *
140 * Later we'll need to deal with faults here. Disallowing guest
141 * mappings that are execute-but-not-read could be an option on
142 * e500mc, but not on chips with an LRAT if it is used.
143 */
144
145 mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */
146 PPC_STL r15, VCPU_GPR(r15)(r4)
147 PPC_STL r16, VCPU_GPR(r16)(r4)
148 PPC_STL r17, VCPU_GPR(r17)(r4)
149 PPC_STL r18, VCPU_GPR(r18)(r4)
150 PPC_STL r19, VCPU_GPR(r19)(r4)
151 mr r8, r3
152 PPC_STL r20, VCPU_GPR(r20)(r4)
153 rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS
154 PPC_STL r21, VCPU_GPR(r21)(r4)
155 rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR
156 PPC_STL r22, VCPU_GPR(r22)(r4)
157 rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID
158 PPC_STL r23, VCPU_GPR(r23)(r4)
159 PPC_STL r24, VCPU_GPR(r24)(r4)
160 PPC_STL r25, VCPU_GPR(r25)(r4)
161 PPC_STL r26, VCPU_GPR(r26)(r4)
162 PPC_STL r27, VCPU_GPR(r27)(r4)
163 PPC_STL r28, VCPU_GPR(r28)(r4)
164 PPC_STL r29, VCPU_GPR(r29)(r4)
165 PPC_STL r30, VCPU_GPR(r30)(r4)
166 PPC_STL r31, VCPU_GPR(r31)(r4)
167 mtspr SPRN_EPLC, r8
168 isync
169 lwepx r9, 0, r5
170 mtspr SPRN_EPLC, r3
171 stw r9, VCPU_LAST_INST(r4)
172 .endif
173
174 .if \flags & NEED_ESR
175 mfspr r8, SPRN_ESR
176 PPC_STL r8, VCPU_FAULT_ESR(r4)
177 .endif
178
179 .if \flags & NEED_DEAR
180 mfspr r9, SPRN_DEAR
181 PPC_STL r9, VCPU_FAULT_DEAR(r4)
182 .endif
183
184 b kvmppc_resume_host
185.endm
186
187/*
188 * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
189 */
190.macro kvm_handler intno srr0, srr1, flags
191_GLOBAL(kvmppc_handler_\intno\()_\srr1)
192 GET_VCPU(r11, r10)
193 PPC_STL r3, VCPU_GPR(r3)(r11)
194 mfspr r3, SPRN_SPRG_RSCRATCH0
195 PPC_STL r4, VCPU_GPR(r4)(r11)
196 PPC_LL r4, THREAD_NORMSAVE(0)(r10)
197 PPC_STL r5, VCPU_GPR(r5)(r11)
198 PPC_STL r13, VCPU_CR(r11)
199 mfspr r5, \srr0
200 PPC_STL r3, VCPU_GPR(r10)(r11)
201 PPC_LL r3, THREAD_NORMSAVE(2)(r10)
202 PPC_STL r6, VCPU_GPR(r6)(r11)
203 PPC_STL r4, VCPU_GPR(r11)(r11)
204 mfspr r6, \srr1
205 PPC_STL r7, VCPU_GPR(r7)(r11)
206 PPC_STL r8, VCPU_GPR(r8)(r11)
207 PPC_STL r9, VCPU_GPR(r9)(r11)
208 PPC_STL r3, VCPU_GPR(r13)(r11)
209 mfctr r7
210 PPC_STL r12, VCPU_GPR(r12)(r11)
211 PPC_STL r7, VCPU_CTR(r11)
212 mr r4, r11
213 kvm_handler_common \intno, \srr0, \flags
214.endm
215
216.macro kvm_lvl_handler intno scratch srr0, srr1, flags
217_GLOBAL(kvmppc_handler_\intno\()_\srr1)
218 mfspr r10, SPRN_SPRG_THREAD
219 GET_VCPU(r11, r10)
220 PPC_STL r3, VCPU_GPR(r3)(r11)
221 mfspr r3, \scratch
222 PPC_STL r4, VCPU_GPR(r4)(r11)
223 PPC_LL r4, GPR9(r8)
224 PPC_STL r5, VCPU_GPR(r5)(r11)
225 PPC_STL r9, VCPU_CR(r11)
226 mfspr r5, \srr0
227 PPC_STL r3, VCPU_GPR(r8)(r11)
228 PPC_LL r3, GPR10(r8)
229 PPC_STL r6, VCPU_GPR(r6)(r11)
230 PPC_STL r4, VCPU_GPR(r9)(r11)
231 mfspr r6, \srr1
232 PPC_LL r4, GPR11(r8)
233 PPC_STL r7, VCPU_GPR(r7)(r11)
234 PPC_STL r13, VCPU_GPR(r13)(r11)
235 PPC_STL r3, VCPU_GPR(r10)(r11)
236 mfctr r7
237 PPC_STL r12, VCPU_GPR(r12)(r11)
238 PPC_STL r4, VCPU_GPR(r11)(r11)
239 PPC_STL r7, VCPU_CTR(r11)
240 mr r4, r11
241 kvm_handler_common \intno, \srr0, \flags
242.endm
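
/*
 * Unlike kvm_handler above, the level-sensitive prologs in
 * head_booke.h have already spilled r9-r11 into their exception
 * frame (addressed by r8), stashed the old r8 in a per-level
 * scratch SPR, and left the interrupted CR in r9.  That is why
 * this macro reads \scratch and GPR9/GPR10/GPR11 off r8 instead
 * of the THREAD_NORMSAVE slots used by kvm_handler.
 */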
243
244kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \
245 SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
246kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \
247 SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0
248kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \
249 SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR)
250kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR
251kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0
252kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \
253 SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR)
254kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, NEED_ESR
255kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
256kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
257kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
258kvm_handler BOOKE_INTERRUPT_DECREMENTER, SPRN_SRR0, SPRN_SRR1, 0
259kvm_handler BOOKE_INTERRUPT_FIT, SPRN_SRR0, SPRN_SRR1, 0
260kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \
261 SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
262kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \
263 SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
264kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0
265kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
266kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, SPRN_SRR0, SPRN_SRR1, 0
267kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, SPRN_SRR0, SPRN_SRR1, 0
268kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0
269kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0
270kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \
271 SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
272kvm_handler BOOKE_INTERRUPT_HV_PRIV, SPRN_SRR0, SPRN_SRR1, NEED_EMU
273kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
274kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, SPRN_GSRR0, SPRN_GSRR1, 0
275kvm_lvl_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, \
276 SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
277kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
278 SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
279kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
280 SPRN_SPRG_RSCRATCH_DBG, SPRN_DSRR0, SPRN_DSRR1, 0
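
/*
 * DEBUG is instantiated twice because the debug interrupt is
 * delivered through CSRR0/CSRR1 or through DSRR0/DSRR1 depending
 * on the core and kernel configuration; one handler is generated
 * for each save/restore SPR pair.
 */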
281
282
283/* Registers:
284 * SPRG_SCRATCH0: guest r10
285 * r4: vcpu pointer
286 * r11: vcpu->arch.shared
287 * r14: KVM exit number
288 */
289_GLOBAL(kvmppc_resume_host)
290 /* Save remaining volatile guest register state to vcpu. */
291 mfspr r3, SPRN_VRSAVE
292 PPC_STL r0, VCPU_GPR(r0)(r4)
293 PPC_STL r1, VCPU_GPR(r1)(r4)
294 mflr r5
295 mfspr r6, SPRN_SPRG4
296 PPC_STL r2, VCPU_GPR(r2)(r4)
297 PPC_STL r5, VCPU_LR(r4)
298 mfspr r7, SPRN_SPRG5
299 PPC_STL r3, VCPU_VRSAVE(r4)
300 PPC_STL r6, VCPU_SHARED_SPRG4(r11)
301 mfspr r8, SPRN_SPRG6
302 PPC_STL r7, VCPU_SHARED_SPRG5(r11)
303 mfspr r9, SPRN_SPRG7
304 PPC_STL r8, VCPU_SHARED_SPRG6(r11)
305 mfxer r3
306 PPC_STL r9, VCPU_SHARED_SPRG7(r11)
307
308 /* save guest MAS registers and restore host mas4 & mas6 */
309 mfspr r5, SPRN_MAS0
310 PPC_STL r3, VCPU_XER(r4)
311 mfspr r6, SPRN_MAS1
312 stw r5, VCPU_SHARED_MAS0(r11)
313 mfspr r7, SPRN_MAS2
314 stw r6, VCPU_SHARED_MAS1(r11)
315#ifndef CONFIG_64BIT
316 stw r7, (VCPU_SHARED_MAS2 + 4)(r11)
317#else
318 std r7, (VCPU_SHARED_MAS2)(r11)
319#endif
320 mfspr r5, SPRN_MAS3
321 mfspr r6, SPRN_MAS4
322 stw r5, VCPU_SHARED_MAS7_3+4(r11)
323 mfspr r7, SPRN_MAS6
324 stw r6, VCPU_SHARED_MAS4(r11)
325 mfspr r5, SPRN_MAS7
326 lwz r6, VCPU_HOST_MAS4(r4)
327 stw r7, VCPU_SHARED_MAS6(r11)
328 lwz r8, VCPU_HOST_MAS6(r4)
329 mtspr SPRN_MAS4, r6
330 stw r5, VCPU_SHARED_MAS7_3+0(r11)
331 mtspr SPRN_MAS6, r8
332 mfspr r3, SPRN_EPCR
333 rlwinm r3, r3, 0, ~SPRN_EPCR_DMIUH
334 mtspr SPRN_EPCR, r3
335 isync
336
337 /* Restore host stack pointer */
338 PPC_LL r1, VCPU_HOST_STACK(r4)
339 PPC_LL r2, HOST_R2(r1)
340
341 /* Switch to kernel stack and jump to handler. */
342 PPC_LL r3, HOST_RUN(r1)
343 mr r5, r14 /* intno */
344 mr r14, r4 /* Save vcpu pointer. */
345 bl kvmppc_handle_exit
346
347 /* Restore vcpu pointer and the nonvolatiles we used. */
348 mr r4, r14
349 PPC_LL r14, VCPU_GPR(r14)(r4)
350
351 andi. r5, r3, RESUME_FLAG_NV
352 beq skip_nv_load
353 PPC_LL r15, VCPU_GPR(r15)(r4)
354 PPC_LL r16, VCPU_GPR(r16)(r4)
355 PPC_LL r17, VCPU_GPR(r17)(r4)
356 PPC_LL r18, VCPU_GPR(r18)(r4)
357 PPC_LL r19, VCPU_GPR(r19)(r4)
358 PPC_LL r20, VCPU_GPR(r20)(r4)
359 PPC_LL r21, VCPU_GPR(r21)(r4)
360 PPC_LL r22, VCPU_GPR(r22)(r4)
361 PPC_LL r23, VCPU_GPR(r23)(r4)
362 PPC_LL r24, VCPU_GPR(r24)(r4)
363 PPC_LL r25, VCPU_GPR(r25)(r4)
364 PPC_LL r26, VCPU_GPR(r26)(r4)
365 PPC_LL r27, VCPU_GPR(r27)(r4)
366 PPC_LL r28, VCPU_GPR(r28)(r4)
367 PPC_LL r29, VCPU_GPR(r29)(r4)
368 PPC_LL r30, VCPU_GPR(r30)(r4)
369 PPC_LL r31, VCPU_GPR(r31)(r4)
370skip_nv_load:
371 /* Should we return to the guest? */
372 andi. r5, r3, RESUME_FLAG_HOST
373 beq lightweight_exit
374
375 srawi r3, r3, 2 /* Shift -ERR back down. */
376
377heavyweight_exit:
378 /* Not returning to guest. */
379 PPC_LL r5, HOST_STACK_LR(r1)
380
381 /*
382 * We already saved guest volatile register state; now save the
383 * non-volatiles.
384 */
385
386 PPC_STL r15, VCPU_GPR(r15)(r4)
387 PPC_STL r16, VCPU_GPR(r16)(r4)
388 PPC_STL r17, VCPU_GPR(r17)(r4)
389 PPC_STL r18, VCPU_GPR(r18)(r4)
390 PPC_STL r19, VCPU_GPR(r19)(r4)
391 PPC_STL r20, VCPU_GPR(r20)(r4)
392 PPC_STL r21, VCPU_GPR(r21)(r4)
393 PPC_STL r22, VCPU_GPR(r22)(r4)
394 PPC_STL r23, VCPU_GPR(r23)(r4)
395 PPC_STL r24, VCPU_GPR(r24)(r4)
396 PPC_STL r25, VCPU_GPR(r25)(r4)
397 PPC_STL r26, VCPU_GPR(r26)(r4)
398 PPC_STL r27, VCPU_GPR(r27)(r4)
399 PPC_STL r28, VCPU_GPR(r28)(r4)
400 PPC_STL r29, VCPU_GPR(r29)(r4)
401 PPC_STL r30, VCPU_GPR(r30)(r4)
402 PPC_STL r31, VCPU_GPR(r31)(r4)
403
404 /* Load host non-volatile register state from host stack. */
405 PPC_LL r14, HOST_NV_GPR(r14)(r1)
406 PPC_LL r15, HOST_NV_GPR(r15)(r1)
407 PPC_LL r16, HOST_NV_GPR(r16)(r1)
408 PPC_LL r17, HOST_NV_GPR(r17)(r1)
409 PPC_LL r18, HOST_NV_GPR(r18)(r1)
410 PPC_LL r19, HOST_NV_GPR(r19)(r1)
411 PPC_LL r20, HOST_NV_GPR(r20)(r1)
412 PPC_LL r21, HOST_NV_GPR(r21)(r1)
413 PPC_LL r22, HOST_NV_GPR(r22)(r1)
414 PPC_LL r23, HOST_NV_GPR(r23)(r1)
415 PPC_LL r24, HOST_NV_GPR(r24)(r1)
416 PPC_LL r25, HOST_NV_GPR(r25)(r1)
417 PPC_LL r26, HOST_NV_GPR(r26)(r1)
418 PPC_LL r27, HOST_NV_GPR(r27)(r1)
419 PPC_LL r28, HOST_NV_GPR(r28)(r1)
420 PPC_LL r29, HOST_NV_GPR(r29)(r1)
421 PPC_LL r30, HOST_NV_GPR(r30)(r1)
422 PPC_LL r31, HOST_NV_GPR(r31)(r1)
423
424 /* Return to kvmppc_vcpu_run(). */
425 mtlr r5
426 addi r1, r1, HOST_STACK_SIZE
427 /* r3 still contains the return code from kvmppc_handle_exit(). */
428 blr
429
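
The EPCR write near the end of the MAS save sequence above pairs with the one in lightweight_exit below: EPCR[DMIUH] is set before entering the guest so hypervisor-level TLB exceptions cannot rewrite the MAS registers while they hold the guest's image, and it is cleared here once kvmppc_resume_host has saved that image back to the shared area. A rough C equivalent, using hypothetical helper names, not code from this patch:

	/* Hypothetical helpers mirroring the EPCR[DMIUH] toggling. */
	static inline void guest_mas_freeze(void)
	{
		/* lightweight_exit: guest MAS image is live in the registers */
		mtspr(SPRN_EPCR, mfspr(SPRN_EPCR) | SPRN_EPCR_DMIUH);
	}

	static inline void guest_mas_thaw(void)
	{
		/* kvmppc_resume_host: guest MAS image saved back to shared */
		mtspr(SPRN_EPCR, mfspr(SPRN_EPCR) & ~SPRN_EPCR_DMIUH);
		isync();	/* matches the isync after the clearing mtspr */
	}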
430/* Registers:
431 * r3: kvm_run pointer
432 * r4: vcpu pointer
433 */
434_GLOBAL(__kvmppc_vcpu_run)
435 stwu r1, -HOST_STACK_SIZE(r1)
436 PPC_STL r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */
437
438 /* Save host state to stack. */
439 PPC_STL r3, HOST_RUN(r1)
440 mflr r3
441 PPC_STL r3, HOST_STACK_LR(r1)
442
443 /* Save host non-volatile register state to stack. */
444 PPC_STL r14, HOST_NV_GPR(r14)(r1)
445 PPC_STL r15, HOST_NV_GPR(r15)(r1)
446 PPC_STL r16, HOST_NV_GPR(r16)(r1)
447 PPC_STL r17, HOST_NV_GPR(r17)(r1)
448 PPC_STL r18, HOST_NV_GPR(r18)(r1)
449 PPC_STL r19, HOST_NV_GPR(r19)(r1)
450 PPC_STL r20, HOST_NV_GPR(r20)(r1)
451 PPC_STL r21, HOST_NV_GPR(r21)(r1)
452 PPC_STL r22, HOST_NV_GPR(r22)(r1)
453 PPC_STL r23, HOST_NV_GPR(r23)(r1)
454 PPC_STL r24, HOST_NV_GPR(r24)(r1)
455 PPC_STL r25, HOST_NV_GPR(r25)(r1)
456 PPC_STL r26, HOST_NV_GPR(r26)(r1)
457 PPC_STL r27, HOST_NV_GPR(r27)(r1)
458 PPC_STL r28, HOST_NV_GPR(r28)(r1)
459 PPC_STL r29, HOST_NV_GPR(r29)(r1)
460 PPC_STL r30, HOST_NV_GPR(r30)(r1)
461 PPC_STL r31, HOST_NV_GPR(r31)(r1)
462
463 /* Load guest non-volatiles. */
464 PPC_LL r14, VCPU_GPR(r14)(r4)
465 PPC_LL r15, VCPU_GPR(r15)(r4)
466 PPC_LL r16, VCPU_GPR(r16)(r4)
467 PPC_LL r17, VCPU_GPR(r17)(r4)
468 PPC_LL r18, VCPU_GPR(r18)(r4)
469 PPC_LL r19, VCPU_GPR(r19)(r4)
470 PPC_LL r20, VCPU_GPR(r20)(r4)
471 PPC_LL r21, VCPU_GPR(r21)(r4)
472 PPC_LL r22, VCPU_GPR(r22)(r4)
473 PPC_LL r23, VCPU_GPR(r23)(r4)
474 PPC_LL r24, VCPU_GPR(r24)(r4)
475 PPC_LL r25, VCPU_GPR(r25)(r4)
476 PPC_LL r26, VCPU_GPR(r26)(r4)
477 PPC_LL r27, VCPU_GPR(r27)(r4)
478 PPC_LL r28, VCPU_GPR(r28)(r4)
479 PPC_LL r29, VCPU_GPR(r29)(r4)
480 PPC_LL r30, VCPU_GPR(r30)(r4)
481 PPC_LL r31, VCPU_GPR(r31)(r4)
482
483
484lightweight_exit:
485 PPC_STL r2, HOST_R2(r1)
486
487 mfspr r3, SPRN_PID
488 stw r3, VCPU_HOST_PID(r4)
489 lwz r3, VCPU_GUEST_PID(r4)
490 mtspr SPRN_PID, r3
491
492 /* Saving the vcpu pointer for the exception handlers
493 * must be done before loading guest r2.
494 */
495// SET_VCPU(r4)
496
497 PPC_LL r11, VCPU_SHARED(r4)
498 /* Save host mas4 and mas6 and load guest MAS registers */
499 mfspr r3, SPRN_MAS4
500 stw r3, VCPU_HOST_MAS4(r4)
501 mfspr r3, SPRN_MAS6
502 stw r3, VCPU_HOST_MAS6(r4)
503 lwz r3, VCPU_SHARED_MAS0(r11)
504 lwz r5, VCPU_SHARED_MAS1(r11)
505#ifndef CONFIG_64BIT
506 lwz r6, (VCPU_SHARED_MAS2 + 4)(r11)
507#else
508 ld r6, (VCPU_SHARED_MAS2)(r11)
509#endif
510 lwz r7, VCPU_SHARED_MAS7_3+4(r11)
511 lwz r8, VCPU_SHARED_MAS4(r11)
512 mtspr SPRN_MAS0, r3
513 mtspr SPRN_MAS1, r5
514 mtspr SPRN_MAS2, r6
515 mtspr SPRN_MAS3, r7
516 mtspr SPRN_MAS4, r8
517 lwz r3, VCPU_SHARED_MAS6(r11)
518 lwz r5, VCPU_SHARED_MAS7_3+0(r11)
519 mtspr SPRN_MAS6, r3
520 mtspr SPRN_MAS7, r5
521 /* Disable MAS register updates via exception */
522 mfspr r3, SPRN_EPCR
523 oris r3, r3, SPRN_EPCR_DMIUH@h
524 mtspr SPRN_EPCR, r3
525
526 /*
527 * Host interrupt handlers may have clobbered these guest-readable
528 * SPRGs, so we need to reload them here with the guest's values.
529 */
530 lwz r3, VCPU_VRSAVE(r4)
531 lwz r5, VCPU_SHARED_SPRG4(r11)
532 mtspr SPRN_VRSAVE, r3
533 lwz r6, VCPU_SHARED_SPRG5(r11)
534 mtspr SPRN_SPRG4W, r5
535 lwz r7, VCPU_SHARED_SPRG6(r11)
536 mtspr SPRN_SPRG5W, r6
537 lwz r8, VCPU_SHARED_SPRG7(r11)
538 mtspr SPRN_SPRG6W, r7
539 mtspr SPRN_SPRG7W, r8
540
541 /* Load some guest volatiles. */
542 PPC_LL r3, VCPU_LR(r4)
543 PPC_LL r5, VCPU_XER(r4)
544 PPC_LL r6, VCPU_CTR(r4)
545 PPC_LL r7, VCPU_CR(r4)
546 PPC_LL r8, VCPU_PC(r4)
547#ifndef CONFIG_64BIT
548 lwz r9, (VCPU_SHARED_MSR + 4)(r11)
549#else
550 ld r9, (VCPU_SHARED_MSR)(r11)
551#endif
552 PPC_LL r0, VCPU_GPR(r0)(r4)
553 PPC_LL r1, VCPU_GPR(r1)(r4)
554 PPC_LL r2, VCPU_GPR(r2)(r4)
555 PPC_LL r10, VCPU_GPR(r10)(r4)
556 PPC_LL r11, VCPU_GPR(r11)(r4)
557 PPC_LL r12, VCPU_GPR(r12)(r4)
558 PPC_LL r13, VCPU_GPR(r13)(r4)
559 mtlr r3
560 mtxer r5
561 mtctr r6
562 mtcr r7
563 mtsrr0 r8
564 mtsrr1 r9
565
566#ifdef CONFIG_KVM_EXIT_TIMING
567 /* save enter time */
5681:
569 mfspr r6, SPRN_TBRU
570 mfspr r7, SPRN_TBRL
571 mfspr r8, SPRN_TBRU
572 cmpw r8, r6
573 PPC_STL r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
574 bne 1b
575 PPC_STL r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
576#endif
577
578 /* Finish loading guest volatiles and jump to guest. */
579 PPC_LL r5, VCPU_GPR(r5)(r4)
580 PPC_LL r6, VCPU_GPR(r6)(r4)
581 PPC_LL r7, VCPU_GPR(r7)(r4)
582 PPC_LL r8, VCPU_GPR(r8)(r4)
583 PPC_LL r9, VCPU_GPR(r9)(r4)
584
585 PPC_LL r3, VCPU_GPR(r3)(r4)
586 PPC_LL r4, VCPU_GPR(r4)(r4)
587 rfi
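
Both CONFIG_KVM_EXIT_TIMING stanzas in this file (the exit-time capture in kvm_handler_common and the enter-time capture above) use the standard 32-bit timebase idiom: read TBU, then TBL, then TBU again, and retry if the upper half changed, so a TBL carry between the reads can never yield a torn 64-bit value; the enter-time variant even hides the TBL store in the branch shadow. A C sketch of the same idiom, assuming the generic mftbu()/mftbl() accessors (the helper itself is not part of this patch):

	/* Sketch: capture a consistent 64-bit timebase on a 32-bit core. */
	static inline u64 tb_capture(void)
	{
		u32 hi, lo;

		do {
			hi = mftbu();		/* upper half */
			lo = mftbl();		/* lower half */
		} while (mftbu() != hi);	/* TBL wrapped into TBU: retry */

		return ((u64)hi << 32) | lo;
	}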
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index cd53e08403b3..6a530e4b3e7c 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -114,6 +114,11 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
 	goto out;
 #endif
 
+#ifdef CONFIG_KVM_BOOKE_HV
+	if (!cpu_has_feature(CPU_FTR_EMB_HV))
+		goto out;
+#endif
+
 	r = true;
 
 out:
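
For context, kvmppc_sanity_check() feeds the per-vcpu sanity flag that gates KVM_RUN, so on cores without category E.HV the new test fails vcpu setup cleanly instead of attempting a GS-mode guest entry. Schematically (reconstructed callers, not part of this diff):

	/* at vcpu creation */
	vcpu->arch.sane = kvmppc_sanity_check(vcpu);

	/* later, when the vcpu is run (booke.c) */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}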
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h
index 8167d42a776f..bf191e72b2d8 100644
--- a/arch/powerpc/kvm/timing.h
+++ b/arch/powerpc/kvm/timing.h
@@ -93,6 +93,12 @@ static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type)
 	case SIGNAL_EXITS:
 		vcpu->stat.signal_exits++;
 		break;
+	case DBELL_EXITS:
+		vcpu->stat.dbell_exits++;
+		break;
+	case GDBELL_EXITS:
+		vcpu->stat.gdbell_exits++;
+		break;
 	}
 }
 
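
The new cases assume DBELL_EXITS/GDBELL_EXITS members in the exit-timing enum and matching counters in the vcpu stats, both added elsewhere in this patch (per the diffstat, in kvm_host.h). Schematically, with existing members elided:

	enum kvmppc_exit_types {
		/* ... existing exit types ... */
		DBELL_EXITS,
		GDBELL_EXITS,
		__NUMBER_OF_KVM_EXIT_TYPES
	};

	struct kvm_vcpu_stat {
		/* ... existing counters ... */
		u32 dbell_exits;
		u32 gdbell_exits;
	};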