commit     021ec9c69f8b7b20f46296cc76cc4cb341b25191
tree       304f086761e7c01fb412c8319b89ff8b6fb2dde7
parent     bc90923e27908ef65aa8aaad2f234e18b5273c78
author     Alexander Graf <agraf@suse.de>          2010-01-07 20:58:06 -0500
committer  Marcelo Tosatti <mtosatti@redhat.com>   2010-03-01 10:35:49 -0500
KVM: PPC: Call SLB patching code in interrupt safe manner
Currently we're racy when doing the transition from IR=1 to IR=0, from the module memory entry code to the real mode SLB switching code.

To work around that, I took a look at the RTAS entry code, which faces a similar problem, and did the same thing: a small helper in linear mapped memory that does mtmsr with IR=0 and then RFIs into the actual handler.

Thanks to that trick we can safely take page faults in the entry code and only need to be really careful about what we do once we reach the SLB switching part.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
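[Editorial note] For context before the diff: MSR[IR] and MSR[DR] are the instruction and data relocation bits, and the switching code has to run with both off so nothing can fault mid-transition. A minimal sketch, assuming the kernel's asm/reg.h definitions, of the MSR value the patch loads before the final jump (the helper function here is illustration only, not part of the patch):

	#include <asm/reg.h>	/* MSR_KERNEL, MSR_IR, MSR_DR */

	/* Illustration only: the MSR the trampoline enters with.
	 * Clearing IR and DR switches instruction and data relocation
	 * off, so the SLB patching code runs in real mode and cannot
	 * take a page fault halfway through replacing the SLB entries.
	 */
	static inline unsigned long kvm_real_mode_msr(void)
	{
		return MSR_KERNEL & ~(MSR_IR | MSR_DR);
	}

This is exactly the value passed as the new MSR in the book3s_64_interrupts.S hunk below.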
Diffstat (limited to 'arch/powerpc')
 arch/powerpc/include/asm/kvm_book3s.h        |  1 +
 arch/powerpc/include/asm/kvm_book3s_64_asm.h |  1 -
 arch/powerpc/include/asm/kvm_host.h          |  1 +
 arch/powerpc/kernel/asm-offsets.c            |  3 +--
 arch/powerpc/kvm/book3s.c                    |  1 +
 arch/powerpc/kvm/book3s_64_exports.c         |  1 +
 arch/powerpc/kvm/book3s_64_interrupts.S      | 25 +++++++------------------
 arch/powerpc/kvm/book3s_64_rmhandlers.S      | 18 ++++++++++++++++++
 arch/powerpc/kvm/book3s_64_slb.S             |  4 ++++
 9 files changed, 34 insertions(+), 21 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index f192017d799d..c91be0ff0232 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -121,6 +121,7 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 
 extern u32 kvmppc_trampoline_lowmem;
 extern u32 kvmppc_trampoline_enter;
+extern void kvmppc_rmcall(ulong srr0, ulong srr1);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/powerpc/include/asm/kvm_book3s_64_asm.h b/arch/powerpc/include/asm/kvm_book3s_64_asm.h
index fca9404c1a7d..183461b48407 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64_asm.h
@@ -69,7 +69,6 @@ struct kvmppc_book3s_shadow_vcpu {
 	ulong scratch0;
 	ulong scratch1;
 	ulong vmhandler;
-	ulong rmhandler;
 };
 
 #endif /*__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index d615fa8a1412..f7215e622dfd 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -167,6 +167,7 @@ struct kvm_vcpu_arch {
 	ulong trampoline_lowmem;
 	ulong trampoline_enter;
 	ulong highmem_handler;
+	ulong rmcall;
 	ulong host_paca_phys;
 	struct kvmppc_mmu mmu;
 #endif
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 1501e77c980c..ee9935442f0e 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -214,8 +214,6 @@ int main(void)
 	DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2));
 	DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct,
 					    shadow_vcpu.vmhandler));
-	DEFINE(PACA_KVM_RMHANDLER, offsetof(struct paca_struct,
-					    shadow_vcpu.rmhandler));
 	DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct,
 					   shadow_vcpu.scratch0));
 	DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct,
@@ -438,6 +436,7 @@ int main(void)
 	DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
 	DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
 	DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
+	DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
 	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
 #else
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 3e06eae3f2c8..13173922b678 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -919,6 +919,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
 	vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
 	vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
+	vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
 
 	vcpu->arch.shadow_msr = MSR_USER64;
 
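[Editorial note] One subtlety in the hunk above: kvmppc_rmcall is dereferenced, not taken by address. On 64-bit PowerPC with the ELFv1 ABI, a C-visible function symbol points at a function descriptor whose first doubleword is the real entry address, and that raw address is what the low-level entry code must branch to. A hedged sketch of the extraction (the helper name is hypothetical):

	#include <linux/types.h>

	extern void kvmppc_rmcall(ulong srr0, ulong srr1);	/* asm entry point */

	/* Hypothetical helper for illustration: peel the entry address
	 * out of a ppc64 ELFv1 function descriptor { entry, TOC, env }.
	 * This is what "vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;"
	 * does inline in the hunk above.
	 */
	static ulong entry_address_of(void *func)
	{
		return *(ulong *)func;
	}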
diff --git a/arch/powerpc/kvm/book3s_64_exports.c b/arch/powerpc/kvm/book3s_64_exports.c
index 5b2db38ed86c..99b07125c529 100644
--- a/arch/powerpc/kvm/book3s_64_exports.c
+++ b/arch/powerpc/kvm/book3s_64_exports.c
@@ -22,3 +22,4 @@
 
 EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter);
 EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem);
+EXPORT_SYMBOL_GPL(kvmppc_rmcall);
diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S
index 3c0ba5513077..33aef5345f6b 100644
--- a/arch/powerpc/kvm/book3s_64_interrupts.S
+++ b/arch/powerpc/kvm/book3s_64_interrupts.S
@@ -95,17 +95,14 @@ kvm_start_entry:
 	ld	r3, VCPU_HIGHMEM_HANDLER(r4)
 	std	r3, PACA_KVM_VMHANDLER(r13)
 
-	ld	r3, VCPU_TRAMPOLINE_ENTER(r4)
-	std	r3, PACA_KVM_RMHANDLER(r13)
-
 kvm_start_lightweight:
 
 	ld	r9, VCPU_PC(r4)			/* r9 = vcpu->arch.pc */
 	ld	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */
 
 	/* Load some guest state in the respective registers */
-	ld	r3, VCPU_CTR(r4)	/* r3 = vcpu->arch.ctr */
-	mtctr	r3			/* CTR = r3 */
+	ld	r5, VCPU_CTR(r4)	/* r5 = vcpu->arch.ctr */
+					/* will be swapped in by rmcall */
 
 	ld	r3, VCPU_LR(r4)		/* r3 = vcpu->arch.lr */
 	mtlr	r3			/* LR = r3 */
@@ -131,22 +128,14 @@ kvm_start_lightweight:
 
 no_dcbz32_on:
 
-	/* This sets the Magic value for the trampoline */
-
-	/* XXX this needs to move into a safe function, so we can
-	   be sure we don't get any interrupts */
-
-	li	r11, 1
-	stb	r11, PACA_KVM_IN_GUEST(r13)
-
-	ld	r3, PACA_KVM_RMHANDLER(r13)
-	mtsrr0	r3
+	ld	r6, VCPU_RMCALL(r4)
+	mtctr	r6
 
-	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
-	mtsrr1	r3
+	ld	r3, VCPU_TRAMPOLINE_ENTER(r4)
+	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
 
 	/* Jump to SLB patching handlder and into our guest */
-	RFI
+	bctr
 
 /*
  * This is the handler in module memory. It gets jumped at from the
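[Editorial note] The "will be swapped in by rmcall" comment above is the key to this hunk: CTR cannot carry the guest's CTR value here, because the bctr consumes CTR to reach kvmppc_rmcall, so the guest value is parked in r5 and only installed by the helper once relocation is off. In C terms, the tail of kvm_start_lightweight amounts to the following sketch (the typedef and function are editorial; the real transfer is the bctr, and r5 has no C-level equivalent):

	#include <linux/kvm_host.h>	/* struct kvm_vcpu */
	#include <asm/reg.h>		/* MSR_KERNEL, MSR_IR, MSR_DR */

	typedef void (*rmcall_fn)(unsigned long srr0, unsigned long srr1);

	/* Illustration only: jump to kvmppc_rmcall with the SLB patching
	 * trampoline as the target PC (srr0) and a real-mode MSR (srr1);
	 * r5 additionally carries the guest CTR, which rmcall moves into
	 * CTR before the RFI.  This call never returns.
	 */
	static void enter_guest_tail(struct kvm_vcpu *vcpu)
	{
		rmcall_fn rmcall = (rmcall_fn)vcpu->arch.rmcall;

		rmcall(vcpu->arch.trampoline_enter,
		       MSR_KERNEL & ~(MSR_IR | MSR_DR));
	}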
diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_64_rmhandlers.S
index 9ad1c2645d6f..e7091c9459a8 100644
--- a/arch/powerpc/kvm/book3s_64_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_64_rmhandlers.S
@@ -140,6 +140,24 @@ kvmppc_handler_lowmem_trampoline:
 	blr
 kvmppc_handler_lowmem_trampoline_end:
 
+/*
+ * Call a function in real mode
+ *
+ * Input Registers:
+ *
+ * R3 = function
+ * R4 = MSR
+ * R5 = CTR
+ *
+ */
+_GLOBAL(kvmppc_rmcall)
+	mtmsr	r4		/* Disable relocation, so mtsrr
+				   doesn't get interrupted */
+	mtctr	r5
+	mtsrr0	r3
+	mtsrr1	r4
+	RFI
+
.global kvmppc_trampoline_lowmem
kvmppc_trampoline_lowmem:
	.long kvmppc_handler_lowmem_trampoline - _stext
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index d07b88617b2c..35b762722187 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -63,6 +63,10 @@ kvmppc_handler_trampoline_enter:
 	mtsrr0	r9
 	mtsrr1	r10
 
+	/* Activate guest mode, so faults get handled by KVM */
+	li	r11, KVM_GUEST_MODE_GUEST
+	stb	r11, PACA_KVM_IN_GUEST(r13)
+
 	/* Remove LPAR shadow entries */
 
 #if SLB_NUM_BOLTED == 3