Diffstat (limited to 'arch/powerpc/kvm/powerpc.c')
 arch/powerpc/kvm/powerpc.c | 130 ++++----------------------------
 1 file changed, 16 insertions(+), 114 deletions(-)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 8bef0efcdfe1..2822c8ccfaaf 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -28,9 +28,9 @@
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
 #include <asm/tlbflush.h>
+#include "timing.h"
 #include "../mm/mmu_decl.h"
 
-
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
 {
 	return gfn;
@@ -99,14 +99,7 @@ void kvm_arch_hardware_unsetup(void)
 
 void kvm_arch_check_processor_compat(void *rtn)
 {
-	int r;
-
-	if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
-		r = 0;
-	else
-		r = -ENOTSUPP;
-
-	*(int *)rtn = r;
+	*(int *)rtn = kvmppc_core_check_processor_compat();
 }
 
 struct kvm *kvm_arch_create_vm(void)
@@ -144,9 +137,6 @@ int kvm_dev_ioctl_check_extension(long ext)
 	int r;
 
 	switch (ext) {
-	case KVM_CAP_USER_MEMORY:
-		r = 1;
-		break;
 	case KVM_CAP_COALESCED_MMIO:
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
@@ -179,30 +169,15 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
 	struct kvm_vcpu *vcpu;
-	int err;
-
-	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-	if (!vcpu) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	err = kvm_vcpu_init(vcpu, kvm, id);
-	if (err)
-		goto free_vcpu;
-
+	vcpu = kvmppc_core_vcpu_create(kvm, id);
+	kvmppc_create_vcpu_debugfs(vcpu, id);
 	return vcpu;
-
-free_vcpu:
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
-out:
-	return ERR_PTR(err);
 }
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
-	kvm_vcpu_uninit(vcpu);
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
+	kvmppc_remove_vcpu_debugfs(vcpu);
+	kvmppc_core_vcpu_free(vcpu);
 }
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -212,16 +187,14 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-	unsigned int priority = exception_priority[BOOKE_INTERRUPT_DECREMENTER];
-
-	return test_bit(priority, &vcpu->arch.pending_exceptions);
+	return kvmppc_core_pending_dec(vcpu);
 }
 
 static void kvmppc_decrementer_func(unsigned long data)
 {
 	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
 
-	kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER);
+	kvmppc_core_queue_dec(vcpu);
 
 	if (waitqueue_active(&vcpu->wq)) {
 		wake_up_interruptible(&vcpu->wq);
@@ -242,96 +215,25 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 	kvmppc_core_destroy_mmu(vcpu);
 }
 
-/* Note: clearing MSR[DE] just means that the debug interrupt will not be
- * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
- * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
- * will be delivered as an "imprecise debug event" (which is indicated by
- * DBSR[IDE]).
- */
-static void kvmppc_disable_debug_interrupts(void)
-{
-	mtmsr(mfmsr() & ~MSR_DE);
-}
-
-static void kvmppc_restore_host_debug_state(struct kvm_vcpu *vcpu)
-{
-	kvmppc_disable_debug_interrupts();
-
-	mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
-	mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
-	mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
-	mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
-	mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
-	mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
-	mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
-	mtmsr(vcpu->arch.host_msr);
-}
-
-static void kvmppc_load_guest_debug_registers(struct kvm_vcpu *vcpu)
-{
-	struct kvm_guest_debug *dbg = &vcpu->guest_debug;
-	u32 dbcr0 = 0;
-
-	vcpu->arch.host_msr = mfmsr();
-	kvmppc_disable_debug_interrupts();
-
-	/* Save host debug register state. */
-	vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
-	vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
-	vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
-	vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
-	vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
-	vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
-	vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);
-
-	/* set registers up for guest */
-
-	if (dbg->bp[0]) {
-		mtspr(SPRN_IAC1, dbg->bp[0]);
-		dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
-	}
-	if (dbg->bp[1]) {
-		mtspr(SPRN_IAC2, dbg->bp[1]);
-		dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
-	}
-	if (dbg->bp[2]) {
-		mtspr(SPRN_IAC3, dbg->bp[2]);
-		dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
-	}
-	if (dbg->bp[3]) {
-		mtspr(SPRN_IAC4, dbg->bp[3]);
-		dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
-	}
-
-	mtspr(SPRN_DBCR0, dbcr0);
-	mtspr(SPRN_DBCR1, 0);
-	mtspr(SPRN_DBCR2, 0);
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	int i;
-
 	if (vcpu->guest_debug.enabled)
-		kvmppc_load_guest_debug_registers(vcpu);
+		kvmppc_core_load_guest_debugstate(vcpu);
 
-	/* Mark every guest entry in the shadow TLB entry modified, so that they
-	 * will all be reloaded on the next vcpu run (instead of being
-	 * demand-faulted). */
-	for (i = 0; i <= tlb_44x_hwater; i++)
-		kvmppc_tlbe_set_modified(vcpu, i);
+	kvmppc_core_vcpu_load(vcpu, cpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->guest_debug.enabled)
-		kvmppc_restore_host_debug_state(vcpu);
+		kvmppc_core_load_host_debugstate(vcpu);
 
 	/* Don't leave guest TLB entries resident when being de-scheduled. */
 	/* XXX It would be nice to differentiate between heavyweight exit and
 	 * sched_out here, since we could avoid the TLB flush for heavyweight
 	 * exits. */
 	_tlbil_all();
+	kvmppc_core_vcpu_put(vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
@@ -355,14 +257,14 @@ int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
 static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
 {
-	u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
+	ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
 	*gpr = run->dcr.data;
 }
 
 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                       struct kvm_run *run)
 {
-	u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
+	ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
 
 	if (run->mmio.len > sizeof(*gpr)) {
 		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
@@ -460,7 +362,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		vcpu->arch.dcr_needed = 0;
 	}
 
-	kvmppc_check_and_deliver_interrupts(vcpu);
+	kvmppc_core_deliver_interrupts(vcpu);
 
 	local_irq_disable();
 	kvm_guest_enter();
@@ -478,7 +380,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 {
-	kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL);
+	kvmppc_core_queue_external(vcpu, irq);
 
 	if (waitqueue_active(&vcpu->wq)) {
 		wake_up_interruptible(&vcpu->wq);
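
The hunks above show only the generic half of this refactor: each removed
block reappears behind one of the new kvmppc_core_* entry points, whose
definitions are not part of this diff. As a rough sketch, assuming the moved
logic stays unchanged, and noting that the actual destination files (likely
44x.c and/or booke.c) are not shown here, the 440-side implementations would
look like:

	/* Hypothetical reconstruction from the code removed above;
	 * not the literal contents of the real patch. */

	int kvmppc_core_check_processor_compat(void)
	{
		/* Same platform test the old kvm_arch_check_processor_compat() did. */
		if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
			return 0;

		return -ENOTSUPP;
	}

	struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
	{
		struct kvm_vcpu *vcpu;
		int err;

		vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
		if (!vcpu)
			return ERR_PTR(-ENOMEM);

		err = kvm_vcpu_init(vcpu, kvm, id);
		if (err) {
			kmem_cache_free(kvm_vcpu_cache, vcpu);
			return ERR_PTR(err);
		}

		return vcpu;
	}

	void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
	{
		kvm_vcpu_uninit(vcpu);
		kmem_cache_free(kvm_vcpu_cache, vcpu);
	}

	int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
	{
		/* Same bit test the old kvm_cpu_has_pending_timer() performed. */
		unsigned int priority = exception_priority[BOOKE_INTERRUPT_DECREMENTER];

		return test_bit(priority, &vcpu->arch.pending_exceptions);
	}

	void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
	{
		kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER);
	}

	void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
	                                struct kvm_interrupt *irq)
	{
		/* irq is unused in this sketch; the Book E core queues a
		 * generic external interrupt regardless of its payload. */
		kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL);
	}

The debugfs helpers (kvmppc_create_vcpu_debugfs(), kvmppc_remove_vcpu_debugfs())
and the new "timing.h" include have no counterpart in the removed code, so
their definitions cannot be reconstructed from this diff alone.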