Diffstat (limited to 'arch/powerpc/kvm/powerpc.c')
 arch/powerpc/kvm/powerpc.c | 99 ++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 98 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 53826a5f6c06..90a6fc422b23 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -27,6 +27,7 @@
 #include <asm/cputable.h>
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
+#include <asm/tlbflush.h>
 
 
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
@@ -239,18 +240,114 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
 }
 
+/* Note: clearing MSR[DE] just means that the debug interrupt will not be
+ * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
+ * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
+ * will be delivered as an "imprecise debug event" (which is indicated by
+ * DBSR[IDE]).
+ */
+static void kvmppc_disable_debug_interrupts(void)
+{
+	mtmsr(mfmsr() & ~MSR_DE);
+}
+
+static void kvmppc_restore_host_debug_state(struct kvm_vcpu *vcpu)
+{
+	kvmppc_disable_debug_interrupts();
+
+	mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
+	mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
+	mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
+	mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
+	mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
+	mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
+	mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
+	mtmsr(vcpu->arch.host_msr);
+}
+
+static void kvmppc_load_guest_debug_registers(struct kvm_vcpu *vcpu)
+{
+	struct kvm_guest_debug *dbg = &vcpu->guest_debug;
+	u32 dbcr0 = 0;
+
+	vcpu->arch.host_msr = mfmsr();
+	kvmppc_disable_debug_interrupts();
+
+	/* Save host debug register state. */
+	vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
+	vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
+	vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
+	vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
+	vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
+	vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
+	vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);
+
+	/* Set registers up for the guest. */
+
+	if (dbg->bp[0]) {
+		mtspr(SPRN_IAC1, dbg->bp[0]);
+		dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
+	}
+	if (dbg->bp[1]) {
+		mtspr(SPRN_IAC2, dbg->bp[1]);
+		dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
+	}
+	if (dbg->bp[2]) {
+		mtspr(SPRN_IAC3, dbg->bp[2]);
+		dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
+	}
+	if (dbg->bp[3]) {
+		mtspr(SPRN_IAC4, dbg->bp[3]);
+		dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
+	}
+
+	mtspr(SPRN_DBCR0, dbcr0);
+	mtspr(SPRN_DBCR1, 0);
+	mtspr(SPRN_DBCR2, 0);
+}
+
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	int i;
+
+	if (vcpu->guest_debug.enabled)
+		kvmppc_load_guest_debug_registers(vcpu);
+
+	/* Mark every guest entry in the shadow TLB as modified, so that they
+	 * will all be reloaded on the next vcpu run (instead of being
+	 * demand-faulted). */
+	for (i = 0; i <= tlb_44x_hwater; i++)
+		kvmppc_tlbe_set_modified(vcpu, i);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	if (vcpu->guest_debug.enabled)
+		kvmppc_restore_host_debug_state(vcpu);
+
+	/* Don't leave guest TLB entries resident when being de-scheduled. */
+	/* XXX It would be nice to differentiate between heavyweight exit and
+	 * sched_out here, since we could avoid the TLB flush for heavyweight
+	 * exits. */
+	_tlbia();
 }
 
 int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
                                     struct kvm_debug_guest *dbg)
 {
-	return -ENOTSUPP;
+	int i;
+
+	vcpu->guest_debug.enabled = dbg->enabled;
+	if (vcpu->guest_debug.enabled) {
+		for (i = 0; i < ARRAY_SIZE(vcpu->guest_debug.bp); i++) {
+			if (dbg->breakpoints[i].enabled)
+				vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
+			else
+				vcpu->guest_debug.bp[i] = 0;
+		}
+	}
+
+	return 0;
 }
 
 static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
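
For context, kvm_arch_vcpu_ioctl_debug_guest() is the arch hook reached through KVM's KVM_DEBUG_GUEST vcpu ioctl. Below is a minimal, hypothetical userspace sketch of driving it: it assumes a vcpu fd already obtained via the usual KVM_CREATE_VM/KVM_CREATE_VCPU sequence, and the struct kvm_debug_guest layout (enabled, breakpoints[].enabled, breakpoints[].address) implied by the patch above; the helper name is invented for illustration.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: arm a single guest instruction breakpoint. */
static int set_guest_breakpoint(int vcpu_fd, __u64 addr)
{
	struct kvm_debug_guest dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.enabled = 1;
	dbg.breakpoints[0].enabled = 1;
	dbg.breakpoints[0].address = addr;
	/* Slots left zeroed mean "no breakpoint": the patch only programs
	 * IACn/DBCR0 for nonzero vcpu->guest_debug.bp[n]. On 440, slot 0
	 * would land in IAC1 with DBCR0[IAC1] and DBCR0[IDM] set. */

	return ioctl(vcpu_fd, KVM_DEBUG_GUEST, &dbg);
}

Passing dbg.enabled = 0 disables guest debugging entirely, and the lazy switch in kvm_arch_vcpu_load()/kvm_arch_vcpu_put() then skips the debug register save/restore on context switch.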
