Diffstat (limited to 'arch/powerpc/kernel/asm-offsets.c')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c  190
1 file changed, 144 insertions, 46 deletions
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 36e1c8a29be8..54b935f2f5de 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -128,6 +128,7 @@ int main(void)
 	DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
 	/* paca */
 	DEFINE(PACA_SIZE, sizeof(struct paca_struct));
+	DEFINE(PACA_LOCK_TOKEN, offsetof(struct paca_struct, lock_token));
 	DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
 	DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
 	DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
@@ -187,7 +188,9 @@ int main(void)
 	DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
 	DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
 	DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
+	DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use));
 	DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
+	DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count));
 	DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
 #endif /* CONFIG_PPC_STD_MMU_64 */
 	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
@@ -198,11 +201,6 @@ int main(void)
 	DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
 	DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
 	DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-	DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
-	DEFINE(SVCPU_SLB, offsetof(struct kvmppc_book3s_shadow_vcpu, slb));
-	DEFINE(SVCPU_SLB_MAX, offsetof(struct kvmppc_book3s_shadow_vcpu, slb_max));
-#endif
 #endif /* CONFIG_PPC64 */
 
 	/* RTAS */
@@ -397,67 +395,160 @@ int main(void)
 	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
 	DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
+	DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
+	DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
+#ifdef CONFIG_ALTIVEC
+	DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
+	DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
+#endif
+#ifdef CONFIG_VSX
+	DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
+#endif
+	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
+	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
+	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
+	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
+	DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
+	DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));
+	DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0));
+	DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1));
+	DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
+	DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
+#endif
 	DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
 	DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
 	DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
 	DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
 	DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
+	DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1));
 	DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
 	DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
+	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
 
 	/* book3s */
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
+	DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
+	DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
+	DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
+	DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
+	DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
+	DEFINE(KVM_ONLINE_CPUS, offsetof(struct kvm, online_vcpus.counter));
+	DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu));
+	DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
+	DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
+	DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
+	DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
+#endif
 #ifdef CONFIG_PPC_BOOK3S
+	DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
 	DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
 	DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
-	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
+	DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
+	DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
+	DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
+	DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
+	DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
+	DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
+	DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
 	DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
 	DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
 	DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
 	DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
 	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
+	DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
+	DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
+	DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions));
+	DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa));
+	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
+	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
+	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
+	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
+	DEFINE(VCPU_LAST_CPU, offsetof(struct kvm_vcpu, arch.last_cpu));
+	DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
+	DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
+	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
+	DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
+	DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
+	DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
+	DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
+	DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
 	DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
 			offsetof(struct kvmppc_vcpu_book3s, vcpu));
-	DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr));
-	DEFINE(SVCPU_XER, offsetof(struct kvmppc_book3s_shadow_vcpu, xer));
-	DEFINE(SVCPU_CTR, offsetof(struct kvmppc_book3s_shadow_vcpu, ctr));
-	DEFINE(SVCPU_LR, offsetof(struct kvmppc_book3s_shadow_vcpu, lr));
-	DEFINE(SVCPU_PC, offsetof(struct kvmppc_book3s_shadow_vcpu, pc));
-	DEFINE(SVCPU_R0, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[0]));
-	DEFINE(SVCPU_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[1]));
-	DEFINE(SVCPU_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[2]));
-	DEFINE(SVCPU_R3, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[3]));
-	DEFINE(SVCPU_R4, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[4]));
-	DEFINE(SVCPU_R5, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[5]));
-	DEFINE(SVCPU_R6, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[6]));
-	DEFINE(SVCPU_R7, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[7]));
-	DEFINE(SVCPU_R8, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[8]));
-	DEFINE(SVCPU_R9, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[9]));
-	DEFINE(SVCPU_R10, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[10]));
-	DEFINE(SVCPU_R11, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[11]));
-	DEFINE(SVCPU_R12, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[12]));
-	DEFINE(SVCPU_R13, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[13]));
-	DEFINE(SVCPU_HOST_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r1));
-	DEFINE(SVCPU_HOST_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r2));
-	DEFINE(SVCPU_VMHANDLER, offsetof(struct kvmppc_book3s_shadow_vcpu,
-			vmhandler));
-	DEFINE(SVCPU_SCRATCH0, offsetof(struct kvmppc_book3s_shadow_vcpu,
-			scratch0));
-	DEFINE(SVCPU_SCRATCH1, offsetof(struct kvmppc_book3s_shadow_vcpu,
-			scratch1));
-	DEFINE(SVCPU_IN_GUEST, offsetof(struct kvmppc_book3s_shadow_vcpu,
-			in_guest));
-	DEFINE(SVCPU_FAULT_DSISR, offsetof(struct kvmppc_book3s_shadow_vcpu,
-			fault_dsisr));
-	DEFINE(SVCPU_FAULT_DAR, offsetof(struct kvmppc_book3s_shadow_vcpu,
-			fault_dar));
-	DEFINE(SVCPU_LAST_INST, offsetof(struct kvmppc_book3s_shadow_vcpu,
-			last_inst));
-	DEFINE(SVCPU_SHADOW_SRR1, offsetof(struct kvmppc_book3s_shadow_vcpu,
-			shadow_srr1));
+	DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
+	DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
+	DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_KVM_BOOK3S_PR
+# define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
+#else
+# define SVCPU_FIELD(x, f)
+#endif
+# define HSTATE_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
+#else	/* 32-bit */
+# define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
+# define HSTATE_FIELD(x, f)	DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.f))
+#endif
+
+	SVCPU_FIELD(SVCPU_CR, cr);
+	SVCPU_FIELD(SVCPU_XER, xer);
+	SVCPU_FIELD(SVCPU_CTR, ctr);
+	SVCPU_FIELD(SVCPU_LR, lr);
+	SVCPU_FIELD(SVCPU_PC, pc);
+	SVCPU_FIELD(SVCPU_R0, gpr[0]);
+	SVCPU_FIELD(SVCPU_R1, gpr[1]);
+	SVCPU_FIELD(SVCPU_R2, gpr[2]);
+	SVCPU_FIELD(SVCPU_R3, gpr[3]);
+	SVCPU_FIELD(SVCPU_R4, gpr[4]);
+	SVCPU_FIELD(SVCPU_R5, gpr[5]);
+	SVCPU_FIELD(SVCPU_R6, gpr[6]);
+	SVCPU_FIELD(SVCPU_R7, gpr[7]);
+	SVCPU_FIELD(SVCPU_R8, gpr[8]);
+	SVCPU_FIELD(SVCPU_R9, gpr[9]);
+	SVCPU_FIELD(SVCPU_R10, gpr[10]);
+	SVCPU_FIELD(SVCPU_R11, gpr[11]);
+	SVCPU_FIELD(SVCPU_R12, gpr[12]);
+	SVCPU_FIELD(SVCPU_R13, gpr[13]);
+	SVCPU_FIELD(SVCPU_FAULT_DSISR, fault_dsisr);
+	SVCPU_FIELD(SVCPU_FAULT_DAR, fault_dar);
+	SVCPU_FIELD(SVCPU_LAST_INST, last_inst);
+	SVCPU_FIELD(SVCPU_SHADOW_SRR1, shadow_srr1);
 #ifdef CONFIG_PPC_BOOK3S_32
-	DEFINE(SVCPU_SR, offsetof(struct kvmppc_book3s_shadow_vcpu, sr));
+	SVCPU_FIELD(SVCPU_SR, sr);
 #endif
-#else
+#ifdef CONFIG_PPC64
+	SVCPU_FIELD(SVCPU_SLB, slb);
+	SVCPU_FIELD(SVCPU_SLB_MAX, slb_max);
+#endif
+
+	HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
+	HSTATE_FIELD(HSTATE_HOST_R2, host_r2);
+	HSTATE_FIELD(HSTATE_HOST_MSR, host_msr);
+	HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
+	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
+	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
+
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
+	HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
+	HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
+	HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
+	HSTATE_FIELD(HSTATE_PMC, host_pmc);
+	HSTATE_FIELD(HSTATE_PURR, host_purr);
+	HSTATE_FIELD(HSTATE_SPURR, host_spurr);
+	HSTATE_FIELD(HSTATE_DSCR, host_dscr);
+	HSTATE_FIELD(HSTATE_DABR, dabr);
+	HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
+
+#else /* CONFIG_PPC_BOOK3S */
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
 	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
 	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
@@ -467,7 +558,7 @@ int main(void)
 	DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
 	DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
 #endif /* CONFIG_PPC_BOOK3S */
-#endif
+#endif /* CONFIG_KVM */
 
 #ifdef CONFIG_KVM_GUEST
 	DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared,
@@ -497,6 +588,13 @@ int main(void)
 	DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
 #endif
 
+#if defined(CONFIG_KVM) && defined(CONFIG_SPE)
+	DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0]));
+	DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc));
+	DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr));
+	DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
+#endif
+
 #ifdef CONFIG_KVM_EXIT_TIMING
 	DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
 			arch.timing_exit.tv32.tbu));
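
Background note (not part of the commit): asm-offsets.c is compiled to assembly only, never linked. The DEFINE() macro, which comes from include/linux/kbuild.h, plants each constant as a marker line in the generated .s file, and kbuild's sed rule turns those markers into #define lines in the generated asm-offsets.h that low-level assembly (such as the Book3S HV entry/exit code this series adds) can include. Below is a minimal, self-contained sketch of that mechanism under the macro form used in kernels of this era; the struct and symbol names (demo_paca, DEMO_*) are made up for illustration.

/* Sketch of the asm-offsets technique; demo_paca and DEMO_* are hypothetical. */
#include <stddef.h>

/* Same idea as DEFINE() in include/linux/kbuild.h: emit an integer-constant
 * expression as a recognisable marker line in the compiler's assembly output. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct demo_paca {			/* hypothetical stand-in for paca_struct */
	unsigned long lock_token;
	unsigned long kstack;
};

int main(void)
{
	DEFINE(DEMO_LOCK_TOKEN, offsetof(struct demo_paca, lock_token));
	DEFINE(DEMO_KSAVE, offsetof(struct demo_paca, kstack));
	return 0;
}

/*
 * This file is only ever compiled with -S (the "->" markers would not
 * assemble).  The .s output contains lines such as
 *     ->DEMO_LOCK_TOKEN 0 offsetof(struct demo_paca, lock_token)
 * (the operand syntax varies by target), which the kbuild sed rule rewrites
 * into
 *     #define DEMO_LOCK_TOKEN 0
 * in the generated header, so assembly code can use the constants as
 * immediate offsets from a base register (on ppc64, typically off r13,
 * the paca pointer).
 */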