path: root/arch/powerpc/kernel/asm-offsets.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-14 23:51:36 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-14 23:51:36 -0500
commit	f080480488028bcc25357f85e8ae54ccc3bb7173 (patch)
tree	8fcc943f16d26c795b3b6324b478af2d5a30285d /arch/powerpc/kernel/asm-offsets.c
parent	eda670c626a4f53eb8ac5f20d8c10d3f0b54c583 (diff)
parent	e504c9098ed6acd9e1079c5e10e4910724ad429f (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM changes from Paolo Bonzini:
 "Here are the 3.13 KVM changes. There was a lot of work on the PPC side: the most interesting change from a user point of view is that the HV and emulation flavors can now coexist in a single kernel. On the x86 side there are nested virtualization improvements and a few bugfixes. ARM got transparent huge page support, improved overcommit, and support for big-endian guests.

  Finally, there is a new interface to connect KVM with VFIO. This helps with devices that use NoSnoop PCI transactions, letting the driver in the guest execute WBINVD instructions. This includes some nVidia cards on Windows, which fail to start without these patches and the corresponding userspace changes."

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (146 commits)
  kvm, vmx: Fix lazy FPU on nested guest
  arm/arm64: KVM: PSCI: propagate caller endianness to the incoming vcpu
  arm/arm64: KVM: MMIO support for BE guest
  kvm, cpuid: Fix sparse warning
  kvm: Delete prototype for non-existent function kvm_check_iopl
  kvm: Delete prototype for non-existent function complete_pio
  hung_task: add method to reset detector
  pvclock: detect watchdog reset at pvclock read
  kvm: optimize out smp_mb after srcu_read_unlock
  srcu: API for barrier after srcu read unlock
  KVM: remove vm mmap method
  KVM: IOMMU: hva align mapping page size
  KVM: x86: trace cpuid emulation when called from emulator
  KVM: emulator: cleanup decode_register_operand() a bit
  KVM: emulator: check rex prefix inside decode_register()
  KVM: x86: fix emulation of "movzbl %bpl, %eax"
  kvm_host: typo fix
  KVM: x86: emulate SAHF instruction
  MAINTAINERS: add tree for kvm.git
  Documentation/kvm: add a 00-INDEX file
  ...
Diffstat (limited to 'arch/powerpc/kernel/asm-offsets.c')
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	21
1 file changed, 14 insertions, 7 deletions
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index e60a3697932c..2ea5cc033ec8 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -439,7 +439,7 @@ int main(void)
 	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
 	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
 	DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
 	DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));
@@ -470,7 +470,7 @@ int main(void)
 	DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));

 	/* book3s */
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
 	DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
 	DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
@@ -502,6 +502,8 @@ int main(void)
 	DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
 	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
 	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+	DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
+	DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
 	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
 	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
 	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
@@ -511,18 +513,22 @@ int main(void)
 	DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
 	DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
 	DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
+	DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
+	DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
 	DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
 	DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
 	DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
 	DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
-	DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
-			   offsetof(struct kvmppc_vcpu_book3s, vcpu));
+	DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
+	DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
+	DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
 	DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
 	DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
 	DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));

 #ifdef CONFIG_PPC_BOOK3S_64
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+	DEFINE(PACA_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
 # define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
 #else
 # define SVCPU_FIELD(x, f)
@@ -574,7 +580,7 @@ int main(void)
 	HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
 	HSTATE_FIELD(HSTATE_NAPPING, napping);

-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req);
 	HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
 	HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
@@ -590,10 +596,11 @@ int main(void)
 	HSTATE_FIELD(HSTATE_DABR, dabr);
 	HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
 	DEFINE(IPI_PRIORITY, IPI_PRIORITY);
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

 #ifdef CONFIG_PPC_BOOK3S_64
 	HSTATE_FIELD(HSTATE_CFAR, cfar);
+	HSTATE_FIELD(HSTATE_PPR, ppr);
 #endif /* CONFIG_PPC_BOOK3S_64 */

 #else /* CONFIG_PPC_BOOK3S */
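
Background note: the DEFINE() entries in the hunks above never run at kernel runtime. asm-offsets.c is compiled only to assembly, and each DEFINE() plants a marker carrying a compile-time constant (typically an offsetof() value) that the build system extracts into asm-offsets.h, so hand-written assembly such as the KVM entry/exit paths can refer to structure fields by symbolic name. That is also why renaming the CONFIG_* guards here (for example CONFIG_KVM_BOOK3S_64_HV to CONFIG_KVM_BOOK3S_HV_POSSIBLE) changes which offsets become visible to the assembly side. The following standalone sketch illustrates the technique; the struct, field names, and exact marker format are simplified illustrations and are not the kernel's actual kbuild machinery.

/* asm_offsets_demo.c -- minimal sketch of the asm-offsets technique.
 * Compile to assembly only (e.g. "cc -S asm_offsets_demo.c"); the
 * "->NAME value" markers then appear in the .s output, where a small
 * script can turn them into "#define NAME value" lines that assembly
 * code may #include.  The struct below is illustrative, not the
 * kernel's kvm_vcpu. */
#include <stddef.h>

struct demo_vcpu {
	unsigned long pc;	/* stands in for arch.pc  */
	unsigned long msr;	/* stands in for arch.msr */
};

/* Embed "sym" and the constant "val" as a marker line in the generated
 * assembly; the "i" constraint forces a compile-time constant. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	DEFINE(DEMO_VCPU_PC,  offsetof(struct demo_vcpu, pc));
	DEFINE(DEMO_VCPU_MSR, offsetof(struct demo_vcpu, msr));
	return 0;
}

In the real tree the macro lives in include/linux/kbuild.h and the extraction into asm-offsets.h is done by the Kbuild scripts; the sketch only shows the mechanism, not those exact details.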