author     Paolo Bonzini <pbonzini@redhat.com>    2014-08-05 03:55:22 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>    2014-08-05 03:58:11 -0400
commit     cc568ead3ce8e0284e7e2cc77bd1dafb03ba4ca1
tree       6525ab90e70f0e0736e9bc050f66645ca373c802
parent     5d5768660539b6d0da0d46113ffb0676540579a6
parent     8e6afa36e754be84b468d7df9e5aa71cf4003f3b
Merge tag 'signed-kvm-ppc-next' of git://github.com/agraf/linux-2.6 into kvm
Patch queue for ppc - 2014-08-01

Highlights in this release include:

  - BookE: Rework instruction fetch, not racy anymore now
  - BookE HV: Fix ONE_REG accessors for some in-hardware registers
  - Book3S: Good number of LE host fixes, enable HV on LE
  - Book3S: Some misc bug fixes
  - Book3S HV: Add in-guest debug support
  - Book3S HV: Preload cache lines on context switch
  - Remove 440 support

Alexander Graf (31):
      KVM: PPC: Book3s PR: Disable AIL mode with OPAL
      KVM: PPC: Book3s HV: Fix tlbie compile error
      KVM: PPC: Book3S PR: Handle hyp doorbell exits
      KVM: PPC: Book3S PR: Fix ABIv2 on LE
      KVM: PPC: Book3S PR: Fix sparse endian checks
      PPC: Add asm helpers for BE 32bit load/store
      KVM: PPC: Book3S HV: Make HTAB code LE host aware
      KVM: PPC: Book3S HV: Access guest VPA in BE
      KVM: PPC: Book3S HV: Access host lppaca and shadow slb in BE
      KVM: PPC: Book3S HV: Access XICS in BE
      KVM: PPC: Book3S HV: Fix ABIv2 on LE
      KVM: PPC: Book3S HV: Enable for little endian hosts
      KVM: PPC: Book3S: Move vcore definition to end of kvm_arch struct
      KVM: PPC: Deflect page write faults properly in kvmppc_st
      KVM: PPC: Book3S: Stop PTE lookup on write errors
      KVM: PPC: Book3S: Add hack for split real mode
      KVM: PPC: Book3S: Make magic page properly 4k mappable
      KVM: PPC: Remove 440 support
      KVM: Rename and add argument to check_extension
      KVM: Allow KVM_CHECK_EXTENSION on the vm fd
      KVM: PPC: Book3S: Provide different CAPs based on HV or PR mode
      KVM: PPC: Implement kvmppc_xlate for all targets
      KVM: PPC: Move kvmppc_ld/st to common code
      KVM: PPC: Remove kvmppc_bad_hva()
      KVM: PPC: Use kvm_read_guest in kvmppc_ld
      KVM: PPC: Handle magic page in kvmppc_ld/st
      KVM: PPC: Separate loadstore emulation from priv emulation
      KVM: PPC: Expose helper functions for data/inst faults
      KVM: PPC: Remove DCR handling
      KVM: PPC: HV: Remove generic instruction emulation
      KVM: PPC: PR: Handle FSCR feature deselects

Alexey Kardashevskiy (1):
      KVM: PPC: Book3S: Fix LPCR one_reg interface

Aneesh Kumar K.V (4):
      KVM: PPC: BOOK3S: PR: Fix PURR and SPURR emulation
      KVM: PPC: BOOK3S: PR: Emulate virtual timebase register
      KVM: PPC: BOOK3S: PR: Emulate instruction counter
      KVM: PPC: BOOK3S: HV: Update compute_tlbie_rb to handle 16MB base page

Anton Blanchard (2):
      KVM: PPC: Book3S HV: Fix ABIv2 indirect branch issue
      KVM: PPC: Assembly functions exported to modules need _GLOBAL_TOC()

Bharat Bhushan (10):
      kvm: ppc: bookehv: Added wrapper macros for shadow registers
      kvm: ppc: booke: Use the shared struct helpers of SRR0 and SRR1
      kvm: ppc: booke: Use the shared struct helpers of SPRN_DEAR
      kvm: ppc: booke: Add shared struct helpers of SPRN_ESR
      kvm: ppc: booke: Use the shared struct helpers for SPRN_SPRG0-7
      kvm: ppc: Add SPRN_EPR get helper function
      kvm: ppc: bookehv: Save restore SPRN_SPRG9 on guest entry exit
      KVM: PPC: Booke-hv: Add one reg interface for SPRG9
      KVM: PPC: Remove comment saying SPRG1 is used for vcpu pointer
      KVM: PPC: BOOKEHV: rename e500hv_spr to bookehv_spr

Michael Neuling (1):
      KVM: PPC: Book3S HV: Add H_SET_MODE hcall handling

Mihai Caraman (8):
      KVM: PPC: e500mc: Enhance tlb invalidation condition on vcpu schedule
      KVM: PPC: e500: Fix default tlb for victim hint
      KVM: PPC: e500: Emulate power management control SPR
      KVM: PPC: e500mc: Revert "add load inst fixup"
      KVM: PPC: Book3e: Add TLBSEL/TSIZE defines for MAS0/1
      KVM: PPC: Book3s: Remove kvmppc_read_inst() function
      KVM: PPC: Allow kvmppc_get_last_inst() to fail
      KVM: PPC: Bookehv: Get vcpu's last instruction for emulation

Paul Mackerras (4):
      KVM: PPC: Book3S: Controls for in-kernel sPAPR hypercall handling
      KVM: PPC: Book3S: Allow only implemented hcalls to be enabled or disabled
      KVM: PPC: Book3S PR: Take SRCU read lock around RTAS kvm_read_guest() call
      KVM: PPC: Book3S: Make kvmppc_ld return a more accurate error indication

Stewart Smith (2):
      Split out struct kvmppc_vcore creation to separate function
      Use the POWER8 Micro Partition Prefetch Engine in KVM HV on POWER8

Conflicts:
	Documentation/virtual/kvm/api.txt
-rw-r--r--  Documentation/powerpc/00-INDEX | 2
-rw-r--r--  Documentation/powerpc/kvm_440.txt | 41
-rw-r--r--  Documentation/virtual/kvm/api.txt | 60
-rw-r--r--  arch/arm/kvm/arm.c | 2
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c | 2
-rw-r--r--  arch/mips/kvm/mips.c | 2
-rw-r--r--  arch/powerpc/Kconfig.debug | 4
-rw-r--r--  arch/powerpc/configs/ppc44x_defconfig | 1
-rw-r--r--  arch/powerpc/include/asm/asm-compat.h | 4
-rw-r--r--  arch/powerpc/include/asm/cache.h | 7
-rw-r--r--  arch/powerpc/include/asm/hvcall.h | 6
-rw-r--r--  arch/powerpc/include/asm/kvm_44x.h | 67
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h | 2
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 51
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h | 29
-rw-r--r--  arch/powerpc/include/asm/kvm_booke.h | 15
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 28
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 116
-rw-r--r--  arch/powerpc/include/asm/mmu-book3e.h | 8
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 17
-rw-r--r--  arch/powerpc/include/asm/reg.h | 13
-rw-r--r--  arch/powerpc/include/asm/time.h | 9
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm.h | 2
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 2
-rw-r--r--  arch/powerpc/kvm/44x.c | 237
-rw-r--r--  arch/powerpc/kvm/44x_emulate.c | 194
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c | 528
-rw-r--r--  arch/powerpc/kvm/44x_tlb.h | 86
-rw-r--r--  arch/powerpc/kvm/Kconfig | 17
-rw-r--r--  arch/powerpc/kvm/Makefile | 18
-rw-r--r--  arch/powerpc/kvm/book3s.c | 156
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c | 7
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c | 5
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 145
-rw-r--r--  arch/powerpc/kvm/book3s_emulate.c | 28
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 271
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c | 13
-rw-r--r--  arch/powerpc/kvm/book3s_hv_ras.c | 6
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 146
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 70
-rw-r--r--  arch/powerpc/kvm/book3s_paired_singles.c | 38
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c | 223
-rw-r--r--  arch/powerpc/kvm/book3s_pr_papr.c | 92
-rw-r--r--  arch/powerpc/kvm/booke.c | 225
-rw-r--r--  arch/powerpc/kvm/booke.h | 7
-rw-r--r--  arch/powerpc/kvm/booke_emulate.c | 8
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S | 5
-rw-r--r--  arch/powerpc/kvm/bookehv_interrupts.S | 60
-rw-r--r--  arch/powerpc/kvm/e500_emulate.c | 12
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c | 102
-rw-r--r--  arch/powerpc/kvm/e500mc.c | 28
-rw-r--r--  arch/powerpc/kvm/emulate.c | 206
-rw-r--r--  arch/powerpc/kvm/emulate_loadstore.c | 272
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 179
-rw-r--r--  arch/powerpc/kvm/timing.c | 1
-rw-r--r--  arch/powerpc/kvm/timing.h | 3
-rw-r--r--  arch/powerpc/kvm/trace_pr.h | 20
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 2
-rw-r--r--  arch/x86/kvm/x86.c | 2
-rw-r--r--  include/linux/kvm_host.h | 2
-rw-r--r--  include/uapi/linux/kvm.h | 6
-rw-r--r--  virt/kvm/kvm_main.c | 60
63 files changed, 1894 insertions(+), 2078 deletions(-)
diff --git a/Documentation/powerpc/00-INDEX b/Documentation/powerpc/00-INDEX
index 6db73df04278..a68784d0a1ee 100644
--- a/Documentation/powerpc/00-INDEX
+++ b/Documentation/powerpc/00-INDEX
@@ -17,8 +17,6 @@ firmware-assisted-dump.txt
17 - Documentation on the firmware assisted dump mechanism "fadump". 17 - Documentation on the firmware assisted dump mechanism "fadump".
18hvcs.txt 18hvcs.txt
19 - IBM "Hypervisor Virtual Console Server" Installation Guide 19 - IBM "Hypervisor Virtual Console Server" Installation Guide
20kvm_440.txt
21 - Various notes on the implementation of KVM for PowerPC 440.
22mpc52xx.txt 20mpc52xx.txt
23 - Linux 2.6.x on MPC52xx family 21 - Linux 2.6.x on MPC52xx family
24pmu-ebb.txt 22pmu-ebb.txt
diff --git a/Documentation/powerpc/kvm_440.txt b/Documentation/powerpc/kvm_440.txt
deleted file mode 100644
index c02a003fa03a..000000000000
--- a/Documentation/powerpc/kvm_440.txt
+++ /dev/null
@@ -1,41 +0,0 @@
1Hollis Blanchard <hollisb@us.ibm.com>
215 Apr 2008
3
4Various notes on the implementation of KVM for PowerPC 440:
5
6To enforce isolation, host userspace, guest kernel, and guest userspace all
7run at user privilege level. Only the host kernel runs in supervisor mode.
8Executing privileged instructions in the guest traps into KVM (in the host
9kernel), where we decode and emulate them. Through this technique, unmodified
10440 Linux kernels can be run (slowly) as guests. Future performance work will
11focus on reducing the overhead and frequency of these traps.
12
13The usual code flow is started from userspace invoking an "run" ioctl, which
14causes KVM to switch into guest context. We use IVPR to hijack the host
15interrupt vectors while running the guest, which allows us to direct all
16interrupts to kvmppc_handle_interrupt(). At this point, we could either
17- handle the interrupt completely (e.g. emulate "mtspr SPRG0"), or
18- let the host interrupt handler run (e.g. when the decrementer fires), or
19- return to host userspace (e.g. when the guest performs device MMIO)
20
21Address spaces: We take advantage of the fact that Linux doesn't use the AS=1
22address space (in host or guest), which gives us virtual address space to use
23for guest mappings. While the guest is running, the host kernel remains mapped
24in AS=0, but the guest can only use AS=1 mappings.
25
26TLB entries: The TLB entries covering the host linear mapping remain
27present while running the guest. This reduces the overhead of lightweight
28exits, which are handled by KVM running in the host kernel. We keep three
29copies of the TLB:
30 - guest TLB: contents of the TLB as the guest sees it
31 - shadow TLB: the TLB that is actually in hardware while guest is running
32 - host TLB: to restore TLB state when context switching guest -> host
33When a TLB miss occurs because a mapping was not present in the shadow TLB,
34but was present in the guest TLB, KVM handles the fault without invoking the
35guest. Large guest pages are backed by multiple 4KB shadow pages through this
36mechanism.
37
38IO: MMIO and DCR accesses are emulated by userspace. We use virtio for network
39and block IO, so those drivers must be enabled in the guest. It's possible
40that some qemu device emulation (e.g. e1000 or rtl8139) may also work with
41little effort.
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 68cda1fc3d52..beae3fde075e 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -148,9 +148,9 @@ of banks, as set via the KVM_X86_SETUP_MCE ioctl.
148 148
1494.4 KVM_CHECK_EXTENSION 1494.4 KVM_CHECK_EXTENSION
150 150
151Capability: basic 151Capability: basic, KVM_CAP_CHECK_EXTENSION_VM for vm ioctl
152Architectures: all 152Architectures: all
153Type: system ioctl 153Type: system ioctl, vm ioctl
154Parameters: extension identifier (KVM_CAP_*) 154Parameters: extension identifier (KVM_CAP_*)
155Returns: 0 if unsupported; 1 (or some other positive integer) if supported 155Returns: 0 if unsupported; 1 (or some other positive integer) if supported
156 156
@@ -160,6 +160,9 @@ receives an integer that describes the extension availability.
160Generally 0 means no and 1 means yes, but some extensions may report 160Generally 0 means no and 1 means yes, but some extensions may report
161additional information in the integer return value. 161additional information in the integer return value.
162 162
163Based on their initialization different VMs may have different capabilities.
164It is thus encouraged to use the vm ioctl to query for capabilities (available
165with KVM_CAP_CHECK_EXTENSION_VM on the vm fd)
163 166
1644.5 KVM_GET_VCPU_MMAP_SIZE 1674.5 KVM_GET_VCPU_MMAP_SIZE
165 168
@@ -1892,7 +1895,8 @@ registers, find a list below:
1892 PPC | KVM_REG_PPC_PID | 64 1895 PPC | KVM_REG_PPC_PID | 64
1893 PPC | KVM_REG_PPC_ACOP | 64 1896 PPC | KVM_REG_PPC_ACOP | 64
1894 PPC | KVM_REG_PPC_VRSAVE | 32 1897 PPC | KVM_REG_PPC_VRSAVE | 32
1895 PPC | KVM_REG_PPC_LPCR | 64 1898 PPC | KVM_REG_PPC_LPCR | 32
1899 PPC | KVM_REG_PPC_LPCR_64 | 64
1896 PPC | KVM_REG_PPC_PPR | 64 1900 PPC | KVM_REG_PPC_PPR | 64
1897 PPC | KVM_REG_PPC_ARCH_COMPAT | 32 1901 PPC | KVM_REG_PPC_ARCH_COMPAT | 32
1898 PPC | KVM_REG_PPC_DABRX | 32 1902 PPC | KVM_REG_PPC_DABRX | 32
@@ -2677,8 +2681,8 @@ The 'data' member contains, in its first 'len' bytes, the value as it would
2677appear if the VCPU performed a load or store of the appropriate width directly 2681appear if the VCPU performed a load or store of the appropriate width directly
2678to the byte array. 2682to the byte array.
2679 2683
2680NOTE: For KVM_EXIT_IO, KVM_EXIT_MMIO, KVM_EXIT_OSI, KVM_EXIT_DCR, 2684NOTE: For KVM_EXIT_IO, KVM_EXIT_MMIO, KVM_EXIT_OSI, KVM_EXIT_PAPR and
2681 KVM_EXIT_PAPR and KVM_EXIT_EPR the corresponding 2685 KVM_EXIT_EPR the corresponding
2682operations are complete (and guest state is consistent) only after userspace 2686operations are complete (and guest state is consistent) only after userspace
2683has re-entered the kernel with KVM_RUN. The kernel side will first finish 2687has re-entered the kernel with KVM_RUN. The kernel side will first finish
2684incomplete operations and then check for pending signals. Userspace 2688incomplete operations and then check for pending signals. Userspace
@@ -2749,7 +2753,7 @@ Principles of Operation Book in the Chapter for Dynamic Address Translation
2749 __u8 is_write; 2753 __u8 is_write;
2750 } dcr; 2754 } dcr;
2751 2755
2752powerpc specific. 2756Deprecated - was used for 440 KVM.
2753 2757
2754 /* KVM_EXIT_OSI */ 2758 /* KVM_EXIT_OSI */
2755 struct { 2759 struct {
@@ -2931,8 +2935,8 @@ The fields in each entry are defined as follows:
2931 this function/index combination 2935 this function/index combination
2932 2936
2933 2937
29346. Capabilities that can be enabled 29386. Capabilities that can be enabled on vCPUs
2935----------------------------------- 2939--------------------------------------------
2936 2940
2937There are certain capabilities that change the behavior of the virtual CPU or 2941There are certain capabilities that change the behavior of the virtual CPU or
2938the virtual machine when enabled. To enable them, please see section 4.37. 2942the virtual machine when enabled. To enable them, please see section 4.37.
@@ -3091,3 +3095,43 @@ Parameters: none
3091 3095
3092This capability enables the in-kernel irqchip for s390. Please refer to 3096This capability enables the in-kernel irqchip for s390. Please refer to
3093"4.24 KVM_CREATE_IRQCHIP" for details. 3097"4.24 KVM_CREATE_IRQCHIP" for details.
3098
30997. Capabilities that can be enabled on VMs
3100------------------------------------------
3101
3102There are certain capabilities that change the behavior of the virtual
3103machine when enabled. To enable them, please see section 4.37. Below
3104you can find a list of capabilities and what their effect on the VM
3105is when enabling them.
3106
3107The following information is provided along with the description:
3108
3109 Architectures: which instruction set architectures provide this ioctl.
3110 x86 includes both i386 and x86_64.
3111
3112 Parameters: what parameters are accepted by the capability.
3113
3114 Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL)
3115 are not detailed, but errors with specific meanings are.
3116
3117
31187.1 KVM_CAP_PPC_ENABLE_HCALL
3119
3120Architectures: ppc
3121Parameters: args[0] is the sPAPR hcall number
3122 args[1] is 0 to disable, 1 to enable in-kernel handling
3123
3124This capability controls whether individual sPAPR hypercalls (hcalls)
3125get handled by the kernel or not. Enabling or disabling in-kernel
3126handling of an hcall is effective across the VM. On creation, an
3127initial set of hcalls are enabled for in-kernel handling, which
3128consists of those hcalls for which in-kernel handlers were implemented
3129before this capability was implemented. If disabled, the kernel will
3130not to attempt to handle the hcall, but will always exit to userspace
3131to handle it. Note that it may not make sense to enable some and
3132disable others of a group of related hcalls, but KVM does not prevent
3133userspace from doing that.
3134
3135If the hcall number specified is not one that has an in-kernel
3136implementation, the KVM_ENABLE_CAP ioctl will fail with an EINVAL
3137error.
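
Editor's note: the two api.txt additions above (KVM_CAP_CHECK_EXTENSION_VM and KVM_CAP_PPC_ENABLE_HCALL) are plain ioctl interfaces. As a rough illustration only — vmfd and hcall_nr are caller-supplied assumptions, not values taken from this patch — userspace could probe the VM and toggle in-kernel handling of a single sPAPR hcall like this:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int set_hcall_in_kernel(int vmfd, unsigned long hcall_nr, int enable)
{
	struct kvm_enable_cap cap;

	/* Prefer the vm ioctl: HV and PR VMs may report different answers. */
	if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_ENABLE_HCALL) <= 0)
		return -1;	/* capability not offered for this VM */

	memset(&cap, 0, sizeof(cap));
	cap.cap     = KVM_CAP_PPC_ENABLE_HCALL;
	cap.args[0] = hcall_nr;	/* sPAPR hcall number */
	cap.args[1] = enable;	/* 1 = handle in kernel, 0 = exit to userspace */

	return ioctl(vmfd, KVM_ENABLE_CAP, &cap);
}

Querying on the vm fd rather than the system fd matters here because, as the new text notes, different VMs on the same host can have different capabilities.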
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index d7424ef80354..a99e0cdf8ba2 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -174,7 +174,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
174 } 174 }
175} 175}
176 176
177int kvm_dev_ioctl_check_extension(long ext) 177int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
178{ 178{
179 int r; 179 int r;
180 switch (ext) { 180 switch (ext) {
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 6a4309bb821a..0729ba6acddf 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -190,7 +190,7 @@ void kvm_arch_check_processor_compat(void *rtn)
190 *(int *)rtn = 0; 190 *(int *)rtn = 0;
191} 191}
192 192
193int kvm_dev_ioctl_check_extension(long ext) 193int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
194{ 194{
195 195
196 int r; 196 int r;
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 4fda672cb58e..cd7114147ae7 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -886,7 +886,7 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
886 return VM_FAULT_SIGBUS; 886 return VM_FAULT_SIGBUS;
887} 887}
888 888
889int kvm_dev_ioctl_check_extension(long ext) 889int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
890{ 890{
891 int r; 891 int r;
892 892
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 35d16bd2760b..ec2e40f2cc11 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -202,9 +202,7 @@ config PPC_EARLY_DEBUG_BEAT
202 202
203config PPC_EARLY_DEBUG_44x 203config PPC_EARLY_DEBUG_44x
204 bool "Early serial debugging for IBM/AMCC 44x CPUs" 204 bool "Early serial debugging for IBM/AMCC 44x CPUs"
205 # PPC_EARLY_DEBUG on 440 leaves AS=1 mappings above the TLB high water 205 depends on 44x
206 # mark, which doesn't work with current 440 KVM.
207 depends on 44x && !KVM
208 help 206 help
209 Select this to enable early debugging for IBM 44x chips via the 207 Select this to enable early debugging for IBM 44x chips via the
210 inbuilt serial port. If you enable this, ensure you set 208 inbuilt serial port. If you enable this, ensure you set
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index ccf66b9060a6..924e10df1844 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -127,4 +127,3 @@ CONFIG_CRYPTO_PCBC=y
127# CONFIG_CRYPTO_ANSI_CPRNG is not set 127# CONFIG_CRYPTO_ANSI_CPRNG is not set
128# CONFIG_CRYPTO_HW is not set 128# CONFIG_CRYPTO_HW is not set
129CONFIG_VIRTUALIZATION=y 129CONFIG_VIRTUALIZATION=y
130CONFIG_KVM_440=y
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index 4b237aa35660..21be8ae8f809 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -34,10 +34,14 @@
34#define PPC_MIN_STKFRM 112 34#define PPC_MIN_STKFRM 112
35 35
36#ifdef __BIG_ENDIAN__ 36#ifdef __BIG_ENDIAN__
37#define LWZX_BE stringify_in_c(lwzx)
37#define LDX_BE stringify_in_c(ldx) 38#define LDX_BE stringify_in_c(ldx)
39#define STWX_BE stringify_in_c(stwx)
38#define STDX_BE stringify_in_c(stdx) 40#define STDX_BE stringify_in_c(stdx)
39#else 41#else
42#define LWZX_BE stringify_in_c(lwbrx)
40#define LDX_BE stringify_in_c(ldbrx) 43#define LDX_BE stringify_in_c(ldbrx)
44#define STWX_BE stringify_in_c(stwbrx)
41#define STDX_BE stringify_in_c(stdbrx) 45#define STDX_BE stringify_in_c(stdbrx)
42#endif 46#endif
43 47
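
Editor's note: the new LWZX_BE/STWX_BE helpers follow the existing LDX_BE/STDX_BE pattern — the macro expands to the plain indexed load/store on big-endian hosts and to the byte-reversed form on little-endian hosts, so big-endian in-memory structures (such as the hashed page table) read the same either way. A minimal, hypothetical use from C inline assembly; the load_be32() wrapper is invented for this sketch:

#include <linux/types.h>
#include <asm/asm-compat.h>

static inline u32 load_be32(const u32 *p)
{
	u32 val;

	/* "lwzx" on BE hosts, "lwbrx" (byte-reversed load) on LE hosts */
	asm volatile(LWZX_BE " %0,0,%1" : "=r" (val) : "r" (p));
	return val;
}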
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index ed0afc1e44a4..34a05a1a990b 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -3,6 +3,7 @@
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
6#include <asm/reg.h>
6 7
7/* bytes per L1 cache line */ 8/* bytes per L1 cache line */
8#if defined(CONFIG_8xx) || defined(CONFIG_403GCX) 9#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
@@ -39,6 +40,12 @@ struct ppc64_caches {
39}; 40};
40 41
41extern struct ppc64_caches ppc64_caches; 42extern struct ppc64_caches ppc64_caches;
43
44static inline void logmpp(u64 x)
45{
46 asm volatile(PPC_LOGMPP(R1) : : "r" (x));
47}
48
42#endif /* __powerpc64__ && ! __ASSEMBLY__ */ 49#endif /* __powerpc64__ && ! __ASSEMBLY__ */
43 50
44#if defined(__ASSEMBLY__) 51#if defined(__ASSEMBLY__)
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 5dbbb29f5c3e..85bc8c0d257b 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -279,6 +279,12 @@
279#define H_GET_24X7_DATA 0xF07C 279#define H_GET_24X7_DATA 0xF07C
280#define H_GET_PERF_COUNTER_INFO 0xF080 280#define H_GET_PERF_COUNTER_INFO 0xF080
281 281
282/* Values for 2nd argument to H_SET_MODE */
283#define H_SET_MODE_RESOURCE_SET_CIABR 1
284#define H_SET_MODE_RESOURCE_SET_DAWR 2
285#define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3
286#define H_SET_MODE_RESOURCE_LE 4
287
282#ifndef __ASSEMBLY__ 288#ifndef __ASSEMBLY__
283 289
284/** 290/**
diff --git a/arch/powerpc/include/asm/kvm_44x.h b/arch/powerpc/include/asm/kvm_44x.h
deleted file mode 100644
index a0e57618ff33..000000000000
--- a/arch/powerpc/include/asm/kvm_44x.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __ASM_44X_H__
21#define __ASM_44X_H__
22
23#include <linux/kvm_host.h>
24
25#define PPC44x_TLB_SIZE 64
26
27/* If the guest is expecting it, this can be as large as we like; we'd just
28 * need to find some way of advertising it. */
29#define KVM44x_GUEST_TLB_SIZE 64
30
31struct kvmppc_44x_tlbe {
32 u32 tid; /* Only the low 8 bits are used. */
33 u32 word0;
34 u32 word1;
35 u32 word2;
36};
37
38struct kvmppc_44x_shadow_ref {
39 struct page *page;
40 u16 gtlb_index;
41 u8 writeable;
42 u8 tid;
43};
44
45struct kvmppc_vcpu_44x {
46 /* Unmodified copy of the guest's TLB. */
47 struct kvmppc_44x_tlbe guest_tlb[KVM44x_GUEST_TLB_SIZE];
48
49 /* References to guest pages in the hardware TLB. */
50 struct kvmppc_44x_shadow_ref shadow_refs[PPC44x_TLB_SIZE];
51
52 /* State of the shadow TLB at guest context switch time. */
53 struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE];
54 u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
55
56 struct kvm_vcpu vcpu;
57};
58
59static inline struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu)
60{
61 return container_of(vcpu, struct kvmppc_vcpu_44x, vcpu);
62}
63
64void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu);
65void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu);
66
67#endif /* __ASM_44X_H__ */
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 9601741080e5..b8901c4a4922 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -33,7 +33,6 @@
33/* IVPR must be 64KiB-aligned. */ 33/* IVPR must be 64KiB-aligned. */
34#define VCPU_SIZE_ORDER 4 34#define VCPU_SIZE_ORDER 4
35#define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12) 35#define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12)
36#define VCPU_TLB_PGSZ PPC44x_TLB_64K
37#define VCPU_SIZE_BYTES (1<<VCPU_SIZE_LOG) 36#define VCPU_SIZE_BYTES (1<<VCPU_SIZE_LOG)
38 37
39#define BOOKE_INTERRUPT_CRITICAL 0 38#define BOOKE_INTERRUPT_CRITICAL 0
@@ -131,6 +130,7 @@
131#define BOOK3S_HFLAG_NATIVE_PS 0x8 130#define BOOK3S_HFLAG_NATIVE_PS 0x8
132#define BOOK3S_HFLAG_MULTI_PGSIZE 0x10 131#define BOOK3S_HFLAG_MULTI_PGSIZE 0x10
133#define BOOK3S_HFLAG_NEW_TLBIE 0x20 132#define BOOK3S_HFLAG_NEW_TLBIE 0x20
133#define BOOK3S_HFLAG_SPLIT_HACK 0x40
134 134
135#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */ 135#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
136#define RESUME_FLAG_HOST (1<<1) /* Resume host? */ 136#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index f52f65694527..6acf0c2a0f99 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -83,8 +83,6 @@ struct kvmppc_vcpu_book3s {
83 u64 sdr1; 83 u64 sdr1;
84 u64 hior; 84 u64 hior;
85 u64 msr_mask; 85 u64 msr_mask;
86 u64 purr_offset;
87 u64 spurr_offset;
88#ifdef CONFIG_PPC_BOOK3S_32 86#ifdef CONFIG_PPC_BOOK3S_32
89 u32 vsid_pool[VSID_POOL_SIZE]; 87 u32 vsid_pool[VSID_POOL_SIZE];
90 u32 vsid_next; 88 u32 vsid_next;
@@ -148,9 +146,10 @@ extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *
148extern int kvmppc_mmu_hpte_sysinit(void); 146extern int kvmppc_mmu_hpte_sysinit(void);
149extern void kvmppc_mmu_hpte_sysexit(void); 147extern void kvmppc_mmu_hpte_sysexit(void);
150extern int kvmppc_mmu_hv_init(void); 148extern int kvmppc_mmu_hv_init(void);
149extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
151 150
151/* XXX remove this export when load_last_inst() is generic */
152extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data); 152extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
153extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
154extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec); 153extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
155extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu, 154extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
156 unsigned int vec); 155 unsigned int vec);
@@ -159,13 +158,13 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
159 bool upper, u32 val); 158 bool upper, u32 val);
160extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); 159extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
161extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu); 160extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
162extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing, 161extern pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
163 bool *writable); 162 bool *writable);
164extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev, 163extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
165 unsigned long *rmap, long pte_index, int realmode); 164 unsigned long *rmap, long pte_index, int realmode);
166extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep, 165extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
167 unsigned long pte_index); 166 unsigned long pte_index);
168void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep, 167void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
169 unsigned long pte_index); 168 unsigned long pte_index);
170extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr, 169extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
171 unsigned long *nb_ret); 170 unsigned long *nb_ret);
@@ -183,12 +182,16 @@ extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
183 struct kvm_memory_slot *memslot, unsigned long *map); 182 struct kvm_memory_slot *memslot, unsigned long *map);
184extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, 183extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
185 unsigned long mask); 184 unsigned long mask);
185extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);
186 186
187extern void kvmppc_entry_trampoline(void); 187extern void kvmppc_entry_trampoline(void);
188extern void kvmppc_hv_entry_trampoline(void); 188extern void kvmppc_hv_entry_trampoline(void);
189extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst); 189extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
190extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst); 190extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
191extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd); 191extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
192extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
193extern int kvmppc_hcall_impl_pr(unsigned long cmd);
194extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
192extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, 195extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
193 struct kvm_vcpu *vcpu); 196 struct kvm_vcpu *vcpu);
194extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, 197extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
@@ -274,32 +277,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
274 return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE); 277 return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
275} 278}
276 279
277static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
278{
279 /* Load the instruction manually if it failed to do so in the
280 * exit path */
281 if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
282 kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
283
284 return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) :
285 vcpu->arch.last_inst;
286}
287
288static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
289{
290 return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));
291}
292
293/*
294 * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
295 * Because the sc instruction sets SRR0 to point to the following
296 * instruction, we have to fetch from pc - 4.
297 */
298static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
299{
300 return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);
301}
302
303static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) 280static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
304{ 281{
305 return vcpu->arch.fault_dar; 282 return vcpu->arch.fault_dar;
@@ -310,6 +287,13 @@ static inline bool is_kvmppc_resume_guest(int r)
310 return (r == RESUME_GUEST || r == RESUME_GUEST_NV); 287 return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
311} 288}
312 289
290static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
291static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
292{
293 /* Only PR KVM supports the magic page */
294 return !is_kvmppc_hv_enabled(vcpu->kvm);
295}
296
313/* Magic register values loaded into r3 and r4 before the 'sc' assembly 297/* Magic register values loaded into r3 and r4 before the 'sc' assembly
314 * instruction for the OSI hypercalls */ 298 * instruction for the OSI hypercalls */
315#define OSI_SC_MAGIC_R3 0x113724FA 299#define OSI_SC_MAGIC_R3 0x113724FA
@@ -322,4 +306,7 @@ static inline bool is_kvmppc_resume_guest(int r)
322/* LPIDs we support with this build -- runtime limit may be lower */ 306/* LPIDs we support with this build -- runtime limit may be lower */
323#define KVMPPC_NR_LPIDS (LPID_RSVD + 1) 307#define KVMPPC_NR_LPIDS (LPID_RSVD + 1)
324 308
309#define SPLIT_HACK_MASK 0xff000000
310#define SPLIT_HACK_OFFS 0xfb000000
311
325#endif /* __ASM_KVM_BOOK3S_H__ */ 312#endif /* __ASM_KVM_BOOK3S_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index d645428a65a4..0aa817933e6a 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -59,20 +59,29 @@ extern unsigned long kvm_rma_pages;
59/* These bits are reserved in the guest view of the HPTE */ 59/* These bits are reserved in the guest view of the HPTE */
60#define HPTE_GR_RESERVED HPTE_GR_MODIFIED 60#define HPTE_GR_RESERVED HPTE_GR_MODIFIED
61 61
62static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits) 62static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
63{ 63{
64 unsigned long tmp, old; 64 unsigned long tmp, old;
65 __be64 be_lockbit, be_bits;
66
67 /*
68 * We load/store in native endian, but the HTAB is in big endian. If
69 * we byte swap all data we apply on the PTE we're implicitly correct
70 * again.
71 */
72 be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
73 be_bits = cpu_to_be64(bits);
65 74
66 asm volatile(" ldarx %0,0,%2\n" 75 asm volatile(" ldarx %0,0,%2\n"
67 " and. %1,%0,%3\n" 76 " and. %1,%0,%3\n"
68 " bne 2f\n" 77 " bne 2f\n"
69 " ori %0,%0,%4\n" 78 " or %0,%0,%4\n"
70 " stdcx. %0,0,%2\n" 79 " stdcx. %0,0,%2\n"
71 " beq+ 2f\n" 80 " beq+ 2f\n"
72 " mr %1,%3\n" 81 " mr %1,%3\n"
73 "2: isync" 82 "2: isync"
74 : "=&r" (tmp), "=&r" (old) 83 : "=&r" (tmp), "=&r" (old)
75 : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK) 84 : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
76 : "cc", "memory"); 85 : "cc", "memory");
77 return old == 0; 86 return old == 0;
78} 87}
@@ -110,16 +119,12 @@ static inline int __hpte_actual_psize(unsigned int lp, int psize)
110static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, 119static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
111 unsigned long pte_index) 120 unsigned long pte_index)
112{ 121{
113 int b_psize, a_psize; 122 int b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
114 unsigned int penc; 123 unsigned int penc;
115 unsigned long rb = 0, va_low, sllp; 124 unsigned long rb = 0, va_low, sllp;
116 unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1); 125 unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
117 126
118 if (!(v & HPTE_V_LARGE)) { 127 if (v & HPTE_V_LARGE) {
119 /* both base and actual psize is 4k */
120 b_psize = MMU_PAGE_4K;
121 a_psize = MMU_PAGE_4K;
122 } else {
123 for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) { 128 for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {
124 129
125 /* valid entries have a shift value */ 130 /* valid entries have a shift value */
@@ -142,6 +147,8 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
142 */ 147 */
143 /* This covers 14..54 bits of va*/ 148 /* This covers 14..54 bits of va*/
144 rb = (v & ~0x7fUL) << 16; /* AVA field */ 149 rb = (v & ~0x7fUL) << 16; /* AVA field */
150
151 rb |= v >> (62 - 8); /* B field */
145 /* 152 /*
146 * AVA in v had cleared lower 23 bits. We need to derive 153 * AVA in v had cleared lower 23 bits. We need to derive
147 * that from pteg index 154 * that from pteg index
@@ -172,10 +179,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
172 { 179 {
173 int aval_shift; 180 int aval_shift;
174 /* 181 /*
175 * remaining 7bits of AVA/LP fields 182 * remaining bits of AVA/LP fields
176 * Also contain the rr bits of LP 183 * Also contain the rr bits of LP
177 */ 184 */
178 rb |= (va_low & 0x7f) << 16; 185 rb |= (va_low << mmu_psize_defs[b_psize].shift) & 0x7ff000;
179 /* 186 /*
180 * Now clear not needed LP bits based on actual psize 187 * Now clear not needed LP bits based on actual psize
181 */ 188 */
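
Editor's note: the try_lock_hpte() rewrite above is the heart of "Make HTAB code LE host aware": the hashed page table stays big-endian in memory, so rather than byte-swapping the doubleword loaded by ldarx, the constants (the lock bit and the caller's bits) are swapped once and applied to the raw value — which is also why the ori with an immediate operand had to become an or with a register operand. A non-atomic sketch of the same reasoning, assuming the surrounding kvm_book3s_64.h context (illustration only; the real code keeps the ldarx/stdcx. reservation loop):

static bool hpte_lock_would_succeed(__be64 *hpte, unsigned long bits)
{
	__be64 be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	__be64 be_bits    = cpu_to_be64(bits);
	__be64 old        = *hpte;		/* raw big-endian doubleword */

	if (old & be_bits)			/* lock or invalid bits already set */
		return false;
	*hpte = old | be_lockbit;		/* set the lock bit, still BE */
	return true;
}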
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index c7aed6105ff9..f7aa5cc395c4 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -69,11 +69,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
69 return false; 69 return false;
70} 70}
71 71
72static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
73{
74 return vcpu->arch.last_inst;
75}
76
77static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) 72static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
78{ 73{
79 vcpu->arch.ctr = val; 74 vcpu->arch.ctr = val;
@@ -108,4 +103,14 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
108{ 103{
109 return vcpu->arch.fault_dear; 104 return vcpu->arch.fault_dear;
110} 105}
106
107static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
108{
109 /* Magic page is only supported on e500v2 */
110#ifdef CONFIG_KVM_E500V2
111 return true;
112#else
113 return false;
114#endif
115}
111#endif /* __ASM_KVM_BOOKE_H__ */ 116#endif /* __ASM_KVM_BOOKE_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index bb66d8b8efdf..98d9dd50d063 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -34,6 +34,7 @@
34#include <asm/processor.h> 34#include <asm/processor.h>
35#include <asm/page.h> 35#include <asm/page.h>
36#include <asm/cacheflush.h> 36#include <asm/cacheflush.h>
37#include <asm/hvcall.h>
37 38
38#define KVM_MAX_VCPUS NR_CPUS 39#define KVM_MAX_VCPUS NR_CPUS
39#define KVM_MAX_VCORES NR_CPUS 40#define KVM_MAX_VCORES NR_CPUS
@@ -48,7 +49,6 @@
48#define KVM_NR_IRQCHIPS 1 49#define KVM_NR_IRQCHIPS 1
49#define KVM_IRQCHIP_NUM_PINS 256 50#define KVM_IRQCHIP_NUM_PINS 256
50 51
51#if !defined(CONFIG_KVM_440)
52#include <linux/mmu_notifier.h> 52#include <linux/mmu_notifier.h>
53 53
54#define KVM_ARCH_WANT_MMU_NOTIFIER 54#define KVM_ARCH_WANT_MMU_NOTIFIER
@@ -61,8 +61,6 @@ extern int kvm_age_hva(struct kvm *kvm, unsigned long hva);
61extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); 61extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
62extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); 62extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
63 63
64#endif
65
66#define HPTEG_CACHE_NUM (1 << 15) 64#define HPTEG_CACHE_NUM (1 << 15)
67#define HPTEG_HASH_BITS_PTE 13 65#define HPTEG_HASH_BITS_PTE 13
68#define HPTEG_HASH_BITS_PTE_LONG 12 66#define HPTEG_HASH_BITS_PTE_LONG 12
@@ -96,7 +94,6 @@ struct kvm_vm_stat {
96struct kvm_vcpu_stat { 94struct kvm_vcpu_stat {
97 u32 sum_exits; 95 u32 sum_exits;
98 u32 mmio_exits; 96 u32 mmio_exits;
99 u32 dcr_exits;
100 u32 signal_exits; 97 u32 signal_exits;
101 u32 light_exits; 98 u32 light_exits;
102 /* Account for special types of light exits: */ 99 /* Account for special types of light exits: */
@@ -113,22 +110,21 @@ struct kvm_vcpu_stat {
113 u32 halt_wakeup; 110 u32 halt_wakeup;
114 u32 dbell_exits; 111 u32 dbell_exits;
115 u32 gdbell_exits; 112 u32 gdbell_exits;
113 u32 ld;
114 u32 st;
116#ifdef CONFIG_PPC_BOOK3S 115#ifdef CONFIG_PPC_BOOK3S
117 u32 pf_storage; 116 u32 pf_storage;
118 u32 pf_instruc; 117 u32 pf_instruc;
119 u32 sp_storage; 118 u32 sp_storage;
120 u32 sp_instruc; 119 u32 sp_instruc;
121 u32 queue_intr; 120 u32 queue_intr;
122 u32 ld;
123 u32 ld_slow; 121 u32 ld_slow;
124 u32 st;
125 u32 st_slow; 122 u32 st_slow;
126#endif 123#endif
127}; 124};
128 125
129enum kvm_exit_types { 126enum kvm_exit_types {
130 MMIO_EXITS, 127 MMIO_EXITS,
131 DCR_EXITS,
132 SIGNAL_EXITS, 128 SIGNAL_EXITS,
133 ITLB_REAL_MISS_EXITS, 129 ITLB_REAL_MISS_EXITS,
134 ITLB_VIRT_MISS_EXITS, 130 ITLB_VIRT_MISS_EXITS,
@@ -254,7 +250,6 @@ struct kvm_arch {
254 atomic_t hpte_mod_interest; 250 atomic_t hpte_mod_interest;
255 spinlock_t slot_phys_lock; 251 spinlock_t slot_phys_lock;
256 cpumask_t need_tlb_flush; 252 cpumask_t need_tlb_flush;
257 struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
258 int hpt_cma_alloc; 253 int hpt_cma_alloc;
259#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ 254#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
260#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 255#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -263,6 +258,7 @@ struct kvm_arch {
263#ifdef CONFIG_PPC_BOOK3S_64 258#ifdef CONFIG_PPC_BOOK3S_64
264 struct list_head spapr_tce_tables; 259 struct list_head spapr_tce_tables;
265 struct list_head rtas_tokens; 260 struct list_head rtas_tokens;
261 DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
266#endif 262#endif
267#ifdef CONFIG_KVM_MPIC 263#ifdef CONFIG_KVM_MPIC
268 struct openpic *mpic; 264 struct openpic *mpic;
@@ -271,6 +267,10 @@ struct kvm_arch {
271 struct kvmppc_xics *xics; 267 struct kvmppc_xics *xics;
272#endif 268#endif
273 struct kvmppc_ops *kvm_ops; 269 struct kvmppc_ops *kvm_ops;
270#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
271 /* This array can grow quite large, keep it at the end */
272 struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
273#endif
274}; 274};
275 275
276/* 276/*
@@ -305,6 +305,8 @@ struct kvmppc_vcore {
305 u32 arch_compat; 305 u32 arch_compat;
306 ulong pcr; 306 ulong pcr;
307 ulong dpdes; /* doorbell state (POWER8) */ 307 ulong dpdes; /* doorbell state (POWER8) */
308 void *mpp_buffer; /* Micro Partition Prefetch buffer */
309 bool mpp_buffer_is_valid;
308}; 310};
309 311
310#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff) 312#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
@@ -503,8 +505,10 @@ struct kvm_vcpu_arch {
503#ifdef CONFIG_BOOKE 505#ifdef CONFIG_BOOKE
504 u32 decar; 506 u32 decar;
505#endif 507#endif
506 u32 tbl; 508 /* Time base value when we entered the guest */
507 u32 tbu; 509 u64 entry_tb;
510 u64 entry_vtb;
511 u64 entry_ic;
508 u32 tcr; 512 u32 tcr;
509 ulong tsr; /* we need to perform set/clr_bits() which requires ulong */ 513 ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
510 u32 ivor[64]; 514 u32 ivor[64];
@@ -580,6 +584,8 @@ struct kvm_vcpu_arch {
580 u32 mmucfg; 584 u32 mmucfg;
581 u32 eptcfg; 585 u32 eptcfg;
582 u32 epr; 586 u32 epr;
587 u64 sprg9;
588 u32 pwrmgtcr0;
583 u32 crit_save; 589 u32 crit_save;
584 /* guest debug registers*/ 590 /* guest debug registers*/
585 struct debug_reg dbg_reg; 591 struct debug_reg dbg_reg;
@@ -593,8 +599,6 @@ struct kvm_vcpu_arch {
593 u8 io_gpr; /* GPR used as IO source/target */ 599 u8 io_gpr; /* GPR used as IO source/target */
594 u8 mmio_is_bigendian; 600 u8 mmio_is_bigendian;
595 u8 mmio_sign_extend; 601 u8 mmio_sign_extend;
596 u8 dcr_needed;
597 u8 dcr_is_write;
598 u8 osi_needed; 602 u8 osi_needed;
599 u8 osi_enabled; 603 u8 osi_enabled;
600 u8 papr_enabled; 604 u8 papr_enabled;
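
Editor's note: the enabled_hcalls bitmap added above is sized MAX_HCALL_OPCODE/4 + 1 because sPAPR hcall numbers are multiples of 4, so each hcall maps to bit hcall/4. A hypothetical helper (not the literal code from this series) showing how the HV exit path can consult it:

static bool kvmppc_hcall_enabled(struct kvm *kvm, unsigned long hcall)
{
	/* hcall numbers are multiples of 4; one bit per possible hcall */
	return test_bit(hcall / 4, kvm->arch.enabled_hcalls);
}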
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9c89cdd067a6..fb86a2299d8a 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -41,12 +41,26 @@
41enum emulation_result { 41enum emulation_result {
42 EMULATE_DONE, /* no further processing */ 42 EMULATE_DONE, /* no further processing */
43 EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ 43 EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
44 EMULATE_DO_DCR, /* kvm_run filled with DCR request */
45 EMULATE_FAIL, /* can't emulate this instruction */ 44 EMULATE_FAIL, /* can't emulate this instruction */
46 EMULATE_AGAIN, /* something went wrong. go again */ 45 EMULATE_AGAIN, /* something went wrong. go again */
47 EMULATE_EXIT_USER, /* emulation requires exit to user-space */ 46 EMULATE_EXIT_USER, /* emulation requires exit to user-space */
48}; 47};
49 48
49enum instruction_type {
50 INST_GENERIC,
51 INST_SC, /* system call */
52};
53
54enum xlate_instdata {
55 XLATE_INST, /* translate instruction address */
56 XLATE_DATA /* translate data address */
57};
58
59enum xlate_readwrite {
60 XLATE_READ, /* check for read permissions */
61 XLATE_WRITE /* check for write permissions */
62};
63
50extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); 64extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
51extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); 65extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
52extern void kvmppc_handler_highmem(void); 66extern void kvmppc_handler_highmem(void);
@@ -62,8 +76,16 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
62 u64 val, unsigned int bytes, 76 u64 val, unsigned int bytes,
63 int is_default_endian); 77 int is_default_endian);
64 78
79extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
80 enum instruction_type type, u32 *inst);
81
82extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
83 bool data);
84extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
85 bool data);
65extern int kvmppc_emulate_instruction(struct kvm_run *run, 86extern int kvmppc_emulate_instruction(struct kvm_run *run,
66 struct kvm_vcpu *vcpu); 87 struct kvm_vcpu *vcpu);
88extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
67extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); 89extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
68extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu); 90extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
69extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb); 91extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
@@ -86,6 +108,9 @@ extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
86 gva_t eaddr); 108 gva_t eaddr);
87extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu); 109extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
88extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu); 110extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
111extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
112 enum xlate_instdata xlid, enum xlate_readwrite xlrw,
113 struct kvmppc_pte *pte);
89 114
90extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, 115extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
91 unsigned int id); 116 unsigned int id);
@@ -106,6 +131,14 @@ extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
106extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, 131extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
107 struct kvm_interrupt *irq); 132 struct kvm_interrupt *irq);
108extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu); 133extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
134extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
135 ulong esr_flags);
136extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
137 ulong dear_flags,
138 ulong esr_flags);
139extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
140extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
141 ulong esr_flags);
109extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu); 142extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
110extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu); 143extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
111 144
@@ -228,12 +261,35 @@ struct kvmppc_ops {
228 void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu); 261 void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
229 long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl, 262 long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
230 unsigned long arg); 263 unsigned long arg);
231 264 int (*hcall_implemented)(unsigned long hcall);
232}; 265};
233 266
234extern struct kvmppc_ops *kvmppc_hv_ops; 267extern struct kvmppc_ops *kvmppc_hv_ops;
235extern struct kvmppc_ops *kvmppc_pr_ops; 268extern struct kvmppc_ops *kvmppc_pr_ops;
236 269
270static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
271 enum instruction_type type, u32 *inst)
272{
273 int ret = EMULATE_DONE;
274 u32 fetched_inst;
275
276 /* Load the instruction manually if it failed to do so in the
277 * exit path */
278 if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
279 ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);
280
281 /* Write fetch_failed unswapped if the fetch failed */
282 if (ret == EMULATE_DONE)
283 fetched_inst = kvmppc_need_byteswap(vcpu) ?
284 swab32(vcpu->arch.last_inst) :
285 vcpu->arch.last_inst;
286 else
287 fetched_inst = vcpu->arch.last_inst;
288
289 *inst = fetched_inst;
290 return ret;
291}
292
237static inline bool is_kvmppc_hv_enabled(struct kvm *kvm) 293static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
238{ 294{
239 return kvm->arch.kvm_ops == kvmppc_hv_ops; 295 return kvm->arch.kvm_ops == kvmppc_hv_ops;
@@ -392,6 +448,17 @@ static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
392 { return 0; } 448 { return 0; }
393#endif 449#endif
394 450
451static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
452{
453#ifdef CONFIG_KVM_BOOKE_HV
454 return mfspr(SPRN_GEPR);
455#elif defined(CONFIG_BOOKE)
456 return vcpu->arch.epr;
457#else
458 return 0;
459#endif
460}
461
395static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr) 462static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
396{ 463{
397#ifdef CONFIG_KVM_BOOKE_HV 464#ifdef CONFIG_KVM_BOOKE_HV
@@ -472,8 +539,20 @@ static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
472#endif 539#endif
473} 540}
474 541
542#define SPRNG_WRAPPER_GET(reg, bookehv_spr) \
543static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
544{ \
545 return mfspr(bookehv_spr); \
546} \
547
548#define SPRNG_WRAPPER_SET(reg, bookehv_spr) \
549static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val) \
550{ \
551 mtspr(bookehv_spr, val); \
552} \
553
475#define SHARED_WRAPPER_GET(reg, size) \ 554#define SHARED_WRAPPER_GET(reg, size) \
476static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \ 555static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
477{ \ 556{ \
478 if (kvmppc_shared_big_endian(vcpu)) \ 557 if (kvmppc_shared_big_endian(vcpu)) \
479 return be##size##_to_cpu(vcpu->arch.shared->reg); \ 558 return be##size##_to_cpu(vcpu->arch.shared->reg); \
@@ -494,14 +573,31 @@ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
494 SHARED_WRAPPER_GET(reg, size) \ 573 SHARED_WRAPPER_GET(reg, size) \
495 SHARED_WRAPPER_SET(reg, size) \ 574 SHARED_WRAPPER_SET(reg, size) \
496 575
576#define SPRNG_WRAPPER(reg, bookehv_spr) \
577 SPRNG_WRAPPER_GET(reg, bookehv_spr) \
578 SPRNG_WRAPPER_SET(reg, bookehv_spr) \
579
580#ifdef CONFIG_KVM_BOOKE_HV
581
582#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \
583 SPRNG_WRAPPER(reg, bookehv_spr) \
584
585#else
586
587#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \
588 SHARED_WRAPPER(reg, size) \
589
590#endif
591
497SHARED_WRAPPER(critical, 64) 592SHARED_WRAPPER(critical, 64)
498SHARED_WRAPPER(sprg0, 64) 593SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
499SHARED_WRAPPER(sprg1, 64) 594SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
500SHARED_WRAPPER(sprg2, 64) 595SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
501SHARED_WRAPPER(sprg3, 64) 596SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
502SHARED_WRAPPER(srr0, 64) 597SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
503SHARED_WRAPPER(srr1, 64) 598SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
504SHARED_WRAPPER(dar, 64) 599SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
600SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
505SHARED_WRAPPER_GET(msr, 64) 601SHARED_WRAPPER_GET(msr, 64)
506static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val) 602static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
507{ 603{
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index d0918e09557f..cd4f04a74802 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,7 +40,11 @@
40 40
41/* MAS registers bit definitions */ 41/* MAS registers bit definitions */
42 42
43#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000) 43#define MAS0_TLBSEL_MASK 0x30000000
44#define MAS0_TLBSEL_SHIFT 28
45#define MAS0_TLBSEL(x) (((x) << MAS0_TLBSEL_SHIFT) & MAS0_TLBSEL_MASK)
46#define MAS0_GET_TLBSEL(mas0) (((mas0) & MAS0_TLBSEL_MASK) >> \
47 MAS0_TLBSEL_SHIFT)
44#define MAS0_ESEL_MASK 0x0FFF0000 48#define MAS0_ESEL_MASK 0x0FFF0000
45#define MAS0_ESEL_SHIFT 16 49#define MAS0_ESEL_SHIFT 16
46#define MAS0_ESEL(x) (((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK) 50#define MAS0_ESEL(x) (((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK)
@@ -58,6 +62,7 @@
58#define MAS1_TSIZE_MASK 0x00000f80 62#define MAS1_TSIZE_MASK 0x00000f80
59#define MAS1_TSIZE_SHIFT 7 63#define MAS1_TSIZE_SHIFT 7
60#define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK) 64#define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
65#define MAS1_GET_TSIZE(mas1) (((mas1) & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT)
61 66
62#define MAS2_EPN (~0xFFFUL) 67#define MAS2_EPN (~0xFFFUL)
63#define MAS2_X0 0x00000040 68#define MAS2_X0 0x00000040
@@ -86,6 +91,7 @@
86#define MAS3_SPSIZE 0x0000003e 91#define MAS3_SPSIZE 0x0000003e
87#define MAS3_SPSIZE_SHIFT 1 92#define MAS3_SPSIZE_SHIFT 1
88 93
94#define MAS4_TLBSEL_MASK MAS0_TLBSEL_MASK
89#define MAS4_TLBSELD(x) MAS0_TLBSEL(x) 95#define MAS4_TLBSELD(x) MAS0_TLBSEL(x)
90#define MAS4_INDD 0x00008000 /* Default IND */ 96#define MAS4_INDD 0x00008000 /* Default IND */
91#define MAS4_TSIZED(x) MAS1_TSIZE(x) 97#define MAS4_TSIZED(x) MAS1_TSIZE(x)
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 3132bb9365f3..c636841fc772 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -139,6 +139,7 @@
139#define PPC_INST_ISEL 0x7c00001e 139#define PPC_INST_ISEL 0x7c00001e
140#define PPC_INST_ISEL_MASK 0xfc00003e 140#define PPC_INST_ISEL_MASK 0xfc00003e
141#define PPC_INST_LDARX 0x7c0000a8 141#define PPC_INST_LDARX 0x7c0000a8
142#define PPC_INST_LOGMPP 0x7c0007e4
142#define PPC_INST_LSWI 0x7c0004aa 143#define PPC_INST_LSWI 0x7c0004aa
143#define PPC_INST_LSWX 0x7c00042a 144#define PPC_INST_LSWX 0x7c00042a
144#define PPC_INST_LWARX 0x7c000028 145#define PPC_INST_LWARX 0x7c000028
@@ -275,6 +276,20 @@
275#define __PPC_EH(eh) 0 276#define __PPC_EH(eh) 0
276#endif 277#endif
277 278
279/* POWER8 Micro Partition Prefetch (MPP) parameters */
280/* Address mask is common for LOGMPP instruction and MPPR SPR */
281#define PPC_MPPE_ADDRESS_MASK 0xffffffffc000
282
283/* Bits 60 and 61 of MPP SPR should be set to one of the following */
284/* Aborting the fetch is indeed setting 00 in the table size bits */
285#define PPC_MPPR_FETCH_ABORT (0x0ULL << 60)
286#define PPC_MPPR_FETCH_WHOLE_TABLE (0x2ULL << 60)
287
288/* Bits 54 and 55 of register for LOGMPP instruction should be set to: */
289#define PPC_LOGMPP_LOG_L2 (0x02ULL << 54)
290#define PPC_LOGMPP_LOG_L2L3 (0x01ULL << 54)
291#define PPC_LOGMPP_LOG_ABORT (0x03ULL << 54)
292
278/* Deal with instructions that older assemblers aren't aware of */ 293/* Deal with instructions that older assemblers aren't aware of */
279#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \ 294#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \
280 __PPC_RA(a) | __PPC_RB(b)) 295 __PPC_RA(a) | __PPC_RB(b))
@@ -283,6 +298,8 @@
283#define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \ 298#define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \
284 ___PPC_RT(t) | ___PPC_RA(a) | \ 299 ___PPC_RT(t) | ___PPC_RA(a) | \
285 ___PPC_RB(b) | __PPC_EH(eh)) 300 ___PPC_RB(b) | __PPC_EH(eh))
301#define PPC_LOGMPP(b) stringify_in_c(.long PPC_INST_LOGMPP | \
302 __PPC_RB(b))
286#define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \ 303#define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \
287 ___PPC_RT(t) | ___PPC_RA(a) | \ 304 ___PPC_RT(t) | ___PPC_RA(a) | \
288 ___PPC_RB(b) | __PPC_EH(eh)) 305 ___PPC_RB(b) | __PPC_EH(eh))
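
Editor's note: the PPC_LOGMPP encoding and the MPPR/LOGMPP constants above, together with the logmpp() inline added to cache.h earlier in this diff, back the "Use the POWER8 Micro Partition Prefetch Engine" patch: log a vcore's L2 footprint when it stops running, then ask the hardware to prefetch it back before the next run. The sketch below is only a plausible shape of that flow — the helper names and exact call sites are assumptions, not the patch's code:

#include <asm/cache.h>
#include <asm/ppc-opcode.h>
#include <asm/reg.h>

static void vcore_mpp_restore(struct kvmppc_vcore *vc)	/* before running the vcore */
{
	unsigned long addr = virt_to_phys(vc->mpp_buffer) & PPC_MPPE_ADDRESS_MASK;

	if (vc->mpp_buffer_is_valid)
		mtspr(SPRN_MPPR, addr | PPC_MPPR_FETCH_WHOLE_TABLE);
}

static void vcore_mpp_save(struct kvmppc_vcore *vc)	/* after the vcore stops running */
{
	unsigned long addr = virt_to_phys(vc->mpp_buffer) & PPC_MPPE_ADDRESS_MASK;

	logmpp(addr | PPC_LOGMPP_LOG_L2);	/* snapshot the current L2 contents */
	vc->mpp_buffer_is_valid = true;
}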
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index bffd89d27301..c547b26371b8 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -225,6 +225,7 @@
 #define   CTRL_TE	0x00c00000	/* thread enable */
 #define   CTRL_RUNLATCH	0x1
 #define SPRN_DAWR	0xB4
+#define SPRN_MPPR	0xB8	/* Micro Partition Prefetch Register */
 #define SPRN_RPR	0xBA	/* Relative Priority Register */
 #define SPRN_CIABR	0xBB
 #define   CIABR_PRIV		0x3
@@ -944,9 +945,6 @@
  *	readable variant for reads, which can avoid a fault
  *	with KVM type virtualization.
  *
- *	(*) Under KVM, the host SPRG1 is used to point to
- *	the current VCPU data structure
- *
  * 32-bit 8xx:
  *	- SPRG0 scratch for exception vectors
  *	- SPRG1 scratch for exception vectors
@@ -1203,6 +1201,15 @@
 				     : "r" ((unsigned long)(v)) \
 				     : "memory")
 
+static inline unsigned long mfvtb (void)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		return mfspr(SPRN_VTB);
+#endif
+	return 0;
+}
+
 #ifdef __powerpc64__
 #if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
 #define mftb()		({unsigned long rval;				\
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 1d428e6007ca..03cbada59d3a 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -102,6 +102,15 @@ static inline u64 get_rtc(void)
 	return (u64)hi * 1000000000 + lo;
 }
 
+static inline u64 get_vtb(void)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		return mfvtb();
+#endif
+	return 0;
+}
+
 #ifdef CONFIG_PPC64
 static inline u64 get_tb(void)
 {
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 2bc4a9409a93..e0e49dbb145d 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -548,6 +548,7 @@ struct kvm_get_htab_header {
 
 #define KVM_REG_PPC_VRSAVE	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
 #define KVM_REG_PPC_LPCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
+#define KVM_REG_PPC_LPCR_64	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb5)
 #define KVM_REG_PPC_PPR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6)
 
 /* Architecture compatibility level */
@@ -555,6 +556,7 @@ struct kvm_get_htab_header {
 
 #define KVM_REG_PPC_DABRX	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8)
 #define KVM_REG_PPC_WORT	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb9)
+#define KVM_REG_PPC_SPRG9	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xba)
 
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index f5995a912213..ab9ae0411e8f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -493,6 +493,7 @@ int main(void)
 	DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
 	DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
 	DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
+	DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls));
 	DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
 	DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
 	DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
@@ -667,6 +668,7 @@ int main(void)
 	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
 	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
 	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+	DEFINE(VCPU_SPRG9, offsetof(struct kvm_vcpu, arch.sprg9));
 	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
 	DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
 	DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
deleted file mode 100644
index 9cb4b0a36031..000000000000
--- a/arch/powerpc/kvm/44x.c
+++ /dev/null
@@ -1,237 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#include <linux/kvm_host.h>
21#include <linux/slab.h>
22#include <linux/err.h>
23#include <linux/export.h>
24#include <linux/module.h>
25#include <linux/miscdevice.h>
26
27#include <asm/reg.h>
28#include <asm/cputable.h>
29#include <asm/tlbflush.h>
30#include <asm/kvm_44x.h>
31#include <asm/kvm_ppc.h>
32
33#include "44x_tlb.h"
34#include "booke.h"
35
36static void kvmppc_core_vcpu_load_44x(struct kvm_vcpu *vcpu, int cpu)
37{
38 kvmppc_booke_vcpu_load(vcpu, cpu);
39 kvmppc_44x_tlb_load(vcpu);
40}
41
42static void kvmppc_core_vcpu_put_44x(struct kvm_vcpu *vcpu)
43{
44 kvmppc_44x_tlb_put(vcpu);
45 kvmppc_booke_vcpu_put(vcpu);
46}
47
48int kvmppc_core_check_processor_compat(void)
49{
50 int r;
51
52 if (strncmp(cur_cpu_spec->platform, "ppc440", 6) == 0)
53 r = 0;
54 else
55 r = -ENOTSUPP;
56
57 return r;
58}
59
60int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
61{
62 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
63 struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[0];
64 int i;
65
66 tlbe->tid = 0;
67 tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID;
68 tlbe->word1 = 0;
69 tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR;
70
71 tlbe++;
72 tlbe->tid = 0;
73 tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID;
74 tlbe->word1 = 0xef600000;
75 tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR
76 | PPC44x_TLB_I | PPC44x_TLB_G;
77
78 /* Since the guest can directly access the timebase, it must know the
79 * real timebase frequency. Accordingly, it must see the state of
80 * CCR1[TCS]. */
81 /* XXX CCR1 doesn't exist on all 440 SoCs. */
82 vcpu->arch.ccr1 = mfspr(SPRN_CCR1);
83
84 for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++)
85 vcpu_44x->shadow_refs[i].gtlb_index = -1;
86
87 vcpu->arch.cpu_type = KVM_CPU_440;
88 vcpu->arch.pvr = mfspr(SPRN_PVR);
89
90 return 0;
91}
92
93/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
94int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
95 struct kvm_translation *tr)
96{
97 int index;
98 gva_t eaddr;
99 u8 pid;
100 u8 as;
101
102 eaddr = tr->linear_address;
103 pid = (tr->linear_address >> 32) & 0xff;
104 as = (tr->linear_address >> 40) & 0x1;
105
106 index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as);
107 if (index == -1) {
108 tr->valid = 0;
109 return 0;
110 }
111
112 tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
113 /* XXX what does "writeable" and "usermode" even mean? */
114 tr->valid = 1;
115
116 return 0;
117}
118
119static int kvmppc_core_get_sregs_44x(struct kvm_vcpu *vcpu,
120 struct kvm_sregs *sregs)
121{
122 return kvmppc_get_sregs_ivor(vcpu, sregs);
123}
124
125static int kvmppc_core_set_sregs_44x(struct kvm_vcpu *vcpu,
126 struct kvm_sregs *sregs)
127{
128 return kvmppc_set_sregs_ivor(vcpu, sregs);
129}
130
131static int kvmppc_get_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
132 union kvmppc_one_reg *val)
133{
134 return -EINVAL;
135}
136
137static int kvmppc_set_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
138 union kvmppc_one_reg *val)
139{
140 return -EINVAL;
141}
142
143static struct kvm_vcpu *kvmppc_core_vcpu_create_44x(struct kvm *kvm,
144 unsigned int id)
145{
146 struct kvmppc_vcpu_44x *vcpu_44x;
147 struct kvm_vcpu *vcpu;
148 int err;
149
150 vcpu_44x = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
151 if (!vcpu_44x) {
152 err = -ENOMEM;
153 goto out;
154 }
155
156 vcpu = &vcpu_44x->vcpu;
157 err = kvm_vcpu_init(vcpu, kvm, id);
158 if (err)
159 goto free_vcpu;
160
161 vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
162 if (!vcpu->arch.shared)
163 goto uninit_vcpu;
164
165 return vcpu;
166
167uninit_vcpu:
168 kvm_vcpu_uninit(vcpu);
169free_vcpu:
170 kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
171out:
172 return ERR_PTR(err);
173}
174
175static void kvmppc_core_vcpu_free_44x(struct kvm_vcpu *vcpu)
176{
177 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
178
179 free_page((unsigned long)vcpu->arch.shared);
180 kvm_vcpu_uninit(vcpu);
181 kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
182}
183
184static int kvmppc_core_init_vm_44x(struct kvm *kvm)
185{
186 return 0;
187}
188
189static void kvmppc_core_destroy_vm_44x(struct kvm *kvm)
190{
191}
192
193static struct kvmppc_ops kvm_ops_44x = {
194 .get_sregs = kvmppc_core_get_sregs_44x,
195 .set_sregs = kvmppc_core_set_sregs_44x,
196 .get_one_reg = kvmppc_get_one_reg_44x,
197 .set_one_reg = kvmppc_set_one_reg_44x,
198 .vcpu_load = kvmppc_core_vcpu_load_44x,
199 .vcpu_put = kvmppc_core_vcpu_put_44x,
200 .vcpu_create = kvmppc_core_vcpu_create_44x,
201 .vcpu_free = kvmppc_core_vcpu_free_44x,
202 .mmu_destroy = kvmppc_mmu_destroy_44x,
203 .init_vm = kvmppc_core_init_vm_44x,
204 .destroy_vm = kvmppc_core_destroy_vm_44x,
205 .emulate_op = kvmppc_core_emulate_op_44x,
206 .emulate_mtspr = kvmppc_core_emulate_mtspr_44x,
207 .emulate_mfspr = kvmppc_core_emulate_mfspr_44x,
208};
209
210static int __init kvmppc_44x_init(void)
211{
212 int r;
213
214 r = kvmppc_booke_init();
215 if (r)
216 goto err_out;
217
218 r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
219 if (r)
220 goto err_out;
221 kvm_ops_44x.owner = THIS_MODULE;
222 kvmppc_pr_ops = &kvm_ops_44x;
223
224err_out:
225 return r;
226}
227
228static void __exit kvmppc_44x_exit(void)
229{
230 kvmppc_pr_ops = NULL;
231 kvmppc_booke_exit();
232}
233
234module_init(kvmppc_44x_init);
235module_exit(kvmppc_44x_exit);
236MODULE_ALIAS_MISCDEV(KVM_MINOR);
237MODULE_ALIAS("devname:kvm");
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
deleted file mode 100644
index 92c9ab4bcfec..000000000000
--- a/arch/powerpc/kvm/44x_emulate.c
+++ /dev/null
@@ -1,194 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#include <asm/kvm_ppc.h>
21#include <asm/dcr.h>
22#include <asm/dcr-regs.h>
23#include <asm/disassemble.h>
24#include <asm/kvm_44x.h>
25#include "timing.h"
26
27#include "booke.h"
28#include "44x_tlb.h"
29
30#define XOP_MFDCRX 259
31#define XOP_MFDCR 323
32#define XOP_MTDCRX 387
33#define XOP_MTDCR 451
34#define XOP_TLBSX 914
35#define XOP_ICCCI 966
36#define XOP_TLBWE 978
37
38static int emulate_mtdcr(struct kvm_vcpu *vcpu, int rs, int dcrn)
39{
40 /* emulate some access in kernel */
41 switch (dcrn) {
42 case DCRN_CPR0_CONFIG_ADDR:
43 vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs);
44 return EMULATE_DONE;
45 default:
46 vcpu->run->dcr.dcrn = dcrn;
47 vcpu->run->dcr.data = kvmppc_get_gpr(vcpu, rs);
48 vcpu->run->dcr.is_write = 1;
49 vcpu->arch.dcr_is_write = 1;
50 vcpu->arch.dcr_needed = 1;
51 kvmppc_account_exit(vcpu, DCR_EXITS);
52 return EMULATE_DO_DCR;
53 }
54}
55
56static int emulate_mfdcr(struct kvm_vcpu *vcpu, int rt, int dcrn)
57{
58 /* The guest may access CPR0 registers to determine the timebase
59 * frequency, and it must know the real host frequency because it
60 * can directly access the timebase registers.
61 *
62 * It would be possible to emulate those accesses in userspace,
63 * but userspace can really only figure out the end frequency.
64 * We could decompose that into the factors that compute it, but
65 * that's tricky math, and it's easier to just report the real
66 * CPR0 values.
67 */
68 switch (dcrn) {
69 case DCRN_CPR0_CONFIG_ADDR:
70 kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr);
71 break;
72 case DCRN_CPR0_CONFIG_DATA:
73 local_irq_disable();
74 mtdcr(DCRN_CPR0_CONFIG_ADDR,
75 vcpu->arch.cpr0_cfgaddr);
76 kvmppc_set_gpr(vcpu, rt,
77 mfdcr(DCRN_CPR0_CONFIG_DATA));
78 local_irq_enable();
79 break;
80 default:
81 vcpu->run->dcr.dcrn = dcrn;
82 vcpu->run->dcr.data = 0;
83 vcpu->run->dcr.is_write = 0;
84 vcpu->arch.dcr_is_write = 0;
85 vcpu->arch.io_gpr = rt;
86 vcpu->arch.dcr_needed = 1;
87 kvmppc_account_exit(vcpu, DCR_EXITS);
88 return EMULATE_DO_DCR;
89 }
90
91 return EMULATE_DONE;
92}
93
94int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
95 unsigned int inst, int *advance)
96{
97 int emulated = EMULATE_DONE;
98 int dcrn = get_dcrn(inst);
99 int ra = get_ra(inst);
100 int rb = get_rb(inst);
101 int rc = get_rc(inst);
102 int rs = get_rs(inst);
103 int rt = get_rt(inst);
104 int ws = get_ws(inst);
105
106 switch (get_op(inst)) {
107 case 31:
108 switch (get_xop(inst)) {
109
110 case XOP_MFDCR:
111 emulated = emulate_mfdcr(vcpu, rt, dcrn);
112 break;
113
114 case XOP_MFDCRX:
115 emulated = emulate_mfdcr(vcpu, rt,
116 kvmppc_get_gpr(vcpu, ra));
117 break;
118
119 case XOP_MTDCR:
120 emulated = emulate_mtdcr(vcpu, rs, dcrn);
121 break;
122
123 case XOP_MTDCRX:
124 emulated = emulate_mtdcr(vcpu, rs,
125 kvmppc_get_gpr(vcpu, ra));
126 break;
127
128 case XOP_TLBWE:
129 emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
130 break;
131
132 case XOP_TLBSX:
133 emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
134 break;
135
136 case XOP_ICCCI:
137 break;
138
139 default:
140 emulated = EMULATE_FAIL;
141 }
142
143 break;
144
145 default:
146 emulated = EMULATE_FAIL;
147 }
148
149 if (emulated == EMULATE_FAIL)
150 emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);
151
152 return emulated;
153}
154
155int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
156{
157 int emulated = EMULATE_DONE;
158
159 switch (sprn) {
160 case SPRN_PID:
161 kvmppc_set_pid(vcpu, spr_val); break;
162 case SPRN_MMUCR:
163 vcpu->arch.mmucr = spr_val; break;
164 case SPRN_CCR0:
165 vcpu->arch.ccr0 = spr_val; break;
166 case SPRN_CCR1:
167 vcpu->arch.ccr1 = spr_val; break;
168 default:
169 emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
170 }
171
172 return emulated;
173}
174
175int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
176{
177 int emulated = EMULATE_DONE;
178
179 switch (sprn) {
180 case SPRN_PID:
181 *spr_val = vcpu->arch.pid; break;
182 case SPRN_MMUCR:
183 *spr_val = vcpu->arch.mmucr; break;
184 case SPRN_CCR0:
185 *spr_val = vcpu->arch.ccr0; break;
186 case SPRN_CCR1:
187 *spr_val = vcpu->arch.ccr1; break;
188 default:
189 emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
190 }
191
192 return emulated;
193}
194
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
deleted file mode 100644
index 0deef1082e02..000000000000
--- a/arch/powerpc/kvm/44x_tlb.c
+++ /dev/null
@@ -1,528 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#include <linux/types.h>
21#include <linux/string.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/highmem.h>
25
26#include <asm/tlbflush.h>
27#include <asm/mmu-44x.h>
28#include <asm/kvm_ppc.h>
29#include <asm/kvm_44x.h>
30#include "timing.h"
31
32#include "44x_tlb.h"
33#include "trace.h"
34
35#ifndef PPC44x_TLBE_SIZE
36#define PPC44x_TLBE_SIZE PPC44x_TLB_4K
37#endif
38
39#define PAGE_SIZE_4K (1<<12)
40#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1))
41
42#define PPC44x_TLB_UATTR_MASK \
43 (PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3)
44#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
45#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
46
47#ifdef DEBUG
48void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
49{
50 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
51 struct kvmppc_44x_tlbe *tlbe;
52 int i;
53
54 printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
55 printk("| %2s | %3s | %8s | %8s | %8s |\n",
56 "nr", "tid", "word0", "word1", "word2");
57
58 for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
59 tlbe = &vcpu_44x->guest_tlb[i];
60 if (tlbe->word0 & PPC44x_TLB_VALID)
61 printk(" G%2d | %02X | %08X | %08X | %08X |\n",
62 i, tlbe->tid, tlbe->word0, tlbe->word1,
63 tlbe->word2);
64 }
65}
66#endif
67
68static inline void kvmppc_44x_tlbie(unsigned int index)
69{
70 /* 0 <= index < 64, so the V bit is clear and we can use the index as
71 * word0. */
72 asm volatile(
73 "tlbwe %[index], %[index], 0\n"
74 :
75 : [index] "r"(index)
76 );
77}
78
79static inline void kvmppc_44x_tlbre(unsigned int index,
80 struct kvmppc_44x_tlbe *tlbe)
81{
82 asm volatile(
83 "tlbre %[word0], %[index], 0\n"
84 "mfspr %[tid], %[sprn_mmucr]\n"
85 "andi. %[tid], %[tid], 0xff\n"
86 "tlbre %[word1], %[index], 1\n"
87 "tlbre %[word2], %[index], 2\n"
88 : [word0] "=r"(tlbe->word0),
89 [word1] "=r"(tlbe->word1),
90 [word2] "=r"(tlbe->word2),
91 [tid] "=r"(tlbe->tid)
92 : [index] "r"(index),
93 [sprn_mmucr] "i"(SPRN_MMUCR)
94 : "cc"
95 );
96}
97
98static inline void kvmppc_44x_tlbwe(unsigned int index,
99 struct kvmppc_44x_tlbe *stlbe)
100{
101 unsigned long tmp;
102
103 asm volatile(
104 "mfspr %[tmp], %[sprn_mmucr]\n"
105 "rlwimi %[tmp], %[tid], 0, 0xff\n"
106 "mtspr %[sprn_mmucr], %[tmp]\n"
107 "tlbwe %[word0], %[index], 0\n"
108 "tlbwe %[word1], %[index], 1\n"
109 "tlbwe %[word2], %[index], 2\n"
110 : [tmp] "=&r"(tmp)
111 : [word0] "r"(stlbe->word0),
112 [word1] "r"(stlbe->word1),
113 [word2] "r"(stlbe->word2),
114 [tid] "r"(stlbe->tid),
115 [index] "r"(index),
116 [sprn_mmucr] "i"(SPRN_MMUCR)
117 );
118}
119
120static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
121{
122 /* We only care about the guest's permission and user bits. */
123 attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK;
124
125 if (!usermode) {
126 /* Guest is in supervisor mode, so we need to translate guest
127 * supervisor permissions into user permissions. */
128 attrib &= ~PPC44x_TLB_USER_PERM_MASK;
129 attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
130 }
131
132 /* Make sure host can always access this memory. */
133 attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;
134
135 /* WIMGE = 0b00100 */
136 attrib |= PPC44x_TLB_M;
137
138 return attrib;
139}
140
141/* Load shadow TLB back into hardware. */
142void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu)
143{
144 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
145 int i;
146
147 for (i = 0; i <= tlb_44x_hwater; i++) {
148 struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
149
150 if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
151 kvmppc_44x_tlbwe(i, stlbe);
152 }
153}
154
155static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x,
156 unsigned int i)
157{
158 vcpu_44x->shadow_tlb_mod[i] = 1;
159}
160
161/* Save hardware TLB to the vcpu, and invalidate all guest mappings. */
162void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu)
163{
164 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
165 int i;
166
167 for (i = 0; i <= tlb_44x_hwater; i++) {
168 struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
169
170 if (vcpu_44x->shadow_tlb_mod[i])
171 kvmppc_44x_tlbre(i, stlbe);
172
173 if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
174 kvmppc_44x_tlbie(i);
175 }
176}
177
178
179/* Search the guest TLB for a matching entry. */
180int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
181 unsigned int as)
182{
183 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
184 int i;
185
186 /* XXX Replace loop with fancy data structures. */
187 for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
188 struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
189 unsigned int tid;
190
191 if (eaddr < get_tlb_eaddr(tlbe))
192 continue;
193
194 if (eaddr > get_tlb_end(tlbe))
195 continue;
196
197 tid = get_tlb_tid(tlbe);
198 if (tid && (tid != pid))
199 continue;
200
201 if (!get_tlb_v(tlbe))
202 continue;
203
204 if (get_tlb_ts(tlbe) != as)
205 continue;
206
207 return i;
208 }
209
210 return -1;
211}
212
213gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
214 gva_t eaddr)
215{
216 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
217 struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
218 unsigned int pgmask = get_tlb_bytes(gtlbe) - 1;
219
220 return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
221}
222
223int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
224{
225 unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
226
227 return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
228}
229
230int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
231{
232 unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
233
234 return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
235}
236
237void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
238{
239}
240
241void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
242{
243}
244
245static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
246 unsigned int stlb_index)
247{
248 struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index];
249
250 if (!ref->page)
251 return;
252
253 /* Discard from the TLB. */
254 /* Note: we could actually invalidate a host mapping, if the host overwrote
255 * this TLB entry since we inserted a guest mapping. */
256 kvmppc_44x_tlbie(stlb_index);
257
258 /* Now release the page. */
259 if (ref->writeable)
260 kvm_release_page_dirty(ref->page);
261 else
262 kvm_release_page_clean(ref->page);
263
264 ref->page = NULL;
265
266 /* XXX set tlb_44x_index to stlb_index? */
267
268 trace_kvm_stlb_inval(stlb_index);
269}
270
271void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu)
272{
273 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
274 int i;
275
276 for (i = 0; i <= tlb_44x_hwater; i++)
277 kvmppc_44x_shadow_release(vcpu_44x, i);
278}
279
280/**
281 * kvmppc_mmu_map -- create a host mapping for guest memory
282 *
283 * If the guest wanted a larger page than the host supports, only the first
284 * host page is mapped here and the rest are demand faulted.
285 *
286 * If the guest wanted a smaller page than the host page size, we map only the
287 * guest-size page (i.e. not a full host page mapping).
288 *
289 * Caller must ensure that the specified guest TLB entry is safe to insert into
290 * the shadow TLB.
291 */
292void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
293 unsigned int gtlb_index)
294{
295 struct kvmppc_44x_tlbe stlbe;
296 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
297 struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
298 struct kvmppc_44x_shadow_ref *ref;
299 struct page *new_page;
300 hpa_t hpaddr;
301 gfn_t gfn;
302 u32 asid = gtlbe->tid;
303 u32 flags = gtlbe->word2;
304 u32 max_bytes = get_tlb_bytes(gtlbe);
305 unsigned int victim;
306
307 /* Select TLB entry to clobber. Indirectly guard against races with the TLB
308 * miss handler by disabling interrupts. */
309 local_irq_disable();
310 victim = ++tlb_44x_index;
311 if (victim > tlb_44x_hwater)
312 victim = 0;
313 tlb_44x_index = victim;
314 local_irq_enable();
315
316 /* Get reference to new page. */
317 gfn = gpaddr >> PAGE_SHIFT;
318 new_page = gfn_to_page(vcpu->kvm, gfn);
319 if (is_error_page(new_page)) {
320 printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n",
321 (unsigned long long)gfn);
322 return;
323 }
324 hpaddr = page_to_phys(new_page);
325
326 /* Invalidate any previous shadow mappings. */
327 kvmppc_44x_shadow_release(vcpu_44x, victim);
328
329 /* XXX Make sure (va, size) doesn't overlap any other
330 * entries. 440x6 user manual says the result would be
331 * "undefined." */
332
333 /* XXX what about AS? */
334
335 /* Force TS=1 for all guest mappings. */
336 stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;
337
338 if (max_bytes >= PAGE_SIZE) {
339 /* Guest mapping is larger than or equal to host page size. We can use
340 * a "native" host mapping. */
341 stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
342 } else {
343 /* Guest mapping is smaller than host page size. We must restrict the
344 * size of the mapping to be at most the smaller of the two, but for
345 * simplicity we fall back to a 4K mapping (this is probably what the
346 * guest is using anyways). */
347 stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;
348
349 /* 'hpaddr' is a host page, which is larger than the mapping we're
350 * inserting here. To compensate, we must add the in-page offset to the
351 * sub-page. */
352 hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
353 }
354
355 stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
356 stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
357 vcpu->arch.shared->msr & MSR_PR);
358 stlbe.tid = !(asid & 0xff);
359
360 /* Keep track of the reference so we can properly release it later. */
361 ref = &vcpu_44x->shadow_refs[victim];
362 ref->page = new_page;
363 ref->gtlb_index = gtlb_index;
364 ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW);
365 ref->tid = stlbe.tid;
366
367 /* Insert shadow mapping into hardware TLB. */
368 kvmppc_44x_tlbe_set_modified(vcpu_44x, victim);
369 kvmppc_44x_tlbwe(victim, &stlbe);
370 trace_kvm_stlb_write(victim, stlbe.tid, stlbe.word0, stlbe.word1,
371 stlbe.word2);
372}
373
374/* For a particular guest TLB entry, invalidate the corresponding host TLB
375 * mappings and release the host pages. */
376static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
377 unsigned int gtlb_index)
378{
379 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
380 int i;
381
382 for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
383 struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
384 if (ref->gtlb_index == gtlb_index)
385 kvmppc_44x_shadow_release(vcpu_44x, i);
386 }
387}
388
389void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
390{
391 int usermode = vcpu->arch.shared->msr & MSR_PR;
392
393 vcpu->arch.shadow_pid = !usermode;
394}
395
396void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
397{
398 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
399 int i;
400
401 if (unlikely(vcpu->arch.pid == new_pid))
402 return;
403
404 vcpu->arch.pid = new_pid;
405
406 /* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
407 * can't access guest kernel mappings (TID=1). When we switch to a new
408 * guest PID, which will also use host PID=0, we must discard the old guest
409 * userspace mappings. */
410 for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
411 struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
412
413 if (ref->tid == 0)
414 kvmppc_44x_shadow_release(vcpu_44x, i);
415 }
416}
417
418static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
419 const struct kvmppc_44x_tlbe *tlbe)
420{
421 gpa_t gpa;
422
423 if (!get_tlb_v(tlbe))
424 return 0;
425
426 /* Does it match current guest AS? */
427 /* XXX what about IS != DS? */
428 if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
429 return 0;
430
431 gpa = get_tlb_raddr(tlbe);
432 if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
433 /* Mapping is not for RAM. */
434 return 0;
435
436 return 1;
437}
438
439int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
440{
441 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
442 struct kvmppc_44x_tlbe *tlbe;
443 unsigned int gtlb_index;
444 int idx;
445
446 gtlb_index = kvmppc_get_gpr(vcpu, ra);
447 if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
448 printk("%s: index %d\n", __func__, gtlb_index);
449 kvmppc_dump_vcpu(vcpu);
450 return EMULATE_FAIL;
451 }
452
453 tlbe = &vcpu_44x->guest_tlb[gtlb_index];
454
455 /* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */
456 if (tlbe->word0 & PPC44x_TLB_VALID)
457 kvmppc_44x_invalidate(vcpu, gtlb_index);
458
459 switch (ws) {
460 case PPC44x_TLB_PAGEID:
461 tlbe->tid = get_mmucr_stid(vcpu);
462 tlbe->word0 = kvmppc_get_gpr(vcpu, rs);
463 break;
464
465 case PPC44x_TLB_XLAT:
466 tlbe->word1 = kvmppc_get_gpr(vcpu, rs);
467 break;
468
469 case PPC44x_TLB_ATTRIB:
470 tlbe->word2 = kvmppc_get_gpr(vcpu, rs);
471 break;
472
473 default:
474 return EMULATE_FAIL;
475 }
476
477 idx = srcu_read_lock(&vcpu->kvm->srcu);
478
479 if (tlbe_is_host_safe(vcpu, tlbe)) {
480 gva_t eaddr;
481 gpa_t gpaddr;
482 u32 bytes;
483
484 eaddr = get_tlb_eaddr(tlbe);
485 gpaddr = get_tlb_raddr(tlbe);
486
487 /* Use the advertised page size to mask effective and real addrs. */
488 bytes = get_tlb_bytes(tlbe);
489 eaddr &= ~(bytes - 1);
490 gpaddr &= ~(bytes - 1);
491
492 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
493 }
494
495 srcu_read_unlock(&vcpu->kvm->srcu, idx);
496
497 trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
498 tlbe->word2);
499
500 kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
501 return EMULATE_DONE;
502}
503
504int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
505{
506 u32 ea;
507 int gtlb_index;
508 unsigned int as = get_mmucr_sts(vcpu);
509 unsigned int pid = get_mmucr_stid(vcpu);
510
511 ea = kvmppc_get_gpr(vcpu, rb);
512 if (ra)
513 ea += kvmppc_get_gpr(vcpu, ra);
514
515 gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
516 if (rc) {
517 u32 cr = kvmppc_get_cr(vcpu);
518
519 if (gtlb_index < 0)
520 kvmppc_set_cr(vcpu, cr & ~0x20000000);
521 else
522 kvmppc_set_cr(vcpu, cr | 0x20000000);
523 }
524 kvmppc_set_gpr(vcpu, rt, gtlb_index);
525
526 kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
527 return EMULATE_DONE;
528}
diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h
deleted file mode 100644
index a9ff80e51526..000000000000
--- a/arch/powerpc/kvm/44x_tlb.h
+++ /dev/null
@@ -1,86 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __KVM_POWERPC_TLB_H__
21#define __KVM_POWERPC_TLB_H__
22
23#include <linux/kvm_host.h>
24#include <asm/mmu-44x.h>
25
26extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr,
27 unsigned int pid, unsigned int as);
28
29extern int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb,
30 u8 rc);
31extern int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws);
32
33/* TLB helper functions */
34static inline unsigned int get_tlb_size(const struct kvmppc_44x_tlbe *tlbe)
35{
36 return (tlbe->word0 >> 4) & 0xf;
37}
38
39static inline gva_t get_tlb_eaddr(const struct kvmppc_44x_tlbe *tlbe)
40{
41 return tlbe->word0 & 0xfffffc00;
42}
43
44static inline gva_t get_tlb_bytes(const struct kvmppc_44x_tlbe *tlbe)
45{
46 unsigned int pgsize = get_tlb_size(tlbe);
47 return 1 << 10 << (pgsize << 1);
48}
49
50static inline gva_t get_tlb_end(const struct kvmppc_44x_tlbe *tlbe)
51{
52 return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1;
53}
54
55static inline u64 get_tlb_raddr(const struct kvmppc_44x_tlbe *tlbe)
56{
57 u64 word1 = tlbe->word1;
58 return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00);
59}
60
61static inline unsigned int get_tlb_tid(const struct kvmppc_44x_tlbe *tlbe)
62{
63 return tlbe->tid & 0xff;
64}
65
66static inline unsigned int get_tlb_ts(const struct kvmppc_44x_tlbe *tlbe)
67{
68 return (tlbe->word0 >> 8) & 0x1;
69}
70
71static inline unsigned int get_tlb_v(const struct kvmppc_44x_tlbe *tlbe)
72{
73 return (tlbe->word0 >> 9) & 0x1;
74}
75
76static inline unsigned int get_mmucr_stid(const struct kvm_vcpu *vcpu)
77{
78 return vcpu->arch.mmucr & 0xff;
79}
80
81static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu)
82{
83 return (vcpu->arch.mmucr >> 16) & 0x1;
84}
85
86#endif /* __KVM_POWERPC_TLB_H__ */
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index d6a53b95de94..8f104a6879f0 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -75,7 +75,6 @@ config KVM_BOOK3S_64
 config KVM_BOOK3S_64_HV
 	tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host"
 	depends on KVM_BOOK3S_64
-	depends on !CPU_LITTLE_ENDIAN
 	select KVM_BOOK3S_HV_POSSIBLE
 	select MMU_NOTIFIER
 	select CMA
@@ -113,23 +112,9 @@ config KVM_BOOK3S_64_PR
 config KVM_BOOKE_HV
 	bool
 
-config KVM_440
-	bool "KVM support for PowerPC 440 processors"
-	depends on 44x
-	select KVM
-	select KVM_MMIO
-	---help---
-	  Support running unmodified 440 guest kernels in virtual machines on
-	  440 host processors.
-
-	  This module provides access to the hardware capabilities through
-	  a character device node named /dev/kvm.
-
-	  If unsure, say N.
-
 config KVM_EXIT_TIMING
 	bool "Detailed exit timing"
-	depends on KVM_440 || KVM_E500V2 || KVM_E500MC
+	depends on KVM_E500V2 || KVM_E500MC
 	---help---
 	  Calculate elapsed time for every exit/enter cycle. A per-vcpu
 	  report is available in debugfs kvm/vm#_vcpu#_timing.
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index ce569b6bf4d8..2d590dea5482 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -10,27 +10,17 @@ KVM := ../../../virt/kvm
 common-objs-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
 		$(KVM)/eventfd.o
 
-CFLAGS_44x_tlb.o := -I.
 CFLAGS_e500_mmu.o := -I.
 CFLAGS_e500_mmu_host.o := -I.
 CFLAGS_emulate.o := -I.
+CFLAGS_emulate_loadstore.o := -I.
 
-common-objs-y += powerpc.o emulate.o
+common-objs-y += powerpc.o emulate.o emulate_loadstore.o
 obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o
 obj-$(CONFIG_KVM_BOOK3S_HANDLER) += book3s_exports.o
 
 AFLAGS_booke_interrupts.o := -I$(obj)
 
-kvm-440-objs := \
-	$(common-objs-y) \
-	booke.o \
-	booke_emulate.o \
-	booke_interrupts.o \
-	44x.o \
-	44x_tlb.o \
-	44x_emulate.o
-kvm-objs-$(CONFIG_KVM_440) := $(kvm-440-objs)
-
 kvm-e500-objs := \
 	$(common-objs-y) \
 	booke.o \
@@ -58,6 +48,7 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) := \
 
 kvm-pr-y := \
 	fpu.o \
+	emulate.o \
 	book3s_paired_singles.o \
 	book3s_pr.o \
 	book3s_pr_papr.o \
@@ -101,7 +92,7 @@ kvm-book3s_64-module-objs += \
 	$(KVM)/kvm_main.o \
 	$(KVM)/eventfd.o \
 	powerpc.o \
-	emulate.o \
+	emulate_loadstore.o \
 	book3s.o \
 	book3s_64_vio.o \
 	book3s_rtas.o \
@@ -127,7 +118,6 @@ kvm-objs-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o
 
 kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
 
-obj-$(CONFIG_KVM_440) += kvm.o
 obj-$(CONFIG_KVM_E500V2) += kvm.o
 obj-$(CONFIG_KVM_E500MC) += kvm.o
 obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index c254c27f240e..dd03f6b299ba 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -72,6 +72,17 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
 {
 }
 
+void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
+		ulong pc = kvmppc_get_pc(vcpu);
+		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
+			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
+		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
+	}
+}
+EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);
+
 static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
 {
 	if (!is_kvmppc_hv_enabled(vcpu->kvm))
@@ -118,6 +129,7 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
 
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
+	kvmppc_unfixup_split_real(vcpu);
 	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
 	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
 	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
@@ -218,6 +230,23 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
 	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
 }
 
+void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
+				    ulong flags)
+{
+	kvmppc_set_dar(vcpu, dar);
+	kvmppc_set_dsisr(vcpu, flags);
+	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
+}
+
+void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
+{
+	u64 msr = kvmppc_get_msr(vcpu);
+	msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
+	msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
+	kvmppc_set_msr_fast(vcpu, msr);
+	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
+}
+
 int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 {
 	int deliver = 1;
@@ -342,18 +371,18 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
 
-pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
 			bool *writable)
 {
-	ulong mp_pa = vcpu->arch.magic_page_pa;
+	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
+	gfn_t gfn = gpa >> PAGE_SHIFT;
 
 	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
 		mp_pa = (uint32_t)mp_pa;
 
 	/* Magic page override */
-	if (unlikely(mp_pa) &&
-	    unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
-		     ((mp_pa & PAGE_MASK) & KVM_PAM))) {
+	gpa &= ~0xFFFULL;
+	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
 		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
 		pfn_t pfn;
 
@@ -366,11 +395,13 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
 
 	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
 }
-EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn);
+EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
 
-static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
-			bool iswrite, struct kvmppc_pte *pte)
+int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
+		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
 {
+	bool data = (xlid == XLATE_DATA);
+	bool iswrite = (xlrw == XLATE_WRITE);
 	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
 	int r;
 
@@ -384,88 +415,34 @@ static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
 		pte->may_write = true;
 		pte->may_execute = true;
 		r = 0;
+
+		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
+		    !data) {
+			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
+			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
+				pte->raddr &= ~SPLIT_HACK_MASK;
+		}
 	}
 
 	return r;
 }
 
-static hva_t kvmppc_bad_hva(void)
-{
-	return PAGE_OFFSET;
-}
-
-static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
-			       bool read)
-{
-	hva_t hpage;
-
-	if (read && !pte->may_read)
-		goto err;
-
-	if (!read && !pte->may_write)
-		goto err;
-
-	hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
-	if (kvm_is_error_hva(hpage))
-		goto err;
-
-	return hpage | (pte->raddr & ~PAGE_MASK);
-err:
-	return kvmppc_bad_hva();
-}
-
-int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
-	      bool data)
-{
-	struct kvmppc_pte pte;
-
-	vcpu->stat.st++;
-
-	if (kvmppc_xlate(vcpu, *eaddr, data, true, &pte))
-		return -ENOENT;
-
-	*eaddr = pte.raddr;
-
-	if (!pte.may_write)
-		return -EPERM;
-
-	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
-		return EMULATE_DO_MMIO;
-
-	return EMULATE_DONE;
-}
-EXPORT_SYMBOL_GPL(kvmppc_st);
-
-int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
-	      bool data)
+int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
+			  u32 *inst)
 {
-	struct kvmppc_pte pte;
-	hva_t hva = *eaddr;
-
-	vcpu->stat.ld++;
-
-	if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte))
-		goto nopte;
-
-	*eaddr = pte.raddr;
-
-	hva = kvmppc_pte_to_hva(vcpu, &pte, true);
-	if (kvm_is_error_hva(hva))
-		goto mmio;
-
-	if (copy_from_user(ptr, (void __user *)hva, size)) {
-		printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
-		goto mmio;
-	}
+	ulong pc = kvmppc_get_pc(vcpu);
+	int r;
 
-	return EMULATE_DONE;
+	if (type == INST_SC)
+		pc -= 4;
 
-nopte:
-	return -ENOENT;
-mmio:
-	return EMULATE_DO_MMIO;
+	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
+	if (r == EMULATE_DONE)
+		return r;
+	else
+		return EMULATE_AGAIN;
 }
-EXPORT_SYMBOL_GPL(kvmppc_ld);
+EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
@@ -646,6 +623,12 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	case KVM_REG_PPC_BESCR:
 		val = get_reg_val(reg->id, vcpu->arch.bescr);
 		break;
+	case KVM_REG_PPC_VTB:
+		val = get_reg_val(reg->id, vcpu->arch.vtb);
+		break;
+	case KVM_REG_PPC_IC:
+		val = get_reg_val(reg->id, vcpu->arch.ic);
+		break;
 	default:
 		r = -EINVAL;
 		break;
@@ -750,6 +733,12 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	case KVM_REG_PPC_BESCR:
 		vcpu->arch.bescr = set_reg_val(reg->id, val);
 		break;
+	case KVM_REG_PPC_VTB:
+		vcpu->arch.vtb = set_reg_val(reg->id, val);
+		break;
+	case KVM_REG_PPC_IC:
+		vcpu->arch.ic = set_reg_val(reg->id, val);
+		break;
 	default:
 		r = -EINVAL;
 		break;
@@ -913,6 +902,11 @@ int kvmppc_core_check_processor_compat(void)
 	return 0;
 }
 
+int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
+{
+	return kvm->arch.kvm_ops->hcall_implemented(hcall);
+}
+
 static int kvmppc_book3s_init(void)
 {
 	int r;
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 93503bbdae43..cd0b0730e29e 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -335,7 +335,7 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	if (r < 0)
 		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
 						   data, iswrite, true);
-	if (r < 0)
+	if (r == -ENOENT)
 		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
 						   data, iswrite, false);
 
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 678e75370495..2035d16a9262 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -156,11 +156,10 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
 	bool writable;
 
 	/* Get host physical address for gpa */
-	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT,
-				   iswrite, &writable);
+	hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
 	if (is_error_noslot_pfn(hpaddr)) {
-		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
-				 orig_pte->eaddr);
+		printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
+				 orig_pte->raddr);
 		r = -EINVAL;
 		goto out;
 	}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 0ac98392f363..b982d925c710 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -104,9 +104,10 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
 	smp_rmb();
 
 	/* Get host physical address for gpa */
-	pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable);
+	pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
 	if (is_error_noslot_pfn(pfn)) {
-		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", gfn);
+		printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
+		       orig_pte->raddr);
 		r = -EINVAL;
 		goto out;
 	}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 68468d695f12..e3d17f571085 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -450,7 +450,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	unsigned long slb_v;
 	unsigned long pp, key;
 	unsigned long v, gr;
-	unsigned long *hptep;
+	__be64 *hptep;
 	int index;
 	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
 
@@ -473,13 +473,13 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		preempt_enable();
 		return -ENOENT;
 	}
-	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
-	v = hptep[0] & ~HPTE_V_HVLOCK;
+	hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
+	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
 	gr = kvm->arch.revmap[index].guest_rpte;
 
 	/* Unlock the HPTE */
 	asm volatile("lwsync" : : : "memory");
-	hptep[0] = v;
+	hptep[0] = cpu_to_be64(v);
 	preempt_enable();
 
 	gpte->eaddr = eaddr;
@@ -530,21 +530,14 @@ static int instruction_is_store(unsigned int instr)
 static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				  unsigned long gpa, gva_t ea, int is_store)
 {
-	int ret;
 	u32 last_inst;
-	unsigned long srr0 = kvmppc_get_pc(vcpu);
 
-	/* We try to load the last instruction. We don't let
-	 * emulate_instruction do it as it doesn't check what
-	 * kvmppc_ld returns.
+	/*
 	 * If we fail, we just return to the guest and try executing it again.
 	 */
-	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
-		ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
-		if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
-			return RESUME_GUEST;
-		vcpu->arch.last_inst = last_inst;
-	}
+	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
+	    EMULATE_DONE)
+		return RESUME_GUEST;
 
 	/*
 	 * WARNING: We do not know for sure whether the instruction we just
@@ -558,7 +551,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 * we just return and retry the instruction.
 	 */
 
-	if (instruction_is_store(kvmppc_get_last_inst(vcpu)) != !!is_store)
+	if (instruction_is_store(last_inst) != !!is_store)
 		return RESUME_GUEST;
 
 	/*
@@ -583,7 +576,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				unsigned long ea, unsigned long dsisr)
 {
 	struct kvm *kvm = vcpu->kvm;
-	unsigned long *hptep, hpte[3], r;
+	unsigned long hpte[3], r;
+	__be64 *hptep;
 	unsigned long mmu_seq, psize, pte_size;
 	unsigned long gpa_base, gfn_base;
 	unsigned long gpa, gfn, hva, pfn;
@@ -606,16 +600,16 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	if (ea != vcpu->arch.pgfault_addr)
 		return RESUME_GUEST;
 	index = vcpu->arch.pgfault_index;
-	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
+	hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
 	rev = &kvm->arch.revmap[index];
 	preempt_disable();
 	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 		cpu_relax();
-	hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
-	hpte[1] = hptep[1];
+	hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
+	hpte[1] = be64_to_cpu(hptep[1]);
 	hpte[2] = r = rev->guest_rpte;
 	asm volatile("lwsync" : : : "memory");
-	hptep[0] = hpte[0];
+	hptep[0] = cpu_to_be64(hpte[0]);
 	preempt_enable();
 
 	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
@@ -731,8 +725,9 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	preempt_disable();
 	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 		cpu_relax();
-	if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
-	    rev->guest_rpte != hpte[2])
+	if ((be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK) != hpte[0] ||
+	    be64_to_cpu(hptep[1]) != hpte[1] ||
+	    rev->guest_rpte != hpte[2])
 		/* HPTE has been changed under us; let the guest retry */
 		goto out_unlock;
 	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
@@ -752,20 +747,20 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
 	r &= rcbits | ~(HPTE_R_R | HPTE_R_C);
 
-	if (hptep[0] & HPTE_V_VALID) {
+	if (be64_to_cpu(hptep[0]) & HPTE_V_VALID) {
 		/* HPTE was previously valid, so we need to invalidate it */
 		unlock_rmap(rmap);
-		hptep[0] |= HPTE_V_ABSENT;
+		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
 		kvmppc_invalidate_hpte(kvm, hptep, index);
 		/* don't lose previous R and C bits */
-		r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
+		r |= be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
 	} else {
 		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
 	}
 
-	hptep[1] = r;
+	hptep[1] = cpu_to_be64(r);
 	eieio();
-	hptep[0] = hpte[0];
+	hptep[0] = cpu_to_be64(hpte[0]);
 	asm volatile("ptesync" : : : "memory");
 	preempt_enable();
 	if (page && hpte_is_writable(r))
@@ -784,7 +779,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
784 return ret; 779 return ret;
785 780
786 out_unlock: 781 out_unlock:
787 hptep[0] &= ~HPTE_V_HVLOCK; 782 hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
788 preempt_enable(); 783 preempt_enable();
789 goto out_put; 784 goto out_put;
790} 785}
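The page-fault hunks above switch the HPT from plain unsigned long accesses to __be64 with explicit be64_to_cpu()/cpu_to_be64() conversions, so the same code works on a little-endian host. A minimal sketch of the locked read/modify/write discipline the patch applies throughout follows; try_lock_hpte(), the HPTE_V_* flags and the eieio/ptesync ordering are taken from the hunks above, while the helper itself is illustrative only and not part of the patch.

/* Illustrative only: read an HPTE pair into host byte order, modify it,
 * and write it back while honouring the HVLOCK discipline used above.
 * Assumes try_lock_hpte() and the HPTE_V_* flags from the kernel headers.
 */
static void example_update_hpte(__be64 *hptep, unsigned long set_v_bits)
{
	unsigned long v, r;

	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();

	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;	/* host byte order */
	r = be64_to_cpu(hptep[1]);

	v |= set_v_bits;				/* whatever update is needed */

	hptep[1] = cpu_to_be64(r);			/* second dword first */
	eieio();					/* order the two stores */
	hptep[0] = cpu_to_be64(v);			/* also drops HPTE_V_HVLOCK */
	asm volatile("ptesync" : : : "memory");
}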
@@ -860,7 +855,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
860{ 855{
861 struct revmap_entry *rev = kvm->arch.revmap; 856 struct revmap_entry *rev = kvm->arch.revmap;
862 unsigned long h, i, j; 857 unsigned long h, i, j;
863 unsigned long *hptep; 858 __be64 *hptep;
864 unsigned long ptel, psize, rcbits; 859 unsigned long ptel, psize, rcbits;
865 860
866 for (;;) { 861 for (;;) {
@@ -876,11 +871,11 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
876 * rmap chain lock. 871 * rmap chain lock.
877 */ 872 */
878 i = *rmapp & KVMPPC_RMAP_INDEX; 873 i = *rmapp & KVMPPC_RMAP_INDEX;
879 hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4)); 874 hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
880 if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { 875 if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
881 /* unlock rmap before spinning on the HPTE lock */ 876 /* unlock rmap before spinning on the HPTE lock */
882 unlock_rmap(rmapp); 877 unlock_rmap(rmapp);
883 while (hptep[0] & HPTE_V_HVLOCK) 878 while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
884 cpu_relax(); 879 cpu_relax();
885 continue; 880 continue;
886 } 881 }
@@ -899,14 +894,14 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
899 894
900 /* Now check and modify the HPTE */ 895 /* Now check and modify the HPTE */
901 ptel = rev[i].guest_rpte; 896 ptel = rev[i].guest_rpte;
902 psize = hpte_page_size(hptep[0], ptel); 897 psize = hpte_page_size(be64_to_cpu(hptep[0]), ptel);
903 if ((hptep[0] & HPTE_V_VALID) && 898 if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
904 hpte_rpn(ptel, psize) == gfn) { 899 hpte_rpn(ptel, psize) == gfn) {
905 if (kvm->arch.using_mmu_notifiers) 900 if (kvm->arch.using_mmu_notifiers)
906 hptep[0] |= HPTE_V_ABSENT; 901 hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
907 kvmppc_invalidate_hpte(kvm, hptep, i); 902 kvmppc_invalidate_hpte(kvm, hptep, i);
908 /* Harvest R and C */ 903 /* Harvest R and C */
909 rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C); 904 rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
910 *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT; 905 *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
911 if (rcbits & ~rev[i].guest_rpte) { 906 if (rcbits & ~rev[i].guest_rpte) {
912 rev[i].guest_rpte = ptel | rcbits; 907 rev[i].guest_rpte = ptel | rcbits;
@@ -914,7 +909,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
914 } 909 }
915 } 910 }
916 unlock_rmap(rmapp); 911 unlock_rmap(rmapp);
917 hptep[0] &= ~HPTE_V_HVLOCK; 912 hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
918 } 913 }
919 return 0; 914 return 0;
920} 915}
@@ -961,7 +956,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
961{ 956{
962 struct revmap_entry *rev = kvm->arch.revmap; 957 struct revmap_entry *rev = kvm->arch.revmap;
963 unsigned long head, i, j; 958 unsigned long head, i, j;
964 unsigned long *hptep; 959 __be64 *hptep;
965 int ret = 0; 960 int ret = 0;
966 961
967 retry: 962 retry:
@@ -977,23 +972,24 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
977 972
978 i = head = *rmapp & KVMPPC_RMAP_INDEX; 973 i = head = *rmapp & KVMPPC_RMAP_INDEX;
979 do { 974 do {
980 hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4)); 975 hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
981 j = rev[i].forw; 976 j = rev[i].forw;
982 977
983 /* If this HPTE isn't referenced, ignore it */ 978 /* If this HPTE isn't referenced, ignore it */
984 if (!(hptep[1] & HPTE_R_R)) 979 if (!(be64_to_cpu(hptep[1]) & HPTE_R_R))
985 continue; 980 continue;
986 981
987 if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { 982 if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
988 /* unlock rmap before spinning on the HPTE lock */ 983 /* unlock rmap before spinning on the HPTE lock */
989 unlock_rmap(rmapp); 984 unlock_rmap(rmapp);
990 while (hptep[0] & HPTE_V_HVLOCK) 985 while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
991 cpu_relax(); 986 cpu_relax();
992 goto retry; 987 goto retry;
993 } 988 }
994 989
995 /* Now check and modify the HPTE */ 990 /* Now check and modify the HPTE */
996 if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) { 991 if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
992 (be64_to_cpu(hptep[1]) & HPTE_R_R)) {
997 kvmppc_clear_ref_hpte(kvm, hptep, i); 993 kvmppc_clear_ref_hpte(kvm, hptep, i);
998 if (!(rev[i].guest_rpte & HPTE_R_R)) { 994 if (!(rev[i].guest_rpte & HPTE_R_R)) {
999 rev[i].guest_rpte |= HPTE_R_R; 995 rev[i].guest_rpte |= HPTE_R_R;
@@ -1001,7 +997,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
1001 } 997 }
1002 ret = 1; 998 ret = 1;
1003 } 999 }
1004 hptep[0] &= ~HPTE_V_HVLOCK; 1000 hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
1005 } while ((i = j) != head); 1001 } while ((i = j) != head);
1006 1002
1007 unlock_rmap(rmapp); 1003 unlock_rmap(rmapp);
@@ -1035,7 +1031,7 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
1035 do { 1031 do {
1036 hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4)); 1032 hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
1037 j = rev[i].forw; 1033 j = rev[i].forw;
1038 if (hp[1] & HPTE_R_R) 1034 if (be64_to_cpu(hp[1]) & HPTE_R_R)
1039 goto out; 1035 goto out;
1040 } while ((i = j) != head); 1036 } while ((i = j) != head);
1041 } 1037 }
@@ -1075,7 +1071,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
1075 unsigned long head, i, j; 1071 unsigned long head, i, j;
1076 unsigned long n; 1072 unsigned long n;
1077 unsigned long v, r; 1073 unsigned long v, r;
1078 unsigned long *hptep; 1074 __be64 *hptep;
1079 int npages_dirty = 0; 1075 int npages_dirty = 0;
1080 1076
1081 retry: 1077 retry:
@@ -1091,7 +1087,8 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
1091 1087
1092 i = head = *rmapp & KVMPPC_RMAP_INDEX; 1088 i = head = *rmapp & KVMPPC_RMAP_INDEX;
1093 do { 1089 do {
1094 hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4)); 1090 unsigned long hptep1;
1091 hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
1095 j = rev[i].forw; 1092 j = rev[i].forw;
1096 1093
1097 /* 1094 /*
@@ -1108,29 +1105,30 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
1108 * Otherwise we need to do the tlbie even if C==0 in 1105 * Otherwise we need to do the tlbie even if C==0 in
1109 * order to pick up any delayed writeback of C. 1106 * order to pick up any delayed writeback of C.
1110 */ 1107 */
1111 if (!(hptep[1] & HPTE_R_C) && 1108 hptep1 = be64_to_cpu(hptep[1]);
1112 (!hpte_is_writable(hptep[1]) || vcpus_running(kvm))) 1109 if (!(hptep1 & HPTE_R_C) &&
1110 (!hpte_is_writable(hptep1) || vcpus_running(kvm)))
1113 continue; 1111 continue;
1114 1112
1115 if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { 1113 if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
1116 /* unlock rmap before spinning on the HPTE lock */ 1114 /* unlock rmap before spinning on the HPTE lock */
1117 unlock_rmap(rmapp); 1115 unlock_rmap(rmapp);
1118 while (hptep[0] & HPTE_V_HVLOCK) 1116 while (hptep[0] & cpu_to_be64(HPTE_V_HVLOCK))
1119 cpu_relax(); 1117 cpu_relax();
1120 goto retry; 1118 goto retry;
1121 } 1119 }
1122 1120
1123 /* Now check and modify the HPTE */ 1121 /* Now check and modify the HPTE */
1124 if (!(hptep[0] & HPTE_V_VALID)) 1122 if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID)))
1125 continue; 1123 continue;
1126 1124
1127 /* need to make it temporarily absent so C is stable */ 1125 /* need to make it temporarily absent so C is stable */
1128 hptep[0] |= HPTE_V_ABSENT; 1126 hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
1129 kvmppc_invalidate_hpte(kvm, hptep, i); 1127 kvmppc_invalidate_hpte(kvm, hptep, i);
1130 v = hptep[0]; 1128 v = be64_to_cpu(hptep[0]);
1131 r = hptep[1]; 1129 r = be64_to_cpu(hptep[1]);
1132 if (r & HPTE_R_C) { 1130 if (r & HPTE_R_C) {
1133 hptep[1] = r & ~HPTE_R_C; 1131 hptep[1] = cpu_to_be64(r & ~HPTE_R_C);
1134 if (!(rev[i].guest_rpte & HPTE_R_C)) { 1132 if (!(rev[i].guest_rpte & HPTE_R_C)) {
1135 rev[i].guest_rpte |= HPTE_R_C; 1133 rev[i].guest_rpte |= HPTE_R_C;
1136 note_hpte_modification(kvm, &rev[i]); 1134 note_hpte_modification(kvm, &rev[i]);
@@ -1143,7 +1141,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
1143 } 1141 }
1144 v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK); 1142 v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK);
1145 v |= HPTE_V_VALID; 1143 v |= HPTE_V_VALID;
1146 hptep[0] = v; 1144 hptep[0] = cpu_to_be64(v);
1147 } while ((i = j) != head); 1145 } while ((i = j) != head);
1148 1146
1149 unlock_rmap(rmapp); 1147 unlock_rmap(rmapp);
@@ -1307,7 +1305,7 @@ struct kvm_htab_ctx {
1307 * Returns 1 if this HPT entry has been modified or has pending 1305 * Returns 1 if this HPT entry has been modified or has pending
1308 * R/C bit changes. 1306 * R/C bit changes.
1309 */ 1307 */
1310static int hpte_dirty(struct revmap_entry *revp, unsigned long *hptp) 1308static int hpte_dirty(struct revmap_entry *revp, __be64 *hptp)
1311{ 1309{
1312 unsigned long rcbits_unset; 1310 unsigned long rcbits_unset;
1313 1311
@@ -1316,13 +1314,14 @@ static int hpte_dirty(struct revmap_entry *revp, unsigned long *hptp)
1316 1314
1317 /* Also need to consider changes in reference and changed bits */ 1315 /* Also need to consider changes in reference and changed bits */
1318 rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C); 1316 rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
1319 if ((hptp[0] & HPTE_V_VALID) && (hptp[1] & rcbits_unset)) 1317 if ((be64_to_cpu(hptp[0]) & HPTE_V_VALID) &&
1318 (be64_to_cpu(hptp[1]) & rcbits_unset))
1320 return 1; 1319 return 1;
1321 1320
1322 return 0; 1321 return 0;
1323} 1322}
1324 1323
1325static long record_hpte(unsigned long flags, unsigned long *hptp, 1324static long record_hpte(unsigned long flags, __be64 *hptp,
1326 unsigned long *hpte, struct revmap_entry *revp, 1325 unsigned long *hpte, struct revmap_entry *revp,
1327 int want_valid, int first_pass) 1326 int want_valid, int first_pass)
1328{ 1327{
@@ -1337,10 +1336,10 @@ static long record_hpte(unsigned long flags, unsigned long *hptp,
1337 return 0; 1336 return 0;
1338 1337
1339 valid = 0; 1338 valid = 0;
1340 if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) { 1339 if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) {
1341 valid = 1; 1340 valid = 1;
1342 if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && 1341 if ((flags & KVM_GET_HTAB_BOLTED_ONLY) &&
1343 !(hptp[0] & HPTE_V_BOLTED)) 1342 !(be64_to_cpu(hptp[0]) & HPTE_V_BOLTED))
1344 valid = 0; 1343 valid = 0;
1345 } 1344 }
1346 if (valid != want_valid) 1345 if (valid != want_valid)
@@ -1352,7 +1351,7 @@ static long record_hpte(unsigned long flags, unsigned long *hptp,
1352 preempt_disable(); 1351 preempt_disable();
1353 while (!try_lock_hpte(hptp, HPTE_V_HVLOCK)) 1352 while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
1354 cpu_relax(); 1353 cpu_relax();
1355 v = hptp[0]; 1354 v = be64_to_cpu(hptp[0]);
1356 1355
1357 /* re-evaluate valid and dirty from synchronized HPTE value */ 1356 /* re-evaluate valid and dirty from synchronized HPTE value */
1358 valid = !!(v & HPTE_V_VALID); 1357 valid = !!(v & HPTE_V_VALID);
@@ -1360,9 +1359,9 @@ static long record_hpte(unsigned long flags, unsigned long *hptp,
1360 1359
1361 /* Harvest R and C into guest view if necessary */ 1360 /* Harvest R and C into guest view if necessary */
1362 rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C); 1361 rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
1363 if (valid && (rcbits_unset & hptp[1])) { 1362 if (valid && (rcbits_unset & be64_to_cpu(hptp[1]))) {
1364 revp->guest_rpte |= (hptp[1] & (HPTE_R_R | HPTE_R_C)) | 1363 revp->guest_rpte |= (be64_to_cpu(hptp[1]) &
1365 HPTE_GR_MODIFIED; 1364 (HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED;
1366 dirty = 1; 1365 dirty = 1;
1367 } 1366 }
1368 1367
@@ -1381,13 +1380,13 @@ static long record_hpte(unsigned long flags, unsigned long *hptp,
1381 revp->guest_rpte = r; 1380 revp->guest_rpte = r;
1382 } 1381 }
1383 asm volatile(PPC_RELEASE_BARRIER "" : : : "memory"); 1382 asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
1384 hptp[0] &= ~HPTE_V_HVLOCK; 1383 hptp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
1385 preempt_enable(); 1384 preempt_enable();
1386 if (!(valid == want_valid && (first_pass || dirty))) 1385 if (!(valid == want_valid && (first_pass || dirty)))
1387 ok = 0; 1386 ok = 0;
1388 } 1387 }
1389 hpte[0] = v; 1388 hpte[0] = cpu_to_be64(v);
1390 hpte[1] = r; 1389 hpte[1] = cpu_to_be64(r);
1391 return ok; 1390 return ok;
1392} 1391}
1393 1392
@@ -1397,7 +1396,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
1397 struct kvm_htab_ctx *ctx = file->private_data; 1396 struct kvm_htab_ctx *ctx = file->private_data;
1398 struct kvm *kvm = ctx->kvm; 1397 struct kvm *kvm = ctx->kvm;
1399 struct kvm_get_htab_header hdr; 1398 struct kvm_get_htab_header hdr;
1400 unsigned long *hptp; 1399 __be64 *hptp;
1401 struct revmap_entry *revp; 1400 struct revmap_entry *revp;
1402 unsigned long i, nb, nw; 1401 unsigned long i, nb, nw;
1403 unsigned long __user *lbuf; 1402 unsigned long __user *lbuf;
@@ -1413,7 +1412,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
1413 flags = ctx->flags; 1412 flags = ctx->flags;
1414 1413
1415 i = ctx->index; 1414 i = ctx->index;
1416 hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); 1415 hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
1417 revp = kvm->arch.revmap + i; 1416 revp = kvm->arch.revmap + i;
1418 lbuf = (unsigned long __user *)buf; 1417 lbuf = (unsigned long __user *)buf;
1419 1418
@@ -1497,7 +1496,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
1497 unsigned long i, j; 1496 unsigned long i, j;
1498 unsigned long v, r; 1497 unsigned long v, r;
1499 unsigned long __user *lbuf; 1498 unsigned long __user *lbuf;
1500 unsigned long *hptp; 1499 __be64 *hptp;
1501 unsigned long tmp[2]; 1500 unsigned long tmp[2];
1502 ssize_t nb; 1501 ssize_t nb;
1503 long int err, ret; 1502 long int err, ret;
@@ -1539,7 +1538,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
1539 i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte) 1538 i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte)
1540 break; 1539 break;
1541 1540
1542 hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); 1541 hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
1543 lbuf = (unsigned long __user *)buf; 1542 lbuf = (unsigned long __user *)buf;
1544 for (j = 0; j < hdr.n_valid; ++j) { 1543 for (j = 0; j < hdr.n_valid; ++j) {
1545 err = -EFAULT; 1544 err = -EFAULT;
@@ -1551,7 +1550,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
1551 lbuf += 2; 1550 lbuf += 2;
1552 nb += HPTE_SIZE; 1551 nb += HPTE_SIZE;
1553 1552
1554 if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) 1553 if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
1555 kvmppc_do_h_remove(kvm, 0, i, 0, tmp); 1554 kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
1556 err = -EIO; 1555 err = -EIO;
1557 ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r, 1556 ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
@@ -1577,7 +1576,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
1577 } 1576 }
1578 1577
1579 for (j = 0; j < hdr.n_invalid; ++j) { 1578 for (j = 0; j < hdr.n_invalid; ++j) {
1580 if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) 1579 if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
1581 kvmppc_do_h_remove(kvm, 0, i, 0, tmp); 1580 kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
1582 ++i; 1581 ++i;
1583 hptp += 2; 1582 hptp += 2;
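One consequence of the record_hpte() change above (hpte[0]/hpte[1] are now stored with cpu_to_be64() into the buffer copied to userspace) is that the HPT dump read through the kvm_htab_read() file descriptor is emitted big-endian regardless of host endianness. A hypothetical userspace consumer of that stream would therefore byte-swap explicitly; the sketch below assumes the write direction mirrors this, and the header layout mirrors struct kvm_get_htab_header from the KVM uapi.

/* Userspace sketch: decode entries from the HTAB dump stream.
 * The struct mirrors kvm_get_htab_header; be64toh() is the point shown.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct htab_header {		/* mirrors struct kvm_get_htab_header */
	uint32_t index;
	uint16_t n_valid;
	uint16_t n_invalid;
};

static void print_entries(const struct htab_header *hdr, const uint64_t *hptes)
{
	for (int i = 0; i < hdr->n_valid; i++) {
		uint64_t v = be64toh(hptes[2 * i]);	/* first HPTE dword  */
		uint64_t r = be64toh(hptes[2 * i + 1]);	/* second HPTE dword */
		printf("hpte %u: v=%#llx r=%#llx\n", hdr->index + i,
		       (unsigned long long)v, (unsigned long long)r);
	}
}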
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 3f295269af37..5a2bc4b0dfe5 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -439,12 +439,6 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
439 (mfmsr() & MSR_HV)) 439 (mfmsr() & MSR_HV))
440 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; 440 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
441 break; 441 break;
442 case SPRN_PURR:
443 to_book3s(vcpu)->purr_offset = spr_val - get_tb();
444 break;
445 case SPRN_SPURR:
446 to_book3s(vcpu)->spurr_offset = spr_val - get_tb();
447 break;
448 case SPRN_GQR0: 442 case SPRN_GQR0:
449 case SPRN_GQR1: 443 case SPRN_GQR1:
450 case SPRN_GQR2: 444 case SPRN_GQR2:
@@ -455,10 +449,10 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
455 case SPRN_GQR7: 449 case SPRN_GQR7:
456 to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val; 450 to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
457 break; 451 break;
452#ifdef CONFIG_PPC_BOOK3S_64
458 case SPRN_FSCR: 453 case SPRN_FSCR:
459 vcpu->arch.fscr = spr_val; 454 kvmppc_set_fscr(vcpu, spr_val);
460 break; 455 break;
461#ifdef CONFIG_PPC_BOOK3S_64
462 case SPRN_BESCR: 456 case SPRN_BESCR:
463 vcpu->arch.bescr = spr_val; 457 vcpu->arch.bescr = spr_val;
464 break; 458 break;
@@ -572,10 +566,22 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
572 *spr_val = 0; 566 *spr_val = 0;
573 break; 567 break;
574 case SPRN_PURR: 568 case SPRN_PURR:
575 *spr_val = get_tb() + to_book3s(vcpu)->purr_offset; 569 /*
570 * On exit we would have updated purr
571 */
572 *spr_val = vcpu->arch.purr;
576 break; 573 break;
577 case SPRN_SPURR: 574 case SPRN_SPURR:
578 *spr_val = get_tb() + to_book3s(vcpu)->purr_offset; 575 /*
576 * On exit we would have updated spurr
577 */
578 *spr_val = vcpu->arch.spurr;
579 break;
580 case SPRN_VTB:
581 *spr_val = vcpu->arch.vtb;
582 break;
583 case SPRN_IC:
584 *spr_val = vcpu->arch.ic;
579 break; 585 break;
580 case SPRN_GQR0: 586 case SPRN_GQR0:
581 case SPRN_GQR1: 587 case SPRN_GQR1:
@@ -587,10 +593,10 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
587 case SPRN_GQR7: 593 case SPRN_GQR7:
588 *spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]; 594 *spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
589 break; 595 break;
596#ifdef CONFIG_PPC_BOOK3S_64
590 case SPRN_FSCR: 597 case SPRN_FSCR:
591 *spr_val = vcpu->arch.fscr; 598 *spr_val = vcpu->arch.fscr;
592 break; 599 break;
593#ifdef CONFIG_PPC_BOOK3S_64
594 case SPRN_BESCR: 600 case SPRN_BESCR:
595 *spr_val = vcpu->arch.bescr; 601 *spr_val = vcpu->arch.bescr;
596 break; 602 break;
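The mfspr side above stops synthesizing PURR/SPURR from a timebase offset and instead returns vcpu->arch.purr/spurr, plus the new VTB and IC cases, i.e. values the exit path is expected to have captured from the hardware registers. A hedged sketch of what such a capture could look like is below; where exactly this happens in the PR exit path is not part of this hunk and is an assumption, the SPRN_* names are the standard ones.

/* Illustrative only: snapshot the accounting SPRs on guest exit so that
 * later mfspr emulation can return them.  The placement of this code is
 * an assumption, not shown in this diff.
 */
static void example_snapshot_accounting_sprs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.purr  = mfspr(SPRN_PURR);
	vcpu->arch.spurr = mfspr(SPRN_SPURR);
	vcpu->arch.vtb   = mfspr(SPRN_VTB);
	vcpu->arch.ic    = mfspr(SPRN_IC);
}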
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 7a12edbb61e7..27cced9c7249 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -35,6 +35,7 @@
35 35
36#include <asm/reg.h> 36#include <asm/reg.h>
37#include <asm/cputable.h> 37#include <asm/cputable.h>
38#include <asm/cache.h>
38#include <asm/cacheflush.h> 39#include <asm/cacheflush.h>
39#include <asm/tlbflush.h> 40#include <asm/tlbflush.h>
40#include <asm/uaccess.h> 41#include <asm/uaccess.h>
@@ -67,6 +68,15 @@
67/* Used as a "null" value for timebase values */ 68/* Used as a "null" value for timebase values */
68#define TB_NIL (~(u64)0) 69#define TB_NIL (~(u64)0)
69 70
71static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
72
73#if defined(CONFIG_PPC_64K_PAGES)
74#define MPP_BUFFER_ORDER 0
75#elif defined(CONFIG_PPC_4K_PAGES)
76#define MPP_BUFFER_ORDER 3
77#endif
78
79
70static void kvmppc_end_cede(struct kvm_vcpu *vcpu); 80static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
71static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); 81static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
72 82
@@ -270,7 +280,7 @@ struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
270static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) 280static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
271{ 281{
272 vpa->__old_status |= LPPACA_OLD_SHARED_PROC; 282 vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
273 vpa->yield_count = 1; 283 vpa->yield_count = cpu_to_be32(1);
274} 284}
275 285
276static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v, 286static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
@@ -293,8 +303,8 @@ static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
293struct reg_vpa { 303struct reg_vpa {
294 u32 dummy; 304 u32 dummy;
295 union { 305 union {
296 u16 hword; 306 __be16 hword;
297 u32 word; 307 __be32 word;
298 } length; 308 } length;
299}; 309};
300 310
@@ -333,9 +343,9 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
333 if (va == NULL) 343 if (va == NULL)
334 return H_PARAMETER; 344 return H_PARAMETER;
335 if (subfunc == H_VPA_REG_VPA) 345 if (subfunc == H_VPA_REG_VPA)
336 len = ((struct reg_vpa *)va)->length.hword; 346 len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
337 else 347 else
338 len = ((struct reg_vpa *)va)->length.word; 348 len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
339 kvmppc_unpin_guest_page(kvm, va, vpa, false); 349 kvmppc_unpin_guest_page(kvm, va, vpa, false);
340 350
341 /* Check length */ 351 /* Check length */
@@ -540,21 +550,63 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
540 return; 550 return;
541 memset(dt, 0, sizeof(struct dtl_entry)); 551 memset(dt, 0, sizeof(struct dtl_entry));
542 dt->dispatch_reason = 7; 552 dt->dispatch_reason = 7;
543 dt->processor_id = vc->pcpu + vcpu->arch.ptid; 553 dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
544 dt->timebase = now + vc->tb_offset; 554 dt->timebase = cpu_to_be64(now + vc->tb_offset);
545 dt->enqueue_to_dispatch_time = stolen; 555 dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
546 dt->srr0 = kvmppc_get_pc(vcpu); 556 dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
547 dt->srr1 = vcpu->arch.shregs.msr; 557 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
548 ++dt; 558 ++dt;
549 if (dt == vcpu->arch.dtl.pinned_end) 559 if (dt == vcpu->arch.dtl.pinned_end)
550 dt = vcpu->arch.dtl.pinned_addr; 560 dt = vcpu->arch.dtl.pinned_addr;
551 vcpu->arch.dtl_ptr = dt; 561 vcpu->arch.dtl_ptr = dt;
552 /* order writing *dt vs. writing vpa->dtl_idx */ 562 /* order writing *dt vs. writing vpa->dtl_idx */
553 smp_wmb(); 563 smp_wmb();
554 vpa->dtl_idx = ++vcpu->arch.dtl_index; 564 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
555 vcpu->arch.dtl.dirty = true; 565 vcpu->arch.dtl.dirty = true;
556} 566}
557 567
568static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
569{
570 if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
571 return true;
572 if ((!vcpu->arch.vcore->arch_compat) &&
573 cpu_has_feature(CPU_FTR_ARCH_207S))
574 return true;
575 return false;
576}
577
578static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
579 unsigned long resource, unsigned long value1,
580 unsigned long value2)
581{
582 switch (resource) {
583 case H_SET_MODE_RESOURCE_SET_CIABR:
584 if (!kvmppc_power8_compatible(vcpu))
585 return H_P2;
586 if (value2)
587 return H_P4;
588 if (mflags)
589 return H_UNSUPPORTED_FLAG_START;
590 /* Guests can't breakpoint the hypervisor */
591 if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
592 return H_P3;
593 vcpu->arch.ciabr = value1;
594 return H_SUCCESS;
595 case H_SET_MODE_RESOURCE_SET_DAWR:
596 if (!kvmppc_power8_compatible(vcpu))
597 return H_P2;
598 if (mflags)
599 return H_UNSUPPORTED_FLAG_START;
600 if (value2 & DABRX_HYP)
601 return H_P4;
602 vcpu->arch.dawr = value1;
603 vcpu->arch.dawrx = value2;
604 return H_SUCCESS;
605 default:
606 return H_TOO_HARD;
607 }
608}
609
558int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) 610int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
559{ 611{
560 unsigned long req = kvmppc_get_gpr(vcpu, 3); 612 unsigned long req = kvmppc_get_gpr(vcpu, 3);
@@ -562,6 +614,10 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
562 struct kvm_vcpu *tvcpu; 614 struct kvm_vcpu *tvcpu;
563 int idx, rc; 615 int idx, rc;
564 616
617 if (req <= MAX_HCALL_OPCODE &&
618 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
619 return RESUME_HOST;
620
565 switch (req) { 621 switch (req) {
566 case H_ENTER: 622 case H_ENTER:
567 idx = srcu_read_lock(&vcpu->kvm->srcu); 623 idx = srcu_read_lock(&vcpu->kvm->srcu);
@@ -620,7 +676,14 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
620 676
621 /* Send the error out to userspace via KVM_RUN */ 677 /* Send the error out to userspace via KVM_RUN */
622 return rc; 678 return rc;
623 679 case H_SET_MODE:
680 ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
681 kvmppc_get_gpr(vcpu, 5),
682 kvmppc_get_gpr(vcpu, 6),
683 kvmppc_get_gpr(vcpu, 7));
684 if (ret == H_TOO_HARD)
685 return RESUME_HOST;
686 break;
624 case H_XIRR: 687 case H_XIRR:
625 case H_CPPR: 688 case H_CPPR:
626 case H_EOI: 689 case H_EOI:
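The new kvmppc_h_set_mode() above accepts the SET_CIABR and SET_DAWR resources with sanity checks on flags and values, and the H_SET_MODE case in the dispatch hunk routes there, returning to the host for anything it cannot handle. For reference, a pseries guest reaches this through the normal hcall path; the sketch below uses the existing plpar_hcall_norets() wrapper, and the argument order (mflags, resource, value1, value2) matches what the handler reads from GPR4..GPR7 above.

/* Guest-side sketch: ask the hypervisor to install a DAWR watchpoint.
 * plpar_hcall_norets() is the standard pseries hcall wrapper; the
 * resource constant appears in the handler above.
 */
static long example_set_dawr(unsigned long dawr, unsigned long dawrx)
{
	return plpar_hcall_norets(H_SET_MODE, 0 /* mflags */,
				  H_SET_MODE_RESOURCE_SET_DAWR,
				  dawr, dawrx);
}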
@@ -639,6 +702,29 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
639 return RESUME_GUEST; 702 return RESUME_GUEST;
640} 703}
641 704
705static int kvmppc_hcall_impl_hv(unsigned long cmd)
706{
707 switch (cmd) {
708 case H_CEDE:
709 case H_PROD:
710 case H_CONFER:
711 case H_REGISTER_VPA:
712 case H_SET_MODE:
713#ifdef CONFIG_KVM_XICS
714 case H_XIRR:
715 case H_CPPR:
716 case H_EOI:
717 case H_IPI:
718 case H_IPOLL:
719 case H_XIRR_X:
720#endif
721 return 1;
722 }
723
724 /* See if it's in the real-mode table */
725 return kvmppc_hcall_impl_hv_realmode(cmd);
726}
727
642static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, 728static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
643 struct task_struct *tsk) 729 struct task_struct *tsk)
644{ 730{
@@ -785,7 +871,8 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
785 return 0; 871 return 0;
786} 872}
787 873
788static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr) 874static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
875 bool preserve_top32)
789{ 876{
790 struct kvmppc_vcore *vc = vcpu->arch.vcore; 877 struct kvmppc_vcore *vc = vcpu->arch.vcore;
791 u64 mask; 878 u64 mask;
@@ -820,6 +907,10 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
820 mask = LPCR_DPFD | LPCR_ILE | LPCR_TC; 907 mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
821 if (cpu_has_feature(CPU_FTR_ARCH_207S)) 908 if (cpu_has_feature(CPU_FTR_ARCH_207S))
822 mask |= LPCR_AIL; 909 mask |= LPCR_AIL;
910
911 /* Broken 32-bit version of LPCR must not clear top bits */
912 if (preserve_top32)
913 mask &= 0xFFFFFFFF;
823 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); 914 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
824 spin_unlock(&vc->lock); 915 spin_unlock(&vc->lock);
825} 916}
@@ -894,12 +985,6 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
894 case KVM_REG_PPC_CIABR: 985 case KVM_REG_PPC_CIABR:
895 *val = get_reg_val(id, vcpu->arch.ciabr); 986 *val = get_reg_val(id, vcpu->arch.ciabr);
896 break; 987 break;
897 case KVM_REG_PPC_IC:
898 *val = get_reg_val(id, vcpu->arch.ic);
899 break;
900 case KVM_REG_PPC_VTB:
901 *val = get_reg_val(id, vcpu->arch.vtb);
902 break;
903 case KVM_REG_PPC_CSIGR: 988 case KVM_REG_PPC_CSIGR:
904 *val = get_reg_val(id, vcpu->arch.csigr); 989 *val = get_reg_val(id, vcpu->arch.csigr);
905 break; 990 break;
@@ -939,6 +1024,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
939 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); 1024 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
940 break; 1025 break;
941 case KVM_REG_PPC_LPCR: 1026 case KVM_REG_PPC_LPCR:
1027 case KVM_REG_PPC_LPCR_64:
942 *val = get_reg_val(id, vcpu->arch.vcore->lpcr); 1028 *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
943 break; 1029 break;
944 case KVM_REG_PPC_PPR: 1030 case KVM_REG_PPC_PPR:
@@ -1094,12 +1180,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1094 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) 1180 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
1095 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ 1181 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */
1096 break; 1182 break;
1097 case KVM_REG_PPC_IC:
1098 vcpu->arch.ic = set_reg_val(id, *val);
1099 break;
1100 case KVM_REG_PPC_VTB:
1101 vcpu->arch.vtb = set_reg_val(id, *val);
1102 break;
1103 case KVM_REG_PPC_CSIGR: 1183 case KVM_REG_PPC_CSIGR:
1104 vcpu->arch.csigr = set_reg_val(id, *val); 1184 vcpu->arch.csigr = set_reg_val(id, *val);
1105 break; 1185 break;
@@ -1150,7 +1230,10 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1150 ALIGN(set_reg_val(id, *val), 1UL << 24); 1230 ALIGN(set_reg_val(id, *val), 1UL << 24);
1151 break; 1231 break;
1152 case KVM_REG_PPC_LPCR: 1232 case KVM_REG_PPC_LPCR:
1153 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val)); 1233 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
1234 break;
1235 case KVM_REG_PPC_LPCR_64:
1236 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
1154 break; 1237 break;
1155 case KVM_REG_PPC_PPR: 1238 case KVM_REG_PPC_PPR:
1156 vcpu->arch.ppr = set_reg_val(id, *val); 1239 vcpu->arch.ppr = set_reg_val(id, *val);
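With the preserve_top32 flag added to kvmppc_set_lpcr() above, the old KVM_REG_PPC_LPCR one-reg id keeps its historical behaviour of leaving the top 32 bits of LPCR untouched, while the new KVM_REG_PPC_LPCR_64 id updates the full register. A userspace sketch of the new id is below; kvm_one_reg and KVM_SET_ONE_REG are the standard KVM interfaces, and the register id itself is the one added by this patch.

/* Userspace sketch: set the full 64-bit LPCR through KVM_REG_PPC_LPCR_64. */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int example_set_lpcr64(int vcpu_fd, uint64_t lpcr)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_LPCR_64,
		.addr = (uintptr_t)&lpcr,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}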
@@ -1228,6 +1311,33 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1228 return r; 1311 return r;
1229} 1312}
1230 1313
1314static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
1315{
1316 struct kvmppc_vcore *vcore;
1317
1318 vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
1319
1320 if (vcore == NULL)
1321 return NULL;
1322
1323 INIT_LIST_HEAD(&vcore->runnable_threads);
1324 spin_lock_init(&vcore->lock);
1325 init_waitqueue_head(&vcore->wq);
1326 vcore->preempt_tb = TB_NIL;
1327 vcore->lpcr = kvm->arch.lpcr;
1328 vcore->first_vcpuid = core * threads_per_subcore;
1329 vcore->kvm = kvm;
1330
1331 vcore->mpp_buffer_is_valid = false;
1332
1333 if (cpu_has_feature(CPU_FTR_ARCH_207S))
1334 vcore->mpp_buffer = (void *)__get_free_pages(
1335 GFP_KERNEL|__GFP_ZERO,
1336 MPP_BUFFER_ORDER);
1337
1338 return vcore;
1339}
1340
1231static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, 1341static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
1232 unsigned int id) 1342 unsigned int id)
1233{ 1343{
@@ -1279,16 +1389,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
1279 mutex_lock(&kvm->lock); 1389 mutex_lock(&kvm->lock);
1280 vcore = kvm->arch.vcores[core]; 1390 vcore = kvm->arch.vcores[core];
1281 if (!vcore) { 1391 if (!vcore) {
1282 vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL); 1392 vcore = kvmppc_vcore_create(kvm, core);
1283 if (vcore) {
1284 INIT_LIST_HEAD(&vcore->runnable_threads);
1285 spin_lock_init(&vcore->lock);
1286 init_waitqueue_head(&vcore->wq);
1287 vcore->preempt_tb = TB_NIL;
1288 vcore->lpcr = kvm->arch.lpcr;
1289 vcore->first_vcpuid = core * threads_per_subcore;
1290 vcore->kvm = kvm;
1291 }
1292 kvm->arch.vcores[core] = vcore; 1393 kvm->arch.vcores[core] = vcore;
1293 kvm->arch.online_vcores++; 1394 kvm->arch.online_vcores++;
1294 } 1395 }
@@ -1500,6 +1601,33 @@ static int on_primary_thread(void)
1500 return 1; 1601 return 1;
1501} 1602}
1502 1603
1604static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc)
1605{
1606 phys_addr_t phy_addr, mpp_addr;
1607
1608 phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer);
1609 mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
1610
1611 mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT);
1612 logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);
1613
1614 vc->mpp_buffer_is_valid = true;
1615}
1616
1617static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
1618{
1619 phys_addr_t phy_addr, mpp_addr;
1620
1621 phy_addr = virt_to_phys(vc->mpp_buffer);
1622 mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
1623
1624 /* We must abort any in-progress save operations to ensure
1625 * the table is valid so that prefetch engine knows when to
1626 * stop prefetching. */
1627 logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT);
1628 mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
1629}
1630
1503/* 1631/*
1504 * Run a set of guest threads on a physical core. 1632 * Run a set of guest threads on a physical core.
1505 * Called with vc->lock held. 1633 * Called with vc->lock held.
@@ -1577,9 +1705,16 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
1577 1705
1578 srcu_idx = srcu_read_lock(&vc->kvm->srcu); 1706 srcu_idx = srcu_read_lock(&vc->kvm->srcu);
1579 1707
1708 if (vc->mpp_buffer_is_valid)
1709 kvmppc_start_restoring_l2_cache(vc);
1710
1580 __kvmppc_vcore_entry(); 1711 __kvmppc_vcore_entry();
1581 1712
1582 spin_lock(&vc->lock); 1713 spin_lock(&vc->lock);
1714
1715 if (vc->mpp_buffer)
1716 kvmppc_start_saving_l2_cache(vc);
1717
1583 /* disable sending of IPIs on virtual external irqs */ 1718 /* disable sending of IPIs on virtual external irqs */
1584 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) 1719 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
1585 vcpu->cpu = -1; 1720 vcpu->cpu = -1;
@@ -1929,12 +2064,6 @@ static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
1929 (*sps)->page_shift = def->shift; 2064 (*sps)->page_shift = def->shift;
1930 (*sps)->slb_enc = def->sllp; 2065 (*sps)->slb_enc = def->sllp;
1931 (*sps)->enc[0].page_shift = def->shift; 2066 (*sps)->enc[0].page_shift = def->shift;
1932 /*
1933 * Only return base page encoding. We don't want to return
1934 * all the supporting pte_enc, because our H_ENTER doesn't
1935 * support MPSS yet. Once they do, we can start passing all
1936 * support pte_enc here
1937 */
1938 (*sps)->enc[0].pte_enc = def->penc[linux_psize]; 2067 (*sps)->enc[0].pte_enc = def->penc[linux_psize];
1939 /* 2068 /*
1940 * Add 16MB MPSS support if host supports it 2069 * Add 16MB MPSS support if host supports it
@@ -2281,6 +2410,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
2281 */ 2410 */
2282 cpumask_setall(&kvm->arch.need_tlb_flush); 2411 cpumask_setall(&kvm->arch.need_tlb_flush);
2283 2412
2413 /* Start out with the default set of hcalls enabled */
2414 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
2415 sizeof(kvm->arch.enabled_hcalls));
2416
2284 kvm->arch.rma = NULL; 2417 kvm->arch.rma = NULL;
2285 2418
2286 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); 2419 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
@@ -2323,8 +2456,14 @@ static void kvmppc_free_vcores(struct kvm *kvm)
2323{ 2456{
2324 long int i; 2457 long int i;
2325 2458
2326 for (i = 0; i < KVM_MAX_VCORES; ++i) 2459 for (i = 0; i < KVM_MAX_VCORES; ++i) {
2460 if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) {
2461 struct kvmppc_vcore *vc = kvm->arch.vcores[i];
2462 free_pages((unsigned long)vc->mpp_buffer,
2463 MPP_BUFFER_ORDER);
2464 }
2327 kfree(kvm->arch.vcores[i]); 2465 kfree(kvm->arch.vcores[i]);
2466 }
2328 kvm->arch.online_vcores = 0; 2467 kvm->arch.online_vcores = 0;
2329} 2468}
2330 2469
@@ -2419,6 +2558,49 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp,
2419 return r; 2558 return r;
2420} 2559}
2421 2560
2561/*
2562 * List of hcall numbers to enable by default.
2563 * For compatibility with old userspace, we enable by default
2564 * all hcalls that were implemented before the hcall-enabling
2565 * facility was added. Note this list should not include H_RTAS.
2566 */
2567static unsigned int default_hcall_list[] = {
2568 H_REMOVE,
2569 H_ENTER,
2570 H_READ,
2571 H_PROTECT,
2572 H_BULK_REMOVE,
2573 H_GET_TCE,
2574 H_PUT_TCE,
2575 H_SET_DABR,
2576 H_SET_XDABR,
2577 H_CEDE,
2578 H_PROD,
2579 H_CONFER,
2580 H_REGISTER_VPA,
2581#ifdef CONFIG_KVM_XICS
2582 H_EOI,
2583 H_CPPR,
2584 H_IPI,
2585 H_IPOLL,
2586 H_XIRR,
2587 H_XIRR_X,
2588#endif
2589 0
2590};
2591
2592static void init_default_hcalls(void)
2593{
2594 int i;
2595 unsigned int hcall;
2596
2597 for (i = 0; default_hcall_list[i]; ++i) {
2598 hcall = default_hcall_list[i];
2599 WARN_ON(!kvmppc_hcall_impl_hv(hcall));
2600 __set_bit(hcall / 4, default_enabled_hcalls);
2601 }
2602}
2603
2422static struct kvmppc_ops kvm_ops_hv = { 2604static struct kvmppc_ops kvm_ops_hv = {
2423 .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, 2605 .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
2424 .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, 2606 .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
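Together with default_enabled_hcalls/init_default_hcalls() above and the test_bit() gate added to kvmppc_pseries_do_hcall(), this gives userspace per-VM control over which sPAPR hypercalls the kernel handles; anything disabled is deflected to the VMM. A sketch of how a VMM might toggle one hcall is below; it assumes the KVM_CAP_PPC_ENABLE_HCALL capability from this series with args[0] holding the hcall number and args[1] the enable flag, which is not shown in this diff.

/* Userspace sketch: disable in-kernel handling of H_REGISTER_VPA for a VM,
 * forcing those hcalls out to the VMM.  The capability name and argument
 * layout are assumptions taken from this series' uapi, not from this hunk.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int example_disable_hcall(int vm_fd, unsigned long hcall)
{
	struct kvm_enable_cap cap = {
		.cap  = KVM_CAP_PPC_ENABLE_HCALL,
		.args = { hcall, 0 /* 0 = disable, 1 = enable */ },
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}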
@@ -2451,6 +2633,7 @@ static struct kvmppc_ops kvm_ops_hv = {
2451 .emulate_mfspr = kvmppc_core_emulate_mfspr_hv, 2633 .emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
2452 .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv, 2634 .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
2453 .arch_vm_ioctl = kvm_arch_vm_ioctl_hv, 2635 .arch_vm_ioctl = kvm_arch_vm_ioctl_hv,
2636 .hcall_implemented = kvmppc_hcall_impl_hv,
2454}; 2637};
2455 2638
2456static int kvmppc_book3s_init_hv(void) 2639static int kvmppc_book3s_init_hv(void)
@@ -2466,6 +2649,8 @@ static int kvmppc_book3s_init_hv(void)
2466 kvm_ops_hv.owner = THIS_MODULE; 2649 kvm_ops_hv.owner = THIS_MODULE;
2467 kvmppc_hv_ops = &kvm_ops_hv; 2650 kvmppc_hv_ops = &kvm_ops_hv;
2468 2651
2652 init_default_hcalls();
2653
2469 r = kvmppc_mmu_hv_init(); 2654 r = kvmppc_mmu_hv_init();
2470 return r; 2655 return r;
2471} 2656}
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 7cde8a665205..3b41447482e5 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -212,3 +212,16 @@ bool kvm_hv_mode_active(void)
212{ 212{
213 return atomic_read(&hv_vm_count) != 0; 213 return atomic_read(&hv_vm_count) != 0;
214} 214}
215
216extern int hcall_real_table[], hcall_real_table_end[];
217
218int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
219{
220 cmd /= 4;
221 if (cmd < hcall_real_table_end - hcall_real_table &&
222 hcall_real_table[cmd])
223 return 1;
224
225 return 0;
226}
227EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 3a5c568b1e89..d562c8e2bc30 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -45,14 +45,14 @@ static void reload_slb(struct kvm_vcpu *vcpu)
45 return; 45 return;
46 46
47 /* Sanity check */ 47 /* Sanity check */
48 n = min_t(u32, slb->persistent, SLB_MIN_SIZE); 48 n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE);
49 if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end) 49 if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end)
50 return; 50 return;
51 51
52 /* Load up the SLB from that */ 52 /* Load up the SLB from that */
53 for (i = 0; i < n; ++i) { 53 for (i = 0; i < n; ++i) {
54 unsigned long rb = slb->save_area[i].esid; 54 unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
55 unsigned long rs = slb->save_area[i].vsid; 55 unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);
56 56
57 rb = (rb & ~0xFFFul) | i; /* insert entry number */ 57 rb = (rb & ~0xFFFul) | i; /* insert entry number */
58 asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb)); 58 asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 5a24d3c2b6b8..084ad54c73cd 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -154,10 +154,10 @@ static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
154 return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift); 154 return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
155} 155}
156 156
157static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v) 157static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
158{ 158{
159 asm volatile(PPC_RELEASE_BARRIER "" : : : "memory"); 159 asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
160 hpte[0] = hpte_v; 160 hpte[0] = cpu_to_be64(hpte_v);
161} 161}
162 162
163long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, 163long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
@@ -166,7 +166,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
166{ 166{
167 unsigned long i, pa, gpa, gfn, psize; 167 unsigned long i, pa, gpa, gfn, psize;
168 unsigned long slot_fn, hva; 168 unsigned long slot_fn, hva;
169 unsigned long *hpte; 169 __be64 *hpte;
170 struct revmap_entry *rev; 170 struct revmap_entry *rev;
171 unsigned long g_ptel; 171 unsigned long g_ptel;
172 struct kvm_memory_slot *memslot; 172 struct kvm_memory_slot *memslot;
@@ -275,9 +275,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
275 return H_PARAMETER; 275 return H_PARAMETER;
276 if (likely((flags & H_EXACT) == 0)) { 276 if (likely((flags & H_EXACT) == 0)) {
277 pte_index &= ~7UL; 277 pte_index &= ~7UL;
278 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); 278 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
279 for (i = 0; i < 8; ++i) { 279 for (i = 0; i < 8; ++i) {
280 if ((*hpte & HPTE_V_VALID) == 0 && 280 if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
281 try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID | 281 try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
282 HPTE_V_ABSENT)) 282 HPTE_V_ABSENT))
283 break; 283 break;
@@ -292,11 +292,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
292 */ 292 */
293 hpte -= 16; 293 hpte -= 16;
294 for (i = 0; i < 8; ++i) { 294 for (i = 0; i < 8; ++i) {
295 u64 pte;
295 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) 296 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
296 cpu_relax(); 297 cpu_relax();
297 if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT))) 298 pte = be64_to_cpu(*hpte);
299 if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
298 break; 300 break;
299 *hpte &= ~HPTE_V_HVLOCK; 301 *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
300 hpte += 2; 302 hpte += 2;
301 } 303 }
302 if (i == 8) 304 if (i == 8)
@@ -304,14 +306,17 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
304 } 306 }
305 pte_index += i; 307 pte_index += i;
306 } else { 308 } else {
307 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); 309 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
308 if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID | 310 if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
309 HPTE_V_ABSENT)) { 311 HPTE_V_ABSENT)) {
310 /* Lock the slot and check again */ 312 /* Lock the slot and check again */
313 u64 pte;
314
311 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) 315 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
312 cpu_relax(); 316 cpu_relax();
313 if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) { 317 pte = be64_to_cpu(*hpte);
314 *hpte &= ~HPTE_V_HVLOCK; 318 if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
319 *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
315 return H_PTEG_FULL; 320 return H_PTEG_FULL;
316 } 321 }
317 } 322 }
@@ -347,11 +352,11 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
347 } 352 }
348 } 353 }
349 354
350 hpte[1] = ptel; 355 hpte[1] = cpu_to_be64(ptel);
351 356
352 /* Write the first HPTE dword, unlocking the HPTE and making it valid */ 357 /* Write the first HPTE dword, unlocking the HPTE and making it valid */
353 eieio(); 358 eieio();
354 hpte[0] = pteh; 359 hpte[0] = cpu_to_be64(pteh);
355 asm volatile("ptesync" : : : "memory"); 360 asm volatile("ptesync" : : : "memory");
356 361
357 *pte_idx_ret = pte_index; 362 *pte_idx_ret = pte_index;
@@ -468,30 +473,35 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
468 unsigned long pte_index, unsigned long avpn, 473 unsigned long pte_index, unsigned long avpn,
469 unsigned long *hpret) 474 unsigned long *hpret)
470{ 475{
471 unsigned long *hpte; 476 __be64 *hpte;
472 unsigned long v, r, rb; 477 unsigned long v, r, rb;
473 struct revmap_entry *rev; 478 struct revmap_entry *rev;
479 u64 pte;
474 480
475 if (pte_index >= kvm->arch.hpt_npte) 481 if (pte_index >= kvm->arch.hpt_npte)
476 return H_PARAMETER; 482 return H_PARAMETER;
477 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); 483 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
478 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) 484 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
479 cpu_relax(); 485 cpu_relax();
480 if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 || 486 pte = be64_to_cpu(hpte[0]);
481 ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) || 487 if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
482 ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) { 488 ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
483 hpte[0] &= ~HPTE_V_HVLOCK; 489 ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
490 hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
484 return H_NOT_FOUND; 491 return H_NOT_FOUND;
485 } 492 }
486 493
487 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); 494 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
488 v = hpte[0] & ~HPTE_V_HVLOCK; 495 v = pte & ~HPTE_V_HVLOCK;
489 if (v & HPTE_V_VALID) { 496 if (v & HPTE_V_VALID) {
490 hpte[0] &= ~HPTE_V_VALID; 497 u64 pte1;
491 rb = compute_tlbie_rb(v, hpte[1], pte_index); 498
499 pte1 = be64_to_cpu(hpte[1]);
500 hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
501 rb = compute_tlbie_rb(v, pte1, pte_index);
492 do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true); 502 do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
493 /* Read PTE low word after tlbie to get final R/C values */ 503 /* Read PTE low word after tlbie to get final R/C values */
494 remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]); 504 remove_revmap_chain(kvm, pte_index, rev, v, pte1);
495 } 505 }
496 r = rev->guest_rpte & ~HPTE_GR_RESERVED; 506 r = rev->guest_rpte & ~HPTE_GR_RESERVED;
497 note_hpte_modification(kvm, rev); 507 note_hpte_modification(kvm, rev);
@@ -514,12 +524,14 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
514{ 524{
515 struct kvm *kvm = vcpu->kvm; 525 struct kvm *kvm = vcpu->kvm;
516 unsigned long *args = &vcpu->arch.gpr[4]; 526 unsigned long *args = &vcpu->arch.gpr[4];
517 unsigned long *hp, *hptes[4], tlbrb[4]; 527 __be64 *hp, *hptes[4];
528 unsigned long tlbrb[4];
518 long int i, j, k, n, found, indexes[4]; 529 long int i, j, k, n, found, indexes[4];
519 unsigned long flags, req, pte_index, rcbits; 530 unsigned long flags, req, pte_index, rcbits;
520 int global; 531 int global;
521 long int ret = H_SUCCESS; 532 long int ret = H_SUCCESS;
522 struct revmap_entry *rev, *revs[4]; 533 struct revmap_entry *rev, *revs[4];
534 u64 hp0;
523 535
524 global = global_invalidates(kvm, 0); 536 global = global_invalidates(kvm, 0);
525 for (i = 0; i < 4 && ret == H_SUCCESS; ) { 537 for (i = 0; i < 4 && ret == H_SUCCESS; ) {
@@ -542,8 +554,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
542 ret = H_PARAMETER; 554 ret = H_PARAMETER;
543 break; 555 break;
544 } 556 }
545 hp = (unsigned long *) 557 hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4));
546 (kvm->arch.hpt_virt + (pte_index << 4));
547 /* to avoid deadlock, don't spin except for first */ 558 /* to avoid deadlock, don't spin except for first */
548 if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) { 559 if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
549 if (n) 560 if (n)
@@ -552,23 +563,24 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
552 cpu_relax(); 563 cpu_relax();
553 } 564 }
554 found = 0; 565 found = 0;
555 if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) { 566 hp0 = be64_to_cpu(hp[0]);
567 if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
556 switch (flags & 3) { 568 switch (flags & 3) {
557 case 0: /* absolute */ 569 case 0: /* absolute */
558 found = 1; 570 found = 1;
559 break; 571 break;
560 case 1: /* andcond */ 572 case 1: /* andcond */
561 if (!(hp[0] & args[j + 1])) 573 if (!(hp0 & args[j + 1]))
562 found = 1; 574 found = 1;
563 break; 575 break;
564 case 2: /* AVPN */ 576 case 2: /* AVPN */
565 if ((hp[0] & ~0x7fUL) == args[j + 1]) 577 if ((hp0 & ~0x7fUL) == args[j + 1])
566 found = 1; 578 found = 1;
567 break; 579 break;
568 } 580 }
569 } 581 }
570 if (!found) { 582 if (!found) {
571 hp[0] &= ~HPTE_V_HVLOCK; 583 hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
572 args[j] = ((0x90 | flags) << 56) + pte_index; 584 args[j] = ((0x90 | flags) << 56) + pte_index;
573 continue; 585 continue;
574 } 586 }
@@ -577,7 +589,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
577 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); 589 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
578 note_hpte_modification(kvm, rev); 590 note_hpte_modification(kvm, rev);
579 591
580 if (!(hp[0] & HPTE_V_VALID)) { 592 if (!(hp0 & HPTE_V_VALID)) {
581 /* insert R and C bits from PTE */ 593 /* insert R and C bits from PTE */
582 rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); 594 rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
583 args[j] |= rcbits << (56 - 5); 595 args[j] |= rcbits << (56 - 5);
@@ -585,8 +597,10 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
585 continue; 597 continue;
586 } 598 }
587 599
588 hp[0] &= ~HPTE_V_VALID; /* leave it locked */ 600 /* leave it locked */
589 tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index); 601 hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
602 tlbrb[n] = compute_tlbie_rb(be64_to_cpu(hp[0]),
603 be64_to_cpu(hp[1]), pte_index);
590 indexes[n] = j; 604 indexes[n] = j;
591 hptes[n] = hp; 605 hptes[n] = hp;
592 revs[n] = rev; 606 revs[n] = rev;
@@ -605,7 +619,8 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
605 pte_index = args[j] & ((1ul << 56) - 1); 619 pte_index = args[j] & ((1ul << 56) - 1);
606 hp = hptes[k]; 620 hp = hptes[k];
607 rev = revs[k]; 621 rev = revs[k];
608 remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]); 622 remove_revmap_chain(kvm, pte_index, rev,
623 be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
609 rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); 624 rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
610 args[j] |= rcbits << (56 - 5); 625 args[j] |= rcbits << (56 - 5);
611 hp[0] = 0; 626 hp[0] = 0;
@@ -620,23 +635,25 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
620 unsigned long va) 635 unsigned long va)
621{ 636{
622 struct kvm *kvm = vcpu->kvm; 637 struct kvm *kvm = vcpu->kvm;
623 unsigned long *hpte; 638 __be64 *hpte;
624 struct revmap_entry *rev; 639 struct revmap_entry *rev;
625 unsigned long v, r, rb, mask, bits; 640 unsigned long v, r, rb, mask, bits;
641 u64 pte;
626 642
627 if (pte_index >= kvm->arch.hpt_npte) 643 if (pte_index >= kvm->arch.hpt_npte)
628 return H_PARAMETER; 644 return H_PARAMETER;
629 645
630 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); 646 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
631 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) 647 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
632 cpu_relax(); 648 cpu_relax();
633 if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 || 649 pte = be64_to_cpu(hpte[0]);
634 ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) { 650 if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
635 hpte[0] &= ~HPTE_V_HVLOCK; 651 ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
652 hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
636 return H_NOT_FOUND; 653 return H_NOT_FOUND;
637 } 654 }
638 655
639 v = hpte[0]; 656 v = pte;
640 bits = (flags << 55) & HPTE_R_PP0; 657 bits = (flags << 55) & HPTE_R_PP0;
641 bits |= (flags << 48) & HPTE_R_KEY_HI; 658 bits |= (flags << 48) & HPTE_R_KEY_HI;
642 bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO); 659 bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
@@ -650,12 +667,12 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
650 rev->guest_rpte = r; 667 rev->guest_rpte = r;
651 note_hpte_modification(kvm, rev); 668 note_hpte_modification(kvm, rev);
652 } 669 }
653 r = (hpte[1] & ~mask) | bits; 670 r = (be64_to_cpu(hpte[1]) & ~mask) | bits;
654 671
655 /* Update HPTE */ 672 /* Update HPTE */
656 if (v & HPTE_V_VALID) { 673 if (v & HPTE_V_VALID) {
657 rb = compute_tlbie_rb(v, r, pte_index); 674 rb = compute_tlbie_rb(v, r, pte_index);
658 hpte[0] = v & ~HPTE_V_VALID; 675 hpte[0] = cpu_to_be64(v & ~HPTE_V_VALID);
659 do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true); 676 do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
660 /* 677 /*
661 * If the host has this page as readonly but the guest 678 * If the host has this page as readonly but the guest
@@ -681,9 +698,9 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
681 } 698 }
682 } 699 }
683 } 700 }
684 hpte[1] = r; 701 hpte[1] = cpu_to_be64(r);
685 eieio(); 702 eieio();
686 hpte[0] = v & ~HPTE_V_HVLOCK; 703 hpte[0] = cpu_to_be64(v & ~HPTE_V_HVLOCK);
687 asm volatile("ptesync" : : : "memory"); 704 asm volatile("ptesync" : : : "memory");
688 return H_SUCCESS; 705 return H_SUCCESS;
689} 706}
@@ -692,7 +709,8 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
692 unsigned long pte_index) 709 unsigned long pte_index)
693{ 710{
694 struct kvm *kvm = vcpu->kvm; 711 struct kvm *kvm = vcpu->kvm;
695 unsigned long *hpte, v, r; 712 __be64 *hpte;
713 unsigned long v, r;
696 int i, n = 1; 714 int i, n = 1;
697 struct revmap_entry *rev = NULL; 715 struct revmap_entry *rev = NULL;
698 716
@@ -704,9 +722,9 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
704 } 722 }
705 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); 723 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
706 for (i = 0; i < n; ++i, ++pte_index) { 724 for (i = 0; i < n; ++i, ++pte_index) {
707 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); 725 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
708 v = hpte[0] & ~HPTE_V_HVLOCK; 726 v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
709 r = hpte[1]; 727 r = be64_to_cpu(hpte[1]);
710 if (v & HPTE_V_ABSENT) { 728 if (v & HPTE_V_ABSENT) {
711 v &= ~HPTE_V_ABSENT; 729 v &= ~HPTE_V_ABSENT;
712 v |= HPTE_V_VALID; 730 v |= HPTE_V_VALID;
@@ -721,25 +739,27 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
721 return H_SUCCESS; 739 return H_SUCCESS;
722} 740}
723 741
724void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep, 742void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
725 unsigned long pte_index) 743 unsigned long pte_index)
726{ 744{
727 unsigned long rb; 745 unsigned long rb;
728 746
729 hptep[0] &= ~HPTE_V_VALID; 747 hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
730 rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index); 748 rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
749 pte_index);
731 do_tlbies(kvm, &rb, 1, 1, true); 750 do_tlbies(kvm, &rb, 1, 1, true);
732} 751}
733EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte); 752EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
734 753
735void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep, 754void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
736 unsigned long pte_index) 755 unsigned long pte_index)
737{ 756{
738 unsigned long rb; 757 unsigned long rb;
739 unsigned char rbyte; 758 unsigned char rbyte;
740 759
741 rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index); 760 rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
742 rbyte = (hptep[1] & ~HPTE_R_R) >> 8; 761 pte_index);
762 rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
743 /* modify only the second-last byte, which contains the ref bit */ 763 /* modify only the second-last byte, which contains the ref bit */
744 *((char *)hptep + 14) = rbyte; 764 *((char *)hptep + 14) = rbyte;
745 do_tlbies(kvm, &rb, 1, 1, false); 765 do_tlbies(kvm, &rb, 1, 1, false);
@@ -765,7 +785,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
765 unsigned long somask; 785 unsigned long somask;
766 unsigned long vsid, hash; 786 unsigned long vsid, hash;
767 unsigned long avpn; 787 unsigned long avpn;
768 unsigned long *hpte; 788 __be64 *hpte;
769 unsigned long mask, val; 789 unsigned long mask, val;
770 unsigned long v, r; 790 unsigned long v, r;
771 791
@@ -797,11 +817,11 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
797 val |= avpn; 817 val |= avpn;
798 818
799 for (;;) { 819 for (;;) {
800 hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7)); 820 hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7));
801 821
802 for (i = 0; i < 16; i += 2) { 822 for (i = 0; i < 16; i += 2) {
803 /* Read the PTE racily */ 823 /* Read the PTE racily */
804 v = hpte[i] & ~HPTE_V_HVLOCK; 824 v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
805 825
806 /* Check valid/absent, hash, segment size and AVPN */ 826 /* Check valid/absent, hash, segment size and AVPN */
807 if (!(v & valid) || (v & mask) != val) 827 if (!(v & valid) || (v & mask) != val)
@@ -810,8 +830,8 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
810 /* Lock the PTE and read it under the lock */ 830 /* Lock the PTE and read it under the lock */
811 while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK)) 831 while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
812 cpu_relax(); 832 cpu_relax();
813 v = hpte[i] & ~HPTE_V_HVLOCK; 833 v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
814 r = hpte[i+1]; 834 r = be64_to_cpu(hpte[i+1]);
815 835
816 /* 836 /*
817 * Check the HPTE again, including base page size 837 * Check the HPTE again, including base page size
@@ -822,7 +842,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
822 return (hash << 3) + (i >> 1); 842 return (hash << 3) + (i >> 1);
823 843
824 /* Unlock and move on */ 844 /* Unlock and move on */
825 hpte[i] = v; 845 hpte[i] = cpu_to_be64(v);
826 } 846 }
827 847
828 if (val & HPTE_V_SECONDARY) 848 if (val & HPTE_V_SECONDARY)
@@ -851,7 +871,7 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
851 struct kvm *kvm = vcpu->kvm; 871 struct kvm *kvm = vcpu->kvm;
852 long int index; 872 long int index;
853 unsigned long v, r, gr; 873 unsigned long v, r, gr;
854 unsigned long *hpte; 874 __be64 *hpte;
855 unsigned long valid; 875 unsigned long valid;
856 struct revmap_entry *rev; 876 struct revmap_entry *rev;
857 unsigned long pp, key; 877 unsigned long pp, key;
@@ -867,9 +887,9 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
867 return status; /* there really was no HPTE */ 887 return status; /* there really was no HPTE */
868 return 0; /* for prot fault, HPTE disappeared */ 888 return 0; /* for prot fault, HPTE disappeared */
869 } 889 }
870 hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); 890 hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
871 v = hpte[0] & ~HPTE_V_HVLOCK; 891 v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
872 r = hpte[1]; 892 r = be64_to_cpu(hpte[1]);
873 rev = real_vmalloc_addr(&kvm->arch.revmap[index]); 893 rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
874 gr = rev->guest_rpte; 894 gr = rev->guest_rpte;
875 895
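The HPT lives in guest memory in big-endian format, so once the host can be little-endian every access to the v/r doublewords above has to go through be64_to_cpu()/cpu_to_be64(). A minimal user-space sketch of that access pattern, using the glibc byte-order helpers instead of the kernel ones and a stand-in HPTE_V_VALID bit chosen purely for illustration:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define HPTE_V_VALID 0x1ULL	/* stand-in bit, not the authoritative layout */

int main(void)
{
	uint64_t hpte[2];	/* v and r words, kept big-endian in memory */
	uint64_t v;

	hpte[0] = htobe64(0x123456789abcdef0ULL | HPTE_V_VALID);
	hpte[1] = htobe64(0x00000000deadb00fULL);

	v = be64toh(hpte[0]);	/* convert to host order before testing bits */
	printf("valid=%d v=%#llx r=%#llx\n", (int)(v & HPTE_V_VALID),
	       (unsigned long long)v, (unsigned long long)be64toh(hpte[1]));

	hpte[0] = htobe64(v & ~HPTE_V_VALID);	/* clear the bit, store back BE */
	return 0;
}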
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 558a67df8126..855521ef04e8 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -32,10 +32,6 @@
32 32
33#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) 33#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
34 34
35#ifdef __LITTLE_ENDIAN__
36#error Need to fix lppaca and SLB shadow accesses in little endian mode
37#endif
38
39/* Values in HSTATE_NAPPING(r13) */ 35/* Values in HSTATE_NAPPING(r13) */
40#define NAPPING_CEDE 1 36#define NAPPING_CEDE 1
41#define NAPPING_NOVCPU 2 37#define NAPPING_NOVCPU 2
@@ -595,9 +591,10 @@ kvmppc_got_guest:
595 ld r3, VCPU_VPA(r4) 591 ld r3, VCPU_VPA(r4)
596 cmpdi r3, 0 592 cmpdi r3, 0
597 beq 25f 593 beq 25f
598 lwz r5, LPPACA_YIELDCOUNT(r3) 594 li r6, LPPACA_YIELDCOUNT
595 LWZX_BE r5, r3, r6
599 addi r5, r5, 1 596 addi r5, r5, 1
600 stw r5, LPPACA_YIELDCOUNT(r3) 597 STWX_BE r5, r3, r6
601 li r6, 1 598 li r6, 1
602 stb r6, VCPU_VPA_DIRTY(r4) 599 stb r6, VCPU_VPA_DIRTY(r4)
60325: 60025:
@@ -671,9 +668,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM)
671 668
672 mr r31, r4 669 mr r31, r4
673 addi r3, r31, VCPU_FPRS_TM 670 addi r3, r31, VCPU_FPRS_TM
674 bl .load_fp_state 671 bl load_fp_state
675 addi r3, r31, VCPU_VRS_TM 672 addi r3, r31, VCPU_VRS_TM
676 bl .load_vr_state 673 bl load_vr_state
677 mr r4, r31 674 mr r4, r31
678 lwz r7, VCPU_VRSAVE_TM(r4) 675 lwz r7, VCPU_VRSAVE_TM(r4)
679 mtspr SPRN_VRSAVE, r7 676 mtspr SPRN_VRSAVE, r7
@@ -1417,9 +1414,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM)
1417 1414
1418 /* Save FP/VSX. */ 1415 /* Save FP/VSX. */
1419 addi r3, r9, VCPU_FPRS_TM 1416 addi r3, r9, VCPU_FPRS_TM
1420 bl .store_fp_state 1417 bl store_fp_state
1421 addi r3, r9, VCPU_VRS_TM 1418 addi r3, r9, VCPU_VRS_TM
1422 bl .store_vr_state 1419 bl store_vr_state
1423 mfspr r6, SPRN_VRSAVE 1420 mfspr r6, SPRN_VRSAVE
1424 stw r6, VCPU_VRSAVE_TM(r9) 1421 stw r6, VCPU_VRSAVE_TM(r9)
14251: 14221:
@@ -1442,9 +1439,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM)
1442 ld r8, VCPU_VPA(r9) /* do they have a VPA? */ 1439 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1443 cmpdi r8, 0 1440 cmpdi r8, 0
1444 beq 25f 1441 beq 25f
1445 lwz r3, LPPACA_YIELDCOUNT(r8) 1442 li r4, LPPACA_YIELDCOUNT
1443 LWZX_BE r3, r8, r4
1446 addi r3, r3, 1 1444 addi r3, r3, 1
1447 stw r3, LPPACA_YIELDCOUNT(r8) 1445 STWX_BE r3, r8, r4
1448 li r3, 1 1446 li r3, 1
1449 stb r3, VCPU_VPA_DIRTY(r9) 1447 stb r3, VCPU_VPA_DIRTY(r9)
145025: 144825:
@@ -1757,8 +1755,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
175733: ld r8,PACA_SLBSHADOWPTR(r13) 175533: ld r8,PACA_SLBSHADOWPTR(r13)
1758 1756
1759 .rept SLB_NUM_BOLTED 1757 .rept SLB_NUM_BOLTED
1760 ld r5,SLBSHADOW_SAVEAREA(r8) 1758 li r3, SLBSHADOW_SAVEAREA
1761 ld r6,SLBSHADOW_SAVEAREA+8(r8) 1759 LDX_BE r5, r8, r3
1760 addi r3, r3, 8
1761 LDX_BE r6, r8, r3
1762 andis. r7,r5,SLB_ESID_V@h 1762 andis. r7,r5,SLB_ESID_V@h
1763 beq 1f 1763 beq 1f
1764 slbmte r6,r5 1764 slbmte r6,r5
@@ -1909,12 +1909,23 @@ hcall_try_real_mode:
1909 clrrdi r3,r3,2 1909 clrrdi r3,r3,2
1910 cmpldi r3,hcall_real_table_end - hcall_real_table 1910 cmpldi r3,hcall_real_table_end - hcall_real_table
1911 bge guest_exit_cont 1911 bge guest_exit_cont
1912 /* See if this hcall is enabled for in-kernel handling */
1913 ld r4, VCPU_KVM(r9)
1914 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */
1915 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */
1916 add r4, r4, r0
1917 ld r0, KVM_ENABLED_HCALLS(r4)
1918 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */
1919 srd r0, r0, r4
1920 andi. r0, r0, 1
1921 beq guest_exit_cont
1922 /* Get pointer to handler, if any, and call it */
1912 LOAD_REG_ADDR(r4, hcall_real_table) 1923 LOAD_REG_ADDR(r4, hcall_real_table)
1913 lwax r3,r3,r4 1924 lwax r3,r3,r4
1914 cmpwi r3,0 1925 cmpwi r3,0
1915 beq guest_exit_cont 1926 beq guest_exit_cont
1916 add r3,r3,r4 1927 add r12,r3,r4
1917 mtctr r3 1928 mtctr r12
1918 mr r3,r9 /* get vcpu pointer */ 1929 mr r3,r9 /* get vcpu pointer */
1919 ld r4,VCPU_GPR(R4)(r9) 1930 ld r4,VCPU_GPR(R4)(r9)
1920 bctrl 1931 bctrl
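The new real-mode check above indexes kvm->arch.enabled_hcalls[] as a bitmap with one bit per possible hcall: hcall numbers are multiples of 4, so the number divided by 4 selects the bit, and that bit index is then split into a 64-bit word index and a bit position. The equivalent logic in C, as a self-contained sketch (the array size and sample hcall numbers are placeholders, not the kernel's definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HCALL 0x450			/* placeholder upper bound */

static uint64_t enabled_hcalls[(MAX_HCALL / 4 + 63) / 64];

static void enable_hcall(unsigned long hcall)
{
	unsigned long bit = hcall / 4;	/* hcall numbers step by 4 */

	enabled_hcalls[bit / 64] |= 1ULL << (bit % 64);
}

static bool hcall_enabled(unsigned long hcall)
{
	unsigned long bit = hcall / 4;

	return (enabled_hcalls[bit / 64] >> (bit % 64)) & 1;
}

int main(void)
{
	enable_hcall(0x08);	/* sample number only */
	printf("0x08: %d, 0x0c: %d\n", hcall_enabled(0x08), hcall_enabled(0x0c));
	return 0;
}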
@@ -2031,6 +2042,7 @@ hcall_real_table:
2031 .long 0 /* 0x12c */ 2042 .long 0 /* 0x12c */
2032 .long 0 /* 0x130 */ 2043 .long 0 /* 0x130 */
2033 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table 2044 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
2045 .globl hcall_real_table_end
2034hcall_real_table_end: 2046hcall_real_table_end:
2035 2047
2036ignore_hdec: 2048ignore_hdec:
@@ -2338,7 +2350,18 @@ kvmppc_read_intr:
2338 cmpdi r6, 0 2350 cmpdi r6, 0
2339 beq- 1f 2351 beq- 1f
2340 lwzcix r0, r6, r7 2352 lwzcix r0, r6, r7
2341 rlwinm. r3, r0, 0, 0xffffff 2353 /*
 2354 * Save XIRR for later. Since the value arrives byte-reversed on LE
2355 * systems, save it byte reversed and fetch it back in host endian.
2356 */
2357 li r3, HSTATE_SAVED_XIRR
2358 STWX_BE r0, r3, r13
2359#ifdef __LITTLE_ENDIAN__
2360 lwz r3, HSTATE_SAVED_XIRR(r13)
2361#else
2362 mr r3, r0
2363#endif
2364 rlwinm. r3, r3, 0, 0xffffff
2342 sync 2365 sync
2343 beq 1f /* if nothing pending in the ICP */ 2366 beq 1f /* if nothing pending in the ICP */
2344 2367
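The trick above relies on a byte-reversing store followed by a plain load being equivalent to one byte swap: the caching-inhibited lwzcix hands back the big-endian XIRR byte-reversed on an LE host, STWX_BE reverses it again on the way into HSTATE_SAVED_XIRR, and the ordinary lwz then sees a host-endian value (on BE both steps degenerate to plain stores and loads). A rough user-space model of that round trip:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Model of a byte-reversing 32-bit store (what STWX_BE does on an LE host). */
static void store_byte_reversed(uint32_t val, uint32_t *dst)
{
	uint32_t swapped = __builtin_bswap32(val);

	memcpy(dst, &swapped, sizeof(swapped));
}

int main(void)
{
	/* Pretend a non-swapping device load handed us a byte-reversed XIRR. */
	uint32_t raw = __builtin_bswap32(0x00ff0010);
	uint32_t slot, xirr;

	store_byte_reversed(raw, &slot);	/* save area, HSTATE_SAVED_XIRR */
	memcpy(&xirr, &slot, sizeof(xirr));	/* plain reload in host order */

	printf("XIRR %#x, pending source %#x\n", xirr, xirr & 0xffffff);
	return 0;
}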
@@ -2370,10 +2393,9 @@ kvmppc_read_intr:
2370 li r3, -1 2393 li r3, -1
23711: blr 23941: blr
2372 2395
237342: /* It's not an IPI and it's for the host, stash it in the PACA 239642: /* It's not an IPI and it's for the host. We saved a copy of XIRR in
 2374 * before exit, it will be picked up by the host ICP driver 2397 * the PACA earlier; it will be picked up by the host ICP driver
2375 */ 2398 */
2376 stw r0, HSTATE_SAVED_XIRR(r13)
2377 li r3, 1 2399 li r3, 1
2378 b 1b 2400 b 1b
2379 2401
@@ -2408,11 +2430,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2408 mtmsrd r8 2430 mtmsrd r8
2409 isync 2431 isync
2410 addi r3,r3,VCPU_FPRS 2432 addi r3,r3,VCPU_FPRS
2411 bl .store_fp_state 2433 bl store_fp_state
2412#ifdef CONFIG_ALTIVEC 2434#ifdef CONFIG_ALTIVEC
2413BEGIN_FTR_SECTION 2435BEGIN_FTR_SECTION
2414 addi r3,r31,VCPU_VRS 2436 addi r3,r31,VCPU_VRS
2415 bl .store_vr_state 2437 bl store_vr_state
2416END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2438END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2417#endif 2439#endif
2418 mfspr r6,SPRN_VRSAVE 2440 mfspr r6,SPRN_VRSAVE
@@ -2444,11 +2466,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2444 mtmsrd r8 2466 mtmsrd r8
2445 isync 2467 isync
2446 addi r3,r4,VCPU_FPRS 2468 addi r3,r4,VCPU_FPRS
2447 bl .load_fp_state 2469 bl load_fp_state
2448#ifdef CONFIG_ALTIVEC 2470#ifdef CONFIG_ALTIVEC
2449BEGIN_FTR_SECTION 2471BEGIN_FTR_SECTION
2450 addi r3,r31,VCPU_VRS 2472 addi r3,r31,VCPU_VRS
2451 bl .load_vr_state 2473 bl load_vr_state
2452END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2474END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2453#endif 2475#endif
2454 lwz r7,VCPU_VRSAVE(r31) 2476 lwz r7,VCPU_VRSAVE(r31)
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index 6c8011fd57e6..bfb8035314e3 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -639,26 +639,36 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
639 639
640int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) 640int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
641{ 641{
642 u32 inst = kvmppc_get_last_inst(vcpu); 642 u32 inst;
643 enum emulation_result emulated = EMULATE_DONE; 643 enum emulation_result emulated = EMULATE_DONE;
644 int ax_rd, ax_ra, ax_rb, ax_rc;
645 short full_d;
646 u64 *fpr_d, *fpr_a, *fpr_b, *fpr_c;
644 647
645 int ax_rd = inst_get_field(inst, 6, 10); 648 bool rcomp;
646 int ax_ra = inst_get_field(inst, 11, 15); 649 u32 cr;
647 int ax_rb = inst_get_field(inst, 16, 20);
648 int ax_rc = inst_get_field(inst, 21, 25);
649 short full_d = inst_get_field(inst, 16, 31);
650
651 u64 *fpr_d = &VCPU_FPR(vcpu, ax_rd);
652 u64 *fpr_a = &VCPU_FPR(vcpu, ax_ra);
653 u64 *fpr_b = &VCPU_FPR(vcpu, ax_rb);
654 u64 *fpr_c = &VCPU_FPR(vcpu, ax_rc);
655
656 bool rcomp = (inst & 1) ? true : false;
657 u32 cr = kvmppc_get_cr(vcpu);
658#ifdef DEBUG 650#ifdef DEBUG
659 int i; 651 int i;
660#endif 652#endif
661 653
654 emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
655 if (emulated != EMULATE_DONE)
656 return emulated;
657
658 ax_rd = inst_get_field(inst, 6, 10);
659 ax_ra = inst_get_field(inst, 11, 15);
660 ax_rb = inst_get_field(inst, 16, 20);
661 ax_rc = inst_get_field(inst, 21, 25);
662 full_d = inst_get_field(inst, 16, 31);
663
664 fpr_d = &VCPU_FPR(vcpu, ax_rd);
665 fpr_a = &VCPU_FPR(vcpu, ax_ra);
666 fpr_b = &VCPU_FPR(vcpu, ax_rb);
667 fpr_c = &VCPU_FPR(vcpu, ax_rc);
668
669 rcomp = (inst & 1) ? true : false;
670 cr = kvmppc_get_cr(vcpu);
671
662 if (!kvmppc_inst_is_paired_single(vcpu, inst)) 672 if (!kvmppc_inst_is_paired_single(vcpu, inst))
663 return EMULATE_FAIL; 673 return EMULATE_FAIL;
664 674
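With kvmppc_get_last_inst() now able to fail, the emulator has to fetch first and only then decode the operand fields, which is why all the inst_get_field() initialisers moved below the fetch. The shape of that fetch-then-decode flow, sketched with a stand-in fetch function and PowerPC MSB-0 field extraction:

#include <stdint.h>
#include <stdio.h>

enum emulation_result { EMULATE_DONE, EMULATE_AGAIN, EMULATE_FAIL };

/* Stand-in for kvmppc_get_last_inst(): a real implementation may fail. */
static enum emulation_result get_last_inst(uint32_t *inst)
{
	*inst = 0x10220820;	/* arbitrary example opcode */
	return EMULATE_DONE;
}

/* Extract bits msb..lsb of a 32-bit word in PowerPC numbering (bit 0 = MSB). */
static int inst_get_field(uint32_t inst, int msb, int lsb)
{
	int width = lsb - msb + 1;

	return (inst >> (31 - lsb)) & ((1u << width) - 1);
}

int main(void)
{
	uint32_t inst;

	if (get_last_inst(&inst) != EMULATE_DONE)
		return 1;	/* bail before touching any decoded field */

	printf("rd=%d ra=%d rb=%d rc=%d\n",
	       inst_get_field(inst, 6, 10), inst_get_field(inst, 11, 15),
	       inst_get_field(inst, 16, 20), inst_get_field(inst, 21, 25));
	return 0;
}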
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 8eef1e519077..faffb27badd9 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -62,6 +62,35 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
62#define HW_PAGE_SIZE PAGE_SIZE 62#define HW_PAGE_SIZE PAGE_SIZE
63#endif 63#endif
64 64
65static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
66{
67 ulong msr = kvmppc_get_msr(vcpu);
68 return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
69}
70
71static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
72{
73 ulong msr = kvmppc_get_msr(vcpu);
74 ulong pc = kvmppc_get_pc(vcpu);
75
76 /* We are in DR only split real mode */
77 if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
78 return;
79
 80 /* Nothing to do if we have already fixed up the guest */
81 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
82 return;
83
 84 /* Only fix up code that is in the fixupable address space */
85 if (pc & SPLIT_HACK_MASK)
86 return;
87
88 vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
89 kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
90}
91
92void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu);
93
65static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) 94static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
66{ 95{
67#ifdef CONFIG_PPC_BOOK3S_64 96#ifdef CONFIG_PPC_BOOK3S_64
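Split real mode here means the guest runs with MSR_DR set but MSR_IR clear, so instruction fetches use real addresses while data accesses are translated. The hack relocates the guest PC into a high window so PR KVM can map the fetches, and strips the offset again when translating the resulting fault address. A worked example of that PC transformation, with SPLIT_HACK_MASK/SPLIT_HACK_OFFS values assumed only for illustration (the real constants live in the kernel headers):

#include <stdint.h>
#include <stdio.h>

/* Assumed values, purely for illustration. */
#define SPLIT_HACK_MASK 0xff000000UL
#define SPLIT_HACK_OFFS 0xfb000000UL

int main(void)
{
	uint64_t pc = 0x13cUL;				/* low real-mode fetch address */
	uint64_t fixed = pc | SPLIT_HACK_OFFS;		/* kvmppc_fixup_split_real() */
	uint64_t raddr = fixed & ~SPLIT_HACK_MASK;	/* undone when handling the fault */

	printf("pc %#llx -> fixed %#llx -> raddr %#llx\n",
	       (unsigned long long)pc, (unsigned long long)fixed,
	       (unsigned long long)raddr);
	return 0;
}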
@@ -71,10 +100,19 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
71 svcpu->in_use = 0; 100 svcpu->in_use = 0;
72 svcpu_put(svcpu); 101 svcpu_put(svcpu);
73#endif 102#endif
103
104 /* Disable AIL if supported */
105 if (cpu_has_feature(CPU_FTR_HVMODE) &&
106 cpu_has_feature(CPU_FTR_ARCH_207S))
107 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);
108
74 vcpu->cpu = smp_processor_id(); 109 vcpu->cpu = smp_processor_id();
75#ifdef CONFIG_PPC_BOOK3S_32 110#ifdef CONFIG_PPC_BOOK3S_32
76 current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu; 111 current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
77#endif 112#endif
113
114 if (kvmppc_is_split_real(vcpu))
115 kvmppc_fixup_split_real(vcpu);
78} 116}
79 117
80static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) 118static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
@@ -89,8 +127,17 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
89 svcpu_put(svcpu); 127 svcpu_put(svcpu);
90#endif 128#endif
91 129
130 if (kvmppc_is_split_real(vcpu))
131 kvmppc_unfixup_split_real(vcpu);
132
92 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); 133 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
93 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); 134 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
135
136 /* Enable AIL if supported */
137 if (cpu_has_feature(CPU_FTR_HVMODE) &&
138 cpu_has_feature(CPU_FTR_ARCH_207S))
139 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);
140
94 vcpu->cpu = -1; 141 vcpu->cpu = -1;
95} 142}
96 143
@@ -120,6 +167,14 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
120#ifdef CONFIG_PPC_BOOK3S_64 167#ifdef CONFIG_PPC_BOOK3S_64
121 svcpu->shadow_fscr = vcpu->arch.shadow_fscr; 168 svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
122#endif 169#endif
170 /*
171 * Now also save the current time base value. We use this
 172 * to compute the guest purr and spurr values.
173 */
174 vcpu->arch.entry_tb = get_tb();
175 vcpu->arch.entry_vtb = get_vtb();
176 if (cpu_has_feature(CPU_FTR_ARCH_207S))
177 vcpu->arch.entry_ic = mfspr(SPRN_IC);
123 svcpu->in_use = true; 178 svcpu->in_use = true;
124} 179}
125 180
@@ -166,6 +221,14 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
166#ifdef CONFIG_PPC_BOOK3S_64 221#ifdef CONFIG_PPC_BOOK3S_64
167 vcpu->arch.shadow_fscr = svcpu->shadow_fscr; 222 vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
168#endif 223#endif
224 /*
225 * Update purr and spurr using time base on exit.
226 */
227 vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
228 vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
229 vcpu->arch.vtb += get_vtb() - vcpu->arch.entry_vtb;
230 if (cpu_has_feature(CPU_FTR_ARCH_207S))
231 vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;
169 svcpu->in_use = false; 232 svcpu->in_use = false;
170 233
171out: 234out:
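The PURR/SPURR/VTB emulation for PR guests works by snapshotting the timebase (and VTB/IC where the hardware has them) when the shadow vcpu is entered and adding the elapsed delta back on exit. The accumulation, reduced to a self-contained sketch with fake counters standing in for get_tb()/get_vtb():

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_tb = 1000, fake_vtb = 500;	/* stand-ins for get_tb()/get_vtb() */

struct vcpu_times {
	uint64_t purr, spurr, vtb;
	uint64_t entry_tb, entry_vtb;
};

static void guest_entry(struct vcpu_times *t)
{
	t->entry_tb = fake_tb;
	t->entry_vtb = fake_vtb;
}

static void guest_exit(struct vcpu_times *t)
{
	t->purr += fake_tb - t->entry_tb;	/* PURR/SPURR advance with the TB */
	t->spurr += fake_tb - t->entry_tb;
	t->vtb += fake_vtb - t->entry_vtb;
}

int main(void)
{
	struct vcpu_times t = { 0 };

	guest_entry(&t);
	fake_tb += 123;		/* time passes while the guest runs */
	fake_vtb += 45;
	guest_exit(&t);

	printf("purr=%llu spurr=%llu vtb=%llu\n", (unsigned long long)t.purr,
	       (unsigned long long)t.spurr, (unsigned long long)t.vtb);
	return 0;
}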
@@ -294,6 +357,11 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
294 } 357 }
295 } 358 }
296 359
360 if (kvmppc_is_split_real(vcpu))
361 kvmppc_fixup_split_real(vcpu);
362 else
363 kvmppc_unfixup_split_real(vcpu);
364
297 if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) != 365 if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
298 (old_msr & (MSR_PR|MSR_IR|MSR_DR))) { 366 (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
299 kvmppc_mmu_flush_segments(vcpu); 367 kvmppc_mmu_flush_segments(vcpu);
@@ -443,19 +511,19 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
443 put_page(hpage); 511 put_page(hpage);
444} 512}
445 513
446static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 514static int kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
447{ 515{
448 ulong mp_pa = vcpu->arch.magic_page_pa; 516 ulong mp_pa = vcpu->arch.magic_page_pa;
449 517
450 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) 518 if (!(kvmppc_get_msr(vcpu) & MSR_SF))
451 mp_pa = (uint32_t)mp_pa; 519 mp_pa = (uint32_t)mp_pa;
452 520
453 if (unlikely(mp_pa) && 521 gpa &= ~0xFFFULL;
454 unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) { 522 if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
455 return 1; 523 return 1;
456 } 524 }
457 525
458 return kvm_is_visible_gfn(vcpu->kvm, gfn); 526 return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
459} 527}
460 528
461int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, 529int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
@@ -494,6 +562,11 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
494 pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12)); 562 pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
495 break; 563 break;
496 case MSR_DR: 564 case MSR_DR:
565 if (!data &&
566 (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
567 ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
568 pte.raddr &= ~SPLIT_HACK_MASK;
569 /* fall through */
497 case MSR_IR: 570 case MSR_IR:
498 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); 571 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
499 572
@@ -541,7 +614,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
541 kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); 614 kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
542 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); 615 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
543 } else if (!is_mmio && 616 } else if (!is_mmio &&
544 kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { 617 kvmppc_visible_gpa(vcpu, pte.raddr)) {
545 if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) { 618 if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
546 /* 619 /*
547 * There is already a host HPTE there, presumably 620 * There is already a host HPTE there, presumably
@@ -637,42 +710,6 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
637#endif 710#endif
638} 711}
639 712
640static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
641{
642 ulong srr0 = kvmppc_get_pc(vcpu);
643 u32 last_inst = kvmppc_get_last_inst(vcpu);
644 int ret;
645
646 ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
647 if (ret == -ENOENT) {
648 ulong msr = kvmppc_get_msr(vcpu);
649
650 msr = kvmppc_set_field(msr, 33, 33, 1);
651 msr = kvmppc_set_field(msr, 34, 36, 0);
652 msr = kvmppc_set_field(msr, 42, 47, 0);
653 kvmppc_set_msr_fast(vcpu, msr);
654 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
655 return EMULATE_AGAIN;
656 }
657
658 return EMULATE_DONE;
659}
660
661static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
662{
663
664 /* Need to do paired single emulation? */
665 if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
666 return EMULATE_DONE;
667
668 /* Read out the instruction */
669 if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
670 /* Need to emulate */
671 return EMULATE_FAIL;
672
673 return EMULATE_AGAIN;
674}
675
676/* Handle external providers (FPU, Altivec, VSX) */ 713/* Handle external providers (FPU, Altivec, VSX) */
677static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, 714static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
678 ulong msr) 715 ulong msr)
@@ -834,6 +871,15 @@ static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
834 871
835 return RESUME_GUEST; 872 return RESUME_GUEST;
836} 873}
874
875void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
876{
877 if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
878 /* TAR got dropped, drop it in shadow too */
879 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
880 }
881 vcpu->arch.fscr = fscr;
882}
837#endif 883#endif
838 884
839int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, 885int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
@@ -858,6 +904,9 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
858 ulong shadow_srr1 = vcpu->arch.shadow_srr1; 904 ulong shadow_srr1 = vcpu->arch.shadow_srr1;
859 vcpu->stat.pf_instruc++; 905 vcpu->stat.pf_instruc++;
860 906
907 if (kvmppc_is_split_real(vcpu))
908 kvmppc_fixup_split_real(vcpu);
909
861#ifdef CONFIG_PPC_BOOK3S_32 910#ifdef CONFIG_PPC_BOOK3S_32
862 /* We set segments as unused segments when invalidating them. So 911 /* We set segments as unused segments when invalidating them. So
863 * treat the respective fault as segment fault. */ 912 * treat the respective fault as segment fault. */
@@ -960,6 +1009,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
960 case BOOK3S_INTERRUPT_DECREMENTER: 1009 case BOOK3S_INTERRUPT_DECREMENTER:
961 case BOOK3S_INTERRUPT_HV_DECREMENTER: 1010 case BOOK3S_INTERRUPT_HV_DECREMENTER:
962 case BOOK3S_INTERRUPT_DOORBELL: 1011 case BOOK3S_INTERRUPT_DOORBELL:
1012 case BOOK3S_INTERRUPT_H_DOORBELL:
963 vcpu->stat.dec_exits++; 1013 vcpu->stat.dec_exits++;
964 r = RESUME_GUEST; 1014 r = RESUME_GUEST;
965 break; 1015 break;
@@ -977,15 +1027,24 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
977 { 1027 {
978 enum emulation_result er; 1028 enum emulation_result er;
979 ulong flags; 1029 ulong flags;
1030 u32 last_inst;
1031 int emul;
980 1032
981program_interrupt: 1033program_interrupt:
982 flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; 1034 flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
983 1035
1036 emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
1037 if (emul != EMULATE_DONE) {
1038 r = RESUME_GUEST;
1039 break;
1040 }
1041
984 if (kvmppc_get_msr(vcpu) & MSR_PR) { 1042 if (kvmppc_get_msr(vcpu) & MSR_PR) {
985#ifdef EXIT_DEBUG 1043#ifdef EXIT_DEBUG
986 printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); 1044 pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
1045 kvmppc_get_pc(vcpu), last_inst);
987#endif 1046#endif
988 if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) != 1047 if ((last_inst & 0xff0007ff) !=
989 (INS_DCBZ & 0xfffffff7)) { 1048 (INS_DCBZ & 0xfffffff7)) {
990 kvmppc_core_queue_program(vcpu, flags); 1049 kvmppc_core_queue_program(vcpu, flags);
991 r = RESUME_GUEST; 1050 r = RESUME_GUEST;
@@ -1004,7 +1063,7 @@ program_interrupt:
1004 break; 1063 break;
1005 case EMULATE_FAIL: 1064 case EMULATE_FAIL:
1006 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", 1065 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
1007 __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); 1066 __func__, kvmppc_get_pc(vcpu), last_inst);
1008 kvmppc_core_queue_program(vcpu, flags); 1067 kvmppc_core_queue_program(vcpu, flags);
1009 r = RESUME_GUEST; 1068 r = RESUME_GUEST;
1010 break; 1069 break;
@@ -1021,8 +1080,23 @@ program_interrupt:
1021 break; 1080 break;
1022 } 1081 }
1023 case BOOK3S_INTERRUPT_SYSCALL: 1082 case BOOK3S_INTERRUPT_SYSCALL:
1083 {
1084 u32 last_sc;
1085 int emul;
1086
1087 /* Get last sc for papr */
1088 if (vcpu->arch.papr_enabled) {
 1089 /* The sc instruction points SRR0 to the next inst */
1090 emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
1091 if (emul != EMULATE_DONE) {
1092 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
1093 r = RESUME_GUEST;
1094 break;
1095 }
1096 }
1097
1024 if (vcpu->arch.papr_enabled && 1098 if (vcpu->arch.papr_enabled &&
1025 (kvmppc_get_last_sc(vcpu) == 0x44000022) && 1099 (last_sc == 0x44000022) &&
1026 !(kvmppc_get_msr(vcpu) & MSR_PR)) { 1100 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
1027 /* SC 1 papr hypercalls */ 1101 /* SC 1 papr hypercalls */
1028 ulong cmd = kvmppc_get_gpr(vcpu, 3); 1102 ulong cmd = kvmppc_get_gpr(vcpu, 3);
@@ -1067,36 +1141,51 @@ program_interrupt:
1067 r = RESUME_GUEST; 1141 r = RESUME_GUEST;
1068 } 1142 }
1069 break; 1143 break;
1144 }
1070 case BOOK3S_INTERRUPT_FP_UNAVAIL: 1145 case BOOK3S_INTERRUPT_FP_UNAVAIL:
1071 case BOOK3S_INTERRUPT_ALTIVEC: 1146 case BOOK3S_INTERRUPT_ALTIVEC:
1072 case BOOK3S_INTERRUPT_VSX: 1147 case BOOK3S_INTERRUPT_VSX:
1073 { 1148 {
1074 int ext_msr = 0; 1149 int ext_msr = 0;
1150 int emul;
1151 u32 last_inst;
1152
1153 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
1154 /* Do paired single instruction emulation */
1155 emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
1156 &last_inst);
1157 if (emul == EMULATE_DONE)
1158 goto program_interrupt;
1159 else
1160 r = RESUME_GUEST;
1075 1161
1076 switch (exit_nr) { 1162 break;
1077 case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP; break;
1078 case BOOK3S_INTERRUPT_ALTIVEC: ext_msr = MSR_VEC; break;
1079 case BOOK3S_INTERRUPT_VSX: ext_msr = MSR_VSX; break;
1080 } 1163 }
1081 1164
1082 switch (kvmppc_check_ext(vcpu, exit_nr)) { 1165 /* Enable external provider */
1083 case EMULATE_DONE: 1166 switch (exit_nr) {
1084 /* everything ok - let's enable the ext */ 1167 case BOOK3S_INTERRUPT_FP_UNAVAIL:
1085 r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr); 1168 ext_msr = MSR_FP;
1086 break; 1169 break;
1087 case EMULATE_FAIL: 1170
1088 /* we need to emulate this instruction */ 1171 case BOOK3S_INTERRUPT_ALTIVEC:
1089 goto program_interrupt; 1172 ext_msr = MSR_VEC;
1090 break; 1173 break;
1091 default: 1174
1092 /* nothing to worry about - go again */ 1175 case BOOK3S_INTERRUPT_VSX:
1176 ext_msr = MSR_VSX;
1093 break; 1177 break;
1094 } 1178 }
1179
1180 r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
1095 break; 1181 break;
1096 } 1182 }
1097 case BOOK3S_INTERRUPT_ALIGNMENT: 1183 case BOOK3S_INTERRUPT_ALIGNMENT:
1098 if (kvmppc_read_inst(vcpu) == EMULATE_DONE) { 1184 {
1099 u32 last_inst = kvmppc_get_last_inst(vcpu); 1185 u32 last_inst;
1186 int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
1187
1188 if (emul == EMULATE_DONE) {
1100 u32 dsisr; 1189 u32 dsisr;
1101 u64 dar; 1190 u64 dar;
1102 1191
@@ -1110,6 +1199,7 @@ program_interrupt:
1110 } 1199 }
1111 r = RESUME_GUEST; 1200 r = RESUME_GUEST;
1112 break; 1201 break;
1202 }
1113#ifdef CONFIG_PPC_BOOK3S_64 1203#ifdef CONFIG_PPC_BOOK3S_64
1114 case BOOK3S_INTERRUPT_FAC_UNAVAIL: 1204 case BOOK3S_INTERRUPT_FAC_UNAVAIL:
1115 kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); 1205 kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
@@ -1233,6 +1323,7 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
1233 *val = get_reg_val(id, to_book3s(vcpu)->hior); 1323 *val = get_reg_val(id, to_book3s(vcpu)->hior);
1234 break; 1324 break;
1235 case KVM_REG_PPC_LPCR: 1325 case KVM_REG_PPC_LPCR:
1326 case KVM_REG_PPC_LPCR_64:
1236 /* 1327 /*
1237 * We are only interested in the LPCR_ILE bit 1328 * We are only interested in the LPCR_ILE bit
1238 */ 1329 */
@@ -1268,6 +1359,7 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
1268 to_book3s(vcpu)->hior_explicit = true; 1359 to_book3s(vcpu)->hior_explicit = true;
1269 break; 1360 break;
1270 case KVM_REG_PPC_LPCR: 1361 case KVM_REG_PPC_LPCR:
1362 case KVM_REG_PPC_LPCR_64:
1271 kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); 1363 kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
1272 break; 1364 break;
1273 default: 1365 default:
@@ -1310,8 +1402,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
1310 p = __get_free_page(GFP_KERNEL|__GFP_ZERO); 1402 p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
1311 if (!p) 1403 if (!p)
1312 goto uninit_vcpu; 1404 goto uninit_vcpu;
1313 /* the real shared page fills the last 4k of our page */ 1405 vcpu->arch.shared = (void *)p;
1314 vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
1315#ifdef CONFIG_PPC_BOOK3S_64 1406#ifdef CONFIG_PPC_BOOK3S_64
1316 /* Always start the shared struct in native endian mode */ 1407 /* Always start the shared struct in native endian mode */
1317#ifdef __BIG_ENDIAN__ 1408#ifdef __BIG_ENDIAN__
@@ -1568,6 +1659,11 @@ static int kvmppc_core_init_vm_pr(struct kvm *kvm)
1568{ 1659{
1569 mutex_init(&kvm->arch.hpt_mutex); 1660 mutex_init(&kvm->arch.hpt_mutex);
1570 1661
1662#ifdef CONFIG_PPC_BOOK3S_64
1663 /* Start out with the default set of hcalls enabled */
1664 kvmppc_pr_init_default_hcalls(kvm);
1665#endif
1666
1571 if (firmware_has_feature(FW_FEATURE_SET_MODE)) { 1667 if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
1572 spin_lock(&kvm_global_user_count_lock); 1668 spin_lock(&kvm_global_user_count_lock);
1573 if (++kvm_global_user_count == 1) 1669 if (++kvm_global_user_count == 1)
@@ -1636,6 +1732,9 @@ static struct kvmppc_ops kvm_ops_pr = {
1636 .emulate_mfspr = kvmppc_core_emulate_mfspr_pr, 1732 .emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
1637 .fast_vcpu_kick = kvm_vcpu_kick, 1733 .fast_vcpu_kick = kvm_vcpu_kick,
1638 .arch_vm_ioctl = kvm_arch_vm_ioctl_pr, 1734 .arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
1735#ifdef CONFIG_PPC_BOOK3S_64
1736 .hcall_implemented = kvmppc_hcall_impl_pr,
1737#endif
1639}; 1738};
1640 1739
1641 1740
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 52a63bfe3f07..ce3c893d509b 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -40,8 +40,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
40{ 40{
41 long flags = kvmppc_get_gpr(vcpu, 4); 41 long flags = kvmppc_get_gpr(vcpu, 4);
42 long pte_index = kvmppc_get_gpr(vcpu, 5); 42 long pte_index = kvmppc_get_gpr(vcpu, 5);
43 unsigned long pteg[2 * 8]; 43 __be64 pteg[2 * 8];
44 unsigned long pteg_addr, i, *hpte; 44 __be64 *hpte;
45 unsigned long pteg_addr, i;
45 long int ret; 46 long int ret;
46 47
47 i = pte_index & 7; 48 i = pte_index & 7;
@@ -93,8 +94,8 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
93 pteg = get_pteg_addr(vcpu, pte_index); 94 pteg = get_pteg_addr(vcpu, pte_index);
94 mutex_lock(&vcpu->kvm->arch.hpt_mutex); 95 mutex_lock(&vcpu->kvm->arch.hpt_mutex);
95 copy_from_user(pte, (void __user *)pteg, sizeof(pte)); 96 copy_from_user(pte, (void __user *)pteg, sizeof(pte));
96 pte[0] = be64_to_cpu(pte[0]); 97 pte[0] = be64_to_cpu((__force __be64)pte[0]);
97 pte[1] = be64_to_cpu(pte[1]); 98 pte[1] = be64_to_cpu((__force __be64)pte[1]);
98 99
99 ret = H_NOT_FOUND; 100 ret = H_NOT_FOUND;
100 if ((pte[0] & HPTE_V_VALID) == 0 || 101 if ((pte[0] & HPTE_V_VALID) == 0 ||
@@ -171,8 +172,8 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
171 172
172 pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX); 173 pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
173 copy_from_user(pte, (void __user *)pteg, sizeof(pte)); 174 copy_from_user(pte, (void __user *)pteg, sizeof(pte));
174 pte[0] = be64_to_cpu(pte[0]); 175 pte[0] = be64_to_cpu((__force __be64)pte[0]);
175 pte[1] = be64_to_cpu(pte[1]); 176 pte[1] = be64_to_cpu((__force __be64)pte[1]);
176 177
177 /* tsl = AVPN */ 178 /* tsl = AVPN */
178 flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26; 179 flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;
@@ -211,8 +212,8 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
211 pteg = get_pteg_addr(vcpu, pte_index); 212 pteg = get_pteg_addr(vcpu, pte_index);
212 mutex_lock(&vcpu->kvm->arch.hpt_mutex); 213 mutex_lock(&vcpu->kvm->arch.hpt_mutex);
213 copy_from_user(pte, (void __user *)pteg, sizeof(pte)); 214 copy_from_user(pte, (void __user *)pteg, sizeof(pte));
214 pte[0] = be64_to_cpu(pte[0]); 215 pte[0] = be64_to_cpu((__force __be64)pte[0]);
215 pte[1] = be64_to_cpu(pte[1]); 216 pte[1] = be64_to_cpu((__force __be64)pte[1]);
216 217
217 ret = H_NOT_FOUND; 218 ret = H_NOT_FOUND;
218 if ((pte[0] & HPTE_V_VALID) == 0 || 219 if ((pte[0] & HPTE_V_VALID) == 0 ||
@@ -231,8 +232,8 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
231 232
232 rb = compute_tlbie_rb(v, r, pte_index); 233 rb = compute_tlbie_rb(v, r, pte_index);
233 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); 234 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
234 pte[0] = cpu_to_be64(pte[0]); 235 pte[0] = (__force u64)cpu_to_be64(pte[0]);
235 pte[1] = cpu_to_be64(pte[1]); 236 pte[1] = (__force u64)cpu_to_be64(pte[1]);
236 copy_to_user((void __user *)pteg, pte, sizeof(pte)); 237 copy_to_user((void __user *)pteg, pte, sizeof(pte));
237 ret = H_SUCCESS; 238 ret = H_SUCCESS;
238 239
@@ -266,6 +267,12 @@ static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
266 267
267int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) 268int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
268{ 269{
270 int rc, idx;
271
272 if (cmd <= MAX_HCALL_OPCODE &&
273 !test_bit(cmd/4, vcpu->kvm->arch.enabled_hcalls))
274 return EMULATE_FAIL;
275
269 switch (cmd) { 276 switch (cmd) {
270 case H_ENTER: 277 case H_ENTER:
271 return kvmppc_h_pr_enter(vcpu); 278 return kvmppc_h_pr_enter(vcpu);
@@ -294,8 +301,11 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
294 break; 301 break;
295 case H_RTAS: 302 case H_RTAS:
296 if (list_empty(&vcpu->kvm->arch.rtas_tokens)) 303 if (list_empty(&vcpu->kvm->arch.rtas_tokens))
297 return RESUME_HOST; 304 break;
298 if (kvmppc_rtas_hcall(vcpu)) 305 idx = srcu_read_lock(&vcpu->kvm->srcu);
306 rc = kvmppc_rtas_hcall(vcpu);
307 srcu_read_unlock(&vcpu->kvm->srcu, idx);
308 if (rc)
299 break; 309 break;
300 kvmppc_set_gpr(vcpu, 3, 0); 310 kvmppc_set_gpr(vcpu, 3, 0);
301 return EMULATE_DONE; 311 return EMULATE_DONE;
@@ -303,3 +313,61 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
303 313
304 return EMULATE_FAIL; 314 return EMULATE_FAIL;
305} 315}
316
317int kvmppc_hcall_impl_pr(unsigned long cmd)
318{
319 switch (cmd) {
320 case H_ENTER:
321 case H_REMOVE:
322 case H_PROTECT:
323 case H_BULK_REMOVE:
324 case H_PUT_TCE:
325 case H_CEDE:
326#ifdef CONFIG_KVM_XICS
327 case H_XIRR:
328 case H_CPPR:
329 case H_EOI:
330 case H_IPI:
331 case H_IPOLL:
332 case H_XIRR_X:
333#endif
334 return 1;
335 }
336 return 0;
337}
338
339/*
340 * List of hcall numbers to enable by default.
341 * For compatibility with old userspace, we enable by default
342 * all hcalls that were implemented before the hcall-enabling
343 * facility was added. Note this list should not include H_RTAS.
344 */
345static unsigned int default_hcall_list[] = {
346 H_ENTER,
347 H_REMOVE,
348 H_PROTECT,
349 H_BULK_REMOVE,
350 H_PUT_TCE,
351 H_CEDE,
352#ifdef CONFIG_KVM_XICS
353 H_XIRR,
354 H_CPPR,
355 H_EOI,
356 H_IPI,
357 H_IPOLL,
358 H_XIRR_X,
359#endif
360 0
361};
362
363void kvmppc_pr_init_default_hcalls(struct kvm *kvm)
364{
365 int i;
366 unsigned int hcall;
367
368 for (i = 0; default_hcall_list[i]; ++i) {
369 hcall = default_hcall_list[i];
370 WARN_ON(!kvmppc_hcall_impl_pr(hcall));
371 __set_bit(hcall / 4, kvm->arch.enabled_hcalls);
372 }
373}
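kvmppc_pr_init_default_hcalls() seeds the bitmap so old userspace keeps working; anything outside that list has to be switched on explicitly. Assuming the KVM_CAP_PPC_ENABLE_HCALL vm capability added in this series takes the hcall number in args[0] and an enable flag in args[1] (treat that as an assumption here; the authoritative description is the KVM API documentation), enabling one extra hcall from userspace would look roughly like:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = kvm < 0 ? -1 : ioctl(kvm, KVM_CREATE_VM, 0);
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_PPC_ENABLE_HCALL,
		.args = { 0x3a8, 1 },	/* hypothetical hcall number; 1 = enable */
	};

	if (vm < 0 || ioctl(vm, KVM_ENABLE_CAP, &cap))
		perror("KVM_ENABLE_CAP");
	return 0;
}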
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index ab62109fdfa3..b4c89fa6f109 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -51,7 +51,6 @@ unsigned long kvmppc_booke_handlers;
51 51
52struct kvm_stats_debugfs_item debugfs_entries[] = { 52struct kvm_stats_debugfs_item debugfs_entries[] = {
53 { "mmio", VCPU_STAT(mmio_exits) }, 53 { "mmio", VCPU_STAT(mmio_exits) },
54 { "dcr", VCPU_STAT(dcr_exits) },
55 { "sig", VCPU_STAT(signal_exits) }, 54 { "sig", VCPU_STAT(signal_exits) },
56 { "itlb_r", VCPU_STAT(itlb_real_miss_exits) }, 55 { "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
57 { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) }, 56 { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
@@ -185,24 +184,28 @@ static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
185 set_bit(priority, &vcpu->arch.pending_exceptions); 184 set_bit(priority, &vcpu->arch.pending_exceptions);
186} 185}
187 186
188static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, 187void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
189 ulong dear_flags, ulong esr_flags) 188 ulong dear_flags, ulong esr_flags)
190{ 189{
191 vcpu->arch.queued_dear = dear_flags; 190 vcpu->arch.queued_dear = dear_flags;
192 vcpu->arch.queued_esr = esr_flags; 191 vcpu->arch.queued_esr = esr_flags;
193 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); 192 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
194} 193}
195 194
196static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, 195void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
197 ulong dear_flags, ulong esr_flags) 196 ulong dear_flags, ulong esr_flags)
198{ 197{
199 vcpu->arch.queued_dear = dear_flags; 198 vcpu->arch.queued_dear = dear_flags;
200 vcpu->arch.queued_esr = esr_flags; 199 vcpu->arch.queued_esr = esr_flags;
201 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); 200 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
202} 201}
203 202
204static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, 203void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
205 ulong esr_flags) 204{
205 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
206}
207
208void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
206{ 209{
207 vcpu->arch.queued_esr = esr_flags; 210 vcpu->arch.queued_esr = esr_flags;
208 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); 211 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
@@ -266,13 +269,8 @@ static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
266 269
267static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) 270static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
268{ 271{
269#ifdef CONFIG_KVM_BOOKE_HV 272 kvmppc_set_srr0(vcpu, srr0);
270 mtspr(SPRN_GSRR0, srr0); 273 kvmppc_set_srr1(vcpu, srr1);
271 mtspr(SPRN_GSRR1, srr1);
272#else
273 vcpu->arch.shared->srr0 = srr0;
274 vcpu->arch.shared->srr1 = srr1;
275#endif
276} 274}
277 275
278static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) 276static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
@@ -297,51 +295,6 @@ static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
297 vcpu->arch.mcsrr1 = srr1; 295 vcpu->arch.mcsrr1 = srr1;
298} 296}
299 297
300static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
301{
302#ifdef CONFIG_KVM_BOOKE_HV
303 return mfspr(SPRN_GDEAR);
304#else
305 return vcpu->arch.shared->dar;
306#endif
307}
308
309static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
310{
311#ifdef CONFIG_KVM_BOOKE_HV
312 mtspr(SPRN_GDEAR, dear);
313#else
314 vcpu->arch.shared->dar = dear;
315#endif
316}
317
318static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
319{
320#ifdef CONFIG_KVM_BOOKE_HV
321 return mfspr(SPRN_GESR);
322#else
323 return vcpu->arch.shared->esr;
324#endif
325}
326
327static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
328{
329#ifdef CONFIG_KVM_BOOKE_HV
330 mtspr(SPRN_GESR, esr);
331#else
332 vcpu->arch.shared->esr = esr;
333#endif
334}
335
336static unsigned long get_guest_epr(struct kvm_vcpu *vcpu)
337{
338#ifdef CONFIG_KVM_BOOKE_HV
339 return mfspr(SPRN_GEPR);
340#else
341 return vcpu->arch.epr;
342#endif
343}
344
345/* Deliver the interrupt of the corresponding priority, if possible. */ 298/* Deliver the interrupt of the corresponding priority, if possible. */
346static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, 299static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
347 unsigned int priority) 300 unsigned int priority)
@@ -450,9 +403,9 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
450 403
451 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; 404 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
452 if (update_esr == true) 405 if (update_esr == true)
453 set_guest_esr(vcpu, vcpu->arch.queued_esr); 406 kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
454 if (update_dear == true) 407 if (update_dear == true)
455 set_guest_dear(vcpu, vcpu->arch.queued_dear); 408 kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
456 if (update_epr == true) { 409 if (update_epr == true) {
457 if (vcpu->arch.epr_flags & KVMPPC_EPR_USER) 410 if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
458 kvm_make_request(KVM_REQ_EPR_EXIT, vcpu); 411 kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
@@ -752,9 +705,8 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
752 * they were actually modified by emulation. */ 705 * they were actually modified by emulation. */
753 return RESUME_GUEST_NV; 706 return RESUME_GUEST_NV;
754 707
755 case EMULATE_DO_DCR: 708 case EMULATE_AGAIN:
756 run->exit_reason = KVM_EXIT_DCR; 709 return RESUME_GUEST;
757 return RESUME_HOST;
758 710
759 case EMULATE_FAIL: 711 case EMULATE_FAIL:
760 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", 712 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
@@ -866,6 +818,28 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
866 } 818 }
867} 819}
868 820
821static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
822 enum emulation_result emulated, u32 last_inst)
823{
824 switch (emulated) {
825 case EMULATE_AGAIN:
826 return RESUME_GUEST;
827
828 case EMULATE_FAIL:
829 pr_debug("%s: load instruction from guest address %lx failed\n",
830 __func__, vcpu->arch.pc);
831 /* For debugging, encode the failing instruction and
832 * report it to userspace. */
833 run->hw.hardware_exit_reason = ~0ULL << 32;
834 run->hw.hardware_exit_reason |= last_inst;
835 kvmppc_core_queue_program(vcpu, ESR_PIL);
836 return RESUME_HOST;
837
838 default:
839 BUG();
840 }
841}
842
869/** 843/**
870 * kvmppc_handle_exit 844 * kvmppc_handle_exit
871 * 845 *
@@ -877,6 +851,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
877 int r = RESUME_HOST; 851 int r = RESUME_HOST;
878 int s; 852 int s;
879 int idx; 853 int idx;
854 u32 last_inst = KVM_INST_FETCH_FAILED;
855 enum emulation_result emulated = EMULATE_DONE;
880 856
881 /* update before a new last_exit_type is rewritten */ 857 /* update before a new last_exit_type is rewritten */
882 kvmppc_update_timing_stats(vcpu); 858 kvmppc_update_timing_stats(vcpu);
@@ -884,6 +860,20 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
884 /* restart interrupts if they were meant for the host */ 860 /* restart interrupts if they were meant for the host */
885 kvmppc_restart_interrupt(vcpu, exit_nr); 861 kvmppc_restart_interrupt(vcpu, exit_nr);
886 862
863 /*
 864 * get last instruction before being preempted
865 * TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR & ESR_DATA
866 */
867 switch (exit_nr) {
868 case BOOKE_INTERRUPT_DATA_STORAGE:
869 case BOOKE_INTERRUPT_DTLB_MISS:
870 case BOOKE_INTERRUPT_HV_PRIV:
871 emulated = kvmppc_get_last_inst(vcpu, false, &last_inst);
872 break;
873 default:
874 break;
875 }
876
887 local_irq_enable(); 877 local_irq_enable();
888 878
889 trace_kvm_exit(exit_nr, vcpu); 879 trace_kvm_exit(exit_nr, vcpu);
@@ -892,6 +882,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
892 run->exit_reason = KVM_EXIT_UNKNOWN; 882 run->exit_reason = KVM_EXIT_UNKNOWN;
893 run->ready_for_interrupt_injection = 1; 883 run->ready_for_interrupt_injection = 1;
894 884
885 if (emulated != EMULATE_DONE) {
886 r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst);
887 goto out;
888 }
889
895 switch (exit_nr) { 890 switch (exit_nr) {
896 case BOOKE_INTERRUPT_MACHINE_CHECK: 891 case BOOKE_INTERRUPT_MACHINE_CHECK:
897 printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR)); 892 printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
@@ -1181,6 +1176,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
1181 BUG(); 1176 BUG();
1182 } 1177 }
1183 1178
1179out:
1184 /* 1180 /*
1185 * To avoid clobbering exit_reason, only check for signals if we 1181 * To avoid clobbering exit_reason, only check for signals if we
1186 * aren't already exiting to userspace for some other reason. 1182 * aren't already exiting to userspace for some other reason.
@@ -1265,17 +1261,17 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1265 regs->lr = vcpu->arch.lr; 1261 regs->lr = vcpu->arch.lr;
1266 regs->xer = kvmppc_get_xer(vcpu); 1262 regs->xer = kvmppc_get_xer(vcpu);
1267 regs->msr = vcpu->arch.shared->msr; 1263 regs->msr = vcpu->arch.shared->msr;
1268 regs->srr0 = vcpu->arch.shared->srr0; 1264 regs->srr0 = kvmppc_get_srr0(vcpu);
1269 regs->srr1 = vcpu->arch.shared->srr1; 1265 regs->srr1 = kvmppc_get_srr1(vcpu);
1270 regs->pid = vcpu->arch.pid; 1266 regs->pid = vcpu->arch.pid;
1271 regs->sprg0 = vcpu->arch.shared->sprg0; 1267 regs->sprg0 = kvmppc_get_sprg0(vcpu);
1272 regs->sprg1 = vcpu->arch.shared->sprg1; 1268 regs->sprg1 = kvmppc_get_sprg1(vcpu);
1273 regs->sprg2 = vcpu->arch.shared->sprg2; 1269 regs->sprg2 = kvmppc_get_sprg2(vcpu);
1274 regs->sprg3 = vcpu->arch.shared->sprg3; 1270 regs->sprg3 = kvmppc_get_sprg3(vcpu);
1275 regs->sprg4 = vcpu->arch.shared->sprg4; 1271 regs->sprg4 = kvmppc_get_sprg4(vcpu);
1276 regs->sprg5 = vcpu->arch.shared->sprg5; 1272 regs->sprg5 = kvmppc_get_sprg5(vcpu);
1277 regs->sprg6 = vcpu->arch.shared->sprg6; 1273 regs->sprg6 = kvmppc_get_sprg6(vcpu);
1278 regs->sprg7 = vcpu->arch.shared->sprg7; 1274 regs->sprg7 = kvmppc_get_sprg7(vcpu);
1279 1275
1280 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) 1276 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
1281 regs->gpr[i] = kvmppc_get_gpr(vcpu, i); 1277 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -1293,17 +1289,17 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1293 vcpu->arch.lr = regs->lr; 1289 vcpu->arch.lr = regs->lr;
1294 kvmppc_set_xer(vcpu, regs->xer); 1290 kvmppc_set_xer(vcpu, regs->xer);
1295 kvmppc_set_msr(vcpu, regs->msr); 1291 kvmppc_set_msr(vcpu, regs->msr);
1296 vcpu->arch.shared->srr0 = regs->srr0; 1292 kvmppc_set_srr0(vcpu, regs->srr0);
1297 vcpu->arch.shared->srr1 = regs->srr1; 1293 kvmppc_set_srr1(vcpu, regs->srr1);
1298 kvmppc_set_pid(vcpu, regs->pid); 1294 kvmppc_set_pid(vcpu, regs->pid);
1299 vcpu->arch.shared->sprg0 = regs->sprg0; 1295 kvmppc_set_sprg0(vcpu, regs->sprg0);
1300 vcpu->arch.shared->sprg1 = regs->sprg1; 1296 kvmppc_set_sprg1(vcpu, regs->sprg1);
1301 vcpu->arch.shared->sprg2 = regs->sprg2; 1297 kvmppc_set_sprg2(vcpu, regs->sprg2);
1302 vcpu->arch.shared->sprg3 = regs->sprg3; 1298 kvmppc_set_sprg3(vcpu, regs->sprg3);
1303 vcpu->arch.shared->sprg4 = regs->sprg4; 1299 kvmppc_set_sprg4(vcpu, regs->sprg4);
1304 vcpu->arch.shared->sprg5 = regs->sprg5; 1300 kvmppc_set_sprg5(vcpu, regs->sprg5);
1305 vcpu->arch.shared->sprg6 = regs->sprg6; 1301 kvmppc_set_sprg6(vcpu, regs->sprg6);
1306 vcpu->arch.shared->sprg7 = regs->sprg7; 1302 kvmppc_set_sprg7(vcpu, regs->sprg7);
1307 1303
1308 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) 1304 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
1309 kvmppc_set_gpr(vcpu, i, regs->gpr[i]); 1305 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
@@ -1321,8 +1317,8 @@ static void get_sregs_base(struct kvm_vcpu *vcpu,
1321 sregs->u.e.csrr0 = vcpu->arch.csrr0; 1317 sregs->u.e.csrr0 = vcpu->arch.csrr0;
1322 sregs->u.e.csrr1 = vcpu->arch.csrr1; 1318 sregs->u.e.csrr1 = vcpu->arch.csrr1;
1323 sregs->u.e.mcsr = vcpu->arch.mcsr; 1319 sregs->u.e.mcsr = vcpu->arch.mcsr;
1324 sregs->u.e.esr = get_guest_esr(vcpu); 1320 sregs->u.e.esr = kvmppc_get_esr(vcpu);
1325 sregs->u.e.dear = get_guest_dear(vcpu); 1321 sregs->u.e.dear = kvmppc_get_dar(vcpu);
1326 sregs->u.e.tsr = vcpu->arch.tsr; 1322 sregs->u.e.tsr = vcpu->arch.tsr;
1327 sregs->u.e.tcr = vcpu->arch.tcr; 1323 sregs->u.e.tcr = vcpu->arch.tcr;
1328 sregs->u.e.dec = kvmppc_get_dec(vcpu, tb); 1324 sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
@@ -1339,8 +1335,8 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
1339 vcpu->arch.csrr0 = sregs->u.e.csrr0; 1335 vcpu->arch.csrr0 = sregs->u.e.csrr0;
1340 vcpu->arch.csrr1 = sregs->u.e.csrr1; 1336 vcpu->arch.csrr1 = sregs->u.e.csrr1;
1341 vcpu->arch.mcsr = sregs->u.e.mcsr; 1337 vcpu->arch.mcsr = sregs->u.e.mcsr;
1342 set_guest_esr(vcpu, sregs->u.e.esr); 1338 kvmppc_set_esr(vcpu, sregs->u.e.esr);
1343 set_guest_dear(vcpu, sregs->u.e.dear); 1339 kvmppc_set_dar(vcpu, sregs->u.e.dear);
1344 vcpu->arch.vrsave = sregs->u.e.vrsave; 1340 vcpu->arch.vrsave = sregs->u.e.vrsave;
1345 kvmppc_set_tcr(vcpu, sregs->u.e.tcr); 1341 kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
1346 1342
@@ -1493,7 +1489,7 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1493 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2); 1489 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2);
1494 break; 1490 break;
1495 case KVM_REG_PPC_EPR: { 1491 case KVM_REG_PPC_EPR: {
1496 u32 epr = get_guest_epr(vcpu); 1492 u32 epr = kvmppc_get_epr(vcpu);
1497 val = get_reg_val(reg->id, epr); 1493 val = get_reg_val(reg->id, epr);
1498 break; 1494 break;
1499 } 1495 }
@@ -1788,6 +1784,57 @@ void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
1788#endif 1784#endif
1789} 1785}
1790 1786
1787int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
1788 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
1789{
1790 int gtlb_index;
1791 gpa_t gpaddr;
1792
1793#ifdef CONFIG_KVM_E500V2
1794 if (!(vcpu->arch.shared->msr & MSR_PR) &&
1795 (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
1796 pte->eaddr = eaddr;
1797 pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) |
1798 (eaddr & ~PAGE_MASK);
1799 pte->vpage = eaddr >> PAGE_SHIFT;
1800 pte->may_read = true;
1801 pte->may_write = true;
1802 pte->may_execute = true;
1803
1804 return 0;
1805 }
1806#endif
1807
1808 /* Check the guest TLB. */
1809 switch (xlid) {
1810 case XLATE_INST:
1811 gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
1812 break;
1813 case XLATE_DATA:
1814 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
1815 break;
1816 default:
1817 BUG();
1818 }
1819
1820 /* Do we have a TLB entry at all? */
1821 if (gtlb_index < 0)
1822 return -ENOENT;
1823
1824 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
1825
1826 pte->eaddr = eaddr;
1827 pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK);
1828 pte->vpage = eaddr >> PAGE_SHIFT;
1829
1830 /* XXX read permissions from the guest TLB */
1831 pte->may_read = true;
1832 pte->may_write = true;
1833 pte->may_execute = true;
1834
1835 return 0;
1836}
1837
1791int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 1838int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1792 struct kvm_guest_debug *dbg) 1839 struct kvm_guest_debug *dbg)
1793{ 1840{
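kvmppc_xlate() is now the single translation path used by the common kvmppc_ld/kvmppc_st code on booke: it returns the guest physical page combined with the original in-page offset, plus the (currently optimistic) permission bits. A toy version of the address math, with an identity mapping standing in for the guest TLB lookup:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK (~((1ULL << PAGE_SHIFT) - 1))

struct pte_info {
	uint64_t eaddr, raddr, vpage;
	bool may_read, may_write, may_execute;
};

/* Toy translation: identity-map the page, keep the in-page offset. */
static int xlate(uint64_t eaddr, struct pte_info *pte)
{
	uint64_t gpaddr = eaddr;	/* stand-in for the guest TLB lookup */

	pte->eaddr = eaddr;
	pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK);
	pte->vpage = eaddr >> PAGE_SHIFT;
	pte->may_read = pte->may_write = pte->may_execute = true;
	return 0;
}

int main(void)
{
	struct pte_info pte;

	if (xlate(0x10002345, &pte) == 0)
		printf("ea %#llx -> ra %#llx (vpage %#llx)\n",
		       (unsigned long long)pte.eaddr, (unsigned long long)pte.raddr,
		       (unsigned long long)pte.vpage);
	return 0;
}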
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index b632cd35919b..f753543c56fa 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -99,13 +99,6 @@ enum int_class {
99 99
100void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type); 100void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
101 101
102extern void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu);
103extern int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
104 unsigned int inst, int *advance);
105extern int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn,
106 ulong spr_val);
107extern int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn,
108 ulong *spr_val);
109extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu); 102extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
110extern int kvmppc_core_emulate_op_e500(struct kvm_run *run, 103extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
111 struct kvm_vcpu *vcpu, 104 struct kvm_vcpu *vcpu,
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 27a4b2877c10..28c158881d23 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -165,16 +165,16 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
165 * guest (PR-mode only). 165 * guest (PR-mode only).
166 */ 166 */
167 case SPRN_SPRG4: 167 case SPRN_SPRG4:
168 vcpu->arch.shared->sprg4 = spr_val; 168 kvmppc_set_sprg4(vcpu, spr_val);
169 break; 169 break;
170 case SPRN_SPRG5: 170 case SPRN_SPRG5:
171 vcpu->arch.shared->sprg5 = spr_val; 171 kvmppc_set_sprg5(vcpu, spr_val);
172 break; 172 break;
173 case SPRN_SPRG6: 173 case SPRN_SPRG6:
174 vcpu->arch.shared->sprg6 = spr_val; 174 kvmppc_set_sprg6(vcpu, spr_val);
175 break; 175 break;
176 case SPRN_SPRG7: 176 case SPRN_SPRG7:
177 vcpu->arch.shared->sprg7 = spr_val; 177 kvmppc_set_sprg7(vcpu, spr_val);
178 break; 178 break;
179 179
180 case SPRN_IVPR: 180 case SPRN_IVPR:
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 2c6deb5ef2fe..84c308a9a371 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -21,7 +21,6 @@
21#include <asm/ppc_asm.h> 21#include <asm/ppc_asm.h>
22#include <asm/kvm_asm.h> 22#include <asm/kvm_asm.h>
23#include <asm/reg.h> 23#include <asm/reg.h>
24#include <asm/mmu-44x.h>
25#include <asm/page.h> 24#include <asm/page.h>
26#include <asm/asm-offsets.h> 25#include <asm/asm-offsets.h>
27 26
@@ -424,10 +423,6 @@ lightweight_exit:
424 mtspr SPRN_PID1, r3 423 mtspr SPRN_PID1, r3
425#endif 424#endif
426 425
427#ifdef CONFIG_44x
428 iccci 0, 0 /* XXX hack */
429#endif
430
431 /* Load some guest volatiles. */ 426 /* Load some guest volatiles. */
432 lwz r0, VCPU_GPR(R0)(r4) 427 lwz r0, VCPU_GPR(R0)(r4)
433 lwz r2, VCPU_GPR(R2)(r4) 428 lwz r2, VCPU_GPR(R2)(r4)
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index a1712b818a5f..e9fa56a911fd 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -24,12 +24,10 @@
24#include <asm/ppc_asm.h> 24#include <asm/ppc_asm.h>
25#include <asm/kvm_asm.h> 25#include <asm/kvm_asm.h>
26#include <asm/reg.h> 26#include <asm/reg.h>
27#include <asm/mmu-44x.h>
28#include <asm/page.h> 27#include <asm/page.h>
29#include <asm/asm-compat.h> 28#include <asm/asm-compat.h>
30#include <asm/asm-offsets.h> 29#include <asm/asm-offsets.h>
31#include <asm/bitsperlong.h> 30#include <asm/bitsperlong.h>
32#include <asm/thread_info.h>
33 31
34#ifdef CONFIG_64BIT 32#ifdef CONFIG_64BIT
35#include <asm/exception-64e.h> 33#include <asm/exception-64e.h>
@@ -122,38 +120,14 @@
1221: 1201:
123 121
124 .if \flags & NEED_EMU 122 .if \flags & NEED_EMU
125 /*
126 * This assumes you have external PID support.
127 * To support a bookehv CPU without external PID, you'll
128 * need to look up the TLB entry and create a temporary mapping.
129 *
130 * FIXME: we don't currently handle if the lwepx faults. PR-mode
131 * booke doesn't handle it either. Since Linux doesn't use
132 * broadcast tlbivax anymore, the only way this should happen is
133 * if the guest maps its memory execute-but-not-read, or if we
134 * somehow take a TLB miss in the middle of this entry code and
135 * evict the relevant entry. On e500mc, all kernel lowmem is
136 * bolted into TLB1 large page mappings, and we don't use
137 * broadcast invalidates, so we should not take a TLB miss here.
138 *
139 * Later we'll need to deal with faults here. Disallowing guest
140 * mappings that are execute-but-not-read could be an option on
141 * e500mc, but not on chips with an LRAT if it is used.
142 */
143
144 mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */
145 PPC_STL r15, VCPU_GPR(R15)(r4) 123 PPC_STL r15, VCPU_GPR(R15)(r4)
146 PPC_STL r16, VCPU_GPR(R16)(r4) 124 PPC_STL r16, VCPU_GPR(R16)(r4)
147 PPC_STL r17, VCPU_GPR(R17)(r4) 125 PPC_STL r17, VCPU_GPR(R17)(r4)
148 PPC_STL r18, VCPU_GPR(R18)(r4) 126 PPC_STL r18, VCPU_GPR(R18)(r4)
149 PPC_STL r19, VCPU_GPR(R19)(r4) 127 PPC_STL r19, VCPU_GPR(R19)(r4)
150 mr r8, r3
151 PPC_STL r20, VCPU_GPR(R20)(r4) 128 PPC_STL r20, VCPU_GPR(R20)(r4)
152 rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS
153 PPC_STL r21, VCPU_GPR(R21)(r4) 129 PPC_STL r21, VCPU_GPR(R21)(r4)
154 rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR
155 PPC_STL r22, VCPU_GPR(R22)(r4) 130 PPC_STL r22, VCPU_GPR(R22)(r4)
156 rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID
157 PPC_STL r23, VCPU_GPR(R23)(r4) 131 PPC_STL r23, VCPU_GPR(R23)(r4)
158 PPC_STL r24, VCPU_GPR(R24)(r4) 132 PPC_STL r24, VCPU_GPR(R24)(r4)
159 PPC_STL r25, VCPU_GPR(R25)(r4) 133 PPC_STL r25, VCPU_GPR(R25)(r4)
@@ -163,33 +137,15 @@
163 PPC_STL r29, VCPU_GPR(R29)(r4) 137 PPC_STL r29, VCPU_GPR(R29)(r4)
164 PPC_STL r30, VCPU_GPR(R30)(r4) 138 PPC_STL r30, VCPU_GPR(R30)(r4)
165 PPC_STL r31, VCPU_GPR(R31)(r4) 139 PPC_STL r31, VCPU_GPR(R31)(r4)
166 mtspr SPRN_EPLC, r8
167
168 /* disable preemption, so we are sure we hit the fixup handler */
169 CURRENT_THREAD_INFO(r8, r1)
170 li r7, 1
171 stw r7, TI_PREEMPT(r8)
172
173 isync
174 140
175 /* 141 /*
176 * In case the read goes wrong, we catch it and write an invalid value 142 * We don't use external PID support. lwepx faults would need to be
 177 * in LAST_INST instead. 143 * handled by KVM and this implies additional code in DO_KVM (for
144 * DTB_MISS, DSI and LRAT) to check ESR[EPID] and EPLC[EGS] which
 145 * is too intrusive for the host. Get last instruction in
146 * kvmppc_get_last_inst().
178 */ 147 */
1791: lwepx r9, 0, r5 148 li r9, KVM_INST_FETCH_FAILED
1802:
181.section .fixup, "ax"
1823: li r9, KVM_INST_FETCH_FAILED
183 b 2b
184.previous
185.section __ex_table,"a"
186 PPC_LONG_ALIGN
187 PPC_LONG 1b,3b
188.previous
189
190 mtspr SPRN_EPLC, r3
191 li r7, 0
192 stw r7, TI_PREEMPT(r8)
193 stw r9, VCPU_LAST_INST(r4) 149 stw r9, VCPU_LAST_INST(r4)
194 .endif 150 .endif
195 151
@@ -441,6 +397,7 @@ _GLOBAL(kvmppc_resume_host)
441#ifdef CONFIG_64BIT 397#ifdef CONFIG_64BIT
442 PPC_LL r3, PACA_SPRG_VDSO(r13) 398 PPC_LL r3, PACA_SPRG_VDSO(r13)
443#endif 399#endif
400 mfspr r5, SPRN_SPRG9
444 PPC_STD(r6, VCPU_SHARED_SPRG4, r11) 401 PPC_STD(r6, VCPU_SHARED_SPRG4, r11)
445 mfspr r8, SPRN_SPRG6 402 mfspr r8, SPRN_SPRG6
446 PPC_STD(r7, VCPU_SHARED_SPRG5, r11) 403 PPC_STD(r7, VCPU_SHARED_SPRG5, r11)
@@ -448,6 +405,7 @@ _GLOBAL(kvmppc_resume_host)
448#ifdef CONFIG_64BIT 405#ifdef CONFIG_64BIT
449 mtspr SPRN_SPRG_VDSO_WRITE, r3 406 mtspr SPRN_SPRG_VDSO_WRITE, r3
450#endif 407#endif
408 PPC_STD(r5, VCPU_SPRG9, r4)
451 PPC_STD(r8, VCPU_SHARED_SPRG6, r11) 409 PPC_STD(r8, VCPU_SHARED_SPRG6, r11)
452 mfxer r3 410 mfxer r3
453 PPC_STD(r9, VCPU_SHARED_SPRG7, r11) 411 PPC_STD(r9, VCPU_SHARED_SPRG7, r11)
@@ -682,7 +640,9 @@ lightweight_exit:
682 mtspr SPRN_SPRG5W, r6 640 mtspr SPRN_SPRG5W, r6
683 PPC_LD(r8, VCPU_SHARED_SPRG7, r11) 641 PPC_LD(r8, VCPU_SHARED_SPRG7, r11)
684 mtspr SPRN_SPRG6W, r7 642 mtspr SPRN_SPRG6W, r7
643 PPC_LD(r5, VCPU_SPRG9, r4)
685 mtspr SPRN_SPRG7W, r8 644 mtspr SPRN_SPRG7W, r8
645 mtspr SPRN_SPRG9, r5
686 646
687 /* Load some guest volatiles. */ 647 /* Load some guest volatiles. */
688 PPC_LL r3, VCPU_LR(r4) 648 PPC_LL r3, VCPU_LR(r4)
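The assembly above no longer fetches the guest's last instruction with lwepx at interrupt time; the fetch is deferred to kvmppc_get_last_inst(), which may now fail. Below is a hedged C sketch (not part of the patch) of the caller-side pattern this enables: EMULATE_AGAIN simply re-enters the guest so it re-executes once the TLB entry is back. kvmppc_get_last_inst(), EMULATE_AGAIN, RESUME_GUEST and RESUME_HOST are names used elsewhere in this series; the handler itself is illustrative only, not the series' actual exit path.

static int fetch_and_emulate_last_inst(struct kvm_vcpu *vcpu)
{
	u32 inst;

	switch (kvmppc_get_last_inst(vcpu, false, &inst)) {
	case EMULATE_DONE:
		break;			/* inst is valid, fall through to emulation */
	case EMULATE_AGAIN:
		return RESUME_GUEST;	/* TLB entry was evicted; let the guest re-fault */
	default:
		return RESUME_HOST;	/* unexpected failure, bail out to user space */
	}

	/* "inst" would now be handed to the regular emulation code. */
	return RESUME_GUEST;
}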
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 002d51764143..c99c40e9182a 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -250,6 +250,14 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va
250 spr_val); 250 spr_val);
251 break; 251 break;
252 252
253 case SPRN_PWRMGTCR0:
254 /*
255 * The guest relies on the host's power management configuration;
256 * treat the request as a general store.
257 */
258 vcpu->arch.pwrmgtcr0 = spr_val;
259 break;
260
253 /* extra exceptions */ 261 /* extra exceptions */
254 case SPRN_IVOR32: 262 case SPRN_IVOR32:
255 vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val; 263 vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
@@ -368,6 +376,10 @@ int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_v
368 *spr_val = vcpu->arch.eptcfg; 376 *spr_val = vcpu->arch.eptcfg;
369 break; 377 break;
370 378
379 case SPRN_PWRMGTCR0:
380 *spr_val = vcpu->arch.pwrmgtcr0;
381 break;
382
371 /* extra exceptions */ 383 /* extra exceptions */
372 case SPRN_IVOR32: 384 case SPRN_IVOR32:
373 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; 385 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 86903d3f5a03..08f14bb57897 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -107,11 +107,15 @@ static u32 get_host_mas0(unsigned long eaddr)
107{ 107{
108 unsigned long flags; 108 unsigned long flags;
109 u32 mas0; 109 u32 mas0;
110 u32 mas4;
110 111
111 local_irq_save(flags); 112 local_irq_save(flags);
112 mtspr(SPRN_MAS6, 0); 113 mtspr(SPRN_MAS6, 0);
114 mas4 = mfspr(SPRN_MAS4);
115 mtspr(SPRN_MAS4, mas4 & ~MAS4_TLBSEL_MASK);
113 asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET)); 116 asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
114 mas0 = mfspr(SPRN_MAS0); 117 mas0 = mfspr(SPRN_MAS0);
118 mtspr(SPRN_MAS4, mas4);
115 local_irq_restore(flags); 119 local_irq_restore(flags);
116 120
117 return mas0; 121 return mas0;
@@ -607,6 +611,104 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
607 } 611 }
608} 612}
609 613
614#ifdef CONFIG_KVM_BOOKE_HV
615int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
616 u32 *instr)
617{
618 gva_t geaddr;
619 hpa_t addr;
620 hfn_t pfn;
621 hva_t eaddr;
622 u32 mas1, mas2, mas3;
623 u64 mas7_mas3;
624 struct page *page;
625 unsigned int addr_space, psize_shift;
626 bool pr;
627 unsigned long flags;
628
629 /* Search TLB for guest pc to get the real address */
630 geaddr = kvmppc_get_pc(vcpu);
631
632 addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG;
633
634 local_irq_save(flags);
635 mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
636 mtspr(SPRN_MAS5, MAS5_SGS | vcpu->kvm->arch.lpid);
637 asm volatile("tlbsx 0, %[geaddr]\n" : :
638 [geaddr] "r" (geaddr));
639 mtspr(SPRN_MAS5, 0);
640 mtspr(SPRN_MAS8, 0);
641 mas1 = mfspr(SPRN_MAS1);
642 mas2 = mfspr(SPRN_MAS2);
643 mas3 = mfspr(SPRN_MAS3);
644#ifdef CONFIG_64BIT
645 mas7_mas3 = mfspr(SPRN_MAS7_MAS3);
646#else
647 mas7_mas3 = ((u64)mfspr(SPRN_MAS7) << 32) | mas3;
648#endif
649 local_irq_restore(flags);
650
651 /*
652 * If the TLB entry for guest pc was evicted, return to the guest.
653 * There is a good chance of finding a valid TLB entry next time.
654 */
655 if (!(mas1 & MAS1_VALID))
656 return EMULATE_AGAIN;
657
658 /*
659 * Another thread may rewrite the TLB entry in parallel; don't
660 * execute from the address if the execute permission is not set.
661 */
662 pr = vcpu->arch.shared->msr & MSR_PR;
663 if (unlikely((pr && !(mas3 & MAS3_UX)) ||
664 (!pr && !(mas3 & MAS3_SX)))) {
665 pr_err_ratelimited(
666 "%s: Instruction emulation from guest address %08lx without execute permission\n",
667 __func__, geaddr);
668 return EMULATE_AGAIN;
669 }
670
671 /*
672 * The real address will be mapped by a cacheable, memory coherent,
673 * write-back page. Check for mismatches when LRAT is used.
674 */
675 if (has_feature(vcpu, VCPU_FTR_MMU_V2) &&
676 unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) {
677 pr_err_ratelimited(
678 "%s: Instruction emulation from guest address %08lx mismatches storage attributes\n",
679 __func__, geaddr);
680 return EMULATE_AGAIN;
681 }
682
683 /* Get pfn */
684 psize_shift = MAS1_GET_TSIZE(mas1) + 10;
685 addr = (mas7_mas3 & (~0ULL << psize_shift)) |
686 (geaddr & ((1ULL << psize_shift) - 1ULL));
687 pfn = addr >> PAGE_SHIFT;
688
689 /* Guard against emulation from devices area */
690 if (unlikely(!page_is_ram(pfn))) {
691 pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n",
692 __func__, addr);
693 return EMULATE_AGAIN;
694 }
695
696 /* Map a page and get guest's instruction */
697 page = pfn_to_page(pfn);
698 eaddr = (unsigned long)kmap_atomic(page);
699 *instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK));
700 kunmap_atomic((u32 *)eaddr);
701
702 return EMULATE_DONE;
703}
704#else
705int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
706 u32 *instr)
707{
708 return EMULATE_AGAIN;
709}
710#endif
711
610/************* MMU Notifiers *************/ 712/************* MMU Notifiers *************/
611 713
612int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) 714int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
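A minimal, self-contained sketch (not part of the patch) of the address arithmetic kvmppc_load_last_inst() performs once the TLB search has produced MAS1 and MAS7_MAS3: the real page number comes from MAS7||MAS3 and the page offset from the guest effective address, assuming the power-of-two-KB TSIZE encoding implied by the "+ 10" above.

#include <stdint.h>

static uint64_t compose_real_addr(uint64_t mas7_mas3, unsigned int tsize,
				  uint64_t guest_ea)
{
	unsigned int psize_shift = tsize + 10;		/* page size = 2^tsize KB */
	uint64_t offset_mask = (1ULL << psize_shift) - 1;

	/* Real page number from MAS7||MAS3, offset within the page from the EA. */
	return (mas7_mas3 & ~offset_mask) | (guest_ea & offset_mask);
}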
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 17e456279224..164bad2a19bf 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -110,7 +110,7 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
110{ 110{
111} 111}
112 112
113static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu); 113static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid);
114 114
115static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu) 115static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
116{ 116{
@@ -141,9 +141,9 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
141 mtspr(SPRN_GESR, vcpu->arch.shared->esr); 141 mtspr(SPRN_GESR, vcpu->arch.shared->esr);
142 142
143 if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || 143 if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
144 __get_cpu_var(last_vcpu_on_cpu) != vcpu) { 144 __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] != vcpu) {
145 kvmppc_e500_tlbil_all(vcpu_e500); 145 kvmppc_e500_tlbil_all(vcpu_e500);
146 __get_cpu_var(last_vcpu_on_cpu) = vcpu; 146 __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] = vcpu;
147 } 147 }
148 148
149 kvmppc_load_guest_fp(vcpu); 149 kvmppc_load_guest_fp(vcpu);
@@ -267,14 +267,32 @@ static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu,
267static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id, 267static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
268 union kvmppc_one_reg *val) 268 union kvmppc_one_reg *val)
269{ 269{
270 int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); 270 int r = 0;
271
272 switch (id) {
273 case KVM_REG_PPC_SPRG9:
274 *val = get_reg_val(id, vcpu->arch.sprg9);
275 break;
276 default:
277 r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
278 }
279
271 return r; 280 return r;
272} 281}
273 282
274static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id, 283static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
275 union kvmppc_one_reg *val) 284 union kvmppc_one_reg *val)
276{ 285{
277 int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val); 286 int r = 0;
287
288 switch (id) {
289 case KVM_REG_PPC_SPRG9:
290 vcpu->arch.sprg9 = set_reg_val(id, *val);
291 break;
292 default:
293 r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
294 }
295
278 return r; 296 return r;
279} 297}
280 298
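A hedged user-space sketch (not part of the patch) of how a VMM would use the new SPRG9 one-reg: the standard KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls with the KVM_REG_PPC_SPRG9 id introduced by this series; vcpu_fd is assumed to come from KVM_CREATE_VCPU.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_sprg9(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_SPRG9,
		.addr = (uintptr_t)val,		/* kernel copies the register value here */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}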
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index da86d9ba3476..e96b50d0bdab 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -207,36 +207,28 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
207 return emulated; 207 return emulated;
208} 208}
209 209
210/* XXX to do:
211 * lhax
212 * lhaux
213 * lswx
214 * lswi
215 * stswx
216 * stswi
217 * lha
218 * lhau
219 * lmw
220 * stmw
221 *
222 */
223/* XXX Should probably auto-generate instruction decoding for a particular core 210/* XXX Should probably auto-generate instruction decoding for a particular core
224 * from opcode tables in the future. */ 211 * from opcode tables in the future. */
225int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) 212int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
226{ 213{
227 u32 inst = kvmppc_get_last_inst(vcpu); 214 u32 inst;
228 int ra = get_ra(inst); 215 int rs, rt, sprn;
229 int rs = get_rs(inst); 216 enum emulation_result emulated;
230 int rt = get_rt(inst);
231 int sprn = get_sprn(inst);
232 enum emulation_result emulated = EMULATE_DONE;
233 int advance = 1; 217 int advance = 1;
234 218
235 /* this default type might be overwritten by subcategories */ 219 /* this default type might be overwritten by subcategories */
236 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); 220 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
237 221
222 emulated = kvmppc_get_last_inst(vcpu, false, &inst);
223 if (emulated != EMULATE_DONE)
224 return emulated;
225
238 pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); 226 pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
239 227
228 rs = get_rs(inst);
229 rt = get_rt(inst);
230 sprn = get_sprn(inst);
231
240 switch (get_op(inst)) { 232 switch (get_op(inst)) {
241 case OP_TRAP: 233 case OP_TRAP:
242#ifdef CONFIG_PPC_BOOK3S 234#ifdef CONFIG_PPC_BOOK3S
@@ -264,200 +256,24 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
264#endif 256#endif
265 advance = 0; 257 advance = 0;
266 break; 258 break;
267 case OP_31_XOP_LWZX:
268 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
269 break;
270
271 case OP_31_XOP_LBZX:
272 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
273 break;
274
275 case OP_31_XOP_LBZUX:
276 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
277 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
278 break;
279
280 case OP_31_XOP_STWX:
281 emulated = kvmppc_handle_store(run, vcpu,
282 kvmppc_get_gpr(vcpu, rs),
283 4, 1);
284 break;
285
286 case OP_31_XOP_STBX:
287 emulated = kvmppc_handle_store(run, vcpu,
288 kvmppc_get_gpr(vcpu, rs),
289 1, 1);
290 break;
291
292 case OP_31_XOP_STBUX:
293 emulated = kvmppc_handle_store(run, vcpu,
294 kvmppc_get_gpr(vcpu, rs),
295 1, 1);
296 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
297 break;
298
299 case OP_31_XOP_LHAX:
300 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
301 break;
302
303 case OP_31_XOP_LHZX:
304 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
305 break;
306
307 case OP_31_XOP_LHZUX:
308 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
309 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
310 break;
311 259
312 case OP_31_XOP_MFSPR: 260 case OP_31_XOP_MFSPR:
313 emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt); 261 emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
314 break; 262 break;
315 263
316 case OP_31_XOP_STHX:
317 emulated = kvmppc_handle_store(run, vcpu,
318 kvmppc_get_gpr(vcpu, rs),
319 2, 1);
320 break;
321
322 case OP_31_XOP_STHUX:
323 emulated = kvmppc_handle_store(run, vcpu,
324 kvmppc_get_gpr(vcpu, rs),
325 2, 1);
326 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
327 break;
328
329 case OP_31_XOP_MTSPR: 264 case OP_31_XOP_MTSPR:
330 emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs); 265 emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
331 break; 266 break;
332 267
333 case OP_31_XOP_DCBST:
334 case OP_31_XOP_DCBF:
335 case OP_31_XOP_DCBI:
336 /* Do nothing. The guest is performing dcbi because
337 * hardware DMA is not snooped by the dcache, but
338 * emulated DMA either goes through the dcache as
339 * normal writes, or the host kernel has handled dcache
340 * coherence. */
341 break;
342
343 case OP_31_XOP_LWBRX:
344 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
345 break;
346
347 case OP_31_XOP_TLBSYNC: 268 case OP_31_XOP_TLBSYNC:
348 break; 269 break;
349 270
350 case OP_31_XOP_STWBRX:
351 emulated = kvmppc_handle_store(run, vcpu,
352 kvmppc_get_gpr(vcpu, rs),
353 4, 0);
354 break;
355
356 case OP_31_XOP_LHBRX:
357 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
358 break;
359
360 case OP_31_XOP_STHBRX:
361 emulated = kvmppc_handle_store(run, vcpu,
362 kvmppc_get_gpr(vcpu, rs),
363 2, 0);
364 break;
365
366 default: 271 default:
367 /* Attempt core-specific emulation below. */ 272 /* Attempt core-specific emulation below. */
368 emulated = EMULATE_FAIL; 273 emulated = EMULATE_FAIL;
369 } 274 }
370 break; 275 break;
371 276
372 case OP_LWZ:
373 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
374 break;
375
376 /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
377 case OP_LD:
378 rt = get_rt(inst);
379 emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
380 break;
381
382 case OP_LWZU:
383 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
384 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
385 break;
386
387 case OP_LBZ:
388 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
389 break;
390
391 case OP_LBZU:
392 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
393 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
394 break;
395
396 case OP_STW:
397 emulated = kvmppc_handle_store(run, vcpu,
398 kvmppc_get_gpr(vcpu, rs),
399 4, 1);
400 break;
401
402 /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
403 case OP_STD:
404 rs = get_rs(inst);
405 emulated = kvmppc_handle_store(run, vcpu,
406 kvmppc_get_gpr(vcpu, rs),
407 8, 1);
408 break;
409
410 case OP_STWU:
411 emulated = kvmppc_handle_store(run, vcpu,
412 kvmppc_get_gpr(vcpu, rs),
413 4, 1);
414 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
415 break;
416
417 case OP_STB:
418 emulated = kvmppc_handle_store(run, vcpu,
419 kvmppc_get_gpr(vcpu, rs),
420 1, 1);
421 break;
422
423 case OP_STBU:
424 emulated = kvmppc_handle_store(run, vcpu,
425 kvmppc_get_gpr(vcpu, rs),
426 1, 1);
427 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
428 break;
429
430 case OP_LHZ:
431 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
432 break;
433
434 case OP_LHZU:
435 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
436 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
437 break;
438
439 case OP_LHA:
440 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
441 break;
442
443 case OP_LHAU:
444 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
445 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
446 break;
447
448 case OP_STH:
449 emulated = kvmppc_handle_store(run, vcpu,
450 kvmppc_get_gpr(vcpu, rs),
451 2, 1);
452 break;
453
454 case OP_STHU:
455 emulated = kvmppc_handle_store(run, vcpu,
456 kvmppc_get_gpr(vcpu, rs),
457 2, 1);
458 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
459 break;
460
461 default: 277 default:
462 emulated = EMULATE_FAIL; 278 emulated = EMULATE_FAIL;
463 } 279 }
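For readers unfamiliar with the PowerPC encoding, a small stand-alone sketch (not part of the patch) of the field extraction that the get_op()/get_xop()/get_rt()/get_ra()/get_sprn() helpers used above perform on a 32-bit instruction word; the split, swapped SPR field is the only non-obvious part.

#include <stdint.h>

static unsigned int op(uint32_t inst)  { return inst >> 26; }           /* primary opcode */
static unsigned int xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }  /* X-form extended opcode */
static unsigned int rt(uint32_t inst)  { return (inst >> 21) & 0x1f; }  /* destination GPR (same bits as rs) */
static unsigned int ra(uint32_t inst)  { return (inst >> 16) & 0x1f; }  /* base/update GPR */

/* SPR number for mfspr/mtspr: two 5-bit halves encoded in swapped order. */
static unsigned int sprn(uint32_t inst)
{
	return ((inst >> 16) & 0x1f) | (((inst >> 11) & 0x1f) << 5);
}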
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
new file mode 100644
index 000000000000..0de4ffa175a9
--- /dev/null
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -0,0 +1,272 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 * Copyright 2011 Freescale Semiconductor, Inc.
17 *
18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19 */
20
21#include <linux/jiffies.h>
22#include <linux/hrtimer.h>
23#include <linux/types.h>
24#include <linux/string.h>
25#include <linux/kvm_host.h>
26#include <linux/clockchips.h>
27
28#include <asm/reg.h>
29#include <asm/time.h>
30#include <asm/byteorder.h>
31#include <asm/kvm_ppc.h>
32#include <asm/disassemble.h>
33#include <asm/ppc-opcode.h>
34#include "timing.h"
35#include "trace.h"
36
37/* XXX to do:
38 * lhax
39 * lhaux
40 * lswx
41 * lswi
42 * stswx
43 * stswi
44 * lha
45 * lhau
46 * lmw
47 * stmw
48 *
49 */
50int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
51{
52 struct kvm_run *run = vcpu->run;
53 u32 inst;
54 int ra, rs, rt;
55 enum emulation_result emulated;
56 int advance = 1;
57
58 /* this default type might be overwritten by subcategories */
59 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
60
61 emulated = kvmppc_get_last_inst(vcpu, false, &inst);
62 if (emulated != EMULATE_DONE)
63 return emulated;
64
65 ra = get_ra(inst);
66 rs = get_rs(inst);
67 rt = get_rt(inst);
68
69 switch (get_op(inst)) {
70 case 31:
71 switch (get_xop(inst)) {
72 case OP_31_XOP_LWZX:
73 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
74 break;
75
76 case OP_31_XOP_LBZX:
77 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
78 break;
79
80 case OP_31_XOP_LBZUX:
81 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
82 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
83 break;
84
85 case OP_31_XOP_STWX:
86 emulated = kvmppc_handle_store(run, vcpu,
87 kvmppc_get_gpr(vcpu, rs),
88 4, 1);
89 break;
90
91 case OP_31_XOP_STBX:
92 emulated = kvmppc_handle_store(run, vcpu,
93 kvmppc_get_gpr(vcpu, rs),
94 1, 1);
95 break;
96
97 case OP_31_XOP_STBUX:
98 emulated = kvmppc_handle_store(run, vcpu,
99 kvmppc_get_gpr(vcpu, rs),
100 1, 1);
101 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
102 break;
103
104 case OP_31_XOP_LHAX:
105 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
106 break;
107
108 case OP_31_XOP_LHZX:
109 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
110 break;
111
112 case OP_31_XOP_LHZUX:
113 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
114 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
115 break;
116
117 case OP_31_XOP_STHX:
118 emulated = kvmppc_handle_store(run, vcpu,
119 kvmppc_get_gpr(vcpu, rs),
120 2, 1);
121 break;
122
123 case OP_31_XOP_STHUX:
124 emulated = kvmppc_handle_store(run, vcpu,
125 kvmppc_get_gpr(vcpu, rs),
126 2, 1);
127 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
128 break;
129
130 case OP_31_XOP_DCBST:
131 case OP_31_XOP_DCBF:
132 case OP_31_XOP_DCBI:
133 /* Do nothing. The guest is performing dcbi because
134 * hardware DMA is not snooped by the dcache, but
135 * emulated DMA either goes through the dcache as
136 * normal writes, or the host kernel has handled dcache
137 * coherence. */
138 break;
139
140 case OP_31_XOP_LWBRX:
141 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
142 break;
143
144 case OP_31_XOP_STWBRX:
145 emulated = kvmppc_handle_store(run, vcpu,
146 kvmppc_get_gpr(vcpu, rs),
147 4, 0);
148 break;
149
150 case OP_31_XOP_LHBRX:
151 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
152 break;
153
154 case OP_31_XOP_STHBRX:
155 emulated = kvmppc_handle_store(run, vcpu,
156 kvmppc_get_gpr(vcpu, rs),
157 2, 0);
158 break;
159
160 default:
161 emulated = EMULATE_FAIL;
162 break;
163 }
164 break;
165
166 case OP_LWZ:
167 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
168 break;
169
170 /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
171 case OP_LD:
172 rt = get_rt(inst);
173 emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
174 break;
175
176 case OP_LWZU:
177 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
178 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
179 break;
180
181 case OP_LBZ:
182 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
183 break;
184
185 case OP_LBZU:
186 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
187 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
188 break;
189
190 case OP_STW:
191 emulated = kvmppc_handle_store(run, vcpu,
192 kvmppc_get_gpr(vcpu, rs),
193 4, 1);
194 break;
195
196 /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
197 case OP_STD:
198 rs = get_rs(inst);
199 emulated = kvmppc_handle_store(run, vcpu,
200 kvmppc_get_gpr(vcpu, rs),
201 8, 1);
202 break;
203
204 case OP_STWU:
205 emulated = kvmppc_handle_store(run, vcpu,
206 kvmppc_get_gpr(vcpu, rs),
207 4, 1);
208 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
209 break;
210
211 case OP_STB:
212 emulated = kvmppc_handle_store(run, vcpu,
213 kvmppc_get_gpr(vcpu, rs),
214 1, 1);
215 break;
216
217 case OP_STBU:
218 emulated = kvmppc_handle_store(run, vcpu,
219 kvmppc_get_gpr(vcpu, rs),
220 1, 1);
221 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
222 break;
223
224 case OP_LHZ:
225 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
226 break;
227
228 case OP_LHZU:
229 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
230 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
231 break;
232
233 case OP_LHA:
234 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
235 break;
236
237 case OP_LHAU:
238 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
239 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
240 break;
241
242 case OP_STH:
243 emulated = kvmppc_handle_store(run, vcpu,
244 kvmppc_get_gpr(vcpu, rs),
245 2, 1);
246 break;
247
248 case OP_STHU:
249 emulated = kvmppc_handle_store(run, vcpu,
250 kvmppc_get_gpr(vcpu, rs),
251 2, 1);
252 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
253 break;
254
255 default:
256 emulated = EMULATE_FAIL;
257 break;
258 }
259
260 if (emulated == EMULATE_FAIL) {
261 advance = 0;
262 kvmppc_core_queue_program(vcpu, 0);
263 }
264
265 trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
266
267 /* Advance past emulated instruction. */
268 if (advance)
269 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
270
271 return emulated;
272}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 61c738ab1283..288b4bb05cbd 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -190,6 +190,25 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
190 vcpu->arch.magic_page_pa = param1 & ~0xfffULL; 190 vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
191 vcpu->arch.magic_page_ea = param2 & ~0xfffULL; 191 vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
192 192
193#ifdef CONFIG_PPC_64K_PAGES
194 /*
195 * Make sure our 4k magic page is in the same window of a 64k
196 * page within the guest and within the host's page.
197 */
198 if ((vcpu->arch.magic_page_pa & 0xf000) !=
199 ((ulong)vcpu->arch.shared & 0xf000)) {
200 void *old_shared = vcpu->arch.shared;
201 ulong shared = (ulong)vcpu->arch.shared;
202 void *new_shared;
203
204 shared &= PAGE_MASK;
205 shared |= vcpu->arch.magic_page_pa & 0xf000;
206 new_shared = (void*)shared;
207 memcpy(new_shared, old_shared, 0x1000);
208 vcpu->arch.shared = new_shared;
209 }
210#endif
211
193 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7; 212 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
194 213
195 r = EV_SUCCESS; 214 r = EV_SUCCESS;
@@ -198,7 +217,6 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
198 case KVM_HCALL_TOKEN(KVM_HC_FEATURES): 217 case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
199 r = EV_SUCCESS; 218 r = EV_SUCCESS;
200#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2) 219#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
201 /* XXX Missing magic page on 44x */
202 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); 220 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
203#endif 221#endif
204 222
@@ -254,13 +272,16 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
254 enum emulation_result er; 272 enum emulation_result er;
255 int r; 273 int r;
256 274
257 er = kvmppc_emulate_instruction(run, vcpu); 275 er = kvmppc_emulate_loadstore(vcpu);
258 switch (er) { 276 switch (er) {
259 case EMULATE_DONE: 277 case EMULATE_DONE:
260 /* Future optimization: only reload non-volatiles if they were 278 /* Future optimization: only reload non-volatiles if they were
261 * actually modified. */ 279 * actually modified. */
262 r = RESUME_GUEST_NV; 280 r = RESUME_GUEST_NV;
263 break; 281 break;
282 case EMULATE_AGAIN:
283 r = RESUME_GUEST;
284 break;
264 case EMULATE_DO_MMIO: 285 case EMULATE_DO_MMIO:
265 run->exit_reason = KVM_EXIT_MMIO; 286 run->exit_reason = KVM_EXIT_MMIO;
266 /* We must reload nonvolatiles because "update" load/store 287 /* We must reload nonvolatiles because "update" load/store
@@ -270,11 +291,15 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
270 r = RESUME_HOST_NV; 291 r = RESUME_HOST_NV;
271 break; 292 break;
272 case EMULATE_FAIL: 293 case EMULATE_FAIL:
294 {
295 u32 last_inst;
296
297 kvmppc_get_last_inst(vcpu, false, &last_inst);
273 /* XXX Deliver Program interrupt to guest. */ 298 /* XXX Deliver Program interrupt to guest. */
274 printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__, 299 pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
275 kvmppc_get_last_inst(vcpu));
276 r = RESUME_HOST; 300 r = RESUME_HOST;
277 break; 301 break;
302 }
278 default: 303 default:
279 WARN_ON(1); 304 WARN_ON(1);
280 r = RESUME_GUEST; 305 r = RESUME_GUEST;
@@ -284,6 +309,81 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
284} 309}
285EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio); 310EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
286 311
312int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
313 bool data)
314{
315 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
316 struct kvmppc_pte pte;
317 int r;
318
319 vcpu->stat.st++;
320
321 r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
322 XLATE_WRITE, &pte);
323 if (r < 0)
324 return r;
325
326 *eaddr = pte.raddr;
327
328 if (!pte.may_write)
329 return -EPERM;
330
331 /* Magic page override */
332 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
333 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
334 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
335 void *magic = vcpu->arch.shared;
336 magic += pte.eaddr & 0xfff;
337 memcpy(magic, ptr, size);
338 return EMULATE_DONE;
339 }
340
341 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
342 return EMULATE_DO_MMIO;
343
344 return EMULATE_DONE;
345}
346EXPORT_SYMBOL_GPL(kvmppc_st);
347
348int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
349 bool data)
350{
351 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
352 struct kvmppc_pte pte;
353 int rc;
354
355 vcpu->stat.ld++;
356
357 rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
358 XLATE_READ, &pte);
359 if (rc)
360 return rc;
361
362 *eaddr = pte.raddr;
363
364 if (!pte.may_read)
365 return -EPERM;
366
367 if (!data && !pte.may_execute)
368 return -ENOEXEC;
369
370 /* Magic page override */
371 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
372 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
373 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
374 void *magic = vcpu->arch.shared;
375 magic += pte.eaddr & 0xfff;
376 memcpy(ptr, magic, size);
377 return EMULATE_DONE;
378 }
379
380 if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
381 return EMULATE_DO_MMIO;
382
383 return EMULATE_DONE;
384}
385EXPORT_SYMBOL_GPL(kvmppc_ld);
386
287int kvm_arch_hardware_enable(void *garbage) 387int kvm_arch_hardware_enable(void *garbage)
288{ 388{
289 return 0; 389 return 0;
@@ -366,14 +466,20 @@ void kvm_arch_sync_events(struct kvm *kvm)
366{ 466{
367} 467}
368 468
369int kvm_dev_ioctl_check_extension(long ext) 469int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
370{ 470{
371 int r; 471 int r;
372 /* FIXME!! 472 /* Assume we're using HV mode when the HV module is loaded */
373 * Should some of this be vm ioctl ? is it possible now ?
374 */
375 int hv_enabled = kvmppc_hv_ops ? 1 : 0; 473 int hv_enabled = kvmppc_hv_ops ? 1 : 0;
376 474
475 if (kvm) {
476 /*
477 * Hooray - we know which VM type we're running on. Depend on
478 * that rather than the guess above.
479 */
480 hv_enabled = is_kvmppc_hv_enabled(kvm);
481 }
482
377 switch (ext) { 483 switch (ext) {
378#ifdef CONFIG_BOOKE 484#ifdef CONFIG_BOOKE
379 case KVM_CAP_PPC_BOOKE_SREGS: 485 case KVM_CAP_PPC_BOOKE_SREGS:
@@ -387,6 +493,7 @@ int kvm_dev_ioctl_check_extension(long ext)
387 case KVM_CAP_PPC_UNSET_IRQ: 493 case KVM_CAP_PPC_UNSET_IRQ:
388 case KVM_CAP_PPC_IRQ_LEVEL: 494 case KVM_CAP_PPC_IRQ_LEVEL:
389 case KVM_CAP_ENABLE_CAP: 495 case KVM_CAP_ENABLE_CAP:
496 case KVM_CAP_ENABLE_CAP_VM:
390 case KVM_CAP_ONE_REG: 497 case KVM_CAP_ONE_REG:
391 case KVM_CAP_IOEVENTFD: 498 case KVM_CAP_IOEVENTFD:
392 case KVM_CAP_DEVICE_CTRL: 499 case KVM_CAP_DEVICE_CTRL:
@@ -417,6 +524,7 @@ int kvm_dev_ioctl_check_extension(long ext)
417 case KVM_CAP_PPC_ALLOC_HTAB: 524 case KVM_CAP_PPC_ALLOC_HTAB:
418 case KVM_CAP_PPC_RTAS: 525 case KVM_CAP_PPC_RTAS:
419 case KVM_CAP_PPC_FIXUP_HCALL: 526 case KVM_CAP_PPC_FIXUP_HCALL:
527 case KVM_CAP_PPC_ENABLE_HCALL:
420#ifdef CONFIG_KVM_XICS 528#ifdef CONFIG_KVM_XICS
421 case KVM_CAP_IRQ_XICS: 529 case KVM_CAP_IRQ_XICS:
422#endif 530#endif
@@ -635,12 +743,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
635#endif 743#endif
636} 744}
637 745
638static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
639 struct kvm_run *run)
640{
641 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
642}
643
644static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, 746static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
645 struct kvm_run *run) 747 struct kvm_run *run)
646{ 748{
@@ -837,10 +939,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
837 if (!vcpu->mmio_is_write) 939 if (!vcpu->mmio_is_write)
838 kvmppc_complete_mmio_load(vcpu, run); 940 kvmppc_complete_mmio_load(vcpu, run);
839 vcpu->mmio_needed = 0; 941 vcpu->mmio_needed = 0;
840 } else if (vcpu->arch.dcr_needed) {
841 if (!vcpu->arch.dcr_is_write)
842 kvmppc_complete_dcr_load(vcpu, run);
843 vcpu->arch.dcr_needed = 0;
844 } else if (vcpu->arch.osi_needed) { 942 } else if (vcpu->arch.osi_needed) {
845 u64 *gprs = run->osi.gprs; 943 u64 *gprs = run->osi.gprs;
846 int i; 944 int i;
@@ -1099,6 +1197,42 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
1099 return 0; 1197 return 0;
1100} 1198}
1101 1199
1200
1201static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
1202 struct kvm_enable_cap *cap)
1203{
1204 int r;
1205
1206 if (cap->flags)
1207 return -EINVAL;
1208
1209 switch (cap->cap) {
1210#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1211 case KVM_CAP_PPC_ENABLE_HCALL: {
1212 unsigned long hcall = cap->args[0];
1213
1214 r = -EINVAL;
1215 if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
1216 cap->args[1] > 1)
1217 break;
1218 if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
1219 break;
1220 if (cap->args[1])
1221 set_bit(hcall / 4, kvm->arch.enabled_hcalls);
1222 else
1223 clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
1224 r = 0;
1225 break;
1226 }
1227#endif
1228 default:
1229 r = -EINVAL;
1230 break;
1231 }
1232
1233 return r;
1234}
1235
1102long kvm_arch_vm_ioctl(struct file *filp, 1236long kvm_arch_vm_ioctl(struct file *filp,
1103 unsigned int ioctl, unsigned long arg) 1237 unsigned int ioctl, unsigned long arg)
1104{ 1238{
@@ -1118,6 +1252,15 @@ long kvm_arch_vm_ioctl(struct file *filp,
1118 1252
1119 break; 1253 break;
1120 } 1254 }
1255 case KVM_ENABLE_CAP:
1256 {
1257 struct kvm_enable_cap cap;
1258 r = -EFAULT;
1259 if (copy_from_user(&cap, argp, sizeof(cap)))
1260 goto out;
1261 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
1262 break;
1263 }
1121#ifdef CONFIG_PPC_BOOK3S_64 1264#ifdef CONFIG_PPC_BOOK3S_64
1122 case KVM_CREATE_SPAPR_TCE: { 1265 case KVM_CREATE_SPAPR_TCE: {
1123 struct kvm_create_spapr_tce create_tce; 1266 struct kvm_create_spapr_tce create_tce;
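A hedged user-space sketch (not part of the patch) of the new KVM_CAP_PPC_ENABLE_HCALL control added above: KVM_ENABLE_CAP on the VM fd, with args[0] carrying the hcall token and args[1] enabling (1) or disabling (0) in-kernel handling. vm_fd and the hcall token (e.g. H_SET_MODE) are assumed to come from the VMM's existing setup.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int ppc_enable_hcall(int vm_fd, unsigned long hcall, int enable)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));	/* flags must be zero */
	cap.cap     = KVM_CAP_PPC_ENABLE_HCALL;
	cap.args[0] = hcall;		/* hcall opcode, must be a multiple of 4 */
	cap.args[1] = enable;		/* 1 = handle in kernel, 0 = forward to user space */

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}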
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index 07b6110a4bb7..e44d2b2ea97e 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -110,7 +110,6 @@ void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu)
110 110
111static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = { 111static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = {
112 [MMIO_EXITS] = "MMIO", 112 [MMIO_EXITS] = "MMIO",
113 [DCR_EXITS] = "DCR",
114 [SIGNAL_EXITS] = "SIGNAL", 113 [SIGNAL_EXITS] = "SIGNAL",
115 [ITLB_REAL_MISS_EXITS] = "ITLBREAL", 114 [ITLB_REAL_MISS_EXITS] = "ITLBREAL",
116 [ITLB_VIRT_MISS_EXITS] = "ITLBVIRT", 115 [ITLB_VIRT_MISS_EXITS] = "ITLBVIRT",
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h
index bf191e72b2d8..3123690c82dc 100644
--- a/arch/powerpc/kvm/timing.h
+++ b/arch/powerpc/kvm/timing.h
@@ -63,9 +63,6 @@ static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type)
63 case EMULATED_INST_EXITS: 63 case EMULATED_INST_EXITS:
64 vcpu->stat.emulated_inst_exits++; 64 vcpu->stat.emulated_inst_exits++;
65 break; 65 break;
66 case DCR_EXITS:
67 vcpu->stat.dcr_exits++;
68 break;
69 case DSI_EXITS: 66 case DSI_EXITS:
70 vcpu->stat.dsi_exits++; 67 vcpu->stat.dsi_exits++;
71 break; 68 break;
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
index e1357cd8dc1f..a674f090dfb8 100644
--- a/arch/powerpc/kvm/trace_pr.h
+++ b/arch/powerpc/kvm/trace_pr.h
@@ -291,6 +291,26 @@ TRACE_EVENT(kvm_unmap_hva,
291 TP_printk("unmap hva 0x%lx\n", __entry->hva) 291 TP_printk("unmap hva 0x%lx\n", __entry->hva)
292); 292);
293 293
294TRACE_EVENT(kvm_ppc_instr,
295 TP_PROTO(unsigned int inst, unsigned long _pc, unsigned int emulate),
296 TP_ARGS(inst, _pc, emulate),
297
298 TP_STRUCT__entry(
299 __field( unsigned int, inst )
300 __field( unsigned long, pc )
301 __field( unsigned int, emulate )
302 ),
303
304 TP_fast_assign(
305 __entry->inst = inst;
306 __entry->pc = _pc;
307 __entry->emulate = emulate;
308 ),
309
310 TP_printk("inst %u pc 0x%lx emulate %u\n",
311 __entry->inst, __entry->pc, __entry->emulate)
312);
313
294#endif /* _TRACE_KVM_H */ 314#endif /* _TRACE_KVM_H */
295 315
296/* This part must be outside protection */ 316/* This part must be outside protection */
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 339b34a02fb8..ce81eb2ab76a 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -146,7 +146,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
146 return -EINVAL; 146 return -EINVAL;
147} 147}
148 148
149int kvm_dev_ioctl_check_extension(long ext) 149int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
150{ 150{
151 int r; 151 int r;
152 152
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b86d329b953a..204422de3fed 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2656,7 +2656,7 @@ out:
2656 return r; 2656 return r;
2657} 2657}
2658 2658
2659int kvm_dev_ioctl_check_extension(long ext) 2659int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
2660{ 2660{
2661 int r; 2661 int r;
2662 2662
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ec4e3bd83d47..5065b953e6e8 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -602,7 +602,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
602 unsigned int ioctl, unsigned long arg); 602 unsigned int ioctl, unsigned long arg);
603int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf); 603int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
604 604
605int kvm_dev_ioctl_check_extension(long ext); 605int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
606 606
607int kvm_get_dirty_log(struct kvm *kvm, 607int kvm_get_dirty_log(struct kvm *kvm,
608 struct kvm_dirty_log *log, int *is_dirty); 608 struct kvm_dirty_log *log, int *is_dirty);
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 9b744af871d7..cf3a2ff440e4 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -162,7 +162,7 @@ struct kvm_pit_config {
162#define KVM_EXIT_TPR_ACCESS 12 162#define KVM_EXIT_TPR_ACCESS 12
163#define KVM_EXIT_S390_SIEIC 13 163#define KVM_EXIT_S390_SIEIC 13
164#define KVM_EXIT_S390_RESET 14 164#define KVM_EXIT_S390_RESET 14
165#define KVM_EXIT_DCR 15 165#define KVM_EXIT_DCR 15 /* deprecated */
166#define KVM_EXIT_NMI 16 166#define KVM_EXIT_NMI 16
167#define KVM_EXIT_INTERNAL_ERROR 17 167#define KVM_EXIT_INTERNAL_ERROR 17
168#define KVM_EXIT_OSI 18 168#define KVM_EXIT_OSI 18
@@ -268,7 +268,7 @@ struct kvm_run {
268 __u64 trans_exc_code; 268 __u64 trans_exc_code;
269 __u32 pgm_code; 269 __u32 pgm_code;
270 } s390_ucontrol; 270 } s390_ucontrol;
271 /* KVM_EXIT_DCR */ 271 /* KVM_EXIT_DCR (deprecated) */
272 struct { 272 struct {
273 __u32 dcrn; 273 __u32 dcrn;
274 __u32 data; 274 __u32 data;
@@ -763,6 +763,8 @@ struct kvm_ppc_smmu_info {
763#define KVM_CAP_VM_ATTRIBUTES 101 763#define KVM_CAP_VM_ATTRIBUTES 101
764#define KVM_CAP_ARM_PSCI_0_2 102 764#define KVM_CAP_ARM_PSCI_0_2 102
765#define KVM_CAP_PPC_FIXUP_HCALL 103 765#define KVM_CAP_PPC_FIXUP_HCALL 103
766#define KVM_CAP_PPC_ENABLE_HCALL 104
767#define KVM_CAP_CHECK_EXTENSION_VM 105
766 768
767#ifdef KVM_CAP_IRQ_ROUTING 769#ifdef KVM_CAP_IRQ_ROUTING
768 770
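With KVM_CAP_CHECK_EXTENSION_VM (wired up in virt/kvm/kvm_main.c below), capabilities can be queried on the VM fd, so the answer may depend on the VM type, e.g. HV vs. PR on Book3S. A hedged user-space sketch, not part of the patch; vm_fd is assumed to come from KVM_CREATE_VM.

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vm_has_cap(int vm_fd, long cap)
{
	/* 0 means unsupported, > 0 supported (often the extension value), < 0 on error. */
	return ioctl(vm_fd, KVM_CHECK_EXTENSION, cap);
}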
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4b6c01b477f9..1b95cc926cfc 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2324,6 +2324,34 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
2324 return 0; 2324 return 0;
2325} 2325}
2326 2326
2327static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
2328{
2329 switch (arg) {
2330 case KVM_CAP_USER_MEMORY:
2331 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
2332 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
2333#ifdef CONFIG_KVM_APIC_ARCHITECTURE
2334 case KVM_CAP_SET_BOOT_CPU_ID:
2335#endif
2336 case KVM_CAP_INTERNAL_ERROR_DATA:
2337#ifdef CONFIG_HAVE_KVM_MSI
2338 case KVM_CAP_SIGNAL_MSI:
2339#endif
2340#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
2341 case KVM_CAP_IRQFD_RESAMPLE:
2342#endif
2343 case KVM_CAP_CHECK_EXTENSION_VM:
2344 return 1;
2345#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
2346 case KVM_CAP_IRQ_ROUTING:
2347 return KVM_MAX_IRQ_ROUTES;
2348#endif
2349 default:
2350 break;
2351 }
2352 return kvm_vm_ioctl_check_extension(kvm, arg);
2353}
2354
2327static long kvm_vm_ioctl(struct file *filp, 2355static long kvm_vm_ioctl(struct file *filp,
2328 unsigned int ioctl, unsigned long arg) 2356 unsigned int ioctl, unsigned long arg)
2329{ 2357{
@@ -2487,6 +2515,9 @@ static long kvm_vm_ioctl(struct file *filp,
2487 r = 0; 2515 r = 0;
2488 break; 2516 break;
2489 } 2517 }
2518 case KVM_CHECK_EXTENSION:
2519 r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
2520 break;
2490 default: 2521 default:
2491 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 2522 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
2492 if (r == -ENOTTY) 2523 if (r == -ENOTTY)
@@ -2571,33 +2602,6 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
2571 return r; 2602 return r;
2572} 2603}
2573 2604
2574static long kvm_dev_ioctl_check_extension_generic(long arg)
2575{
2576 switch (arg) {
2577 case KVM_CAP_USER_MEMORY:
2578 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
2579 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
2580#ifdef CONFIG_KVM_APIC_ARCHITECTURE
2581 case KVM_CAP_SET_BOOT_CPU_ID:
2582#endif
2583 case KVM_CAP_INTERNAL_ERROR_DATA:
2584#ifdef CONFIG_HAVE_KVM_MSI
2585 case KVM_CAP_SIGNAL_MSI:
2586#endif
2587#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
2588 case KVM_CAP_IRQFD_RESAMPLE:
2589#endif
2590 return 1;
2591#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
2592 case KVM_CAP_IRQ_ROUTING:
2593 return KVM_MAX_IRQ_ROUTES;
2594#endif
2595 default:
2596 break;
2597 }
2598 return kvm_dev_ioctl_check_extension(arg);
2599}
2600
2601static long kvm_dev_ioctl(struct file *filp, 2605static long kvm_dev_ioctl(struct file *filp,
2602 unsigned int ioctl, unsigned long arg) 2606 unsigned int ioctl, unsigned long arg)
2603{ 2607{
@@ -2614,7 +2618,7 @@ static long kvm_dev_ioctl(struct file *filp,
2614 r = kvm_dev_ioctl_create_vm(arg); 2618 r = kvm_dev_ioctl_create_vm(arg);
2615 break; 2619 break;
2616 case KVM_CHECK_EXTENSION: 2620 case KVM_CHECK_EXTENSION:
2617 r = kvm_dev_ioctl_check_extension_generic(arg); 2621 r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
2618 break; 2622 break;
2619 case KVM_GET_VCPU_MMAP_SIZE: 2623 case KVM_GET_VCPU_MMAP_SIZE:
2620 r = -EINVAL; 2624 r = -EINVAL;