Diffstat (limited to 'arch/powerpc')
 arch/powerpc/Kconfig | 3
 arch/powerpc/boot/dts/fsl/b4si-post.dtsi | 4
 arch/powerpc/boot/dts/fsl/elo3-dma-0.dtsi | 82
 arch/powerpc/boot/dts/fsl/elo3-dma-1.dtsi | 82
 arch/powerpc/boot/dts/fsl/t4240si-post.dtsi | 4
 arch/powerpc/include/asm/Kbuild | 1
 arch/powerpc/include/asm/disassemble.h | 4
 arch/powerpc/include/asm/exception-64s.h | 21
 arch/powerpc/include/asm/fsl_ifc.h | 2
 arch/powerpc/include/asm/jump_label.h | 2
 arch/powerpc/include/asm/kvm_asm.h | 4
 arch/powerpc/include/asm/kvm_book3s.h | 232
 arch/powerpc/include/asm/kvm_book3s_32.h | 2
 arch/powerpc/include/asm/kvm_book3s_64.h | 8
 arch/powerpc/include/asm/kvm_book3s_asm.h | 9
 arch/powerpc/include/asm/kvm_booke.h | 7
 arch/powerpc/include/asm/kvm_host.h | 57
 arch/powerpc/include/asm/kvm_ppc.h | 107
 arch/powerpc/include/asm/paca.h | 2
 arch/powerpc/include/asm/pgalloc-64.h | 5
 arch/powerpc/include/asm/processor.h | 2
 arch/powerpc/include/asm/prom.h | 33
 arch/powerpc/include/asm/pte-book3e.h | 2
 arch/powerpc/include/asm/reg.h | 15
 arch/powerpc/include/asm/spu.h | 3
 arch/powerpc/include/asm/thread_info.h | 2
 arch/powerpc/include/asm/uprobes.h | 8
 arch/powerpc/include/uapi/asm/kvm.h | 86
 arch/powerpc/include/uapi/asm/socket.h | 2
 arch/powerpc/kernel/asm-offsets.c | 21
 arch/powerpc/kernel/eeh.c | 5
 arch/powerpc/kernel/epapr_paravirt.c | 1
 arch/powerpc/kernel/exceptions-64s.S | 30
 arch/powerpc/kernel/ibmebus.c | 14
 arch/powerpc/kernel/idle_power7.S | 2
 arch/powerpc/kernel/irq.c | 17
 arch/powerpc/kernel/kprobes.c | 2
 arch/powerpc/kernel/pci-common.c | 11
 arch/powerpc/kernel/prom.c | 9
 arch/powerpc/kernel/signal_32.c | 2
 arch/powerpc/kernel/smp.c | 12
 arch/powerpc/kernel/traps.c | 2
 arch/powerpc/kernel/vio.c | 69
 arch/powerpc/kvm/44x.c | 58
 arch/powerpc/kvm/44x_emulate.c | 8
 arch/powerpc/kvm/44x_tlb.c | 2
 arch/powerpc/kvm/Kconfig | 28
 arch/powerpc/kvm/Makefile | 29
 arch/powerpc/kvm/book3s.c | 257
 arch/powerpc/kvm/book3s.h | 34
 arch/powerpc/kvm/book3s_32_mmu.c | 73
 arch/powerpc/kvm/book3s_32_mmu_host.c | 16
 arch/powerpc/kvm/book3s_64_mmu.c | 181
 arch/powerpc/kvm/book3s_64_mmu_host.c | 106
 arch/powerpc/kvm/book3s_64_mmu_hv.c | 24
 arch/powerpc/kvm/book3s_64_vio_hv.c | 1
 arch/powerpc/kvm/book3s_emulate.c | 18
 arch/powerpc/kvm/book3s_exports.c | 5
 arch/powerpc/kvm/book3s_hv.c | 389
 arch/powerpc/kvm/book3s_hv_interrupts.S | 3
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 620
 arch/powerpc/kvm/book3s_interrupts.S | 32
 arch/powerpc/kvm/book3s_mmu_hpte.c | 66
 arch/powerpc/kvm/book3s_pr.c | 498
 arch/powerpc/kvm/book3s_pr_papr.c | 52
 arch/powerpc/kvm/book3s_rmhandlers.S | 32
 arch/powerpc/kvm/book3s_rtas.c | 1
 arch/powerpc/kvm/book3s_segment.S | 4
 arch/powerpc/kvm/book3s_xics.c | 7
 arch/powerpc/kvm/booke.c | 337
 arch/powerpc/kvm/booke.h | 29
 arch/powerpc/kvm/e500.c | 59
 arch/powerpc/kvm/e500.h | 2
 arch/powerpc/kvm/e500_emulate.c | 34
 arch/powerpc/kvm/e500_mmu.c | 4
 arch/powerpc/kvm/e500_mmu_host.c | 24
 arch/powerpc/kvm/e500mc.c | 58
 arch/powerpc/kvm/emulate.c | 12
 arch/powerpc/kvm/powerpc.c | 171
 arch/powerpc/kvm/trace.h | 429
 arch/powerpc/kvm/trace_booke.h | 177
 arch/powerpc/kvm/trace_pr.h | 297
 arch/powerpc/mm/dma-noncoherent.c | 4
 arch/powerpc/mm/hugetlbpage.c | 2
 arch/powerpc/mm/numa.c | 3
 arch/powerpc/mm/pgtable_32.c | 5
 arch/powerpc/mm/pgtable_64.c | 7
 arch/powerpc/net/bpf_jit_comp.c | 1
 arch/powerpc/platforms/512x/clock.c | 1
 arch/powerpc/platforms/512x/pdm360ng.c | 2
 arch/powerpc/platforms/82xx/mpc8272_ads.c | 2
 arch/powerpc/platforms/82xx/pq2fads.c | 2
 arch/powerpc/platforms/83xx/suspend.c | 2
 arch/powerpc/platforms/85xx/c293pcie.c | 1
 arch/powerpc/platforms/85xx/common.c | 2
 arch/powerpc/platforms/85xx/ppa8548.c | 1
 arch/powerpc/platforms/85xx/sgy_cts1000.c | 1
 arch/powerpc/platforms/85xx/smp.c | 1
 arch/powerpc/platforms/85xx/socrates_fpga_pic.c | 2
 arch/powerpc/platforms/86xx/pic.c | 1
 arch/powerpc/platforms/8xx/ep88xc.c | 2
 arch/powerpc/platforms/8xx/mpc86xads_setup.c | 2
 arch/powerpc/platforms/8xx/mpc885ads_setup.c | 2
 arch/powerpc/platforms/8xx/tqm8xx_setup.c | 1
 arch/powerpc/platforms/cell/celleb_scc_pciex.c | 6
 arch/powerpc/platforms/cell/celleb_scc_sio.c | 7
 arch/powerpc/platforms/cell/spider-pic.c | 7
 arch/powerpc/platforms/cell/spu_manage.c | 13
 arch/powerpc/platforms/cell/spu_syscalls.c | 5
 arch/powerpc/platforms/cell/spufs/coredump.c | 89
 arch/powerpc/platforms/cell/spufs/spufs.h | 3
 arch/powerpc/platforms/embedded6xx/flipper-pic.c | 1
 arch/powerpc/platforms/embedded6xx/hlwd-pic.c | 2
 arch/powerpc/platforms/fsl_uli1575.c | 12
 arch/powerpc/platforms/pasemi/gpio_mdio.c | 1
 arch/powerpc/platforms/powermac/low_i2c.c | 6
 arch/powerpc/platforms/powermac/pfunc_base.c | 1
 arch/powerpc/platforms/powermac/pic.c | 10
 arch/powerpc/platforms/powernv/opal-lpc.c | 1
 arch/powerpc/platforms/powernv/opal.c | 1
 arch/powerpc/platforms/powernv/setup.c | 1
 arch/powerpc/platforms/pseries/dlpar.c | 43
 arch/powerpc/platforms/pseries/event_sources.c | 8
 arch/powerpc/platforms/pseries/hotplug-memory.c | 2
 arch/powerpc/platforms/pseries/nvram.c | 2
 arch/powerpc/platforms/pseries/suspend.c | 2
 arch/powerpc/sysdev/cpm_common.c | 1
 arch/powerpc/sysdev/ehv_pic.c | 1
 arch/powerpc/sysdev/fsl_gtm.c | 11
 arch/powerpc/sysdev/fsl_pci.c | 2
 arch/powerpc/sysdev/fsl_pmc.c | 1
 arch/powerpc/sysdev/fsl_rio.c | 2
 arch/powerpc/sysdev/fsl_rmu.c | 1
 arch/powerpc/sysdev/mpic.c | 8
 arch/powerpc/sysdev/mpic_msgr.c | 6
 arch/powerpc/sysdev/mpic_msi.c | 8
 arch/powerpc/sysdev/mpic_timer.c | 2
 arch/powerpc/sysdev/of_rtc.c | 1
 arch/powerpc/sysdev/ppc4xx_ocm.c | 1
 arch/powerpc/sysdev/ppc4xx_soc.c | 1
 arch/powerpc/sysdev/xilinx_intc.c | 2
 141 files changed, 3602 insertions(+), 1881 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index de5c61d5db2c..b44b52c0a8f0 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -85,6 +85,7 @@ config GENERIC_HWEIGHT
 config PPC
 	bool
 	default y
+	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select BINFMT_ELF
 	select OF
 	select OF_EARLY_FLATTREE
@@ -106,7 +107,6 @@ config PPC
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_API_DEBUG
-	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_OPROFILE
 	select HAVE_DEBUG_KMEMLEAK
 	select GENERIC_ATOMIC64 if PPC32
@@ -138,6 +138,7 @@ config PPC
 	select OLD_SIGSUSPEND
 	select OLD_SIGACTION if PPC32
 	select HAVE_DEBUG_STACKOVERFLOW
+	select HAVE_IRQ_EXIT_ON_IRQ_STACK
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
diff --git a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
index 4c617bf8cdb2..4f6e48277c46 100644
--- a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
@@ -223,13 +223,13 @@
 		reg = <0xe2000 0x1000>;
 	};
 
-/include/ "qoriq-dma-0.dtsi"
+/include/ "elo3-dma-0.dtsi"
 	dma@100300 {
 		fsl,iommu-parent = <&pamu0>;
 		fsl,liodn-reg = <&guts 0x580>; /* DMA1LIODNR */
 	};
 
-/include/ "qoriq-dma-1.dtsi"
+/include/ "elo3-dma-1.dtsi"
 	dma@101300 {
 		fsl,iommu-parent = <&pamu0>;
 		fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */
diff --git a/arch/powerpc/boot/dts/fsl/elo3-dma-0.dtsi b/arch/powerpc/boot/dts/fsl/elo3-dma-0.dtsi
new file mode 100644
index 000000000000..3c210e0d5201
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/elo3-dma-0.dtsi
@@ -0,0 +1,82 @@
+/*
+ * QorIQ Elo3 DMA device tree stub [ controller @ offset 0x100000 ]
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+dma0: dma@100300 {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "fsl,elo3-dma";
+	reg = <0x100300 0x4>,
+	      <0x100600 0x4>;
+	ranges = <0x0 0x100100 0x500>;
+	dma-channel@0 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x0 0x80>;
+		interrupts = <28 2 0 0>;
+	};
+	dma-channel@80 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x80 0x80>;
+		interrupts = <29 2 0 0>;
+	};
+	dma-channel@100 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x100 0x80>;
+		interrupts = <30 2 0 0>;
+	};
+	dma-channel@180 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x180 0x80>;
+		interrupts = <31 2 0 0>;
+	};
+	dma-channel@300 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x300 0x80>;
+		interrupts = <76 2 0 0>;
+	};
+	dma-channel@380 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x380 0x80>;
+		interrupts = <77 2 0 0>;
+	};
+	dma-channel@400 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x400 0x80>;
+		interrupts = <78 2 0 0>;
+	};
+	dma-channel@480 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x480 0x80>;
+		interrupts = <79 2 0 0>;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/elo3-dma-1.dtsi b/arch/powerpc/boot/dts/fsl/elo3-dma-1.dtsi
new file mode 100644
index 000000000000..cccf3bb38224
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/elo3-dma-1.dtsi
@@ -0,0 +1,82 @@
+/*
+ * QorIQ Elo3 DMA device tree stub [ controller @ offset 0x101000 ]
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+dma1: dma@101300 {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "fsl,elo3-dma";
+	reg = <0x101300 0x4>,
+	      <0x101600 0x4>;
+	ranges = <0x0 0x101100 0x500>;
+	dma-channel@0 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x0 0x80>;
+		interrupts = <32 2 0 0>;
+	};
+	dma-channel@80 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x80 0x80>;
+		interrupts = <33 2 0 0>;
+	};
+	dma-channel@100 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x100 0x80>;
+		interrupts = <34 2 0 0>;
+	};
+	dma-channel@180 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x180 0x80>;
+		interrupts = <35 2 0 0>;
+	};
+	dma-channel@300 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x300 0x80>;
+		interrupts = <80 2 0 0>;
+	};
+	dma-channel@380 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x380 0x80>;
+		interrupts = <81 2 0 0>;
+	};
+	dma-channel@400 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x400 0x80>;
+		interrupts = <82 2 0 0>;
+	};
+	dma-channel@480 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x480 0x80>;
+		interrupts = <83 2 0 0>;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
index 510afa362de1..4143a9733cd0 100644
--- a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
@@ -387,8 +387,8 @@
 		reg = <0xea000 0x4000>;
 	};
 
-/include/ "qoriq-dma-0.dtsi"
-/include/ "qoriq-dma-1.dtsi"
+/include/ "elo3-dma-0.dtsi"
+/include/ "elo3-dma-1.dtsi"
 
 /include/ "qoriq-espi-0.dtsi"
 	spi@110000 {
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 704e6f10ae80..d8f9d2f18a23 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -2,4 +2,5 @@
 generic-y += clkdev.h
 generic-y += rwsem.h
 generic-y += trace_clock.h
+generic-y += preempt.h
 generic-y += vtime.h
\ No newline at end of file
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h
index 9b198d1b3b2b..856f8deb557a 100644
--- a/arch/powerpc/include/asm/disassemble.h
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -77,4 +77,8 @@ static inline unsigned int get_d(u32 inst)
 	return inst & 0xffff;
 }
 
+static inline unsigned int get_oc(u32 inst)
+{
+	return (inst >> 11) & 0x7fff;
+}
 #endif /* __ASM_PPC_DISASSEMBLE_H__ */
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index cca12f084842..894662a5d4d5 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -198,12 +198,27 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 	cmpwi	r10,0;							\
 	bne	do_kvm_##n
 
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+/*
+ * If hv is possible, interrupts come into to the hv version
+ * of the kvmppc_interrupt code, which then jumps to the PR handler,
+ * kvmppc_interrupt_pr, if the guest is a PR guest.
+ */
+#define kvmppc_interrupt kvmppc_interrupt_hv
+#else
+#define kvmppc_interrupt kvmppc_interrupt_pr
+#endif
+
 #define __KVM_HANDLER(area, h, n)					\
 do_kvm_##n:								\
 	BEGIN_FTR_SECTION_NESTED(947)					\
 	ld	r10,area+EX_CFAR(r13);					\
 	std	r10,HSTATE_CFAR(r13);					\
 	END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947);		\
+	BEGIN_FTR_SECTION_NESTED(948)					\
+	ld	r10,area+EX_PPR(r13);					\
+	std	r10,HSTATE_PPR(r13);					\
+	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\
 	ld	r10,area+EX_R10(r13);					\
 	stw	r9,HSTATE_SCRATCH1(r13);				\
 	ld	r9,area+EX_R9(r13);					\
@@ -217,6 +232,10 @@ do_kvm_##n:								\
 	ld	r10,area+EX_R10(r13);					\
 	beq	89f;							\
 	stw	r9,HSTATE_SCRATCH1(r13);				\
+	BEGIN_FTR_SECTION_NESTED(948)					\
+	ld	r9,area+EX_PPR(r13);					\
+	std	r9,HSTATE_PPR(r13);					\
+	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\
 	ld	r9,area+EX_R9(r13);					\
 	std	r12,HSTATE_SCRATCH0(r13);				\
 	li	r12,n;							\
@@ -236,7 +255,7 @@ do_kvm_##n:								\
 #define KVM_HANDLER_SKIP(area, h, n)
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 #define KVMTEST_PR(n)			__KVMTEST(n)
 #define KVM_HANDLER_PR(area, h, n)	__KVM_HANDLER(area, h, n)
 #define KVM_HANDLER_PR_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n)
diff --git a/arch/powerpc/include/asm/fsl_ifc.h b/arch/powerpc/include/asm/fsl_ifc.h
index b8a4b9bc50b3..f49ddb1b2273 100644
--- a/arch/powerpc/include/asm/fsl_ifc.h
+++ b/arch/powerpc/include/asm/fsl_ifc.h
@@ -93,6 +93,7 @@
 #define CSOR_NAND_PGS_512		0x00000000
 #define CSOR_NAND_PGS_2K		0x00080000
 #define CSOR_NAND_PGS_4K		0x00100000
+#define CSOR_NAND_PGS_8K		0x00180000
 /* Spare region Size */
 #define CSOR_NAND_SPRZ_MASK		0x0000E000
 #define CSOR_NAND_SPRZ_SHIFT		13
@@ -102,6 +103,7 @@
 #define CSOR_NAND_SPRZ_210		0x00006000
 #define CSOR_NAND_SPRZ_218		0x00008000
 #define CSOR_NAND_SPRZ_224		0x0000A000
+#define CSOR_NAND_SPRZ_CSOR_EXT		0x0000C000
 /* Pages Per Block */
 #define CSOR_NAND_PB_MASK		0x00000700
 #define CSOR_NAND_PB_SHIFT		8
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index ae098c438f00..f016bb699b5f 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -19,7 +19,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:\n\t"
+	asm_volatile_goto("1:\n\t"
 		 "nop\n\t"
 		 ".pushsection __jump_table, \"aw\"\n\t"
 		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 851bac7afa4b..1bd92fd43cfb 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -123,6 +123,8 @@
 #define BOOK3S_HFLAG_SLB			0x2
 #define BOOK3S_HFLAG_PAIRED_SINGLE		0x4
 #define BOOK3S_HFLAG_NATIVE_PS			0x8
+#define BOOK3S_HFLAG_MULTI_PGSIZE		0x10
+#define BOOK3S_HFLAG_NEW_TLBIE			0x20
 
 #define RESUME_FLAG_NV          (1<<0)  /* Reload guest nonvolatile state? */
 #define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
@@ -136,6 +138,8 @@
 #define KVM_GUEST_MODE_NONE	0
 #define KVM_GUEST_MODE_GUEST	1
 #define KVM_GUEST_MODE_SKIP	2
+#define KVM_GUEST_MODE_GUEST_HV	3
+#define KVM_GUEST_MODE_HOST_HV	4
 
 #define KVM_INST_FETCH_FAILED	-1
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index fa19e2f1a874..4a594b76674d 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -58,16 +58,18 @@ struct hpte_cache {
 	struct hlist_node list_pte_long;
 	struct hlist_node list_vpte;
 	struct hlist_node list_vpte_long;
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct hlist_node list_vpte_64k;
+#endif
 	struct rcu_head rcu_head;
 	u64 host_vpn;
 	u64 pfn;
 	ulong slot;
 	struct kvmppc_pte pte;
+	int pagesize;
 };
 
 struct kvmppc_vcpu_book3s {
-	struct kvm_vcpu vcpu;
-	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
 	struct {
 		u64 esid;
@@ -99,6 +101,9 @@ struct kvmppc_vcpu_book3s {
 	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
 	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
 	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
+#endif
 	int hpte_cache_count;
 	spinlock_t mmu_lock;
 };
@@ -107,8 +112,9 @@ struct kvmppc_vcpu_book3s {
 #define CONTEXT_GUEST		1
 #define CONTEXT_GUEST_END	2
 
-#define VSID_REAL	0x0fffffffffc00000ULL
-#define VSID_BAT	0x0fffffffffb00000ULL
+#define VSID_REAL	0x07ffffffffc00000ULL
+#define VSID_BAT	0x07ffffffffb00000ULL
+#define VSID_64K	0x0800000000000000ULL
 #define VSID_1T		0x1000000000000000ULL
 #define VSID_REAL_DR	0x2000000000000000ULL
 #define VSID_REAL_IR	0x4000000000000000ULL
@@ -118,11 +124,12 @@ extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask)
 extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
 extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
 extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
-extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
 extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
-extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
+extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
+			       bool iswrite);
+extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
@@ -134,6 +141,7 @@ extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
 
 extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
 extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
 extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
@@ -151,7 +159,8 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 			   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
-extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+			bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
 			unsigned long *rmap, long pte_index, int realmode);
 extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
@@ -172,6 +181,8 @@ extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
 			unsigned long *hpret);
 extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
 			struct kvm_memory_slot *memslot, unsigned long *map);
+extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
+			unsigned long mask);
 
 extern void kvmppc_entry_trampoline(void);
 extern void kvmppc_hv_entry_trampoline(void);
@@ -184,11 +195,9 @@ extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
-	return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
+	return vcpu->arch.book3s;
 }
 
-extern void kvm_return_point(void);
-
 /* Also add subarch specific defines */
 
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
@@ -198,203 +207,6 @@ extern void kvm_return_point(void);
 #include <asm/kvm_book3s_64.h>
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_PR
-
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
-	return to_book3s(vcpu)->hior;
-}
-
-static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
-			unsigned long pending_now, unsigned long old_pending)
-{
-	if (pending_now)
-		vcpu->arch.shared->int_pending = 1;
-	else if (old_pending)
-		vcpu->arch.shared->int_pending = 0;
-}
-
-static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
-{
-	if ( num < 14 ) {
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		svcpu->gpr[num] = val;
-		svcpu_put(svcpu);
-		to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
-	} else
-		vcpu->arch.gpr[num] = val;
-}
-
-static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
-{
-	if ( num < 14 ) {
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		ulong r = svcpu->gpr[num];
-		svcpu_put(svcpu);
-		return r;
-	} else
-		return vcpu->arch.gpr[num];
-}
-
-static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->cr = val;
-	svcpu_put(svcpu);
-	to_book3s(vcpu)->shadow_vcpu->cr = val;
-}
-
-static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-	r = svcpu->cr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->xer = val;
-	to_book3s(vcpu)->shadow_vcpu->xer = val;
-	svcpu_put(svcpu);
-}
-
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-	r = svcpu->xer;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->ctr = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->ctr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->lr = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->lr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->pc = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->pc;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-	ulong pc = kvmppc_get_pc(vcpu);
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-
-	/* Load the instruction manually if it failed to do so in the
-	 * exit path */
-	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
-		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
-
-	r = svcpu->last_inst;
-	svcpu_put(svcpu);
-	return r;
-}
-
-/*
- * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
- * Because the sc instruction sets SRR0 to point to the following
- * instruction, we have to fetch from pc - 4.
- */
-static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
-{
-	ulong pc = kvmppc_get_pc(vcpu) - 4;
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-
-	/* Load the instruction manually if it failed to do so in the
-	 * exit path */
-	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
-		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
-
-	r = svcpu->last_inst;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->fault_dar;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
-{
-	ulong crit_raw = vcpu->arch.shared->critical;
-	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
-	bool crit;
-
-	/* Truncate crit indicators in 32 bit mode */
-	if (!(vcpu->arch.shared->msr & MSR_SF)) {
-		crit_raw &= 0xffffffff;
-		crit_r1 &= 0xffffffff;
-	}
-
-	/* Critical section when crit == r1 */
-	crit = (crit_raw == crit_r1);
-	/* ... and we're in supervisor mode */
-	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
-
-	return crit;
-}
-#else /* CONFIG_KVM_BOOK3S_PR */
-
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
-static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
-			unsigned long pending_now, unsigned long old_pending)
-{
-}
-
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
 	vcpu->arch.gpr[num] = val;
@@ -489,12 +301,6 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 	return vcpu->arch.fault_dar;
 }
 
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
-{
-	return false;
-}
-#endif
-
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
 #define OSI_SC_MAGIC_R3			0x113724FA
diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h
index ce0ef6ce8f86..c720e0b3238d 100644
--- a/arch/powerpc/include/asm/kvm_book3s_32.h
+++ b/arch/powerpc/include/asm/kvm_book3s_32.h
@@ -22,7 +22,7 @@
 
 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
 {
-	return to_book3s(vcpu)->shadow_vcpu;
+	return vcpu->arch.shadow_vcpu;
 }
 
 static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 86d638a3b359..bf0fa8b0a883 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -20,7 +20,7 @@
 #ifndef __ASM_KVM_BOOK3S_64_H__
 #define __ASM_KVM_BOOK3S_64_H__
 
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
 {
 	preempt_disable();
@@ -35,7 +35,7 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
 
 #define SPAPR_TCE_SHIFT		12
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
 extern unsigned long kvm_rma_pages;
 #endif
@@ -278,7 +278,7 @@ static inline int is_vrma_hpte(unsigned long hpte_v)
 		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
 }
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 /*
  * Note modification of an HPTE; set the HPTE modified bit
  * if anyone is interested.
@@ -289,6 +289,6 @@ static inline void note_hpte_modification(struct kvm *kvm,
 	if (atomic_read(&kvm->arch.hpte_mod_interest))
 		rev->guest_rpte |= HPTE_GR_MODIFIED;
 }
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 9039d3c97eec..0bd9348a4db9 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -83,7 +83,7 @@ struct kvmppc_host_state {
 	u8 restore_hid5;
 	u8 napping;
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	u8 hwthread_req;
 	u8 hwthread_state;
 	u8 host_ipi;
@@ -101,6 +101,7 @@ struct kvmppc_host_state {
 #endif
 #ifdef CONFIG_PPC_BOOK3S_64
 	u64 cfar;
+	u64 ppr;
 #endif
 };
 
@@ -108,14 +109,14 @@ struct kvmppc_book3s_shadow_vcpu {
 	ulong gpr[14];
 	u32 cr;
 	u32 xer;
-
-	u32 fault_dsisr;
-	u32 last_inst;
 	ulong ctr;
 	ulong lr;
 	ulong pc;
+
 	ulong shadow_srr1;
 	ulong fault_dar;
+	u32 fault_dsisr;
+	u32 last_inst;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 	u32 sr[16];			/* Guest SRs */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index d3c1eb34c986..dd8f61510dfd 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -26,7 +26,12 @@
 /* LPIDs we support with this build -- runtime limit may be lower */
 #define KVMPPC_NR_LPIDS                        64
 
 #define KVMPPC_INST_EHPRIV	0x7c00021c
+#define EHPRIV_OC_SHIFT		11
+/* "ehpriv 1" : ehpriv with OC = 1 is used for debug emulation */
+#define EHPRIV_OC_DEBUG		1
+#define KVMPPC_INST_EHPRIV_DEBUG	(KVMPPC_INST_EHPRIV | \
+					 (EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT))
 
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 33283532e9d8..237d1d25b448 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -63,20 +63,17 @@ extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 
 #endif
 
-/* We don't currently support large pages. */
-#define KVM_HPAGE_GFN_SHIFT(x)	0
-#define KVM_NR_PAGE_SIZES	1
-#define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)
-
 #define HPTEG_CACHE_NUM			(1 << 15)
 #define HPTEG_HASH_BITS_PTE		13
 #define HPTEG_HASH_BITS_PTE_LONG	12
 #define HPTEG_HASH_BITS_VPTE		13
 #define HPTEG_HASH_BITS_VPTE_LONG	5
+#define HPTEG_HASH_BITS_VPTE_64K	11
 #define HPTEG_HASH_NUM_PTE		(1 << HPTEG_HASH_BITS_PTE)
 #define HPTEG_HASH_NUM_PTE_LONG		(1 << HPTEG_HASH_BITS_PTE_LONG)
 #define HPTEG_HASH_NUM_VPTE		(1 << HPTEG_HASH_BITS_VPTE)
 #define HPTEG_HASH_NUM_VPTE_LONG	(1 << HPTEG_HASH_BITS_VPTE_LONG)
+#define HPTEG_HASH_NUM_VPTE_64K		(1 << HPTEG_HASH_BITS_VPTE_64K)
 
 /* Physical Address Mask - allowed range of real mode RAM access */
 #define KVM_PAM			0x0fffffffffffffffULL
@@ -89,6 +86,9 @@ struct lppaca;
 struct slb_shadow;
 struct dtl_entry;
 
+struct kvmppc_vcpu_book3s;
+struct kvmppc_book3s_shadow_vcpu;
+
 struct kvm_vm_stat {
 	u32 remote_tlb_flush;
 };
@@ -224,15 +224,15 @@ struct revmap_entry {
 #define KVMPPC_GOT_PAGE		0x80
 
 struct kvm_arch_memory_slot {
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	unsigned long *rmap;
 	unsigned long *slot_phys;
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 };
 
 struct kvm_arch {
 	unsigned int lpid;
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	unsigned long hpt_virt;
 	struct revmap_entry *revmap;
 	unsigned int host_lpid;
@@ -256,7 +256,10 @@ struct kvm_arch {
 	cpumask_t need_tlb_flush;
 	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
 	int hpt_cma_alloc;
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+	struct mutex hpt_mutex;
+#endif
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct list_head spapr_tce_tables;
 	struct list_head rtas_tokens;
@@ -267,6 +270,7 @@ struct kvm_arch {
 #ifdef CONFIG_KVM_XICS
 	struct kvmppc_xics *xics;
 #endif
+	struct kvmppc_ops *kvm_ops;
 };
 
 /*
@@ -294,6 +298,10 @@ struct kvmppc_vcore {
 	u64 stolen_tb;
 	u64 preempt_tb;
 	struct kvm_vcpu *runner;
+	u64 tb_offset;		/* guest timebase - host timebase */
+	ulong lpcr;
+	u32 arch_compat;
+	ulong pcr;
 };
 
 #define VCORE_ENTRY_COUNT(vc)	((vc)->entry_exit_count & 0xff)
@@ -328,6 +336,7 @@ struct kvmppc_pte {
 	bool may_read		: 1;
 	bool may_write		: 1;
 	bool may_execute	: 1;
+	u8 page_size;		/* MMU_PAGE_xxx */
 };
 
 struct kvmppc_mmu {
@@ -340,7 +349,8 @@ struct kvmppc_mmu {
 	/* book3s */
 	void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
 	u32  (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
-	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
+	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
+		      struct kvmppc_pte *pte, bool data, bool iswrite);
 	void (*reset_msr)(struct kvm_vcpu *vcpu);
 	void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
 	int  (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
@@ -360,6 +370,7 @@ struct kvmppc_slb {
 	bool large	: 1;	/* PTEs are 16MB */
 	bool tb		: 1;	/* 1TB segment */
 	bool class	: 1;
+	u8 base_page_size;	/* MMU_PAGE_xxx */
 };
 
 # ifdef CONFIG_PPC_FSL_BOOK3E
@@ -377,17 +388,6 @@ struct kvmppc_slb {
 #define KVMPPC_EPR_USER		1 /* exit to userspace to fill EPR */
 #define KVMPPC_EPR_KERNEL	2 /* in-kernel irqchip */
 
-struct kvmppc_booke_debug_reg {
-	u32 dbcr0;
-	u32 dbcr1;
-	u32 dbcr2;
-#ifdef CONFIG_KVM_E500MC
-	u32 dbcr4;
-#endif
-	u64 iac[KVMPPC_BOOKE_MAX_IAC];
-	u64 dac[KVMPPC_BOOKE_MAX_DAC];
-};
-
 #define KVMPPC_IRQ_DEFAULT	0
 #define KVMPPC_IRQ_MPIC		1
 #define KVMPPC_IRQ_XICS		2
@@ -402,6 +402,10 @@ struct kvm_vcpu_arch {
 	int slb_max;		/* 1 + index of last valid entry in slb[] */
 	int slb_nr;		/* total number of entries in SLB */
 	struct kvmppc_mmu mmu;
+	struct kvmppc_vcpu_book3s *book3s;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_32
+	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 #endif
 
 	ulong gpr[32];
@@ -463,6 +467,8 @@ struct kvm_vcpu_arch {
 	u32 ctrl;
 	ulong dabr;
 	ulong cfar;
+	ulong ppr;
+	ulong shadow_srr1;
 #endif
 	u32 vrsave; /* also USPRG0 */
 	u32 mmucr;
@@ -498,6 +504,8 @@ struct kvm_vcpu_arch {
 
 	u64 mmcr[3];
 	u32 pmc[8];
+	u64 siar;
+	u64 sdar;
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct mutex exit_timing_lock;
@@ -531,7 +539,10 @@ struct kvm_vcpu_arch {
 	u32 eptcfg;
 	u32 epr;
 	u32 crit_save;
-	struct kvmppc_booke_debug_reg dbg_reg;
+	/* guest debug registers*/
+	struct debug_reg dbg_reg;
+	/* hardware visible debug registers when in guest state */
+	struct debug_reg shadow_dbg_reg;
 #endif
 	gpa_t paddr_accessed;
 	gva_t vaddr_accessed;
@@ -582,7 +593,7 @@ struct kvm_vcpu_arch {
 	struct kvmppc_icp *icp; /* XICS presentation controller */
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	struct kvm_vcpu_arch_shared shregs;
 
 	unsigned long pgfault_addr;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index b15554a26c20..c8317fbf92c4 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -106,13 +106,6 @@ extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
 				       struct kvm_interrupt *irq);
 extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
-
-extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                                  unsigned int op, int *advance);
-extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
-				     ulong val);
-extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
-				     ulong *val);
 extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
 
 extern int kvmppc_booke_init(void);
@@ -135,17 +128,17 @@ extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 				struct kvm_create_spapr_tce *args);
 extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			     unsigned long ioba, unsigned long tce);
-extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
-				struct kvm_allocate_rma *rma);
 extern struct kvm_rma_info *kvm_alloc_rma(void);
 extern void kvm_release_rma(struct kvm_rma_info *ri);
 extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
 extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
-extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+extern void kvmppc_core_free_memslot(struct kvm *kvm,
+				     struct kvm_memory_slot *free,
 				     struct kvm_memory_slot *dont);
-extern int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+extern int kvmppc_core_create_memslot(struct kvm *kvm,
+				      struct kvm_memory_slot *slot,
 				      unsigned long npages);
 extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
@@ -177,6 +170,72 @@ extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
 extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
 extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
 
+union kvmppc_one_reg {
+	u32	wval;
+	u64	dval;
+	vector128 vval;
+	u64	vsxval[2];
+	struct {
+		u64	addr;
+		u64	length;
+	}	vpaval;
+};
+
+struct kvmppc_ops {
+	struct module *owner;
+	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+			   union kvmppc_one_reg *val);
+	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+			   union kvmppc_one_reg *val);
+	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
+	void (*vcpu_put)(struct kvm_vcpu *vcpu);
+	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
+	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
+	void (*vcpu_free)(struct kvm_vcpu *vcpu);
+	int (*check_requests)(struct kvm_vcpu *vcpu);
+	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
+	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
+	int (*prepare_memory_region)(struct kvm *kvm,
+				     struct kvm_memory_slot *memslot,
+				     struct kvm_userspace_memory_region *mem);
+	void (*commit_memory_region)(struct kvm *kvm,
+				     struct kvm_userspace_memory_region *mem,
+				     const struct kvm_memory_slot *old);
+	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
+	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
+			       unsigned long end);
+	int (*age_hva)(struct kvm *kvm, unsigned long hva);
+	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
+	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
+	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
+	void (*free_memslot)(struct kvm_memory_slot *free,
+			     struct kvm_memory_slot *dont);
+	int (*create_memslot)(struct kvm_memory_slot *slot,
+			      unsigned long npages);
+	int (*init_vm)(struct kvm *kvm);
+	void (*destroy_vm)(struct kvm *kvm);
+	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
+	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			  unsigned int inst, int *advance);
+	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
+	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
+	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
+	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
+			      unsigned long arg);
+
+};
+
+extern struct kvmppc_ops *kvmppc_hv_ops;
+extern struct kvmppc_ops *kvmppc_pr_ops;
+
+static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
+{
+	return kvm->arch.kvm_ops == kvmppc_hv_ops;
+}
+
 /*
  * Cuts out inst bits with ordering according to spec.
  * That means the leftmost bit is zero. All given bits are included.
@@ -210,17 +269,6 @@ static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
 	return r;
 }
 
-union kvmppc_one_reg {
-	u32	wval;
-	u64	dval;
-	vector128 vval;
-	u64	vsxval[2];
-	struct {
-		u64	addr;
-		u64	length;
-	} vpaval;
-};
-
 #define one_reg_size(id) \
 	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
 
@@ -245,10 +293,10 @@ union kvmppc_one_reg {
 	__v;					\
 })
 
-void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 
-void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
@@ -260,7 +308,7 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
 
 struct openpic;
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 extern void kvm_cma_reserve(void) __init;
 static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 {
@@ -269,10 +317,10 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 
 static inline u32 kvmppc_get_xics_latch(void)
 {
-	u32 xirr = get_paca()->kvm_hstate.saved_xirr;
+	u32 xirr;
 
+	xirr = get_paca()->kvm_hstate.saved_xirr;
 	get_paca()->kvm_hstate.saved_xirr = 0;
-
 	return xirr;
 }
 
@@ -281,7 +329,10 @@ static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
 	paca[cpu].kvm_hstate.host_ipi = host_ipi;
 }
 
-extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
+}
 
 #else
 static inline void __init kvm_cma_reserve(void)
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index a5954cebbc55..b6ea9e068c13 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -166,7 +166,7 @@ struct paca_struct {
166 struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */ 166 struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */
167 167
168#ifdef CONFIG_KVM_BOOK3S_HANDLER 168#ifdef CONFIG_KVM_BOOK3S_HANDLER
169#ifdef CONFIG_KVM_BOOK3S_PR 169#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
170 /* We use this to store guest state in */ 170 /* We use this to store guest state in */
171 struct kvmppc_book3s_shadow_vcpu shadow_vcpu; 171 struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
172#endif 172#endif
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index f65e27b09bd3..16cb92d215d2 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -91,7 +91,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
91 if (!pte) 91 if (!pte)
92 return NULL; 92 return NULL;
93 page = virt_to_page(pte); 93 page = virt_to_page(pte);
94 pgtable_page_ctor(page); 94 if (!pgtable_page_ctor(page)) {
95 __free_page(page);
96 return NULL;
97 }
95 return page; 98 return page;
96} 99}
97 100
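pgtable_page_ctor() can fail now that it may have to allocate a split page-table lock, so pte_alloc_one() has to back out instead of handing back a half-initialized page. For reference, the free side keeps the mirror-image ordering, roughly as in the existing pte_free():

        /* Sketch: undo the ctor before the page goes back to the allocator. */
        static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
        {
                pgtable_page_dtor(ptepage);
                __free_page(ptepage);
        }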
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 7794b2b04eb2..fc14a38c7ccf 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -208,6 +208,7 @@ struct debug_reg {
208 208
209struct thread_struct { 209struct thread_struct {
210 unsigned long ksp; /* Kernel stack pointer */ 210 unsigned long ksp; /* Kernel stack pointer */
211
211#ifdef CONFIG_PPC64 212#ifdef CONFIG_PPC64
212 unsigned long ksp_vsid; 213 unsigned long ksp_vsid;
213#endif 214#endif
@@ -221,6 +222,7 @@ struct thread_struct {
221 void *pgdir; /* root of page-table tree */ 222 void *pgdir; /* root of page-table tree */
222 unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */ 223 unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
223#endif 224#endif
225 /* Debug Registers */
224 struct debug_reg debug; 226 struct debug_reg debug;
225 struct thread_fp_state fp_state; 227 struct thread_fp_state fp_state;
226 struct thread_fp_state *fp_save_area; 228 struct thread_fp_state *fp_save_area;
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index bf09e5a065b8..d977b9b78696 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -1,4 +1,3 @@
1#include <linux/of.h> /* linux/of.h gets to determine #include ordering */
2#ifndef _POWERPC_PROM_H 1#ifndef _POWERPC_PROM_H
3#define _POWERPC_PROM_H 2#define _POWERPC_PROM_H
4#ifdef __KERNEL__ 3#ifdef __KERNEL__
@@ -20,21 +19,17 @@
20#include <asm/irq.h> 19#include <asm/irq.h>
21#include <linux/atomic.h> 20#include <linux/atomic.h>
22 21
23#define HAVE_ARCH_DEVTREE_FIXUPS 22/* These includes should be removed once implicit includes are cleaned up. */
23#include <linux/of.h>
24#include <linux/of_fdt.h>
25#include <linux/of_address.h>
26#include <linux/of_irq.h>
27#include <linux/platform_device.h>
24 28
25/* 29/*
26 * OF address retrieval & translation 30 * OF address retrieval & translation
27 */ 31 */
28 32
29/* Translate a DMA address from device space to CPU space */
30extern u64 of_translate_dma_address(struct device_node *dev,
31 const __be32 *in_addr);
32
33#ifdef CONFIG_PCI
34extern unsigned long pci_address_to_pio(phys_addr_t address);
35#define pci_address_to_pio pci_address_to_pio
36#endif /* CONFIG_PCI */
37
38/* Parse the ibm,dma-window property of an OF node into the busno, phys and 33/* Parse the ibm,dma-window property of an OF node into the busno, phys and
39 * size parameters. 34 * size parameters.
40 */ 35 */
@@ -44,13 +39,6 @@ void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window,
44 39
45extern void kdump_move_device_tree(void); 40extern void kdump_move_device_tree(void);
46 41
47#ifdef CONFIG_NUMA
48extern int of_node_to_nid(struct device_node *device);
49#else
50static inline int of_node_to_nid(struct device_node *device) { return 0; }
51#endif
52#define of_node_to_nid of_node_to_nid
53
54extern void of_instantiate_rtc(void); 42extern void of_instantiate_rtc(void);
55 43
56extern int of_get_ibm_chip_id(struct device_node *np); 44extern int of_get_ibm_chip_id(struct device_node *np);
@@ -140,14 +128,5 @@ struct of_drconf_cell {
140 */ 128 */
141extern unsigned char ibm_architecture_vec[]; 129extern unsigned char ibm_architecture_vec[];
142 130
143/* These includes are put at the bottom because they may contain things
144 * that are overridden by this file. Ideally they shouldn't be included
145 * by this file, but there are a bunch of .c files that currently depend
146 * on it. Eventually they will be cleaned up. */
147#include <linux/of_fdt.h>
148#include <linux/of_address.h>
149#include <linux/of_irq.h>
150#include <linux/platform_device.h>
151
152#endif /* __KERNEL__ */ 131#endif /* __KERNEL__ */
153#endif /* _POWERPC_PROM_H */ 132#endif /* _POWERPC_PROM_H */
diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/pte-book3e.h
index 0156702ba24e..576ad88104cb 100644
--- a/arch/powerpc/include/asm/pte-book3e.h
+++ b/arch/powerpc/include/asm/pte-book3e.h
@@ -40,7 +40,7 @@
40#define _PAGE_U1 0x010000 40#define _PAGE_U1 0x010000
41#define _PAGE_U0 0x020000 41#define _PAGE_U0 0x020000
42#define _PAGE_ACCESSED 0x040000 42#define _PAGE_ACCESSED 0x040000
43#define _PAGE_LENDIAN 0x080000 43#define _PAGE_ENDIAN 0x080000
44#define _PAGE_GUARDED 0x100000 44#define _PAGE_GUARDED 0x100000
45#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */ 45#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */
46#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */ 46#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 126f6e98f84d..5c45787d551e 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -248,6 +248,7 @@
248#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */ 248#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
249#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */ 249#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
250#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */ 250#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */
251#define SPRN_TBU40 0x11E /* Timebase upper 40 bits (hyper, R/W) */
251#define SPRN_SPURR 0x134 /* Scaled PURR */ 252#define SPRN_SPURR 0x134 /* Scaled PURR */
252#define SPRN_HSPRG0 0x130 /* Hypervisor Scratch 0 */ 253#define SPRN_HSPRG0 0x130 /* Hypervisor Scratch 0 */
253#define SPRN_HSPRG1 0x131 /* Hypervisor Scratch 1 */ 254#define SPRN_HSPRG1 0x131 /* Hypervisor Scratch 1 */
@@ -288,6 +289,7 @@
288#define LPCR_ISL (1ul << (63-2)) 289#define LPCR_ISL (1ul << (63-2))
289#define LPCR_VC_SH (63-2) 290#define LPCR_VC_SH (63-2)
290#define LPCR_DPFD_SH (63-11) 291#define LPCR_DPFD_SH (63-11)
292#define LPCR_DPFD (7ul << LPCR_DPFD_SH)
291#define LPCR_VRMASD (0x1ful << (63-16)) 293#define LPCR_VRMASD (0x1ful << (63-16))
292#define LPCR_VRMA_L (1ul << (63-12)) 294#define LPCR_VRMA_L (1ul << (63-12))
293#define LPCR_VRMA_LP0 (1ul << (63-15)) 295#define LPCR_VRMA_LP0 (1ul << (63-15))
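With LPCR_DPFD defined next to the existing shift, both halves of the usual read-modify-write pair are available. A hedged sketch of updating the 3-bit default prefetch depth field:

        /* Sketch: clear the DPFD field, then insert a new depth value. */
        unsigned long lpcr = mfspr(SPRN_LPCR);
        lpcr = (lpcr & ~LPCR_DPFD) | (3ul << LPCR_DPFD_SH);
        mtspr(SPRN_LPCR, lpcr);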
@@ -304,6 +306,7 @@
304#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */ 306#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */
305#define LPCR_MER 0x00000800 /* Mediated External Exception */ 307#define LPCR_MER 0x00000800 /* Mediated External Exception */
306#define LPCR_MER_SH 11 308#define LPCR_MER_SH 11
309#define LPCR_TC 0x00000200 /* Translation control */
307#define LPCR_LPES 0x0000000c 310#define LPCR_LPES 0x0000000c
308#define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */ 311#define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */
309#define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */ 312#define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */
@@ -316,6 +319,10 @@
316#define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */ 319#define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */
317#define SPRN_HMER 0x150 /* Hardware m? error recovery */ 320#define SPRN_HMER 0x150 /* Hardware m? error recovery */
318#define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */ 321#define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */
322#define SPRN_PCR 0x152 /* Processor compatibility register */
323#define PCR_VEC_DIS (1ul << (63-0)) /* Vec. disable (bit NA since POWER8) */
324#define PCR_VSX_DIS (1ul << (63-1)) /* VSX disable (bit NA since POWER8) */
325#define PCR_ARCH_205 0x2 /* Architecture 2.05 */
319#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */ 326#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
320#define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */ 327#define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */
321#define SPRN_TLBVPNR 0x155 /* P7 TLB control register */ 328#define SPRN_TLBVPNR 0x155 /* P7 TLB control register */
@@ -425,6 +432,7 @@
425#define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */ 432#define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */
426#define HID4_LPID5_SH (63 - 6) /* partition ID bottom 4 bits */ 433#define HID4_LPID5_SH (63 - 6) /* partition ID bottom 4 bits */
427#define HID4_RMOR_SH (63 - 22) /* real mode offset (16 bits) */ 434#define HID4_RMOR_SH (63 - 22) /* real mode offset (16 bits) */
435#define HID4_RMOR (0xFFFFul << HID4_RMOR_SH)
428#define HID4_LPES1 (1 << (63-57)) /* LPAR env. sel. bit 1 */ 436#define HID4_LPES1 (1 << (63-57)) /* LPAR env. sel. bit 1 */
429#define HID4_RMLS0_SH (63 - 58) /* Real mode limit top bit */ 437#define HID4_RMLS0_SH (63 - 58) /* Real mode limit top bit */
430#define HID4_LPID1_SH 0 /* partition ID top 2 bits */ 438#define HID4_LPID1_SH 0 /* partition ID top 2 bits */
@@ -1107,6 +1115,13 @@
1107#define PVR_BE 0x0070 1115#define PVR_BE 0x0070
1108#define PVR_PA6T 0x0090 1116#define PVR_PA6T 0x0090
1109 1117
1118/* "Logical" PVR values defined in PAPR, representing architecture levels */
1119#define PVR_ARCH_204 0x0f000001
1120#define PVR_ARCH_205 0x0f000002
1121#define PVR_ARCH_206 0x0f000003
1122#define PVR_ARCH_206p 0x0f100003
1123#define PVR_ARCH_207 0x0f000004
1124
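These logical PVRs pair with the KVM_REG_PPC_ARCH_COMPAT register added to the uapi header below: userspace writes one of them to request an architecture level, and the host maps the request onto SPRN_PCR. A sketch of the idea; the helper name is hypothetical and the exact mapping depends on the host CPU:

        /* Hypothetical: translate a requested logical PVR into a PCR image. */
        static unsigned long compat_pvr_to_pcr(u32 compat_pvr)
        {
                switch (compat_pvr) {
                case PVR_ARCH_205:
                        return PCR_ARCH_205;    /* run the guest in v2.05 mode */
                case PVR_ARCH_206:
                case PVR_ARCH_206p:
                default:
                        return 0;               /* native mode, no compat bits */
                }
        }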
1110/* Macros for setting and retrieving special purpose registers */ 1125/* Macros for setting and retrieving special purpose registers */
1111#ifndef __ASSEMBLY__ 1126#ifndef __ASSEMBLY__
1112#define mfmsr() ({unsigned long rval; \ 1127#define mfmsr() ({unsigned long rval; \
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 93f280e23279..37b7ca39ec9f 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -235,6 +235,7 @@ extern long spu_sys_callback(struct spu_syscall_block *s);
235 235
236/* syscalls implemented in spufs */ 236/* syscalls implemented in spufs */
237struct file; 237struct file;
238struct coredump_params;
238struct spufs_calls { 239struct spufs_calls {
239 long (*create_thread)(const char __user *name, 240 long (*create_thread)(const char __user *name,
240 unsigned int flags, umode_t mode, 241 unsigned int flags, umode_t mode,
@@ -242,7 +243,7 @@ struct spufs_calls {
242 long (*spu_run)(struct file *filp, __u32 __user *unpc, 243 long (*spu_run)(struct file *filp, __u32 __user *unpc,
243 __u32 __user *ustatus); 244 __u32 __user *ustatus);
244 int (*coredump_extra_notes_size)(void); 245 int (*coredump_extra_notes_size)(void);
245 int (*coredump_extra_notes_write)(struct file *file, loff_t *foffset); 246 int (*coredump_extra_notes_write)(struct coredump_params *cprm);
246 void (*notify_spus_active)(void); 247 void (*notify_spus_active)(void);
247 struct module *owner; 248 struct module *owner;
248}; 249};
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 05a3030b4582..9854c564ac52 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -82,8 +82,6 @@ static inline struct thread_info *current_thread_info(void)
82 82
83#endif /* __ASSEMBLY__ */ 83#endif /* __ASSEMBLY__ */
84 84
85#define PREEMPT_ACTIVE 0x10000000
86
87/* 85/*
88 * thread information flag bit numbers 86 * thread information flag bit numbers
89 */ 87 */
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
index 23016020915e..75c6ecdb8f37 100644
--- a/arch/powerpc/include/asm/uprobes.h
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -37,6 +37,7 @@ typedef ppc_opcode_t uprobe_opcode_t;
37struct arch_uprobe { 37struct arch_uprobe {
38 union { 38 union {
39 u8 insn[MAX_UINSN_BYTES]; 39 u8 insn[MAX_UINSN_BYTES];
40 u8 ixol[MAX_UINSN_BYTES];
40 u32 ainsn; 41 u32 ainsn;
41 }; 42 };
42}; 43};
@@ -45,11 +46,4 @@ struct arch_uprobe_task {
45 unsigned long saved_trap_nr; 46 unsigned long saved_trap_nr;
46}; 47};
47 48
48extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
49extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
50extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
51extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
52extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
53extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
54extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
55#endif /* _ASM_UPROBES_H */ 49#endif /* _ASM_UPROBES_H */
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 0fb1a6e9ff90..6836ec79a830 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -27,6 +27,7 @@
27#define __KVM_HAVE_PPC_SMT 27#define __KVM_HAVE_PPC_SMT
28#define __KVM_HAVE_IRQCHIP 28#define __KVM_HAVE_IRQCHIP
29#define __KVM_HAVE_IRQ_LINE 29#define __KVM_HAVE_IRQ_LINE
30#define __KVM_HAVE_GUEST_DEBUG
30 31
31struct kvm_regs { 32struct kvm_regs {
32 __u64 pc; 33 __u64 pc;
@@ -269,7 +270,24 @@ struct kvm_fpu {
269 __u64 fpr[32]; 270 __u64 fpr[32];
270}; 271};
271 272
273/*
274 * Defines for h/w breakpoint, watchpoint (read, write or both) and
275 * software breakpoint.
276 * These are used as "type" in KVM_SET_GUEST_DEBUG ioctl and "status"
277 * for KVM_DEBUG_EXIT.
278 */
279#define KVMPPC_DEBUG_NONE 0x0
280#define KVMPPC_DEBUG_BREAKPOINT (1UL << 1)
281#define KVMPPC_DEBUG_WATCH_WRITE (1UL << 2)
282#define KVMPPC_DEBUG_WATCH_READ (1UL << 3)
272struct kvm_debug_exit_arch { 283struct kvm_debug_exit_arch {
284 __u64 address;
285 /*
286 * exiting to userspace because of h/w breakpoint, watchpoint
287 * (read, write or both) and software breakpoint.
288 */
289 __u32 status;
290 __u32 reserved;
273}; 291};
274 292
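Hoisting the KVMPPC_DEBUG_* bits out of the bp[] array lets the same values describe both a debug request and a debug exit. A hedged userspace sketch of consuming the new fields, where run is the mmap'ed struct kvm_run:

        if (run->exit_reason == KVM_EXIT_DEBUG) {
                struct kvm_debug_exit_arch *dbg = &run->debug.arch;

                if (dbg->status & KVMPPC_DEBUG_BREAKPOINT)
                        printf("h/w breakpoint at 0x%llx\n", dbg->address);
                else if (dbg->status & (KVMPPC_DEBUG_WATCH_READ |
                                        KVMPPC_DEBUG_WATCH_WRITE))
                        printf("watchpoint at 0x%llx\n", dbg->address);
                else    /* no h/w bits set */
                        printf("s/w breakpoint at 0x%llx\n", dbg->address);
        }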
275/* for KVM_SET_GUEST_DEBUG */ 293/* for KVM_SET_GUEST_DEBUG */
@@ -281,10 +299,6 @@ struct kvm_guest_debug_arch {
281 * Type denotes h/w breakpoint, read watchpoint, write 299 * Type denotes h/w breakpoint, read watchpoint, write
282 * watchpoint or watchpoint (both read and write). 300 * watchpoint or watchpoint (both read and write).
283 */ 301 */
284#define KVMPPC_DEBUG_NONE 0x0
285#define KVMPPC_DEBUG_BREAKPOINT (1UL << 1)
286#define KVMPPC_DEBUG_WATCH_WRITE (1UL << 2)
287#define KVMPPC_DEBUG_WATCH_READ (1UL << 3)
288 __u32 type; 302 __u32 type;
289 __u32 reserved; 303 __u32 reserved;
290 } bp[16]; 304 } bp[16];
@@ -429,6 +443,11 @@ struct kvm_get_htab_header {
429#define KVM_REG_PPC_MMCR0 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10) 443#define KVM_REG_PPC_MMCR0 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
430#define KVM_REG_PPC_MMCR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11) 444#define KVM_REG_PPC_MMCR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
431#define KVM_REG_PPC_MMCRA (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12) 445#define KVM_REG_PPC_MMCRA (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
446#define KVM_REG_PPC_MMCR2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x13)
447#define KVM_REG_PPC_MMCRS (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x14)
448#define KVM_REG_PPC_SIAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15)
449#define KVM_REG_PPC_SDAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16)
450#define KVM_REG_PPC_SIER (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x17)
432 451
433#define KVM_REG_PPC_PMC1 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18) 452#define KVM_REG_PPC_PMC1 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
434#define KVM_REG_PPC_PMC2 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19) 453#define KVM_REG_PPC_PMC2 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
@@ -499,6 +518,65 @@ struct kvm_get_htab_header {
499#define KVM_REG_PPC_TLB3PS (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9a) 518#define KVM_REG_PPC_TLB3PS (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9a)
500#define KVM_REG_PPC_EPTCFG (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9b) 519#define KVM_REG_PPC_EPTCFG (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9b)
501 520
521/* Timebase offset */
522#define KVM_REG_PPC_TB_OFFSET (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9c)
523
524/* POWER8 registers */
525#define KVM_REG_PPC_SPMC1 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9d)
526#define KVM_REG_PPC_SPMC2 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9e)
527#define KVM_REG_PPC_IAMR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9f)
528#define KVM_REG_PPC_TFHAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa0)
529#define KVM_REG_PPC_TFIAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa1)
530#define KVM_REG_PPC_TEXASR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa2)
531#define KVM_REG_PPC_FSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa3)
532#define KVM_REG_PPC_PSPB (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xa4)
533#define KVM_REG_PPC_EBBHR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa5)
534#define KVM_REG_PPC_EBBRR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa6)
535#define KVM_REG_PPC_BESCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa7)
536#define KVM_REG_PPC_TAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa8)
537#define KVM_REG_PPC_DPDES (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa9)
538#define KVM_REG_PPC_DAWR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaa)
539#define KVM_REG_PPC_DAWRX (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xab)
540#define KVM_REG_PPC_CIABR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xac)
541#define KVM_REG_PPC_IC (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xad)
542#define KVM_REG_PPC_VTB (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xae)
543#define KVM_REG_PPC_CSIGR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaf)
544#define KVM_REG_PPC_TACR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb0)
545#define KVM_REG_PPC_TCSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
546#define KVM_REG_PPC_PID (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
547#define KVM_REG_PPC_ACOP (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
548
549#define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
550#define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
551#define KVM_REG_PPC_PPR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6)
552
553/* Architecture compatibility level */
554#define KVM_REG_PPC_ARCH_COMPAT (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)
555
556/* Transactional Memory checkpointed state:
557 * This is all GPRs, all VSX regs and a subset of SPRs
558 */
559#define KVM_REG_PPC_TM (KVM_REG_PPC | 0x80000000)
560/* TM GPRs */
561#define KVM_REG_PPC_TM_GPR0 (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0)
562#define KVM_REG_PPC_TM_GPR(n) (KVM_REG_PPC_TM_GPR0 + (n))
563#define KVM_REG_PPC_TM_GPR31 (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x1f)
564/* TM VSX */
565#define KVM_REG_PPC_TM_VSR0 (KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x20)
566#define KVM_REG_PPC_TM_VSR(n) (KVM_REG_PPC_TM_VSR0 + (n))
567#define KVM_REG_PPC_TM_VSR63 (KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x5f)
568/* TM SPRS */
569#define KVM_REG_PPC_TM_CR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x60)
570#define KVM_REG_PPC_TM_LR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x61)
571#define KVM_REG_PPC_TM_CTR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x62)
572#define KVM_REG_PPC_TM_FPSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x63)
573#define KVM_REG_PPC_TM_AMR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x64)
574#define KVM_REG_PPC_TM_PPR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x65)
575#define KVM_REG_PPC_TM_VRSAVE (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x66)
576#define KVM_REG_PPC_TM_VSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
577#define KVM_REG_PPC_TM_DSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
578#define KVM_REG_PPC_TM_TAR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
579
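Each checkpointed TM register is an ordinary ONE_REG id, so no new ioctl is needed; KVM_REG_PPC_TM_GPR(n) and KVM_REG_PPC_TM_VSR(n) simply index into the ranges above. A userspace sketch, with vcpu_fd standing in for an open vcpu descriptor:

        /* Sketch: fetch checkpointed GPR 3 through the generic ONE_REG path. */
        __u64 tm_gpr3;
        struct kvm_one_reg reg = {
                .id   = KVM_REG_PPC_TM_GPR(3),
                .addr = (__u64)(unsigned long)&tm_gpr3,
        };
        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
                perror("KVM_GET_ONE_REG");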
502/* PPC64 eXternal Interrupt Controller Specification */ 580/* PPC64 eXternal Interrupt Controller Specification */
503#define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */ 581#define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */
504 582
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index a6d74467c9ed..fa698324a1fd 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -83,4 +83,6 @@
83 83
84#define SO_BUSY_POLL 46 84#define SO_BUSY_POLL 46
85 85
86#define SO_MAX_PACING_RATE 47
87
86#endif /* _ASM_POWERPC_SOCKET_H */ 88#endif /* _ASM_POWERPC_SOCKET_H */
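SO_MAX_PACING_RATE mirrors the generic socket option of the same number: it caps, in bytes per second, the pacing rate that the fq scheduler (and TCP internally) will apply to the socket. Usage is the usual setsockopt() call:

        /* Sketch: limit this socket's paced output to about 1 MB/s. */
        unsigned int rate = 1000000;    /* bytes per second */
        if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
                       &rate, sizeof(rate)) < 0)
                perror("setsockopt");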
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index e60a3697932c..2ea5cc033ec8 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -439,7 +439,7 @@ int main(void)
439 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); 439 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
440 DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); 440 DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
441 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); 441 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
442#ifdef CONFIG_KVM_BOOK3S_64_HV 442#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
443 DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr)); 443 DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
444 DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0)); 444 DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
445 DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1)); 445 DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));
@@ -470,7 +470,7 @@ int main(void)
470 DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid)); 470 DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
471 471
472 /* book3s */ 472 /* book3s */
473#ifdef CONFIG_KVM_BOOK3S_64_HV 473#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
474 DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1)); 474 DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
475 DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid)); 475 DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
476 DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr)); 476 DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
@@ -502,6 +502,8 @@ int main(void)
502 DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded)); 502 DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
503 DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr)); 503 DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
504 DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc)); 504 DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
505 DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
506 DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
505 DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb)); 507 DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
506 DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max)); 508 DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
507 DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr)); 509 DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
@@ -511,18 +513,22 @@ int main(void)
511 DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); 513 DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
512 DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid)); 514 DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
513 DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar)); 515 DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
516 DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
517 DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
514 DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count)); 518 DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
515 DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count)); 519 DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
516 DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest)); 520 DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
517 DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads)); 521 DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
518 DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) - 522 DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
519 offsetof(struct kvmppc_vcpu_book3s, vcpu)); 523 DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
524 DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
520 DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige)); 525 DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
521 DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv)); 526 DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
522 DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb)); 527 DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
523 528
524#ifdef CONFIG_PPC_BOOK3S_64 529#ifdef CONFIG_PPC_BOOK3S_64
525#ifdef CONFIG_KVM_BOOK3S_PR 530#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
531 DEFINE(PACA_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
526# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f)) 532# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
527#else 533#else
528# define SVCPU_FIELD(x, f) 534# define SVCPU_FIELD(x, f)
@@ -574,7 +580,7 @@ int main(void)
574 HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); 580 HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
575 HSTATE_FIELD(HSTATE_NAPPING, napping); 581 HSTATE_FIELD(HSTATE_NAPPING, napping);
576 582
577#ifdef CONFIG_KVM_BOOK3S_64_HV 583#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
578 HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req); 584 HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req);
579 HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state); 585 HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
580 HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu); 586 HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
@@ -590,10 +596,11 @@ int main(void)
590 HSTATE_FIELD(HSTATE_DABR, dabr); 596 HSTATE_FIELD(HSTATE_DABR, dabr);
591 HSTATE_FIELD(HSTATE_DECEXP, dec_expires); 597 HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
592 DEFINE(IPI_PRIORITY, IPI_PRIORITY); 598 DEFINE(IPI_PRIORITY, IPI_PRIORITY);
593#endif /* CONFIG_KVM_BOOK3S_64_HV */ 599#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
594 600
595#ifdef CONFIG_PPC_BOOK3S_64 601#ifdef CONFIG_PPC_BOOK3S_64
596 HSTATE_FIELD(HSTATE_CFAR, cfar); 602 HSTATE_FIELD(HSTATE_CFAR, cfar);
603 HSTATE_FIELD(HSTATE_PPR, ppr);
597#endif /* CONFIG_PPC_BOOK3S_64 */ 604#endif /* CONFIG_PPC_BOOK3S_64 */
598 605
599#else /* CONFIG_PPC_BOOK3S */ 606#else /* CONFIG_PPC_BOOK3S */
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 1fb331db34c8..671302065347 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -189,14 +189,13 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
189 } 189 }
190 190
191 /* If PCI-E capable, dump PCI-E cap 10, and the AER */ 191 /* If PCI-E capable, dump PCI-E cap 10, and the AER */
192 cap = pci_find_capability(dev, PCI_CAP_ID_EXP); 192 if (pci_is_pcie(dev)) {
193 if (cap) {
194 n += scnprintf(buf+n, len-n, "pci-e cap10:\n"); 193 n += scnprintf(buf+n, len-n, "pci-e cap10:\n");
195 printk(KERN_WARNING 194 printk(KERN_WARNING
196 "EEH: PCI-E capabilities and status follow:\n"); 195 "EEH: PCI-E capabilities and status follow:\n");
197 196
198 for (i=0; i<=8; i++) { 197 for (i=0; i<=8; i++) {
199 eeh_ops->read_config(dn, cap+4*i, 4, &cfg); 198 eeh_ops->read_config(dn, dev->pcie_cap+4*i, 4, &cfg);
200 n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); 199 n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
201 printk(KERN_WARNING "EEH: PCI-E %02x: %08x\n", i, cfg); 200 printk(KERN_WARNING "EEH: PCI-E %02x: %08x\n", i, cfg);
202 } 201 }
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index 6300c13bbde4..7898be90f2dc 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include <linux/of.h> 20#include <linux/of.h>
21#include <linux/of_fdt.h>
21#include <asm/epapr_hcalls.h> 22#include <asm/epapr_hcalls.h>
22#include <asm/cacheflush.h> 23#include <asm/cacheflush.h>
23#include <asm/code-patching.h> 24#include <asm/code-patching.h>
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 3a9ed6ac224b..9f905e40922e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -126,7 +126,7 @@ BEGIN_FTR_SECTION
126 bgt cr1,. 126 bgt cr1,.
127 GET_PACA(r13) 127 GET_PACA(r13)
128 128
129#ifdef CONFIG_KVM_BOOK3S_64_HV 129#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
130 li r0,KVM_HWTHREAD_IN_KERNEL 130 li r0,KVM_HWTHREAD_IN_KERNEL
131 stb r0,HSTATE_HWTHREAD_STATE(r13) 131 stb r0,HSTATE_HWTHREAD_STATE(r13)
132 /* Order setting hwthread_state vs. testing hwthread_req */ 132 /* Order setting hwthread_state vs. testing hwthread_req */
@@ -425,7 +425,7 @@ data_access_check_stab:
425 mfspr r9,SPRN_DSISR 425 mfspr r9,SPRN_DSISR
426 srdi r10,r10,60 426 srdi r10,r10,60
427 rlwimi r10,r9,16,0x20 427 rlwimi r10,r9,16,0x20
428#ifdef CONFIG_KVM_BOOK3S_PR 428#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
429 lbz r9,HSTATE_IN_GUEST(r13) 429 lbz r9,HSTATE_IN_GUEST(r13)
430 rlwimi r10,r9,8,0x300 430 rlwimi r10,r9,8,0x300
431#endif 431#endif
@@ -650,6 +650,32 @@ slb_miss_user_pseries:
650 b . /* prevent spec. execution */ 650 b . /* prevent spec. execution */
651#endif /* __DISABLED__ */ 651#endif /* __DISABLED__ */
652 652
653#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
654kvmppc_skip_interrupt:
655 /*
656 * Here all GPRs are unchanged from when the interrupt happened
657 * except for r13, which is saved in SPRG_SCRATCH0.
658 */
659 mfspr r13, SPRN_SRR0
660 addi r13, r13, 4
661 mtspr SPRN_SRR0, r13
662 GET_SCRATCH0(r13)
663 rfid
664 b .
665
666kvmppc_skip_Hinterrupt:
667 /*
668 * Here all GPRs are unchanged from when the interrupt happened
669 * except for r13, which is saved in SPRG_SCRATCH0.
670 */
671 mfspr r13, SPRN_HSRR0
672 addi r13, r13, 4
673 mtspr SPRN_HSRR0, r13
674 GET_SCRATCH0(r13)
675 hrfid
676 b .
677#endif
678
653/* 679/*
654 * Code from here down to __end_handlers is invoked from the 680 * Code from here down to __end_handlers is invoked from the
655 * exception prologs above. Because the prologs assemble the 681 * exception prologs above. Because the prologs assemble the
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 16a7c2326d48..1114d13ac19f 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -292,6 +292,7 @@ out:
292 return rc; 292 return rc;
293 return count; 293 return count;
294} 294}
295static BUS_ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe);
295 296
296static ssize_t ibmebus_store_remove(struct bus_type *bus, 297static ssize_t ibmebus_store_remove(struct bus_type *bus,
297 const char *buf, size_t count) 298 const char *buf, size_t count)
@@ -317,13 +318,14 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus,
317 return -ENODEV; 318 return -ENODEV;
318 } 319 }
319} 320}
321static BUS_ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove);
320 322
321 323static struct attribute *ibmbus_bus_attrs[] = {
322static struct bus_attribute ibmebus_bus_attrs[] = { 324 &bus_attr_probe.attr,
323 __ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe), 325 &bus_attr_remove.attr,
324 __ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove), 326 NULL,
325 __ATTR_NULL
326}; 327};
328ATTRIBUTE_GROUPS(ibmbus_bus);
327 329
328static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv) 330static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv)
329{ 331{
@@ -713,7 +715,7 @@ static struct dev_pm_ops ibmebus_bus_dev_pm_ops = {
713struct bus_type ibmebus_bus_type = { 715struct bus_type ibmebus_bus_type = {
714 .name = "ibmebus", 716 .name = "ibmebus",
715 .uevent = of_device_uevent_modalias, 717 .uevent = of_device_uevent_modalias,
716 .bus_attrs = ibmebus_bus_attrs, 718 .bus_groups = ibmbus_bus_groups,
717 .match = ibmebus_bus_bus_match, 719 .match = ibmebus_bus_bus_match,
718 .probe = ibmebus_bus_device_probe, 720 .probe = ibmebus_bus_device_probe,
719 .remove = ibmebus_bus_device_remove, 721 .remove = ibmebus_bus_device_remove,
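The driver core is retiring bus_attrs in favour of attribute groups: BUS_ATTR(name, ...) emits a bus_attr_<name> variable, and ATTRIBUTE_GROUPS(ibmbus_bus) generates the ibmbus_bus_groups array that .bus_groups points at. The macro expands to roughly:

        /* Approximate expansion of ATTRIBUTE_GROUPS(ibmbus_bus): */
        static const struct attribute_group ibmbus_bus_group = {
                .attrs = ibmbus_bus_attrs,
        };
        static const struct attribute_group *ibmbus_bus_groups[] = {
                &ibmbus_bus_group,
                NULL,
        };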
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index e11863f4e595..847e40e62fce 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -84,7 +84,7 @@ _GLOBAL(power7_nap)
84 std r9,_MSR(r1) 84 std r9,_MSR(r1)
85 std r1,PACAR1(r13) 85 std r1,PACAR1(r13)
86 86
87#ifdef CONFIG_KVM_BOOK3S_64_HV 87#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
88 /* Tell KVM we're napping */ 88 /* Tell KVM we're napping */
89 li r4,KVM_HWTHREAD_IN_NAP 89 li r4,KVM_HWTHREAD_IN_NAP
90 stb r4,HSTATE_HWTHREAD_STATE(r13) 90 stb r4,HSTATE_HWTHREAD_STATE(r13)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c7cb8c232d2f..ba0165615215 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -594,7 +594,7 @@ void irq_ctx_init(void)
594 } 594 }
595} 595}
596 596
597static inline void do_softirq_onstack(void) 597void do_softirq_own_stack(void)
598{ 598{
599 struct thread_info *curtp, *irqtp; 599 struct thread_info *curtp, *irqtp;
600 600
@@ -612,21 +612,6 @@ static inline void do_softirq_onstack(void)
612 set_bits(irqtp->flags, &curtp->flags); 612 set_bits(irqtp->flags, &curtp->flags);
613} 613}
614 614
615void do_softirq(void)
616{
617 unsigned long flags;
618
619 if (in_interrupt())
620 return;
621
622 local_irq_save(flags);
623
624 if (local_softirq_pending())
625 do_softirq_onstack();
626
627 local_irq_restore(flags);
628}
629
630irq_hw_number_t virq_to_hw(unsigned int virq) 615irq_hw_number_t virq_to_hw(unsigned int virq)
631{ 616{
632 struct irq_data *irq_data = irq_get_irq_data(virq); 617 struct irq_data *irq_data = irq_get_irq_data(virq);
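The open-coded do_softirq() can go because the softirq core now owns that logic and calls back into the arch only for the stack switch. A sketch of the generic side that replaces it (per the kernel/softirq.c logic of this era):

        void do_softirq(void)
        {
                unsigned long flags;

                if (in_interrupt())
                        return;
                local_irq_save(flags);
                if (local_softirq_pending())
                        do_softirq_own_stack();  /* the arch hook kept above */
                local_irq_restore(flags);
        }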
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 2156ea90eb54..90fab64d911d 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -429,7 +429,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
429 case KPROBE_HIT_SSDONE: 429 case KPROBE_HIT_SSDONE:
430 /* 430 /*
431 * We increment the nmissed count for accounting, 431 * We increment the nmissed count for accounting,
432 * we can also use npre/npostfault count for accouting 432 * we can also use npre/npostfault count for accounting
433 * these specific fault cases. 433 * these specific fault cases.
434 */ 434 */
435 kprobes_inc_nmissed_count(cur); 435 kprobes_inc_nmissed_count(cur);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 905a24bb7acc..a1e3e40ca3fd 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -228,7 +228,7 @@ int pcibios_add_platform_entries(struct pci_dev *pdev)
228 */ 228 */
229static int pci_read_irq_line(struct pci_dev *pci_dev) 229static int pci_read_irq_line(struct pci_dev *pci_dev)
230{ 230{
231 struct of_irq oirq; 231 struct of_phandle_args oirq;
232 unsigned int virq; 232 unsigned int virq;
233 233
234 pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev)); 234 pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
@@ -237,7 +237,7 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
237 memset(&oirq, 0xff, sizeof(oirq)); 237 memset(&oirq, 0xff, sizeof(oirq));
238#endif 238#endif
239 /* Try to get a mapping from the device-tree */ 239 /* Try to get a mapping from the device-tree */
240 if (of_irq_map_pci(pci_dev, &oirq)) { 240 if (of_irq_parse_pci(pci_dev, &oirq)) {
241 u8 line, pin; 241 u8 line, pin;
242 242
243 /* If that fails, lets fallback to what is in the config 243 /* If that fails, lets fallback to what is in the config
@@ -263,11 +263,10 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
263 irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); 263 irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
264 } else { 264 } else {
265 pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", 265 pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
266 oirq.size, oirq.specifier[0], oirq.specifier[1], 266 oirq.args_count, oirq.args[0], oirq.args[1],
267 of_node_full_name(oirq.controller)); 267 of_node_full_name(oirq.np));
268 268
269 virq = irq_create_of_mapping(oirq.controller, oirq.specifier, 269 virq = irq_create_of_mapping(&oirq);
270 oirq.size);
271 } 270 }
272 if(virq == NO_IRQ) { 271 if(virq == NO_IRQ) {
273 pr_debug(" Failed to map !\n"); 272 pr_debug(" Failed to map !\n");
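struct of_irq gives way to struct of_phandle_args, which carries the controller node and the interrupt specifier as one unit, so irq_create_of_mapping() now takes a single pointer. For reference, the structure looks roughly like:

        /* From <linux/of.h>, shown here for orientation. */
        struct of_phandle_args {
                struct device_node *np;                 /* interrupt controller */
                int args_count;                         /* number of specifier cells */
                uint32_t args[MAX_PHANDLE_ARGS];        /* the specifier itself */
        };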
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 4432fd86a6d2..f3a47098fb8e 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -546,15 +546,6 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
546 memblock_add(base, size); 546 memblock_add(base, size);
547} 547}
548 548
549#ifdef CONFIG_BLK_DEV_INITRD
550void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
551{
552 initrd_start = (unsigned long)__va(start);
553 initrd_end = (unsigned long)__va(end);
554 initrd_below_start_ok = 1;
555}
556#endif
557
558static void __init early_reserve_mem_dt(void) 549static void __init early_reserve_mem_dt(void)
559{ 550{
560 unsigned long i, len, dt_root; 551 unsigned long i, len, dt_root;
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 1a410aa57fb7..749778e0a69d 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -893,7 +893,7 @@ static long restore_tm_user_regs(struct pt_regs *regs,
893#endif 893#endif
894 894
895#ifdef CONFIG_PPC64 895#ifdef CONFIG_PPC64
896int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s) 896int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
897{ 897{
898 int err; 898 int err;
899 899
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8e59abc237d7..930cd8af3503 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -844,18 +844,6 @@ void __cpu_die(unsigned int cpu)
844 smp_ops->cpu_die(cpu); 844 smp_ops->cpu_die(cpu);
845} 845}
846 846
847static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);
848
849void cpu_hotplug_driver_lock()
850{
851 mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
852}
853
854void cpu_hotplug_driver_unlock()
855{
856 mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
857}
858
859void cpu_die(void) 847void cpu_die(void)
860{ 848{
861 if (ppc_md.cpu_die) 849 if (ppc_md.cpu_die)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 62c3dd8c69f2..907a472f9a9e 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1529,7 +1529,7 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
1529 * back on or not. 1529 * back on or not.
1530 */ 1530 */
1531 if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, 1531 if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
1532 current->thread.debug.dbcr1)) 1532 current->thread.debug.dbcr1))
1533 regs->msr |= MSR_DE; 1533 regs->msr |= MSR_DE;
1534 else 1534 else
1535 /* Make sure the IDM flag is off */ 1535 /* Make sure the IDM flag is off */
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index cb92d8204ec7..e7d0c88f621a 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -997,21 +997,36 @@ static struct device_attribute vio_cmo_dev_attrs[] = {
997/* sysfs bus functions and data structures for CMO */ 997/* sysfs bus functions and data structures for CMO */
998 998
999#define viobus_cmo_rd_attr(name) \ 999#define viobus_cmo_rd_attr(name) \
1000static ssize_t \ 1000static ssize_t cmo_##name##_show(struct bus_type *bt, char *buf) \
1001viobus_cmo_##name##_show(struct bus_type *bt, char *buf) \
1002{ \ 1001{ \
1003 return sprintf(buf, "%lu\n", vio_cmo.name); \ 1002 return sprintf(buf, "%lu\n", vio_cmo.name); \
1004} 1003} \
1004static BUS_ATTR_RO(cmo_##name)
1005 1005
1006#define viobus_cmo_pool_rd_attr(name, var) \ 1006#define viobus_cmo_pool_rd_attr(name, var) \
1007static ssize_t \ 1007static ssize_t \
1008viobus_cmo_##name##_pool_show_##var(struct bus_type *bt, char *buf) \ 1008cmo_##name##_##var##_show(struct bus_type *bt, char *buf) \
1009{ \ 1009{ \
1010 return sprintf(buf, "%lu\n", vio_cmo.name.var); \ 1010 return sprintf(buf, "%lu\n", vio_cmo.name.var); \
1011} \
1012static BUS_ATTR_RO(cmo_##name##_##var)
1013
1014viobus_cmo_rd_attr(entitled);
1015viobus_cmo_rd_attr(spare);
1016viobus_cmo_rd_attr(min);
1017viobus_cmo_rd_attr(desired);
1018viobus_cmo_rd_attr(curr);
1019viobus_cmo_pool_rd_attr(reserve, size);
1020viobus_cmo_pool_rd_attr(excess, size);
1021viobus_cmo_pool_rd_attr(excess, free);
1022
1023static ssize_t cmo_high_show(struct bus_type *bt, char *buf)
1024{
1025 return sprintf(buf, "%lu\n", vio_cmo.high);
1011} 1026}
1012 1027
1013static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf, 1028static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
1014 size_t count) 1029 size_t count)
1015{ 1030{
1016 unsigned long flags; 1031 unsigned long flags;
1017 1032
@@ -1021,35 +1036,26 @@ static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf,
1021 1036
1022 return count; 1037 return count;
1023} 1038}
1024 1039static BUS_ATTR_RW(cmo_high);
1025viobus_cmo_rd_attr(entitled); 1040
1026viobus_cmo_pool_rd_attr(reserve, size); 1041static struct attribute *vio_bus_attrs[] = {
1027viobus_cmo_pool_rd_attr(excess, size); 1042 &bus_attr_cmo_entitled.attr,
1028viobus_cmo_pool_rd_attr(excess, free); 1043 &bus_attr_cmo_spare.attr,
1029viobus_cmo_rd_attr(spare); 1044 &bus_attr_cmo_min.attr,
1030viobus_cmo_rd_attr(min); 1045 &bus_attr_cmo_desired.attr,
1031viobus_cmo_rd_attr(desired); 1046 &bus_attr_cmo_curr.attr,
1032viobus_cmo_rd_attr(curr); 1047 &bus_attr_cmo_high.attr,
1033viobus_cmo_rd_attr(high); 1048 &bus_attr_cmo_reserve_size.attr,
1034 1049 &bus_attr_cmo_excess_size.attr,
1035static struct bus_attribute vio_cmo_bus_attrs[] = { 1050 &bus_attr_cmo_excess_free.attr,
1036 __ATTR(cmo_entitled, S_IRUGO, viobus_cmo_entitled_show, NULL), 1051 NULL,
1037 __ATTR(cmo_reserve_size, S_IRUGO, viobus_cmo_reserve_pool_show_size, NULL),
1038 __ATTR(cmo_excess_size, S_IRUGO, viobus_cmo_excess_pool_show_size, NULL),
1039 __ATTR(cmo_excess_free, S_IRUGO, viobus_cmo_excess_pool_show_free, NULL),
1040 __ATTR(cmo_spare, S_IRUGO, viobus_cmo_spare_show, NULL),
1041 __ATTR(cmo_min, S_IRUGO, viobus_cmo_min_show, NULL),
1042 __ATTR(cmo_desired, S_IRUGO, viobus_cmo_desired_show, NULL),
1043 __ATTR(cmo_curr, S_IRUGO, viobus_cmo_curr_show, NULL),
1044 __ATTR(cmo_high, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
1045 viobus_cmo_high_show, viobus_cmo_high_reset),
1046 __ATTR_NULL
1047}; 1052};
1053ATTRIBUTE_GROUPS(vio_bus);
1048 1054
1049static void vio_cmo_sysfs_init(void) 1055static void vio_cmo_sysfs_init(void)
1050{ 1056{
1051 vio_bus_type.dev_attrs = vio_cmo_dev_attrs; 1057 vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
1052 vio_bus_type.bus_attrs = vio_cmo_bus_attrs; 1058 vio_bus_type.bus_groups = vio_bus_groups;
1053} 1059}
1054#else /* CONFIG_PPC_SMLPAR */ 1060#else /* CONFIG_PPC_SMLPAR */
1055int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; } 1061int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
@@ -1413,8 +1419,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
1413 1419
1414 /* needed to ensure proper operation of coherent allocations 1420 /* needed to ensure proper operation of coherent allocations
1415 * later, in case driver doesn't set it explicitly */ 1421 * later, in case driver doesn't set it explicitly */
1416 dma_set_mask(&viodev->dev, DMA_BIT_MASK(64)); 1422 dma_set_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64));
1417 dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
1418 } 1423 }
1419 1424
1420 /* register with generic device framework */ 1425 /* register with generic device framework */
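dma_set_mask_and_coherent() folds the old two-call sequence into one: it sets the streaming DMA mask and, only if that succeeds, the coherent mask as well. The replaced pair in the hunk above behaves like:

        /* Sketch: the open-coded equivalent of dma_set_mask_and_coherent(). */
        int rc = dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
        if (rc == 0)
                dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));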
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 2f5c6b6d6877..93221e87b911 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -31,13 +31,13 @@
31#include "44x_tlb.h" 31#include "44x_tlb.h"
32#include "booke.h" 32#include "booke.h"
33 33
34void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 34static void kvmppc_core_vcpu_load_44x(struct kvm_vcpu *vcpu, int cpu)
35{ 35{
36 kvmppc_booke_vcpu_load(vcpu, cpu); 36 kvmppc_booke_vcpu_load(vcpu, cpu);
37 kvmppc_44x_tlb_load(vcpu); 37 kvmppc_44x_tlb_load(vcpu);
38} 38}
39 39
40void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) 40static void kvmppc_core_vcpu_put_44x(struct kvm_vcpu *vcpu)
41{ 41{
42 kvmppc_44x_tlb_put(vcpu); 42 kvmppc_44x_tlb_put(vcpu);
43 kvmppc_booke_vcpu_put(vcpu); 43 kvmppc_booke_vcpu_put(vcpu);
@@ -114,29 +114,32 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
114 return 0; 114 return 0;
115} 115}
116 116
117void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 117static int kvmppc_core_get_sregs_44x(struct kvm_vcpu *vcpu,
118 struct kvm_sregs *sregs)
118{ 119{
119 kvmppc_get_sregs_ivor(vcpu, sregs); 120 return kvmppc_get_sregs_ivor(vcpu, sregs);
120} 121}
121 122
122int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 123static int kvmppc_core_set_sregs_44x(struct kvm_vcpu *vcpu,
124 struct kvm_sregs *sregs)
123{ 125{
124 return kvmppc_set_sregs_ivor(vcpu, sregs); 126 return kvmppc_set_sregs_ivor(vcpu, sregs);
125} 127}
126 128
127int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, 129static int kvmppc_get_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
128 union kvmppc_one_reg *val) 130 union kvmppc_one_reg *val)
129{ 131{
130 return -EINVAL; 132 return -EINVAL;
131} 133}
132 134
133int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, 135static int kvmppc_set_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
134 union kvmppc_one_reg *val) 136 union kvmppc_one_reg *val)
135{ 137{
136 return -EINVAL; 138 return -EINVAL;
137} 139}
138 140
139struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) 141static struct kvm_vcpu *kvmppc_core_vcpu_create_44x(struct kvm *kvm,
142 unsigned int id)
140{ 143{
141 struct kvmppc_vcpu_44x *vcpu_44x; 144 struct kvmppc_vcpu_44x *vcpu_44x;
142 struct kvm_vcpu *vcpu; 145 struct kvm_vcpu *vcpu;
@@ -167,7 +170,7 @@ out:
167 return ERR_PTR(err); 170 return ERR_PTR(err);
168} 171}
169 172
170void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) 173static void kvmppc_core_vcpu_free_44x(struct kvm_vcpu *vcpu)
171{ 174{
172 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); 175 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
173 176
@@ -176,28 +179,53 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
176 kmem_cache_free(kvm_vcpu_cache, vcpu_44x); 179 kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
177} 180}
178 181
179int kvmppc_core_init_vm(struct kvm *kvm) 182static int kvmppc_core_init_vm_44x(struct kvm *kvm)
180{ 183{
181 return 0; 184 return 0;
182} 185}
183 186
184void kvmppc_core_destroy_vm(struct kvm *kvm) 187static void kvmppc_core_destroy_vm_44x(struct kvm *kvm)
185{ 188{
186} 189}
187 190
191static struct kvmppc_ops kvm_ops_44x = {
192 .get_sregs = kvmppc_core_get_sregs_44x,
193 .set_sregs = kvmppc_core_set_sregs_44x,
194 .get_one_reg = kvmppc_get_one_reg_44x,
195 .set_one_reg = kvmppc_set_one_reg_44x,
196 .vcpu_load = kvmppc_core_vcpu_load_44x,
197 .vcpu_put = kvmppc_core_vcpu_put_44x,
198 .vcpu_create = kvmppc_core_vcpu_create_44x,
199 .vcpu_free = kvmppc_core_vcpu_free_44x,
200 .mmu_destroy = kvmppc_mmu_destroy_44x,
201 .init_vm = kvmppc_core_init_vm_44x,
202 .destroy_vm = kvmppc_core_destroy_vm_44x,
203 .emulate_op = kvmppc_core_emulate_op_44x,
204 .emulate_mtspr = kvmppc_core_emulate_mtspr_44x,
205 .emulate_mfspr = kvmppc_core_emulate_mfspr_44x,
206};
207
188static int __init kvmppc_44x_init(void) 208static int __init kvmppc_44x_init(void)
189{ 209{
190 int r; 210 int r;
191 211
192 r = kvmppc_booke_init(); 212 r = kvmppc_booke_init();
193 if (r) 213 if (r)
194 return r; 214 goto err_out;
215
216 r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
217 if (r)
218 goto err_out;
219 kvm_ops_44x.owner = THIS_MODULE;
220 kvmppc_pr_ops = &kvm_ops_44x;
195 221
196 return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE); 222err_out:
223 return r;
197} 224}
198 225
199static void __exit kvmppc_44x_exit(void) 226static void __exit kvmppc_44x_exit(void)
200{ 227{
228 kvmppc_pr_ops = NULL;
201 kvmppc_booke_exit(); 229 kvmppc_booke_exit();
202} 230}
203 231
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 35ec0a8547da..92c9ab4bcfec 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -91,8 +91,8 @@ static int emulate_mfdcr(struct kvm_vcpu *vcpu, int rt, int dcrn)
91 return EMULATE_DONE; 91 return EMULATE_DONE;
92} 92}
93 93
94int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, 94int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
95 unsigned int inst, int *advance) 95 unsigned int inst, int *advance)
96{ 96{
97 int emulated = EMULATE_DONE; 97 int emulated = EMULATE_DONE;
98 int dcrn = get_dcrn(inst); 98 int dcrn = get_dcrn(inst);
@@ -152,7 +152,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
152 return emulated; 152 return emulated;
153} 153}
154 154
155int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) 155int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
156{ 156{
157 int emulated = EMULATE_DONE; 157 int emulated = EMULATE_DONE;
158 158
@@ -172,7 +172,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
172 return emulated; 172 return emulated;
173} 173}
174 174
175int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) 175int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
176{ 176{
177 int emulated = EMULATE_DONE; 177 int emulated = EMULATE_DONE;
178 178
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index ed0385448148..0deef1082e02 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -268,7 +268,7 @@ static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
268 trace_kvm_stlb_inval(stlb_index); 268 trace_kvm_stlb_inval(stlb_index);
269} 269}
270 270
271void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) 271void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu)
272{ 272{
273 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); 273 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
274 int i; 274 int i;
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index e593ff257bd3..141b2027189a 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -35,17 +35,20 @@ config KVM_BOOK3S_64_HANDLER
35 bool 35 bool
36 select KVM_BOOK3S_HANDLER 36 select KVM_BOOK3S_HANDLER
37 37
38config KVM_BOOK3S_PR 38config KVM_BOOK3S_PR_POSSIBLE
39 bool 39 bool
40 select KVM_MMIO 40 select KVM_MMIO
41 select MMU_NOTIFIER 41 select MMU_NOTIFIER
42 42
43config KVM_BOOK3S_HV_POSSIBLE
44 bool
45
43config KVM_BOOK3S_32 46config KVM_BOOK3S_32
44 tristate "KVM support for PowerPC book3s_32 processors" 47 tristate "KVM support for PowerPC book3s_32 processors"
45 depends on PPC_BOOK3S_32 && !SMP && !PTE_64BIT 48 depends on PPC_BOOK3S_32 && !SMP && !PTE_64BIT
46 select KVM 49 select KVM
47 select KVM_BOOK3S_32_HANDLER 50 select KVM_BOOK3S_32_HANDLER
48 select KVM_BOOK3S_PR 51 select KVM_BOOK3S_PR_POSSIBLE
49 ---help--- 52 ---help---
50 Support running unmodified book3s_32 guest kernels 53 Support running unmodified book3s_32 guest kernels
51 in virtual machines on book3s_32 host processors. 54 in virtual machines on book3s_32 host processors.
@@ -60,6 +63,7 @@ config KVM_BOOK3S_64
60 depends on PPC_BOOK3S_64 63 depends on PPC_BOOK3S_64
61 select KVM_BOOK3S_64_HANDLER 64 select KVM_BOOK3S_64_HANDLER
62 select KVM 65 select KVM
66 select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
63 ---help--- 67 ---help---
64 Support running unmodified book3s_64 and book3s_32 guest kernels 68 Support running unmodified book3s_64 and book3s_32 guest kernels
65 in virtual machines on book3s_64 host processors. 69 in virtual machines on book3s_64 host processors.
@@ -70,8 +74,9 @@ config KVM_BOOK3S_64
70 If unsure, say N. 74 If unsure, say N.
71 75
72config KVM_BOOK3S_64_HV 76config KVM_BOOK3S_64_HV
73 bool "KVM support for POWER7 and PPC970 using hypervisor mode in host" 77 tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host"
74 depends on KVM_BOOK3S_64 78 depends on KVM_BOOK3S_64
79 select KVM_BOOK3S_HV_POSSIBLE
75 select MMU_NOTIFIER 80 select MMU_NOTIFIER
76 select CMA 81 select CMA
77 ---help--- 82 ---help---
@@ -90,9 +95,20 @@ config KVM_BOOK3S_64_HV
90 If unsure, say N. 95 If unsure, say N.
91 96
92config KVM_BOOK3S_64_PR 97config KVM_BOOK3S_64_PR
93 def_bool y 98 tristate "KVM support without using hypervisor mode in host"
94 depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV 99 depends on KVM_BOOK3S_64
95 select KVM_BOOK3S_PR 100 select KVM_BOOK3S_PR_POSSIBLE
101 ---help---
102 Support running guest kernels in virtual machines on processors
103 without using hypervisor mode in the host, by running the
104 guest in user mode (problem state) and emulating all
105 privileged instructions and registers.
106
107 This is not as fast as using hypervisor mode, but works on
108 machines where hypervisor mode is not available or not usable,
109 and can emulate processors that are different from the host
110 processor, including emulating 32-bit processors on a 64-bit
111 host.
96 112
97config KVM_BOOKE_HV 113config KVM_BOOKE_HV
98 bool 114 bool
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 6646c952c5e3..ce569b6bf4d8 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -53,41 +53,51 @@ kvm-e500mc-objs := \
 	e500_emulate.o
 kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
 
-kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
-	$(KVM)/coalesced_mmio.o \
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) := \
+	book3s_64_vio_hv.o
+
+kvm-pr-y := \
 	fpu.o \
 	book3s_paired_singles.o \
 	book3s_pr.o \
 	book3s_pr_papr.o \
-	book3s_64_vio_hv.o \
 	book3s_emulate.o \
 	book3s_interrupts.o \
 	book3s_mmu_hpte.o \
 	book3s_64_mmu_host.o \
 	book3s_64_mmu.o \
 	book3s_32_mmu.o
-kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
+
+ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+kvm-book3s_64-module-objs := \
+	$(KVM)/coalesced_mmio.o
+
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
 	book3s_rmhandlers.o
+endif
 
-kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+kvm-hv-y += \
 	book3s_hv.o \
 	book3s_hv_interrupts.o \
 	book3s_64_mmu_hv.o
+
 kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
 	book3s_hv_rm_xics.o
-kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+
+ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
 	book3s_hv_rmhandlers.o \
 	book3s_hv_rm_mmu.o \
-	book3s_64_vio_hv.o \
 	book3s_hv_ras.o \
 	book3s_hv_builtin.o \
 	book3s_hv_cma.o \
 	$(kvm-book3s_64-builtin-xics-objs-y)
+endif
 
 kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
 	book3s_xics.o
 
-kvm-book3s_64-module-objs := \
+kvm-book3s_64-module-objs += \
 	$(KVM)/kvm_main.o \
 	$(KVM)/eventfd.o \
 	powerpc.o \
@@ -123,4 +133,7 @@ obj-$(CONFIG_KVM_E500MC) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
 obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
 
+obj-$(CONFIG_KVM_BOOK3S_64_PR) += kvm-pr.o
+obj-$(CONFIG_KVM_BOOK3S_64_HV) += kvm-hv.o
+
 obj-y += $(kvm-book3s_64-builtin-objs-y)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 700df6f1d32c..8912608b7e1b 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -34,6 +34,7 @@
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 
+#include "book3s.h"
 #include "trace.h"
 
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
@@ -69,6 +70,50 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
 {
 }
 
+static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
+{
+	if (!is_kvmppc_hv_enabled(vcpu->kvm))
+		return to_book3s(vcpu)->hior;
+	return 0;
+}
+
+static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
+			unsigned long pending_now, unsigned long old_pending)
+{
+	if (is_kvmppc_hv_enabled(vcpu->kvm))
+		return;
+	if (pending_now)
+		vcpu->arch.shared->int_pending = 1;
+	else if (old_pending)
+		vcpu->arch.shared->int_pending = 0;
+}
+
+static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
+{
+	ulong crit_raw;
+	ulong crit_r1;
+	bool crit;
+
+	if (is_kvmppc_hv_enabled(vcpu->kvm))
+		return false;
+
+	crit_raw = vcpu->arch.shared->critical;
+	crit_r1 = kvmppc_get_gpr(vcpu, 1);
+
+	/* Truncate crit indicators in 32 bit mode */
+	if (!(vcpu->arch.shared->msr & MSR_SF)) {
+		crit_raw &= 0xffffffff;
+		crit_r1 &= 0xffffffff;
+	}
+
+	/* Critical section when crit == r1 */
+	crit = (crit_raw == crit_r1);
+	/* ... and we're in supervisor mode */
+	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
+
+	return crit;
+}
+
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
 	vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
@@ -126,28 +171,32 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
 	printk(KERN_INFO "Queueing interrupt %x\n", vec);
 #endif
 }
+EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
 
 void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
 {
 	/* might as well deliver this straight away */
 	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);
 
 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
 {
 	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);
 
 int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
 {
 	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);
 
 void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
 {
 	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
 
 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
 				struct kvm_interrupt *irq)
@@ -285,8 +334,10 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
 
-pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+			bool *writable)
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa;
 
@@ -302,20 +353,23 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 
 		pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
 		get_page(pfn_to_page(pfn));
+		if (writable)
+			*writable = true;
 		return pfn;
 	}
 
-	return gfn_to_pfn(vcpu->kvm, gfn);
+	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
 }
+EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn);
 
 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
-			struct kvmppc_pte *pte)
+			bool iswrite, struct kvmppc_pte *pte)
 {
 	int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
 	int r;
 
 	if (relocated) {
-		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
+		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
 	} else {
 		pte->eaddr = eaddr;
 		pte->raddr = eaddr & KVM_PAM;
@@ -361,7 +415,7 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 
 	vcpu->stat.st++;
 
-	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+	if (kvmppc_xlate(vcpu, *eaddr, data, true, &pte))
 		return -ENOENT;
 
 	*eaddr = pte.raddr;
@@ -374,6 +428,7 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 
 	return EMULATE_DONE;
 }
+EXPORT_SYMBOL_GPL(kvmppc_st);
 
 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 	      bool data)
@@ -383,7 +438,7 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 
 	vcpu->stat.ld++;
 
-	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+	if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte))
 		goto nopte;
 
 	*eaddr = pte.raddr;
@@ -404,6 +459,7 @@ nopte:
 mmio:
 	return EMULATE_DO_MMIO;
 }
+EXPORT_SYMBOL_GPL(kvmppc_ld);
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
@@ -419,6 +475,18 @@ void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
 }
 
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
+}
+
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	int i;
@@ -495,8 +563,7 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	if (size > sizeof(val))
 		return -EINVAL;
 
-	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
-
+	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
 	if (r == -EINVAL) {
 		r = 0;
 		switch (reg->id) {
@@ -528,6 +595,9 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 		}
 		val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
 		break;
+	case KVM_REG_PPC_VRSAVE:
+		val = get_reg_val(reg->id, vcpu->arch.vrsave);
+		break;
 #endif /* CONFIG_ALTIVEC */
 	case KVM_REG_PPC_DEBUG_INST: {
 		u32 opcode = INS_TW;
@@ -572,8 +642,7 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
 		return -EFAULT;
 
-	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
-
+	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
 	if (r == -EINVAL) {
 		r = 0;
 		switch (reg->id) {
@@ -605,6 +674,13 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 		}
 		vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
 		break;
+	case KVM_REG_PPC_VRSAVE:
+		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+			r = -ENXIO;
+			break;
+		}
+		vcpu->arch.vrsave = set_reg_val(reg->id, val);
+		break;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_KVM_XICS
 	case KVM_REG_PPC_ICP_STATE:
@@ -625,6 +701,27 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	return r;
 }
 
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
+}
+
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+{
+	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
+}
+EXPORT_SYMBOL_GPL(kvmppc_set_msr);
+
+int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
+}
+
 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 				  struct kvm_translation *tr)
 {
@@ -644,3 +741,141 @@ void kvmppc_decrementer_func(unsigned long data)
 	kvmppc_core_queue_dec(vcpu);
 	kvm_vcpu_kick(vcpu);
 }
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
+}
+
+int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
+{
+	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
+}
+
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
+}
+
+void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
+			      struct kvm_memory_slot *dont)
+{
+	kvm->arch.kvm_ops->free_memslot(free, dont);
+}
+
+int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			       unsigned long npages)
+{
+	return kvm->arch.kvm_ops->create_memslot(slot, npages);
+}
+
+void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
+}
+
+int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+				struct kvm_memory_slot *memslot,
+				struct kvm_userspace_memory_region *mem)
+{
+	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
+}
+
+void kvmppc_core_commit_memory_region(struct kvm *kvm,
+				struct kvm_userspace_memory_region *mem,
+				const struct kvm_memory_slot *old)
+{
+	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old);
+}
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
+}
+EXPORT_SYMBOL_GPL(kvm_unmap_hva);
+
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	return kvm->arch.kvm_ops->age_hva(kvm, hva);
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
+}
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
+}
+
+void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+
+#ifdef CONFIG_PPC64
+	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
+#endif
+
+	return kvm->arch.kvm_ops->init_vm(kvm);
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+	kvm->arch.kvm_ops->destroy_vm(kvm);
+
+#ifdef CONFIG_PPC64
+	kvmppc_rtas_tokens_free(kvm);
+	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
+#endif
+}
+
+int kvmppc_core_check_processor_compat(void)
+{
+	/*
+	 * We always return 0 for book3s. We check
+	 * for compatibility while loading the HV
+	 * or PR module
+	 */
+	return 0;
+}
+
+static int kvmppc_book3s_init(void)
+{
+	int r;
+
+	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+	if (r)
+		return r;
+#ifdef CONFIG_KVM_BOOK3S_32
+	r = kvmppc_book3s_init_pr();
+#endif
+	return r;
+
+}
+
+static void kvmppc_book3s_exit(void)
+{
+#ifdef CONFIG_KVM_BOOK3S_32
+	kvmppc_book3s_exit_pr();
+#endif
+	kvm_exit();
+}
+
+module_init(kvmppc_book3s_init);
+module_exit(kvmppc_book3s_exit);
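The book3s.c changes above route every generic entry point through a per-VM function table (vcpu->kvm->arch.kvm_ops), which is what lets the PR and HV backends be built as separate kvm-pr.ko / kvm-hv.ko modules and selected when the VM is created. Reduced to a self-contained C sketch of that indirection (every name below is invented for illustration, not a kernel definition):

#include <stdio.h>

struct vcpu;

/* One table per backend; generic code only sees the pointer type. */
struct backend_ops {
	int (*run)(struct vcpu *v);
};

struct vcpu {
	const struct backend_ops *ops;	/* chosen once, at VM creation */
};

static int run_pr(struct vcpu *v)
{
	(void)v;
	puts("PR backend selected");
	return 0;
}

int main(void)
{
	struct backend_ops pr_ops = { .run = run_pr };
	struct vcpu v = { .ops = &pr_ops };

	/* Generic code dispatches without naming the backend. */
	return v.ops->run(&v);
}

The payoff is that the common code compiles against the indirection only, so loading a different backend module never requires relinking the shared objects listed in the Makefile.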
diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h
new file mode 100644
index 000000000000..4bf956cf94d6
--- /dev/null
+++ b/arch/powerpc/kvm/book3s.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright IBM Corporation, 2013
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your option) any later version of the license.
+ *
+ */
+
+#ifndef __POWERPC_KVM_BOOK3S_H__
+#define __POWERPC_KVM_BOOK3S_H__
+
+extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
+					 struct kvm_memory_slot *memslot);
+extern int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva);
+extern int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start,
+				  unsigned long end);
+extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva);
+extern int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva);
+extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
+
+extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+				     unsigned int inst, int *advance);
+extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu,
+					int sprn, ulong spr_val);
+extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu,
+					int sprn, ulong *spr_val);
+extern int kvmppc_book3s_init_pr(void);
+extern void kvmppc_book3s_exit_pr(void);
+
+#endif
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index c8cefdd15fd8..76a64ce6a5b6 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -84,7 +84,8 @@ static inline bool sr_nx(u32 sr_raw)
 }
 
 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
-					  struct kvmppc_pte *pte, bool data);
+					  struct kvmppc_pte *pte, bool data,
+					  bool iswrite);
 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 					     u64 *vsid);
 
@@ -99,7 +100,7 @@ static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
 	u64 vsid;
 	struct kvmppc_pte pte;
 
-	if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
+	if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false))
 		return pte.vpage;
 
 	kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
@@ -111,10 +112,11 @@ static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
 	kvmppc_set_msr(vcpu, 0);
 }
 
-static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3s,
+static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu,
 				      u32 sre, gva_t eaddr,
 				      bool primary)
 {
+	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	u32 page, hash, pteg, htabmask;
 	hva_t r;
 
@@ -132,7 +134,7 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3
 		kvmppc_get_pc(&vcpu_book3s->vcpu), eaddr, vcpu_book3s->sdr1, pteg,
 		sr_vsid(sre));
 
-	r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
+	r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
 	if (kvm_is_error_hva(r))
 		return r;
 	return r | (pteg & ~PAGE_MASK);
@@ -145,7 +147,8 @@ static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary)
 }
 
 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
-					  struct kvmppc_pte *pte, bool data)
+					  struct kvmppc_pte *pte, bool data,
+					  bool iswrite)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	struct kvmppc_bat *bat;
@@ -186,8 +189,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
 			printk(KERN_INFO "BAT is not readable!\n");
 			continue;
 		}
-		if (!pte->may_write) {
-			/* let's treat r/o BATs as not-readable for now */
+		if (iswrite && !pte->may_write) {
 			dprintk_pte("BAT is read-only!\n");
 			continue;
 		}
@@ -201,9 +203,8 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
 
 static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 				     struct kvmppc_pte *pte, bool data,
-				     bool primary)
+				     bool iswrite, bool primary)
 {
-	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	u32 sre;
 	hva_t ptegp;
 	u32 pteg[16];
@@ -218,7 +219,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 
 	pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
 
-	ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu_book3s, sre, eaddr, primary);
+	ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu, sre, eaddr, primary);
 	if (kvm_is_error_hva(ptegp)) {
 		printk(KERN_INFO "KVM: Invalid PTEG!\n");
 		goto no_page_found;
@@ -258,9 +259,6 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 			break;
 		}
 
-		if ( !pte->may_read )
-			continue;
-
 		dprintk_pte("MMU: Found PTE -> %x %x - %x\n",
 			    pteg[i], pteg[i+1], pp);
 		found = 1;
@@ -271,19 +269,23 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* Update PTE C and A bits, so the guest's swapper knows we used the
 	   page */
 	if (found) {
-		u32 oldpte = pteg[i+1];
-
-		if (pte->may_read)
-			pteg[i+1] |= PTEG_FLAG_ACCESSED;
-		if (pte->may_write)
-			pteg[i+1] |= PTEG_FLAG_DIRTY;
-		else
-			dprintk_pte("KVM: Mapping read-only page!\n");
-
-		/* Write back into the PTEG */
-		if (pteg[i+1] != oldpte)
-			copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
-
+		u32 pte_r = pteg[i+1];
+		char __user *addr = (char __user *) &pteg[i+1];
+
+		/*
+		 * Use single-byte writes to update the HPTE, to
+		 * conform to what real hardware does.
+		 */
+		if (pte->may_read && !(pte_r & PTEG_FLAG_ACCESSED)) {
+			pte_r |= PTEG_FLAG_ACCESSED;
+			put_user(pte_r >> 8, addr + 2);
+		}
+		if (iswrite && pte->may_write && !(pte_r & PTEG_FLAG_DIRTY)) {
+			pte_r |= PTEG_FLAG_DIRTY;
+			put_user(pte_r, addr + 3);
+		}
+		if (!pte->may_read || (iswrite && !pte->may_write))
+			return -EPERM;
 		return 0;
 	}
 
@@ -302,12 +304,14 @@ no_page_found:
 }
 
 static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
-				      struct kvmppc_pte *pte, bool data)
+				      struct kvmppc_pte *pte, bool data,
+				      bool iswrite)
 {
 	int r;
 	ulong mp_ea = vcpu->arch.magic_page_ea;
 
 	pte->eaddr = eaddr;
+	pte->page_size = MMU_PAGE_4K;
 
 	/* Magic page override */
 	if (unlikely(mp_ea) &&
@@ -323,11 +327,13 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		return 0;
 	}
 
-	r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data);
+	r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data, iswrite);
 	if (r < 0)
-		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true);
+		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
+						   data, iswrite, true);
 	if (r < 0)
-		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, false);
+		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
+						   data, iswrite, false);
 
 	return r;
 }
@@ -347,7 +353,12 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
 
 static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
 {
-	kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000);
+	int i;
+	struct kvm_vcpu *v;
+
+	/* flush this VA on all cpus */
+	kvm_for_each_vcpu(i, v, vcpu->kvm)
+		kvmppc_mmu_pte_flush(v, ea, 0x0FFFF000);
 }
 
 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
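The rewritten R/C update in this file swaps a whole-PTEG copy_to_user() for single-byte put_user() stores, so a concurrent update to the other bytes of the HPTE word cannot be clobbered. A standalone C sketch of the byte arithmetic — the two flag values match the PTEG_FLAG_* constants this file uses, the rest is illustrative scaffolding:

#include <stdint.h>
#include <stdio.h>

/* Same values as the PTEG_FLAG_* constants used in this file. */
#define FLAG_ACCESSED 0x00000100u	/* R bit, lives in bits 15:8 */
#define FLAG_DIRTY    0x00000080u	/* C bit, lives in bits 7:0  */

int main(void)
{
	uint32_t pte_r = 0;

	/* put_user(pte_r >> 8, addr + 2): on a big-endian host, byte 2
	 * of the 32-bit word holds bits 15:8, so only the R byte changes. */
	pte_r |= FLAG_ACCESSED;
	printf("byte at +2 becomes 0x%02x\n", (pte_r >> 8) & 0xff);

	/* put_user(pte_r, addr + 3): byte 3 holds bits 7:0, the C byte. */
	pte_r |= FLAG_DIRTY;
	printf("byte at +3 becomes 0x%02x\n", pte_r & 0xff);

	return 0;
}

On the big-endian hosts this MMU code targets, that is why the stores go to addr + 2 and addr + 3 rather than rewriting the whole PTEG.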
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 00e619bf608e..3a0abd2e5a15 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -138,7 +138,8 @@ static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
 
 extern char etext[];
 
-int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
+int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
+			bool iswrite)
 {
 	pfn_t hpaddr;
 	u64 vpn;
@@ -152,9 +153,11 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	bool evict = false;
 	struct hpte_cache *pte;
 	int r = 0;
+	bool writable;
 
 	/* Get host physical address for gpa */
-	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
+	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT,
+				   iswrite, &writable);
 	if (is_error_noslot_pfn(hpaddr)) {
 		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
 		       orig_pte->eaddr);
@@ -204,7 +207,7 @@ next_pteg:
 		(primary ? 0 : PTE_SEC);
 	pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;
 
-	if (orig_pte->may_write) {
+	if (orig_pte->may_write && writable) {
 		pteg1 |= PP_RWRW;
 		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
 	} else {
@@ -259,6 +262,11 @@ out:
 	return r;
 }
 
+void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
+{
+	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
+}
+
 static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 {
 	struct kvmppc_sid_map *map;
@@ -341,7 +349,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 	svcpu_put(svcpu);
 }
 
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
 {
 	int i;
 
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 7e345e00661a..83da1f868fd5 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -107,9 +107,20 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
 	return kvmppc_slb_calc_vpn(slb, eaddr);
 }
 
+static int mmu_pagesize(int mmu_pg)
+{
+	switch (mmu_pg) {
+	case MMU_PAGE_64K:
+		return 16;
+	case MMU_PAGE_16M:
+		return 24;
+	}
+	return 12;
+}
+
 static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
 {
-	return slbe->large ? 24 : 12;
+	return mmu_pagesize(slbe->base_page_size);
 }
 
 static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
@@ -119,11 +130,11 @@ static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
 	return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
 }
 
-static hva_t kvmppc_mmu_book3s_64_get_pteg(
-				struct kvmppc_vcpu_book3s *vcpu_book3s,
+static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
 				struct kvmppc_slb *slbe, gva_t eaddr,
 				bool second)
 {
+	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	u64 hash, pteg, htabsize;
 	u32 ssize;
 	hva_t r;
@@ -148,10 +159,10 @@ static hva_t kvmppc_mmu_book3s_64_get_pteg(
 
 	/* When running a PAPR guest, SDR1 contains a HVA address instead
 	   of a GPA */
-	if (vcpu_book3s->vcpu.arch.papr_enabled)
+	if (vcpu->arch.papr_enabled)
 		r = pteg;
 	else
-		r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
+		r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
 
 	if (kvm_is_error_hva(r))
 		return r;
@@ -166,18 +177,38 @@ static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
 	avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
 	avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);
 
-	if (p < 24)
-		avpn >>= ((80 - p) - 56) - 8;
+	if (p < 16)
+		avpn >>= ((80 - p) - 56) - 8;	/* 16 - p */
 	else
-		avpn <<= 8;
+		avpn <<= p - 16;
 
 	return avpn;
 }
 
+/*
+ * Return page size encoded in the second word of a HPTE, or
+ * -1 for an invalid encoding for the base page size indicated by
+ * the SLB entry.  This doesn't handle mixed pagesize segments yet.
+ */
+static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
+{
+	switch (slbe->base_page_size) {
+	case MMU_PAGE_64K:
+		if ((r & 0xf000) == 0x1000)
+			return MMU_PAGE_64K;
+		break;
+	case MMU_PAGE_16M:
+		if ((r & 0xff000) == 0)
+			return MMU_PAGE_16M;
+		break;
+	}
+	return -1;
+}
+
 static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
-				      struct kvmppc_pte *gpte, bool data)
+				      struct kvmppc_pte *gpte, bool data,
+				      bool iswrite)
 {
-	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	struct kvmppc_slb *slbe;
 	hva_t ptegp;
 	u64 pteg[16];
@@ -189,6 +220,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	u8 pp, key = 0;
 	bool found = false;
 	bool second = false;
+	int pgsize;
 	ulong mp_ea = vcpu->arch.magic_page_ea;
 
 	/* Magic page override */
@@ -202,6 +234,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		gpte->may_execute = true;
 		gpte->may_read = true;
 		gpte->may_write = true;
+		gpte->page_size = MMU_PAGE_4K;
 
 		return 0;
 	}
@@ -222,8 +255,12 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
 		HPTE_V_SECONDARY;
 
+	pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;
+
+	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
+
 do_second:
-	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
+	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
 	if (kvm_is_error_hva(ptegp))
 		goto no_page_found;
 
@@ -240,6 +277,13 @@ do_second:
 	for (i=0; i<16; i+=2) {
 		/* Check all relevant fields of 1st dword */
 		if ((pteg[i] & v_mask) == v_val) {
+			/* If large page bit is set, check pgsize encoding */
+			if (slbe->large &&
+			    (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
+				pgsize = decode_pagesize(slbe, pteg[i+1]);
+				if (pgsize < 0)
+					continue;
+			}
 			found = true;
 			break;
 		}
@@ -256,13 +300,15 @@ do_second:
 	v = pteg[i];
 	r = pteg[i+1];
 	pp = (r & HPTE_R_PP) | key;
-	eaddr_mask = 0xFFF;
+	if (r & HPTE_R_PP0)
+		pp |= 8;
 
 	gpte->eaddr = eaddr;
 	gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
-	if (slbe->large)
-		eaddr_mask = 0xFFFFFF;
+
+	eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;
 	gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
+	gpte->page_size = pgsize;
 	gpte->may_execute = ((r & HPTE_R_N) ? false : true);
 	gpte->may_read = false;
 	gpte->may_write = false;
@@ -277,6 +323,7 @@ do_second:
 	case 3:
 	case 5:
 	case 7:
+	case 10:
 		gpte->may_read = true;
 		break;
 	}
@@ -287,30 +334,37 @@ do_second:
 
 	/* Update PTE R and C bits, so the guest's swapper knows we used the
 	 * page */
-	if (gpte->may_read) {
-		/* Set the accessed flag */
+	if (gpte->may_read && !(r & HPTE_R_R)) {
+		/*
+		 * Set the accessed flag.
+		 * We have to write this back with a single byte write
+		 * because another vcpu may be accessing this on
+		 * non-PAPR platforms such as mac99, and this is
+		 * what real hardware does.
+		 */
+		char __user *addr = (char __user *) &pteg[i+1];
 		r |= HPTE_R_R;
+		put_user(r >> 8, addr + 6);
 	}
-	if (data && gpte->may_write) {
-		/* Set the dirty flag -- XXX even if not writing */
+	if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
+		/* Set the dirty flag */
+		/* Use a single byte write */
+		char __user *addr = (char __user *) &pteg[i+1];
 		r |= HPTE_R_C;
+		put_user(r, addr + 7);
 	}
 
-	/* Write back into the PTEG */
-	if (pteg[i+1] != r) {
-		pteg[i+1] = r;
-		copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
-	}
+	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
 
-	if (!gpte->may_read)
+	if (!gpte->may_read || (iswrite && !gpte->may_write))
 		return -EPERM;
 	return 0;
 
 no_page_found:
+	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
 	return -ENOENT;
 
 no_seg_found:
-
 	dprintk("KVM MMU: Trigger segment fault\n");
 	return -EINVAL;
 }
@@ -345,6 +399,21 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
 	slbe->nx    = (rs & SLB_VSID_N) ? 1 : 0;
 	slbe->class = (rs & SLB_VSID_C) ? 1 : 0;
 
+	slbe->base_page_size = MMU_PAGE_4K;
+	if (slbe->large) {
+		if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) {
+			switch (rs & SLB_VSID_LP) {
+			case SLB_VSID_LP_00:
+				slbe->base_page_size = MMU_PAGE_16M;
+				break;
+			case SLB_VSID_LP_01:
+				slbe->base_page_size = MMU_PAGE_64K;
+				break;
+			}
+		} else
+			slbe->base_page_size = MMU_PAGE_16M;
+	}
+
 	slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
 	slbe->origv = rs;
 
@@ -460,14 +529,45 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
 				       bool large)
 {
 	u64 mask = 0xFFFFFFFFFULL;
+	long i;
+	struct kvm_vcpu *v;
 
 	dprintk("KVM MMU: tlbie(0x%lx)\n", va);
 
-	if (large)
-		mask = 0xFFFFFF000ULL;
-	kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
+	/*
+	 * The tlbie instruction changed behaviour starting with
+	 * POWER6.  POWER6 and later don't have the large page flag
+	 * in the instruction but in the RB value, along with bits
+	 * indicating page and segment sizes.
+	 */
+	if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
+		/* POWER6 or later */
+		if (va & 1) {		/* L bit */
+			if ((va & 0xf000) == 0x1000)
+				mask = 0xFFFFFFFF0ULL;	/* 64k page */
+			else
+				mask = 0xFFFFFF000ULL;	/* 16M page */
+		}
+	} else {
+		/* older processors, e.g. PPC970 */
+		if (large)
+			mask = 0xFFFFFF000ULL;
+	}
+	/* flush this VA on all vcpus */
+	kvm_for_each_vcpu(i, v, vcpu->kvm)
+		kvmppc_mmu_pte_vflush(v, va >> 12, mask);
 }
 
+#ifdef CONFIG_PPC_64K_PAGES
+static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
+{
+	ulong mp_ea = vcpu->arch.magic_page_ea;
+
+	return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) &&
+		(mp_ea >> SID_SHIFT) == esid;
+}
+#endif
+
 static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 					     u64 *vsid)
 {
@@ -475,11 +575,13 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 	struct kvmppc_slb *slb;
 	u64 gvsid = esid;
 	ulong mp_ea = vcpu->arch.magic_page_ea;
+	int pagesize = MMU_PAGE_64K;
 
 	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
 		if (slb) {
 			gvsid = slb->vsid;
+			pagesize = slb->base_page_size;
 			if (slb->tb) {
 				gvsid <<= SID_SHIFT_1T - SID_SHIFT;
 				gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
@@ -490,28 +592,41 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 
 	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 	case 0:
-		*vsid = VSID_REAL | esid;
+		gvsid = VSID_REAL | esid;
 		break;
 	case MSR_IR:
-		*vsid = VSID_REAL_IR | gvsid;
+		gvsid |= VSID_REAL_IR;
 		break;
 	case MSR_DR:
-		*vsid = VSID_REAL_DR | gvsid;
+		gvsid |= VSID_REAL_DR;
 		break;
 	case MSR_DR|MSR_IR:
 		if (!slb)
 			goto no_slb;
 
-		*vsid = gvsid;
 		break;
 	default:
 		BUG();
 		break;
 	}
 
+#ifdef CONFIG_PPC_64K_PAGES
+	/*
+	 * Mark this as a 64k segment if the host is using
+	 * 64k pages, the host MMU supports 64k pages and
+	 * the guest segment page size is >= 64k,
+	 * but not if this segment contains the magic page.
+	 */
+	if (pagesize >= MMU_PAGE_64K &&
+	    mmu_psize_defs[MMU_PAGE_64K].shift &&
+	    !segment_contains_magic_page(vcpu, esid))
+		gvsid |= VSID_64K;
+#endif
+
 	if (vcpu->arch.shared->msr & MSR_PR)
-		*vsid |= VSID_PR;
+		gvsid |= VSID_PR;
 
+	*vsid = gvsid;
 	return 0;
 
 no_slb:
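With multiple base page sizes in play, the xlate path above derives the in-page offset mask from the decoded page-size shift instead of hard-coding 0xFFF or 0xFFFFFF. A small standalone C sketch of that arithmetic (the enum and names are local to the example; the kernel indexes mmu_psize_defs[] by MMU_PAGE_* instead):

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for MMU_PAGE_4K/64K/16M. */
enum pg_size { PG_4K, PG_64K, PG_16M };

/* Mirrors mmu_pagesize() above: page-size index -> shift. */
static unsigned int pg_shift(enum pg_size sz)
{
	switch (sz) {
	case PG_64K:
		return 16;
	case PG_16M:
		return 24;
	default:
		return 12;
	}
}

int main(void)
{
	const enum pg_size sizes[] = { PG_4K, PG_64K, PG_16M };

	for (int i = 0; i < 3; i++) {
		/* eaddr_mask keeps the in-page offset bits of the guest
		 * effective address; the rest comes from the HPTE RPN. */
		uint64_t eaddr_mask = (1ull << pg_shift(sizes[i])) - 1;

		printf("shift %2u -> eaddr_mask 0x%06llx\n",
		       pg_shift(sizes[i]),
		       (unsigned long long)eaddr_mask);
	}
	return 0;
}

Running it prints 0xfff, 0xffff and 0xffffff, i.e. exactly the two literals the old code special-cased plus the new 64K case in between.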
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index e5240524bf6c..0d513af62bba 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -27,14 +27,14 @@
 #include <asm/machdep.h>
 #include <asm/mmu_context.h>
 #include <asm/hw_irq.h>
-#include "trace.h"
+#include "trace_pr.h"
 
 #define PTE_SIZE 12
 
 void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
 	ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
-			       MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M,
+			       pte->pagesize, pte->pagesize, MMU_SEGSIZE_256M,
 			       false);
 }
 
@@ -78,7 +78,8 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
 	return NULL;
 }
 
-int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
+int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
+			bool iswrite)
 {
 	unsigned long vpn;
 	pfn_t hpaddr;
@@ -90,16 +91,26 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	int attempt = 0;
 	struct kvmppc_sid_map *map;
 	int r = 0;
+	int hpsize = MMU_PAGE_4K;
+	bool writable;
+	unsigned long mmu_seq;
+	struct kvm *kvm = vcpu->kvm;
+	struct hpte_cache *cpte;
+	unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
+	unsigned long pfn;
+
+	/* used to check for invalidations in progress */
+	mmu_seq = kvm->mmu_notifier_seq;
+	smp_rmb();
 
 	/* Get host physical address for gpa */
-	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
-	if (is_error_noslot_pfn(hpaddr)) {
-		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
+	pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable);
+	if (is_error_noslot_pfn(pfn)) {
+		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", gfn);
 		r = -EINVAL;
 		goto out;
 	}
-	hpaddr <<= PAGE_SHIFT;
-	hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
+	hpaddr = pfn << PAGE_SHIFT;
 
 	/* and write the mapping ea -> hpa into the pt */
 	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
@@ -117,20 +128,39 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 		goto out;
 	}
 
-	vsid = map->host_vsid;
-	vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
+	vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);
 
-	if (!orig_pte->may_write)
-		rflags |= HPTE_R_PP;
-	else
-		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+	kvm_set_pfn_accessed(pfn);
+	if (!orig_pte->may_write || !writable)
+		rflags |= PP_RXRX;
+	else {
+		mark_page_dirty(vcpu->kvm, gfn);
+		kvm_set_pfn_dirty(pfn);
+	}
 
 	if (!orig_pte->may_execute)
 		rflags |= HPTE_R_N;
 	else
-		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
+		kvmppc_mmu_flush_icache(pfn);
+
+	/*
+	 * Use 64K pages if possible; otherwise, on 64K page kernels,
+	 * we need to transfer 4 more bits from guest real to host real addr.
+	 */
+	if (vsid & VSID_64K)
+		hpsize = MMU_PAGE_64K;
+	else
+		hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
+
+	hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);
 
-	hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M);
+	cpte = kvmppc_mmu_hpte_cache_next(vcpu);
+
+	spin_lock(&kvm->mmu_lock);
+	if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
+		r = -EAGAIN;
+		goto out_unlock;
+	}
 
map_again:
 	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
@@ -139,11 +169,11 @@ map_again:
 	if (attempt > 1)
 		if (ppc_md.hpte_remove(hpteg) < 0) {
 			r = -1;
-			goto out;
+			goto out_unlock;
 		}
 
 	ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
-				 MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M);
+				 hpsize, hpsize, MMU_SEGSIZE_256M);
 
 	if (ret < 0) {
 		/* If we couldn't map a primary PTE, try a secondary */
@@ -152,8 +182,6 @@ map_again:
 		attempt++;
 		goto map_again;
 	} else {
-		struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);
-
 		trace_kvm_book3s_64_mmu_map(rflags, hpteg,
 					    vpn, hpaddr, orig_pte);
 
@@ -164,19 +192,37 @@ map_again:
 			hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 		}
 
-		pte->slot = hpteg + (ret & 7);
-		pte->host_vpn = vpn;
-		pte->pte = *orig_pte;
-		pte->pfn = hpaddr >> PAGE_SHIFT;
+		cpte->slot = hpteg + (ret & 7);
+		cpte->host_vpn = vpn;
+		cpte->pte = *orig_pte;
+		cpte->pfn = pfn;
+		cpte->pagesize = hpsize;
 
-		kvmppc_mmu_hpte_cache_map(vcpu, pte);
+		kvmppc_mmu_hpte_cache_map(vcpu, cpte);
+		cpte = NULL;
 	}
-	kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
+
+out_unlock:
+	spin_unlock(&kvm->mmu_lock);
+	kvm_release_pfn_clean(pfn);
+	if (cpte)
+		kvmppc_mmu_hpte_cache_free(cpte);
 
 out:
 	return r;
 }
 
+void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
+{
+	u64 mask = 0xfffffffffULL;
+	u64 vsid;
+
+	vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
+	if (vsid & VSID_64K)
+		mask = 0xffffffff0ULL;
+	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
+}
+
 static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 {
 	struct kvmppc_sid_map *map;
@@ -291,6 +337,12 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
 		slb_vsid &= ~SLB_VSID_KP;
 	slb_esid |= slb_index;
 
+#ifdef CONFIG_PPC_64K_PAGES
+	/* Set host segment base page size to 64K if possible */
+	if (gvsid & VSID_64K)
+		slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
+#endif
+
 	svcpu->slb[slb_index].esid = slb_esid;
 	svcpu->slb[slb_index].vsid = slb_vsid;
 
@@ -326,7 +378,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 	svcpu_put(svcpu);
 }
 
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
 {
 	kvmppc_mmu_hpte_destroy(vcpu);
 	__destroy_context(to_book3s(vcpu)->context_id[0]);
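kvmppc_mmu_map_page() now follows the standard KVM MMU-notifier protocol: snapshot kvm->mmu_notifier_seq before the sleepable gfn-to-pfn lookup, then re-check under kvm->mmu_lock and return -EAGAIN if an invalidation raced in, so a stale pfn is never inserted into the host HPT. A compressed userspace sketch of that ordering — the types and the simulated invalidation are invented for illustration, while mmu_notifier_seq and mmu_notifier_retry() are the real kernel names:

#include <stdio.h>

/* Stand-in for struct kvm; only the sequence counter matters here. */
struct kvm_stub {
	unsigned long mmu_notifier_seq;
};

/* Stand-in for mmu_notifier_retry(): true if an invalidation ran
 * since the caller snapshotted the counter. */
static int notifier_retry(const struct kvm_stub *kvm, unsigned long snap)
{
	return kvm->mmu_notifier_seq != snap;
}

static int map_page(struct kvm_stub *kvm, int simulate_race)
{
	/* 1. snapshot the counter (paired with smp_rmb() in the kernel) */
	unsigned long mmu_seq = kvm->mmu_notifier_seq;

	/* 2. the sleepable gfn -> pfn lookup happens here; an unmap can
	 * race with it, which we model with a counter bump */
	if (simulate_race)
		kvm->mmu_notifier_seq++;

	/* 3. under mmu_lock, re-check before inserting the HPTE */
	if (notifier_retry(kvm, mmu_seq))
		return -1;	/* the kernel returns -EAGAIN; caller retries */

	return 0;	/* safe to insert the translation */
}

int main(void)
{
	struct kvm_stub kvm = { 0 };

	printf("no race: %d\n", map_page(&kvm, 0));
	printf("raced:   %d\n", map_page(&kvm, 1));
	return 0;
}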
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 043eec8461e7..f3ff587a8b7d 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -260,10 +260,6 @@ int kvmppc_mmu_hv_init(void)
 	return 0;
 }
 
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
-{
-}
-
 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
 {
 	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
@@ -451,7 +447,7 @@ static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
 }
 
 static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
-			struct kvmppc_pte *gpte, bool data)
+			struct kvmppc_pte *gpte, bool data, bool iswrite)
 {
 	struct kvm *kvm = vcpu->kvm;
 	struct kvmppc_slb *slbe;
@@ -906,21 +902,22 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 	return 0;
 }
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
 {
 	if (kvm->arch.using_mmu_notifiers)
 		kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
 	return 0;
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
 {
 	if (kvm->arch.using_mmu_notifiers)
 		kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
 	return 0;
 }
 
-void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
+void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
+				  struct kvm_memory_slot *memslot)
 {
 	unsigned long *rmapp;
 	unsigned long gfn;
@@ -994,7 +991,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 	return ret;
 }
 
-int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva)
 {
 	if (!kvm->arch.using_mmu_notifiers)
 		return 0;
@@ -1032,14 +1029,14 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 	return ret;
 }
 
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
 {
 	if (!kvm->arch.using_mmu_notifiers)
 		return 0;
 	return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
 }
 
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
 	if (!kvm->arch.using_mmu_notifiers)
 		return;
@@ -1512,9 +1509,8 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 
 			kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
 				(VRMA_VSID << SLB_VSID_SHIFT_1T);
-			lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
-			lpcr |= senc << (LPCR_VRMASD_SH - 4);
-			kvm->arch.lpcr = lpcr;
+			lpcr = senc << (LPCR_VRMASD_SH - 4);
+			kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
 			rma_setup = 1;
 		}
 		++i;
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 30c2f3b134c6..2c25f5412bdb 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -74,3 +74,4 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 	/* Didn't find the liobn, punt it to userspace */
 	return H_TOO_HARD;
 }
+EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 360ce68c9809..99d40f8977e8 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -86,8 +86,8 @@ static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
 	return true;
 }
 
-int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
-			   unsigned int inst, int *advance)
+int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			      unsigned int inst, int *advance)
 {
 	int emulated = EMULATE_DONE;
 	int rt = get_rt(inst);
@@ -172,7 +172,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			vcpu->arch.mmu.tlbie(vcpu, addr, large);
 			break;
 		}
-#ifdef CONFIG_KVM_BOOK3S_64_PR
+#ifdef CONFIG_PPC_BOOK3S_64
 		case OP_31_XOP_FAKE_SC1:
 		{
 			/* SC 1 papr hypercalls */
@@ -267,12 +267,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
 			if ((r == -ENOENT) || (r == -EPERM)) {
-				struct kvmppc_book3s_shadow_vcpu *svcpu;
-
-				svcpu = svcpu_get(vcpu);
 				*advance = 0;
 				vcpu->arch.shared->dar = vaddr;
-				svcpu->fault_dar = vaddr;
+				vcpu->arch.fault_dar = vaddr;
 
 				dsisr = DSISR_ISSTORE;
 				if (r == -ENOENT)
@@ -281,8 +278,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 					dsisr |= DSISR_PROTFAULT;
 
 				vcpu->arch.shared->dsisr = dsisr;
-				svcpu->fault_dsisr = dsisr;
-				svcpu_put(svcpu);
+				vcpu->arch.fault_dsisr = dsisr;
 
 				kvmppc_book3s_queue_irqprio(vcpu,
 					BOOK3S_INTERRUPT_DATA_STORAGE);
@@ -349,7 +345,7 @@ static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
 	return bat;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
 	int emulated = EMULATE_DONE;
 
@@ -472,7 +468,7 @@ unprivileged:
 	return emulated;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
 	int emulated = EMULATE_DONE;
 
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 7057a02f0906..852989a9bad3 100644
--- a/arch/powerpc/kvm/book3s_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
@@ -20,9 +20,10 @@
20#include <linux/export.h> 20#include <linux/export.h>
21#include <asm/kvm_book3s.h> 21#include <asm/kvm_book3s.h>
22 22
23#ifdef CONFIG_KVM_BOOK3S_64_HV 23#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
24EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline); 24EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
25#else 25#endif
26#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
26EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline); 27EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline);
27EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu); 28EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
28#ifdef CONFIG_ALTIVEC 29#ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 62a2b5ab08ed..072287f1c3bc 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -52,6 +52,9 @@
52#include <linux/vmalloc.h> 52#include <linux/vmalloc.h>
53#include <linux/highmem.h> 53#include <linux/highmem.h>
54#include <linux/hugetlb.h> 54#include <linux/hugetlb.h>
55#include <linux/module.h>
56
57#include "book3s.h"
55 58
56/* #define EXIT_DEBUG */ 59/* #define EXIT_DEBUG */
57/* #define EXIT_DEBUG_SIMPLE */ 60/* #define EXIT_DEBUG_SIMPLE */
@@ -66,7 +69,7 @@
66static void kvmppc_end_cede(struct kvm_vcpu *vcpu); 69static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
67static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); 70static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
68 71
69void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu) 72static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
70{ 73{
71 int me; 74 int me;
72 int cpu = vcpu->cpu; 75 int cpu = vcpu->cpu;
@@ -125,7 +128,7 @@ void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
125 * purely defensive; they should never fail.) 128 * purely defensive; they should never fail.)
126 */ 129 */
127 130
128void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 131static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
129{ 132{
130 struct kvmppc_vcore *vc = vcpu->arch.vcore; 133 struct kvmppc_vcore *vc = vcpu->arch.vcore;
131 134
@@ -143,7 +146,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
143 spin_unlock(&vcpu->arch.tbacct_lock); 146 spin_unlock(&vcpu->arch.tbacct_lock);
144} 147}
145 148
146void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) 149static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
147{ 150{
148 struct kvmppc_vcore *vc = vcpu->arch.vcore; 151 struct kvmppc_vcore *vc = vcpu->arch.vcore;
149 152
@@ -155,17 +158,46 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
155 spin_unlock(&vcpu->arch.tbacct_lock); 158 spin_unlock(&vcpu->arch.tbacct_lock);
156} 159}
157 160
158void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) 161static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
159{ 162{
160 vcpu->arch.shregs.msr = msr; 163 vcpu->arch.shregs.msr = msr;
161 kvmppc_end_cede(vcpu); 164 kvmppc_end_cede(vcpu);
162} 165}
163 166
164void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) 167void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
165{ 168{
166 vcpu->arch.pvr = pvr; 169 vcpu->arch.pvr = pvr;
167} 170}
168 171
172int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
173{
174 unsigned long pcr = 0;
175 struct kvmppc_vcore *vc = vcpu->arch.vcore;
176
177 if (arch_compat) {
178 if (!cpu_has_feature(CPU_FTR_ARCH_206))
179 return -EINVAL; /* 970 has no compat mode support */
180
181 switch (arch_compat) {
182 case PVR_ARCH_205:
183 pcr = PCR_ARCH_205;
184 break;
185 case PVR_ARCH_206:
186 case PVR_ARCH_206p:
187 break;
188 default:
189 return -EINVAL;
190 }
191 }
192
193 spin_lock(&vc->lock);
194 vc->arch_compat = arch_compat;
195 vc->pcr = pcr;
196 spin_unlock(&vc->lock);
197
198 return 0;
199}
200
169void kvmppc_dump_regs(struct kvm_vcpu *vcpu) 201void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
170{ 202{
171 int r; 203 int r;
@@ -195,7 +227,7 @@ void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
195 pr_err(" ESID = %.16llx VSID = %.16llx\n", 227 pr_err(" ESID = %.16llx VSID = %.16llx\n",
196 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); 228 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
197 pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n", 229 pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
198 vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1, 230 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
199 vcpu->arch.last_inst); 231 vcpu->arch.last_inst);
200} 232}
201 233
@@ -489,7 +521,7 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
489 memset(dt, 0, sizeof(struct dtl_entry)); 521 memset(dt, 0, sizeof(struct dtl_entry));
490 dt->dispatch_reason = 7; 522 dt->dispatch_reason = 7;
491 dt->processor_id = vc->pcpu + vcpu->arch.ptid; 523 dt->processor_id = vc->pcpu + vcpu->arch.ptid;
492 dt->timebase = now; 524 dt->timebase = now + vc->tb_offset;
493 dt->enqueue_to_dispatch_time = stolen; 525 dt->enqueue_to_dispatch_time = stolen;
494 dt->srr0 = kvmppc_get_pc(vcpu); 526 dt->srr0 = kvmppc_get_pc(vcpu);
495 dt->srr1 = vcpu->arch.shregs.msr; 527 dt->srr1 = vcpu->arch.shregs.msr;
@@ -538,6 +570,15 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
538 } 570 }
539 break; 571 break;
540 case H_CONFER: 572 case H_CONFER:
573 target = kvmppc_get_gpr(vcpu, 4);
574 if (target == -1)
575 break;
576 tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
577 if (!tvcpu) {
578 ret = H_PARAMETER;
579 break;
580 }
581 kvm_vcpu_yield_to(tvcpu);
541 break; 582 break;
542 case H_REGISTER_VPA: 583 case H_REGISTER_VPA:
543 ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4), 584 ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
@@ -576,8 +617,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
576 return RESUME_GUEST; 617 return RESUME_GUEST;
577} 618}
578 619
579static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, 620static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
580 struct task_struct *tsk) 621 struct task_struct *tsk)
581{ 622{
582 int r = RESUME_HOST; 623 int r = RESUME_HOST;
583 624
@@ -671,16 +712,16 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
671 printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", 712 printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
672 vcpu->arch.trap, kvmppc_get_pc(vcpu), 713 vcpu->arch.trap, kvmppc_get_pc(vcpu),
673 vcpu->arch.shregs.msr); 714 vcpu->arch.shregs.msr);
715 run->hw.hardware_exit_reason = vcpu->arch.trap;
674 r = RESUME_HOST; 716 r = RESUME_HOST;
675 BUG();
676 break; 717 break;
677 } 718 }
678 719
679 return r; 720 return r;
680} 721}
681 722
682int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 723static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
683 struct kvm_sregs *sregs) 724 struct kvm_sregs *sregs)
684{ 725{
685 int i; 726 int i;
686 727
@@ -694,12 +735,12 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
694 return 0; 735 return 0;
695} 736}
696 737
697int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 738static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
698 struct kvm_sregs *sregs) 739 struct kvm_sregs *sregs)
699{ 740{
700 int i, j; 741 int i, j;
701 742
702 kvmppc_set_pvr(vcpu, sregs->pvr); 743 kvmppc_set_pvr_hv(vcpu, sregs->pvr);
703 744
704 j = 0; 745 j = 0;
705 for (i = 0; i < vcpu->arch.slb_nr; i++) { 746 for (i = 0; i < vcpu->arch.slb_nr; i++) {
@@ -714,7 +755,23 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
714 return 0; 755 return 0;
715} 756}
716 757
717int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) 758static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
759{
760 struct kvmppc_vcore *vc = vcpu->arch.vcore;
761 u64 mask;
762
763 spin_lock(&vc->lock);
764 /*
765 * Userspace can only modify DPFD (default prefetch depth),
766 * ILE (interrupt little-endian) and TC (translation control).
767 */
768 mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
769 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
770 spin_unlock(&vc->lock);
771}
772
773static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
774 union kvmppc_one_reg *val)
718{ 775{
719 int r = 0; 776 int r = 0;
720 long int i; 777 long int i;
@@ -749,6 +806,12 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
749 i = id - KVM_REG_PPC_PMC1; 806 i = id - KVM_REG_PPC_PMC1;
750 *val = get_reg_val(id, vcpu->arch.pmc[i]); 807 *val = get_reg_val(id, vcpu->arch.pmc[i]);
751 break; 808 break;
809 case KVM_REG_PPC_SIAR:
810 *val = get_reg_val(id, vcpu->arch.siar);
811 break;
812 case KVM_REG_PPC_SDAR:
813 *val = get_reg_val(id, vcpu->arch.sdar);
814 break;
752#ifdef CONFIG_VSX 815#ifdef CONFIG_VSX
753 case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: 816 case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
754 if (cpu_has_feature(CPU_FTR_VSX)) { 817 if (cpu_has_feature(CPU_FTR_VSX)) {
@@ -787,6 +850,18 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
787 val->vpaval.length = vcpu->arch.dtl.len; 850 val->vpaval.length = vcpu->arch.dtl.len;
788 spin_unlock(&vcpu->arch.vpa_update_lock); 851 spin_unlock(&vcpu->arch.vpa_update_lock);
789 break; 852 break;
853 case KVM_REG_PPC_TB_OFFSET:
854 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
855 break;
856 case KVM_REG_PPC_LPCR:
857 *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
858 break;
859 case KVM_REG_PPC_PPR:
860 *val = get_reg_val(id, vcpu->arch.ppr);
861 break;
862 case KVM_REG_PPC_ARCH_COMPAT:
863 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
864 break;
790 default: 865 default:
791 r = -EINVAL; 866 r = -EINVAL;
792 break; 867 break;
@@ -795,7 +870,8 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
795 return r; 870 return r;
796} 871}
797 872
798int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) 873static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
874 union kvmppc_one_reg *val)
799{ 875{
800 int r = 0; 876 int r = 0;
801 long int i; 877 long int i;
@@ -833,6 +909,12 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
833 i = id - KVM_REG_PPC_PMC1; 909 i = id - KVM_REG_PPC_PMC1;
834 vcpu->arch.pmc[i] = set_reg_val(id, *val); 910 vcpu->arch.pmc[i] = set_reg_val(id, *val);
835 break; 911 break;
912 case KVM_REG_PPC_SIAR:
913 vcpu->arch.siar = set_reg_val(id, *val);
914 break;
915 case KVM_REG_PPC_SDAR:
916 vcpu->arch.sdar = set_reg_val(id, *val);
917 break;
836#ifdef CONFIG_VSX 918#ifdef CONFIG_VSX
837 case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: 919 case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
838 if (cpu_has_feature(CPU_FTR_VSX)) { 920 if (cpu_has_feature(CPU_FTR_VSX)) {
@@ -880,6 +962,20 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
880 len -= len % sizeof(struct dtl_entry); 962 len -= len % sizeof(struct dtl_entry);
881 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); 963 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
882 break; 964 break;
965 case KVM_REG_PPC_TB_OFFSET:
966 /* round up to multiple of 2^24 */
967 vcpu->arch.vcore->tb_offset =
968 ALIGN(set_reg_val(id, *val), 1UL << 24);
969 break;
970 case KVM_REG_PPC_LPCR:
971 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val));
972 break;
973 case KVM_REG_PPC_PPR:
974 vcpu->arch.ppr = set_reg_val(id, *val);
975 break;
976 case KVM_REG_PPC_ARCH_COMPAT:
977 r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
978 break;
883 default: 979 default:
884 r = -EINVAL; 980 r = -EINVAL;
885 break; 981 break;
@@ -888,14 +984,8 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
888 return r; 984 return r;
889} 985}
890 986
891int kvmppc_core_check_processor_compat(void) 987static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
892{ 988 unsigned int id)
893 if (cpu_has_feature(CPU_FTR_HVMODE))
894 return 0;
895 return -EIO;
896}
897
898struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
899{ 989{
900 struct kvm_vcpu *vcpu; 990 struct kvm_vcpu *vcpu;
901 int err = -EINVAL; 991 int err = -EINVAL;
@@ -919,8 +1009,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
919 vcpu->arch.mmcr[0] = MMCR0_FC; 1009 vcpu->arch.mmcr[0] = MMCR0_FC;
920 vcpu->arch.ctrl = CTRL_RUNLATCH; 1010 vcpu->arch.ctrl = CTRL_RUNLATCH;
921 /* default to host PVR, since we can't spoof it */ 1011 /* default to host PVR, since we can't spoof it */
922 vcpu->arch.pvr = mfspr(SPRN_PVR); 1012 kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
923 kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
924 spin_lock_init(&vcpu->arch.vpa_update_lock); 1013 spin_lock_init(&vcpu->arch.vpa_update_lock);
925 spin_lock_init(&vcpu->arch.tbacct_lock); 1014 spin_lock_init(&vcpu->arch.tbacct_lock);
926 vcpu->arch.busy_preempt = TB_NIL; 1015 vcpu->arch.busy_preempt = TB_NIL;
@@ -940,6 +1029,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
940 spin_lock_init(&vcore->lock); 1029 spin_lock_init(&vcore->lock);
941 init_waitqueue_head(&vcore->wq); 1030 init_waitqueue_head(&vcore->wq);
942 vcore->preempt_tb = TB_NIL; 1031 vcore->preempt_tb = TB_NIL;
1032 vcore->lpcr = kvm->arch.lpcr;
943 } 1033 }
944 kvm->arch.vcores[core] = vcore; 1034 kvm->arch.vcores[core] = vcore;
945 kvm->arch.online_vcores++; 1035 kvm->arch.online_vcores++;
@@ -972,7 +1062,7 @@ static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
972 vpa->dirty); 1062 vpa->dirty);
973} 1063}
974 1064
975void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) 1065static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
976{ 1066{
977 spin_lock(&vcpu->arch.vpa_update_lock); 1067 spin_lock(&vcpu->arch.vpa_update_lock);
978 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); 1068 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
@@ -983,6 +1073,12 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
983 kmem_cache_free(kvm_vcpu_cache, vcpu); 1073 kmem_cache_free(kvm_vcpu_cache, vcpu);
984} 1074}
985 1075
1076static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
1077{
1078 /* Indicate we want to get back into the guest */
1079 return 1;
1080}
1081
986static void kvmppc_set_timer(struct kvm_vcpu *vcpu) 1082static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
987{ 1083{
988 unsigned long dec_nsec, now; 1084 unsigned long dec_nsec, now;
@@ -1264,8 +1360,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
1264 1360
1265 ret = RESUME_GUEST; 1361 ret = RESUME_GUEST;
1266 if (vcpu->arch.trap) 1362 if (vcpu->arch.trap)
1267 ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu, 1363 ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
1268 vcpu->arch.run_task); 1364 vcpu->arch.run_task);
1269 1365
1270 vcpu->arch.ret = ret; 1366 vcpu->arch.ret = ret;
1271 vcpu->arch.trap = 0; 1367 vcpu->arch.trap = 0;
@@ -1424,7 +1520,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1424 return vcpu->arch.ret; 1520 return vcpu->arch.ret;
1425} 1521}
1426 1522
1427int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) 1523static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
1428{ 1524{
1429 int r; 1525 int r;
1430 int srcu_idx; 1526 int srcu_idx;
@@ -1546,7 +1642,8 @@ static const struct file_operations kvm_rma_fops = {
1546 .release = kvm_rma_release, 1642 .release = kvm_rma_release,
1547}; 1643};
1548 1644
1549long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret) 1645static long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
1646 struct kvm_allocate_rma *ret)
1550{ 1647{
1551 long fd; 1648 long fd;
1552 struct kvm_rma_info *ri; 1649 struct kvm_rma_info *ri;
@@ -1592,7 +1689,8 @@ static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
1592 (*sps)++; 1689 (*sps)++;
1593} 1690}
1594 1691
1595int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info) 1692static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
1693 struct kvm_ppc_smmu_info *info)
1596{ 1694{
1597 struct kvm_ppc_one_seg_page_size *sps; 1695 struct kvm_ppc_one_seg_page_size *sps;
1598 1696
@@ -1613,7 +1711,8 @@ int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
1613/* 1711/*
1614 * Get (and clear) the dirty memory log for a memory slot. 1712 * Get (and clear) the dirty memory log for a memory slot.
1615 */ 1713 */
1616int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) 1714static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
1715 struct kvm_dirty_log *log)
1617{ 1716{
1618 struct kvm_memory_slot *memslot; 1717 struct kvm_memory_slot *memslot;
1619 int r; 1718 int r;
@@ -1667,8 +1766,8 @@ static void unpin_slot(struct kvm_memory_slot *memslot)
1667 } 1766 }
1668} 1767}
1669 1768
1670void kvmppc_core_free_memslot(struct kvm_memory_slot *free, 1769static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
1671 struct kvm_memory_slot *dont) 1770 struct kvm_memory_slot *dont)
1672{ 1771{
1673 if (!dont || free->arch.rmap != dont->arch.rmap) { 1772 if (!dont || free->arch.rmap != dont->arch.rmap) {
1674 vfree(free->arch.rmap); 1773 vfree(free->arch.rmap);
@@ -1681,8 +1780,8 @@ void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
1681 } 1780 }
1682} 1781}
1683 1782
1684int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, 1783static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
1685 unsigned long npages) 1784 unsigned long npages)
1686{ 1785{
1687 slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); 1786 slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
1688 if (!slot->arch.rmap) 1787 if (!slot->arch.rmap)
@@ -1692,9 +1791,9 @@ int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
1692 return 0; 1791 return 0;
1693} 1792}
1694 1793
1695int kvmppc_core_prepare_memory_region(struct kvm *kvm, 1794static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
1696 struct kvm_memory_slot *memslot, 1795 struct kvm_memory_slot *memslot,
1697 struct kvm_userspace_memory_region *mem) 1796 struct kvm_userspace_memory_region *mem)
1698{ 1797{
1699 unsigned long *phys; 1798 unsigned long *phys;
1700 1799
@@ -1710,9 +1809,9 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
1710 return 0; 1809 return 0;
1711} 1810}
1712 1811
1713void kvmppc_core_commit_memory_region(struct kvm *kvm, 1812static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
1714 struct kvm_userspace_memory_region *mem, 1813 struct kvm_userspace_memory_region *mem,
1715 const struct kvm_memory_slot *old) 1814 const struct kvm_memory_slot *old)
1716{ 1815{
1717 unsigned long npages = mem->memory_size >> PAGE_SHIFT; 1816 unsigned long npages = mem->memory_size >> PAGE_SHIFT;
1718 struct kvm_memory_slot *memslot; 1817 struct kvm_memory_slot *memslot;
@@ -1729,6 +1828,37 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
1729 } 1828 }
1730} 1829}
1731 1830
1831/*
1832 * Update LPCR values in kvm->arch and in vcores.
1833 * Caller must hold kvm->lock.
1834 */
1835void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
1836{
1837 long int i;
1838 u32 cores_done = 0;
1839
1840 if ((kvm->arch.lpcr & mask) == lpcr)
1841 return;
1842
1843 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;
1844
1845 for (i = 0; i < KVM_MAX_VCORES; ++i) {
1846 struct kvmppc_vcore *vc = kvm->arch.vcores[i];
1847 if (!vc)
1848 continue;
1849 spin_lock(&vc->lock);
1850 vc->lpcr = (vc->lpcr & ~mask) | lpcr;
1851 spin_unlock(&vc->lock);
1852 if (++cores_done >= kvm->arch.online_vcores)
1853 break;
1854 }
1855}
1856
1857static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
1858{
1859 return;
1860}
1861
1732static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) 1862static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
1733{ 1863{
1734 int err = 0; 1864 int err = 0;
@@ -1737,7 +1867,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
1737 unsigned long hva; 1867 unsigned long hva;
1738 struct kvm_memory_slot *memslot; 1868 struct kvm_memory_slot *memslot;
1739 struct vm_area_struct *vma; 1869 struct vm_area_struct *vma;
1740 unsigned long lpcr, senc; 1870 unsigned long lpcr = 0, senc;
1871 unsigned long lpcr_mask = 0;
1741 unsigned long psize, porder; 1872 unsigned long psize, porder;
1742 unsigned long rma_size; 1873 unsigned long rma_size;
1743 unsigned long rmls; 1874 unsigned long rmls;
@@ -1802,9 +1933,9 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
1802 senc = slb_pgsize_encoding(psize); 1933 senc = slb_pgsize_encoding(psize);
1803 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | 1934 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
1804 (VRMA_VSID << SLB_VSID_SHIFT_1T); 1935 (VRMA_VSID << SLB_VSID_SHIFT_1T);
1805 lpcr = kvm->arch.lpcr & ~LPCR_VRMASD; 1936 lpcr_mask = LPCR_VRMASD;
1806 lpcr |= senc << (LPCR_VRMASD_SH - 4); 1937 /* the -4 is to account for senc values starting at 0x10 */
1807 kvm->arch.lpcr = lpcr; 1938 lpcr = senc << (LPCR_VRMASD_SH - 4);
1808 1939
1809 /* Create HPTEs in the hash page table for the VRMA */ 1940 /* Create HPTEs in the hash page table for the VRMA */
1810 kvmppc_map_vrma(vcpu, memslot, porder); 1941 kvmppc_map_vrma(vcpu, memslot, porder);
@@ -1825,23 +1956,21 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
1825 kvm->arch.rma = ri; 1956 kvm->arch.rma = ri;
1826 1957
1827 /* Update LPCR and RMOR */ 1958 /* Update LPCR and RMOR */
1828 lpcr = kvm->arch.lpcr;
1829 if (cpu_has_feature(CPU_FTR_ARCH_201)) { 1959 if (cpu_has_feature(CPU_FTR_ARCH_201)) {
1830 /* PPC970; insert RMLS value (split field) in HID4 */ 1960 /* PPC970; insert RMLS value (split field) in HID4 */
1831 lpcr &= ~((1ul << HID4_RMLS0_SH) | 1961 lpcr_mask = (1ul << HID4_RMLS0_SH) |
1832 (3ul << HID4_RMLS2_SH)); 1962 (3ul << HID4_RMLS2_SH) | HID4_RMOR;
1833 lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) | 1963 lpcr = ((rmls >> 2) << HID4_RMLS0_SH) |
1834 ((rmls & 3) << HID4_RMLS2_SH); 1964 ((rmls & 3) << HID4_RMLS2_SH);
1835 /* RMOR is also in HID4 */ 1965 /* RMOR is also in HID4 */
1836 lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff) 1966 lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
1837 << HID4_RMOR_SH; 1967 << HID4_RMOR_SH;
1838 } else { 1968 } else {
1839 /* POWER7 */ 1969 /* POWER7 */
1840 lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L); 1970 lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS;
1841 lpcr |= rmls << LPCR_RMLS_SH; 1971 lpcr = rmls << LPCR_RMLS_SH;
1842 kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT; 1972 kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
1843 } 1973 }
1844 kvm->arch.lpcr = lpcr;
1845 pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n", 1974 pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
1846 ri->base_pfn << PAGE_SHIFT, rma_size, lpcr); 1975 ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
1847 1976
@@ -1860,6 +1989,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
1860 } 1989 }
1861 } 1990 }
1862 1991
1992 kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
1993
1863 /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */ 1994 /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
1864 smp_wmb(); 1995 smp_wmb();
1865 kvm->arch.rma_setup_done = 1; 1996 kvm->arch.rma_setup_done = 1;
@@ -1875,7 +2006,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
1875 goto out_srcu; 2006 goto out_srcu;
1876} 2007}
1877 2008
1878int kvmppc_core_init_vm(struct kvm *kvm) 2009static int kvmppc_core_init_vm_hv(struct kvm *kvm)
1879{ 2010{
1880 unsigned long lpcr, lpid; 2011 unsigned long lpcr, lpid;
1881 2012
@@ -1893,9 +2024,6 @@ int kvmppc_core_init_vm(struct kvm *kvm)
1893 */ 2024 */
1894 cpumask_setall(&kvm->arch.need_tlb_flush); 2025 cpumask_setall(&kvm->arch.need_tlb_flush);
1895 2026
1896 INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
1897 INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
1898
1899 kvm->arch.rma = NULL; 2027 kvm->arch.rma = NULL;
1900 2028
1901 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); 2029 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
@@ -1931,61 +2059,162 @@ int kvmppc_core_init_vm(struct kvm *kvm)
1931 return 0; 2059 return 0;
1932} 2060}
1933 2061
1934void kvmppc_core_destroy_vm(struct kvm *kvm) 2062static void kvmppc_free_vcores(struct kvm *kvm)
2063{
2064 long int i;
2065
2066 for (i = 0; i < KVM_MAX_VCORES; ++i)
2067 kfree(kvm->arch.vcores[i]);
2068 kvm->arch.online_vcores = 0;
2069}
2070
2071static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
1935{ 2072{
1936 uninhibit_secondary_onlining(); 2073 uninhibit_secondary_onlining();
1937 2074
2075 kvmppc_free_vcores(kvm);
1938 if (kvm->arch.rma) { 2076 if (kvm->arch.rma) {
1939 kvm_release_rma(kvm->arch.rma); 2077 kvm_release_rma(kvm->arch.rma);
1940 kvm->arch.rma = NULL; 2078 kvm->arch.rma = NULL;
1941 } 2079 }
1942 2080
1943 kvmppc_rtas_tokens_free(kvm);
1944
1945 kvmppc_free_hpt(kvm); 2081 kvmppc_free_hpt(kvm);
1946 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
1947} 2082}
1948 2083
1949/* These are stubs for now */ 2084/* We don't need to emulate any privileged instructions or dcbz */
1950void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) 2085static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
2086 unsigned int inst, int *advance)
1951{ 2087{
2088 return EMULATE_FAIL;
1952} 2089}
1953 2090
1954/* We don't need to emulate any privileged instructions or dcbz */ 2091static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
1955int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, 2092 ulong spr_val)
1956 unsigned int inst, int *advance)
1957{ 2093{
1958 return EMULATE_FAIL; 2094 return EMULATE_FAIL;
1959} 2095}
1960 2096
1961int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) 2097static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
2098 ulong *spr_val)
1962{ 2099{
1963 return EMULATE_FAIL; 2100 return EMULATE_FAIL;
1964} 2101}
1965 2102
1966int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) 2103static int kvmppc_core_check_processor_compat_hv(void)
1967{ 2104{
1968 return EMULATE_FAIL; 2105 if (!cpu_has_feature(CPU_FTR_HVMODE))
2106 return -EIO;
2107 return 0;
1969} 2108}
1970 2109
1971static int kvmppc_book3s_hv_init(void) 2110static long kvm_arch_vm_ioctl_hv(struct file *filp,
2111 unsigned int ioctl, unsigned long arg)
1972{ 2112{
1973 int r; 2113 struct kvm *kvm __maybe_unused = filp->private_data;
2114 void __user *argp = (void __user *)arg;
2115 long r;
1974 2116
1975 r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); 2117 switch (ioctl) {
1976 2118
1977 if (r) 2119 case KVM_ALLOCATE_RMA: {
2120 struct kvm_allocate_rma rma;
2121 struct kvm *kvm = filp->private_data;
2122
2123 r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
2124 if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
2125 r = -EFAULT;
2126 break;
2127 }
2128
2129 case KVM_PPC_ALLOCATE_HTAB: {
2130 u32 htab_order;
2131
2132 r = -EFAULT;
2133 if (get_user(htab_order, (u32 __user *)argp))
2134 break;
2135 r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
2136 if (r)
2137 break;
2138 r = -EFAULT;
2139 if (put_user(htab_order, (u32 __user *)argp))
2140 break;
2141 r = 0;
2142 break;
2143 }
2144
2145 case KVM_PPC_GET_HTAB_FD: {
2146 struct kvm_get_htab_fd ghf;
2147
2148 r = -EFAULT;
2149 if (copy_from_user(&ghf, argp, sizeof(ghf)))
2150 break;
2151 r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
2152 break;
2153 }
2154
2155 default:
2156 r = -ENOTTY;
2157 }
2158
2159 return r;
2160}
2161
2162static struct kvmppc_ops kvm_ops_hv = {
2163 .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
2164 .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
2165 .get_one_reg = kvmppc_get_one_reg_hv,
2166 .set_one_reg = kvmppc_set_one_reg_hv,
2167 .vcpu_load = kvmppc_core_vcpu_load_hv,
2168 .vcpu_put = kvmppc_core_vcpu_put_hv,
2169 .set_msr = kvmppc_set_msr_hv,
2170 .vcpu_run = kvmppc_vcpu_run_hv,
2171 .vcpu_create = kvmppc_core_vcpu_create_hv,
2172 .vcpu_free = kvmppc_core_vcpu_free_hv,
2173 .check_requests = kvmppc_core_check_requests_hv,
2174 .get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv,
2175 .flush_memslot = kvmppc_core_flush_memslot_hv,
2176 .prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
2177 .commit_memory_region = kvmppc_core_commit_memory_region_hv,
2178 .unmap_hva = kvm_unmap_hva_hv,
2179 .unmap_hva_range = kvm_unmap_hva_range_hv,
2180 .age_hva = kvm_age_hva_hv,
2181 .test_age_hva = kvm_test_age_hva_hv,
2182 .set_spte_hva = kvm_set_spte_hva_hv,
2183 .mmu_destroy = kvmppc_mmu_destroy_hv,
2184 .free_memslot = kvmppc_core_free_memslot_hv,
2185 .create_memslot = kvmppc_core_create_memslot_hv,
2186 .init_vm = kvmppc_core_init_vm_hv,
2187 .destroy_vm = kvmppc_core_destroy_vm_hv,
2188 .get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
2189 .emulate_op = kvmppc_core_emulate_op_hv,
2190 .emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
2191 .emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
2192 .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
2193 .arch_vm_ioctl = kvm_arch_vm_ioctl_hv,
2194};
2195
2196static int kvmppc_book3s_init_hv(void)
2197{
2198 int r;
2199 /*
2200 * FIXME!! Do we need to check on all cpus ?
2201 */
2202 r = kvmppc_core_check_processor_compat_hv();
2203 if (r < 0)
1978 return r; 2204 return r;
1979 2205
1980 r = kvmppc_mmu_hv_init(); 2206 kvm_ops_hv.owner = THIS_MODULE;
2207 kvmppc_hv_ops = &kvm_ops_hv;
1981 2208
2209 r = kvmppc_mmu_hv_init();
1982 return r; 2210 return r;
1983} 2211}
1984 2212
1985static void kvmppc_book3s_hv_exit(void) 2213static void kvmppc_book3s_exit_hv(void)
1986{ 2214{
1987 kvm_exit(); 2215 kvmppc_hv_ops = NULL;
1988} 2216}
1989 2217
1990module_init(kvmppc_book3s_hv_init); 2218module_init(kvmppc_book3s_init_hv);
1991module_exit(kvmppc_book3s_hv_exit); 2219module_exit(kvmppc_book3s_exit_hv);
2220MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 37f1cc417ca0..928142c64cb0 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -158,9 +158,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
158 * Interrupts are enabled again at this point. 158 * Interrupts are enabled again at this point.
159 */ 159 */
160 160
161.global kvmppc_handler_highmem
162kvmppc_handler_highmem:
163
164 /* 161 /*
165 * Register usage at this point: 162 * Register usage at this point:
166 * 163 *
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 294b7af28cdd..bc8de75b1925 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -33,30 +33,6 @@
33#error Need to fix lppaca and SLB shadow accesses in little endian mode 33#error Need to fix lppaca and SLB shadow accesses in little endian mode
34#endif 34#endif
35 35
36/*****************************************************************************
37 * *
38 * Real Mode handlers that need to be in the linear mapping *
39 * *
40 ****************************************************************************/
41
42 .globl kvmppc_skip_interrupt
43kvmppc_skip_interrupt:
44 mfspr r13,SPRN_SRR0
45 addi r13,r13,4
46 mtspr SPRN_SRR0,r13
47 GET_SCRATCH0(r13)
48 rfid
49 b .
50
51 .globl kvmppc_skip_Hinterrupt
52kvmppc_skip_Hinterrupt:
53 mfspr r13,SPRN_HSRR0
54 addi r13,r13,4
55 mtspr SPRN_HSRR0,r13
56 GET_SCRATCH0(r13)
57 hrfid
58 b .
59
60/* 36/*
61 * Call kvmppc_hv_entry in real mode. 37 * Call kvmppc_hv_entry in real mode.
62 * Must be called with interrupts hard-disabled. 38 * Must be called with interrupts hard-disabled.
@@ -66,8 +42,11 @@ kvmppc_skip_Hinterrupt:
66 * LR = return address to continue at after eventually re-enabling MMU 42 * LR = return address to continue at after eventually re-enabling MMU
67 */ 43 */
68_GLOBAL(kvmppc_hv_entry_trampoline) 44_GLOBAL(kvmppc_hv_entry_trampoline)
45 mflr r0
46 std r0, PPC_LR_STKOFF(r1)
47 stdu r1, -112(r1)
69 mfmsr r10 48 mfmsr r10
70 LOAD_REG_ADDR(r5, kvmppc_hv_entry) 49 LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
71 li r0,MSR_RI 50 li r0,MSR_RI
72 andc r0,r10,r0 51 andc r0,r10,r0
73 li r6,MSR_IR | MSR_DR 52 li r6,MSR_IR | MSR_DR
@@ -77,11 +56,103 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
77 mtsrr1 r6 56 mtsrr1 r6
78 RFI 57 RFI
79 58
80/****************************************************************************** 59kvmppc_call_hv_entry:
81 * * 60 bl kvmppc_hv_entry
82 * Entry code * 61
83 * * 62 /* Back from guest - restore host state and return to caller */
84 *****************************************************************************/ 63
64 /* Restore host DABR and DABRX */
65 ld r5,HSTATE_DABR(r13)
66 li r6,7
67 mtspr SPRN_DABR,r5
68 mtspr SPRN_DABRX,r6
69
70 /* Restore SPRG3 */
71 ld r3,PACA_SPRG3(r13)
72 mtspr SPRN_SPRG3,r3
73
74 /*
75 * Reload DEC. HDEC interrupts were disabled when
76 * we reloaded the host's LPCR value.
77 */
78 ld r3, HSTATE_DECEXP(r13)
79 mftb r4
80 subf r4, r4, r3
81 mtspr SPRN_DEC, r4
82
83 /* Reload the host's PMU registers */
84 ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
85 lbz r4, LPPACA_PMCINUSE(r3)
86 cmpwi r4, 0
87 beq 23f /* skip if not */
88 lwz r3, HSTATE_PMC(r13)
89 lwz r4, HSTATE_PMC + 4(r13)
90 lwz r5, HSTATE_PMC + 8(r13)
91 lwz r6, HSTATE_PMC + 12(r13)
92 lwz r8, HSTATE_PMC + 16(r13)
93 lwz r9, HSTATE_PMC + 20(r13)
94BEGIN_FTR_SECTION
95 lwz r10, HSTATE_PMC + 24(r13)
96 lwz r11, HSTATE_PMC + 28(r13)
97END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
98 mtspr SPRN_PMC1, r3
99 mtspr SPRN_PMC2, r4
100 mtspr SPRN_PMC3, r5
101 mtspr SPRN_PMC4, r6
102 mtspr SPRN_PMC5, r8
103 mtspr SPRN_PMC6, r9
104BEGIN_FTR_SECTION
105 mtspr SPRN_PMC7, r10
106 mtspr SPRN_PMC8, r11
107END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
108 ld r3, HSTATE_MMCR(r13)
109 ld r4, HSTATE_MMCR + 8(r13)
110 ld r5, HSTATE_MMCR + 16(r13)
111 mtspr SPRN_MMCR1, r4
112 mtspr SPRN_MMCRA, r5
113 mtspr SPRN_MMCR0, r3
114 isync
11523:
116
117 /*
118 * For external and machine check interrupts, we need
119 * to call the Linux handler to process the interrupt.
120 * We do that by jumping to absolute address 0x500 for
121 * external interrupts, or the machine_check_fwnmi label
122 * for machine checks (since firmware might have patched
123 * the vector area at 0x200). The [h]rfid at the end of the
124 * handler will return to the book3s_hv_interrupts.S code.
125 * For other interrupts we do the rfid to get back
126 * to the book3s_hv_interrupts.S code here.
127 */
128 ld r8, 112+PPC_LR_STKOFF(r1)
129 addi r1, r1, 112
130 ld r7, HSTATE_HOST_MSR(r13)
131
132 cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
133 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
134BEGIN_FTR_SECTION
135 beq 11f
136END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
137
138 /* RFI into the highmem handler, or branch to interrupt handler */
139 mfmsr r6
140 li r0, MSR_RI
141 andc r6, r6, r0
142 mtmsrd r6, 1 /* Clear RI in MSR */
143 mtsrr0 r8
144 mtsrr1 r7
145 beqa 0x500 /* external interrupt (PPC970) */
146 beq cr1, 13f /* machine check */
147 RFI
148
149 /* On POWER7, we have external interrupts set to use HSRR0/1 */
15011: mtspr SPRN_HSRR0, r8
151 mtspr SPRN_HSRR1, r7
152 ba 0x500
153
15413: b machine_check_fwnmi
155
85 156
86/* 157/*
87 * We come in here when wakened from nap mode on a secondary hw thread. 158 * We come in here when wakened from nap mode on a secondary hw thread.
@@ -137,7 +208,7 @@ kvm_start_guest:
137 cmpdi r4,0 208 cmpdi r4,0
138 /* if we have no vcpu to run, go back to sleep */ 209 /* if we have no vcpu to run, go back to sleep */
139 beq kvm_no_guest 210 beq kvm_no_guest
140 b kvmppc_hv_entry 211 b 30f
141 212
14227: /* XXX should handle hypervisor maintenance interrupts etc. here */ 21327: /* XXX should handle hypervisor maintenance interrupts etc. here */
143 b kvm_no_guest 214 b kvm_no_guest
@@ -147,6 +218,57 @@ kvm_start_guest:
147 stw r8,HSTATE_SAVED_XIRR(r13) 218 stw r8,HSTATE_SAVED_XIRR(r13)
148 b kvm_no_guest 219 b kvm_no_guest
149 220
22130: bl kvmppc_hv_entry
222
223 /* Back from the guest, go back to nap */
224 /* Clear our vcpu pointer so we don't come back in early */
225 li r0, 0
226 std r0, HSTATE_KVM_VCPU(r13)
227 lwsync
228 /* Clear any pending IPI - we're an offline thread */
229 ld r5, HSTATE_XICS_PHYS(r13)
230 li r7, XICS_XIRR
231 lwzcix r3, r5, r7 /* ack any pending interrupt */
232 rlwinm. r0, r3, 0, 0xffffff /* any pending? */
233 beq 37f
234 sync
235 li r0, 0xff
236 li r6, XICS_MFRR
237 stbcix r0, r5, r6 /* clear the IPI */
238 stwcix r3, r5, r7 /* EOI it */
23937: sync
240
241 /* increment the nap count and then go to nap mode */
242 ld r4, HSTATE_KVM_VCORE(r13)
243 addi r4, r4, VCORE_NAP_COUNT
244 lwsync /* make previous updates visible */
24551: lwarx r3, 0, r4
246 addi r3, r3, 1
247 stwcx. r3, 0, r4
248 bne 51b
249
250kvm_no_guest:
251 li r0, KVM_HWTHREAD_IN_NAP
252 stb r0, HSTATE_HWTHREAD_STATE(r13)
253 li r3, LPCR_PECE0
254 mfspr r4, SPRN_LPCR
255 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
256 mtspr SPRN_LPCR, r4
257 isync
258 std r0, HSTATE_SCRATCH0(r13)
259 ptesync
260 ld r0, HSTATE_SCRATCH0(r13)
2611: cmpd r0, r0
262 bne 1b
263 nap
264 b .
265
266/******************************************************************************
267 * *
268 * Entry code *
269 * *
270 *****************************************************************************/
271
150.global kvmppc_hv_entry 272.global kvmppc_hv_entry
151kvmppc_hv_entry: 273kvmppc_hv_entry:
152 274
@@ -159,7 +281,8 @@ kvmppc_hv_entry:
159 * all other volatile GPRS = free 281 * all other volatile GPRS = free
160 */ 282 */
161 mflr r0 283 mflr r0
162 std r0, HSTATE_VMHANDLER(r13) 284 std r0, PPC_LR_STKOFF(r1)
285 stdu r1, -112(r1)
163 286
164 /* Set partition DABR */ 287 /* Set partition DABR */
165 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ 288 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
@@ -200,8 +323,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
200 ld r3, VCPU_MMCR(r4) 323 ld r3, VCPU_MMCR(r4)
201 ld r5, VCPU_MMCR + 8(r4) 324 ld r5, VCPU_MMCR + 8(r4)
202 ld r6, VCPU_MMCR + 16(r4) 325 ld r6, VCPU_MMCR + 16(r4)
326 ld r7, VCPU_SIAR(r4)
327 ld r8, VCPU_SDAR(r4)
203 mtspr SPRN_MMCR1, r5 328 mtspr SPRN_MMCR1, r5
204 mtspr SPRN_MMCRA, r6 329 mtspr SPRN_MMCRA, r6
330 mtspr SPRN_SIAR, r7
331 mtspr SPRN_SDAR, r8
205 mtspr SPRN_MMCR0, r3 332 mtspr SPRN_MMCR0, r3
206 isync 333 isync
207 334
@@ -254,22 +381,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
254 /* Save R1 in the PACA */ 381 /* Save R1 in the PACA */
255 std r1, HSTATE_HOST_R1(r13) 382 std r1, HSTATE_HOST_R1(r13)
256 383
257 /* Increment yield count if they have a VPA */
258 ld r3, VCPU_VPA(r4)
259 cmpdi r3, 0
260 beq 25f
261 lwz r5, LPPACA_YIELDCOUNT(r3)
262 addi r5, r5, 1
263 stw r5, LPPACA_YIELDCOUNT(r3)
264 li r6, 1
265 stb r6, VCPU_VPA_DIRTY(r4)
26625:
267 /* Load up DAR and DSISR */ 384 /* Load up DAR and DSISR */
268 ld r5, VCPU_DAR(r4) 385 ld r5, VCPU_DAR(r4)
269 lwz r6, VCPU_DSISR(r4) 386 lwz r6, VCPU_DSISR(r4)
270 mtspr SPRN_DAR, r5 387 mtspr SPRN_DAR, r5
271 mtspr SPRN_DSISR, r6 388 mtspr SPRN_DSISR, r6
272 389
390 li r6, KVM_GUEST_MODE_HOST_HV
391 stb r6, HSTATE_IN_GUEST(r13)
392
273BEGIN_FTR_SECTION 393BEGIN_FTR_SECTION
274 /* Restore AMR and UAMOR, set AMOR to all 1s */ 394 /* Restore AMR and UAMOR, set AMOR to all 1s */
275 ld r5,VCPU_AMR(r4) 395 ld r5,VCPU_AMR(r4)
@@ -343,7 +463,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
343 bdnz 28b 463 bdnz 28b
344 ptesync 464 ptesync
345 465
34622: li r0,1 466 /* Add timebase offset onto timebase */
46722: ld r8,VCORE_TB_OFFSET(r5)
468 cmpdi r8,0
469 beq 37f
470 mftb r6 /* current host timebase */
471 add r8,r8,r6
472 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
473 mftb r7 /* check if lower 24 bits overflowed */
474 clrldi r6,r6,40
475 clrldi r7,r7,40
476 cmpld r7,r6
477 bge 37f
478 addis r8,r8,0x100 /* if so, increment upper 40 bits */
479 mtspr SPRN_TBU40,r8
480
481 /* Load guest PCR value to select appropriate compat mode */
48237: ld r7, VCORE_PCR(r5)
483 cmpdi r7, 0
484 beq 38f
485 mtspr SPRN_PCR, r7
48638:
487 li r0,1
347 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */ 488 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
348 b 10f 489 b 10f
349 490
@@ -353,12 +494,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
353 beq 20b 494 beq 20b
354 495
355 /* Set LPCR and RMOR. */ 496 /* Set LPCR and RMOR. */
35610: ld r8,KVM_LPCR(r9) 49710: ld r8,VCORE_LPCR(r5)
357 mtspr SPRN_LPCR,r8 498 mtspr SPRN_LPCR,r8
358 ld r8,KVM_RMOR(r9) 499 ld r8,KVM_RMOR(r9)
359 mtspr SPRN_RMOR,r8 500 mtspr SPRN_RMOR,r8
360 isync 501 isync
361 502
503 /* Increment yield count if they have a VPA */
504 ld r3, VCPU_VPA(r4)
505 cmpdi r3, 0
506 beq 25f
507 lwz r5, LPPACA_YIELDCOUNT(r3)
508 addi r5, r5, 1
509 stw r5, LPPACA_YIELDCOUNT(r3)
510 li r6, 1
511 stb r6, VCPU_VPA_DIRTY(r4)
51225:
362 /* Check if HDEC expires soon */ 513 /* Check if HDEC expires soon */
363 mfspr r3,SPRN_HDEC 514 mfspr r3,SPRN_HDEC
364 cmpwi r3,10 515 cmpwi r3,10
@@ -405,7 +556,8 @@ toc_tlbie_lock:
405 bne 24b 556 bne 24b
406 isync 557 isync
407 558
408 ld r7,KVM_LPCR(r9) /* use kvm->arch.lpcr to store HID4 */ 559 ld r5,HSTATE_KVM_VCORE(r13)
560 ld r7,VCORE_LPCR(r5) /* use vcore->lpcr to store HID4 */
409 li r0,0x18f 561 li r0,0x18f
410 rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */ 562 rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
411 or r0,r7,r0 563 or r0,r7,r0
@@ -541,7 +693,7 @@ fast_guest_return:
541 mtspr SPRN_HSRR1,r11 693 mtspr SPRN_HSRR1,r11
542 694
543 /* Activate guest mode, so faults get handled by KVM */ 695 /* Activate guest mode, so faults get handled by KVM */
544 li r9, KVM_GUEST_MODE_GUEST 696 li r9, KVM_GUEST_MODE_GUEST_HV
545 stb r9, HSTATE_IN_GUEST(r13) 697 stb r9, HSTATE_IN_GUEST(r13)
546 698
547 /* Enter guest */ 699 /* Enter guest */
@@ -550,13 +702,15 @@ BEGIN_FTR_SECTION
550 ld r5, VCPU_CFAR(r4) 702 ld r5, VCPU_CFAR(r4)
551 mtspr SPRN_CFAR, r5 703 mtspr SPRN_CFAR, r5
552END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 704END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
705BEGIN_FTR_SECTION
706 ld r0, VCPU_PPR(r4)
707END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
553 708
554 ld r5, VCPU_LR(r4) 709 ld r5, VCPU_LR(r4)
555 lwz r6, VCPU_CR(r4) 710 lwz r6, VCPU_CR(r4)
556 mtlr r5 711 mtlr r5
557 mtcr r6 712 mtcr r6
558 713
559 ld r0, VCPU_GPR(R0)(r4)
560 ld r1, VCPU_GPR(R1)(r4) 714 ld r1, VCPU_GPR(R1)(r4)
561 ld r2, VCPU_GPR(R2)(r4) 715 ld r2, VCPU_GPR(R2)(r4)
562 ld r3, VCPU_GPR(R3)(r4) 716 ld r3, VCPU_GPR(R3)(r4)
@@ -570,6 +724,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
570 ld r12, VCPU_GPR(R12)(r4) 724 ld r12, VCPU_GPR(R12)(r4)
571 ld r13, VCPU_GPR(R13)(r4) 725 ld r13, VCPU_GPR(R13)(r4)
572 726
727BEGIN_FTR_SECTION
728 mtspr SPRN_PPR, r0
729END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
730 ld r0, VCPU_GPR(R0)(r4)
573 ld r4, VCPU_GPR(R4)(r4) 731 ld r4, VCPU_GPR(R4)(r4)
574 732
575 hrfid 733 hrfid
@@ -584,8 +742,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
584/* 742/*
585 * We come here from the first-level interrupt handlers. 743 * We come here from the first-level interrupt handlers.
586 */ 744 */
587 .globl kvmppc_interrupt 745 .globl kvmppc_interrupt_hv
588kvmppc_interrupt: 746kvmppc_interrupt_hv:
589 /* 747 /*
590 * Register contents: 748 * Register contents:
591 * R12 = interrupt vector 749 * R12 = interrupt vector
@@ -595,6 +753,19 @@ kvmppc_interrupt:
595 */ 753 */
596 /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */ 754 /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
597 std r9, HSTATE_HOST_R2(r13) 755 std r9, HSTATE_HOST_R2(r13)
756
757 lbz r9, HSTATE_IN_GUEST(r13)
758 cmpwi r9, KVM_GUEST_MODE_HOST_HV
759 beq kvmppc_bad_host_intr
760#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
761 cmpwi r9, KVM_GUEST_MODE_GUEST
762 ld r9, HSTATE_HOST_R2(r13)
763 beq kvmppc_interrupt_pr
764#endif
765 /* We're now back in the host but in guest MMU context */
766 li r9, KVM_GUEST_MODE_HOST_HV
767 stb r9, HSTATE_IN_GUEST(r13)
768
598 ld r9, HSTATE_KVM_VCPU(r13) 769 ld r9, HSTATE_KVM_VCPU(r13)
599 770
600 /* Save registers */ 771 /* Save registers */
@@ -620,6 +791,10 @@ BEGIN_FTR_SECTION
620 ld r3, HSTATE_CFAR(r13) 791 ld r3, HSTATE_CFAR(r13)
621 std r3, VCPU_CFAR(r9) 792 std r3, VCPU_CFAR(r9)
622END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 793END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
794BEGIN_FTR_SECTION
795 ld r4, HSTATE_PPR(r13)
796 std r4, VCPU_PPR(r9)
797END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
623 798
624 /* Restore R1/R2 so we can handle faults */ 799 /* Restore R1/R2 so we can handle faults */
625 ld r1, HSTATE_HOST_R1(r13) 800 ld r1, HSTATE_HOST_R1(r13)
@@ -642,10 +817,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
642 std r3, VCPU_GPR(R13)(r9) 817 std r3, VCPU_GPR(R13)(r9)
643 std r4, VCPU_LR(r9) 818 std r4, VCPU_LR(r9)
644 819
645 /* Unset guest mode */
646 li r0, KVM_GUEST_MODE_NONE
647 stb r0, HSTATE_IN_GUEST(r13)
648
649 stw r12,VCPU_TRAP(r9) 820 stw r12,VCPU_TRAP(r9)
650 821
651 /* Save HEIR (HV emulation assist reg) in last_inst 822 /* Save HEIR (HV emulation assist reg) in last_inst
@@ -696,46 +867,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
696 * set, we know the host wants us out so let's do it now 867 * set, we know the host wants us out so let's do it now
697 */ 868 */
698do_ext_interrupt: 869do_ext_interrupt:
699 lbz r0, HSTATE_HOST_IPI(r13) 870 bl kvmppc_read_intr
700 cmpwi r0, 0 871 cmpdi r3, 0
701 bne ext_interrupt_to_host 872 bgt ext_interrupt_to_host
702
703 /* Now read the interrupt from the ICP */
704 ld r5, HSTATE_XICS_PHYS(r13)
705 li r7, XICS_XIRR
706 cmpdi r5, 0
707 beq- ext_interrupt_to_host
708 lwzcix r3, r5, r7
709 rlwinm. r0, r3, 0, 0xffffff
710 sync
711 beq 3f /* if nothing pending in the ICP */
712
713 /* We found something in the ICP...
714 *
715 * If it's not an IPI, stash it in the PACA and return to
716 * the host, we don't (yet) handle directing real external
717 * interrupts directly to the guest
718 */
719 cmpwi r0, XICS_IPI
720 bne ext_stash_for_host
721
722 /* It's an IPI, clear the MFRR and EOI it */
723 li r0, 0xff
724 li r6, XICS_MFRR
725 stbcix r0, r5, r6 /* clear the IPI */
726 stwcix r3, r5, r7 /* EOI it */
727 sync
728
729 /* We need to re-check host IPI now in case it got set in the
730 * meantime. If it's clear, we bounce the interrupt to the
731 * guest
732 */
733 lbz r0, HSTATE_HOST_IPI(r13)
734 cmpwi r0, 0
735 bne- 1f
736 873
737 /* Allright, looks like an IPI for the guest, we need to set MER */ 874 /* Allright, looks like an IPI for the guest, we need to set MER */
7383:
739 /* Check if any CPU is heading out to the host, if so head out too */ 875 /* Check if any CPU is heading out to the host, if so head out too */
740 ld r5, HSTATE_KVM_VCORE(r13) 876 ld r5, HSTATE_KVM_VCORE(r13)
741 lwz r0, VCORE_ENTRY_EXIT(r5) 877 lwz r0, VCORE_ENTRY_EXIT(r5)
@@ -764,27 +900,9 @@ do_ext_interrupt:
764 mtspr SPRN_LPCR, r8 900 mtspr SPRN_LPCR, r8
765 b fast_guest_return 901 b fast_guest_return
766 902
767 /* We raced with the host, we need to resend that IPI, bummer */
7681: li r0, IPI_PRIORITY
769 stbcix r0, r5, r6 /* set the IPI */
770 sync
771 b ext_interrupt_to_host
772
773ext_stash_for_host:
774 /* It's not an IPI and it's for the host, stash it in the PACA
775 * before exit, it will be picked up by the host ICP driver
776 */
777 stw r3, HSTATE_SAVED_XIRR(r13)
778ext_interrupt_to_host: 903ext_interrupt_to_host:
779 904
780guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ 905guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
781 /* Save DEC */
782 mfspr r5,SPRN_DEC
783 mftb r6
784 extsw r5,r5
785 add r5,r5,r6
786 std r5,VCPU_DEC_EXPIRES(r9)
787
788 /* Save more register state */ 906 /* Save more register state */
789 mfdar r6 907 mfdar r6
790 mfdsisr r7 908 mfdsisr r7
@@ -954,7 +1072,30 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
954 mtspr SPRN_SDR1,r6 /* switch to partition page table */ 1072 mtspr SPRN_SDR1,r6 /* switch to partition page table */
955 mtspr SPRN_LPID,r7 1073 mtspr SPRN_LPID,r7
956 isync 1074 isync
957 li r0,0 1075
1076 /* Subtract timebase offset from timebase */
1077 ld r8,VCORE_TB_OFFSET(r5)
1078 cmpdi r8,0
1079 beq 17f
1080 mftb r6 /* current host timebase */
1081 subf r8,r8,r6
1082 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
1083 mftb r7 /* check if lower 24 bits overflowed */
1084 clrldi r6,r6,40
1085 clrldi r7,r7,40
1086 cmpld r7,r6
1087 bge 17f
1088 addis r8,r8,0x100 /* if so, increment upper 40 bits */
1089 mtspr SPRN_TBU40,r8
1090
1091 /* Reset PCR */
109217: ld r0, VCORE_PCR(r5)
1093 cmpdi r0, 0
1094 beq 18f
1095 li r0, 0
1096 mtspr SPRN_PCR, r0
109718:
1098 /* Signal secondary CPUs to continue */
958 stb r0,VCORE_IN_GUEST(r5) 1099 stb r0,VCORE_IN_GUEST(r5)
959 lis r8,0x7fff /* MAX_INT@h */ 1100 lis r8,0x7fff /* MAX_INT@h */
960 mtspr SPRN_HDEC,r8 1101 mtspr SPRN_HDEC,r8
@@ -1052,6 +1193,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
10521: addi r8,r8,16 11931: addi r8,r8,16
1053 .endr 1194 .endr
1054 1195
1196 /* Save DEC */
1197 mfspr r5,SPRN_DEC
1198 mftb r6
1199 extsw r5,r5
1200 add r5,r5,r6
1201 std r5,VCPU_DEC_EXPIRES(r9)
1202
1055 /* Save and reset AMR and UAMOR before turning on the MMU */ 1203 /* Save and reset AMR and UAMOR before turning on the MMU */
1056BEGIN_FTR_SECTION 1204BEGIN_FTR_SECTION
1057 mfspr r5,SPRN_AMR 1205 mfspr r5,SPRN_AMR
@@ -1062,11 +1210,15 @@ BEGIN_FTR_SECTION
1062 mtspr SPRN_AMR,r6 1210 mtspr SPRN_AMR,r6
1063END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1211END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1064 1212
1213 /* Unset guest mode */
1214 li r0, KVM_GUEST_MODE_NONE
1215 stb r0, HSTATE_IN_GUEST(r13)
1216
1065 /* Switch DSCR back to host value */ 1217 /* Switch DSCR back to host value */
1066BEGIN_FTR_SECTION 1218BEGIN_FTR_SECTION
1067 mfspr r8, SPRN_DSCR 1219 mfspr r8, SPRN_DSCR
1068 ld r7, HSTATE_DSCR(r13) 1220 ld r7, HSTATE_DSCR(r13)
1069 std r8, VCPU_DSCR(r7) 1221 std r8, VCPU_DSCR(r9)
1070 mtspr SPRN_DSCR, r7 1222 mtspr SPRN_DSCR, r7
1071END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1223END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1072 1224
@@ -1134,9 +1286,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1134 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ 1286 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
1135 b 22f 1287 b 22f
113621: mfspr r5, SPRN_MMCR1 128821: mfspr r5, SPRN_MMCR1
1289 mfspr r7, SPRN_SIAR
1290 mfspr r8, SPRN_SDAR
1137 std r4, VCPU_MMCR(r9) 1291 std r4, VCPU_MMCR(r9)
1138 std r5, VCPU_MMCR + 8(r9) 1292 std r5, VCPU_MMCR + 8(r9)
1139 std r6, VCPU_MMCR + 16(r9) 1293 std r6, VCPU_MMCR + 16(r9)
1294 std r7, VCPU_SIAR(r9)
1295 std r8, VCPU_SDAR(r9)
1140 mfspr r3, SPRN_PMC1 1296 mfspr r3, SPRN_PMC1
1141 mfspr r4, SPRN_PMC2 1297 mfspr r4, SPRN_PMC2
1142 mfspr r5, SPRN_PMC3 1298 mfspr r5, SPRN_PMC3
@@ -1158,103 +1314,30 @@ BEGIN_FTR_SECTION
1158 stw r11, VCPU_PMC + 28(r9) 1314 stw r11, VCPU_PMC + 28(r9)
1159END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 1315END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
116022: 131622:
1317 ld r0, 112+PPC_LR_STKOFF(r1)
1318 addi r1, r1, 112
1319 mtlr r0
1320 blr
1321secondary_too_late:
1322 ld r5,HSTATE_KVM_VCORE(r13)
1323 HMT_LOW
132413: lbz r3,VCORE_IN_GUEST(r5)
1325 cmpwi r3,0
1326 bne 13b
1327 HMT_MEDIUM
1328 li r0, KVM_GUEST_MODE_NONE
1329 stb r0, HSTATE_IN_GUEST(r13)
1330 ld r11,PACA_SLBSHADOWPTR(r13)
1161 1331
1162 /* Secondary threads go off to take a nap on POWER7 */ 1332 .rept SLB_NUM_BOLTED
1163BEGIN_FTR_SECTION 1333 ld r5,SLBSHADOW_SAVEAREA(r11)
1164 lwz r0,VCPU_PTID(r9) 1334 ld r6,SLBSHADOW_SAVEAREA+8(r11)
1165 cmpwi r0,0 1335 andis. r7,r5,SLB_ESID_V@h
1166 bne secondary_nap 1336 beq 1f
1167END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1337 slbmte r6,r5
1168 13381: addi r11,r11,16
1169 /* Restore host DABR and DABRX */ 1339 .endr
1170 ld r5,HSTATE_DABR(r13) 1340 b 22b
1171 li r6,7
1172 mtspr SPRN_DABR,r5
1173 mtspr SPRN_DABRX,r6
1174
1175 /* Restore SPRG3 */
1176 ld r3,PACA_SPRG3(r13)
1177 mtspr SPRN_SPRG3,r3
1178
1179 /*
1180 * Reload DEC. HDEC interrupts were disabled when
1181 * we reloaded the host's LPCR value.
1182 */
1183 ld r3, HSTATE_DECEXP(r13)
1184 mftb r4
1185 subf r4, r4, r3
1186 mtspr SPRN_DEC, r4
1187
1188 /* Reload the host's PMU registers */
1189 ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
1190 lbz r4, LPPACA_PMCINUSE(r3)
1191 cmpwi r4, 0
1192 beq 23f /* skip if not */
1193 lwz r3, HSTATE_PMC(r13)
1194 lwz r4, HSTATE_PMC + 4(r13)
1195 lwz r5, HSTATE_PMC + 8(r13)
1196 lwz r6, HSTATE_PMC + 12(r13)
1197 lwz r8, HSTATE_PMC + 16(r13)
1198 lwz r9, HSTATE_PMC + 20(r13)
1199BEGIN_FTR_SECTION
1200 lwz r10, HSTATE_PMC + 24(r13)
1201 lwz r11, HSTATE_PMC + 28(r13)
1202END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1203 mtspr SPRN_PMC1, r3
1204 mtspr SPRN_PMC2, r4
1205 mtspr SPRN_PMC3, r5
1206 mtspr SPRN_PMC4, r6
1207 mtspr SPRN_PMC5, r8
1208 mtspr SPRN_PMC6, r9
1209BEGIN_FTR_SECTION
1210 mtspr SPRN_PMC7, r10
1211 mtspr SPRN_PMC8, r11
1212END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1213 ld r3, HSTATE_MMCR(r13)
1214 ld r4, HSTATE_MMCR + 8(r13)
1215 ld r5, HSTATE_MMCR + 16(r13)
1216 mtspr SPRN_MMCR1, r4
1217 mtspr SPRN_MMCRA, r5
1218 mtspr SPRN_MMCR0, r3
1219 isync
122023:
1221 /*
1222 * For external and machine check interrupts, we need
1223 * to call the Linux handler to process the interrupt.
1224 * We do that by jumping to absolute address 0x500 for
1225 * external interrupts, or the machine_check_fwnmi label
1226 * for machine checks (since firmware might have patched
1227 * the vector area at 0x200). The [h]rfid at the end of the
1228 * handler will return to the book3s_hv_interrupts.S code.
1229 * For other interrupts we do the rfid to get back
1230 * to the book3s_hv_interrupts.S code here.
1231 */
1232 ld r8, HSTATE_VMHANDLER(r13)
1233 ld r7, HSTATE_HOST_MSR(r13)
1234
1235 cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
1236 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
1237BEGIN_FTR_SECTION
1238 beq 11f
1239END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1240
1241 /* RFI into the highmem handler, or branch to interrupt handler */
1242 mfmsr r6
1243 li r0, MSR_RI
1244 andc r6, r6, r0
1245 mtmsrd r6, 1 /* Clear RI in MSR */
1246 mtsrr0 r8
1247 mtsrr1 r7
1248 beqa 0x500 /* external interrupt (PPC970) */
1249 beq cr1, 13f /* machine check */
1250 RFI
1251
1252 /* On POWER7, we have external interrupts set to use HSRR0/1 */
125311: mtspr SPRN_HSRR0, r8
1254 mtspr SPRN_HSRR1, r7
1255 ba 0x500
1256
125713: b machine_check_fwnmi
1258 1341
1259/* 1342/*
1260 * Check whether an HDSI is an HPTE not found fault or something else. 1343 * Check whether an HDSI is an HPTE not found fault or something else.
@@ -1333,7 +1416,7 @@ fast_interrupt_c_return:
1333 stw r8, VCPU_LAST_INST(r9) 1416 stw r8, VCPU_LAST_INST(r9)
1334 1417
1335 /* Unset guest mode. */ 1418 /* Unset guest mode. */
1336 li r0, KVM_GUEST_MODE_NONE 1419 li r0, KVM_GUEST_MODE_HOST_HV
1337 stb r0, HSTATE_IN_GUEST(r13) 1420 stb r0, HSTATE_IN_GUEST(r13)
1338 b guest_exit_cont 1421 b guest_exit_cont
1339 1422
@@ -1701,67 +1784,70 @@ machine_check_realmode:
1701 rotldi r11, r11, 63 1784 rotldi r11, r11, 63
1702 b fast_interrupt_c_return 1785 b fast_interrupt_c_return
1703 1786
1704secondary_too_late: 1787/*
1705 ld r5,HSTATE_KVM_VCORE(r13) 1788 * Determine what sort of external interrupt is pending (if any).
1706 HMT_LOW 1789 * Returns:
170713: lbz r3,VCORE_IN_GUEST(r5) 1790 * 0 if no interrupt is pending
1708 cmpwi r3,0 1791 * 1 if an interrupt is pending that needs to be handled by the host
1709 bne 13b 1792 * -1 if there was a guest wakeup IPI (which has now been cleared)
1710 HMT_MEDIUM 1793 */
1711 ld r11,PACA_SLBSHADOWPTR(r13) 1794kvmppc_read_intr:
1712 1795 /* see if a host IPI is pending */
1713 .rept SLB_NUM_BOLTED 1796 li r3, 1
1714 ld r5,SLBSHADOW_SAVEAREA(r11) 1797 lbz r0, HSTATE_HOST_IPI(r13)
1715 ld r6,SLBSHADOW_SAVEAREA+8(r11) 1798 cmpwi r0, 0
1716 andis. r7,r5,SLB_ESID_V@h 1799 bne 1f
1717 beq 1f
1718 slbmte r6,r5
17191: addi r11,r11,16
1720 .endr
1721 1800
1722secondary_nap: 1801 /* Now read the interrupt from the ICP */
1723 /* Clear our vcpu pointer so we don't come back in early */ 1802 ld r6, HSTATE_XICS_PHYS(r13)
1724 li r0, 0
1725 std r0, HSTATE_KVM_VCPU(r13)
1726 lwsync
1727 /* Clear any pending IPI - assume we're a secondary thread */
1728 ld r5, HSTATE_XICS_PHYS(r13)
1729 li r7, XICS_XIRR 1803 li r7, XICS_XIRR
1730 lwzcix r3, r5, r7 /* ack any pending interrupt */ 1804 cmpdi r6, 0
1731 rlwinm. r0, r3, 0, 0xffffff /* any pending? */ 1805 beq- 1f
1732 beq 37f 1806 lwzcix r0, r6, r7
1807 rlwinm. r3, r0, 0, 0xffffff
1733 sync 1808 sync
1734 li r0, 0xff 1809 beq 1f /* if nothing pending in the ICP */
1735 li r6, XICS_MFRR
1736 stbcix r0, r5, r6 /* clear the IPI */
1737 stwcix r3, r5, r7 /* EOI it */
173837: sync
1739 1810
1740 /* increment the nap count and then go to nap mode */ 1811 /* We found something in the ICP...
1741 ld r4, HSTATE_KVM_VCORE(r13) 1812 *
1742 addi r4, r4, VCORE_NAP_COUNT 1813 * If it's not an IPI, stash it in the PACA and return to
 1743 	lwsync		/* make previous updates visible */	 1814 	 *
 174451:	lwarx	r3, 0, r4					 1815 	 * If it's not an IPI, stash it in the PACA and return to
1745 addi r3, r3, 1 1816 */
1746 stwcx. r3, 0, r4 1817 cmpwi r3, XICS_IPI /* if there is, is it an IPI? */
1747 bne 51b 1818 li r3, 1
1819 bne 42f
1748 1820
1749kvm_no_guest: 1821 /* It's an IPI, clear the MFRR and EOI it */
1750 li r0, KVM_HWTHREAD_IN_NAP 1822 li r3, 0xff
1751 stb r0, HSTATE_HWTHREAD_STATE(r13) 1823 li r8, XICS_MFRR
1824 stbcix r3, r6, r8 /* clear the IPI */
1825 stwcix r0, r6, r7 /* EOI it */
1826 sync
1752 1827
1753 li r3, LPCR_PECE0 1828 /* We need to re-check host IPI now in case it got set in the
1754 mfspr r4, SPRN_LPCR 1829 * meantime. If it's clear, we bounce the interrupt to the
1755 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 1830 * guest
1756 mtspr SPRN_LPCR, r4 1831 */
1757 isync 1832 lbz r0, HSTATE_HOST_IPI(r13)
1758 std r0, HSTATE_SCRATCH0(r13) 1833 cmpwi r0, 0
1759 ptesync 1834 bne- 43f
1760 ld r0, HSTATE_SCRATCH0(r13) 1835
17611: cmpd r0, r0 1836 /* OK, it's an IPI for us */
1762 bne 1b 1837 li r3, -1
1763 nap 18381: blr
1764 b . 1839
 184042:	/* It's not an IPI and it's for the host; stash it in the PACA
 1841 	 * before exit, where it will be picked up by the host ICP driver
1842 */
1843 stw r0, HSTATE_SAVED_XIRR(r13)
1844 b 1b
1845
 184643:	/* We raced with the host; we need to resend that IPI, bummer */
1847 li r0, IPI_PRIORITY
1848 stbcix r0, r6, r8 /* set the IPI */
1849 sync
1850 b 1b
1765 1851
1766/* 1852/*
1767 * Save away FP, VMX and VSX registers. 1853 * Save away FP, VMX and VSX registers.
@@ -1879,3 +1965,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1879 lwz r7,VCPU_VRSAVE(r4) 1965 lwz r7,VCPU_VRSAVE(r4)
1880 mtspr SPRN_VRSAVE,r7 1966 mtspr SPRN_VRSAVE,r7
1881 blr 1967 blr
1968
1969/*
1970 * We come here if we get any exception or interrupt while we are
1971 * executing host real mode code while in guest MMU context.
1972 * For now just spin, but we should do something better.
1973 */
1974kvmppc_bad_host_intr:
1975 b .
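
The control flow of the new kvmppc_read_intr routine above is easier to
follow in C. The following is an illustrative sketch only: host_ipi_pending(),
icp_read(), icp_write() and stash_xirr() are hypothetical helpers standing in
for the HSTATE_HOST_IPI load and the lwzcix/stbcix/stwcix accesses to the
real-mode XICS presentation controller; none of them exist in the patch.

    /* Hypothetical C rendering of kvmppc_read_intr */
    static int read_intr_sketch(void)
    {
        u32 xirr, source;

        if (host_ipi_pending())
            return 1;                    /* host must handle it */

        xirr = icp_read(XICS_XIRR);      /* ack whatever is pending */
        source = xirr & 0xffffff;
        if (source == 0)
            return 0;                    /* nothing pending in the ICP */

        if (source != XICS_IPI) {
            stash_xirr(xirr);            /* HSTATE_SAVED_XIRR, for the host */
            return 1;
        }

        icp_write(XICS_MFRR, 0xff);      /* clear the IPI */
        icp_write(XICS_XIRR, xirr);      /* EOI it */

        if (host_ipi_pending()) {        /* raced with the host: resend */
            icp_write(XICS_MFRR, IPI_PRIORITY);
            return 1;
        }
        return -1;                       /* guest wakeup IPI, now cleared */
    }
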
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 17cfae5497a3..f4dd041c14ea 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -26,8 +26,12 @@
26 26
27#if defined(CONFIG_PPC_BOOK3S_64) 27#if defined(CONFIG_PPC_BOOK3S_64)
28#define FUNC(name) GLUE(.,name) 28#define FUNC(name) GLUE(.,name)
29#define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU
30
29#elif defined(CONFIG_PPC_BOOK3S_32) 31#elif defined(CONFIG_PPC_BOOK3S_32)
30#define FUNC(name) name 32#define FUNC(name) name
33#define GET_SHADOW_VCPU(reg) lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2)
34
31#endif /* CONFIG_PPC_BOOK3S_XX */ 35#endif /* CONFIG_PPC_BOOK3S_XX */
32 36
33#define VCPU_LOAD_NVGPRS(vcpu) \ 37#define VCPU_LOAD_NVGPRS(vcpu) \
@@ -87,8 +91,14 @@ kvm_start_entry:
87 VCPU_LOAD_NVGPRS(r4) 91 VCPU_LOAD_NVGPRS(r4)
88 92
89kvm_start_lightweight: 93kvm_start_lightweight:
94 /* Copy registers into shadow vcpu so we can access them in real mode */
95 GET_SHADOW_VCPU(r3)
96 bl FUNC(kvmppc_copy_to_svcpu)
97 nop
98 REST_GPR(4, r1)
90 99
91#ifdef CONFIG_PPC_BOOK3S_64 100#ifdef CONFIG_PPC_BOOK3S_64
101 /* Get the dcbz32 flag */
92 PPC_LL r3, VCPU_HFLAGS(r4) 102 PPC_LL r3, VCPU_HFLAGS(r4)
93 rldicl r3, r3, 0, 63 /* r3 &= 1 */ 103 rldicl r3, r3, 0, 63 /* r3 &= 1 */
94 stb r3, HSTATE_RESTORE_HID5(r13) 104 stb r3, HSTATE_RESTORE_HID5(r13)
@@ -111,9 +121,6 @@ kvm_start_lightweight:
111 * 121 *
112 */ 122 */
113 123
114.global kvmppc_handler_highmem
115kvmppc_handler_highmem:
116
117 /* 124 /*
118 * Register usage at this point: 125 * Register usage at this point:
119 * 126 *
@@ -125,18 +132,31 @@ kvmppc_handler_highmem:
125 * 132 *
126 */ 133 */
127 134
128 /* R7 = vcpu */ 135 /* Transfer reg values from shadow vcpu back to vcpu struct */
129 PPC_LL r7, GPR4(r1) 136 /* On 64-bit, interrupts are still off at this point */
137 PPC_LL r3, GPR4(r1) /* vcpu pointer */
138 GET_SHADOW_VCPU(r4)
139 bl FUNC(kvmppc_copy_from_svcpu)
140 nop
130 141
131#ifdef CONFIG_PPC_BOOK3S_64 142#ifdef CONFIG_PPC_BOOK3S_64
143 /* Re-enable interrupts */
144 ld r3, HSTATE_HOST_MSR(r13)
145 ori r3, r3, MSR_EE
146 MTMSR_EERI(r3)
147
132 /* 148 /*
133 * Reload kernel SPRG3 value. 149 * Reload kernel SPRG3 value.
134 * No need to save guest value as usermode can't modify SPRG3. 150 * No need to save guest value as usermode can't modify SPRG3.
135 */ 151 */
136 ld r3, PACA_SPRG3(r13) 152 ld r3, PACA_SPRG3(r13)
137 mtspr SPRN_SPRG3, r3 153 mtspr SPRN_SPRG3, r3
154
138#endif /* CONFIG_PPC_BOOK3S_64 */ 155#endif /* CONFIG_PPC_BOOK3S_64 */
139 156
157 /* R7 = vcpu */
158 PPC_LL r7, GPR4(r1)
159
140 PPC_STL r14, VCPU_GPR(R14)(r7) 160 PPC_STL r14, VCPU_GPR(R14)(r7)
141 PPC_STL r15, VCPU_GPR(R15)(r7) 161 PPC_STL r15, VCPU_GPR(R15)(r7)
142 PPC_STL r16, VCPU_GPR(R16)(r7) 162 PPC_STL r16, VCPU_GPR(R16)(r7)
@@ -161,7 +181,7 @@ kvmppc_handler_highmem:
161 181
162 /* Restore r3 (kvm_run) and r4 (vcpu) */ 182 /* Restore r3 (kvm_run) and r4 (vcpu) */
163 REST_2GPRS(3, r1) 183 REST_2GPRS(3, r1)
164 bl FUNC(kvmppc_handle_exit) 184 bl FUNC(kvmppc_handle_exit_pr)
165 185
166 /* If RESUME_GUEST, get back in the loop */ 186 /* If RESUME_GUEST, get back in the loop */
167 cmpwi r3, RESUME_GUEST 187 cmpwi r3, RESUME_GUEST
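
Taken together, the changes to book3s_interrupts.S turn the lightweight
entry/exit path into the sequence below. This is a C-level outline, not code
from the patch; enter_guest() is a placeholder for the existing trampoline.

    kvmppc_copy_to_svcpu(svcpu, vcpu);    /* stage volatile state where
                                             real-mode code can reach it */
    enter_guest(vcpu);                    /* guest runs until an interrupt */
    kvmppc_copy_from_svcpu(vcpu, svcpu);  /* pull regs and fault info back */
    /* only now, on 64-bit, is MSR_EE set again (MTMSR_EERI above) */
    r = kvmppc_handle_exit_pr(run, vcpu, exit_nr);

Per the comment added to book3s_rmhandlers.S further below, MSR_EE is kept
clear on 64-bit precisely so that no interrupt can arrive while the transfer
to or from the PACA is half done.
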
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index da8b13c4b776..5a1ab1250a05 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -28,7 +28,7 @@
28#include <asm/mmu_context.h> 28#include <asm/mmu_context.h>
29#include <asm/hw_irq.h> 29#include <asm/hw_irq.h>
30 30
31#include "trace.h" 31#include "trace_pr.h"
32 32
33#define PTE_SIZE 12 33#define PTE_SIZE 12
34 34
@@ -56,6 +56,14 @@ static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
56 HPTEG_HASH_BITS_VPTE_LONG); 56 HPTEG_HASH_BITS_VPTE_LONG);
57} 57}
58 58
59#ifdef CONFIG_PPC_BOOK3S_64
60static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
61{
62 return hash_64((vpage & 0xffffffff0ULL) >> 4,
63 HPTEG_HASH_BITS_VPTE_64K);
64}
65#endif
66
59void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte) 67void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
60{ 68{
61 u64 index; 69 u64 index;
@@ -83,6 +91,15 @@ void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
83 hlist_add_head_rcu(&pte->list_vpte_long, 91 hlist_add_head_rcu(&pte->list_vpte_long,
84 &vcpu3s->hpte_hash_vpte_long[index]); 92 &vcpu3s->hpte_hash_vpte_long[index]);
85 93
94#ifdef CONFIG_PPC_BOOK3S_64
95 /* Add to vPTE_64k list */
96 index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage);
97 hlist_add_head_rcu(&pte->list_vpte_64k,
98 &vcpu3s->hpte_hash_vpte_64k[index]);
99#endif
100
101 vcpu3s->hpte_cache_count++;
102
86 spin_unlock(&vcpu3s->mmu_lock); 103 spin_unlock(&vcpu3s->mmu_lock);
87} 104}
88 105
@@ -113,10 +130,13 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
113 hlist_del_init_rcu(&pte->list_pte_long); 130 hlist_del_init_rcu(&pte->list_pte_long);
114 hlist_del_init_rcu(&pte->list_vpte); 131 hlist_del_init_rcu(&pte->list_vpte);
115 hlist_del_init_rcu(&pte->list_vpte_long); 132 hlist_del_init_rcu(&pte->list_vpte_long);
133#ifdef CONFIG_PPC_BOOK3S_64
134 hlist_del_init_rcu(&pte->list_vpte_64k);
135#endif
136 vcpu3s->hpte_cache_count--;
116 137
117 spin_unlock(&vcpu3s->mmu_lock); 138 spin_unlock(&vcpu3s->mmu_lock);
118 139
119 vcpu3s->hpte_cache_count--;
120 call_rcu(&pte->rcu_head, free_pte_rcu); 140 call_rcu(&pte->rcu_head, free_pte_rcu);
121} 141}
122 142
@@ -219,6 +239,29 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
219 rcu_read_unlock(); 239 rcu_read_unlock();
220} 240}
221 241
242#ifdef CONFIG_PPC_BOOK3S_64
243/* Flush with mask 0xffffffff0 */
244static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp)
245{
246 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
247 struct hlist_head *list;
248 struct hpte_cache *pte;
249 u64 vp_mask = 0xffffffff0ULL;
250
251 list = &vcpu3s->hpte_hash_vpte_64k[
252 kvmppc_mmu_hash_vpte_64k(guest_vp)];
253
254 rcu_read_lock();
255
256 /* Check the list for matching entries and invalidate */
257 hlist_for_each_entry_rcu(pte, list, list_vpte_64k)
258 if ((pte->pte.vpage & vp_mask) == guest_vp)
259 invalidate_pte(vcpu, pte);
260
261 rcu_read_unlock();
262}
263#endif
264
222/* Flush with mask 0xffffff000 */ 265/* Flush with mask 0xffffff000 */
223static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp) 266static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
224{ 267{
@@ -249,6 +292,11 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
249 case 0xfffffffffULL: 292 case 0xfffffffffULL:
250 kvmppc_mmu_pte_vflush_short(vcpu, guest_vp); 293 kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
251 break; 294 break;
295#ifdef CONFIG_PPC_BOOK3S_64
296 case 0xffffffff0ULL:
297 kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp);
298 break;
299#endif
252 case 0xffffff000ULL: 300 case 0xffffff000ULL:
253 kvmppc_mmu_pte_vflush_long(vcpu, guest_vp); 301 kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
254 break; 302 break;
@@ -285,15 +333,19 @@ struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
285 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 333 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
286 struct hpte_cache *pte; 334 struct hpte_cache *pte;
287 335
288 pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
289 vcpu3s->hpte_cache_count++;
290
291 if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM) 336 if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
292 kvmppc_mmu_pte_flush_all(vcpu); 337 kvmppc_mmu_pte_flush_all(vcpu);
293 338
339 pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
340
294 return pte; 341 return pte;
295} 342}
296 343
344void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte)
345{
346 kmem_cache_free(hpte_cache, pte);
347}
348
297void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu) 349void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
298{ 350{
299 kvmppc_mmu_pte_flush(vcpu, 0, 0); 351 kvmppc_mmu_pte_flush(vcpu, 0, 0);
@@ -320,6 +372,10 @@ int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
320 ARRAY_SIZE(vcpu3s->hpte_hash_vpte)); 372 ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
321 kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long, 373 kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
322 ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long)); 374 ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
375#ifdef CONFIG_PPC_BOOK3S_64
376 kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k,
377 ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k));
378#endif
323 379
324 spin_lock_init(&vcpu3s->mmu_lock); 380 spin_lock_init(&vcpu3s->mmu_lock);
325 381
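
The new list keys each cached HPTE by its virtual page number with the low
four bits masked off, so that all 4k-granular entries covering one 64k region
hash to the same bucket. A worked example, assuming hash_64() from
<linux/hash.h> as already used by the hash helper shown above:

    u64 vpage = 0x123456789ULL;                   /* 4k-based vpage number */
    u64 key   = (vpage & 0xffffffff0ULL) >> 4;    /* 0x12345678 */
    u64 index = hash_64(key, HPTEG_HASH_BITS_VPTE_64K);

A flush with mask 0xffffffff0 (the new case in kvmppc_mmu_pte_vflush) walks
exactly this bucket, so every shadow PTE belonging to the same 64k guest page
is found without scanning the longer vpte lists.
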
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index c0b48f96a91c..fe14ca3dd171 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -40,8 +40,12 @@
40#include <linux/sched.h> 40#include <linux/sched.h>
41#include <linux/vmalloc.h> 41#include <linux/vmalloc.h>
42#include <linux/highmem.h> 42#include <linux/highmem.h>
43#include <linux/module.h>
43 44
44#include "trace.h" 45#include "book3s.h"
46
47#define CREATE_TRACE_POINTS
48#include "trace_pr.h"
45 49
46/* #define EXIT_DEBUG */ 50/* #define EXIT_DEBUG */
47/* #define DEBUG_EXT */ 51/* #define DEBUG_EXT */
@@ -56,29 +60,25 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
56#define HW_PAGE_SIZE PAGE_SIZE 60#define HW_PAGE_SIZE PAGE_SIZE
57#endif 61#endif
58 62
59void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 63static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
60{ 64{
61#ifdef CONFIG_PPC_BOOK3S_64 65#ifdef CONFIG_PPC_BOOK3S_64
62 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); 66 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
63 memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); 67 memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
64 memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
65 sizeof(get_paca()->shadow_vcpu));
66 svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; 68 svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
67 svcpu_put(svcpu); 69 svcpu_put(svcpu);
68#endif 70#endif
69 vcpu->cpu = smp_processor_id(); 71 vcpu->cpu = smp_processor_id();
70#ifdef CONFIG_PPC_BOOK3S_32 72#ifdef CONFIG_PPC_BOOK3S_32
71 current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu; 73 current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
72#endif 74#endif
73} 75}
74 76
75void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) 77static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
76{ 78{
77#ifdef CONFIG_PPC_BOOK3S_64 79#ifdef CONFIG_PPC_BOOK3S_64
78 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); 80 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
79 memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); 81 memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
80 memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
81 sizeof(get_paca()->shadow_vcpu));
82 to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; 82 to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
83 svcpu_put(svcpu); 83 svcpu_put(svcpu);
84#endif 84#endif
@@ -87,7 +87,61 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
87 vcpu->cpu = -1; 87 vcpu->cpu = -1;
88} 88}
89 89
90int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) 90/* Copy data needed by real-mode code from vcpu to shadow vcpu */
91void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
92 struct kvm_vcpu *vcpu)
93{
94 svcpu->gpr[0] = vcpu->arch.gpr[0];
95 svcpu->gpr[1] = vcpu->arch.gpr[1];
96 svcpu->gpr[2] = vcpu->arch.gpr[2];
97 svcpu->gpr[3] = vcpu->arch.gpr[3];
98 svcpu->gpr[4] = vcpu->arch.gpr[4];
99 svcpu->gpr[5] = vcpu->arch.gpr[5];
100 svcpu->gpr[6] = vcpu->arch.gpr[6];
101 svcpu->gpr[7] = vcpu->arch.gpr[7];
102 svcpu->gpr[8] = vcpu->arch.gpr[8];
103 svcpu->gpr[9] = vcpu->arch.gpr[9];
104 svcpu->gpr[10] = vcpu->arch.gpr[10];
105 svcpu->gpr[11] = vcpu->arch.gpr[11];
106 svcpu->gpr[12] = vcpu->arch.gpr[12];
107 svcpu->gpr[13] = vcpu->arch.gpr[13];
108 svcpu->cr = vcpu->arch.cr;
109 svcpu->xer = vcpu->arch.xer;
110 svcpu->ctr = vcpu->arch.ctr;
111 svcpu->lr = vcpu->arch.lr;
112 svcpu->pc = vcpu->arch.pc;
113}
114
115/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
116void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
117 struct kvmppc_book3s_shadow_vcpu *svcpu)
118{
119 vcpu->arch.gpr[0] = svcpu->gpr[0];
120 vcpu->arch.gpr[1] = svcpu->gpr[1];
121 vcpu->arch.gpr[2] = svcpu->gpr[2];
122 vcpu->arch.gpr[3] = svcpu->gpr[3];
123 vcpu->arch.gpr[4] = svcpu->gpr[4];
124 vcpu->arch.gpr[5] = svcpu->gpr[5];
125 vcpu->arch.gpr[6] = svcpu->gpr[6];
126 vcpu->arch.gpr[7] = svcpu->gpr[7];
127 vcpu->arch.gpr[8] = svcpu->gpr[8];
128 vcpu->arch.gpr[9] = svcpu->gpr[9];
129 vcpu->arch.gpr[10] = svcpu->gpr[10];
130 vcpu->arch.gpr[11] = svcpu->gpr[11];
131 vcpu->arch.gpr[12] = svcpu->gpr[12];
132 vcpu->arch.gpr[13] = svcpu->gpr[13];
133 vcpu->arch.cr = svcpu->cr;
134 vcpu->arch.xer = svcpu->xer;
135 vcpu->arch.ctr = svcpu->ctr;
136 vcpu->arch.lr = svcpu->lr;
137 vcpu->arch.pc = svcpu->pc;
138 vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
139 vcpu->arch.fault_dar = svcpu->fault_dar;
140 vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
141 vcpu->arch.last_inst = svcpu->last_inst;
142}
143
144static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
91{ 145{
92 int r = 1; /* Indicate we want to get back into the guest */ 146 int r = 1; /* Indicate we want to get back into the guest */
93 147
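
Functionally, the unrolled assignments in kvmppc_copy_to_svcpu() and
kvmppc_copy_from_svcpu() amount to the loop below (a sketch; the explicit
form in the patch makes it obvious exactly which fields the real-mode code
depends on):

    int i;

    for (i = 0; i <= 13; i++)
        svcpu->gpr[i] = vcpu->arch.gpr[i];
    svcpu->cr  = vcpu->arch.cr;
    svcpu->xer = vcpu->arch.xer;
    svcpu->ctr = vcpu->arch.ctr;
    svcpu->lr  = vcpu->arch.lr;
    svcpu->pc  = vcpu->arch.pc;

Only the volatile registers r0-r13 are staged; the non-volatiles stay in the
vcpu struct and are saved and restored directly by book3s_interrupts.S.
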
@@ -100,44 +154,69 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
100} 154}
101 155
102/************* MMU Notifiers *************/ 156/************* MMU Notifiers *************/
157static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
158 unsigned long end)
159{
160 long i;
161 struct kvm_vcpu *vcpu;
162 struct kvm_memslots *slots;
163 struct kvm_memory_slot *memslot;
164
165 slots = kvm_memslots(kvm);
166 kvm_for_each_memslot(memslot, slots) {
167 unsigned long hva_start, hva_end;
168 gfn_t gfn, gfn_end;
169
170 hva_start = max(start, memslot->userspace_addr);
171 hva_end = min(end, memslot->userspace_addr +
172 (memslot->npages << PAGE_SHIFT));
173 if (hva_start >= hva_end)
174 continue;
175 /*
176 * {gfn(page) | page intersects with [hva_start, hva_end)} =
177 * {gfn, gfn+1, ..., gfn_end-1}.
178 */
179 gfn = hva_to_gfn_memslot(hva_start, memslot);
180 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
181 kvm_for_each_vcpu(i, vcpu, kvm)
182 kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
183 gfn_end << PAGE_SHIFT);
184 }
185}
103 186
104int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) 187static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
105{ 188{
106 trace_kvm_unmap_hva(hva); 189 trace_kvm_unmap_hva(hva);
107 190
108 /* 191 do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
109 * Flush all shadow tlb entries everywhere. This is slow, but
110 * we are 100% sure that we catch the to be unmapped page
111 */
112 kvm_flush_remote_tlbs(kvm);
113 192
114 return 0; 193 return 0;
115} 194}
116 195
117int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) 196static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
197 unsigned long end)
118{ 198{
119 /* kvm_unmap_hva flushes everything anyways */ 199 do_kvm_unmap_hva(kvm, start, end);
120 kvm_unmap_hva(kvm, start);
121 200
122 return 0; 201 return 0;
123} 202}
124 203
125int kvm_age_hva(struct kvm *kvm, unsigned long hva) 204static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
126{ 205{
127 /* XXX could be more clever ;) */ 206 /* XXX could be more clever ;) */
128 return 0; 207 return 0;
129} 208}
130 209
131int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) 210static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
132{ 211{
133 /* XXX could be more clever ;) */ 212 /* XXX could be more clever ;) */
134 return 0; 213 return 0;
135} 214}
136 215
137void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) 216static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
138{ 217{
139 /* The page will get remapped properly on its next fault */ 218 /* The page will get remapped properly on its next fault */
140 kvm_unmap_hva(kvm, hva); 219 do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
141} 220}
142 221
143/*****************************************/ 222/*****************************************/
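
A worked example of the clamping in do_kvm_unmap_hva(), with made-up numbers:
suppose a memslot maps userspace_addr 0x10000000 for 0x100 pages at
base_gfn 0x400, and the notifier reports start = 0x10003000,
end = 0x10005000. Then:

    hva_start = max(0x10003000, 0x10000000);            /* 0x10003000 */
    hva_end   = min(0x10005000, 0x10000000 + (0x100 << PAGE_SHIFT));
    gfn       = hva_to_gfn_memslot(hva_start, memslot);            /* 0x403 */
    gfn_end   = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
                                                                   /* 0x405 */

so kvmppc_mmu_pte_pflush() is called for the half-open guest-physical range
[0x403000, 0x405000), i.e. exactly the two touched pages, instead of flushing
every shadow TLB entry as the old kvm_unmap_hva() did.
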
@@ -159,7 +238,7 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
159 vcpu->arch.shadow_msr = smsr; 238 vcpu->arch.shadow_msr = smsr;
160} 239}
161 240
162void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) 241static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
163{ 242{
164 ulong old_msr = vcpu->arch.shared->msr; 243 ulong old_msr = vcpu->arch.shared->msr;
165 244
@@ -219,7 +298,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
219 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); 298 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
220} 299}
221 300
222void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) 301void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
223{ 302{
224 u32 host_pvr; 303 u32 host_pvr;
225 304
@@ -256,6 +335,23 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
256 if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be")) 335 if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
257 to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1); 336 to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
258 337
338 /*
339 * If they're asking for POWER6 or later, set the flag
340 * indicating that we can do multiple large page sizes
341 * and 1TB segments.
342 * Also set the flag that indicates that tlbie has the large
343 * page bit in the RB operand instead of the instruction.
344 */
345 switch (PVR_VER(pvr)) {
346 case PVR_POWER6:
347 case PVR_POWER7:
348 case PVR_POWER7p:
349 case PVR_POWER8:
350 vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
351 BOOK3S_HFLAG_NEW_TLBIE;
352 break;
353 }
354
259#ifdef CONFIG_PPC_BOOK3S_32 355#ifdef CONFIG_PPC_BOOK3S_32
260 /* 32 bit Book3S always has 32 byte dcbz */ 356 /* 32 bit Book3S always has 32 byte dcbz */
261 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; 357 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
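
PVR_VER() from the kernel headers extracts the processor version from the top
halfword of the PVR, so the defaults used here decode as follows (a worked
example; the low-order revision bits are illustrative and do not matter):

    #define PVR_VER(pvr)    (((pvr) >> 16) & 0xFFFF)

    PVR_VER(0x003C0301) == 0x003C    /* PPC970FX: no extra hflags */
    PVR_VER(0x003F0201) == 0x003F    /* POWER7: MULTI_PGSIZE | NEW_TLBIE */
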
@@ -334,6 +430,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
334 ulong eaddr, int vec) 430 ulong eaddr, int vec)
335{ 431{
336 bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE); 432 bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
433 bool iswrite = false;
337 int r = RESUME_GUEST; 434 int r = RESUME_GUEST;
338 int relocated; 435 int relocated;
339 int page_found = 0; 436 int page_found = 0;
@@ -344,10 +441,12 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
344 u64 vsid; 441 u64 vsid;
345 442
346 relocated = data ? dr : ir; 443 relocated = data ? dr : ir;
444 if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
445 iswrite = true;
347 446
348 /* Resolve real address if translation turned on */ 447 /* Resolve real address if translation turned on */
349 if (relocated) { 448 if (relocated) {
350 page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data); 449 page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
351 } else { 450 } else {
352 pte.may_execute = true; 451 pte.may_execute = true;
353 pte.may_read = true; 452 pte.may_read = true;
@@ -355,6 +454,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
355 pte.raddr = eaddr & KVM_PAM; 454 pte.raddr = eaddr & KVM_PAM;
356 pte.eaddr = eaddr; 455 pte.eaddr = eaddr;
357 pte.vpage = eaddr >> 12; 456 pte.vpage = eaddr >> 12;
457 pte.page_size = MMU_PAGE_64K;
358 } 458 }
359 459
360 switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { 460 switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
@@ -388,22 +488,18 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
388 488
389 if (page_found == -ENOENT) { 489 if (page_found == -ENOENT) {
390 /* Page not found in guest PTE entries */ 490 /* Page not found in guest PTE entries */
391 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
392 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); 491 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
393 vcpu->arch.shared->dsisr = svcpu->fault_dsisr; 492 vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
394 vcpu->arch.shared->msr |= 493 vcpu->arch.shared->msr |=
395 (svcpu->shadow_srr1 & 0x00000000f8000000ULL); 494 vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
396 svcpu_put(svcpu);
397 kvmppc_book3s_queue_irqprio(vcpu, vec); 495 kvmppc_book3s_queue_irqprio(vcpu, vec);
398 } else if (page_found == -EPERM) { 496 } else if (page_found == -EPERM) {
399 /* Storage protection */ 497 /* Storage protection */
400 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
401 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); 498 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
402 vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE; 499 vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
403 vcpu->arch.shared->dsisr |= DSISR_PROTFAULT; 500 vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
404 vcpu->arch.shared->msr |= 501 vcpu->arch.shared->msr |=
405 svcpu->shadow_srr1 & 0x00000000f8000000ULL; 502 vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
406 svcpu_put(svcpu);
407 kvmppc_book3s_queue_irqprio(vcpu, vec); 503 kvmppc_book3s_queue_irqprio(vcpu, vec);
408 } else if (page_found == -EINVAL) { 504 } else if (page_found == -EINVAL) {
409 /* Page not found in guest SLB */ 505 /* Page not found in guest SLB */
@@ -411,12 +507,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
411 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); 507 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
412 } else if (!is_mmio && 508 } else if (!is_mmio &&
413 kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { 509 kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
510 if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
511 /*
512 * There is already a host HPTE there, presumably
513 * a read-only one for a page the guest thinks
514 * is writable, so get rid of it first.
515 */
516 kvmppc_mmu_unmap_page(vcpu, &pte);
517 }
414 /* The guest's PTE is not mapped yet. Map on the host */ 518 /* The guest's PTE is not mapped yet. Map on the host */
415 kvmppc_mmu_map_page(vcpu, &pte); 519 kvmppc_mmu_map_page(vcpu, &pte, iswrite);
416 if (data) 520 if (data)
417 vcpu->stat.sp_storage++; 521 vcpu->stat.sp_storage++;
418 else if (vcpu->arch.mmu.is_dcbz32(vcpu) && 522 else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
419 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) 523 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
420 kvmppc_patch_dcbz(vcpu, &pte); 524 kvmppc_patch_dcbz(vcpu, &pte);
421 } else { 525 } else {
422 /* MMIO */ 526 /* MMIO */
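
The new write-fault handling can be summarized as follows (a sketch of the
logic added above, not new code): a data fault with a translation already
present is assumed to have hit a read-only host mapping of a page the guest
believes is writable, so the stale HPTE is dropped before mapping again with
the faulting access's write intent.

    bool iswrite = data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE);

    if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE))
        kvmppc_mmu_unmap_page(vcpu, &pte);   /* stale read-only HPTE */
    kvmppc_mmu_map_page(vcpu, &pte, iswrite);
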
@@ -619,13 +723,15 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
619 723
620 if (lost_ext & MSR_FP) 724 if (lost_ext & MSR_FP)
621 kvmppc_load_up_fpu(); 725 kvmppc_load_up_fpu();
726#ifdef CONFIG_ALTIVEC
622 if (lost_ext & MSR_VEC) 727 if (lost_ext & MSR_VEC)
623 kvmppc_load_up_altivec(); 728 kvmppc_load_up_altivec();
729#endif
624 current->thread.regs->msr |= lost_ext; 730 current->thread.regs->msr |= lost_ext;
625} 731}
626 732
627int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, 733int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
628 unsigned int exit_nr) 734 unsigned int exit_nr)
629{ 735{
630 int r = RESUME_HOST; 736 int r = RESUME_HOST;
631 int s; 737 int s;
@@ -643,25 +749,32 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
643 switch (exit_nr) { 749 switch (exit_nr) {
644 case BOOK3S_INTERRUPT_INST_STORAGE: 750 case BOOK3S_INTERRUPT_INST_STORAGE:
645 { 751 {
646 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); 752 ulong shadow_srr1 = vcpu->arch.shadow_srr1;
647 ulong shadow_srr1 = svcpu->shadow_srr1;
648 vcpu->stat.pf_instruc++; 753 vcpu->stat.pf_instruc++;
649 754
650#ifdef CONFIG_PPC_BOOK3S_32 755#ifdef CONFIG_PPC_BOOK3S_32
651 /* We set segments as unused segments when invalidating them. So 756 /* We set segments as unused segments when invalidating them. So
 652 	/* We set segments as unused segments when invalidating them. So	 757 	/* We set segments as unused segments when invalidating them. So
	 * treat the respective fault as a segment fault. */			 	 * treat the respective fault as a segment fault. */
653 if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) { 758 {
654 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); 759 struct kvmppc_book3s_shadow_vcpu *svcpu;
655 r = RESUME_GUEST; 760 u32 sr;
761
762 svcpu = svcpu_get(vcpu);
763 sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
656 svcpu_put(svcpu); 764 svcpu_put(svcpu);
657 break; 765 if (sr == SR_INVALID) {
766 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
767 r = RESUME_GUEST;
768 break;
769 }
658 } 770 }
659#endif 771#endif
660 svcpu_put(svcpu);
661 772
662 /* only care about PTEG not found errors, but leave NX alone */ 773 /* only care about PTEG not found errors, but leave NX alone */
663 if (shadow_srr1 & 0x40000000) { 774 if (shadow_srr1 & 0x40000000) {
775 int idx = srcu_read_lock(&vcpu->kvm->srcu);
664 r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr); 776 r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
777 srcu_read_unlock(&vcpu->kvm->srcu, idx);
665 vcpu->stat.sp_instruc++; 778 vcpu->stat.sp_instruc++;
666 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && 779 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
667 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { 780 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
@@ -682,25 +795,36 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
682 case BOOK3S_INTERRUPT_DATA_STORAGE: 795 case BOOK3S_INTERRUPT_DATA_STORAGE:
683 { 796 {
684 ulong dar = kvmppc_get_fault_dar(vcpu); 797 ulong dar = kvmppc_get_fault_dar(vcpu);
685 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); 798 u32 fault_dsisr = vcpu->arch.fault_dsisr;
686 u32 fault_dsisr = svcpu->fault_dsisr;
687 vcpu->stat.pf_storage++; 799 vcpu->stat.pf_storage++;
688 800
689#ifdef CONFIG_PPC_BOOK3S_32 801#ifdef CONFIG_PPC_BOOK3S_32
690 /* We set segments as unused segments when invalidating them. So 802 /* We set segments as unused segments when invalidating them. So
 691 	 * treat the respective fault as a segment fault. */	 803 	 * treat the respective fault as a segment fault. */
692 if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) { 804 {
693 kvmppc_mmu_map_segment(vcpu, dar); 805 struct kvmppc_book3s_shadow_vcpu *svcpu;
694 r = RESUME_GUEST; 806 u32 sr;
807
808 svcpu = svcpu_get(vcpu);
809 sr = svcpu->sr[dar >> SID_SHIFT];
695 svcpu_put(svcpu); 810 svcpu_put(svcpu);
696 break; 811 if (sr == SR_INVALID) {
812 kvmppc_mmu_map_segment(vcpu, dar);
813 r = RESUME_GUEST;
814 break;
815 }
697 } 816 }
698#endif 817#endif
699 svcpu_put(svcpu);
700 818
701 /* The only case we need to handle is missing shadow PTEs */ 819 /*
702 if (fault_dsisr & DSISR_NOHPTE) { 820 * We need to handle missing shadow PTEs, and
821 * protection faults due to us mapping a page read-only
822 * when the guest thinks it is writable.
823 */
824 if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
825 int idx = srcu_read_lock(&vcpu->kvm->srcu);
703 r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); 826 r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
827 srcu_read_unlock(&vcpu->kvm->srcu, idx);
704 } else { 828 } else {
705 vcpu->arch.shared->dar = dar; 829 vcpu->arch.shared->dar = dar;
706 vcpu->arch.shared->dsisr = fault_dsisr; 830 vcpu->arch.shared->dsisr = fault_dsisr;
@@ -743,13 +867,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
743 case BOOK3S_INTERRUPT_H_EMUL_ASSIST: 867 case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
744 { 868 {
745 enum emulation_result er; 869 enum emulation_result er;
746 struct kvmppc_book3s_shadow_vcpu *svcpu;
747 ulong flags; 870 ulong flags;
748 871
749program_interrupt: 872program_interrupt:
750 svcpu = svcpu_get(vcpu); 873 flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
751 flags = svcpu->shadow_srr1 & 0x1f0000ull;
752 svcpu_put(svcpu);
753 874
754 if (vcpu->arch.shared->msr & MSR_PR) { 875 if (vcpu->arch.shared->msr & MSR_PR) {
755#ifdef EXIT_DEBUG 876#ifdef EXIT_DEBUG
@@ -798,7 +919,7 @@ program_interrupt:
798 ulong cmd = kvmppc_get_gpr(vcpu, 3); 919 ulong cmd = kvmppc_get_gpr(vcpu, 3);
799 int i; 920 int i;
800 921
801#ifdef CONFIG_KVM_BOOK3S_64_PR 922#ifdef CONFIG_PPC_BOOK3S_64
802 if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) { 923 if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
803 r = RESUME_GUEST; 924 r = RESUME_GUEST;
804 break; 925 break;
@@ -881,9 +1002,7 @@ program_interrupt:
881 break; 1002 break;
882 default: 1003 default:
883 { 1004 {
884 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); 1005 ulong shadow_srr1 = vcpu->arch.shadow_srr1;
885 ulong shadow_srr1 = svcpu->shadow_srr1;
886 svcpu_put(svcpu);
887 /* Ugh - bork here! What did we get? */ 1006 /* Ugh - bork here! What did we get? */
888 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", 1007 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
889 exit_nr, kvmppc_get_pc(vcpu), shadow_srr1); 1008 exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
@@ -920,8 +1039,8 @@ program_interrupt:
920 return r; 1039 return r;
921} 1040}
922 1041
923int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 1042static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
924 struct kvm_sregs *sregs) 1043 struct kvm_sregs *sregs)
925{ 1044{
926 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 1045 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
927 int i; 1046 int i;
@@ -947,13 +1066,13 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
947 return 0; 1066 return 0;
948} 1067}
949 1068
950int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 1069static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
951 struct kvm_sregs *sregs) 1070 struct kvm_sregs *sregs)
952{ 1071{
953 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 1072 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
954 int i; 1073 int i;
955 1074
956 kvmppc_set_pvr(vcpu, sregs->pvr); 1075 kvmppc_set_pvr_pr(vcpu, sregs->pvr);
957 1076
958 vcpu3s->sdr1 = sregs->u.s.sdr1; 1077 vcpu3s->sdr1 = sregs->u.s.sdr1;
959 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { 1078 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
@@ -983,7 +1102,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
983 return 0; 1102 return 0;
984} 1103}
985 1104
986int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) 1105static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
1106 union kvmppc_one_reg *val)
987{ 1107{
988 int r = 0; 1108 int r = 0;
989 1109
@@ -1012,7 +1132,8 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
1012 return r; 1132 return r;
1013} 1133}
1014 1134
1015int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) 1135static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
1136 union kvmppc_one_reg *val)
1016{ 1137{
1017 int r = 0; 1138 int r = 0;
1018 1139
@@ -1042,28 +1163,30 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
1042 return r; 1163 return r;
1043} 1164}
1044 1165
1045int kvmppc_core_check_processor_compat(void) 1166static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
1046{ 1167 unsigned int id)
1047 return 0;
1048}
1049
1050struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
1051{ 1168{
1052 struct kvmppc_vcpu_book3s *vcpu_book3s; 1169 struct kvmppc_vcpu_book3s *vcpu_book3s;
1053 struct kvm_vcpu *vcpu; 1170 struct kvm_vcpu *vcpu;
1054 int err = -ENOMEM; 1171 int err = -ENOMEM;
1055 unsigned long p; 1172 unsigned long p;
1056 1173
1057 vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s)); 1174 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1058 if (!vcpu_book3s) 1175 if (!vcpu)
1059 goto out; 1176 goto out;
1060 1177
1061 vcpu_book3s->shadow_vcpu = 1178 vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
1062 kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL); 1179 if (!vcpu_book3s)
1063 if (!vcpu_book3s->shadow_vcpu)
1064 goto free_vcpu; 1180 goto free_vcpu;
1181 vcpu->arch.book3s = vcpu_book3s;
1182
1183#ifdef CONFIG_KVM_BOOK3S_32
1184 vcpu->arch.shadow_vcpu =
1185 kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
1186 if (!vcpu->arch.shadow_vcpu)
1187 goto free_vcpu3s;
1188#endif
1065 1189
1066 vcpu = &vcpu_book3s->vcpu;
1067 err = kvm_vcpu_init(vcpu, kvm, id); 1190 err = kvm_vcpu_init(vcpu, kvm, id);
1068 if (err) 1191 if (err)
1069 goto free_shadow_vcpu; 1192 goto free_shadow_vcpu;
@@ -1076,13 +1199,19 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
1076 vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096); 1199 vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
1077 1200
1078#ifdef CONFIG_PPC_BOOK3S_64 1201#ifdef CONFIG_PPC_BOOK3S_64
1079 /* default to book3s_64 (970fx) */ 1202 /*
 1203 	 * Default to the same as the host if we're on a sufficiently
 1204 	 * recent machine that has 1TB segments;
1205 * otherwise default to PPC970FX.
1206 */
1080 vcpu->arch.pvr = 0x3C0301; 1207 vcpu->arch.pvr = 0x3C0301;
1208 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1209 vcpu->arch.pvr = mfspr(SPRN_PVR);
1081#else 1210#else
1082 /* default to book3s_32 (750) */ 1211 /* default to book3s_32 (750) */
1083 vcpu->arch.pvr = 0x84202; 1212 vcpu->arch.pvr = 0x84202;
1084#endif 1213#endif
1085 kvmppc_set_pvr(vcpu, vcpu->arch.pvr); 1214 kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
1086 vcpu->arch.slb_nr = 64; 1215 vcpu->arch.slb_nr = 64;
1087 1216
1088 vcpu->arch.shadow_msr = MSR_USER64; 1217 vcpu->arch.shadow_msr = MSR_USER64;
@@ -1096,24 +1225,31 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
1096uninit_vcpu: 1225uninit_vcpu:
1097 kvm_vcpu_uninit(vcpu); 1226 kvm_vcpu_uninit(vcpu);
1098free_shadow_vcpu: 1227free_shadow_vcpu:
1099 kfree(vcpu_book3s->shadow_vcpu); 1228#ifdef CONFIG_KVM_BOOK3S_32
1100free_vcpu: 1229 kfree(vcpu->arch.shadow_vcpu);
1230free_vcpu3s:
1231#endif
1101 vfree(vcpu_book3s); 1232 vfree(vcpu_book3s);
1233free_vcpu:
1234 kmem_cache_free(kvm_vcpu_cache, vcpu);
1102out: 1235out:
1103 return ERR_PTR(err); 1236 return ERR_PTR(err);
1104} 1237}
1105 1238
1106void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) 1239static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
1107{ 1240{
1108 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); 1241 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
1109 1242
1110 free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); 1243 free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
1111 kvm_vcpu_uninit(vcpu); 1244 kvm_vcpu_uninit(vcpu);
1112 kfree(vcpu_book3s->shadow_vcpu); 1245#ifdef CONFIG_KVM_BOOK3S_32
1246 kfree(vcpu->arch.shadow_vcpu);
1247#endif
1113 vfree(vcpu_book3s); 1248 vfree(vcpu_book3s);
1249 kmem_cache_free(kvm_vcpu_cache, vcpu);
1114} 1250}
1115 1251
1116int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 1252static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1117{ 1253{
1118 int ret; 1254 int ret;
1119 struct thread_fp_state fp; 1255 struct thread_fp_state fp;
@@ -1216,8 +1352,8 @@ out:
1216/* 1352/*
1217 * Get (and clear) the dirty memory log for a memory slot. 1353 * Get (and clear) the dirty memory log for a memory slot.
1218 */ 1354 */
1219int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, 1355static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
1220 struct kvm_dirty_log *log) 1356 struct kvm_dirty_log *log)
1221{ 1357{
1222 struct kvm_memory_slot *memslot; 1358 struct kvm_memory_slot *memslot;
1223 struct kvm_vcpu *vcpu; 1359 struct kvm_vcpu *vcpu;
@@ -1252,67 +1388,100 @@ out:
1252 return r; 1388 return r;
1253} 1389}
1254 1390
1255#ifdef CONFIG_PPC64 1391static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
1256int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info) 1392 struct kvm_memory_slot *memslot)
1257{ 1393{
1258 info->flags = KVM_PPC_1T_SEGMENTS; 1394 return;
1259 1395}
1260 /* SLB is always 64 entries */
1261 info->slb_size = 64;
1262
1263 /* Standard 4k base page size segment */
1264 info->sps[0].page_shift = 12;
1265 info->sps[0].slb_enc = 0;
1266 info->sps[0].enc[0].page_shift = 12;
1267 info->sps[0].enc[0].pte_enc = 0;
1268
1269 /* Standard 16M large page size segment */
1270 info->sps[1].page_shift = 24;
1271 info->sps[1].slb_enc = SLB_VSID_L;
1272 info->sps[1].enc[0].page_shift = 24;
1273 info->sps[1].enc[0].pte_enc = 0;
1274 1396
1397static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
1398 struct kvm_memory_slot *memslot,
1399 struct kvm_userspace_memory_region *mem)
1400{
1275 return 0; 1401 return 0;
1276} 1402}
1277#endif /* CONFIG_PPC64 */
1278 1403
1279void kvmppc_core_free_memslot(struct kvm_memory_slot *free, 1404static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
1280 struct kvm_memory_slot *dont) 1405 struct kvm_userspace_memory_region *mem,
1406 const struct kvm_memory_slot *old)
1281{ 1407{
1408 return;
1282} 1409}
1283 1410
1284int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, 1411static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
1285 unsigned long npages) 1412 struct kvm_memory_slot *dont)
1286{ 1413{
1287 return 0; 1414 return;
1288} 1415}
1289 1416
1290int kvmppc_core_prepare_memory_region(struct kvm *kvm, 1417static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
1291 struct kvm_memory_slot *memslot, 1418 unsigned long npages)
1292 struct kvm_userspace_memory_region *mem)
1293{ 1419{
1294 return 0; 1420 return 0;
1295} 1421}
1296 1422
1297void kvmppc_core_commit_memory_region(struct kvm *kvm, 1423
1298 struct kvm_userspace_memory_region *mem, 1424#ifdef CONFIG_PPC64
1299 const struct kvm_memory_slot *old) 1425static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
1426 struct kvm_ppc_smmu_info *info)
1300{ 1427{
1301} 1428 long int i;
1429 struct kvm_vcpu *vcpu;
1430
1431 info->flags = 0;
1432
1433 /* SLB is always 64 entries */
1434 info->slb_size = 64;
1435
1436 /* Standard 4k base page size segment */
1437 info->sps[0].page_shift = 12;
1438 info->sps[0].slb_enc = 0;
1439 info->sps[0].enc[0].page_shift = 12;
1440 info->sps[0].enc[0].pte_enc = 0;
1441
1442 /*
1443 * 64k large page size.
1444 * We only want to put this in if the CPUs we're emulating
1445 * support it, but unfortunately we don't have a vcpu easily
1446 * to hand here to test. Just pick the first vcpu, and if
1447 * that doesn't exist yet, report the minimum capability,
1448 * i.e., no 64k pages.
1449 * 1T segment support goes along with 64k pages.
1450 */
1451 i = 1;
1452 vcpu = kvm_get_vcpu(kvm, 0);
1453 if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
1454 info->flags = KVM_PPC_1T_SEGMENTS;
1455 info->sps[i].page_shift = 16;
1456 info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
1457 info->sps[i].enc[0].page_shift = 16;
1458 info->sps[i].enc[0].pte_enc = 1;
1459 ++i;
1460 }
1461
1462 /* Standard 16M large page size segment */
1463 info->sps[i].page_shift = 24;
1464 info->sps[i].slb_enc = SLB_VSID_L;
1465 info->sps[i].enc[0].page_shift = 24;
1466 info->sps[i].enc[0].pte_enc = 0;
1302 1467
1303void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) 1468 return 0;
1469}
1470#else
1471static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
1472 struct kvm_ppc_smmu_info *info)
1304{ 1473{
1474 /* We should not get called */
1475 BUG();
1305} 1476}
1477#endif /* CONFIG_PPC64 */
1306 1478
1307static unsigned int kvm_global_user_count = 0; 1479static unsigned int kvm_global_user_count = 0;
1308static DEFINE_SPINLOCK(kvm_global_user_count_lock); 1480static DEFINE_SPINLOCK(kvm_global_user_count_lock);
1309 1481
1310int kvmppc_core_init_vm(struct kvm *kvm) 1482static int kvmppc_core_init_vm_pr(struct kvm *kvm)
1311{ 1483{
1312#ifdef CONFIG_PPC64 1484 mutex_init(&kvm->arch.hpt_mutex);
1313 INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
1314 INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
1315#endif
1316 1485
1317 if (firmware_has_feature(FW_FEATURE_SET_MODE)) { 1486 if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
1318 spin_lock(&kvm_global_user_count_lock); 1487 spin_lock(&kvm_global_user_count_lock);
@@ -1323,7 +1492,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
1323 return 0; 1492 return 0;
1324} 1493}
1325 1494
1326void kvmppc_core_destroy_vm(struct kvm *kvm) 1495static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
1327{ 1496{
1328#ifdef CONFIG_PPC64 1497#ifdef CONFIG_PPC64
1329 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); 1498 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
@@ -1338,26 +1507,81 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
1338 } 1507 }
1339} 1508}
1340 1509
1341static int kvmppc_book3s_init(void) 1510static int kvmppc_core_check_processor_compat_pr(void)
1342{ 1511{
1343 int r; 1512 /* we are always compatible */
1513 return 0;
1514}
1344 1515
1345 r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0, 1516static long kvm_arch_vm_ioctl_pr(struct file *filp,
1346 THIS_MODULE); 1517 unsigned int ioctl, unsigned long arg)
1518{
1519 return -ENOTTY;
1520}
1347 1521
1348 if (r) 1522static struct kvmppc_ops kvm_ops_pr = {
1523 .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
1524 .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
1525 .get_one_reg = kvmppc_get_one_reg_pr,
1526 .set_one_reg = kvmppc_set_one_reg_pr,
1527 .vcpu_load = kvmppc_core_vcpu_load_pr,
1528 .vcpu_put = kvmppc_core_vcpu_put_pr,
1529 .set_msr = kvmppc_set_msr_pr,
1530 .vcpu_run = kvmppc_vcpu_run_pr,
1531 .vcpu_create = kvmppc_core_vcpu_create_pr,
1532 .vcpu_free = kvmppc_core_vcpu_free_pr,
1533 .check_requests = kvmppc_core_check_requests_pr,
1534 .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
1535 .flush_memslot = kvmppc_core_flush_memslot_pr,
1536 .prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
1537 .commit_memory_region = kvmppc_core_commit_memory_region_pr,
1538 .unmap_hva = kvm_unmap_hva_pr,
1539 .unmap_hva_range = kvm_unmap_hva_range_pr,
1540 .age_hva = kvm_age_hva_pr,
1541 .test_age_hva = kvm_test_age_hva_pr,
1542 .set_spte_hva = kvm_set_spte_hva_pr,
1543 .mmu_destroy = kvmppc_mmu_destroy_pr,
1544 .free_memslot = kvmppc_core_free_memslot_pr,
1545 .create_memslot = kvmppc_core_create_memslot_pr,
1546 .init_vm = kvmppc_core_init_vm_pr,
1547 .destroy_vm = kvmppc_core_destroy_vm_pr,
1548 .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
1549 .emulate_op = kvmppc_core_emulate_op_pr,
1550 .emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
1551 .emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
1552 .fast_vcpu_kick = kvm_vcpu_kick,
1553 .arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
1554};
1555
1556
1557int kvmppc_book3s_init_pr(void)
1558{
1559 int r;
1560
1561 r = kvmppc_core_check_processor_compat_pr();
1562 if (r < 0)
1349 return r; 1563 return r;
1350 1564
1351 r = kvmppc_mmu_hpte_sysinit(); 1565 kvm_ops_pr.owner = THIS_MODULE;
1566 kvmppc_pr_ops = &kvm_ops_pr;
1352 1567
1568 r = kvmppc_mmu_hpte_sysinit();
1353 return r; 1569 return r;
1354} 1570}
1355 1571
1356static void kvmppc_book3s_exit(void) 1572void kvmppc_book3s_exit_pr(void)
1357{ 1573{
1574 kvmppc_pr_ops = NULL;
1358 kvmppc_mmu_hpte_sysexit(); 1575 kvmppc_mmu_hpte_sysexit();
1359 kvm_exit();
1360} 1576}
1361 1577
1362module_init(kvmppc_book3s_init); 1578/*
1363module_exit(kvmppc_book3s_exit); 1579 * We only support separate modules for book3s 64
1580 */
1581#ifdef CONFIG_PPC_BOOK3S_64
1582
1583module_init(kvmppc_book3s_init_pr);
1584module_exit(kvmppc_book3s_exit_pr);
1585
1586MODULE_LICENSE("GPL");
1587#endif
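
The kvm_ops_pr table is the PR half of the new runtime dispatch: rather than
linking one flavour's kvmppc_core_* implementations directly, generic code
now calls through a per-VM ops pointer. A minimal sketch of that indirection,
assuming the kvm->arch.kvm_ops field and the kvmppc_pr_ops/kvmppc_hv_ops
globals referenced elsewhere in this series:

    static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
    {
        return kvm->arch.kvm_ops == kvmppc_hv_ops;
    }

    /* generic code then dispatches through the table, e.g.: */
    ret = vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);

This is what allows the HV and PR implementations to be built as separate
modules (see the new module_init(kvmppc_book3s_init_pr) above) and chosen at
VM creation time rather than at build time.
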
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index da0e0bc268bd..5efa97b993d8 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -21,6 +21,8 @@
21#include <asm/kvm_ppc.h> 21#include <asm/kvm_ppc.h>
22#include <asm/kvm_book3s.h> 22#include <asm/kvm_book3s.h>
23 23
24#define HPTE_SIZE 16 /* bytes per HPT entry */
25
24static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index) 26static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
25{ 27{
26 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); 28 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
@@ -40,32 +42,41 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
40 long pte_index = kvmppc_get_gpr(vcpu, 5); 42 long pte_index = kvmppc_get_gpr(vcpu, 5);
41 unsigned long pteg[2 * 8]; 43 unsigned long pteg[2 * 8];
42 unsigned long pteg_addr, i, *hpte; 44 unsigned long pteg_addr, i, *hpte;
45 long int ret;
43 46
47 i = pte_index & 7;
44 pte_index &= ~7UL; 48 pte_index &= ~7UL;
45 pteg_addr = get_pteg_addr(vcpu, pte_index); 49 pteg_addr = get_pteg_addr(vcpu, pte_index);
46 50
51 mutex_lock(&vcpu->kvm->arch.hpt_mutex);
47 copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)); 52 copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
48 hpte = pteg; 53 hpte = pteg;
49 54
55 ret = H_PTEG_FULL;
50 if (likely((flags & H_EXACT) == 0)) { 56 if (likely((flags & H_EXACT) == 0)) {
51 pte_index &= ~7UL;
52 for (i = 0; ; ++i) { 57 for (i = 0; ; ++i) {
53 if (i == 8) 58 if (i == 8)
54 return H_PTEG_FULL; 59 goto done;
55 if ((*hpte & HPTE_V_VALID) == 0) 60 if ((*hpte & HPTE_V_VALID) == 0)
56 break; 61 break;
57 hpte += 2; 62 hpte += 2;
58 } 63 }
59 } else { 64 } else {
60 i = kvmppc_get_gpr(vcpu, 5) & 7UL;
61 hpte += i * 2; 65 hpte += i * 2;
66 if (*hpte & HPTE_V_VALID)
67 goto done;
62 } 68 }
63 69
64 hpte[0] = kvmppc_get_gpr(vcpu, 6); 70 hpte[0] = kvmppc_get_gpr(vcpu, 6);
65 hpte[1] = kvmppc_get_gpr(vcpu, 7); 71 hpte[1] = kvmppc_get_gpr(vcpu, 7);
66 copy_to_user((void __user *)pteg_addr, pteg, sizeof(pteg)); 72 pteg_addr += i * HPTE_SIZE;
67 kvmppc_set_gpr(vcpu, 3, H_SUCCESS); 73 copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE);
68 kvmppc_set_gpr(vcpu, 4, pte_index | i); 74 kvmppc_set_gpr(vcpu, 4, pte_index | i);
75 ret = H_SUCCESS;
76
77 done:
78 mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
79 kvmppc_set_gpr(vcpu, 3, ret);
69 80
70 return EMULATE_DONE; 81 return EMULATE_DONE;
71} 82}
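
The index arithmetic in the reworked kvmppc_h_pr_enter() is worth spelling
out. Each PTEG is a group of 8 HPTEs of HPTE_SIZE (16) bytes, so for an
H_EXACT request with, say, pte_index = 0x123:

    i = 0x123 & 7;                 /* slot 3 within the group */
    pte_index &= ~7UL;             /* 0x120, first index of the PTEG */
    pteg_addr = get_pteg_addr(vcpu, pte_index);
    pteg_addr += i * HPTE_SIZE;    /* byte offset 0x30 into the PTEG */

Only the 16 bytes of the chosen slot are copied back, instead of the whole
PTEG as before, and the H_EXACT path now correctly fails with H_PTEG_FULL
when that slot is already valid.
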
@@ -77,26 +88,31 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
77 unsigned long avpn = kvmppc_get_gpr(vcpu, 6); 88 unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
78 unsigned long v = 0, pteg, rb; 89 unsigned long v = 0, pteg, rb;
79 unsigned long pte[2]; 90 unsigned long pte[2];
91 long int ret;
80 92
81 pteg = get_pteg_addr(vcpu, pte_index); 93 pteg = get_pteg_addr(vcpu, pte_index);
94 mutex_lock(&vcpu->kvm->arch.hpt_mutex);
82 copy_from_user(pte, (void __user *)pteg, sizeof(pte)); 95 copy_from_user(pte, (void __user *)pteg, sizeof(pte));
83 96
97 ret = H_NOT_FOUND;
84 if ((pte[0] & HPTE_V_VALID) == 0 || 98 if ((pte[0] & HPTE_V_VALID) == 0 ||
85 ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) || 99 ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
86 ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) { 100 ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
87 kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND); 101 goto done;
88 return EMULATE_DONE;
89 }
90 102
91 copy_to_user((void __user *)pteg, &v, sizeof(v)); 103 copy_to_user((void __user *)pteg, &v, sizeof(v));
92 104
93 rb = compute_tlbie_rb(pte[0], pte[1], pte_index); 105 rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
94 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); 106 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
95 107
96 kvmppc_set_gpr(vcpu, 3, H_SUCCESS); 108 ret = H_SUCCESS;
97 kvmppc_set_gpr(vcpu, 4, pte[0]); 109 kvmppc_set_gpr(vcpu, 4, pte[0]);
98 kvmppc_set_gpr(vcpu, 5, pte[1]); 110 kvmppc_set_gpr(vcpu, 5, pte[1]);
99 111
112 done:
113 mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
114 kvmppc_set_gpr(vcpu, 3, ret);
115
100 return EMULATE_DONE; 116 return EMULATE_DONE;
101} 117}
102 118
@@ -124,6 +140,7 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
124 int paramnr = 4; 140 int paramnr = 4;
125 int ret = H_SUCCESS; 141 int ret = H_SUCCESS;
126 142
143 mutex_lock(&vcpu->kvm->arch.hpt_mutex);
127 for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) { 144 for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
128 unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i)); 145 unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
129 unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1); 146 unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
@@ -172,6 +189,7 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
172 } 189 }
173 kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh); 190 kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
174 } 191 }
192 mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
175 kvmppc_set_gpr(vcpu, 3, ret); 193 kvmppc_set_gpr(vcpu, 3, ret);
176 194
177 return EMULATE_DONE; 195 return EMULATE_DONE;
@@ -184,15 +202,16 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
184 unsigned long avpn = kvmppc_get_gpr(vcpu, 6); 202 unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
185 unsigned long rb, pteg, r, v; 203 unsigned long rb, pteg, r, v;
186 unsigned long pte[2]; 204 unsigned long pte[2];
205 long int ret;
187 206
188 pteg = get_pteg_addr(vcpu, pte_index); 207 pteg = get_pteg_addr(vcpu, pte_index);
208 mutex_lock(&vcpu->kvm->arch.hpt_mutex);
189 copy_from_user(pte, (void __user *)pteg, sizeof(pte)); 209 copy_from_user(pte, (void __user *)pteg, sizeof(pte));
190 210
211 ret = H_NOT_FOUND;
191 if ((pte[0] & HPTE_V_VALID) == 0 || 212 if ((pte[0] & HPTE_V_VALID) == 0 ||
192 ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) { 213 ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn))
193 kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND); 214 goto done;
194 return EMULATE_DONE;
195 }
196 215
197 v = pte[0]; 216 v = pte[0];
198 r = pte[1]; 217 r = pte[1];
@@ -207,8 +226,11 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
207 rb = compute_tlbie_rb(v, r, pte_index); 226 rb = compute_tlbie_rb(v, r, pte_index);
208 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); 227 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
209 copy_to_user((void __user *)pteg, pte, sizeof(pte)); 228 copy_to_user((void __user *)pteg, pte, sizeof(pte));
229 ret = H_SUCCESS;
210 230
211 kvmppc_set_gpr(vcpu, 3, H_SUCCESS); 231 done:
232 mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
233 kvmppc_set_gpr(vcpu, 3, ret);
212 234
213 return EMULATE_DONE; 235 return EMULATE_DONE;
214} 236}
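
All four HPT hypercalls (H_ENTER, H_REMOVE, H_BULK_REMOVE, H_PROTECT) now
follow the same locking shape, serializing on the per-VM hpt_mutex introduced
by kvmppc_core_init_vm_pr() so that concurrent vcpus cannot interleave their
read-modify-write of a PTEG. A sketch of the pattern (hpte_matches() is a
hypothetical stand-in for the validity/AVPN checks):

    mutex_lock(&vcpu->kvm->arch.hpt_mutex);
    ret = H_NOT_FOUND;                    /* pessimistic default */
    if (hpte_matches(pte, flags, avpn)) {
        /* ... update the entry, tlbie as needed ... */
        ret = H_SUCCESS;
    }
    mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
    kvmppc_set_gpr(vcpu, 3, ret);         /* H_* status back in r3 */
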
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 8f7633e3afb8..a38c4c9edab8 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -38,32 +38,6 @@
38 38
39#define FUNC(name) GLUE(.,name) 39#define FUNC(name) GLUE(.,name)
40 40
41 .globl kvmppc_skip_interrupt
42kvmppc_skip_interrupt:
43 /*
44 * Here all GPRs are unchanged from when the interrupt happened
45 * except for r13, which is saved in SPRG_SCRATCH0.
46 */
47 mfspr r13, SPRN_SRR0
48 addi r13, r13, 4
49 mtspr SPRN_SRR0, r13
50 GET_SCRATCH0(r13)
51 rfid
52 b .
53
54 .globl kvmppc_skip_Hinterrupt
55kvmppc_skip_Hinterrupt:
56 /*
57 * Here all GPRs are unchanged from when the interrupt happened
58 * except for r13, which is saved in SPRG_SCRATCH0.
59 */
60 mfspr r13, SPRN_HSRR0
61 addi r13, r13, 4
62 mtspr SPRN_HSRR0, r13
63 GET_SCRATCH0(r13)
64 hrfid
65 b .
66
67#elif defined(CONFIG_PPC_BOOK3S_32) 41#elif defined(CONFIG_PPC_BOOK3S_32)
68 42
69#define FUNC(name) name 43#define FUNC(name) name
@@ -179,11 +153,15 @@ _GLOBAL(kvmppc_entry_trampoline)
179 153
180 li r6, MSR_IR | MSR_DR 154 li r6, MSR_IR | MSR_DR
181 andc r6, r5, r6 /* Clear DR and IR in MSR value */ 155 andc r6, r5, r6 /* Clear DR and IR in MSR value */
156#ifdef CONFIG_PPC_BOOK3S_32
182 /* 157 /*
183 * Set EE in HOST_MSR so that it's enabled when we get into our 158 * Set EE in HOST_MSR so that it's enabled when we get into our
184 * C exit handler function 159 * C exit handler function. On 64-bit we delay enabling
	 160 	 * interrupts until we have finished transferring state
161 * to or from the PACA.
185 */ 162 */
186 ori r5, r5, MSR_EE 163 ori r5, r5, MSR_EE
164#endif
187 mtsrr0 r7 165 mtsrr0 r7
188 mtsrr1 r6 166 mtsrr1 r6
189 RFI 167 RFI
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index 3219ba895246..cf95cdef73c9 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -260,6 +260,7 @@ fail:
260 */ 260 */
261 return rc; 261 return rc;
262} 262}
263EXPORT_SYMBOL_GPL(kvmppc_rtas_hcall);
263 264
264void kvmppc_rtas_tokens_free(struct kvm *kvm) 265void kvmppc_rtas_tokens_free(struct kvm *kvm)
265{ 266{
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 1abe4788191a..bc50c97751d3 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -161,8 +161,8 @@ kvmppc_handler_trampoline_enter_end:
161.global kvmppc_handler_trampoline_exit 161.global kvmppc_handler_trampoline_exit
162kvmppc_handler_trampoline_exit: 162kvmppc_handler_trampoline_exit:
163 163
164.global kvmppc_interrupt 164.global kvmppc_interrupt_pr
165kvmppc_interrupt: 165kvmppc_interrupt_pr:
166 166
167 /* Register usage at this point: 167 /* Register usage at this point:
168 * 168 *
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index a3a5cb8ee7ea..02a17dcf1610 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -818,7 +818,7 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
818 } 818 }
819 819
820 /* Check for real mode returning too hard */ 820 /* Check for real mode returning too hard */
821 if (xics->real_mode) 821 if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
822 return kvmppc_xics_rm_complete(vcpu, req); 822 return kvmppc_xics_rm_complete(vcpu, req);
823 823
824 switch (req) { 824 switch (req) {
@@ -840,6 +840,7 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
840 840
841 return rc; 841 return rc;
842} 842}
843EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);
843 844
844 845
845/* -- Initialisation code etc. -- */ 846/* -- Initialisation code etc. -- */
@@ -1250,13 +1251,13 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
1250 1251
1251 xics_debugfs_init(xics); 1252 xics_debugfs_init(xics);
1252 1253
1253#ifdef CONFIG_KVM_BOOK3S_64_HV 1254#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1254 if (cpu_has_feature(CPU_FTR_ARCH_206)) { 1255 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
1255 /* Enable real mode support */ 1256 /* Enable real mode support */
1256 xics->real_mode = ENABLE_REALMODE; 1257 xics->real_mode = ENABLE_REALMODE;
1257 xics->real_mode_dbg = DEBUG_REALMODE; 1258 xics->real_mode_dbg = DEBUG_REALMODE;
1258 } 1259 }
1259#endif /* CONFIG_KVM_BOOK3S_64_HV */ 1260#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
1260 1261
1261 return 0; 1262 return 0;
1262} 1263}
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 5133199f6cb7..53e65a210b9a 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -40,7 +40,9 @@
40 40
41#include "timing.h" 41#include "timing.h"
42#include "booke.h" 42#include "booke.h"
43#include "trace.h" 43
44#define CREATE_TRACE_POINTS
45#include "trace_booke.h"
44 46
45unsigned long kvmppc_booke_handlers; 47unsigned long kvmppc_booke_handlers;
46 48
@@ -133,6 +135,29 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
133#endif 135#endif
134} 136}
135 137
138static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
139{
140 /* Synchronize guest's desire to get debug interrupts into shadow MSR */
141#ifndef CONFIG_KVM_BOOKE_HV
142 vcpu->arch.shadow_msr &= ~MSR_DE;
143 vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
144#endif
145
146 /* Force enable debug interrupts when user space wants to debug */
147 if (vcpu->guest_debug) {
148#ifdef CONFIG_KVM_BOOKE_HV
149 /*
150 * Since there is no shadow MSR, sync MSR_DE into the guest
151 * visible MSR.
152 */
153 vcpu->arch.shared->msr |= MSR_DE;
154#else
155 vcpu->arch.shadow_msr |= MSR_DE;
156 vcpu->arch.shared->msr &= ~MSR_DE;
157#endif
158 }
159}
160
136/* 161/*
137 * Helper function for "full" MSR writes. No need to call this if only 162 * Helper function for "full" MSR writes. No need to call this if only
138 * EE/CE/ME/DE/RI are changing. 163 * EE/CE/ME/DE/RI are changing.
@@ -150,6 +175,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
150 kvmppc_mmu_msr_notify(vcpu, old_msr); 175 kvmppc_mmu_msr_notify(vcpu, old_msr);
151 kvmppc_vcpu_sync_spe(vcpu); 176 kvmppc_vcpu_sync_spe(vcpu);
152 kvmppc_vcpu_sync_fpu(vcpu); 177 kvmppc_vcpu_sync_fpu(vcpu);
178 kvmppc_vcpu_sync_debug(vcpu);
153} 179}
154 180
155static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, 181static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
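
kvmppc_vcpu_sync_debug(), added above, reconciles three sources of MSR_DE: the guest's own wish, the shadow MSR the core actually runs with (non-HV only), and a user space debug request that must win over both. A compilable sketch; the struct is a stand-in for the vcpu fields named in the patch:

#include <stdbool.h>
#include <stdint.h>

#define MSR_DE (1u << 9)      /* illustrative bit position */

struct toy_vcpu {
	uint32_t guest_msr;   /* vcpu->arch.shared->msr */
	uint32_t shadow_msr;  /* vcpu->arch.shadow_msr (non-HV only) */
	bool guest_debug;     /* vcpu->guest_debug from user space */
	bool booke_hv;        /* CONFIG_KVM_BOOKE_HV */
};

static void sync_debug(struct toy_vcpu *v)
{
	if (!v->booke_hv) {
		/* Mirror the guest's MSR_DE wish into the shadow MSR. */
		v->shadow_msr &= ~MSR_DE;
		v->shadow_msr |= v->guest_msr & MSR_DE;
	}

	if (v->guest_debug) {
		if (v->booke_hv) {
			/* No shadow MSR: force DE in the guest MSR. */
			v->guest_msr |= MSR_DE;
		} else {
			/* Host takes debug events; hide DE from the guest. */
			v->shadow_msr |= MSR_DE;
			v->guest_msr &= ~MSR_DE;
		}
	}
}

int main(void)
{
	struct toy_vcpu v = { .guest_msr = MSR_DE, .guest_debug = true };

	sync_debug(&v);
	return (v.shadow_msr & MSR_DE) && !(v.guest_msr & MSR_DE) ? 0 : 1;
}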
@@ -655,6 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
655int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 681int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
656{ 682{
657 int ret, s; 683 int ret, s;
684 struct thread_struct thread;
658#ifdef CONFIG_PPC_FPU 685#ifdef CONFIG_PPC_FPU
659 struct thread_fp_state fp; 686 struct thread_fp_state fp;
660 int fpexc_mode; 687 int fpexc_mode;
@@ -695,6 +722,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
695 kvmppc_load_guest_fp(vcpu); 722 kvmppc_load_guest_fp(vcpu);
696#endif 723#endif
697 724
725 /* Switch to guest debug context */
726 thread.debug = vcpu->arch.shadow_dbg_reg;
727 switch_booke_debug_regs(&thread);
728 thread.debug = current->thread.debug;
729 current->thread.debug = vcpu->arch.shadow_dbg_reg;
730
698 kvmppc_fix_ee_before_entry(); 731 kvmppc_fix_ee_before_entry();
699 732
700 ret = __kvmppc_vcpu_run(kvm_run, vcpu); 733 ret = __kvmppc_vcpu_run(kvm_run, vcpu);
@@ -702,6 +735,10 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
702 /* No need for kvm_guest_exit. It's done in handle_exit. 735 /* No need for kvm_guest_exit. It's done in handle_exit.
703 We also get here with interrupts enabled. */ 736 We also get here with interrupts enabled. */
704 737
738 /* Switch back to user space debug context */
739 switch_booke_debug_regs(&thread);
740 current->thread.debug = thread.debug;
741
705#ifdef CONFIG_PPC_FPU 742#ifdef CONFIG_PPC_FPU
706 kvmppc_save_guest_fp(vcpu); 743 kvmppc_save_guest_fp(vcpu);
707 744
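
The two run-loop hunks bracket __kvmppc_vcpu_run() with a debug-context swap: on entry both the hardware registers and current->thread carry the guest's shadow debug state, so a preemption mid-run restores the right thing, and on exit the user space context comes back. A sketch of that choreography, with a placeholder struct standing in for the kernel's debug register set:

struct debug_regs { unsigned long dbcr0, iac1, dac1; };  /* placeholder */

static struct debug_regs hw_regs;       /* stands in for the DBCR/IAC/DAC SPRs */
static struct debug_regs thread_debug;  /* current->thread.debug */

static void switch_booke_debug_regs(const struct debug_regs *new_regs)
{
	hw_regs = *new_regs;   /* the real code writes the SPRs */
}

static void run_vcpu(const struct debug_regs *guest_dbg)
{
	struct debug_regs host_dbg = thread_debug;

	/* Entry: hardware and current->thread both carry guest state. */
	switch_booke_debug_regs(guest_dbg);
	thread_debug = *guest_dbg;

	/* ... __kvmppc_vcpu_run() ... */

	/* Exit: restore the user space debug context. */
	switch_booke_debug_regs(&host_dbg);
	thread_debug = host_dbg;
}

int main(void)
{
	struct debug_regs guest = { 1, 2, 3 };

	run_vcpu(&guest);
	return hw_regs.dbcr0;   /* 0: host context restored */
}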
@@ -757,6 +794,30 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
757 } 794 }
758} 795}
759 796
797static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
798{
799 struct debug_reg *dbg_reg = &(vcpu->arch.shadow_dbg_reg);
800 u32 dbsr = vcpu->arch.dbsr;
801
802 run->debug.arch.status = 0;
803 run->debug.arch.address = vcpu->arch.pc;
804
805 if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
806 run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
807 } else {
808 if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
809 run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
810 else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
811 run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
812 if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
813 run->debug.arch.address = dbg_reg->dac1;
814 else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
815 run->debug.arch.address = dbg_reg->dac2;
816 }
817
818 return RESUME_HOST;
819}
820
760static void kvmppc_fill_pt_regs(struct pt_regs *regs) 821static void kvmppc_fill_pt_regs(struct pt_regs *regs)
761{ 822{
762 ulong r1, ip, msr, lr; 823 ulong r1, ip, msr, lr;
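
kvmppc_handle_debug() folds the raw DBSR bits into the exit description user space sees: instruction-address hits report KVMPPC_DEBUG_BREAKPOINT at the trapping PC, data-address hits report a read or write watch plus the matching DAC address. A compilable model of that decode; the bit values are invented for the demo, only the priority logic mirrors the patch:

#include <stdint.h>
#include <stdio.h>

#define DBSR_IAC1  0x01   /* instruction address compare hit */
#define DBSR_IAC2  0x02
#define DBSR_DAC1R 0x10   /* data address compare, read */
#define DBSR_DAC1W 0x20   /* data address compare, write */

#define DEBUG_BREAKPOINT  1
#define DEBUG_WATCH_READ  2
#define DEBUG_WATCH_WRITE 4

static int decode_dbsr(uint32_t dbsr)
{
	if (dbsr & (DBSR_IAC1 | DBSR_IAC2))
		return DEBUG_BREAKPOINT;     /* breakpoints take priority */
	if (dbsr & DBSR_DAC1W)
		return DEBUG_WATCH_WRITE;
	if (dbsr & DBSR_DAC1R)
		return DEBUG_WATCH_READ;
	return 0;
}

int main(void)
{
	/* An IAC hit wins even if a DAC bit is also pending. */
	printf("%d %d\n", decode_dbsr(DBSR_IAC1 | DBSR_DAC1R),
	       decode_dbsr(DBSR_DAC1W));
	return 0;
}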
@@ -817,6 +878,11 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
817 case BOOKE_INTERRUPT_CRITICAL: 878 case BOOKE_INTERRUPT_CRITICAL:
818 unknown_exception(&regs); 879 unknown_exception(&regs);
819 break; 880 break;
881 case BOOKE_INTERRUPT_DEBUG:
882 /* Save DBSR before preemption is enabled */
883 vcpu->arch.dbsr = mfspr(SPRN_DBSR);
884 kvmppc_clear_dbsr();
885 break;
820 } 886 }
821} 887}
822 888
@@ -1134,18 +1200,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
1134 } 1200 }
1135 1201
1136 case BOOKE_INTERRUPT_DEBUG: { 1202 case BOOKE_INTERRUPT_DEBUG: {
1137 u32 dbsr; 1203 r = kvmppc_handle_debug(run, vcpu);
1138 1204 if (r == RESUME_HOST)
1139 vcpu->arch.pc = mfspr(SPRN_CSRR0); 1205 run->exit_reason = KVM_EXIT_DEBUG;
1140
1141 /* clear IAC events in DBSR register */
1142 dbsr = mfspr(SPRN_DBSR);
1143 dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
1144 mtspr(SPRN_DBSR, dbsr);
1145
1146 run->exit_reason = KVM_EXIT_DEBUG;
1147 kvmppc_account_exit(vcpu, DEBUG_EXITS); 1206 kvmppc_account_exit(vcpu, DEBUG_EXITS);
1148 r = RESUME_HOST;
1149 break; 1207 break;
1150 } 1208 }
1151 1209
@@ -1196,7 +1254,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1196 kvmppc_set_msr(vcpu, 0); 1254 kvmppc_set_msr(vcpu, 0);
1197 1255
1198#ifndef CONFIG_KVM_BOOKE_HV 1256#ifndef CONFIG_KVM_BOOKE_HV
1199 vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS; 1257 vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
1200 vcpu->arch.shadow_pid = 1; 1258 vcpu->arch.shadow_pid = 1;
1201 vcpu->arch.shared->msr = 0; 1259 vcpu->arch.shared->msr = 0;
1202#endif 1260#endif
@@ -1358,7 +1416,7 @@ static int set_sregs_arch206(struct kvm_vcpu *vcpu,
1358 return 0; 1416 return 0;
1359} 1417}
1360 1418
1361void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 1419int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1362{ 1420{
1363 sregs->u.e.features |= KVM_SREGS_E_IVOR; 1421 sregs->u.e.features |= KVM_SREGS_E_IVOR;
1364 1422
@@ -1378,6 +1436,7 @@ void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1378 sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; 1436 sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
1379 sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; 1437 sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
1380 sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; 1438 sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
1439 return 0;
1381} 1440}
1382 1441
1383int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 1442int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
@@ -1412,8 +1471,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1412 1471
1413 get_sregs_base(vcpu, sregs); 1472 get_sregs_base(vcpu, sregs);
1414 get_sregs_arch206(vcpu, sregs); 1473 get_sregs_arch206(vcpu, sregs);
1415 kvmppc_core_get_sregs(vcpu, sregs); 1474 return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
1416 return 0;
1417} 1475}
1418 1476
1419int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 1477int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
@@ -1432,7 +1490,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1432 if (ret < 0) 1490 if (ret < 0)
1433 return ret; 1491 return ret;
1434 1492
1435 return kvmppc_core_set_sregs(vcpu, sregs); 1493 return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
1436} 1494}
1437 1495
1438int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) 1496int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
@@ -1440,7 +1498,6 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1440 int r = 0; 1498 int r = 0;
1441 union kvmppc_one_reg val; 1499 union kvmppc_one_reg val;
1442 int size; 1500 int size;
1443 long int i;
1444 1501
1445 size = one_reg_size(reg->id); 1502 size = one_reg_size(reg->id);
1446 if (size > sizeof(val)) 1503 if (size > sizeof(val))
@@ -1448,16 +1505,24 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1448 1505
1449 switch (reg->id) { 1506 switch (reg->id) {
1450 case KVM_REG_PPC_IAC1: 1507 case KVM_REG_PPC_IAC1:
1508 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac1);
1509 break;
1451 case KVM_REG_PPC_IAC2: 1510 case KVM_REG_PPC_IAC2:
1511 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac2);
1512 break;
1513#if CONFIG_PPC_ADV_DEBUG_IACS > 2
1452 case KVM_REG_PPC_IAC3: 1514 case KVM_REG_PPC_IAC3:
1515 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac3);
1516 break;
1453 case KVM_REG_PPC_IAC4: 1517 case KVM_REG_PPC_IAC4:
1454 i = reg->id - KVM_REG_PPC_IAC1; 1518 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac4);
1455 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac[i]);
1456 break; 1519 break;
1520#endif
1457 case KVM_REG_PPC_DAC1: 1521 case KVM_REG_PPC_DAC1:
1522 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac1);
1523 break;
1458 case KVM_REG_PPC_DAC2: 1524 case KVM_REG_PPC_DAC2:
1459 i = reg->id - KVM_REG_PPC_DAC1; 1525 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2);
1460 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac[i]);
1461 break; 1526 break;
1462 case KVM_REG_PPC_EPR: { 1527 case KVM_REG_PPC_EPR: {
1463 u32 epr = get_guest_epr(vcpu); 1528 u32 epr = get_guest_epr(vcpu);
@@ -1476,10 +1541,13 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1476 val = get_reg_val(reg->id, vcpu->arch.tsr); 1541 val = get_reg_val(reg->id, vcpu->arch.tsr);
1477 break; 1542 break;
1478 case KVM_REG_PPC_DEBUG_INST: 1543 case KVM_REG_PPC_DEBUG_INST:
1479 val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV); 1544 val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV_DEBUG);
1545 break;
1546 case KVM_REG_PPC_VRSAVE:
1547 val = get_reg_val(reg->id, vcpu->arch.vrsave);
1480 break; 1548 break;
1481 default: 1549 default:
1482 r = kvmppc_get_one_reg(vcpu, reg->id, &val); 1550 r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
1483 break; 1551 break;
1484 } 1552 }
1485 1553
@@ -1497,7 +1565,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1497 int r = 0; 1565 int r = 0;
1498 union kvmppc_one_reg val; 1566 union kvmppc_one_reg val;
1499 int size; 1567 int size;
1500 long int i;
1501 1568
1502 size = one_reg_size(reg->id); 1569 size = one_reg_size(reg->id);
1503 if (size > sizeof(val)) 1570 if (size > sizeof(val))
@@ -1508,16 +1575,24 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1508 1575
1509 switch (reg->id) { 1576 switch (reg->id) {
1510 case KVM_REG_PPC_IAC1: 1577 case KVM_REG_PPC_IAC1:
1578 vcpu->arch.dbg_reg.iac1 = set_reg_val(reg->id, val);
1579 break;
1511 case KVM_REG_PPC_IAC2: 1580 case KVM_REG_PPC_IAC2:
1581 vcpu->arch.dbg_reg.iac2 = set_reg_val(reg->id, val);
1582 break;
1583#if CONFIG_PPC_ADV_DEBUG_IACS > 2
1512 case KVM_REG_PPC_IAC3: 1584 case KVM_REG_PPC_IAC3:
1585 vcpu->arch.dbg_reg.iac3 = set_reg_val(reg->id, val);
1586 break;
1513 case KVM_REG_PPC_IAC4: 1587 case KVM_REG_PPC_IAC4:
1514 i = reg->id - KVM_REG_PPC_IAC1; 1588 vcpu->arch.dbg_reg.iac4 = set_reg_val(reg->id, val);
1515 vcpu->arch.dbg_reg.iac[i] = set_reg_val(reg->id, val);
1516 break; 1589 break;
1590#endif
1517 case KVM_REG_PPC_DAC1: 1591 case KVM_REG_PPC_DAC1:
1592 vcpu->arch.dbg_reg.dac1 = set_reg_val(reg->id, val);
1593 break;
1518 case KVM_REG_PPC_DAC2: 1594 case KVM_REG_PPC_DAC2:
1519 i = reg->id - KVM_REG_PPC_DAC1; 1595 vcpu->arch.dbg_reg.dac2 = set_reg_val(reg->id, val);
1520 vcpu->arch.dbg_reg.dac[i] = set_reg_val(reg->id, val);
1521 break; 1596 break;
1522 case KVM_REG_PPC_EPR: { 1597 case KVM_REG_PPC_EPR: {
1523 u32 new_epr = set_reg_val(reg->id, val); 1598 u32 new_epr = set_reg_val(reg->id, val);
@@ -1551,20 +1626,17 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1551 kvmppc_set_tcr(vcpu, tcr); 1626 kvmppc_set_tcr(vcpu, tcr);
1552 break; 1627 break;
1553 } 1628 }
1629 case KVM_REG_PPC_VRSAVE:
1630 vcpu->arch.vrsave = set_reg_val(reg->id, val);
1631 break;
1554 default: 1632 default:
1555 r = kvmppc_set_one_reg(vcpu, reg->id, &val); 1633 r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
1556 break; 1634 break;
1557 } 1635 }
1558 1636
1559 return r; 1637 return r;
1560} 1638}
1561 1639
1562int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1563 struct kvm_guest_debug *dbg)
1564{
1565 return -EINVAL;
1566}
1567
1568int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 1640int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1569{ 1641{
1570 return -ENOTSUPP; 1642 return -ENOTSUPP;
@@ -1589,12 +1661,12 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
1589 return -ENOTSUPP; 1661 return -ENOTSUPP;
1590} 1662}
1591 1663
1592void kvmppc_core_free_memslot(struct kvm_memory_slot *free, 1664void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
1593 struct kvm_memory_slot *dont) 1665 struct kvm_memory_slot *dont)
1594{ 1666{
1595} 1667}
1596 1668
1597int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, 1669int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
1598 unsigned long npages) 1670 unsigned long npages)
1599{ 1671{
1600 return 0; 1672 return 0;
@@ -1670,6 +1742,157 @@ void kvmppc_decrementer_func(unsigned long data)
1670 kvmppc_set_tsr_bits(vcpu, TSR_DIS); 1742 kvmppc_set_tsr_bits(vcpu, TSR_DIS);
1671} 1743}
1672 1744
1745static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
1746 uint64_t addr, int index)
1747{
1748 switch (index) {
1749 case 0:
1750 dbg_reg->dbcr0 |= DBCR0_IAC1;
1751 dbg_reg->iac1 = addr;
1752 break;
1753 case 1:
1754 dbg_reg->dbcr0 |= DBCR0_IAC2;
1755 dbg_reg->iac2 = addr;
1756 break;
1757#if CONFIG_PPC_ADV_DEBUG_IACS > 2
1758 case 2:
1759 dbg_reg->dbcr0 |= DBCR0_IAC3;
1760 dbg_reg->iac3 = addr;
1761 break;
1762 case 3:
1763 dbg_reg->dbcr0 |= DBCR0_IAC4;
1764 dbg_reg->iac4 = addr;
1765 break;
1766#endif
1767 default:
1768 return -EINVAL;
1769 }
1770
1771 dbg_reg->dbcr0 |= DBCR0_IDM;
1772 return 0;
1773}
1774
1775static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
1776 int type, int index)
1777{
1778 switch (index) {
1779 case 0:
1780 if (type & KVMPPC_DEBUG_WATCH_READ)
1781 dbg_reg->dbcr0 |= DBCR0_DAC1R;
1782 if (type & KVMPPC_DEBUG_WATCH_WRITE)
1783 dbg_reg->dbcr0 |= DBCR0_DAC1W;
1784 dbg_reg->dac1 = addr;
1785 break;
1786 case 1:
1787 if (type & KVMPPC_DEBUG_WATCH_READ)
1788 dbg_reg->dbcr0 |= DBCR0_DAC2R;
1789 if (type & KVMPPC_DEBUG_WATCH_WRITE)
1790 dbg_reg->dbcr0 |= DBCR0_DAC2W;
1791 dbg_reg->dac2 = addr;
1792 break;
1793 default:
1794 return -EINVAL;
1795 }
1796
1797 dbg_reg->dbcr0 |= DBCR0_IDM;
1798 return 0;
1799}
1800void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
1801{
1802 /* XXX: Add similar MSR protection for BookE-PR */
1803#ifdef CONFIG_KVM_BOOKE_HV
1804 BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
1805 if (set) {
1806 if (prot_bitmap & MSR_UCLE)
1807 vcpu->arch.shadow_msrp |= MSRP_UCLEP;
1808 if (prot_bitmap & MSR_DE)
1809 vcpu->arch.shadow_msrp |= MSRP_DEP;
1810 if (prot_bitmap & MSR_PMM)
1811 vcpu->arch.shadow_msrp |= MSRP_PMMP;
1812 } else {
1813 if (prot_bitmap & MSR_UCLE)
1814 vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
1815 if (prot_bitmap & MSR_DE)
1816 vcpu->arch.shadow_msrp &= ~MSRP_DEP;
1817 if (prot_bitmap & MSR_PMM)
1818 vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
1819 }
1820#endif
1821}
1822
1823int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1824 struct kvm_guest_debug *dbg)
1825{
1826 struct debug_reg *dbg_reg;
1827 int n, b = 0, w = 0;
1828
1829 if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
1830 vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
1831 vcpu->guest_debug = 0;
1832 kvm_guest_protect_msr(vcpu, MSR_DE, false);
1833 return 0;
1834 }
1835
1836 kvm_guest_protect_msr(vcpu, MSR_DE, true);
1837 vcpu->guest_debug = dbg->control;
1838 vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
1839 /* Set DBCR0_EDM in guest visible DBCR0 register. */
1840 vcpu->arch.dbg_reg.dbcr0 = DBCR0_EDM;
1841
1842 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
1843 vcpu->arch.shadow_dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
1844
1845 /* Code below handles only HW breakpoints */
1846 dbg_reg = &(vcpu->arch.shadow_dbg_reg);
1847
1848#ifdef CONFIG_KVM_BOOKE_HV
1849 /*
 1850	 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1.
1851 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0
1852 */
1853 dbg_reg->dbcr1 = 0;
1854 dbg_reg->dbcr2 = 0;
1855#else
1856 /*
1857 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1
1858 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
1859 * is set.
1860 */
1861 dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
1862 DBCR1_IAC4US;
1863 dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
1864#endif
1865
1866 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
1867 return 0;
1868
1869 for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
1870 uint64_t addr = dbg->arch.bp[n].addr;
1871 uint32_t type = dbg->arch.bp[n].type;
1872
1873 if (type == KVMPPC_DEBUG_NONE)
1874 continue;
1875
 1876		if (type & ~(KVMPPC_DEBUG_WATCH_READ |
1877 KVMPPC_DEBUG_WATCH_WRITE |
1878 KVMPPC_DEBUG_BREAKPOINT))
1879 return -EINVAL;
1880
1881 if (type & KVMPPC_DEBUG_BREAKPOINT) {
1882 /* Setting H/W breakpoint */
1883 if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
1884 return -EINVAL;
1885 } else {
1886 /* Setting H/W watchpoint */
1887 if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
1888 type, w++))
1889 return -EINVAL;
1890 }
1891 }
1892
1893 return 0;
1894}
1895
1673void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 1896void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1674{ 1897{
1675 vcpu->cpu = smp_processor_id(); 1898 vcpu->cpu = smp_processor_id();
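
The loop at the end of kvm_arch_vcpu_ioctl_set_guest_debug() deals hardware slots out in request order: breakpoints consume IACs, watchpoints consume DACs, and exhausting either class fails the whole ioctl. A toy version of that allocation; the constants and type encoding are stand-ins for KVMPPC_BOOKE_IAC_NUM/DAC_NUM and the KVMPPC_DEBUG_* flags:

#define N_IAC 4   /* stand-in for KVMPPC_BOOKE_IAC_NUM */
#define N_DAC 2   /* stand-in for KVMPPC_BOOKE_DAC_NUM */

#define T_NONE  0
#define T_BREAK 1            /* instruction breakpoint */
#define T_WATCH 2            /* data watchpoint (read, write or both) */

struct bp_req { unsigned long addr; int type; };   /* dbg->arch.bp[n] */

static int place_requests(const struct bp_req *req, int n)
{
	int b = 0, w = 0, i;

	for (i = 0; i < n; i++) {
		if (req[i].type == T_NONE)
			continue;
		if (req[i].type & T_BREAK) {
			if (b >= N_IAC)
				return -1;   /* -EINVAL upstream */
			b++;                 /* would program IAC{b} */
		} else {
			if (w >= N_DAC)
				return -1;
			w++;                 /* would program DAC{w} */
		}
	}
	return 0;
}

int main(void)
{
	struct bp_req req[2] = { { 0x1000, T_BREAK }, { 0x2000, T_WATCH } };

	return place_requests(req, 2);
}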
@@ -1680,6 +1903,44 @@ void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
1680{ 1903{
1681 current->thread.kvm_vcpu = NULL; 1904 current->thread.kvm_vcpu = NULL;
1682 vcpu->cpu = -1; 1905 vcpu->cpu = -1;
1906
1907 /* Clear pending debug event in DBSR */
1908 kvmppc_clear_dbsr();
1909}
1910
1911void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
1912{
1913 vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
1914}
1915
1916int kvmppc_core_init_vm(struct kvm *kvm)
1917{
1918 return kvm->arch.kvm_ops->init_vm(kvm);
1919}
1920
1921struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
1922{
1923 return kvm->arch.kvm_ops->vcpu_create(kvm, id);
1924}
1925
1926void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
1927{
1928 vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
1929}
1930
1931void kvmppc_core_destroy_vm(struct kvm *kvm)
1932{
1933 kvm->arch.kvm_ops->destroy_vm(kvm);
1934}
1935
1936void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1937{
1938 vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
1939}
1940
1941void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
1942{
1943 vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
1683} 1944}
1684 1945
1685int __init kvmppc_booke_init(void) 1946int __init kvmppc_booke_init(void)
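
The block of wrappers just above is the core move of this series: booke.c no longer links against exactly one kvmppc_core_*() implementation, it dispatches through a kvm_ops table hung off the VM, so e500 and e500mc (and, on Book3S, HV and PR) can coexist as modules. A self-contained sketch of the pattern; all names here are illustrative:

#include <stdio.h>

struct vm;

struct vm_ops {
	int  (*init_vm)(struct vm *vm);
	void (*destroy_vm)(struct vm *vm);
};

struct vm { const struct vm_ops *ops; };

/* The common wrappers, as in booke.c above. */
static int core_init_vm(struct vm *vm)     { return vm->ops->init_vm(vm); }
static void core_destroy_vm(struct vm *vm) { vm->ops->destroy_vm(vm); }

/* One backend, in the role of kvm_ops_e500. */
static int e500_init_vm(struct vm *vm)     { (void)vm; puts("e500 init"); return 0; }
static void e500_destroy_vm(struct vm *vm) { (void)vm; puts("e500 destroy"); }

static const struct vm_ops e500_ops = { e500_init_vm, e500_destroy_vm };

int main(void)
{
	struct vm vm = { &e500_ops };   /* chosen once at VM creation */

	core_init_vm(&vm);
	core_destroy_vm(&vm);
	return 0;
}

The cost is one indirect call per operation; the benefit is that the backend choice becomes a per-VM run-time decision instead of a Kconfig-time one.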
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 5fd1ba693579..09bfd9bc7cf8 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -99,6 +99,30 @@ enum int_class {
99 99
100void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type); 100void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
101 101
102extern void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu);
103extern int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
104 unsigned int inst, int *advance);
105extern int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn,
106 ulong spr_val);
107extern int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn,
108 ulong *spr_val);
109extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
110extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
111 struct kvm_vcpu *vcpu,
112 unsigned int inst, int *advance);
113extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
114 ulong spr_val);
115extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
116 ulong *spr_val);
125
102/* 126/*
103 * Load up guest vcpu FP state if it's needed. 127 * Load up guest vcpu FP state if it's needed.
 104 * It also sets MSR_FP in the thread so that the host knows 128
@@ -129,4 +153,9 @@ static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
129 giveup_fpu(current); 153 giveup_fpu(current);
130#endif 154#endif
131} 155}
156
157static inline void kvmppc_clear_dbsr(void)
158{
159 mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));
160}
132#endif /* __KVM_BOOKE_H__ */ 161#endif /* __KVM_BOOKE_H__ */
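
kvmppc_clear_dbsr() reads like a no-op but depends on DBSR being write-one-to-clear: writing back the value just read acknowledges exactly the events that were pending, without racing against bits that become set afterwards. A compilable model with a plain variable standing in for the SPR:

#include <stdint.h>
#include <stdio.h>

static uint32_t dbsr = 0xa1;   /* some pending debug events */

static uint32_t mfspr_dbsr(void)    { return dbsr; }
static void mtspr_dbsr(uint32_t v)  { dbsr &= ~v; }   /* W1C semantics */

int main(void)
{
	mtspr_dbsr(mfspr_dbsr());   /* the one-liner in booke.h */
	printf("%#x\n", dbsr);      /* 0: all pending events acked */
	return 0;
}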
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index ce6b73c29612..497b142f651c 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -305,7 +305,7 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
305{ 305{
306} 306}
307 307
308void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 308static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
309{ 309{
310 kvmppc_booke_vcpu_load(vcpu, cpu); 310 kvmppc_booke_vcpu_load(vcpu, cpu);
311 311
@@ -313,7 +313,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
313 kvmppc_e500_recalc_shadow_pid(to_e500(vcpu)); 313 kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
314} 314}
315 315
316void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) 316static void kvmppc_core_vcpu_put_e500(struct kvm_vcpu *vcpu)
317{ 317{
318#ifdef CONFIG_SPE 318#ifdef CONFIG_SPE
319 if (vcpu->arch.shadow_msr & MSR_SPE) 319 if (vcpu->arch.shadow_msr & MSR_SPE)
@@ -367,7 +367,8 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
367 return 0; 367 return 0;
368} 368}
369 369
370void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 370static int kvmppc_core_get_sregs_e500(struct kvm_vcpu *vcpu,
371 struct kvm_sregs *sregs)
371{ 372{
372 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 373 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
373 374
@@ -388,9 +389,11 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
388 389
389 kvmppc_get_sregs_ivor(vcpu, sregs); 390 kvmppc_get_sregs_ivor(vcpu, sregs);
390 kvmppc_get_sregs_e500_tlb(vcpu, sregs); 391 kvmppc_get_sregs_e500_tlb(vcpu, sregs);
392 return 0;
391} 393}
392 394
393int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 395static int kvmppc_core_set_sregs_e500(struct kvm_vcpu *vcpu,
396 struct kvm_sregs *sregs)
394{ 397{
395 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 398 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
396 int ret; 399 int ret;
@@ -425,21 +428,22 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
425 return kvmppc_set_sregs_ivor(vcpu, sregs); 428 return kvmppc_set_sregs_ivor(vcpu, sregs);
426} 429}
427 430
428int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, 431static int kvmppc_get_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
429 union kvmppc_one_reg *val) 432 union kvmppc_one_reg *val)
430{ 433{
431 int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); 434 int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
432 return r; 435 return r;
433} 436}
434 437
435int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, 438static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
436 union kvmppc_one_reg *val) 439 union kvmppc_one_reg *val)
437{ 440{
438 int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); 441 int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
439 return r; 442 return r;
440} 443}
441 444
442struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) 445static struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm,
446 unsigned int id)
443{ 447{
444 struct kvmppc_vcpu_e500 *vcpu_e500; 448 struct kvmppc_vcpu_e500 *vcpu_e500;
445 struct kvm_vcpu *vcpu; 449 struct kvm_vcpu *vcpu;
@@ -481,7 +485,7 @@ out:
481 return ERR_PTR(err); 485 return ERR_PTR(err);
482} 486}
483 487
484void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) 488static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
485{ 489{
486 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 490 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
487 491
@@ -492,15 +496,32 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
492 kmem_cache_free(kvm_vcpu_cache, vcpu_e500); 496 kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
493} 497}
494 498
495int kvmppc_core_init_vm(struct kvm *kvm) 499static int kvmppc_core_init_vm_e500(struct kvm *kvm)
496{ 500{
497 return 0; 501 return 0;
498} 502}
499 503
500void kvmppc_core_destroy_vm(struct kvm *kvm) 504static void kvmppc_core_destroy_vm_e500(struct kvm *kvm)
501{ 505{
502} 506}
503 507
508static struct kvmppc_ops kvm_ops_e500 = {
509 .get_sregs = kvmppc_core_get_sregs_e500,
510 .set_sregs = kvmppc_core_set_sregs_e500,
511 .get_one_reg = kvmppc_get_one_reg_e500,
512 .set_one_reg = kvmppc_set_one_reg_e500,
513 .vcpu_load = kvmppc_core_vcpu_load_e500,
514 .vcpu_put = kvmppc_core_vcpu_put_e500,
515 .vcpu_create = kvmppc_core_vcpu_create_e500,
516 .vcpu_free = kvmppc_core_vcpu_free_e500,
517 .mmu_destroy = kvmppc_mmu_destroy_e500,
518 .init_vm = kvmppc_core_init_vm_e500,
519 .destroy_vm = kvmppc_core_destroy_vm_e500,
520 .emulate_op = kvmppc_core_emulate_op_e500,
521 .emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
522 .emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
523};
524
504static int __init kvmppc_e500_init(void) 525static int __init kvmppc_e500_init(void)
505{ 526{
506 int r, i; 527 int r, i;
@@ -512,11 +533,11 @@ static int __init kvmppc_e500_init(void)
512 533
513 r = kvmppc_core_check_processor_compat(); 534 r = kvmppc_core_check_processor_compat();
514 if (r) 535 if (r)
515 return r; 536 goto err_out;
516 537
517 r = kvmppc_booke_init(); 538 r = kvmppc_booke_init();
518 if (r) 539 if (r)
519 return r; 540 goto err_out;
520 541
521 /* copy extra E500 exception handlers */ 542 /* copy extra E500 exception handlers */
522 ivor[0] = mfspr(SPRN_IVOR32); 543 ivor[0] = mfspr(SPRN_IVOR32);
@@ -534,11 +555,19 @@ static int __init kvmppc_e500_init(void)
534 flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers + 555 flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
535 ivor[max_ivor] + handler_len); 556 ivor[max_ivor] + handler_len);
536 557
537 return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); 558 r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
559 if (r)
560 goto err_out;
561 kvm_ops_e500.owner = THIS_MODULE;
562 kvmppc_pr_ops = &kvm_ops_e500;
563
564err_out:
565 return r;
538} 566}
539 567
540static void __exit kvmppc_e500_exit(void) 568static void __exit kvmppc_e500_exit(void)
541{ 569{
570 kvmppc_pr_ops = NULL;
542 kvmppc_booke_exit(); 571 kvmppc_booke_exit();
543} 572}
544 573
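
kvmppc_e500_init() now publishes its ops pointer only as the very last step, after kvm_init() has succeeded, and the exit path withdraws it before anything is torn down, so common code can never observe a half-initialised backend. A sketch of that ordering; check_compat()/common_init()/common_exit() are hypothetical stand-ins for the kvmppc_* calls in the hunk:

struct ops { int dummy; };

static struct ops *pr_ops;          /* plays kvmppc_pr_ops */
static struct ops my_ops;

static int check_compat(void) { return 0; }   /* hypothetical stubs */
static int common_init(void)  { return 0; }
static void common_exit(void) { }

static int backend_init(void)
{
	int r;

	r = check_compat();
	if (r)
		goto err_out;
	r = common_init();
	if (r)
		goto err_out;
	pr_ops = &my_ops;       /* published last; fall through with r == 0 */
err_out:
	return r;
}

static void backend_exit(void)
{
	pr_ops = 0;             /* unpublished first */
	common_exit();
}

int main(void)
{
	int r = backend_init();

	backend_exit();
	return r;
}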
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index c2e5e98453a6..4fd9650eb018 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -117,7 +117,7 @@ static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
117#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW) 117#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
118#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW) 118#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
119#define MAS2_ATTRIB_MASK \ 119#define MAS2_ATTRIB_MASK \
120 (MAS2_X0 | MAS2_X1) 120 (MAS2_X0 | MAS2_X1 | MAS2_E | MAS2_G)
121#define MAS3_ATTRIB_MASK \ 121#define MAS3_ATTRIB_MASK \
122 (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \ 122 (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
123 | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK) 123 | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index b10a01243abd..89b7f821f6c4 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -26,6 +26,7 @@
26#define XOP_TLBRE 946 26#define XOP_TLBRE 946
27#define XOP_TLBWE 978 27#define XOP_TLBWE 978
28#define XOP_TLBILX 18 28#define XOP_TLBILX 18
29#define XOP_EHPRIV 270
29 30
30#ifdef CONFIG_KVM_E500MC 31#ifdef CONFIG_KVM_E500MC
31static int dbell2prio(ulong param) 32static int dbell2prio(ulong param)
@@ -82,8 +83,28 @@ static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
82} 83}
83#endif 84#endif
84 85
85int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, 86static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
86 unsigned int inst, int *advance) 87 unsigned int inst, int *advance)
88{
89 int emulated = EMULATE_DONE;
90
91 switch (get_oc(inst)) {
92 case EHPRIV_OC_DEBUG:
93 run->exit_reason = KVM_EXIT_DEBUG;
94 run->debug.arch.address = vcpu->arch.pc;
95 run->debug.arch.status = 0;
96 kvmppc_account_exit(vcpu, DEBUG_EXITS);
97 emulated = EMULATE_EXIT_USER;
98 *advance = 0;
99 break;
100 default:
101 emulated = EMULATE_FAIL;
102 }
103 return emulated;
104}
105
106int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
107 unsigned int inst, int *advance)
87{ 108{
88 int emulated = EMULATE_DONE; 109 int emulated = EMULATE_DONE;
89 int ra = get_ra(inst); 110 int ra = get_ra(inst);
@@ -130,6 +151,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
130 emulated = kvmppc_e500_emul_tlbivax(vcpu, ea); 151 emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
131 break; 152 break;
132 153
154 case XOP_EHPRIV:
155 emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst,
156 advance);
157 break;
158
133 default: 159 default:
134 emulated = EMULATE_FAIL; 160 emulated = EMULATE_FAIL;
135 } 161 }
@@ -146,7 +172,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
146 return emulated; 172 return emulated;
147} 173}
148 174
149int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) 175int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
150{ 176{
151 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 177 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
152 int emulated = EMULATE_DONE; 178 int emulated = EMULATE_DONE;
@@ -237,7 +263,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
237 return emulated; 263 return emulated;
238} 264}
239 265
240int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) 266int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
241{ 267{
242 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 268 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
243 int emulated = EMULATE_DONE; 269 int emulated = EMULATE_DONE;
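
The new EHPRIV case dispatches on the instruction's OC field; only EHPRIV_OC_DEBUG (OC 1) is recognised and converted into a KVM_EXIT_DEBUG without advancing the PC, so user space sees the trapping address itself. A toy decoder for the two fields involved; the shift/mask values follow the usual Book E layout but treat them as illustrative here:

#include <stdint.h>
#include <stdio.h>

#define XOP_EHPRIV 270
#define OC_DEBUG   1

static unsigned int get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }
static unsigned int get_oc(uint32_t inst)  { return (inst >> 11) & 0x7fff; }

int main(void)
{
	/* primary opcode 31, OC 1, extended opcode 270 */
	uint32_t inst = (31u << 26) | (OC_DEBUG << 11) | (XOP_EHPRIV << 1);

	if (get_xop(inst) == XOP_EHPRIV && get_oc(inst) == OC_DEBUG)
		printf("ehpriv debug trap at guest pc\n");
	return 0;
}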
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index 6d6f153b6c1d..ebca6b88ea5e 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -32,7 +32,7 @@
32#include <asm/kvm_ppc.h> 32#include <asm/kvm_ppc.h>
33 33
34#include "e500.h" 34#include "e500.h"
35#include "trace.h" 35#include "trace_booke.h"
36#include "timing.h" 36#include "timing.h"
37#include "e500_mmu_host.h" 37#include "e500_mmu_host.h"
38 38
@@ -536,7 +536,7 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
536 return get_tlb_raddr(gtlbe) | (eaddr & pgmask); 536 return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
537} 537}
538 538
539void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) 539void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu)
540{ 540{
541} 541}
542 542
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 1c6a9d729df4..ecf2247b13be 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -32,10 +32,11 @@
32#include <asm/kvm_ppc.h> 32#include <asm/kvm_ppc.h>
33 33
34#include "e500.h" 34#include "e500.h"
35#include "trace.h"
36#include "timing.h" 35#include "timing.h"
37#include "e500_mmu_host.h" 36#include "e500_mmu_host.h"
38 37
38#include "trace_booke.h"
39
39#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) 40#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
40 41
41static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; 42static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
@@ -253,6 +254,9 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
253 ref->pfn = pfn; 254 ref->pfn = pfn;
254 ref->flags |= E500_TLB_VALID; 255 ref->flags |= E500_TLB_VALID;
255 256
257 /* Mark the page accessed */
258 kvm_set_pfn_accessed(pfn);
259
256 if (tlbe_is_writable(gtlbe)) 260 if (tlbe_is_writable(gtlbe))
257 kvm_set_pfn_dirty(pfn); 261 kvm_set_pfn_dirty(pfn);
258} 262}
@@ -332,6 +336,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
332 unsigned long hva; 336 unsigned long hva;
333 int pfnmap = 0; 337 int pfnmap = 0;
334 int tsize = BOOK3E_PAGESZ_4K; 338 int tsize = BOOK3E_PAGESZ_4K;
339 int ret = 0;
340 unsigned long mmu_seq;
341 struct kvm *kvm = vcpu_e500->vcpu.kvm;
342
343 /* used to check for invalidations in progress */
344 mmu_seq = kvm->mmu_notifier_seq;
345 smp_rmb();
335 346
336 /* 347 /*
337 * Translate guest physical to true physical, acquiring 348 * Translate guest physical to true physical, acquiring
@@ -449,6 +460,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
449 gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); 460 gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
450 } 461 }
451 462
463 spin_lock(&kvm->mmu_lock);
464 if (mmu_notifier_retry(kvm, mmu_seq)) {
465 ret = -EAGAIN;
466 goto out;
467 }
468
452 kvmppc_e500_ref_setup(ref, gtlbe, pfn); 469 kvmppc_e500_ref_setup(ref, gtlbe, pfn);
453 470
454 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, 471 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -457,10 +474,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
457 /* Clear i-cache for new pages */ 474 /* Clear i-cache for new pages */
458 kvmppc_mmu_flush_icache(pfn); 475 kvmppc_mmu_flush_icache(pfn);
459 476
477out:
478 spin_unlock(&kvm->mmu_lock);
479
460 /* Drop refcount on page, so that mmu notifiers can clear it */ 480 /* Drop refcount on page, so that mmu notifiers can clear it */
461 kvm_release_pfn_clean(pfn); 481 kvm_release_pfn_clean(pfn);
462 482
463 return 0; 483 return ret;
464} 484}
465 485
466/* XXX only map the one-one case, for now use TLB0 */ 486/* XXX only map the one-one case, for now use TLB0 */
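
The e500_mmu_host.c changes are the canonical mmu_notifier race check: sample kvm->mmu_notifier_seq before the unlocked pfn lookup, then re-check it under kvm->mmu_lock and back out with -EAGAIN if an invalidation ran in between, so a stale pfn can never be installed in the shadow TLB. A single-threaded, compilable model of that protocol:

#include <stdio.h>

static unsigned long notifier_seq;   /* kvm->mmu_notifier_seq */

static int invalidation_ran;         /* set by a racing notifier */

static int shadow_map(void)
{
	unsigned long mmu_seq = notifier_seq;
	/* smp_rmb() here in the kernel */

	/* ... gfn_to_pfn() and size computation happen unlocked ... */
	if (invalidation_ran)
		notifier_seq++;      /* what the notifier would do */

	/* spin_lock(&kvm->mmu_lock) */
	if (mmu_seq != notifier_seq)
		return -1;           /* -EAGAIN: drop the pfn, retry */
	/* safe: install the shadow TLB entry, then unlock */
	return 0;
}

int main(void)
{
	printf("clean run: %d\n", shadow_map());
	invalidation_ran = 1;
	printf("raced run: %d\n", shadow_map());
	return 0;
}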
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 19c8379575f7..4132cd2fc171 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -110,7 +110,7 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
110 110
111static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu); 111static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
112 112
113void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 113static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
114{ 114{
115 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 115 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
116 116
@@ -147,7 +147,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
147 kvmppc_load_guest_fp(vcpu); 147 kvmppc_load_guest_fp(vcpu);
148} 148}
149 149
150void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) 150static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
151{ 151{
152 vcpu->arch.eplc = mfspr(SPRN_EPLC); 152 vcpu->arch.eplc = mfspr(SPRN_EPLC);
153 vcpu->arch.epsc = mfspr(SPRN_EPSC); 153 vcpu->arch.epsc = mfspr(SPRN_EPSC);
@@ -204,7 +204,8 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
204 return 0; 204 return 0;
205} 205}
206 206
207void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 207static int kvmppc_core_get_sregs_e500mc(struct kvm_vcpu *vcpu,
208 struct kvm_sregs *sregs)
208{ 209{
209 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 210 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
210 211
@@ -224,10 +225,11 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
224 sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL]; 225 sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
225 sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT]; 226 sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
226 227
227 kvmppc_get_sregs_ivor(vcpu, sregs); 228 return kvmppc_get_sregs_ivor(vcpu, sregs);
228} 229}
229 230
230int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 231static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu,
232 struct kvm_sregs *sregs)
231{ 233{
232 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 234 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
233 int ret; 235 int ret;
@@ -260,21 +262,22 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
260 return kvmppc_set_sregs_ivor(vcpu, sregs); 262 return kvmppc_set_sregs_ivor(vcpu, sregs);
261} 263}
262 264
263int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, 265static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
264 union kvmppc_one_reg *val) 266 union kvmppc_one_reg *val)
265{ 267{
266 int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); 268 int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
267 return r; 269 return r;
268} 270}
269 271
270int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, 272static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
271 union kvmppc_one_reg *val) 273 union kvmppc_one_reg *val)
272{ 274{
273 int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val); 275 int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
274 return r; 276 return r;
275} 277}
276 278
277struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) 279static struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm,
280 unsigned int id)
278{ 281{
279 struct kvmppc_vcpu_e500 *vcpu_e500; 282 struct kvmppc_vcpu_e500 *vcpu_e500;
280 struct kvm_vcpu *vcpu; 283 struct kvm_vcpu *vcpu;
@@ -315,7 +318,7 @@ out:
315 return ERR_PTR(err); 318 return ERR_PTR(err);
316} 319}
317 320
318void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) 321static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu)
319{ 322{
320 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 323 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
321 324
@@ -325,7 +328,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
325 kmem_cache_free(kvm_vcpu_cache, vcpu_e500); 328 kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
326} 329}
327 330
328int kvmppc_core_init_vm(struct kvm *kvm) 331static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
329{ 332{
330 int lpid; 333 int lpid;
331 334
@@ -337,27 +340,52 @@ int kvmppc_core_init_vm(struct kvm *kvm)
337 return 0; 340 return 0;
338} 341}
339 342
340void kvmppc_core_destroy_vm(struct kvm *kvm) 343static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
341{ 344{
342 kvmppc_free_lpid(kvm->arch.lpid); 345 kvmppc_free_lpid(kvm->arch.lpid);
343} 346}
344 347
348static struct kvmppc_ops kvm_ops_e500mc = {
349 .get_sregs = kvmppc_core_get_sregs_e500mc,
350 .set_sregs = kvmppc_core_set_sregs_e500mc,
351 .get_one_reg = kvmppc_get_one_reg_e500mc,
352 .set_one_reg = kvmppc_set_one_reg_e500mc,
353 .vcpu_load = kvmppc_core_vcpu_load_e500mc,
354 .vcpu_put = kvmppc_core_vcpu_put_e500mc,
355 .vcpu_create = kvmppc_core_vcpu_create_e500mc,
356 .vcpu_free = kvmppc_core_vcpu_free_e500mc,
357 .mmu_destroy = kvmppc_mmu_destroy_e500,
358 .init_vm = kvmppc_core_init_vm_e500mc,
359 .destroy_vm = kvmppc_core_destroy_vm_e500mc,
360 .emulate_op = kvmppc_core_emulate_op_e500,
361 .emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
362 .emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
363};
364
345static int __init kvmppc_e500mc_init(void) 365static int __init kvmppc_e500mc_init(void)
346{ 366{
347 int r; 367 int r;
348 368
349 r = kvmppc_booke_init(); 369 r = kvmppc_booke_init();
350 if (r) 370 if (r)
351 return r; 371 goto err_out;
352 372
353 kvmppc_init_lpid(64); 373 kvmppc_init_lpid(64);
354 kvmppc_claim_lpid(0); /* host */ 374 kvmppc_claim_lpid(0); /* host */
355 375
356 return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); 376 r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
377 if (r)
378 goto err_out;
379 kvm_ops_e500mc.owner = THIS_MODULE;
380 kvmppc_pr_ops = &kvm_ops_e500mc;
381
382err_out:
383 return r;
357} 384}
358 385
359static void __exit kvmppc_e500mc_exit(void) 386static void __exit kvmppc_e500mc_exit(void)
360{ 387{
388 kvmppc_pr_ops = NULL;
361 kvmppc_booke_exit(); 389 kvmppc_booke_exit();
362} 390}
363 391
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 751cd45f65a0..2f9a0873b44f 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -130,8 +130,8 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
130 case SPRN_PIR: break; 130 case SPRN_PIR: break;
131 131
132 default: 132 default:
133 emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, 133 emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn,
134 spr_val); 134 spr_val);
135 if (emulated == EMULATE_FAIL) 135 if (emulated == EMULATE_FAIL)
136 printk(KERN_INFO "mtspr: unknown spr " 136 printk(KERN_INFO "mtspr: unknown spr "
137 "0x%x\n", sprn); 137 "0x%x\n", sprn);
@@ -191,8 +191,8 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
191 spr_val = kvmppc_get_dec(vcpu, get_tb()); 191 spr_val = kvmppc_get_dec(vcpu, get_tb());
192 break; 192 break;
193 default: 193 default:
194 emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, 194 emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn,
195 &spr_val); 195 &spr_val);
196 if (unlikely(emulated == EMULATE_FAIL)) { 196 if (unlikely(emulated == EMULATE_FAIL)) {
197 printk(KERN_INFO "mfspr: unknown spr " 197 printk(KERN_INFO "mfspr: unknown spr "
198 "0x%x\n", sprn); 198 "0x%x\n", sprn);
@@ -464,7 +464,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
464 } 464 }
465 465
466 if (emulated == EMULATE_FAIL) { 466 if (emulated == EMULATE_FAIL) {
467 emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); 467 emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
468 &advance);
468 if (emulated == EMULATE_AGAIN) { 469 if (emulated == EMULATE_AGAIN) {
469 advance = 0; 470 advance = 0;
470 } else if (emulated == EMULATE_FAIL) { 471 } else if (emulated == EMULATE_FAIL) {
@@ -483,3 +484,4 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
483 484
484 return emulated; 485 return emulated;
485} 486}
487EXPORT_SYMBOL_GPL(kvmppc_emulate_instruction);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 07c0106fab76..9ae97686e9f4 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -26,6 +26,7 @@
26#include <linux/fs.h> 26#include <linux/fs.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/file.h> 28#include <linux/file.h>
29#include <linux/module.h>
29#include <asm/cputable.h> 30#include <asm/cputable.h>
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
31#include <asm/kvm_ppc.h> 32#include <asm/kvm_ppc.h>
@@ -39,6 +40,12 @@
39#define CREATE_TRACE_POINTS 40#define CREATE_TRACE_POINTS
40#include "trace.h" 41#include "trace.h"
41 42
43struct kvmppc_ops *kvmppc_hv_ops;
44EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
45struct kvmppc_ops *kvmppc_pr_ops;
46EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
47
48
42int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) 49int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
43{ 50{
44 return !!(v->arch.pending_exceptions) || 51 return !!(v->arch.pending_exceptions) ||
@@ -50,7 +57,6 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
50 return 1; 57 return 1;
51} 58}
52 59
53#ifndef CONFIG_KVM_BOOK3S_64_HV
54/* 60/*
55 * Common checks before entering the guest world. Call with interrupts 61 * Common checks before entering the guest world. Call with interrupts
56 * disabled. 62 * disabled.
@@ -125,7 +131,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
125 131
126 return r; 132 return r;
127} 133}
128#endif /* CONFIG_KVM_BOOK3S_64_HV */ 134EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
129 135
130int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) 136int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
131{ 137{
@@ -179,6 +185,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
179 185
180 return r; 186 return r;
181} 187}
188EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
182 189
183int kvmppc_sanity_check(struct kvm_vcpu *vcpu) 190int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
184{ 191{
@@ -192,11 +199,9 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
192 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) 199 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
193 goto out; 200 goto out;
194 201
195#ifdef CONFIG_KVM_BOOK3S_64_HV
196 /* HV KVM can only do PAPR mode for now */ 202 /* HV KVM can only do PAPR mode for now */
197 if (!vcpu->arch.papr_enabled) 203 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
198 goto out; 204 goto out;
199#endif
200 205
201#ifdef CONFIG_KVM_BOOKE_HV 206#ifdef CONFIG_KVM_BOOKE_HV
202 if (!cpu_has_feature(CPU_FTR_EMB_HV)) 207 if (!cpu_has_feature(CPU_FTR_EMB_HV))
@@ -209,6 +214,7 @@ out:
209 vcpu->arch.sane = r; 214 vcpu->arch.sane = r;
210 return r ? 0 : -EINVAL; 215 return r ? 0 : -EINVAL;
211} 216}
217EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
212 218
213int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) 219int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
214{ 220{
@@ -243,6 +249,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
243 249
244 return r; 250 return r;
245} 251}
252EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
246 253
247int kvm_arch_hardware_enable(void *garbage) 254int kvm_arch_hardware_enable(void *garbage)
248{ 255{
@@ -269,10 +276,35 @@ void kvm_arch_check_processor_compat(void *rtn)
269 276
270int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 277int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
271{ 278{
272 if (type) 279 struct kvmppc_ops *kvm_ops = NULL;
273 return -EINVAL; 280 /*
274 281 * if we have both HV and PR enabled, default is HV
282 */
283 if (type == 0) {
284 if (kvmppc_hv_ops)
285 kvm_ops = kvmppc_hv_ops;
286 else
287 kvm_ops = kvmppc_pr_ops;
288 if (!kvm_ops)
289 goto err_out;
290 } else if (type == KVM_VM_PPC_HV) {
291 if (!kvmppc_hv_ops)
292 goto err_out;
293 kvm_ops = kvmppc_hv_ops;
294 } else if (type == KVM_VM_PPC_PR) {
295 if (!kvmppc_pr_ops)
296 goto err_out;
297 kvm_ops = kvmppc_pr_ops;
298 } else
299 goto err_out;
300
301 if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
302 return -ENOENT;
303
304 kvm->arch.kvm_ops = kvm_ops;
275 return kvmppc_core_init_vm(kvm); 305 return kvmppc_core_init_vm(kvm);
306err_out:
307 return -EINVAL;
276} 308}
277 309
278void kvm_arch_destroy_vm(struct kvm *kvm) 310void kvm_arch_destroy_vm(struct kvm *kvm)
@@ -292,6 +324,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
292 kvmppc_core_destroy_vm(kvm); 324 kvmppc_core_destroy_vm(kvm);
293 325
294 mutex_unlock(&kvm->lock); 326 mutex_unlock(&kvm->lock);
327
328 /* drop the module reference */
329 module_put(kvm->arch.kvm_ops->owner);
295} 330}
296 331
297void kvm_arch_sync_events(struct kvm *kvm) 332void kvm_arch_sync_events(struct kvm *kvm)
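
The kvm_arch_init_vm()/kvm_arch_destroy_vm() hunks add an ownership rule on top of the ops table: creating a VM takes a reference on the backend module (failing with -ENOENT if it is unloading) and destroying the VM drops it, so a module can never disappear under a live guest. A model with counters standing in for try_module_get()/module_put():

#include <stdio.h>

static int module_refs;

static int try_module_get_stub(void) { module_refs++; return 1; }
static void module_put_stub(void)    { module_refs--; }

static int arch_init_vm(void)
{
	if (!try_module_get_stub())
		return -1;          /* -ENOENT upstream */
	/* ... kvmppc_core_init_vm() via the ops table ... */
	return 0;
}

static void arch_destroy_vm(void)
{
	/* ... kvmppc_core_destroy_vm() ... */
	module_put_stub();          /* drop the module reference */
}

int main(void)
{
	arch_init_vm();
	arch_destroy_vm();
	printf("refs=%d\n", module_refs);   /* 0 */
	return 0;
}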
@@ -301,6 +336,10 @@ void kvm_arch_sync_events(struct kvm *kvm)
301int kvm_dev_ioctl_check_extension(long ext) 336int kvm_dev_ioctl_check_extension(long ext)
302{ 337{
303 int r; 338 int r;
339 /* FIXME!!
 340	 * Should some of this be a vm ioctl? Is it possible now?
341 */
342 int hv_enabled = kvmppc_hv_ops ? 1 : 0;
304 343
305 switch (ext) { 344 switch (ext) {
306#ifdef CONFIG_BOOKE 345#ifdef CONFIG_BOOKE
@@ -320,22 +359,26 @@ int kvm_dev_ioctl_check_extension(long ext)
320 case KVM_CAP_DEVICE_CTRL: 359 case KVM_CAP_DEVICE_CTRL:
321 r = 1; 360 r = 1;
322 break; 361 break;
323#ifndef CONFIG_KVM_BOOK3S_64_HV
324 case KVM_CAP_PPC_PAIRED_SINGLES: 362 case KVM_CAP_PPC_PAIRED_SINGLES:
325 case KVM_CAP_PPC_OSI: 363 case KVM_CAP_PPC_OSI:
326 case KVM_CAP_PPC_GET_PVINFO: 364 case KVM_CAP_PPC_GET_PVINFO:
327#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) 365#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
328 case KVM_CAP_SW_TLB: 366 case KVM_CAP_SW_TLB:
329#endif 367#endif
330#ifdef CONFIG_KVM_MPIC 368 /* We support this only for PR */
331 case KVM_CAP_IRQ_MPIC: 369 r = !hv_enabled;
332#endif
333 r = 1;
334 break; 370 break;
371#ifdef CONFIG_KVM_MMIO
335 case KVM_CAP_COALESCED_MMIO: 372 case KVM_CAP_COALESCED_MMIO:
336 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 373 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
337 break; 374 break;
338#endif 375#endif
376#ifdef CONFIG_KVM_MPIC
377 case KVM_CAP_IRQ_MPIC:
378 r = 1;
379 break;
380#endif
381
339#ifdef CONFIG_PPC_BOOK3S_64 382#ifdef CONFIG_PPC_BOOK3S_64
340 case KVM_CAP_SPAPR_TCE: 383 case KVM_CAP_SPAPR_TCE:
341 case KVM_CAP_PPC_ALLOC_HTAB: 384 case KVM_CAP_PPC_ALLOC_HTAB:
@@ -346,32 +389,37 @@ int kvm_dev_ioctl_check_extension(long ext)
346 r = 1; 389 r = 1;
347 break; 390 break;
348#endif /* CONFIG_PPC_BOOK3S_64 */ 391#endif /* CONFIG_PPC_BOOK3S_64 */
349#ifdef CONFIG_KVM_BOOK3S_64_HV 392#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
350 case KVM_CAP_PPC_SMT: 393 case KVM_CAP_PPC_SMT:
351 r = threads_per_core; 394 if (hv_enabled)
395 r = threads_per_core;
396 else
397 r = 0;
352 break; 398 break;
353 case KVM_CAP_PPC_RMA: 399 case KVM_CAP_PPC_RMA:
354 r = 1; 400 r = hv_enabled;
355 /* PPC970 requires an RMA */ 401 /* PPC970 requires an RMA */
356 if (cpu_has_feature(CPU_FTR_ARCH_201)) 402 if (r && cpu_has_feature(CPU_FTR_ARCH_201))
357 r = 2; 403 r = 2;
358 break; 404 break;
359#endif 405#endif
360 case KVM_CAP_SYNC_MMU: 406 case KVM_CAP_SYNC_MMU:
361#ifdef CONFIG_KVM_BOOK3S_64_HV 407#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
362 r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0; 408 if (hv_enabled)
409 r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
410 else
411 r = 0;
363#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER) 412#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
364 r = 1; 413 r = 1;
365#else 414#else
366 r = 0; 415 r = 0;
367 break;
368#endif 416#endif
369#ifdef CONFIG_KVM_BOOK3S_64_HV 417 break;
418#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
370 case KVM_CAP_PPC_HTAB_FD: 419 case KVM_CAP_PPC_HTAB_FD:
371 r = 1; 420 r = hv_enabled;
372 break; 421 break;
373#endif 422#endif
374 break;
375 case KVM_CAP_NR_VCPUS: 423 case KVM_CAP_NR_VCPUS:
376 /* 424 /*
377 * Recommending a number of CPUs is somewhat arbitrary; we 425 * Recommending a number of CPUs is somewhat arbitrary; we
@@ -379,11 +427,10 @@ int kvm_dev_ioctl_check_extension(long ext)
379 * will have secondary threads "offline"), and for other KVM 427 * will have secondary threads "offline"), and for other KVM
380 * implementations just count online CPUs. 428 * implementations just count online CPUs.
381 */ 429 */
382#ifdef CONFIG_KVM_BOOK3S_64_HV 430 if (hv_enabled)
383 r = num_present_cpus(); 431 r = num_present_cpus();
384#else 432 else
385 r = num_online_cpus(); 433 r = num_online_cpus();
386#endif
387 break; 434 break;
388 case KVM_CAP_MAX_VCPUS: 435 case KVM_CAP_MAX_VCPUS:
389 r = KVM_MAX_VCPUS; 436 r = KVM_MAX_VCPUS;
@@ -407,15 +454,16 @@ long kvm_arch_dev_ioctl(struct file *filp,
407 return -EINVAL; 454 return -EINVAL;
408} 455}
409 456
410void kvm_arch_free_memslot(struct kvm_memory_slot *free, 457void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
411 struct kvm_memory_slot *dont) 458 struct kvm_memory_slot *dont)
412{ 459{
413 kvmppc_core_free_memslot(free, dont); 460 kvmppc_core_free_memslot(kvm, free, dont);
414} 461}
415 462
416int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) 463int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
464 unsigned long npages)
417{ 465{
418 return kvmppc_core_create_memslot(slot, npages); 466 return kvmppc_core_create_memslot(kvm, slot, npages);
419} 467}
420 468
421void kvm_arch_memslots_updated(struct kvm *kvm) 469void kvm_arch_memslots_updated(struct kvm *kvm)
@@ -659,6 +707,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
659 707
660 return EMULATE_DO_MMIO; 708 return EMULATE_DO_MMIO;
661} 709}
710EXPORT_SYMBOL_GPL(kvmppc_handle_load);
662 711
663/* Same as above, but sign extends */ 712/* Same as above, but sign extends */
664int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, 713int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
@@ -720,6 +769,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
720 769
721 return EMULATE_DO_MMIO; 770 return EMULATE_DO_MMIO;
722} 771}
772EXPORT_SYMBOL_GPL(kvmppc_handle_store);
723 773
724int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) 774int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
725{ 775{
@@ -1024,52 +1074,12 @@ long kvm_arch_vm_ioctl(struct file *filp,
1024 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce); 1074 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
1025 goto out; 1075 goto out;
1026 } 1076 }
1027#endif /* CONFIG_PPC_BOOK3S_64 */
1028
1029#ifdef CONFIG_KVM_BOOK3S_64_HV
1030 case KVM_ALLOCATE_RMA: {
1031 struct kvm_allocate_rma rma;
1032 struct kvm *kvm = filp->private_data;
1033
1034 r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
1035 if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
1036 r = -EFAULT;
1037 break;
1038 }
1039
1040 case KVM_PPC_ALLOCATE_HTAB: {
1041 u32 htab_order;
1042
1043 r = -EFAULT;
1044 if (get_user(htab_order, (u32 __user *)argp))
1045 break;
1046 r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
1047 if (r)
1048 break;
1049 r = -EFAULT;
1050 if (put_user(htab_order, (u32 __user *)argp))
1051 break;
1052 r = 0;
1053 break;
1054 }
1055
1056 case KVM_PPC_GET_HTAB_FD: {
1057 struct kvm_get_htab_fd ghf;
1058
1059 r = -EFAULT;
1060 if (copy_from_user(&ghf, argp, sizeof(ghf)))
1061 break;
1062 r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
1063 break;
1064 }
1065#endif /* CONFIG_KVM_BOOK3S_64_HV */
1066
1067#ifdef CONFIG_PPC_BOOK3S_64
1068 case KVM_PPC_GET_SMMU_INFO: { 1077 case KVM_PPC_GET_SMMU_INFO: {
1069 struct kvm_ppc_smmu_info info; 1078 struct kvm_ppc_smmu_info info;
1079 struct kvm *kvm = filp->private_data;
1070 1080
1071 memset(&info, 0, sizeof(info)); 1081 memset(&info, 0, sizeof(info));
1072 r = kvm_vm_ioctl_get_smmu_info(kvm, &info); 1082 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
1073 if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) 1083 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
1074 r = -EFAULT; 1084 r = -EFAULT;
1075 break; 1085 break;
@@ -1080,11 +1090,15 @@ long kvm_arch_vm_ioctl(struct file *filp,
1080 r = kvm_vm_ioctl_rtas_define_token(kvm, argp); 1090 r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
1081 break; 1091 break;
1082 } 1092 }
1083#endif /* CONFIG_PPC_BOOK3S_64 */ 1093 default: {
1094 struct kvm *kvm = filp->private_data;
1095 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
1096 }
1097#else /* CONFIG_PPC_BOOK3S_64 */
1084 default: 1098 default:
1085 r = -ENOTTY; 1099 r = -ENOTTY;
1100#endif
1086 } 1101 }
1087
1088out: 1102out:
1089 return r; 1103 return r;
1090} 1104}
@@ -1106,22 +1120,26 @@ long kvmppc_alloc_lpid(void)
1106 1120
1107 return lpid; 1121 return lpid;
1108} 1122}
1123EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
1109 1124
1110void kvmppc_claim_lpid(long lpid) 1125void kvmppc_claim_lpid(long lpid)
1111{ 1126{
1112 set_bit(lpid, lpid_inuse); 1127 set_bit(lpid, lpid_inuse);
1113} 1128}
1129EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
1114 1130
1115void kvmppc_free_lpid(long lpid) 1131void kvmppc_free_lpid(long lpid)
1116{ 1132{
1117 clear_bit(lpid, lpid_inuse); 1133 clear_bit(lpid, lpid_inuse);
1118} 1134}
1135EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
1119 1136
1120void kvmppc_init_lpid(unsigned long nr_lpids_param) 1137void kvmppc_init_lpid(unsigned long nr_lpids_param)
1121{ 1138{
1122 nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param); 1139 nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
1123 memset(lpid_inuse, 0, sizeof(lpid_inuse)); 1140 memset(lpid_inuse, 0, sizeof(lpid_inuse));
1124} 1141}
1142EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
1125 1143
1126int kvm_arch_init(void *opaque) 1144int kvm_arch_init(void *opaque)
1127{ 1145{
@@ -1130,4 +1148,5 @@ int kvm_arch_init(void *opaque)
1130 1148
1131void kvm_arch_exit(void) 1149void kvm_arch_exit(void)
1132{ 1150{
1151
1133} 1152}
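The ioctl rework above stops calling flavour-specific helpers directly and instead dispatches through a per-VM ops table (kvm->arch.kvm_ops); together with the new EXPORT_SYMBOL_GPL lines, this is what lets the PR and HV flavours live in separate modules. A minimal sketch of that table, inferred only from the two call sites visible in the hunks — the struct name and anything not shown there is an assumption:

	/* Sketch of the ops-table dispatch introduced above; only the
	 * two members actually used in the visible hunks are shown. */
	struct kvmppc_ops {
		int  (*get_smmu_info)(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
		long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
				      unsigned long arg);
	};

	/* kvm_arch_vm_ioctl() then falls back to the flavour module: */
	r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);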
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index e326489a5420..2e0e67ef3544 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -31,126 +31,6 @@ TRACE_EVENT(kvm_ppc_instr,
31 __entry->inst, __entry->pc, __entry->emulate) 31 __entry->inst, __entry->pc, __entry->emulate)
32); 32);
33 33
34#ifdef CONFIG_PPC_BOOK3S
35#define kvm_trace_symbol_exit \
36 {0x100, "SYSTEM_RESET"}, \
37 {0x200, "MACHINE_CHECK"}, \
38 {0x300, "DATA_STORAGE"}, \
39 {0x380, "DATA_SEGMENT"}, \
40 {0x400, "INST_STORAGE"}, \
41 {0x480, "INST_SEGMENT"}, \
42 {0x500, "EXTERNAL"}, \
43 {0x501, "EXTERNAL_LEVEL"}, \
44 {0x502, "EXTERNAL_HV"}, \
45 {0x600, "ALIGNMENT"}, \
46 {0x700, "PROGRAM"}, \
47 {0x800, "FP_UNAVAIL"}, \
48 {0x900, "DECREMENTER"}, \
49 {0x980, "HV_DECREMENTER"}, \
50 {0xc00, "SYSCALL"}, \
51 {0xd00, "TRACE"}, \
52 {0xe00, "H_DATA_STORAGE"}, \
53 {0xe20, "H_INST_STORAGE"}, \
54 {0xe40, "H_EMUL_ASSIST"}, \
55 {0xf00, "PERFMON"}, \
56 {0xf20, "ALTIVEC"}, \
57 {0xf40, "VSX"}
58#else
59#define kvm_trace_symbol_exit \
60 {0, "CRITICAL"}, \
61 {1, "MACHINE_CHECK"}, \
62 {2, "DATA_STORAGE"}, \
63 {3, "INST_STORAGE"}, \
64 {4, "EXTERNAL"}, \
65 {5, "ALIGNMENT"}, \
66 {6, "PROGRAM"}, \
67 {7, "FP_UNAVAIL"}, \
68 {8, "SYSCALL"}, \
69 {9, "AP_UNAVAIL"}, \
70 {10, "DECREMENTER"}, \
71 {11, "FIT"}, \
72 {12, "WATCHDOG"}, \
73 {13, "DTLB_MISS"}, \
74 {14, "ITLB_MISS"}, \
75 {15, "DEBUG"}, \
76 {32, "SPE_UNAVAIL"}, \
77 {33, "SPE_FP_DATA"}, \
78 {34, "SPE_FP_ROUND"}, \
79 {35, "PERFORMANCE_MONITOR"}, \
80 {36, "DOORBELL"}, \
81 {37, "DOORBELL_CRITICAL"}, \
82 {38, "GUEST_DBELL"}, \
83 {39, "GUEST_DBELL_CRIT"}, \
84 {40, "HV_SYSCALL"}, \
85 {41, "HV_PRIV"}
86#endif
87
88TRACE_EVENT(kvm_exit,
89 TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
90 TP_ARGS(exit_nr, vcpu),
91
92 TP_STRUCT__entry(
93 __field( unsigned int, exit_nr )
94 __field( unsigned long, pc )
95 __field( unsigned long, msr )
96 __field( unsigned long, dar )
97#ifdef CONFIG_KVM_BOOK3S_PR
98 __field( unsigned long, srr1 )
99#endif
100 __field( unsigned long, last_inst )
101 ),
102
103 TP_fast_assign(
104#ifdef CONFIG_KVM_BOOK3S_PR
105 struct kvmppc_book3s_shadow_vcpu *svcpu;
106#endif
107 __entry->exit_nr = exit_nr;
108 __entry->pc = kvmppc_get_pc(vcpu);
109 __entry->dar = kvmppc_get_fault_dar(vcpu);
110 __entry->msr = vcpu->arch.shared->msr;
111#ifdef CONFIG_KVM_BOOK3S_PR
112 svcpu = svcpu_get(vcpu);
113 __entry->srr1 = svcpu->shadow_srr1;
114 svcpu_put(svcpu);
115#endif
116 __entry->last_inst = vcpu->arch.last_inst;
117 ),
118
119 TP_printk("exit=%s"
120 " | pc=0x%lx"
121 " | msr=0x%lx"
122 " | dar=0x%lx"
123#ifdef CONFIG_KVM_BOOK3S_PR
124 " | srr1=0x%lx"
125#endif
126 " | last_inst=0x%lx"
127 ,
128 __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
129 __entry->pc,
130 __entry->msr,
131 __entry->dar,
132#ifdef CONFIG_KVM_BOOK3S_PR
133 __entry->srr1,
134#endif
135 __entry->last_inst
136 )
137);
138
139TRACE_EVENT(kvm_unmap_hva,
140 TP_PROTO(unsigned long hva),
141 TP_ARGS(hva),
142
143 TP_STRUCT__entry(
144 __field( unsigned long, hva )
145 ),
146
147 TP_fast_assign(
148 __entry->hva = hva;
149 ),
150
151 TP_printk("unmap hva 0x%lx\n", __entry->hva)
152);
153
154TRACE_EVENT(kvm_stlb_inval, 34TRACE_EVENT(kvm_stlb_inval,
155 TP_PROTO(unsigned int stlb_index), 35 TP_PROTO(unsigned int stlb_index),
156 TP_ARGS(stlb_index), 36 TP_ARGS(stlb_index),
@@ -236,315 +116,6 @@ TRACE_EVENT(kvm_check_requests,
236 __entry->cpu_nr, __entry->requests) 116 __entry->cpu_nr, __entry->requests)
237); 117);
238 118
239
240/*************************************************************************
241 * Book3S trace points *
242 *************************************************************************/
243
244#ifdef CONFIG_KVM_BOOK3S_PR
245
246TRACE_EVENT(kvm_book3s_reenter,
247 TP_PROTO(int r, struct kvm_vcpu *vcpu),
248 TP_ARGS(r, vcpu),
249
250 TP_STRUCT__entry(
251 __field( unsigned int, r )
252 __field( unsigned long, pc )
253 ),
254
255 TP_fast_assign(
256 __entry->r = r;
257 __entry->pc = kvmppc_get_pc(vcpu);
258 ),
259
260 TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
261);
262
263#ifdef CONFIG_PPC_BOOK3S_64
264
265TRACE_EVENT(kvm_book3s_64_mmu_map,
266 TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr,
267 struct kvmppc_pte *orig_pte),
268 TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),
269
270 TP_STRUCT__entry(
271 __field( unsigned char, flag_w )
272 __field( unsigned char, flag_x )
273 __field( unsigned long, eaddr )
274 __field( unsigned long, hpteg )
275 __field( unsigned long, va )
276 __field( unsigned long long, vpage )
277 __field( unsigned long, hpaddr )
278 ),
279
280 TP_fast_assign(
281 __entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
282 __entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x';
283 __entry->eaddr = orig_pte->eaddr;
284 __entry->hpteg = hpteg;
285 __entry->va = va;
286 __entry->vpage = orig_pte->vpage;
287 __entry->hpaddr = hpaddr;
288 ),
289
290 TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
291 __entry->flag_w, __entry->flag_x, __entry->eaddr,
292 __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
293);
294
295#endif /* CONFIG_PPC_BOOK3S_64 */
296
297TRACE_EVENT(kvm_book3s_mmu_map,
298 TP_PROTO(struct hpte_cache *pte),
299 TP_ARGS(pte),
300
301 TP_STRUCT__entry(
302 __field( u64, host_vpn )
303 __field( u64, pfn )
304 __field( ulong, eaddr )
305 __field( u64, vpage )
306 __field( ulong, raddr )
307 __field( int, flags )
308 ),
309
310 TP_fast_assign(
311 __entry->host_vpn = pte->host_vpn;
312 __entry->pfn = pte->pfn;
313 __entry->eaddr = pte->pte.eaddr;
314 __entry->vpage = pte->pte.vpage;
315 __entry->raddr = pte->pte.raddr;
316 __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
317 (pte->pte.may_write ? 0x2 : 0) |
318 (pte->pte.may_execute ? 0x1 : 0);
319 ),
320
321 TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
322 __entry->host_vpn, __entry->pfn, __entry->eaddr,
323 __entry->vpage, __entry->raddr, __entry->flags)
324);
325
326TRACE_EVENT(kvm_book3s_mmu_invalidate,
327 TP_PROTO(struct hpte_cache *pte),
328 TP_ARGS(pte),
329
330 TP_STRUCT__entry(
331 __field( u64, host_vpn )
332 __field( u64, pfn )
333 __field( ulong, eaddr )
334 __field( u64, vpage )
335 __field( ulong, raddr )
336 __field( int, flags )
337 ),
338
339 TP_fast_assign(
340 __entry->host_vpn = pte->host_vpn;
341 __entry->pfn = pte->pfn;
342 __entry->eaddr = pte->pte.eaddr;
343 __entry->vpage = pte->pte.vpage;
344 __entry->raddr = pte->pte.raddr;
345 __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
346 (pte->pte.may_write ? 0x2 : 0) |
347 (pte->pte.may_execute ? 0x1 : 0);
348 ),
349
350 TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
351 __entry->host_vpn, __entry->pfn, __entry->eaddr,
352 __entry->vpage, __entry->raddr, __entry->flags)
353);
354
355TRACE_EVENT(kvm_book3s_mmu_flush,
356 TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
357 unsigned long long p2),
358 TP_ARGS(type, vcpu, p1, p2),
359
360 TP_STRUCT__entry(
361 __field( int, count )
362 __field( unsigned long long, p1 )
363 __field( unsigned long long, p2 )
364 __field( const char *, type )
365 ),
366
367 TP_fast_assign(
368 __entry->count = to_book3s(vcpu)->hpte_cache_count;
369 __entry->p1 = p1;
370 __entry->p2 = p2;
371 __entry->type = type;
372 ),
373
374 TP_printk("Flush %d %sPTEs: %llx - %llx",
375 __entry->count, __entry->type, __entry->p1, __entry->p2)
376);
377
378TRACE_EVENT(kvm_book3s_slb_found,
379 TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
380 TP_ARGS(gvsid, hvsid),
381
382 TP_STRUCT__entry(
383 __field( unsigned long long, gvsid )
384 __field( unsigned long long, hvsid )
385 ),
386
387 TP_fast_assign(
388 __entry->gvsid = gvsid;
389 __entry->hvsid = hvsid;
390 ),
391
392 TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
393);
394
395TRACE_EVENT(kvm_book3s_slb_fail,
396 TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
397 TP_ARGS(sid_map_mask, gvsid),
398
399 TP_STRUCT__entry(
400 __field( unsigned short, sid_map_mask )
401 __field( unsigned long long, gvsid )
402 ),
403
404 TP_fast_assign(
405 __entry->sid_map_mask = sid_map_mask;
406 __entry->gvsid = gvsid;
407 ),
408
409 TP_printk("%x/%x: %llx", __entry->sid_map_mask,
410 SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
411);
412
413TRACE_EVENT(kvm_book3s_slb_map,
414 TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
415 unsigned long long hvsid),
416 TP_ARGS(sid_map_mask, gvsid, hvsid),
417
418 TP_STRUCT__entry(
419 __field( unsigned short, sid_map_mask )
420 __field( unsigned long long, guest_vsid )
421 __field( unsigned long long, host_vsid )
422 ),
423
424 TP_fast_assign(
425 __entry->sid_map_mask = sid_map_mask;
426 __entry->guest_vsid = gvsid;
427 __entry->host_vsid = hvsid;
428 ),
429
430 TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
431 __entry->guest_vsid, __entry->host_vsid)
432);
433
434TRACE_EVENT(kvm_book3s_slbmte,
435 TP_PROTO(u64 slb_vsid, u64 slb_esid),
436 TP_ARGS(slb_vsid, slb_esid),
437
438 TP_STRUCT__entry(
439 __field( u64, slb_vsid )
440 __field( u64, slb_esid )
441 ),
442
443 TP_fast_assign(
444 __entry->slb_vsid = slb_vsid;
445 __entry->slb_esid = slb_esid;
446 ),
447
448 TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
449);
450
451#endif /* CONFIG_PPC_BOOK3S */
452
453
454/*************************************************************************
455 * Book3E trace points *
456 *************************************************************************/
457
458#ifdef CONFIG_BOOKE
459
460TRACE_EVENT(kvm_booke206_stlb_write,
461 TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
462 TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
463
464 TP_STRUCT__entry(
465 __field( __u32, mas0 )
466 __field( __u32, mas8 )
467 __field( __u32, mas1 )
468 __field( __u64, mas2 )
469 __field( __u64, mas7_3 )
470 ),
471
472 TP_fast_assign(
473 __entry->mas0 = mas0;
474 __entry->mas8 = mas8;
475 __entry->mas1 = mas1;
476 __entry->mas2 = mas2;
477 __entry->mas7_3 = mas7_3;
478 ),
479
480 TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
481 __entry->mas0, __entry->mas8, __entry->mas1,
482 __entry->mas2, __entry->mas7_3)
483);
484
485TRACE_EVENT(kvm_booke206_gtlb_write,
486 TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
487 TP_ARGS(mas0, mas1, mas2, mas7_3),
488
489 TP_STRUCT__entry(
490 __field( __u32, mas0 )
491 __field( __u32, mas1 )
492 __field( __u64, mas2 )
493 __field( __u64, mas7_3 )
494 ),
495
496 TP_fast_assign(
497 __entry->mas0 = mas0;
498 __entry->mas1 = mas1;
499 __entry->mas2 = mas2;
500 __entry->mas7_3 = mas7_3;
501 ),
502
503 TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
504 __entry->mas0, __entry->mas1,
505 __entry->mas2, __entry->mas7_3)
506);
507
508TRACE_EVENT(kvm_booke206_ref_release,
509 TP_PROTO(__u64 pfn, __u32 flags),
510 TP_ARGS(pfn, flags),
511
512 TP_STRUCT__entry(
513 __field( __u64, pfn )
514 __field( __u32, flags )
515 ),
516
517 TP_fast_assign(
518 __entry->pfn = pfn;
519 __entry->flags = flags;
520 ),
521
522 TP_printk("pfn=%llx flags=%x",
523 __entry->pfn, __entry->flags)
524);
525
526TRACE_EVENT(kvm_booke_queue_irqprio,
527 TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
528 TP_ARGS(vcpu, priority),
529
530 TP_STRUCT__entry(
531 __field( __u32, cpu_nr )
532 __field( __u32, priority )
533 __field( unsigned long, pending )
534 ),
535
536 TP_fast_assign(
537 __entry->cpu_nr = vcpu->vcpu_id;
538 __entry->priority = priority;
539 __entry->pending = vcpu->arch.pending_exceptions;
540 ),
541
542 TP_printk("vcpu=%x prio=%x pending=%lx",
543 __entry->cpu_nr, __entry->priority, __entry->pending)
544);
545
546#endif
547
548#endif /* _TRACE_KVM_H */ 119#endif /* _TRACE_KVM_H */
549 120
550/* This part must be outside protection */ 121/* This part must be outside protection */
diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h
new file mode 100644
index 000000000000..f7537cf26ce7
--- /dev/null
+++ b/arch/powerpc/kvm/trace_booke.h
@@ -0,0 +1,177 @@
1#if !defined(_TRACE_KVM_BOOKE_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_KVM_BOOKE_H
3
4#include <linux/tracepoint.h>
5
6#undef TRACE_SYSTEM
7#define TRACE_SYSTEM kvm_booke
8#define TRACE_INCLUDE_PATH .
9#define TRACE_INCLUDE_FILE trace_booke
10
11#define kvm_trace_symbol_exit \
12 {0, "CRITICAL"}, \
13 {1, "MACHINE_CHECK"}, \
14 {2, "DATA_STORAGE"}, \
15 {3, "INST_STORAGE"}, \
16 {4, "EXTERNAL"}, \
17 {5, "ALIGNMENT"}, \
18 {6, "PROGRAM"}, \
19 {7, "FP_UNAVAIL"}, \
20 {8, "SYSCALL"}, \
21 {9, "AP_UNAVAIL"}, \
22 {10, "DECREMENTER"}, \
23 {11, "FIT"}, \
24 {12, "WATCHDOG"}, \
25 {13, "DTLB_MISS"}, \
26 {14, "ITLB_MISS"}, \
27 {15, "DEBUG"}, \
28 {32, "SPE_UNAVAIL"}, \
29 {33, "SPE_FP_DATA"}, \
30 {34, "SPE_FP_ROUND"}, \
31 {35, "PERFORMANCE_MONITOR"}, \
32 {36, "DOORBELL"}, \
33 {37, "DOORBELL_CRITICAL"}, \
34 {38, "GUEST_DBELL"}, \
35 {39, "GUEST_DBELL_CRIT"}, \
36 {40, "HV_SYSCALL"}, \
37 {41, "HV_PRIV"}
38
39TRACE_EVENT(kvm_exit,
40 TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
41 TP_ARGS(exit_nr, vcpu),
42
43 TP_STRUCT__entry(
44 __field( unsigned int, exit_nr )
45 __field( unsigned long, pc )
46 __field( unsigned long, msr )
47 __field( unsigned long, dar )
48 __field( unsigned long, last_inst )
49 ),
50
51 TP_fast_assign(
52 __entry->exit_nr = exit_nr;
53 __entry->pc = kvmppc_get_pc(vcpu);
54 __entry->dar = kvmppc_get_fault_dar(vcpu);
55 __entry->msr = vcpu->arch.shared->msr;
56 __entry->last_inst = vcpu->arch.last_inst;
57 ),
58
59 TP_printk("exit=%s"
60 " | pc=0x%lx"
61 " | msr=0x%lx"
62 " | dar=0x%lx"
63 " | last_inst=0x%lx"
64 ,
65 __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
66 __entry->pc,
67 __entry->msr,
68 __entry->dar,
69 __entry->last_inst
70 )
71);
72
73TRACE_EVENT(kvm_unmap_hva,
74 TP_PROTO(unsigned long hva),
75 TP_ARGS(hva),
76
77 TP_STRUCT__entry(
78 __field( unsigned long, hva )
79 ),
80
81 TP_fast_assign(
82 __entry->hva = hva;
83 ),
84
85 TP_printk("unmap hva 0x%lx\n", __entry->hva)
86);
87
88TRACE_EVENT(kvm_booke206_stlb_write,
89 TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
90 TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
91
92 TP_STRUCT__entry(
93 __field( __u32, mas0 )
94 __field( __u32, mas8 )
95 __field( __u32, mas1 )
96 __field( __u64, mas2 )
97 __field( __u64, mas7_3 )
98 ),
99
100 TP_fast_assign(
101 __entry->mas0 = mas0;
102 __entry->mas8 = mas8;
103 __entry->mas1 = mas1;
104 __entry->mas2 = mas2;
105 __entry->mas7_3 = mas7_3;
106 ),
107
108 TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
109 __entry->mas0, __entry->mas8, __entry->mas1,
110 __entry->mas2, __entry->mas7_3)
111);
112
113TRACE_EVENT(kvm_booke206_gtlb_write,
114 TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
115 TP_ARGS(mas0, mas1, mas2, mas7_3),
116
117 TP_STRUCT__entry(
118 __field( __u32, mas0 )
119 __field( __u32, mas1 )
120 __field( __u64, mas2 )
121 __field( __u64, mas7_3 )
122 ),
123
124 TP_fast_assign(
125 __entry->mas0 = mas0;
126 __entry->mas1 = mas1;
127 __entry->mas2 = mas2;
128 __entry->mas7_3 = mas7_3;
129 ),
130
131 TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
132 __entry->mas0, __entry->mas1,
133 __entry->mas2, __entry->mas7_3)
134);
135
136TRACE_EVENT(kvm_booke206_ref_release,
137 TP_PROTO(__u64 pfn, __u32 flags),
138 TP_ARGS(pfn, flags),
139
140 TP_STRUCT__entry(
141 __field( __u64, pfn )
142 __field( __u32, flags )
143 ),
144
145 TP_fast_assign(
146 __entry->pfn = pfn;
147 __entry->flags = flags;
148 ),
149
150 TP_printk("pfn=%llx flags=%x",
151 __entry->pfn, __entry->flags)
152);
153
154TRACE_EVENT(kvm_booke_queue_irqprio,
155 TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
156 TP_ARGS(vcpu, priority),
157
158 TP_STRUCT__entry(
159 __field( __u32, cpu_nr )
160 __field( __u32, priority )
161 __field( unsigned long, pending )
162 ),
163
164 TP_fast_assign(
165 __entry->cpu_nr = vcpu->vcpu_id;
166 __entry->priority = priority;
167 __entry->pending = vcpu->arch.pending_exceptions;
168 ),
169
170 TP_printk("vcpu=%x prio=%x pending=%lx",
171 __entry->cpu_nr, __entry->priority, __entry->pending)
172);
173
174#endif
175
176/* This part must be outside protection */
177#include <trace/define_trace.h>
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
new file mode 100644
index 000000000000..8b22e4748344
--- /dev/null
+++ b/arch/powerpc/kvm/trace_pr.h
@@ -0,0 +1,297 @@
1
2#if !defined(_TRACE_KVM_PR_H) || defined(TRACE_HEADER_MULTI_READ)
3#define _TRACE_KVM_PR_H
4
5#include <linux/tracepoint.h>
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM kvm_pr
9#define TRACE_INCLUDE_PATH .
10#define TRACE_INCLUDE_FILE trace_pr
11
12#define kvm_trace_symbol_exit \
13 {0x100, "SYSTEM_RESET"}, \
14 {0x200, "MACHINE_CHECK"}, \
15 {0x300, "DATA_STORAGE"}, \
16 {0x380, "DATA_SEGMENT"}, \
17 {0x400, "INST_STORAGE"}, \
18 {0x480, "INST_SEGMENT"}, \
19 {0x500, "EXTERNAL"}, \
20 {0x501, "EXTERNAL_LEVEL"}, \
21 {0x502, "EXTERNAL_HV"}, \
22 {0x600, "ALIGNMENT"}, \
23 {0x700, "PROGRAM"}, \
24 {0x800, "FP_UNAVAIL"}, \
25 {0x900, "DECREMENTER"}, \
26 {0x980, "HV_DECREMENTER"}, \
27 {0xc00, "SYSCALL"}, \
28 {0xd00, "TRACE"}, \
29 {0xe00, "H_DATA_STORAGE"}, \
30 {0xe20, "H_INST_STORAGE"}, \
31 {0xe40, "H_EMUL_ASSIST"}, \
32 {0xf00, "PERFMON"}, \
33 {0xf20, "ALTIVEC"}, \
34 {0xf40, "VSX"}
35
36TRACE_EVENT(kvm_book3s_reenter,
37 TP_PROTO(int r, struct kvm_vcpu *vcpu),
38 TP_ARGS(r, vcpu),
39
40 TP_STRUCT__entry(
41 __field( unsigned int, r )
42 __field( unsigned long, pc )
43 ),
44
45 TP_fast_assign(
46 __entry->r = r;
47 __entry->pc = kvmppc_get_pc(vcpu);
48 ),
49
50 TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
51);
52
53#ifdef CONFIG_PPC_BOOK3S_64
54
55TRACE_EVENT(kvm_book3s_64_mmu_map,
56 TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr,
57 struct kvmppc_pte *orig_pte),
58 TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),
59
60 TP_STRUCT__entry(
61 __field( unsigned char, flag_w )
62 __field( unsigned char, flag_x )
63 __field( unsigned long, eaddr )
64 __field( unsigned long, hpteg )
65 __field( unsigned long, va )
66 __field( unsigned long long, vpage )
67 __field( unsigned long, hpaddr )
68 ),
69
70 TP_fast_assign(
71 __entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
72 __entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x';
73 __entry->eaddr = orig_pte->eaddr;
74 __entry->hpteg = hpteg;
75 __entry->va = va;
76 __entry->vpage = orig_pte->vpage;
77 __entry->hpaddr = hpaddr;
78 ),
79
80 TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
81 __entry->flag_w, __entry->flag_x, __entry->eaddr,
82 __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
83);
84
85#endif /* CONFIG_PPC_BOOK3S_64 */
86
87TRACE_EVENT(kvm_book3s_mmu_map,
88 TP_PROTO(struct hpte_cache *pte),
89 TP_ARGS(pte),
90
91 TP_STRUCT__entry(
92 __field( u64, host_vpn )
93 __field( u64, pfn )
94 __field( ulong, eaddr )
95 __field( u64, vpage )
96 __field( ulong, raddr )
97 __field( int, flags )
98 ),
99
100 TP_fast_assign(
101 __entry->host_vpn = pte->host_vpn;
102 __entry->pfn = pte->pfn;
103 __entry->eaddr = pte->pte.eaddr;
104 __entry->vpage = pte->pte.vpage;
105 __entry->raddr = pte->pte.raddr;
106 __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
107 (pte->pte.may_write ? 0x2 : 0) |
108 (pte->pte.may_execute ? 0x1 : 0);
109 ),
110
111 TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
112 __entry->host_vpn, __entry->pfn, __entry->eaddr,
113 __entry->vpage, __entry->raddr, __entry->flags)
114);
115
116TRACE_EVENT(kvm_book3s_mmu_invalidate,
117 TP_PROTO(struct hpte_cache *pte),
118 TP_ARGS(pte),
119
120 TP_STRUCT__entry(
121 __field( u64, host_vpn )
122 __field( u64, pfn )
123 __field( ulong, eaddr )
124 __field( u64, vpage )
125 __field( ulong, raddr )
126 __field( int, flags )
127 ),
128
129 TP_fast_assign(
130 __entry->host_vpn = pte->host_vpn;
131 __entry->pfn = pte->pfn;
132 __entry->eaddr = pte->pte.eaddr;
133 __entry->vpage = pte->pte.vpage;
134 __entry->raddr = pte->pte.raddr;
135 __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
136 (pte->pte.may_write ? 0x2 : 0) |
137 (pte->pte.may_execute ? 0x1 : 0);
138 ),
139
140 TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
141 __entry->host_vpn, __entry->pfn, __entry->eaddr,
142 __entry->vpage, __entry->raddr, __entry->flags)
143);
144
145TRACE_EVENT(kvm_book3s_mmu_flush,
146 TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
147 unsigned long long p2),
148 TP_ARGS(type, vcpu, p1, p2),
149
150 TP_STRUCT__entry(
151 __field( int, count )
152 __field( unsigned long long, p1 )
153 __field( unsigned long long, p2 )
154 __field( const char *, type )
155 ),
156
157 TP_fast_assign(
158 __entry->count = to_book3s(vcpu)->hpte_cache_count;
159 __entry->p1 = p1;
160 __entry->p2 = p2;
161 __entry->type = type;
162 ),
163
164 TP_printk("Flush %d %sPTEs: %llx - %llx",
165 __entry->count, __entry->type, __entry->p1, __entry->p2)
166);
167
168TRACE_EVENT(kvm_book3s_slb_found,
169 TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
170 TP_ARGS(gvsid, hvsid),
171
172 TP_STRUCT__entry(
173 __field( unsigned long long, gvsid )
174 __field( unsigned long long, hvsid )
175 ),
176
177 TP_fast_assign(
178 __entry->gvsid = gvsid;
179 __entry->hvsid = hvsid;
180 ),
181
182 TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
183);
184
185TRACE_EVENT(kvm_book3s_slb_fail,
186 TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
187 TP_ARGS(sid_map_mask, gvsid),
188
189 TP_STRUCT__entry(
190 __field( unsigned short, sid_map_mask )
191 __field( unsigned long long, gvsid )
192 ),
193
194 TP_fast_assign(
195 __entry->sid_map_mask = sid_map_mask;
196 __entry->gvsid = gvsid;
197 ),
198
199 TP_printk("%x/%x: %llx", __entry->sid_map_mask,
200 SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
201);
202
203TRACE_EVENT(kvm_book3s_slb_map,
204 TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
205 unsigned long long hvsid),
206 TP_ARGS(sid_map_mask, gvsid, hvsid),
207
208 TP_STRUCT__entry(
209 __field( unsigned short, sid_map_mask )
210 __field( unsigned long long, guest_vsid )
211 __field( unsigned long long, host_vsid )
212 ),
213
214 TP_fast_assign(
215 __entry->sid_map_mask = sid_map_mask;
216 __entry->guest_vsid = gvsid;
217 __entry->host_vsid = hvsid;
218 ),
219
220 TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
221 __entry->guest_vsid, __entry->host_vsid)
222);
223
224TRACE_EVENT(kvm_book3s_slbmte,
225 TP_PROTO(u64 slb_vsid, u64 slb_esid),
226 TP_ARGS(slb_vsid, slb_esid),
227
228 TP_STRUCT__entry(
229 __field( u64, slb_vsid )
230 __field( u64, slb_esid )
231 ),
232
233 TP_fast_assign(
234 __entry->slb_vsid = slb_vsid;
235 __entry->slb_esid = slb_esid;
236 ),
237
238 TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
239);
240
241TRACE_EVENT(kvm_exit,
242 TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
243 TP_ARGS(exit_nr, vcpu),
244
245 TP_STRUCT__entry(
246 __field( unsigned int, exit_nr )
247 __field( unsigned long, pc )
248 __field( unsigned long, msr )
249 __field( unsigned long, dar )
250 __field( unsigned long, srr1 )
251 __field( unsigned long, last_inst )
252 ),
253
254 TP_fast_assign(
255 __entry->exit_nr = exit_nr;
256 __entry->pc = kvmppc_get_pc(vcpu);
257 __entry->dar = kvmppc_get_fault_dar(vcpu);
258 __entry->msr = vcpu->arch.shared->msr;
259 __entry->srr1 = vcpu->arch.shadow_srr1;
260 __entry->last_inst = vcpu->arch.last_inst;
261 ),
262
263 TP_printk("exit=%s"
264 " | pc=0x%lx"
265 " | msr=0x%lx"
266 " | dar=0x%lx"
267 " | srr1=0x%lx"
268 " | last_inst=0x%lx"
269 ,
270 __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
271 __entry->pc,
272 __entry->msr,
273 __entry->dar,
274 __entry->srr1,
275 __entry->last_inst
276 )
277);
278
279TRACE_EVENT(kvm_unmap_hva,
280 TP_PROTO(unsigned long hva),
281 TP_ARGS(hva),
282
283 TP_STRUCT__entry(
284 __field( unsigned long, hva )
285 ),
286
287 TP_fast_assign(
288 __entry->hva = hva;
289 ),
290
291 TP_printk("unmap hva 0x%lx\n", __entry->hva)
292);
293
294#endif /* _TRACE_KVM_PR_H */
295
296/* This part must be outside protection */
297#include <trace/define_trace.h>
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 6747eece84af..7b6c10750179 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -287,9 +287,7 @@ void __dma_free_coherent(size_t size, void *vaddr)
287 pte_clear(&init_mm, addr, ptep); 287 pte_clear(&init_mm, addr, ptep);
288 if (pfn_valid(pfn)) { 288 if (pfn_valid(pfn)) {
289 struct page *page = pfn_to_page(pfn); 289 struct page *page = pfn_to_page(pfn);
290 290 __free_reserved_page(page);
291 ClearPageReserved(page);
292 __free_page(page);
293 } 291 }
294 } 292 }
295 addr += PAGE_SIZE; 293 addr += PAGE_SIZE;
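__free_reserved_page() folds the ClearPageReserved()/__free_page() pair into a single helper. From memory of include/linux/mm.h it also resets the page refcount, so treat the exact body below as an assumption rather than a quote:

	static inline void __free_reserved_page(struct page *page)
	{
		ClearPageReserved(page);
		init_page_count(page);	/* reserved pages carry no live refcount */
		__free_page(page);
	}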
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index d67db4bd672d..90bb6d9409bf 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -633,8 +633,6 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
633 633
634/* 634/*
635 * This function frees user-level page tables of a process. 635 * This function frees user-level page tables of a process.
636 *
637 * Must be called with pagetable lock held.
638 */ 636 */
639void hugetlb_free_pgd_range(struct mmu_gather *tlb, 637void hugetlb_free_pgd_range(struct mmu_gather *tlb,
640 unsigned long addr, unsigned long end, 638 unsigned long addr, unsigned long end,
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 33d67844062c..078d3e00a616 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -938,8 +938,7 @@ static void __init mark_reserved_regions_for_nid(int nid)
938 unsigned long start_pfn = physbase >> PAGE_SHIFT; 938 unsigned long start_pfn = physbase >> PAGE_SHIFT;
939 unsigned long end_pfn = PFN_UP(physbase + size); 939 unsigned long end_pfn = PFN_UP(physbase + size);
940 struct node_active_region node_ar; 940 struct node_active_region node_ar;
941 unsigned long node_end_pfn = node->node_start_pfn + 941 unsigned long node_end_pfn = pgdat_end_pfn(node);
942 node->node_spanned_pages;
943 942
944 /* 943 /*
945 * Check to make sure that this memblock.reserved area is 944 * Check to make sure that this memblock.reserved area is
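pgdat_end_pfn() is the generic accessor for exactly the open-coded sum it replaces here; its mmzone.h definition is essentially:

	static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
	{
		return pgdat->node_start_pfn + pgdat->node_spanned_pages;
	}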
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 6c856fb8c15b..5b9601715289 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -121,7 +121,10 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
121 ptepage = alloc_pages(flags, 0); 121 ptepage = alloc_pages(flags, 0);
122 if (!ptepage) 122 if (!ptepage)
123 return NULL; 123 return NULL;
124 pgtable_page_ctor(ptepage); 124 if (!pgtable_page_ctor(ptepage)) {
125 __free_page(ptepage);
126 return NULL;
127 }
125 return ptepage; 128 return ptepage;
126} 129}
127 130
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 536eec72c0f7..9d95786aa80f 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -378,6 +378,10 @@ static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
378 __GFP_REPEAT | __GFP_ZERO); 378 __GFP_REPEAT | __GFP_ZERO);
379 if (!page) 379 if (!page)
380 return NULL; 380 return NULL;
381 if (!kernel && !pgtable_page_ctor(page)) {
382 __free_page(page);
383 return NULL;
384 }
381 385
382 ret = page_address(page); 386 ret = page_address(page);
383 spin_lock(&mm->page_table_lock); 387 spin_lock(&mm->page_table_lock);
@@ -392,9 +396,6 @@ static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
392 } 396 }
393 spin_unlock(&mm->page_table_lock); 397 spin_unlock(&mm->page_table_lock);
394 398
395 if (!kernel)
396 pgtable_page_ctor(page);
397
398 return (pte_t *)ret; 399 return (pte_t *)ret;
399} 400}
400 401
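Both page-table hunks adopt the same rule: pgtable_page_ctor() can now fail (it may have to allocate a split page-table lock), so every caller must check the result and free the page itself. The caller-side shape, as a sketch:

	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {	/* ctor may fail: undo and bail */
		__free_page(page);
		return NULL;
	}
	/* only now is the page safe to use as a pte page */

The pgtable_64.c hunk additionally moves the call before the page is linked into the pte-fragment list, so a failed ctor can no longer leave a half-constructed page visible to other threads.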
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 475d4f26fc6e..ac3c2a10dafd 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -698,4 +698,5 @@ void bpf_jit_free(struct sk_filter *fp)
698{ 698{
699 if (fp->bpf_func != sk_run_filter) 699 if (fp->bpf_func != sk_run_filter)
700 module_free(NULL, fp->bpf_func); 700 module_free(NULL, fp->bpf_func);
701 kfree(fp);
701} 702}
diff --git a/arch/powerpc/platforms/512x/clock.c b/arch/powerpc/platforms/512x/clock.c
index e504166e089a..fd8a37653417 100644
--- a/arch/powerpc/platforms/512x/clock.c
+++ b/arch/powerpc/platforms/512x/clock.c
@@ -24,6 +24,7 @@
24#include <linux/mutex.h> 24#include <linux/mutex.h>
25#include <linux/io.h> 25#include <linux/io.h>
26 26
27#include <linux/of_address.h>
27#include <linux/of_platform.h> 28#include <linux/of_platform.h>
28#include <asm/mpc5xxx.h> 29#include <asm/mpc5xxx.h>
29#include <asm/mpc5121.h> 30#include <asm/mpc5121.h>
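This hunk and the long run of similar one- and two-line hunks below are all fallout from the OF header cleanup: prom.h/of_platform.h stop pulling in the other OF headers implicitly, so each file now includes exactly what it uses. As a rough guide to which helper lives where (stated from memory, so verify against the headers):

	#include <linux/of_address.h>	/* of_address_to_resource(), of_iomap() */
	#include <linux/of_irq.h>	/* irq_of_parse_and_map(), of_irq_parse_one() */
	#include <linux/of_fdt.h>	/* flat-device-tree helpers used at early boot */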
diff --git a/arch/powerpc/platforms/512x/pdm360ng.c b/arch/powerpc/platforms/512x/pdm360ng.c
index 24b314d7bd5f..116f2325b20b 100644
--- a/arch/powerpc/platforms/512x/pdm360ng.c
+++ b/arch/powerpc/platforms/512x/pdm360ng.c
@@ -14,6 +14,8 @@
14 14
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/of_address.h>
18#include <linux/of_fdt.h>
17#include <linux/of_platform.h> 19#include <linux/of_platform.h>
18 20
19#include <asm/machdep.h> 21#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/82xx/mpc8272_ads.c b/arch/powerpc/platforms/82xx/mpc8272_ads.c
index 30394b409b3f..6a14cf50f4a2 100644
--- a/arch/powerpc/platforms/82xx/mpc8272_ads.c
+++ b/arch/powerpc/platforms/82xx/mpc8272_ads.c
@@ -16,6 +16,8 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/fsl_devices.h> 18#include <linux/fsl_devices.h>
19#include <linux/of_address.h>
20#include <linux/of_fdt.h>
19#include <linux/of_platform.h> 21#include <linux/of_platform.h>
20#include <linux/io.h> 22#include <linux/io.h>
21 23
diff --git a/arch/powerpc/platforms/82xx/pq2fads.c b/arch/powerpc/platforms/82xx/pq2fads.c
index e1dceeec4994..e5f82ec8df17 100644
--- a/arch/powerpc/platforms/82xx/pq2fads.c
+++ b/arch/powerpc/platforms/82xx/pq2fads.c
@@ -15,6 +15,8 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/fsl_devices.h> 17#include <linux/fsl_devices.h>
18#include <linux/of_address.h>
19#include <linux/of_fdt.h>
18#include <linux/of_platform.h> 20#include <linux/of_platform.h>
19 21
20#include <asm/io.h> 22#include <asm/io.h>
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 1d769a29249f..3d9716ccd327 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -20,6 +20,8 @@
20#include <linux/freezer.h> 20#include <linux/freezer.h>
21#include <linux/suspend.h> 21#include <linux/suspend.h>
22#include <linux/fsl_devices.h> 22#include <linux/fsl_devices.h>
23#include <linux/of_address.h>
24#include <linux/of_irq.h>
23#include <linux/of_platform.h> 25#include <linux/of_platform.h>
24#include <linux/export.h> 26#include <linux/export.h>
25 27
diff --git a/arch/powerpc/platforms/85xx/c293pcie.c b/arch/powerpc/platforms/85xx/c293pcie.c
index 6208e49142bf..213d5b815827 100644
--- a/arch/powerpc/platforms/85xx/c293pcie.c
+++ b/arch/powerpc/platforms/85xx/c293pcie.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/stddef.h> 12#include <linux/stddef.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/of_fdt.h>
14#include <linux/of_platform.h> 15#include <linux/of_platform.h>
15 16
16#include <asm/machdep.h> 17#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index d0861a0d8360..eba78c85303f 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -5,6 +5,8 @@
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8
9#include <linux/of_irq.h>
8#include <linux/of_platform.h> 10#include <linux/of_platform.h>
9 11
10#include <sysdev/cpm2_pic.h> 12#include <sysdev/cpm2_pic.h>
diff --git a/arch/powerpc/platforms/85xx/ppa8548.c b/arch/powerpc/platforms/85xx/ppa8548.c
index 6a7704b92c3b..3daff7c63569 100644
--- a/arch/powerpc/platforms/85xx/ppa8548.c
+++ b/arch/powerpc/platforms/85xx/ppa8548.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/reboot.h> 20#include <linux/reboot.h>
21#include <linux/seq_file.h> 21#include <linux/seq_file.h>
22#include <linux/of_fdt.h>
22#include <linux/of_platform.h> 23#include <linux/of_platform.h>
23 24
24#include <asm/machdep.h> 25#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index 7179726ba5c5..b9197cea1854 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -16,6 +16,7 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/of_gpio.h> 18#include <linux/of_gpio.h>
19#include <linux/of_irq.h>
19#include <linux/workqueue.h> 20#include <linux/workqueue.h>
20#include <linux/reboot.h> 21#include <linux/reboot.h>
21#include <linux/interrupt.h> 22#include <linux/interrupt.h>
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 281b7f01df63..393f975ab397 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/of.h> 17#include <linux/of.h>
18#include <linux/of_address.h>
18#include <linux/kexec.h> 19#include <linux/kexec.h>
19#include <linux/highmem.h> 20#include <linux/highmem.h>
20#include <linux/cpu.h> 21#include <linux/cpu.h>
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index 3bbbf7489487..55a9682b9529 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -9,6 +9,8 @@
9 */ 9 */
10 10
11#include <linux/irq.h> 11#include <linux/irq.h>
12#include <linux/of_address.h>
13#include <linux/of_irq.h>
12#include <linux/of_platform.h> 14#include <linux/of_platform.h>
13#include <linux/io.h> 15#include <linux/io.h>
14 16
diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c
index 9982f57c98b9..d5b98c0f958a 100644
--- a/arch/powerpc/platforms/86xx/pic.c
+++ b/arch/powerpc/platforms/86xx/pic.c
@@ -10,6 +10,7 @@
10#include <linux/stddef.h> 10#include <linux/stddef.h>
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/of_irq.h>
13#include <linux/of_platform.h> 14#include <linux/of_platform.h>
14 15
15#include <asm/mpic.h> 16#include <asm/mpic.h>
diff --git a/arch/powerpc/platforms/8xx/ep88xc.c b/arch/powerpc/platforms/8xx/ep88xc.c
index 7d9ac6040d63..e62166681d08 100644
--- a/arch/powerpc/platforms/8xx/ep88xc.c
+++ b/arch/powerpc/platforms/8xx/ep88xc.c
@@ -10,6 +10,8 @@
10 */ 10 */
11 11
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/of_address.h>
14#include <linux/of_fdt.h>
13#include <linux/of_platform.h> 15#include <linux/of_platform.h>
14 16
15#include <asm/machdep.h> 17#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/8xx/mpc86xads_setup.c b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
index 866feff83c91..63084640c5c5 100644
--- a/arch/powerpc/platforms/8xx/mpc86xads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
@@ -15,6 +15,8 @@
15 */ 15 */
16 16
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/of_address.h>
19#include <linux/of_fdt.h>
18#include <linux/of_platform.h> 20#include <linux/of_platform.h>
19 21
20#include <asm/io.h> 22#include <asm/io.h>
diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
index 5d98398c2f5e..c1262581b63c 100644
--- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
@@ -25,6 +25,8 @@
25#include <linux/fs_uart_pd.h> 25#include <linux/fs_uart_pd.h>
26#include <linux/fsl_devices.h> 26#include <linux/fsl_devices.h>
27#include <linux/mii.h> 27#include <linux/mii.h>
28#include <linux/of_address.h>
29#include <linux/of_fdt.h>
28#include <linux/of_platform.h> 30#include <linux/of_platform.h>
29 31
30#include <asm/delay.h> 32#include <asm/delay.h>
diff --git a/arch/powerpc/platforms/8xx/tqm8xx_setup.c b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
index ef0778a0ca8f..251aba8759e4 100644
--- a/arch/powerpc/platforms/8xx/tqm8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
@@ -28,6 +28,7 @@
28#include <linux/fs_uart_pd.h> 28#include <linux/fs_uart_pd.h>
29#include <linux/fsl_devices.h> 29#include <linux/fsl_devices.h>
30#include <linux/mii.h> 30#include <linux/mii.h>
31#include <linux/of_fdt.h>
31#include <linux/of_platform.h> 32#include <linux/of_platform.h>
32 33
33#include <asm/delay.h> 34#include <asm/delay.h>
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
index 14be2bd358b8..4278acfa2ede 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
@@ -486,7 +486,6 @@ static __init int celleb_setup_pciex(struct device_node *node,
486 struct pci_controller *phb) 486 struct pci_controller *phb)
487{ 487{
488 struct resource r; 488 struct resource r;
489 struct of_irq oirq;
490 int virq; 489 int virq;
491 490
492 /* SMMIO registers; used inside this file */ 491 /* SMMIO registers; used inside this file */
@@ -507,12 +506,11 @@ static __init int celleb_setup_pciex(struct device_node *node,
507 phb->ops = &scc_pciex_pci_ops; 506 phb->ops = &scc_pciex_pci_ops;
508 507
509 /* internal interrupt handler */ 508 /* internal interrupt handler */
510 if (of_irq_map_one(node, 1, &oirq)) { 509 virq = irq_of_parse_and_map(node, 1);
510 if (!virq) {
511 pr_err("PCIEXC:Failed to map irq\n"); 511 pr_err("PCIEXC:Failed to map irq\n");
512 goto error; 512 goto error;
513 } 513 }
514 virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
515 oirq.size);
516 if (request_irq(virq, pciex_handle_internal_irq, 514 if (request_irq(virq, pciex_handle_internal_irq,
517 0, "pciex", (void *)phb)) { 515 0, "pciex", (void *)phb)) {
518 pr_err("PCIEXC:Failed to request irq\n"); 516 pr_err("PCIEXC:Failed to request irq\n");
diff --git a/arch/powerpc/platforms/cell/celleb_scc_sio.c b/arch/powerpc/platforms/cell/celleb_scc_sio.c
index 9c339ec646f5..c8eb57193826 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_sio.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_sio.c
@@ -45,7 +45,7 @@ static int __init txx9_serial_init(void)
45 struct device_node *node; 45 struct device_node *node;
46 int i; 46 int i;
47 struct uart_port req; 47 struct uart_port req;
48 struct of_irq irq; 48 struct of_phandle_args irq;
49 struct resource res; 49 struct resource res;
50 50
51 for_each_compatible_node(node, "serial", "toshiba,sio-scc") { 51 for_each_compatible_node(node, "serial", "toshiba,sio-scc") {
@@ -53,7 +53,7 @@ static int __init txx9_serial_init(void)
53 if (!(txx9_serial_bitmap & (1<<i))) 53 if (!(txx9_serial_bitmap & (1<<i)))
54 continue; 54 continue;
55 55
56 if (of_irq_map_one(node, i, &irq)) 56 if (of_irq_parse_one(node, i, &irq))
57 continue; 57 continue;
58 if (of_address_to_resource(node, 58 if (of_address_to_resource(node,
59 txx9_scc_tab[i].index, &res)) 59 txx9_scc_tab[i].index, &res))
@@ -66,8 +66,7 @@ static int __init txx9_serial_init(void)
66#ifdef CONFIG_SERIAL_TXX9_CONSOLE 66#ifdef CONFIG_SERIAL_TXX9_CONSOLE
67 req.membase = ioremap(req.mapbase, 0x24); 67 req.membase = ioremap(req.mapbase, 0x24);
68#endif 68#endif
69 req.irq = irq_create_of_mapping(irq.controller, 69 req.irq = irq_create_of_mapping(&irq);
70 irq.specifier, irq.size);
71 req.flags |= UPF_IOREMAP | UPF_BUGGY_UART 70 req.flags |= UPF_IOREMAP | UPF_BUGGY_UART
72 /*HAVE_CTS_LINE*/; 71 /*HAVE_CTS_LINE*/;
73 req.uartclk = 83300000; 72 req.uartclk = 83300000;
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 8e299447127e..1f72f4ab6353 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -235,12 +235,9 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
235 /* First, we check whether we have a real "interrupts" in the device 235 /* First, we check whether we have a real "interrupts" in the device
236 * tree in case the device-tree is ever fixed 236 * tree in case the device-tree is ever fixed
237 */ 237 */
238 struct of_irq oirq; 238 virq = irq_of_parse_and_map(pic->host->of_node, 0);
239 if (of_irq_map_one(pic->host->of_node, 0, &oirq) == 0) { 239 if (virq)
240 virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
241 oirq.size);
242 return virq; 240 return virq;
243 }
244 241
245 /* Now do the horrible hacks */ 242 /* Now do the horrible hacks */
246 tmp = of_get_property(pic->host->of_node, "#interrupt-cells", NULL); 243 tmp = of_get_property(pic->host->of_node, "#interrupt-cells", NULL);
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index 2bb6977c0a5a..c3327f3d8cf7 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -177,21 +177,20 @@ out:
177 177
178static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) 178static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
179{ 179{
180 struct of_irq oirq; 180 struct of_phandle_args oirq;
181 int ret; 181 int ret;
182 int i; 182 int i;
183 183
184 for (i=0; i < 3; i++) { 184 for (i=0; i < 3; i++) {
185 ret = of_irq_map_one(np, i, &oirq); 185 ret = of_irq_parse_one(np, i, &oirq);
186 if (ret) { 186 if (ret) {
187 pr_debug("spu_new: failed to get irq %d\n", i); 187 pr_debug("spu_new: failed to get irq %d\n", i);
188 goto err; 188 goto err;
189 } 189 }
190 ret = -EINVAL; 190 ret = -EINVAL;
191 pr_debug(" irq %d no 0x%x on %s\n", i, oirq.specifier[0], 191 pr_debug(" irq %d no 0x%x on %s\n", i, oirq.args[0],
192 oirq.controller->full_name); 192 oirq.np->full_name);
193 spu->irqs[i] = irq_create_of_mapping(oirq.controller, 193 spu->irqs[i] = irq_create_of_mapping(&oirq);
194 oirq.specifier, oirq.size);
195 if (spu->irqs[i] == NO_IRQ) { 194 if (spu->irqs[i] == NO_IRQ) {
196 pr_debug("spu_new: failed to map it !\n"); 195 pr_debug("spu_new: failed to map it !\n");
197 goto err; 196 goto err;
@@ -200,7 +199,7 @@ static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
200 return 0; 199 return 0;
201 200
202err: 201err:
203 pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, 202 pr_debug("failed to map irq %x for spu %s\n", *oirq.args,
204 spu->name); 203 spu->name);
205 for (; i >= 0; i--) { 204 for (; i >= 0; i--) {
206 if (spu->irqs[i] != NO_IRQ) 205 if (spu->irqs[i] != NO_IRQ)
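The celleb, spider-pic and spu_manage hunks are one and the same API migration: struct of_irq (controller/specifier/size) becomes the generic struct of_phandle_args (np/args/args_count), of_irq_map_one() becomes of_irq_parse_one(), and irq_create_of_mapping() now takes the parsed args by pointer. Both resulting idioms, as a sketch:

	struct of_phandle_args oirq;
	unsigned int virq;

	/* two-step form, when the parsed args are needed afterwards: */
	if (of_irq_parse_one(np, 0, &oirq) == 0)
		virq = irq_create_of_mapping(&oirq);

	/* one-step form, when only the Linux irq number matters: */
	virq = irq_of_parse_and_map(np, 0);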
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c
index db4e638cf408..3844f1397fc3 100644
--- a/arch/powerpc/platforms/cell/spu_syscalls.c
+++ b/arch/powerpc/platforms/cell/spu_syscalls.c
@@ -25,6 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/syscalls.h> 26#include <linux/syscalls.h>
27#include <linux/rcupdate.h> 27#include <linux/rcupdate.h>
28#include <linux/binfmts.h>
28 29
29#include <asm/spu.h> 30#include <asm/spu.h>
30 31
@@ -126,7 +127,7 @@ int elf_coredump_extra_notes_size(void)
126 return ret; 127 return ret;
127} 128}
128 129
129int elf_coredump_extra_notes_write(struct file *file, loff_t *foffset) 130int elf_coredump_extra_notes_write(struct coredump_params *cprm)
130{ 131{
131 struct spufs_calls *calls; 132 struct spufs_calls *calls;
132 int ret; 133 int ret;
@@ -135,7 +136,7 @@ int elf_coredump_extra_notes_write(struct file *file, loff_t *foffset)
135 if (!calls) 136 if (!calls)
136 return 0; 137 return 0;
137 138
138 ret = calls->coredump_extra_notes_write(file, foffset); 139 ret = calls->coredump_extra_notes_write(cprm);
139 140
140 spufs_calls_put(calls); 141 spufs_calls_put(calls);
141 142
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index c9500ea7be2f..be6212ddbf06 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -27,6 +27,8 @@
27#include <linux/gfp.h> 27#include <linux/gfp.h>
28#include <linux/list.h> 28#include <linux/list.h>
29#include <linux/syscalls.h> 29#include <linux/syscalls.h>
30#include <linux/coredump.h>
31#include <linux/binfmts.h>
30 32
31#include <asm/uaccess.h> 33#include <asm/uaccess.h>
32 34
@@ -48,44 +50,6 @@ static ssize_t do_coredump_read(int num, struct spu_context *ctx, void *buffer,
48 return ++ret; /* count trailing NULL */ 50 return ++ret; /* count trailing NULL */
49} 51}
50 52
51/*
52 * These are the only things you should do on a core-file: use only these
53 * functions to write out all the necessary info.
54 */
55static int spufs_dump_write(struct file *file, const void *addr, int nr, loff_t *foffset)
56{
57 unsigned long limit = rlimit(RLIMIT_CORE);
58 ssize_t written;
59
60 if (*foffset + nr > limit)
61 return -EIO;
62
63 written = file->f_op->write(file, addr, nr, &file->f_pos);
64 *foffset += written;
65
66 if (written != nr)
67 return -EIO;
68
69 return 0;
70}
71
72static int spufs_dump_align(struct file *file, char *buf, loff_t new_off,
73 loff_t *foffset)
74{
75 int rc, size;
76
77 size = min((loff_t)PAGE_SIZE, new_off - *foffset);
78 memset(buf, 0, size);
79
80 rc = 0;
81 while (rc == 0 && new_off > *foffset) {
82 size = min((loff_t)PAGE_SIZE, new_off - *foffset);
83 rc = spufs_dump_write(file, buf, size, foffset);
84 }
85
86 return rc;
87}
88
89static int spufs_ctx_note_size(struct spu_context *ctx, int dfd) 53static int spufs_ctx_note_size(struct spu_context *ctx, int dfd)
90{ 54{
91 int i, sz, total = 0; 55 int i, sz, total = 0;
@@ -165,10 +129,10 @@ int spufs_coredump_extra_notes_size(void)
165} 129}
166 130
167static int spufs_arch_write_note(struct spu_context *ctx, int i, 131static int spufs_arch_write_note(struct spu_context *ctx, int i,
168 struct file *file, int dfd, loff_t *foffset) 132 struct coredump_params *cprm, int dfd)
169{ 133{
170 loff_t pos = 0; 134 loff_t pos = 0;
171 int sz, rc, nread, total = 0; 135 int sz, rc, total = 0;
172 const int bufsz = PAGE_SIZE; 136 const int bufsz = PAGE_SIZE;
173 char *name; 137 char *name;
174 char fullname[80], *buf; 138 char fullname[80], *buf;
@@ -186,42 +150,39 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
186 en.n_descsz = sz; 150 en.n_descsz = sz;
187 en.n_type = NT_SPU; 151 en.n_type = NT_SPU;
188 152
189 rc = spufs_dump_write(file, &en, sizeof(en), foffset); 153 if (!dump_emit(cprm, &en, sizeof(en)))
190 if (rc) 154 goto Eio;
191 goto out;
192 155
193 rc = spufs_dump_write(file, fullname, en.n_namesz, foffset); 156 if (!dump_emit(cprm, fullname, en.n_namesz))
194 if (rc) 157 goto Eio;
195 goto out;
196 158
197 rc = spufs_dump_align(file, buf, roundup(*foffset, 4), foffset); 159 if (!dump_align(cprm, 4))
198 if (rc) 160 goto Eio;
199 goto out;
200 161
201 do { 162 do {
202 nread = do_coredump_read(i, ctx, buf, bufsz, &pos); 163 rc = do_coredump_read(i, ctx, buf, bufsz, &pos);
203 if (nread > 0) { 164 if (rc > 0) {
204 rc = spufs_dump_write(file, buf, nread, foffset); 165 if (!dump_emit(cprm, buf, rc))
205 if (rc) 166 goto Eio;
206 goto out; 167 total += rc;
207 total += nread;
208 } 168 }
209 } while (nread == bufsz && total < sz); 169 } while (rc == bufsz && total < sz);
210 170
211 if (nread < 0) { 171 if (rc < 0)
212 rc = nread;
213 goto out; 172 goto out;
214 }
215
216 rc = spufs_dump_align(file, buf, roundup(*foffset - total + sz, 4),
217 foffset);
218 173
174 if (!dump_skip(cprm,
175 roundup(cprm->written - total + sz, 4) - cprm->written))
176 goto Eio;
219out: 177out:
220 free_page((unsigned long)buf); 178 free_page((unsigned long)buf);
221 return rc; 179 return rc;
180Eio:
181 free_page((unsigned long)buf);
182 return -EIO;
222} 183}
223 184
224int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset) 185int spufs_coredump_extra_notes_write(struct coredump_params *cprm)
225{ 186{
226 struct spu_context *ctx; 187 struct spu_context *ctx;
227 int fd, j, rc; 188 int fd, j, rc;
@@ -233,7 +194,7 @@ int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset)
233 return rc; 194 return rc;
234 195
235 for (j = 0; spufs_coredump_read[j].name != NULL; j++) { 196 for (j = 0; spufs_coredump_read[j].name != NULL; j++) {
236 rc = spufs_arch_write_note(ctx, j, file, fd, foffset); 197 rc = spufs_arch_write_note(ctx, j, cprm, fd);
237 if (rc) { 198 if (rc) {
238 spu_release_saved(ctx); 199 spu_release_saved(ctx);
239 return rc; 200 return rc;
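dump_emit()/dump_skip() replace the hand-rolled spufs_dump_write()/spufs_dump_align(): the core helpers already track cprm->written and the core rlimit, and — note the inverted error convention relative to the old code — they return nonzero on success. The caller-side shape used above, as a sketch:

	if (!dump_emit(cprm, buf, len))		/* write note payload */
		return -EIO;
	if (!dump_skip(cprm, pad))		/* zero-fill up to alignment */
		return -EIO;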
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 67852ade4c01..0ba3c9598358 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -247,12 +247,13 @@ extern const struct spufs_tree_descr spufs_dir_debug_contents[];
247 247
248/* system call implementation */ 248/* system call implementation */
249extern struct spufs_calls spufs_calls; 249extern struct spufs_calls spufs_calls;
250struct coredump_params;
250long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status); 251long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status);
251long spufs_create(struct path *nd, struct dentry *dentry, unsigned int flags, 252long spufs_create(struct path *nd, struct dentry *dentry, unsigned int flags,
252 umode_t mode, struct file *filp); 253 umode_t mode, struct file *filp);
253/* ELF coredump callbacks for writing SPU ELF notes */ 254/* ELF coredump callbacks for writing SPU ELF notes */
254extern int spufs_coredump_extra_notes_size(void); 255extern int spufs_coredump_extra_notes_size(void);
255extern int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset); 256extern int spufs_coredump_extra_notes_write(struct coredump_params *cprm);
256 257
257extern const struct file_operations spufs_context_fops; 258extern const struct file_operations spufs_context_fops;
258 259
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index 53d6eee01963..4cde8e7da4b8 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -18,6 +18,7 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/irq.h> 19#include <linux/irq.h>
20#include <linux/of.h> 20#include <linux/of.h>
21#include <linux/of_address.h>
21#include <asm/io.h> 22#include <asm/io.h>
22 23
23#include "flipper-pic.h" 24#include "flipper-pic.h"
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index 6f61e21b3617..6c03034dbbd3 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -18,6 +18,8 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/irq.h> 19#include <linux/irq.h>
20#include <linux/of.h> 20#include <linux/of.h>
21#include <linux/of_address.h>
22#include <linux/of_irq.h>
21#include <asm/io.h> 23#include <asm/io.h>
22 24
23#include "hlwd-pic.h" 25#include "hlwd-pic.h"
diff --git a/arch/powerpc/platforms/fsl_uli1575.c b/arch/powerpc/platforms/fsl_uli1575.c
index 92ac9b52b32d..b97f6f3d3c5b 100644
--- a/arch/powerpc/platforms/fsl_uli1575.c
+++ b/arch/powerpc/platforms/fsl_uli1575.c
@@ -321,8 +321,7 @@ static void hpcd_final_uli5288(struct pci_dev *dev)
321{ 321{
322 struct pci_controller *hose = pci_bus_to_host(dev->bus); 322 struct pci_controller *hose = pci_bus_to_host(dev->bus);
323 struct device_node *hosenode = hose ? hose->dn : NULL; 323 struct device_node *hosenode = hose ? hose->dn : NULL;
324 struct of_irq oirq; 324 struct of_phandle_args oirq;
325 int virq, pin = 2;
326 u32 laddr[3]; 325 u32 laddr[3];
327 326
328 if (!machine_is(mpc86xx_hpcd)) 327 if (!machine_is(mpc86xx_hpcd))
@@ -331,12 +330,13 @@ static void hpcd_final_uli5288(struct pci_dev *dev)
331 if (!hosenode) 330 if (!hosenode)
332 return; 331 return;
333 332
333 oirq.np = hosenode;
334 oirq.args[0] = 2;
335 oirq.args_count = 1;
334 laddr[0] = (hose->first_busno << 16) | (PCI_DEVFN(31, 0) << 8); 336 laddr[0] = (hose->first_busno << 16) | (PCI_DEVFN(31, 0) << 8);
335 laddr[1] = laddr[2] = 0; 337 laddr[1] = laddr[2] = 0;
336 of_irq_map_raw(hosenode, &pin, 1, laddr, &oirq); 338 of_irq_parse_raw(laddr, &oirq);
337 virq = irq_create_of_mapping(oirq.controller, oirq.specifier, 339 dev->irq = irq_create_of_mapping(&oirq);
338 oirq.size);
339 dev->irq = virq;
340} 340}
341 341
342DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x1575, hpcd_quirk_uli1575); 342DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x1575, hpcd_quirk_uli1575);
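Here there is no usable "interrupts" property on the device itself, so the caller now seeds the of_phandle_args by hand and lets of_irq_parse_raw() walk the host bridge's interrupt map from a raw PCI address. A condensed sketch of the pattern (variable names as in the hunk, values illustrative):

	struct of_phandle_args oirq;
	u32 laddr[3] = { 0 };	/* encoded PCI bus/devfn address */

	oirq.np = hosenode;	/* node whose interrupt-map applies */
	oirq.args[0] = 2;	/* the pin formerly passed as a separate arg */
	oirq.args_count = 1;
	of_irq_parse_raw(laddr, &oirq);
	dev->irq = irq_create_of_mapping(&oirq);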
diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c
index 0237ab782fb8..15adee544638 100644
--- a/arch/powerpc/platforms/pasemi/gpio_mdio.c
+++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c
@@ -30,6 +30,7 @@
30#include <linux/ioport.h> 30#include <linux/ioport.h>
31#include <linux/interrupt.h> 31#include <linux/interrupt.h>
32#include <linux/phy.h> 32#include <linux/phy.h>
33#include <linux/of_address.h>
33#include <linux/of_mdio.h> 34#include <linux/of_mdio.h>
34#include <linux/of_platform.h> 35#include <linux/of_platform.h>
35 36
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index fc536f2971c0..7553b6a77c64 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -452,7 +452,7 @@ static int kw_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
452 */ 452 */
453 if (use_irq) { 453 if (use_irq) {
454 /* Clear completion */ 454 /* Clear completion */
455 INIT_COMPLETION(host->complete); 455 reinit_completion(&host->complete);
456 /* Ack stale interrupts */ 456 /* Ack stale interrupts */
457 kw_write_reg(reg_isr, kw_read_reg(reg_isr)); 457 kw_write_reg(reg_isr, kw_read_reg(reg_isr));
458 /* Arm timeout */ 458 /* Arm timeout */
@@ -717,7 +717,7 @@ static int pmu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
717 return -EINVAL; 717 return -EINVAL;
718 } 718 }
719 719
720 INIT_COMPLETION(comp); 720 reinit_completion(&comp);
721 req->data[0] = PMU_I2C_CMD; 721 req->data[0] = PMU_I2C_CMD;
722 req->reply[0] = 0xff; 722 req->reply[0] = 0xff;
723 req->nbytes = sizeof(struct pmu_i2c_hdr) + 1; 723 req->nbytes = sizeof(struct pmu_i2c_hdr) + 1;
@@ -748,7 +748,7 @@ static int pmu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
748 748
749 hdr->bus = PMU_I2C_BUS_STATUS; 749 hdr->bus = PMU_I2C_BUS_STATUS;
750 750
751 INIT_COMPLETION(comp); 751 reinit_completion(&comp);
752 req->data[0] = PMU_I2C_CMD; 752 req->data[0] = PMU_I2C_CMD;
753 req->reply[0] = 0xff; 753 req->reply[0] = 0xff;
754 req->nbytes = 2; 754 req->nbytes = 2;
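INIT_COMPLETION(x) was a macro for re-arming an already-initialised completion; reinit_completion(&x) is its function replacement, not to be confused with the one-time init_completion(). Sketch of the lifecycle:

	struct completion done;

	init_completion(&done);		/* once, at setup */
	/* ... complete(&done) from the handler, wait_for_completion(&done) ... */
	reinit_completion(&done);	/* re-arm before the next transaction */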
diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c
index f5e3cda6660e..e49d07f3d542 100644
--- a/arch/powerpc/platforms/powermac/pfunc_base.c
+++ b/arch/powerpc/platforms/powermac/pfunc_base.c
@@ -4,6 +4,7 @@
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <linux/interrupt.h> 5#include <linux/interrupt.h>
6#include <linux/spinlock.h> 6#include <linux/spinlock.h>
7#include <linux/of_irq.h>
7 8
8#include <asm/pmac_feature.h> 9#include <asm/pmac_feature.h>
9#include <asm/pmac_pfunc.h> 10#include <asm/pmac_pfunc.h>
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 31036b56670e..4c24bf60d39d 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -393,8 +393,8 @@ static void __init pmac_pic_probe_oldstyle(void)
 #endif
 }
 
-int of_irq_map_oldworld(struct device_node *device, int index,
-			struct of_irq *out_irq)
+int of_irq_parse_oldworld(struct device_node *device, int index,
+			  struct of_phandle_args *out_irq)
 {
 	const u32 *ints = NULL;
 	int intlen;
@@ -422,9 +422,9 @@ int of_irq_map_oldworld(struct device_node *device, int index,
 	if (index >= intlen)
 		return -EINVAL;
 
-	out_irq->controller = NULL;
-	out_irq->specifier[0] = ints[index];
-	out_irq->size = 1;
+	out_irq->np = NULL;
+	out_irq->args[0] = ints[index];
+	out_irq->args_count = 1;
 
 	return 0;
 }
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
index a7614bb14e17..e7e59e4f9892 100644
--- a/arch/powerpc/platforms/powernv/opal-lpc.c
+++ b/arch/powerpc/platforms/powernv/opal-lpc.c
@@ -17,6 +17,7 @@
 #include <asm/firmware.h>
 #include <asm/xics.h>
 #include <asm/opal.h>
+#include <asm/prom.h>
 
 static int opal_lpc_chip_id = -1;
 
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index b56c243aaee9..1c798cd55372 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -13,6 +13,7 @@
 
 #include <linux/types.h>
 #include <linux/of.h>
+#include <linux/of_fdt.h>
 #include <linux/of_platform.h>
 #include <linux/interrupt.h>
 #include <linux/notifier.h>
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index e239dcfa224c..19884b2a51b4 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -23,6 +23,7 @@
 #include <linux/irq.h>
 #include <linux/seq_file.h>
 #include <linux/of.h>
+#include <linux/of_fdt.h>
 #include <linux/interrupt.h>
 #include <linux/bug.h>
 
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 7cfdaae1721a..a8fe5aa3d34f 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -404,46 +404,38 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
 	unsigned long drc_index;
 	int rc;
 
-	cpu_hotplug_driver_lock();
 	rc = strict_strtoul(buf, 0, &drc_index);
-	if (rc) {
-		rc = -EINVAL;
-		goto out;
-	}
+	if (rc)
+		return -EINVAL;
 
 	parent = of_find_node_by_path("/cpus");
-	if (!parent) {
-		rc = -ENODEV;
-		goto out;
-	}
+	if (!parent)
+		return -ENODEV;
 
 	dn = dlpar_configure_connector(drc_index, parent);
-	if (!dn) {
-		rc = -EINVAL;
-		goto out;
-	}
+	if (!dn)
+		return -EINVAL;
 
 	of_node_put(parent);
 
 	rc = dlpar_acquire_drc(drc_index);
 	if (rc) {
 		dlpar_free_cc_nodes(dn);
-		rc = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
 	rc = dlpar_attach_node(dn);
 	if (rc) {
 		dlpar_release_drc(drc_index);
 		dlpar_free_cc_nodes(dn);
-		goto out;
+		return rc;
 	}
 
 	rc = dlpar_online_cpu(dn);
-out:
-	cpu_hotplug_driver_unlock();
+	if (rc)
+		return rc;
 
-	return rc ? rc : count;
+	return count;
 }
 
 static int dlpar_offline_cpu(struct device_node *dn)
@@ -516,30 +508,27 @@ static ssize_t dlpar_cpu_release(const char *buf, size_t count)
 		return -EINVAL;
 	}
 
-	cpu_hotplug_driver_lock();
 	rc = dlpar_offline_cpu(dn);
 	if (rc) {
 		of_node_put(dn);
-		rc = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
 	rc = dlpar_release_drc(*drc_index);
 	if (rc) {
 		of_node_put(dn);
-		goto out;
+		return rc;
 	}
 
 	rc = dlpar_detach_node(dn);
 	if (rc) {
 		dlpar_acquire_drc(*drc_index);
-		goto out;
+		return rc;
 	}
 
 	of_node_put(dn);
-out:
-	cpu_hotplug_driver_unlock();
-	return rc ? rc : count;
+
+	return count;
 }
 
 static int __init pseries_dlpar_init(void)
diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c
index 2605c310166a..18380e8f6dfe 100644
--- a/arch/powerpc/platforms/pseries/event_sources.c
+++ b/arch/powerpc/platforms/pseries/event_sources.c
@@ -25,7 +25,7 @@ void request_event_sources_irqs(struct device_node *np,
 				const char *name)
 {
 	int i, index, count = 0;
-	struct of_irq oirq;
+	struct of_phandle_args oirq;
 	const u32 *opicprop;
 	unsigned int opicplen;
 	unsigned int virqs[16];
@@ -55,13 +55,11 @@ void request_event_sources_irqs(struct device_node *np,
 	/* Else use normal interrupt tree parsing */
 	else {
 		/* First try to do a proper OF tree parsing */
-		for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
+		for (index = 0; of_irq_parse_one(np, index, &oirq) == 0;
 		     index++) {
 			if (count > 15)
 				break;
-			virqs[count] = irq_create_of_mapping(oirq.controller,
-							     oirq.specifier,
-							     oirq.size);
+			virqs[count] = irq_create_of_mapping(&oirq);
 			if (virqs[count] == NO_IRQ) {
 				pr_err("event-sources: Unable to allocate "
 				       "interrupt number for %s\n",
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 9a432de363b8..9590dbb756f2 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -10,12 +10,14 @@
  */
 
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/memblock.h>
 #include <linux/vmalloc.h>
 #include <linux/memory.h>
 
 #include <asm/firmware.h>
 #include <asm/machdep.h>
+#include <asm/prom.h>
 #include <asm/sparsemem.h>
 
 static unsigned long get_memblock_size(void)
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 057fc894be51..7bfaf58d4664 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -31,7 +31,7 @@
 #define NVRW_CNT 0x20
 
 /*
- * Set oops header version to distingush between old and new format header.
+ * Set oops header version to distinguish between old and new format header.
  * lnx,oops-log partition max size is 4000, header version > 4000 will
  * help in identifying new header.
  */
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index 5f997e79d570..16a255255d30 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -106,7 +106,7 @@ static int pseries_prepare_late(void)
 	atomic_set(&suspend_data.done, 0);
 	atomic_set(&suspend_data.error, 0);
 	suspend_data.complete = &suspend_work;
-	INIT_COMPLETION(suspend_work);
+	reinit_completion(&suspend_work);
 	return 0;
 }
 
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 4dd534194ae8..4f7869571290 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -22,6 +22,7 @@
 #include <linux/spinlock.h>
 #include <linux/export.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/slab.h>
 
 #include <asm/udbg.h>
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index 9cd0e60716fe..b74085cea1af 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -19,6 +19,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
diff --git a/arch/powerpc/sysdev/fsl_gtm.c b/arch/powerpc/sysdev/fsl_gtm.c
index 0eb871cc3437..06ac3c61b3d0 100644
--- a/arch/powerpc/sysdev/fsl_gtm.c
+++ b/arch/powerpc/sysdev/fsl_gtm.c
@@ -19,6 +19,8 @@
 #include <linux/list.h>
 #include <linux/io.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/spinlock.h>
 #include <linux/bitops.h>
 #include <linux/slab.h>
@@ -401,16 +403,15 @@ static int __init fsl_gtm_init(void)
 	gtm->clock = *clock;
 
 	for (i = 0; i < ARRAY_SIZE(gtm->timers); i++) {
-		int ret;
-		struct resource irq;
+		unsigned int irq;
 
-		ret = of_irq_to_resource(np, i, &irq);
-		if (ret == NO_IRQ) {
+		irq = irq_of_parse_and_map(np, i);
+		if (irq == NO_IRQ) {
 			pr_err("%s: not enough interrupts specified\n",
 			       np->full_name);
 			goto err;
 		}
-		gtm->timers[i].irq = irq.start;
+		gtm->timers[i].irq = irq;
 		gtm->timers[i].gtm = gtm;
 	}
 
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 21039634d1d0..4dfd61df8aba 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -45,7 +45,7 @@ static void quirk_fsl_pcie_early(struct pci_dev *dev)
 	u8 hdr_type;
 
 	/* if we aren't a PCIe don't bother */
-	if (!pci_find_capability(dev, PCI_CAP_ID_EXP))
+	if (!pci_is_pcie(dev))
 		return;
 
 	/* if we aren't in host mode don't bother */
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
index 592a0f8d527a..8cf4aa0e3a25 100644
--- a/arch/powerpc/sysdev/fsl_pmc.c
+++ b/arch/powerpc/sysdev/fsl_pmc.c
@@ -18,6 +18,7 @@
 #include <linux/suspend.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 
 struct pmc_regs {
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index e2fb3171f41b..95dd892e9904 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -28,6 +28,8 @@
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
 #include <linux/device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c
index 14bd5221f28a..00e224a1048c 100644
--- a/arch/powerpc/sysdev/fsl_rmu.c
+++ b/arch/powerpc/sysdev/fsl_rmu.c
@@ -27,6 +27,7 @@
 #include <linux/types.h>
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/slab.h>
 
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index bdcb8588e492..0e166ed4cd16 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -535,7 +535,7 @@ static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase,
 		mpic->fixups[irq].data = readl(base + 4) | 0x80000000;
 	}
 }
- 
+
 
 static void __init mpic_scan_ht_pics(struct mpic *mpic)
 {
@@ -1481,7 +1481,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
 	 * as a default instead of the value read from the HW.
 	 */
 	last_irq = (greg_feature & MPIC_GREG_FEATURE_LAST_SRC_MASK)
-				>> MPIC_GREG_FEATURE_LAST_SRC_SHIFT;	
+				>> MPIC_GREG_FEATURE_LAST_SRC_SHIFT;
 	if (isu_size)
 		last_irq = isu_size * MPIC_MAX_ISU - 1;
 	of_property_read_u32(mpic->node, "last-interrupt-source", &last_irq);
@@ -1631,7 +1631,7 @@ void __init mpic_init(struct mpic *mpic)
 		/* start with vector = source number, and masked */
 		u32 vecpri = MPIC_VECPRI_MASK | i |
 			(8 << MPIC_VECPRI_PRIORITY_SHIFT);
- 
+
 		/* check if protected */
 		if (mpic->protected && test_bit(i, mpic->protected))
 			continue;
@@ -1640,7 +1640,7 @@ void __init mpic_init(struct mpic *mpic)
 			mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1 << cpu);
 		}
 	}
- 
+
 	/* Init spurious vector */
 	mpic_write(mpic->gregs, MPIC_INFO(GREG_SPURIOUS), mpic->spurious_vec);
 
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index c75325865a85..2c9b52aa266c 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -237,15 +237,13 @@ static int mpic_msgr_probe(struct platform_device *dev)
 		raw_spin_lock_init(&msgr->lock);
 
 		if (receive_mask & (1 << i)) {
-			struct resource irq;
-
-			if (of_irq_to_resource(np, irq_index, &irq) == NO_IRQ) {
+			msgr->irq = irq_of_parse_and_map(np, irq_index);
+			if (msgr->irq == NO_IRQ) {
 				dev_err(&dev->dev,
 					"Missing interrupt specifier");
 				kfree(msgr);
 				return -EFAULT;
 			}
-			msgr->irq = irq.start;
 			irq_index += 1;
 		} else {
 			msgr->irq = NO_IRQ;
diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c
index bbf342c88314..7dc39f35a4cc 100644
--- a/arch/powerpc/sysdev/mpic_msi.c
+++ b/arch/powerpc/sysdev/mpic_msi.c
@@ -35,7 +35,7 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
 	const struct irq_domain_ops *ops = mpic->irqhost->ops;
 	struct device_node *np;
 	int flags, index, i;
-	struct of_irq oirq;
+	struct of_phandle_args oirq;
 
 	pr_debug("mpic: found U3, guessing msi allocator setup\n");
 
@@ -63,9 +63,9 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
 		pr_debug("mpic: mapping hwirqs for %s\n", np->full_name);
 
 		index = 0;
-		while (of_irq_map_one(np, index++, &oirq) == 0) {
-			ops->xlate(mpic->irqhost, NULL, oirq.specifier,
-				   oirq.size, &hwirq, &flags);
+		while (of_irq_parse_one(np, index++, &oirq) == 0) {
+			ops->xlate(mpic->irqhost, NULL, oirq.args,
+				   oirq.args_count, &hwirq, &flags);
 			msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, hwirq);
 		}
 	}
diff --git a/arch/powerpc/sysdev/mpic_timer.c b/arch/powerpc/sysdev/mpic_timer.c
index c06db92a4fb1..22d7d57eead9 100644
--- a/arch/powerpc/sysdev/mpic_timer.c
+++ b/arch/powerpc/sysdev/mpic_timer.c
@@ -19,7 +19,9 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_device.h>
+#include <linux/of_irq.h>
 #include <linux/syscore_ops.h>
 #include <sysdev/fsl_soc.h>
 #include <asm/io.h>
diff --git a/arch/powerpc/sysdev/of_rtc.c b/arch/powerpc/sysdev/of_rtc.c
index c9e803f3e267..6f54b54b1328 100644
--- a/arch/powerpc/sysdev/of_rtc.c
+++ b/arch/powerpc/sysdev/of_rtc.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/of.h>
 #include <linux/init.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/slab.h>
 
diff --git a/arch/powerpc/sysdev/ppc4xx_ocm.c b/arch/powerpc/sysdev/ppc4xx_ocm.c
index 1b15f93479c3..b7c43453236d 100644
--- a/arch/powerpc/sysdev/ppc4xx_ocm.c
+++ b/arch/powerpc/sysdev/ppc4xx_ocm.c
@@ -26,6 +26,7 @@
 #include <linux/kernel.h>
 #include <linux/dma-mapping.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <asm/rheap.h>
 #include <asm/ppc4xx_ocm.h>
 #include <linux/slab.h>
diff --git a/arch/powerpc/sysdev/ppc4xx_soc.c b/arch/powerpc/sysdev/ppc4xx_soc.c
index 0debcc31ad70..5c77c9ba33aa 100644
--- a/arch/powerpc/sysdev/ppc4xx_soc.c
+++ b/arch/powerpc/sysdev/ppc4xx_soc.c
@@ -19,6 +19,7 @@
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 
 #include <asm/dcr.h>
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
index 8d73c3c0bee6..83f943a8e0db 100644
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -23,6 +23,8 @@
 #include <linux/kernel.h>
 #include <linux/irq.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/i8259.h>