Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig | 5
-rw-r--r--  arch/powerpc/Makefile | 5
-rw-r--r--  arch/powerpc/boot/4xx.c | 12
-rw-r--r--  arch/powerpc/boot/dts/icon.dts | 447
-rw-r--r--  arch/powerpc/boot/dts/katmai.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/mpc8548cds.dts | 4
-rw-r--r--  arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts | 15
-rw-r--r--  arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts | 7
-rw-r--r--  arch/powerpc/boot/dts/p1021mds.dts | 698
-rw-r--r--  arch/powerpc/boot/dts/redwood.dts | 122
-rw-r--r--  arch/powerpc/configs/44x/icon_defconfig | 1451
-rw-r--r--  arch/powerpc/include/asm/asm-compat.h | 2
-rw-r--r--  arch/powerpc/include/asm/bug.h | 6
-rw-r--r--  arch/powerpc/include/asm/cache.h | 2
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 1
-rw-r--r--  arch/powerpc/include/asm/device.h | 16
-rw-r--r--  arch/powerpc/include/asm/kexec.h | 13
-rw-r--r--  arch/powerpc/include/asm/kvm.h | 10
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h | 2
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 157
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_32.h | 42
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h | 28
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h (renamed from arch/powerpc/include/asm/kvm_book3s_64_asm.h) | 25
-rw-r--r--  arch/powerpc/include/asm/kvm_booke.h | 96
-rw-r--r--  arch/powerpc/include/asm/kvm_fpu.h | 85
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 38
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 97
-rw-r--r--  arch/powerpc/include/asm/macio.h | 6
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h | 2
-rw-r--r--  arch/powerpc/include/asm/mpc52xx_psc.h | 1
-rw-r--r--  arch/powerpc/include/asm/of_device.h | 3
-rw-r--r--  arch/powerpc/include/asm/paca.h | 10
-rw-r--r--  arch/powerpc/include/asm/page_64.h | 8
-rw-r--r--  arch/powerpc/include/asm/processor.h | 3
-rw-r--r--  arch/powerpc/include/asm/reg.h | 10
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h | 33
-rw-r--r--  arch/powerpc/include/asm/scatterlist.h | 28
-rw-r--r--  arch/powerpc/kernel/Makefile | 8
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 102
-rw-r--r--  arch/powerpc/kernel/cputable.c | 2
-rw-r--r--  arch/powerpc/kernel/crash.c | 4
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c | 4
-rw-r--r--  arch/powerpc/kernel/dma.c | 12
-rw-r--r--  arch/powerpc/kernel/fsl_booke_entry_mapping.S | 237
-rw-r--r--  arch/powerpc/kernel/head_32.S | 14
-rw-r--r--  arch/powerpc/kernel/head_64.S | 4
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S | 200
-rw-r--r--  arch/powerpc/kernel/ibmebus.c | 6
-rw-r--r--  arch/powerpc/kernel/kprobes.c | 14
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 17
-rw-r--r--  arch/powerpc/kernel/of_device.c | 13
-rw-r--r--  arch/powerpc/kernel/of_platform.c | 21
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 5
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c | 4
-rw-r--r--  arch/powerpc/kernel/swsusp_booke.S | 193
-rw-r--r--  arch/powerpc/kernel/traps.c | 88
-rw-r--r--  arch/powerpc/kernel/vio.c | 20
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 10
-rw-r--r--  arch/powerpc/kvm/44x.c | 2
-rw-r--r--  arch/powerpc/kvm/Kconfig | 24
-rw-r--r--  arch/powerpc/kvm/Makefile | 20
-rw-r--r--  arch/powerpc/kvm/book3s.c | 503
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu.c | 54
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c | 483
-rw-r--r--  arch/powerpc/kvm/book3s_32_sr.S | 143
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c | 36
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c | 102
-rw-r--r--  arch/powerpc/kvm/book3s_64_slb.S | 183
-rw-r--r--  arch/powerpc/kvm/book3s_emulate.c (renamed from arch/powerpc/kvm/book3s_64_emulate.c) | 245
-rw-r--r--  arch/powerpc/kvm/book3s_exports.c (renamed from arch/powerpc/kvm/book3s_64_exports.c) | 0
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S (renamed from arch/powerpc/kvm/book3s_64_interrupts.S) | 204
-rw-r--r--  arch/powerpc/kvm/book3s_paired_singles.c | 1289
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S (renamed from arch/powerpc/kvm/book3s_64_rmhandlers.S) | 135
-rw-r--r--  arch/powerpc/kvm/book3s_segment.S | 259
-rw-r--r--  arch/powerpc/kvm/booke.c | 21
-rw-r--r--  arch/powerpc/kvm/e500.c | 4
-rw-r--r--  arch/powerpc/kvm/emulate.c | 55
-rw-r--r--  arch/powerpc/kvm/fpu.S | 273
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 110
-rw-r--r--  arch/powerpc/mm/mmu_context_hash32.c | 29
-rw-r--r--  arch/powerpc/oprofile/op_model_cell.c | 2
-rw-r--r--  arch/powerpc/platforms/44x/Kconfig | 11
-rw-r--r--  arch/powerpc/platforms/44x/ppc44x_simple.c | 3
-rw-r--r--  arch/powerpc/platforms/512x/mpc512x_shared.c | 78
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_gpio.c | 18
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_gpt.c | 23
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c | 14
-rw-r--r--  arch/powerpc/platforms/82xx/ep8248e.c | 9
-rw-r--r--  arch/powerpc/platforms/83xx/suspend.c | 9
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_mds.c | 102
-rw-r--r--  arch/powerpc/platforms/cell/axon_msi.c | 9
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c | 7
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c | 3
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c | 2
-rw-r--r--  arch/powerpc/platforms/pasemi/gpio_mdio.c | 9
-rw-r--r--  arch/powerpc/platforms/pasemi/setup.c | 4
-rw-r--r--  arch/powerpc/platforms/ps3/system-bus.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/hvCall_inst.c | 10
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 2
-rw-r--r--  arch/powerpc/sysdev/axonram.c | 14
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bestcomm.c | 18
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c | 138
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.h | 3
-rw-r--r--  arch/powerpc/sysdev/fsl_pmc.c | 9
-rw-r--r--  arch/powerpc/sysdev/fsl_rio.c | 413
-rw-r--r--  arch/powerpc/sysdev/pmi.c | 9
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_pci.c | 119
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_pci.h | 58
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe.c | 7
109 files changed, 8538 insertions, 1210 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c4c4549c22bb..328774bd41ee 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -351,7 +351,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
 
 config KEXEC
 	bool "kexec system call (EXPERIMENTAL)"
-	depends on PPC_BOOK3S && EXPERIMENTAL
+	depends on (PPC_BOOK3S || (FSL_BOOKE && !SMP)) && EXPERIMENTAL
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
@@ -663,6 +663,9 @@ config ZONE_DMA
 config NEED_DMA_MAP_STATE
 	def_bool (PPC64 || NOT_COHERENT_CACHE)
 
+config NEED_SG_DMA_LENGTH
+	def_bool y
+
 config GENERIC_ISA_DMA
 	bool
 	depends on PPC64 || POWER4 || 6xx && !CPM2
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 1a54a3b3a3fa..42dcd3f4ad7b 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -112,6 +112,11 @@ KBUILD_CFLAGS += $(call cc-option,-mspe=no)
 # kernel considerably.
 KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
 
+# FIXME: the module load should be taught about the additional relocs
+# generated by this.
+# revert to pre-gcc-4.4 behaviour of .eh_frame
+KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
+
 # Never use string load/store instructions as they are
 # often slow when they are implemented at all
 KBUILD_CFLAGS += -mno-string
diff --git a/arch/powerpc/boot/4xx.c b/arch/powerpc/boot/4xx.c
index 27db8938827a..9d3bd4c45a24 100644
--- a/arch/powerpc/boot/4xx.c
+++ b/arch/powerpc/boot/4xx.c
@@ -519,7 +519,7 @@ void ibm440ep_fixup_clocks(unsigned int sys_clk,
 {
 	unsigned int plb_clk = __ibm440eplike_fixup_clocks(sys_clk, tmr_clk, 0);
 
-	/* serial clocks beed fixup based on int/ext */
+	/* serial clocks need fixup based on int/ext */
 	eplike_fixup_uart_clk(0, "/plb/opb/serial@ef600300", ser_clk, plb_clk);
 	eplike_fixup_uart_clk(1, "/plb/opb/serial@ef600400", ser_clk, plb_clk);
 	eplike_fixup_uart_clk(2, "/plb/opb/serial@ef600500", ser_clk, plb_clk);
@@ -532,7 +532,7 @@ void ibm440gx_fixup_clocks(unsigned int sys_clk,
 {
 	unsigned int plb_clk = __ibm440eplike_fixup_clocks(sys_clk, tmr_clk, 1);
 
-	/* serial clocks beed fixup based on int/ext */
+	/* serial clocks need fixup based on int/ext */
 	eplike_fixup_uart_clk(0, "/plb/opb/serial@40000200", ser_clk, plb_clk);
 	eplike_fixup_uart_clk(1, "/plb/opb/serial@40000300", ser_clk, plb_clk);
 }
@@ -543,10 +543,10 @@ void ibm440spe_fixup_clocks(unsigned int sys_clk,
 {
 	unsigned int plb_clk = __ibm440eplike_fixup_clocks(sys_clk, tmr_clk, 1);
 
-	/* serial clocks beed fixup based on int/ext */
-	eplike_fixup_uart_clk(0, "/plb/opb/serial@10000200", ser_clk, plb_clk);
-	eplike_fixup_uart_clk(1, "/plb/opb/serial@10000300", ser_clk, plb_clk);
-	eplike_fixup_uart_clk(2, "/plb/opb/serial@10000600", ser_clk, plb_clk);
+	/* serial clocks need fixup based on int/ext */
+	eplike_fixup_uart_clk(0, "/plb/opb/serial@f0000200", ser_clk, plb_clk);
+	eplike_fixup_uart_clk(1, "/plb/opb/serial@f0000300", ser_clk, plb_clk);
+	eplike_fixup_uart_clk(2, "/plb/opb/serial@f0000600", ser_clk, plb_clk);
 }
 
 void ibm405gp_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk)
diff --git a/arch/powerpc/boot/dts/icon.dts b/arch/powerpc/boot/dts/icon.dts
new file mode 100644
index 000000000000..abcd0caeccae
--- /dev/null
+++ b/arch/powerpc/boot/dts/icon.dts
@@ -0,0 +1,447 @@
1/*
2 * Device Tree Source for Mosaix Technologies, Inc. ICON board
3 *
4 * Copyright 2010 DENX Software Engineering, Stefan Roese <sr@denx.de>
5 *
6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without
8 * any warranty of any kind, whether express or implied.
9 */
10
11/dts-v1/;
12
13/ {
14 #address-cells = <2>;
15 #size-cells = <2>;
16 model = "mosaixtech,icon";
17 compatible = "mosaixtech,icon";
18 dcr-parent = <&{/cpus/cpu@0}>;
19
20 aliases {
21 ethernet0 = &EMAC0;
22 serial0 = &UART0;
23 serial1 = &UART1;
24 serial2 = &UART2;
25 };
26
27 cpus {
28 #address-cells = <1>;
29 #size-cells = <0>;
30
31 cpu@0 {
32 device_type = "cpu";
33 model = "PowerPC,440SPe";
34 reg = <0x00000000>;
35 clock-frequency = <0>; /* Filled in by U-Boot */
36 timebase-frequency = <0>; /* Filled in by U-Boot */
37 i-cache-line-size = <32>;
38 d-cache-line-size = <32>;
39 i-cache-size = <32768>;
40 d-cache-size = <32768>;
41 dcr-controller;
42 dcr-access-method = "native";
43 reset-type = <2>; /* Use chip-reset */
44 };
45 };
46
47 memory {
48 device_type = "memory";
49 reg = <0x0 0x00000000 0x0 0x00000000>; /* Filled in by U-Boot */
50 };
51
52 UIC0: interrupt-controller0 {
53 compatible = "ibm,uic-440spe","ibm,uic";
54 interrupt-controller;
55 cell-index = <0>;
56 dcr-reg = <0x0c0 0x009>;
57 #address-cells = <0>;
58 #size-cells = <0>;
59 #interrupt-cells = <2>;
60 };
61
62 UIC1: interrupt-controller1 {
63 compatible = "ibm,uic-440spe","ibm,uic";
64 interrupt-controller;
65 cell-index = <1>;
66 dcr-reg = <0x0d0 0x009>;
67 #address-cells = <0>;
68 #size-cells = <0>;
69 #interrupt-cells = <2>;
70 interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */
71 interrupt-parent = <&UIC0>;
72 };
73
74 UIC2: interrupt-controller2 {
75 compatible = "ibm,uic-440spe","ibm,uic";
76 interrupt-controller;
77 cell-index = <2>;
78 dcr-reg = <0x0e0 0x009>;
79 #address-cells = <0>;
80 #size-cells = <0>;
81 #interrupt-cells = <2>;
82 interrupts = <0xa 0x4 0xb 0x4>; /* cascade */
83 interrupt-parent = <&UIC0>;
84 };
85
86 UIC3: interrupt-controller3 {
87 compatible = "ibm,uic-440spe","ibm,uic";
88 interrupt-controller;
89 cell-index = <3>;
90 dcr-reg = <0x0f0 0x009>;
91 #address-cells = <0>;
92 #size-cells = <0>;
93 #interrupt-cells = <2>;
94 interrupts = <0x10 0x4 0x11 0x4>; /* cascade */
95 interrupt-parent = <&UIC0>;
96 };
97
98 SDR0: sdr {
99 compatible = "ibm,sdr-440spe";
100 dcr-reg = <0x00e 0x002>;
101 };
102
103 CPR0: cpr {
104 compatible = "ibm,cpr-440spe";
105 dcr-reg = <0x00c 0x002>;
106 };
107
108 MQ0: mq {
109 compatible = "ibm,mq-440spe";
110 dcr-reg = <0x040 0x020>;
111 };
112
113 plb {
114 compatible = "ibm,plb-440spe", "ibm,plb-440gp", "ibm,plb4";
115 #address-cells = <2>;
116 #size-cells = <1>;
117 /* addr-child addr-parent size */
118 ranges = <0x4 0x00100000 0x4 0x00100000 0x00001000
119 0x4 0x00200000 0x4 0x00200000 0x00000400
120 0x4 0xe0000000 0x4 0xe0000000 0x20000000
121 0xc 0x00000000 0xc 0x00000000 0x20000000
122 0xd 0x00000000 0xd 0x00000000 0x80000000
123 0xd 0x80000000 0xd 0x80000000 0x80000000
124 0xe 0x00000000 0xe 0x00000000 0x80000000
125 0xe 0x80000000 0xe 0x80000000 0x80000000
126 0xf 0x00000000 0xf 0x00000000 0x80000000
127 0xf 0x80000000 0xf 0x80000000 0x80000000>;
128 clock-frequency = <0>; /* Filled in by U-Boot */
129
130 SDRAM0: sdram {
131 compatible = "ibm,sdram-440spe", "ibm,sdram-405gp";
132 dcr-reg = <0x010 0x002>;
133 };
134
135 MAL0: mcmal {
136 compatible = "ibm,mcmal-440spe", "ibm,mcmal2";
137 dcr-reg = <0x180 0x062>;
138 num-tx-chans = <2>;
139 num-rx-chans = <1>;
140 interrupt-parent = <&MAL0>;
141 interrupts = <0x0 0x1 0x2 0x3 0x4>;
142 #interrupt-cells = <1>;
143 #address-cells = <0>;
144 #size-cells = <0>;
145 interrupt-map = </*TXEOB*/ 0x0 &UIC1 0x6 0x4
146 /*RXEOB*/ 0x1 &UIC1 0x7 0x4
147 /*SERR*/ 0x2 &UIC1 0x1 0x4
148 /*TXDE*/ 0x3 &UIC1 0x2 0x4
149 /*RXDE*/ 0x4 &UIC1 0x3 0x4>;
150 };
151
152 POB0: opb {
153 compatible = "ibm,opb-440spe", "ibm,opb-440gp", "ibm,opb";
154 #address-cells = <1>;
155 #size-cells = <1>;
156 ranges = <0xe0000000 0x00000004 0xe0000000 0x20000000>;
157 clock-frequency = <0>; /* Filled in by U-Boot */
158
159 EBC0: ebc {
160 compatible = "ibm,ebc-440spe", "ibm,ebc-440gp", "ibm,ebc";
161 dcr-reg = <0x012 0x002>;
162 #address-cells = <2>;
163 #size-cells = <1>;
164 clock-frequency = <0>; /* Filled in by U-Boot */
165 /* ranges property is supplied by U-Boot */
166 interrupts = <0x5 0x1>;
167 interrupt-parent = <&UIC1>;
168
169 nor_flash@0,0 {
170 compatible = "cfi-flash";
171 bank-width = <2>;
172 reg = <0x00000000 0x00000000 0x01000000>;
173 #address-cells = <1>;
174 #size-cells = <1>;
175 partition@0 {
176 label = "kernel";
177 reg = <0x00000000 0x001e0000>;
178 };
179 partition@1e0000 {
180 label = "dtb";
181 reg = <0x001e0000 0x00020000>;
182 };
183 partition@200000 {
184 label = "root";
185 reg = <0x00200000 0x00200000>;
186 };
187 partition@400000 {
188 label = "user";
189 reg = <0x00400000 0x00b60000>;
190 };
191 partition@f60000 {
192 label = "env";
193 reg = <0x00f60000 0x00040000>;
194 };
195 partition@fa0000 {
196 label = "u-boot";
197 reg = <0x00fa0000 0x00060000>;
198 };
199 };
200
201 SysACE_CompactFlash: sysace@1,0 {
202 compatible = "xlnx,sysace";
203 interrupt-parent = <&UIC2>;
204 interrupts = <24 0x4>;
205 reg = <0x00000001 0x00000000 0x10000>;
206 };
207 };
208
209 UART0: serial@f0000200 {
210 device_type = "serial";
211 compatible = "ns16550";
212 reg = <0xf0000200 0x00000008>;
213 virtual-reg = <0xa0000200>;
214 clock-frequency = <0>; /* Filled in by U-Boot */
215 current-speed = <115200>;
216 interrupt-parent = <&UIC0>;
217 interrupts = <0x0 0x4>;
218 };
219
220 UART1: serial@f0000300 {
221 device_type = "serial";
222 compatible = "ns16550";
223 reg = <0xf0000300 0x00000008>;
224 virtual-reg = <0xa0000300>;
225 clock-frequency = <0>;
226 current-speed = <0>;
227 interrupt-parent = <&UIC0>;
228 interrupts = <0x1 0x4>;
229 };
230
231
232 UART2: serial@f0000600 {
233 device_type = "serial";
234 compatible = "ns16550";
235 reg = <0xf0000600 0x00000008>;
236 virtual-reg = <0xa0000600>;
237 clock-frequency = <0>;
238 current-speed = <0>;
239 interrupt-parent = <&UIC1>;
240 interrupts = <0x5 0x4>;
241 };
242
243 IIC0: i2c@f0000400 {
244 compatible = "ibm,iic-440spe", "ibm,iic-440gp", "ibm,iic";
245 reg = <0xf0000400 0x00000014>;
246 interrupt-parent = <&UIC0>;
247 interrupts = <0x2 0x4>;
248 };
249
250 IIC1: i2c@f0000500 {
251 compatible = "ibm,iic-440spe", "ibm,iic-440gp", "ibm,iic";
252 reg = <0xf0000500 0x00000014>;
253 interrupt-parent = <&UIC0>;
254 interrupts = <0x3 0x4>;
255 #address-cells = <1>;
256 #size-cells = <0>;
257
258 rtc@68 {
259 compatible = "stm,m41t00";
260 reg = <0x68>;
261 };
262 };
263
264 EMAC0: ethernet@f0000800 {
265 linux,network-index = <0x0>;
266 device_type = "network";
267 compatible = "ibm,emac-440spe", "ibm,emac4";
268 interrupt-parent = <&UIC1>;
269 interrupts = <0x1c 0x4 0x1d 0x4>;
270 reg = <0xf0000800 0x00000074>;
271 local-mac-address = [000000000000];
272 mal-device = <&MAL0>;
273 mal-tx-channel = <0>;
274 mal-rx-channel = <0>;
275 cell-index = <0>;
276 max-frame-size = <9000>;
277 rx-fifo-size = <4096>;
278 tx-fifo-size = <2048>;
279 phy-mode = "gmii";
280 phy-map = <0x00000000>;
281 has-inverted-stacr-oc;
282 has-new-stacr-staopc;
283 };
284 };
285
286 PCIX0: pci@c0ec00000 {
287 device_type = "pci";
288 #interrupt-cells = <1>;
289 #size-cells = <2>;
290 #address-cells = <3>;
291 compatible = "ibm,plb-pcix-440spe", "ibm,plb-pcix";
292 primary;
293 large-inbound-windows;
294 enable-msi-hole;
295 reg = <0x0000000c 0x0ec00000 0x00000008 /* Config space access */
296 0x00000000 0x00000000 0x00000000 /* no IACK cycles */
297 0x0000000c 0x0ed00000 0x00000004 /* Special cycles */
298 0x0000000c 0x0ec80000 0x00000100 /* Internal registers */
299 0x0000000c 0x0ec80100 0x000000fc>; /* Internal messaging registers */
300
301 /* Outbound ranges, one memory and one IO,
302 * later cannot be changed
303 */
304 ranges = <0x02000000 0x00000000 0x80000000 0x0000000d 0x80000000 0x00000000 0x80000000
305 0x01000000 0x00000000 0x00000000 0x0000000c 0x08000000 0x00000000 0x00010000>;
306
307 /* Inbound 4GB range starting at 0 */
308 dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x1 0x00000000>;
309
310 /* This drives busses 0 to 0xf */
311 bus-range = <0x0 0xf>;
312
313 /* PCI-X interrupt (SM502) is routed to extIRQ10 (UIC1, 19) */
314 interrupt-map-mask = <0x0 0x0 0x0 0x0>;
315 interrupt-map = <0x0 0x0 0x0 0x0 &UIC1 19 0x8>;
316 };
317
318 PCIE0: pciex@d00000000 {
319 device_type = "pci";
320 #interrupt-cells = <1>;
321 #size-cells = <2>;
322 #address-cells = <3>;
323 compatible = "ibm,plb-pciex-440spe", "ibm,plb-pciex";
324 primary;
325 port = <0x0>; /* port number */
326 reg = <0x0000000d 0x00000000 0x20000000 /* Config space access */
327 0x0000000c 0x10000000 0x00001000>; /* Registers */
328 dcr-reg = <0x100 0x020>;
329 sdr-base = <0x300>;
330
331 /* Outbound ranges, one memory and one IO,
332 * later cannot be changed
333 */
334 ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x00000000 0x00000000 0x80000000
335 0x01000000 0x00000000 0x00000000 0x0000000f 0x80000000 0x00000000 0x00010000>;
336
337 /* Inbound 4GB range starting at 0 */
338 dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x1 0x00000000>;
339
340 /* This drives busses 0x10 to 0x1f */
341 bus-range = <0x10 0x1f>;
342
343 /* Legacy interrupts (note the weird polarity, the bridge seems
344 * to invert PCIe legacy interrupts).
345 * We are de-swizzling here because the numbers are actually for
346 * port of the root complex virtual P2P bridge. But I want
347 * to avoid putting a node for it in the tree, so the numbers
348 * below are basically de-swizzled numbers.
349 * The real slot is on idsel 0, so the swizzling is 1:1
350 */
351 interrupt-map-mask = <0x0 0x0 0x0 0x7>;
352 interrupt-map = <
353 0x0 0x0 0x0 0x1 &UIC3 0x0 0x4 /* swizzled int A */
354 0x0 0x0 0x0 0x2 &UIC3 0x1 0x4 /* swizzled int B */
355 0x0 0x0 0x0 0x3 &UIC3 0x2 0x4 /* swizzled int C */
356 0x0 0x0 0x0 0x4 &UIC3 0x3 0x4 /* swizzled int D */>;
357 };
358
359 PCIE1: pciex@d20000000 {
360 device_type = "pci";
361 #interrupt-cells = <1>;
362 #size-cells = <2>;
363 #address-cells = <3>;
364 compatible = "ibm,plb-pciex-440spe", "ibm,plb-pciex";
365 primary;
366 port = <0x1>; /* port number */
367 reg = <0x0000000d 0x20000000 0x20000000 /* Config space access */
368 0x0000000c 0x10001000 0x00001000>; /* Registers */
369 dcr-reg = <0x120 0x020>;
370 sdr-base = <0x340>;
371
372 /* Outbound ranges, one memory and one IO,
373 * later cannot be changed
374 */
375 ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x80000000 0x00000000 0x80000000
376 0x01000000 0x00000000 0x00000000 0x0000000f 0x80010000 0x00000000 0x00010000>;
377
378 /* Inbound 4GB range starting at 0 */
379 dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x1 0x00000000>;
380
381 /* This drives busses 0x20 to 0x2f */
382 bus-range = <0x20 0x2f>;
383
384 /* Legacy interrupts (note the weird polarity, the bridge seems
385 * to invert PCIe legacy interrupts).
386 * We are de-swizzling here because the numbers are actually for
387 * port of the root complex virtual P2P bridge. But I want
388 * to avoid putting a node for it in the tree, so the numbers
389 * below are basically de-swizzled numbers.
390 * The real slot is on idsel 0, so the swizzling is 1:1
391 */
392 interrupt-map-mask = <0x0 0x0 0x0 0x7>;
393 interrupt-map = <
394 0x0 0x0 0x0 0x1 &UIC3 0x4 0x4 /* swizzled int A */
395 0x0 0x0 0x0 0x2 &UIC3 0x5 0x4 /* swizzled int B */
396 0x0 0x0 0x0 0x3 &UIC3 0x6 0x4 /* swizzled int C */
397 0x0 0x0 0x0 0x4 &UIC3 0x7 0x4 /* swizzled int D */>;
398 };
399
400 I2O: i2o@400100000 {
401 compatible = "ibm,i2o-440spe";
402 reg = <0x00000004 0x00100000 0x100>;
403 dcr-reg = <0x060 0x020>;
404 };
405
406 DMA0: dma0@400100100 {
407 compatible = "ibm,dma-440spe";
408 cell-index = <0>;
409 reg = <0x00000004 0x00100100 0x100>;
410 dcr-reg = <0x060 0x020>;
411 interrupt-parent = <&DMA0>;
412 interrupts = <0 1>;
413 #interrupt-cells = <1>;
414 #address-cells = <0>;
415 #size-cells = <0>;
416 interrupt-map = <
417 0 &UIC0 0x14 4
418 1 &UIC1 0x16 4>;
419 };
420
421 DMA1: dma1@400100200 {
422 compatible = "ibm,dma-440spe";
423 cell-index = <1>;
424 reg = <0x00000004 0x00100200 0x100>;
425 dcr-reg = <0x060 0x020>;
426 interrupt-parent = <&DMA1>;
427 interrupts = <0 1>;
428 #interrupt-cells = <1>;
429 #address-cells = <0>;
430 #size-cells = <0>;
431 interrupt-map = <
432 0 &UIC0 0x16 4
433 1 &UIC1 0x16 4>;
434 };
435
436 xor-accel@400200000 {
437 compatible = "amcc,xor-accelerator";
438 reg = <0x00000004 0x00200000 0x400>;
439 interrupt-parent = <&UIC1>;
440 interrupts = <0x1f 4>;
441 };
442 };
443
444 chosen {
445 linux,stdout-path = "/plb/opb/serial@f0000200";
446 };
447};
diff --git a/arch/powerpc/boot/dts/katmai.dts b/arch/powerpc/boot/dts/katmai.dts
index 8cf2c0c88c05..7c3be5e45748 100644
--- a/arch/powerpc/boot/dts/katmai.dts
+++ b/arch/powerpc/boot/dts/katmai.dts
@@ -44,6 +44,7 @@
 			d-cache-size = <32768>;
 			dcr-controller;
 			dcr-access-method = "native";
+			reset-type = <2>;	/* Use chip-reset */
 		};
 	};
 
diff --git a/arch/powerpc/boot/dts/mpc8548cds.dts b/arch/powerpc/boot/dts/mpc8548cds.dts
index 4173af387c63..0f5262452682 100644
--- a/arch/powerpc/boot/dts/mpc8548cds.dts
+++ b/arch/powerpc/boot/dts/mpc8548cds.dts
@@ -20,10 +20,8 @@
 	aliases {
 		ethernet0 = &enet0;
 		ethernet1 = &enet1;
-/*
 		ethernet2 = &enet2;
 		ethernet3 = &enet3;
-*/
 		serial0 = &serial0;
 		serial1 = &serial1;
 		pci0 = &pci0;
@@ -254,7 +252,6 @@
 			};
 		};
 
-/* eTSEC 3/4 are currently broken
 		enet2: ethernet@26000 {
 			#address-cells = <1>;
 			#size-cells = <1>;
@@ -310,7 +307,6 @@
 				};
 			};
 		};
-*/
 
 		serial0: serial@4500 {
 			cell-index = <0>;
diff --git a/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts b/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
index 5bd1011fde96..3375c2ab0c32 100644
--- a/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
@@ -215,6 +215,18 @@
 			clock-frequency = <0>;
 		};
 
+		msi@41600 {
+			compatible = "fsl,mpc8572-msi", "fsl,mpic-msi";
+			reg = <0x41600 0x80>;
+			msi-available-ranges = <0 0x80>;
+			interrupts = <
+				0xe0 0
+				0xe1 0
+				0xe2 0
+				0xe3 0>;
+			interrupt-parent = <&mpic>;
+		};
+
 		global-utilities@e0000 {	//global utilities block
 			compatible = "fsl,mpc8572-guts";
 			reg = <0xe0000 0x1000>;
@@ -243,8 +255,7 @@
 			protected-sources = <
 			31 32 33 37 38 39	/* enet2 enet3 */
 			76 77 78 79 26 42	/* dma2 pci2 serial*/
-			0xe0 0xe1 0xe2 0xe3	/* msi */
-			0xe4 0xe5 0xe6 0xe7
+			0xe4 0xe5 0xe6 0xe7	/* msi */
 			>;
 		};
 	};
diff --git a/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts b/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
index 0efc3456e297..e7b477f6a3fe 100644
--- a/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
@@ -154,12 +154,8 @@
 		msi@41600 {
 			compatible = "fsl,mpc8572-msi", "fsl,mpic-msi";
 			reg = <0x41600 0x80>;
-			msi-available-ranges = <0 0x100>;
+			msi-available-ranges = <0x80 0x80>;
 			interrupts = <
-				0xe0 0
-				0xe1 0
-				0xe2 0
-				0xe3 0
 				0xe4 0
 				0xe5 0
 				0xe6 0
@@ -190,6 +186,7 @@
 			0x1 0x2 0x3 0x4		/* pci slot */
 			0x9 0xa 0xb 0xc		/* usb */
 			0x6 0x7 0xe 0x5		/* Audio elgacy SATA */
+			0xe0 0xe1 0xe2 0xe3	/* msi */
 			>;
 		};
 	};
diff --git a/arch/powerpc/boot/dts/p1021mds.dts b/arch/powerpc/boot/dts/p1021mds.dts
new file mode 100644
index 000000000000..7fad2df25981
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1021mds.dts
@@ -0,0 +1,698 @@
1/*
2 * P1021 MDS Device Tree Source
3 *
4 * Copyright 2010 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12/dts-v1/;
13/ {
14 model = "fsl,P1021";
15 compatible = "fsl,P1021MDS";
16 #address-cells = <2>;
17 #size-cells = <2>;
18
19 aliases {
20 serial0 = &serial0;
21 serial1 = &serial1;
22 ethernet0 = &enet0;
23 ethernet1 = &enet1;
24 ethernet2 = &enet2;
25 ethernet3 = &enet3;
26 ethernet4 = &enet4;
27 pci0 = &pci0;
28 pci1 = &pci1;
29 };
30
31 cpus {
32 #address-cells = <1>;
33 #size-cells = <0>;
34
35 PowerPC,P1021@0 {
36 device_type = "cpu";
37 reg = <0x0>;
38 next-level-cache = <&L2>;
39 };
40
41 PowerPC,P1021@1 {
42 device_type = "cpu";
43 reg = <0x1>;
44 next-level-cache = <&L2>;
45 };
46 };
47
48 memory {
49 device_type = "memory";
50 };
51
52 localbus@ffe05000 {
53 #address-cells = <2>;
54 #size-cells = <1>;
55 compatible = "fsl,p1021-elbc", "fsl,elbc", "simple-bus";
56 reg = <0 0xffe05000 0 0x1000>;
57 interrupts = <19 2>;
58 interrupt-parent = <&mpic>;
59
60 /* NAND Flash, BCSR, PMC0/1*/
61 ranges = <0x0 0x0 0x0 0xfc000000 0x02000000
62 0x1 0x0 0x0 0xf8000000 0x00008000
63 0x2 0x0 0x0 0xf8010000 0x00020000
64 0x3 0x0 0x0 0xf8020000 0x00020000>;
65
66 nand@0,0 {
67 #address-cells = <1>;
68 #size-cells = <1>;
69 compatible = "fsl,p1021-fcm-nand",
70 "fsl,elbc-fcm-nand";
71 reg = <0x0 0x0 0x40000>;
72
73 partition@0 {
74 /* This location must not be altered */
75 /* 1MB for u-boot Bootloader Image */
76 reg = <0x0 0x00100000>;
77 label = "NAND (RO) U-Boot Image";
78 read-only;
79 };
80
81 partition@100000 {
82 /* 1MB for DTB Image */
83 reg = <0x00100000 0x00100000>;
84 label = "NAND (RO) DTB Image";
85 read-only;
86 };
87
88 partition@200000 {
89 /* 4MB for Linux Kernel Image */
90 reg = <0x00200000 0x00400000>;
91 label = "NAND (RO) Linux Kernel Image";
92 read-only;
93 };
94
95 partition@600000 {
96 /* 5MB for Compressed Root file System Image */
97 reg = <0x00600000 0x00500000>;
98 label = "NAND (RO) Compressed RFS Image";
99 read-only;
100 };
101
102 partition@b00000 {
103 /* 6MB for JFFS2 based Root file System */
104 reg = <0x00a00000 0x00600000>;
105 label = "NAND (RW) JFFS2 Root File System";
106 };
107
108 partition@1100000 {
109 /* 14MB for JFFS2 based Root file System */
110 reg = <0x01100000 0x00e00000>;
111 label = "NAND (RW) Writable User area";
112 };
113
114 partition@1f00000 {
115 /* 1MB for microcode */
116 reg = <0x01f00000 0x00100000>;
117 label = "NAND (RO) QE Ucode";
118 read-only;
119 };
120 };
121
122 bcsr@1,0 {
123 #address-cells = <1>;
124 #size-cells = <1>;
125 compatible = "fsl,p1021mds-bcsr";
126 reg = <1 0 0x8000>;
127 ranges = <0 1 0 0x8000>;
128 };
129
130 pib@2,0 {
131 compatible = "fsl,p1021mds-pib";
132 reg = <2 0 0x10000>;
133 };
134
135 pib@3,0 {
136 compatible = "fsl,p1021mds-pib";
137 reg = <3 0 0x10000>;
138 };
139 };
140
141 soc@ffe00000 {
142
143 #address-cells = <1>;
144 #size-cells = <1>;
145 device_type = "soc";
146 compatible = "fsl,p1021-immr", "simple-bus";
147 ranges = <0x0 0x0 0xffe00000 0x100000>;
148 bus-frequency = <0>; // Filled out by uboot.
149
150 ecm-law@0 {
151 compatible = "fsl,ecm-law";
152 reg = <0x0 0x1000>;
153 fsl,num-laws = <12>;
154 };
155
156 ecm@1000 {
157 compatible = "fsl,p1021-ecm", "fsl,ecm";
158 reg = <0x1000 0x1000>;
159 interrupts = <16 2>;
160 interrupt-parent = <&mpic>;
161 };
162
163 memory-controller@2000 {
164 compatible = "fsl,p1021-memory-controller";
165 reg = <0x2000 0x1000>;
166 interrupt-parent = <&mpic>;
167 interrupts = <16 2>;
168 };
169
170 i2c@3000 {
171 #address-cells = <1>;
172 #size-cells = <0>;
173 cell-index = <0>;
174 compatible = "fsl-i2c";
175 reg = <0x3000 0x100>;
176 interrupts = <43 2>;
177 interrupt-parent = <&mpic>;
178 dfsrr;
179 rtc@68 {
180 compatible = "dallas,ds1374";
181 reg = <0x68>;
182 };
183 };
184
185 i2c@3100 {
186 #address-cells = <1>;
187 #size-cells = <0>;
188 cell-index = <1>;
189 compatible = "fsl-i2c";
190 reg = <0x3100 0x100>;
191 interrupts = <43 2>;
192 interrupt-parent = <&mpic>;
193 dfsrr;
194 };
195
196 serial0: serial@4500 {
197 cell-index = <0>;
198 device_type = "serial";
199 compatible = "ns16550";
200 reg = <0x4500 0x100>;
201 clock-frequency = <0>;
202 interrupts = <42 2>;
203 interrupt-parent = <&mpic>;
204 };
205
206 serial1: serial@4600 {
207 cell-index = <1>;
208 device_type = "serial";
209 compatible = "ns16550";
210 reg = <0x4600 0x100>;
211 clock-frequency = <0>;
212 interrupts = <42 2>;
213 interrupt-parent = <&mpic>;
214 };
215
216 spi@7000 {
217 cell-index = <0>;
218 #address-cells = <1>;
219 #size-cells = <0>;
220 compatible = "fsl,espi";
221 reg = <0x7000 0x1000>;
222 interrupts = <59 0x2>;
223 interrupt-parent = <&mpic>;
224 espi,num-ss-bits = <4>;
225 mode = "cpu";
226
227 fsl_m25p80@0 {
228 #address-cells = <1>;
229 #size-cells = <1>;
230 compatible = "fsl,espi-flash";
231 reg = <0>;
232 linux,modalias = "fsl_m25p80";
233 spi-max-frequency = <40000000>; /* input clock */
234 partition@u-boot {
235 label = "u-boot-spi";
236 reg = <0x00000000 0x00100000>;
237 read-only;
238 };
239 partition@kernel {
240 label = "kernel-spi";
241 reg = <0x00100000 0x00500000>;
242 read-only;
243 };
244 partition@dtb {
245 label = "dtb-spi";
246 reg = <0x00600000 0x00100000>;
247 read-only;
248 };
249 partition@fs {
250 label = "file system-spi";
251 reg = <0x00700000 0x00900000>;
252 };
253 };
254 };
255
256 gpio: gpio-controller@f000 {
257 #gpio-cells = <2>;
258 compatible = "fsl,mpc8572-gpio";
259 reg = <0xf000 0x100>;
260 interrupts = <47 0x2>;
261 interrupt-parent = <&mpic>;
262 gpio-controller;
263 };
264
265 L2: l2-cache-controller@20000 {
266 compatible = "fsl,p1021-l2-cache-controller";
267 reg = <0x20000 0x1000>;
268 cache-line-size = <32>; // 32 bytes
269 cache-size = <0x40000>; // L2,256K
270 interrupt-parent = <&mpic>;
271 interrupts = <16 2>;
272 };
273
274 dma@21300 {
275 #address-cells = <1>;
276 #size-cells = <1>;
277 compatible = "fsl,eloplus-dma";
278 reg = <0x21300 0x4>;
279 ranges = <0x0 0x21100 0x200>;
280 cell-index = <0>;
281 dma-channel@0 {
282 compatible = "fsl,eloplus-dma-channel";
283 reg = <0x0 0x80>;
284 cell-index = <0>;
285 interrupt-parent = <&mpic>;
286 interrupts = <20 2>;
287 };
288 dma-channel@80 {
289 compatible = "fsl,eloplus-dma-channel";
290 reg = <0x80 0x80>;
291 cell-index = <1>;
292 interrupt-parent = <&mpic>;
293 interrupts = <21 2>;
294 };
295 dma-channel@100 {
296 compatible = "fsl,eloplus-dma-channel";
297 reg = <0x100 0x80>;
298 cell-index = <2>;
299 interrupt-parent = <&mpic>;
300 interrupts = <22 2>;
301 };
302 dma-channel@180 {
303 compatible = "fsl,eloplus-dma-channel";
304 reg = <0x180 0x80>;
305 cell-index = <3>;
306 interrupt-parent = <&mpic>;
307 interrupts = <23 2>;
308 };
309 };
310
311 usb@22000 {
312 #address-cells = <1>;
313 #size-cells = <0>;
314 compatible = "fsl-usb2-dr";
315 reg = <0x22000 0x1000>;
316 interrupt-parent = <&mpic>;
317 interrupts = <28 0x2>;
318 phy_type = "ulpi";
319 };
320
321 mdio@24000 {
322 #address-cells = <1>;
323 #size-cells = <0>;
324 compatible = "fsl,etsec2-mdio";
325 reg = <0x24000 0x1000 0xb0030 0x4>;
326
327 phy0: ethernet-phy@0 {
328 interrupt-parent = <&mpic>;
329 interrupts = <1 1>;
330 reg = <0x0>;
331 };
332 phy1: ethernet-phy@1 {
333 interrupt-parent = <&mpic>;
334 interrupts = <2 1>;
335 reg = <0x1>;
336 };
337 phy4: ethernet-phy@4 {
338 interrupt-parent = <&mpic>;
339 reg = <0x4>;
340 };
341 };
342
343 mdio@25000 {
344 #address-cells = <1>;
345 #size-cells = <0>;
346 compatible = "fsl,etsec2-tbi";
347 reg = <0x25000 0x1000 0xb1030 0x4>;
348 tbi0: tbi-phy@11 {
349 reg = <0x11>;
350 device_type = "tbi-phy";
351 };
352 };
353
354 enet0: ethernet@B0000 {
355 #address-cells = <1>;
356 #size-cells = <1>;
357 cell-index = <0>;
358 device_type = "network";
359 model = "eTSEC";
360 compatible = "fsl,etsec2";
361 fsl,num_rx_queues = <0x8>;
362 fsl,num_tx_queues = <0x8>;
363 local-mac-address = [ 00 00 00 00 00 00 ];
364 interrupt-parent = <&mpic>;
365 phy-handle = <&phy0>;
366 phy-connection-type = "rgmii-id";
367 queue-group@0{
368 #address-cells = <1>;
369 #size-cells = <1>;
370 reg = <0xB0000 0x1000>;
371 interrupts = <29 2 30 2 34 2>;
372 };
373 queue-group@1{
374 #address-cells = <1>;
375 #size-cells = <1>;
376 reg = <0xB4000 0x1000>;
377 interrupts = <17 2 18 2 24 2>;
378 };
379 };
380
381 enet1: ethernet@B1000 {
382 #address-cells = <1>;
383 #size-cells = <1>;
384 cell-index = <0>;
385 device_type = "network";
386 model = "eTSEC";
387 compatible = "fsl,etsec2";
388 fsl,num_rx_queues = <0x8>;
389 fsl,num_tx_queues = <0x8>;
390 local-mac-address = [ 00 00 00 00 00 00 ];
391 interrupt-parent = <&mpic>;
392 phy-handle = <&phy4>;
393 tbi-handle = <&tbi0>;
394 phy-connection-type = "sgmii";
395 queue-group@0{
396 #address-cells = <1>;
397 #size-cells = <1>;
398 reg = <0xB1000 0x1000>;
399 interrupts = <35 2 36 2 40 2>;
400 };
401 queue-group@1{
402 #address-cells = <1>;
403 #size-cells = <1>;
404 reg = <0xB5000 0x1000>;
405 interrupts = <51 2 52 2 67 2>;
406 };
407 };
408
409 enet2: ethernet@B2000 {
410 #address-cells = <1>;
411 #size-cells = <1>;
412 cell-index = <0>;
413 device_type = "network";
414 model = "eTSEC";
415 compatible = "fsl,etsec2";
416 fsl,num_rx_queues = <0x8>;
417 fsl,num_tx_queues = <0x8>;
418 local-mac-address = [ 00 00 00 00 00 00 ];
419 interrupt-parent = <&mpic>;
420 phy-handle = <&phy1>;
421 phy-connection-type = "rgmii-id";
422 queue-group@0{
423 #address-cells = <1>;
424 #size-cells = <1>;
425 reg = <0xB2000 0x1000>;
426 interrupts = <31 2 32 2 33 2>;
427 };
428 queue-group@1{
429 #address-cells = <1>;
430 #size-cells = <1>;
431 reg = <0xB6000 0x1000>;
432 interrupts = <25 2 26 2 27 2>;
433 };
434 };
435
436 sdhci@2e000 {
437 compatible = "fsl,p1021-esdhc", "fsl,esdhc";
438 reg = <0x2e000 0x1000>;
439 interrupts = <72 0x2>;
440 interrupt-parent = <&mpic>;
441 /* Filled in by U-Boot */
442 clock-frequency = <0>;
443 };
444
445 crypto@30000 {
446 compatible = "fsl,sec3.3", "fsl,sec3.1",
447 "fsl,sec3.0", "fsl,sec2.4",
448 "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
449 reg = <0x30000 0x10000>;
450 interrupts = <45 2 58 2>;
451 interrupt-parent = <&mpic>;
452 fsl,num-channels = <4>;
453 fsl,channel-fifo-len = <24>;
454 fsl,exec-units-mask = <0x97c>;
455 fsl,descriptor-types-mask = <0x3a30abf>;
456 };
457
458 mpic: pic@40000 {
459 interrupt-controller;
460 #address-cells = <0>;
461 #interrupt-cells = <2>;
462 reg = <0x40000 0x40000>;
463 compatible = "chrp,open-pic";
464 device_type = "open-pic";
465 };
466
467 msi@41600 {
468 compatible = "fsl,p1021-msi", "fsl,mpic-msi";
469 reg = <0x41600 0x80>;
470 msi-available-ranges = <0 0x100>;
471 interrupts = <
472 0xe0 0
473 0xe1 0
474 0xe2 0
475 0xe3 0
476 0xe4 0
477 0xe5 0
478 0xe6 0
479 0xe7 0>;
480 interrupt-parent = <&mpic>;
481 };
482
483 global-utilities@e0000 { //global utilities block
484 compatible = "fsl,p1021-guts";
485 reg = <0xe0000 0x1000>;
486 fsl,has-rstcr;
487 };
488
489 par_io@e0100 {
490 #address-cells = <1>;
491 #size-cells = <1>;
492 reg = <0xe0100 0x60>;
493 ranges = <0x0 0xe0100 0x60>;
494 device_type = "par_io";
495 num-ports = <3>;
496 pio1: ucc_pin@01 {
497 pio-map = <
498 /* port pin dir open_drain assignment has_irq */
499 0x1 0x13 0x1 0x0 0x1 0x0 /* QE_MUX_MDC */
500 0x1 0x14 0x3 0x0 0x1 0x0 /* QE_MUX_MDIO */
501 0x0 0x17 0x2 0x0 0x2 0x0 /* CLK12 */
502 0x0 0x18 0x2 0x0 0x1 0x0 /* CLK9
503*/
504 0x0 0x7 0x1 0x0 0x2 0x0 /* ENET1_TXD0_SER1_TXD0 */
505 0x0 0x9 0x1 0x0 0x2 0x0 /* ENET1_TXD1_SER1_TXD1 */
506 0x0 0xb 0x1 0x0 0x2 0x0 /* ENET1_TXD2_SER1_TXD2 */
507 0x0 0xc 0x1 0x0 0x2 0x0 /* ENET1_TXD3_SER1_TXD3 */
508 0x0 0x6 0x2 0x0 0x2 0x0 /* ENET1_RXD0_SER1_RXD0 */
509 0x0 0xa 0x2 0x0 0x2 0x0 /* ENET1_RXD1_SER1_RXD1 */
510 0x0 0xe 0x2 0x0 0x2 0x0 /* ENET1_RXD2_SER1_RXD2 */
511 0x0 0xf 0x2 0x0 0x2 0x0 /* ENET1_RXD3_SER1_RXD3 */
512 0x0 0x5 0x1 0x0 0x2 0x0 /* ENET1_TX_EN_SER1_RTS_B */
513 0x0 0xd 0x1 0x0 0x2 0x0 /* ENET1_TX_ER */
514 0x0 0x4 0x2 0x0 0x2 0x0 /* ENET1_RX_DV_SER1_CTS_B */
515 0x0 0x8 0x2 0x0 0x2 0x0 /* ENET1_RX_ER_SER1_CD_B */
516 0x0 0x11 0x2 0x0 0x2 0x0 /* ENET1_CRS */
517 0x0 0x10 0x2 0x0 0x2 0x0>; /* ENET1_COL */
518 };
519
520 pio2: ucc_pin@02 {
521 pio-map = <
522 /* port pin dir open_drain assignment has_irq */
523 0x1 0x13 0x1 0x0 0x1 0x0 /* QE_MUX_MDC */
524 0x1 0x14 0x3 0x0 0x1 0x0 /* QE_MUX_MDIO */
525 0x1 0xb 0x2 0x0 0x1 0x0 /* CLK13 */
526 0x1 0x7 0x1 0x0 0x2 0x0 /* ENET5_TXD0_SER5_TXD0 */
527 0x1 0xa 0x1 0x0 0x2 0x0 /* ENET5_TXD1_SER5_TXD1 */
528 0x1 0x6 0x2 0x0 0x2 0x0 /* ENET5_RXD0_SER5_RXD0 */
529 0x1 0x9 0x2 0x0 0x2 0x0 /* ENET5_RXD1_SER5_RXD1 */
530 0x1 0x5 0x1 0x0 0x2 0x0 /* ENET5_TX_EN_SER5_RTS_B */
531 0x1 0x4 0x2 0x0 0x2 0x0 /* ENET5_RX_DV_SER5_CTS_B */
532 0x1 0x8 0x2 0x0 0x2 0x0>; /* ENET5_RX_ER_SER5_CD_B */
533 };
534 };
535 };
536
537 pci0: pcie@ffe09000 {
538 compatible = "fsl,mpc8548-pcie";
539 device_type = "pci";
540 #interrupt-cells = <1>;
541 #size-cells = <2>;
542 #address-cells = <3>;
543 reg = <0 0xffe09000 0 0x1000>;
544 bus-range = <0 255>;
545 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
546 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
547 clock-frequency = <33333333>;
548 interrupt-parent = <&mpic>;
549 interrupts = <16 2>;
550 interrupt-map-mask = <0xf800 0 0 7>;
551 interrupt-map = <
552 /* IDSEL 0x0 */
553 0000 0 0 1 &mpic 4 1
554 0000 0 0 2 &mpic 5 1
555 0000 0 0 3 &mpic 6 1
556 0000 0 0 4 &mpic 7 1
557 >;
558 pcie@0 {
559 reg = <0x0 0x0 0x0 0x0 0x0>;
560 #size-cells = <2>;
561 #address-cells = <3>;
562 device_type = "pci";
563 ranges = <0x2000000 0x0 0xa0000000
564 0x2000000 0x0 0xa0000000
565 0x0 0x20000000
566
567 0x1000000 0x0 0x0
568 0x1000000 0x0 0x0
569 0x0 0x100000>;
570 };
571 };
572
573 pci1: pcie@ffe0a000 {
574 compatible = "fsl,mpc8548-pcie";
575 device_type = "pci";
576 #interrupt-cells = <1>;
577 #size-cells = <2>;
578 #address-cells = <3>;
579 reg = <0 0xffe0a000 0 0x1000>;
580 bus-range = <0 255>;
581 ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
582 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
583 clock-frequency = <33333333>;
584 interrupt-parent = <&mpic>;
585 interrupts = <16 2>;
586 interrupt-map-mask = <0xf800 0 0 7>;
587 interrupt-map = <
588 /* IDSEL 0x0 */
589 0000 0 0 1 &mpic 0 1
590 0000 0 0 2 &mpic 1 1
591 0000 0 0 3 &mpic 2 1
592 0000 0 0 4 &mpic 3 1
593 >;
594 pcie@0 {
595 reg = <0x0 0x0 0x0 0x0 0x0>;
596 #size-cells = <2>;
597 #address-cells = <3>;
598 device_type = "pci";
599 ranges = <0x2000000 0x0 0xc0000000
600 0x2000000 0x0 0xc0000000
601 0x0 0x20000000
602
603 0x1000000 0x0 0x0
604 0x1000000 0x0 0x0
605 0x0 0x100000>;
606 };
607 };
608
609 qe@ffe80000 {
610 #address-cells = <1>;
611 #size-cells = <1>;
612 device_type = "qe";
613 compatible = "fsl,qe";
614 ranges = <0x0 0x0 0xffe80000 0x40000>;
615 reg = <0 0xffe80000 0 0x480>;
616 brg-frequency = <0>;
617 bus-frequency = <0>;
618 fsl,qe-num-riscs = <1>;
619 fsl,qe-num-snums = <28>;
620
621 qeic: interrupt-controller@80 {
622 interrupt-controller;
623 compatible = "fsl,qe-ic";
624 #address-cells = <0>;
625 #interrupt-cells = <1>;
626 reg = <0x80 0x80>;
627 interrupts = <63 2 60 2>; //high:47 low:44
628 interrupt-parent = <&mpic>;
629 };
630
631 enet3: ucc@2000 {
632 device_type = "network";
633 compatible = "ucc_geth";
634 cell-index = <1>;
635 reg = <0x2000 0x200>;
636 interrupts = <32>;
637 interrupt-parent = <&qeic>;
638 local-mac-address = [ 00 00 00 00 00 00 ];
639 rx-clock-name = "clk12";
640 tx-clock-name = "clk9";
641 pio-handle = <&pio1>;
642 phy-handle = <&qe_phy0>;
643 phy-connection-type = "mii";
644 };
645
646 mdio@2120 {
647 #address-cells = <1>;
648 #size-cells = <0>;
649 reg = <0x2120 0x18>;
650 compatible = "fsl,ucc-mdio";
651
652 qe_phy0: ethernet-phy@0 {
653 interrupt-parent = <&mpic>;
654 interrupts = <4 1>;
655 reg = <0x0>;
656 device_type = "ethernet-phy";
657 };
658 qe_phy1: ethernet-phy@03 {
659 interrupt-parent = <&mpic>;
660 interrupts = <5 1>;
661 reg = <0x3>;
662 device_type = "ethernet-phy";
663 };
664 tbi-phy@11 {
665 reg = <0x11>;
666 device_type = "tbi-phy";
667 };
668 };
669
670 enet4: ucc@2400 {
671 device_type = "network";
672 compatible = "ucc_geth";
673 cell-index = <5>;
674 reg = <0x2400 0x200>;
675 interrupts = <40>;
676 interrupt-parent = <&qeic>;
677 local-mac-address = [ 00 00 00 00 00 00 ];
678 rx-clock-name = "none";
679 tx-clock-name = "clk13";
680 pio-handle = <&pio2>;
681 phy-handle = <&qe_phy1>;
682 phy-connection-type = "rmii";
683 };
684
685 muram@10000 {
686 #address-cells = <1>;
687 #size-cells = <1>;
688 compatible = "fsl,qe-muram", "fsl,cpm-muram";
689 ranges = <0x0 0x10000 0x6000>;
690
691 data-only@0 {
692 compatible = "fsl,qe-muram-data",
693 "fsl,cpm-muram-data";
694 reg = <0x0 0x6000>;
695 };
696 };
697 };
698};
diff --git a/arch/powerpc/boot/dts/redwood.dts b/arch/powerpc/boot/dts/redwood.dts
index d2af32e2bf7a..81636c01d906 100644
--- a/arch/powerpc/boot/dts/redwood.dts
+++ b/arch/powerpc/boot/dts/redwood.dts
@@ -234,10 +234,132 @@
 				has-inverted-stacr-oc;
 				has-new-stacr-staopc;
 			};
+		};
+		PCIE0: pciex@d00000000 {
+			device_type = "pci";
+			#interrupt-cells = <1>;
+			#size-cells = <2>;
+			#address-cells = <3>;
+			compatible = "ibm,plb-pciex-460sx", "ibm,plb-pciex";
+			primary;
+			port = <0x0>; /* port number */
+			reg = <0x0000000d 0x00000000 0x20000000	/* Config space access */
+			       0x0000000c 0x10000000 0x00001000>;	/* Registers */
+			dcr-reg = <0x100 0x020>;
+			sdr-base = <0x300>;
+
+			/* Outbound ranges, one memory and one IO,
+			 * later cannot be changed
+			 */
+			ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x00000000 0x00000000 0x80000000
+				  0x01000000 0x00000000 0x00000000 0x0000000f 0x80000000 0x00000000 0x00010000>;
+
+			/* Inbound 2GB range starting at 0 */
+			dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x80000000>;
 
+			/* This drives busses 10 to 0x1f */
+			bus-range = <0x10 0x1f>;
+
+			/* Legacy interrupts (note the weird polarity, the bridge seems
+			 * to invert PCIe legacy interrupts).
+			 * We are de-swizzling here because the numbers are actually for
+			 * port of the root complex virtual P2P bridge. But I want
+			 * to avoid putting a node for it in the tree, so the numbers
+			 * below are basically de-swizzled numbers.
+			 * The real slot is on idsel 0, so the swizzling is 1:1
+			 */
+			interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+			interrupt-map = <
+				0x0 0x0 0x0 0x1 &UIC3 0x0 0x4 /* swizzled int A */
+				0x0 0x0 0x0 0x2 &UIC3 0x1 0x4 /* swizzled int B */
+				0x0 0x0 0x0 0x3 &UIC3 0x2 0x4 /* swizzled int C */
+				0x0 0x0 0x0 0x4 &UIC3 0x3 0x4 /* swizzled int D */>;
+		};
+
+		PCIE1: pciex@d20000000 {
+			device_type = "pci";
+			#interrupt-cells = <1>;
+			#size-cells = <2>;
+			#address-cells = <3>;
+			compatible = "ibm,plb-pciex-460sx", "ibm,plb-pciex";
+			primary;
+			port = <0x1>; /* port number */
+			reg = <0x0000000d 0x20000000 0x20000000	/* Config space access */
+			       0x0000000c 0x10001000 0x00001000>;	/* Registers */
+			dcr-reg = <0x120 0x020>;
+			sdr-base = <0x340>;
+
+			/* Outbound ranges, one memory and one IO,
+			 * later cannot be changed
+			 */
+			ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x80000000 0x00000000 0x80000000
+				  0x01000000 0x00000000 0x00000000 0x0000000f 0x80010000 0x00000000 0x00010000>;
+
+			/* Inbound 2GB range starting at 0 */
+			dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x80000000>;
+
+			/* This drives busses 10 to 0x1f */
+			bus-range = <0x20 0x2f>;
+
+			/* Legacy interrupts (note the weird polarity, the bridge seems
+			 * to invert PCIe legacy interrupts).
+			 * We are de-swizzling here because the numbers are actually for
+			 * port of the root complex virtual P2P bridge. But I want
+			 * to avoid putting a node for it in the tree, so the numbers
+			 * below are basically de-swizzled numbers.
+			 * The real slot is on idsel 0, so the swizzling is 1:1
+			 */
+			interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+			interrupt-map = <
+				0x0 0x0 0x0 0x1 &UIC3 0x4 0x4 /* swizzled int A */
+				0x0 0x0 0x0 0x2 &UIC3 0x5 0x4 /* swizzled int B */
+				0x0 0x0 0x0 0x3 &UIC3 0x6 0x4 /* swizzled int C */
+				0x0 0x0 0x0 0x4 &UIC3 0x7 0x4 /* swizzled int D */>;
+		};
+
+		PCIE2: pciex@d40000000 {
+			device_type = "pci";
+			#interrupt-cells = <1>;
+			#size-cells = <2>;
+			#address-cells = <3>;
+			compatible = "ibm,plb-pciex-460sx", "ibm,plb-pciex";
+			primary;
+			port = <0x2>; /* port number */
+			reg = <0x0000000d 0x40000000 0x20000000	/* Config space access */
+			       0x0000000c 0x10002000 0x00001000>;	/* Registers */
+			dcr-reg = <0x140 0x020>;
+			sdr-base = <0x370>;
+
+			/* Outbound ranges, one memory and one IO,
+			 * later cannot be changed
+			 */
+			ranges = <0x02000000 0x00000000 0x80000000 0x0000000f 0x00000000 0x00000000 0x80000000
+				  0x01000000 0x00000000 0x00000000 0x0000000f 0x80020000 0x00000000 0x00010000>;
+
+			/* Inbound 2GB range starting at 0 */
+			dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x80000000>;
+
+			/* This drives busses 10 to 0x1f */
+			bus-range = <0x30 0x3f>;
+
+			/* Legacy interrupts (note the weird polarity, the bridge seems
+			 * to invert PCIe legacy interrupts).
+			 * We are de-swizzling here because the numbers are actually for
+			 * port of the root complex virtual P2P bridge. But I want
+			 * to avoid putting a node for it in the tree, so the numbers
+			 * below are basically de-swizzled numbers.
+			 * The real slot is on idsel 0, so the swizzling is 1:1
+			 */
+			interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+			interrupt-map = <
+				0x0 0x0 0x0 0x1 &UIC3 0x8 0x4 /* swizzled int A */
+				0x0 0x0 0x0 0x2 &UIC3 0x9 0x4 /* swizzled int B */
+				0x0 0x0 0x0 0x3 &UIC3 0xa 0x4 /* swizzled int C */
+				0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>;
 		};
 
 	};
+
 	chosen {
 		linux,stdout-path = "/plb/opb/serial@ef600200";
 	};
diff --git a/arch/powerpc/configs/44x/icon_defconfig b/arch/powerpc/configs/44x/icon_defconfig
new file mode 100644
index 000000000000..277f88c2750f
--- /dev/null
+++ b/arch/powerpc/configs/44x/icon_defconfig
@@ -0,0 +1,1451 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.34-rc7
4# Fri May 21 17:40:22 2010
5#
6# CONFIG_PPC64 is not set
7
8#
9# Processor support
10#
11# CONFIG_PPC_BOOK3S_32 is not set
12# CONFIG_PPC_85xx is not set
13# CONFIG_PPC_8xx is not set
14# CONFIG_40x is not set
15CONFIG_44x=y
16# CONFIG_E200 is not set
17CONFIG_4xx=y
18CONFIG_BOOKE=y
19CONFIG_PTE_64BIT=y
20CONFIG_PHYS_64BIT=y
21CONFIG_PPC_MMU_NOHASH=y
22CONFIG_PPC_MMU_NOHASH_32=y
23# CONFIG_PPC_MM_SLICES is not set
24CONFIG_NOT_COHERENT_CACHE=y
25CONFIG_PPC32=y
26CONFIG_WORD_SIZE=32
27CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
28CONFIG_MMU=y
29CONFIG_GENERIC_CMOS_UPDATE=y
30CONFIG_GENERIC_TIME=y
31CONFIG_GENERIC_TIME_VSYSCALL=y
32CONFIG_GENERIC_CLOCKEVENTS=y
33CONFIG_GENERIC_HARDIRQS=y
34CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
35# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
36# CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is not set
37CONFIG_IRQ_PER_CPU=y
38CONFIG_NR_IRQS=512
39CONFIG_STACKTRACE_SUPPORT=y
40CONFIG_HAVE_LATENCYTOP_SUPPORT=y
41CONFIG_TRACE_IRQFLAGS_SUPPORT=y
42CONFIG_LOCKDEP_SUPPORT=y
43CONFIG_RWSEM_XCHGADD_ALGORITHM=y
44CONFIG_ARCH_HAS_ILOG2_U32=y
45CONFIG_GENERIC_HWEIGHT=y
46CONFIG_GENERIC_FIND_NEXT_BIT=y
47# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
48CONFIG_PPC=y
49CONFIG_EARLY_PRINTK=y
50CONFIG_GENERIC_NVRAM=y
51CONFIG_SCHED_OMIT_FRAME_POINTER=y
52CONFIG_ARCH_MAY_HAVE_PC_FDC=y
53CONFIG_PPC_OF=y
54CONFIG_OF=y
55CONFIG_PPC_UDBG_16550=y
56# CONFIG_GENERIC_TBSYNC is not set
57CONFIG_AUDIT_ARCH=y
58CONFIG_GENERIC_BUG=y
59CONFIG_DTC=y
60# CONFIG_DEFAULT_UIMAGE is not set
61CONFIG_ARCH_HIBERNATION_POSSIBLE=y
62CONFIG_PPC_DCR_NATIVE=y
63# CONFIG_PPC_DCR_MMIO is not set
64CONFIG_PPC_DCR=y
65CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
66CONFIG_PPC_ADV_DEBUG_REGS=y
67CONFIG_PPC_ADV_DEBUG_IACS=4
68CONFIG_PPC_ADV_DEBUG_DACS=2
69CONFIG_PPC_ADV_DEBUG_DVCS=2
70CONFIG_PPC_ADV_DEBUG_DAC_RANGE=y
71CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
72CONFIG_CONSTRUCTORS=y
73
74#
75# General setup
76#
77CONFIG_EXPERIMENTAL=y
78CONFIG_BROKEN_ON_SMP=y
79CONFIG_INIT_ENV_ARG_LIMIT=32
80CONFIG_LOCALVERSION=""
81CONFIG_LOCALVERSION_AUTO=y
82CONFIG_SWAP=y
83CONFIG_SYSVIPC=y
84CONFIG_SYSVIPC_SYSCTL=y
85CONFIG_POSIX_MQUEUE=y
86CONFIG_POSIX_MQUEUE_SYSCTL=y
87# CONFIG_BSD_PROCESS_ACCT is not set
88# CONFIG_TASKSTATS is not set
89# CONFIG_AUDIT is not set
90
91#
92# RCU Subsystem
93#
94CONFIG_TREE_RCU=y
95# CONFIG_TREE_PREEMPT_RCU is not set
96# CONFIG_TINY_RCU is not set
97# CONFIG_RCU_TRACE is not set
98CONFIG_RCU_FANOUT=32
99# CONFIG_RCU_FANOUT_EXACT is not set
100# CONFIG_TREE_RCU_TRACE is not set
101# CONFIG_IKCONFIG is not set
102CONFIG_LOG_BUF_SHIFT=14
103# CONFIG_CGROUPS is not set
104CONFIG_SYSFS_DEPRECATED=y
105CONFIG_SYSFS_DEPRECATED_V2=y
106# CONFIG_RELAY is not set
107# CONFIG_NAMESPACES is not set
108CONFIG_BLK_DEV_INITRD=y
109CONFIG_INITRAMFS_SOURCE=""
110CONFIG_RD_GZIP=y
111# CONFIG_RD_BZIP2 is not set
112# CONFIG_RD_LZMA is not set
113# CONFIG_RD_LZO is not set
114# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
115CONFIG_SYSCTL=y
116CONFIG_ANON_INODES=y
117CONFIG_EMBEDDED=y
118CONFIG_SYSCTL_SYSCALL=y
119CONFIG_KALLSYMS=y
120# CONFIG_KALLSYMS_ALL is not set
121# CONFIG_KALLSYMS_EXTRA_PASS is not set
122CONFIG_HOTPLUG=y
123CONFIG_PRINTK=y
124# CONFIG_LOGBUFFER is not set
125CONFIG_BUG=y
126CONFIG_ELF_CORE=y
127CONFIG_BASE_FULL=y
128CONFIG_FUTEX=y
129CONFIG_EPOLL=y
130CONFIG_SIGNALFD=y
131CONFIG_TIMERFD=y
132CONFIG_EVENTFD=y
133CONFIG_SHMEM=y
134CONFIG_AIO=y
135CONFIG_HAVE_PERF_EVENTS=y
136
137#
138# Kernel Performance Events And Counters
139#
140# CONFIG_PERF_EVENTS is not set
141# CONFIG_PERF_COUNTERS is not set
142CONFIG_VM_EVENT_COUNTERS=y
143CONFIG_PCI_QUIRKS=y
144CONFIG_SLUB_DEBUG=y
145CONFIG_COMPAT_BRK=y
146# CONFIG_SLAB is not set
147CONFIG_SLUB=y
148# CONFIG_SLOB is not set
149# CONFIG_PROFILING is not set
150CONFIG_HAVE_OPROFILE=y
151# CONFIG_KPROBES is not set
152CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
153CONFIG_HAVE_IOREMAP_PROT=y
154CONFIG_HAVE_KPROBES=y
155CONFIG_HAVE_KRETPROBES=y
156CONFIG_HAVE_ARCH_TRACEHOOK=y
157CONFIG_HAVE_DMA_ATTRS=y
158CONFIG_HAVE_DMA_API_DEBUG=y
159
160#
161# GCOV-based kernel profiling
162#
163# CONFIG_SLOW_WORK is not set
164# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
165CONFIG_SLABINFO=y
166CONFIG_RT_MUTEXES=y
167CONFIG_BASE_SMALL=0
168CONFIG_MODULES=y
169# CONFIG_MODULE_FORCE_LOAD is not set
170CONFIG_MODULE_UNLOAD=y
171# CONFIG_MODULE_FORCE_UNLOAD is not set
172# CONFIG_MODVERSIONS is not set
173# CONFIG_MODULE_SRCVERSION_ALL is not set
174CONFIG_BLOCK=y
175CONFIG_LBDAF=y
176# CONFIG_BLK_DEV_BSG is not set
177# CONFIG_BLK_DEV_INTEGRITY is not set
178
179#
180# IO Schedulers
181#
182CONFIG_IOSCHED_NOOP=y
183CONFIG_IOSCHED_DEADLINE=y
184CONFIG_IOSCHED_CFQ=y
185# CONFIG_DEFAULT_DEADLINE is not set
186CONFIG_DEFAULT_CFQ=y
187# CONFIG_DEFAULT_NOOP is not set
188CONFIG_DEFAULT_IOSCHED="cfq"
189# CONFIG_INLINE_SPIN_TRYLOCK is not set
190# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
191# CONFIG_INLINE_SPIN_LOCK is not set
192# CONFIG_INLINE_SPIN_LOCK_BH is not set
193# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
194# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
195CONFIG_INLINE_SPIN_UNLOCK=y
196# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
197CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
198# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
199# CONFIG_INLINE_READ_TRYLOCK is not set
200# CONFIG_INLINE_READ_LOCK is not set
201# CONFIG_INLINE_READ_LOCK_BH is not set
202# CONFIG_INLINE_READ_LOCK_IRQ is not set
203# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
204CONFIG_INLINE_READ_UNLOCK=y
205# CONFIG_INLINE_READ_UNLOCK_BH is not set
206CONFIG_INLINE_READ_UNLOCK_IRQ=y
207# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
208# CONFIG_INLINE_WRITE_TRYLOCK is not set
209# CONFIG_INLINE_WRITE_LOCK is not set
210# CONFIG_INLINE_WRITE_LOCK_BH is not set
211# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
212# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
213CONFIG_INLINE_WRITE_UNLOCK=y
214# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
215CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
216# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
217# CONFIG_MUTEX_SPIN_ON_OWNER is not set
218# CONFIG_FREEZER is not set
219CONFIG_PPC4xx_PCI_EXPRESS=y
220
221#
222# Platform support
223#
224# CONFIG_PPC_CELL is not set
225# CONFIG_PPC_CELL_NATIVE is not set
226# CONFIG_PQ2ADS is not set
227# CONFIG_BAMBOO is not set
228# CONFIG_EBONY is not set
229# CONFIG_SAM440EP is not set
230# CONFIG_SEQUOIA is not set
231# CONFIG_TAISHAN is not set
232# CONFIG_KATMAI is not set
233# CONFIG_RAINIER is not set
234# CONFIG_WARP is not set
235# CONFIG_ARCHES is not set
236# CONFIG_CANYONLANDS is not set
237# CONFIG_GLACIER is not set
238# CONFIG_REDWOOD is not set
239# CONFIG_EIGER is not set
240# CONFIG_YOSEMITE is not set
241CONFIG_ICON=y
242# CONFIG_XILINX_VIRTEX440_GENERIC_BOARD is not set
243CONFIG_PPC44x_SIMPLE=y
244# CONFIG_PPC4xx_GPIO is not set
245CONFIG_440SPe=y
246CONFIG_STDBINUTILS=y
247# CONFIG_IPIC is not set
248# CONFIG_MPIC is not set
249# CONFIG_MPIC_WEIRD is not set
250# CONFIG_PPC_I8259 is not set
251# CONFIG_PPC_RTAS is not set
252# CONFIG_MMIO_NVRAM is not set
253# CONFIG_PPC_MPC106 is not set
254# CONFIG_PPC_970_NAP is not set
255# CONFIG_PPC_INDIRECT_IO is not set
256# CONFIG_GENERIC_IOMAP is not set
257# CONFIG_CPU_FREQ is not set
258# CONFIG_FSL_ULI1575 is not set
259# CONFIG_SIMPLE_GPIO is not set
260
261#
262# Kernel options
263#
264CONFIG_HIGHMEM=y
265# CONFIG_NO_HZ is not set
266# CONFIG_HIGH_RES_TIMERS is not set
267CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
268# CONFIG_HZ_100 is not set
269CONFIG_HZ_250=y
270# CONFIG_HZ_300 is not set
271# CONFIG_HZ_1000 is not set
272CONFIG_HZ=250
273# CONFIG_SCHED_HRTICK is not set
274CONFIG_PREEMPT_NONE=y
275# CONFIG_PREEMPT_VOLUNTARY is not set
276# CONFIG_PREEMPT is not set
277CONFIG_BINFMT_ELF=y
278# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
279# CONFIG_HAVE_AOUT is not set
280# CONFIG_BINFMT_MISC is not set
281# CONFIG_MATH_EMULATION is not set
282# CONFIG_IOMMU_HELPER is not set
283# CONFIG_SWIOTLB is not set
284CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
285CONFIG_ARCH_HAS_WALK_MEMORY=y
286CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
287CONFIG_SPARSE_IRQ=y
288CONFIG_MAX_ACTIVE_REGIONS=32
289CONFIG_ARCH_FLATMEM_ENABLE=y
290CONFIG_ARCH_POPULATES_NODE_MAP=y
291CONFIG_SELECT_MEMORY_MODEL=y
292CONFIG_FLATMEM_MANUAL=y
293# CONFIG_DISCONTIGMEM_MANUAL is not set
294# CONFIG_SPARSEMEM_MANUAL is not set
295CONFIG_FLATMEM=y
296CONFIG_FLAT_NODE_MEM_MAP=y
297CONFIG_PAGEFLAGS_EXTENDED=y
298CONFIG_SPLIT_PTLOCK_CPUS=4
299CONFIG_MIGRATION=y
300CONFIG_PHYS_ADDR_T_64BIT=y
301CONFIG_ZONE_DMA_FLAG=1
302CONFIG_BOUNCE=y
303CONFIG_VIRT_TO_BUS=y
304# CONFIG_KSM is not set
305CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
306CONFIG_PPC_4K_PAGES=y
307# CONFIG_PPC_16K_PAGES is not set
308# CONFIG_PPC_64K_PAGES is not set
309# CONFIG_PPC_256K_PAGES is not set
310CONFIG_FORCE_MAX_ZONEORDER=11
311CONFIG_PROC_DEVICETREE=y
312CONFIG_CMDLINE_BOOL=y
313CONFIG_CMDLINE=""
314CONFIG_EXTRA_TARGETS=""
315# CONFIG_ARCH_HAS_NMI_WATCHDOG is not set
316CONFIG_SECCOMP=y
317CONFIG_ISA_DMA_API=y
318
319#
320# Bus options
321#
322CONFIG_ZONE_DMA=y
323CONFIG_NEED_DMA_MAP_STATE=y
324CONFIG_PPC_INDIRECT_PCI=y
325CONFIG_4xx_SOC=y
326CONFIG_PPC_PCI_CHOICE=y
327CONFIG_PCI=y
328CONFIG_PCI_DOMAINS=y
329CONFIG_PCI_SYSCALL=y
330CONFIG_PCIEPORTBUS=y
331CONFIG_PCIEAER=y
332# CONFIG_PCIE_ECRC is not set
333# CONFIG_PCIEAER_INJECT is not set
334# CONFIG_PCIEASPM is not set
335CONFIG_ARCH_SUPPORTS_MSI=y
336# CONFIG_PCI_MSI is not set
337# CONFIG_PCI_DEBUG is not set
338# CONFIG_PCI_STUB is not set
339# CONFIG_PCI_IOV is not set
340# CONFIG_PCCARD is not set
341# CONFIG_HOTPLUG_PCI is not set
342# CONFIG_HAS_RAPIDIO is not set
343
344#
345# Advanced setup
346#
347# CONFIG_ADVANCED_OPTIONS is not set
348
349#
350# Default settings for advanced configuration options are used
351#
352CONFIG_LOWMEM_SIZE=0x30000000
353CONFIG_PAGE_OFFSET=0xc0000000
354CONFIG_KERNEL_START=0xc0000000
355CONFIG_PHYSICAL_START=0x00000000
356CONFIG_TASK_SIZE=0xc0000000
357CONFIG_CONSISTENT_SIZE=0x00200000
358CONFIG_NET=y
359
360#
361# Networking options
362#
363CONFIG_PACKET=y
364CONFIG_UNIX=y
365# CONFIG_NET_KEY is not set
366CONFIG_INET=y
367# CONFIG_IP_MULTICAST is not set
368# CONFIG_IP_ADVANCED_ROUTER is not set
369CONFIG_IP_FIB_HASH=y
370CONFIG_IP_PNP=y
371CONFIG_IP_PNP_DHCP=y
372CONFIG_IP_PNP_BOOTP=y
373# CONFIG_IP_PNP_RARP is not set
374# CONFIG_NET_IPIP is not set
375# CONFIG_NET_IPGRE is not set
376# CONFIG_ARPD is not set
377# CONFIG_SYN_COOKIES is not set
378# CONFIG_INET_AH is not set
379# CONFIG_INET_ESP is not set
380# CONFIG_INET_IPCOMP is not set
381# CONFIG_INET_XFRM_TUNNEL is not set
382# CONFIG_INET_TUNNEL is not set
383# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
384# CONFIG_INET_XFRM_MODE_TUNNEL is not set
385# CONFIG_INET_XFRM_MODE_BEET is not set
386# CONFIG_INET_LRO is not set
387CONFIG_INET_DIAG=y
388CONFIG_INET_TCP_DIAG=y
389# CONFIG_TCP_CONG_ADVANCED is not set
390CONFIG_TCP_CONG_CUBIC=y
391CONFIG_DEFAULT_TCP_CONG="cubic"
392# CONFIG_TCP_MD5SIG is not set
393# CONFIG_IPV6 is not set
394# CONFIG_NETWORK_SECMARK is not set
395# CONFIG_NETFILTER is not set
396# CONFIG_IP_DCCP is not set
397# CONFIG_IP_SCTP is not set
398# CONFIG_RDS is not set
399# CONFIG_TIPC is not set
400# CONFIG_ATM is not set
401# CONFIG_BRIDGE is not set
402# CONFIG_NET_DSA is not set
403# CONFIG_VLAN_8021Q is not set
404# CONFIG_DECNET is not set
405# CONFIG_LLC2 is not set
406# CONFIG_IPX is not set
407# CONFIG_ATALK is not set
408# CONFIG_X25 is not set
409# CONFIG_LAPB is not set
410# CONFIG_ECONET is not set
411# CONFIG_WAN_ROUTER is not set
412# CONFIG_PHONET is not set
413# CONFIG_IEEE802154 is not set
414# CONFIG_NET_SCHED is not set
415# CONFIG_DCB is not set
416
417#
418# Network testing
419#
420# CONFIG_NET_PKTGEN is not set
421# CONFIG_HAMRADIO is not set
422# CONFIG_CAN is not set
423# CONFIG_IRDA is not set
424# CONFIG_BT is not set
425# CONFIG_AF_RXRPC is not set
426CONFIG_WIRELESS=y
427# CONFIG_CFG80211 is not set
428# CONFIG_LIB80211 is not set
429
430#
431# CFG80211 needs to be enabled for MAC80211
432#
433# CONFIG_WIMAX is not set
434# CONFIG_RFKILL is not set
435# CONFIG_NET_9P is not set
436
437#
438# Device Drivers
439#
440
441#
442# Generic Driver Options
443#
444CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
445# CONFIG_DEVTMPFS is not set
446CONFIG_STANDALONE=y
447CONFIG_PREVENT_FIRMWARE_BUILD=y
448CONFIG_FW_LOADER=y
449CONFIG_FIRMWARE_IN_KERNEL=y
450CONFIG_EXTRA_FIRMWARE=""
451# CONFIG_DEBUG_DRIVER is not set
452# CONFIG_DEBUG_DEVRES is not set
453# CONFIG_SYS_HYPERVISOR is not set
454CONFIG_CONNECTOR=y
455CONFIG_PROC_EVENTS=y
456CONFIG_MTD=y
457# CONFIG_MTD_DEBUG is not set
458# CONFIG_MTD_TESTS is not set
459# CONFIG_MTD_CONCAT is not set
460CONFIG_MTD_PARTITIONS=y
461# CONFIG_MTD_REDBOOT_PARTS is not set
462CONFIG_MTD_CMDLINE_PARTS=y
463CONFIG_MTD_OF_PARTS=y
464# CONFIG_MTD_AR7_PARTS is not set
465
466#
467# User Modules And Translation Layers
468#
469CONFIG_MTD_CHAR=y
470CONFIG_MTD_BLKDEVS=y
471CONFIG_MTD_BLOCK=y
472# CONFIG_FTL is not set
473# CONFIG_NFTL is not set
474# CONFIG_INFTL is not set
475# CONFIG_RFD_FTL is not set
476# CONFIG_SSFDC is not set
477# CONFIG_MTD_OOPS is not set
478
479#
480# RAM/ROM/Flash chip drivers
481#
482CONFIG_MTD_CFI=y
483# CONFIG_MTD_JEDECPROBE is not set
484CONFIG_MTD_GEN_PROBE=y
485# CONFIG_MTD_CFI_ADV_OPTIONS is not set
486CONFIG_MTD_MAP_BANK_WIDTH_1=y
487CONFIG_MTD_MAP_BANK_WIDTH_2=y
488CONFIG_MTD_MAP_BANK_WIDTH_4=y
489# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
490# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
491# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
492CONFIG_MTD_CFI_I1=y
493CONFIG_MTD_CFI_I2=y
494# CONFIG_MTD_CFI_I4 is not set
495# CONFIG_MTD_CFI_I8 is not set
496# CONFIG_MTD_CFI_INTELEXT is not set
497CONFIG_MTD_CFI_AMDSTD=y
498# CONFIG_MTD_CFI_STAA is not set
499CONFIG_MTD_CFI_UTIL=y
500# CONFIG_MTD_RAM is not set
501# CONFIG_MTD_ROM is not set
502# CONFIG_MTD_ABSENT is not set
503
504#
505# Mapping drivers for chip access
506#
507# CONFIG_MTD_COMPLEX_MAPPINGS is not set
508# CONFIG_MTD_PHYSMAP is not set
509CONFIG_MTD_PHYSMAP_OF=y
510# CONFIG_MTD_INTEL_VR_NOR is not set
511# CONFIG_MTD_PLATRAM is not set
512
513#
514# Self-contained MTD device drivers
515#
516# CONFIG_MTD_PMC551 is not set
517# CONFIG_MTD_SLRAM is not set
518# CONFIG_MTD_PHRAM is not set
519# CONFIG_MTD_MTDRAM is not set
520# CONFIG_MTD_BLOCK2MTD is not set
521
522#
523# Disk-On-Chip Device Drivers
524#
525# CONFIG_MTD_DOC2000 is not set
526# CONFIG_MTD_DOC2001 is not set
527# CONFIG_MTD_DOC2001PLUS is not set
528# CONFIG_MTD_NAND is not set
529# CONFIG_MTD_ONENAND is not set
530
531#
532# LPDDR flash memory drivers
533#
534# CONFIG_MTD_LPDDR is not set
535
536#
537# UBI - Unsorted block images
538#
539# CONFIG_MTD_UBI is not set
540CONFIG_OF_FLATTREE=y
541CONFIG_OF_DYNAMIC=y
542CONFIG_OF_DEVICE=y
543CONFIG_OF_I2C=y
544# CONFIG_PARPORT is not set
545CONFIG_BLK_DEV=y
546# CONFIG_BLK_DEV_FD is not set
547# CONFIG_BLK_CPQ_DA is not set
548# CONFIG_BLK_CPQ_CISS_DA is not set
549# CONFIG_BLK_DEV_DAC960 is not set
550# CONFIG_BLK_DEV_UMEM is not set
551# CONFIG_BLK_DEV_COW_COMMON is not set
552# CONFIG_BLK_DEV_LOOP is not set
553# CONFIG_BLK_DEV_DRBD is not set
554# CONFIG_BLK_DEV_NBD is not set
555# CONFIG_BLK_DEV_SX8 is not set
556CONFIG_BLK_DEV_RAM=y
557CONFIG_BLK_DEV_RAM_COUNT=16
558CONFIG_BLK_DEV_RAM_SIZE=35000
559# CONFIG_BLK_DEV_XIP is not set
560# CONFIG_CDROM_PKTCDVD is not set
561# CONFIG_ATA_OVER_ETH is not set
562CONFIG_XILINX_SYSACE=y
563# CONFIG_BLK_DEV_HD is not set
564# CONFIG_MISC_DEVICES is not set
565CONFIG_HAVE_IDE=y
566# CONFIG_IDE is not set
567
568#
569# SCSI device support
570#
571CONFIG_SCSI_MOD=y
572# CONFIG_RAID_ATTRS is not set
573CONFIG_SCSI=y
574CONFIG_SCSI_DMA=y
575# CONFIG_SCSI_TGT is not set
576# CONFIG_SCSI_NETLINK is not set
577CONFIG_SCSI_PROC_FS=y
578
579#
580# SCSI support type (disk, tape, CD-ROM)
581#
582CONFIG_BLK_DEV_SD=y
583# CONFIG_CHR_DEV_ST is not set
584# CONFIG_CHR_DEV_OSST is not set
585# CONFIG_BLK_DEV_SR is not set
586# CONFIG_CHR_DEV_SG is not set
587# CONFIG_CHR_DEV_SCH is not set
588# CONFIG_SCSI_MULTI_LUN is not set
589CONFIG_SCSI_CONSTANTS=y
590CONFIG_SCSI_LOGGING=y
591# CONFIG_SCSI_SCAN_ASYNC is not set
592CONFIG_SCSI_WAIT_SCAN=m
593
594#
595# SCSI Transports
596#
597# CONFIG_SCSI_SPI_ATTRS is not set
598# CONFIG_SCSI_FC_ATTRS is not set
599# CONFIG_SCSI_ISCSI_ATTRS is not set
600CONFIG_SCSI_SAS_ATTRS=y
601# CONFIG_SCSI_SAS_LIBSAS is not set
602# CONFIG_SCSI_SRP_ATTRS is not set
603# CONFIG_SCSI_LOWLEVEL is not set
604# CONFIG_SCSI_DH is not set
605# CONFIG_SCSI_OSD_INITIATOR is not set
606# CONFIG_ATA is not set
607# CONFIG_MD is not set
608CONFIG_FUSION=y
609# CONFIG_FUSION_SPI is not set
610# CONFIG_FUSION_FC is not set
611CONFIG_FUSION_SAS=y
612CONFIG_FUSION_MAX_SGE=128
613CONFIG_FUSION_CTL=y
614CONFIG_FUSION_LOGGING=y
615
616#
617# IEEE 1394 (FireWire) support
618#
619
620#
621# You can enable one or both FireWire driver stacks.
622#
623
624#
625# The newer stack is recommended.
626#
627# CONFIG_FIREWIRE is not set
628# CONFIG_IEEE1394 is not set
629# CONFIG_I2O is not set
630# CONFIG_MACINTOSH_DRIVERS is not set
631CONFIG_NETDEVICES=y
632# CONFIG_DUMMY is not set
633# CONFIG_BONDING is not set
634# CONFIG_MACVLAN is not set
635# CONFIG_EQUALIZER is not set
636# CONFIG_TUN is not set
637# CONFIG_VETH is not set
638# CONFIG_ARCNET is not set
639# CONFIG_PHYLIB is not set
640CONFIG_NET_ETHERNET=y
641# CONFIG_MII is not set
642# CONFIG_HAPPYMEAL is not set
643# CONFIG_SUNGEM is not set
644# CONFIG_CASSINI is not set
645# CONFIG_NET_VENDOR_3COM is not set
646# CONFIG_ETHOC is not set
647# CONFIG_DNET is not set
648# CONFIG_NET_TULIP is not set
649# CONFIG_HP100 is not set
650CONFIG_IBM_NEW_EMAC=y
651CONFIG_IBM_NEW_EMAC_RXB=128
652CONFIG_IBM_NEW_EMAC_TXB=64
653CONFIG_IBM_NEW_EMAC_POLL_WEIGHT=32
654CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD=256
655CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0
656# CONFIG_IBM_NEW_EMAC_DEBUG is not set
657# CONFIG_IBM_NEW_EMAC_ZMII is not set
658# CONFIG_IBM_NEW_EMAC_RGMII is not set
659# CONFIG_IBM_NEW_EMAC_TAH is not set
660CONFIG_IBM_NEW_EMAC_EMAC4=y
661# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
662# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
663# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
664# CONFIG_NET_PCI is not set
665# CONFIG_B44 is not set
666# CONFIG_KS8842 is not set
667# CONFIG_KS8851_MLL is not set
668# CONFIG_ATL2 is not set
669# CONFIG_XILINX_EMACLITE is not set
670# CONFIG_NETDEV_1000 is not set
671# CONFIG_NETDEV_10000 is not set
672# CONFIG_TR is not set
673# CONFIG_WLAN is not set
674
675#
676# Enable WiMAX (Networking options) to see the WiMAX drivers
677#
678# CONFIG_WAN is not set
679# CONFIG_FDDI is not set
680# CONFIG_HIPPI is not set
681# CONFIG_PPP is not set
682# CONFIG_SLIP is not set
683# CONFIG_NET_FC is not set
684# CONFIG_NETCONSOLE is not set
685# CONFIG_NETPOLL is not set
686# CONFIG_NET_POLL_CONTROLLER is not set
687# CONFIG_VMXNET3 is not set
688# CONFIG_ISDN is not set
689# CONFIG_PHONE is not set
690
691#
692# Input device support
693#
694CONFIG_INPUT=y
695# CONFIG_INPUT_FF_MEMLESS is not set
696# CONFIG_INPUT_POLLDEV is not set
697# CONFIG_INPUT_SPARSEKMAP is not set
698
699#
700# Userland interfaces
701#
702CONFIG_INPUT_MOUSEDEV=y
703CONFIG_INPUT_MOUSEDEV_PSAUX=y
704CONFIG_INPUT_MOUSEDEV_SCREEN_X=640
705CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
706# CONFIG_INPUT_JOYDEV is not set
707# CONFIG_INPUT_EVDEV is not set
708# CONFIG_INPUT_EVBUG is not set
709
710#
711# Input Device Drivers
712#
713CONFIG_INPUT_KEYBOARD=y
714# CONFIG_KEYBOARD_ADP5588 is not set
715CONFIG_KEYBOARD_ATKBD=y
716# CONFIG_QT2160 is not set
717# CONFIG_KEYBOARD_LKKBD is not set
718# CONFIG_KEYBOARD_MAX7359 is not set
719# CONFIG_KEYBOARD_NEWTON is not set
720# CONFIG_KEYBOARD_OPENCORES is not set
721# CONFIG_KEYBOARD_STOWAWAY is not set
722# CONFIG_KEYBOARD_SUNKBD is not set
723# CONFIG_KEYBOARD_XTKBD is not set
724CONFIG_INPUT_MOUSE=y
725CONFIG_MOUSE_PS2=y
726# CONFIG_MOUSE_PS2_ALPS is not set
727# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
728# CONFIG_MOUSE_PS2_SYNAPTICS is not set
729# CONFIG_MOUSE_PS2_TRACKPOINT is not set
730# CONFIG_MOUSE_PS2_ELANTECH is not set
731# CONFIG_MOUSE_PS2_SENTELIC is not set
732# CONFIG_MOUSE_PS2_TOUCHKIT is not set
733# CONFIG_MOUSE_SERIAL is not set
734# CONFIG_MOUSE_VSXXXAA is not set
735# CONFIG_MOUSE_SYNAPTICS_I2C is not set
736# CONFIG_INPUT_JOYSTICK is not set
737# CONFIG_INPUT_TABLET is not set
738# CONFIG_INPUT_TOUCHSCREEN is not set
739# CONFIG_INPUT_MISC is not set
740
741#
742# Hardware I/O ports
743#
744CONFIG_SERIO=y
745CONFIG_SERIO_I8042=y
746CONFIG_SERIO_SERPORT=y
747# CONFIG_SERIO_PCIPS2 is not set
748CONFIG_SERIO_LIBPS2=y
749# CONFIG_SERIO_RAW is not set
750# CONFIG_SERIO_XILINX_XPS_PS2 is not set
751# CONFIG_SERIO_ALTERA_PS2 is not set
752# CONFIG_GAMEPORT is not set
753
754#
755# Character devices
756#
757CONFIG_VT=y
758CONFIG_CONSOLE_TRANSLATIONS=y
759CONFIG_VT_CONSOLE=y
760CONFIG_HW_CONSOLE=y
761# CONFIG_VT_HW_CONSOLE_BINDING is not set
762CONFIG_DEVKMEM=y
763# CONFIG_SERIAL_NONSTANDARD is not set
764# CONFIG_NOZOMI is not set
765
766#
767# Serial drivers
768#
769CONFIG_SERIAL_8250=y
770CONFIG_SERIAL_8250_CONSOLE=y
771# CONFIG_SERIAL_8250_PCI is not set
772CONFIG_SERIAL_8250_NR_UARTS=4
773CONFIG_SERIAL_8250_RUNTIME_UARTS=4
774CONFIG_SERIAL_8250_EXTENDED=y
775# CONFIG_SERIAL_8250_MANY_PORTS is not set
776CONFIG_SERIAL_8250_SHARE_IRQ=y
777# CONFIG_SERIAL_8250_DETECT_IRQ is not set
778# CONFIG_SERIAL_8250_RSA is not set
779
780#
781# Non-8250 serial port support
782#
783# CONFIG_SERIAL_UARTLITE is not set
784CONFIG_SERIAL_CORE=y
785CONFIG_SERIAL_CORE_CONSOLE=y
786# CONFIG_SERIAL_JSM is not set
787CONFIG_SERIAL_OF_PLATFORM=y
788# CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL is not set
789# CONFIG_SERIAL_TIMBERDALE is not set
790# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
791CONFIG_UNIX98_PTYS=y
792# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
793CONFIG_LEGACY_PTYS=y
794CONFIG_LEGACY_PTY_COUNT=256
795# CONFIG_HVC_UDBG is not set
796# CONFIG_IPMI_HANDLER is not set
797# CONFIG_HW_RANDOM is not set
798# CONFIG_NVRAM is not set
799# CONFIG_R3964 is not set
800# CONFIG_APPLICOM is not set
801# CONFIG_RAW_DRIVER is not set
802# CONFIG_BOOTCOUNT is not set
803# CONFIG_DISPLAY_PDSP1880 is not set
804# CONFIG_MUCMC52_IO is not set
805# CONFIG_UC101_IO is not set
806# CONFIG_SRAM is not set
807# CONFIG_TCG_TPM is not set
808CONFIG_DEVPORT=y
809CONFIG_I2C=y
810CONFIG_I2C_BOARDINFO=y
811CONFIG_I2C_COMPAT=y
812CONFIG_I2C_CHARDEV=y
813CONFIG_I2C_HELPER_AUTO=y
814
815#
816# I2C Hardware Bus support
817#
818
819#
820# PC SMBus host controller drivers
821#
822# CONFIG_I2C_ALI1535 is not set
823# CONFIG_I2C_ALI1563 is not set
824# CONFIG_I2C_ALI15X3 is not set
825# CONFIG_I2C_AMD756 is not set
826# CONFIG_I2C_AMD8111 is not set
827# CONFIG_I2C_I801 is not set
828# CONFIG_I2C_ISCH is not set
829# CONFIG_I2C_PIIX4 is not set
830# CONFIG_I2C_NFORCE2 is not set
831# CONFIG_I2C_SIS5595 is not set
832# CONFIG_I2C_SIS630 is not set
833# CONFIG_I2C_SIS96X is not set
834# CONFIG_I2C_VIA is not set
835# CONFIG_I2C_VIAPRO is not set
836
837#
838# I2C system bus drivers (mostly embedded / system-on-chip)
839#
840CONFIG_I2C_IBM_IIC=y
841# CONFIG_I2C_MPC is not set
842# CONFIG_I2C_OCORES is not set
843# CONFIG_I2C_SIMTEC is not set
844# CONFIG_I2C_XILINX is not set
845
846#
847# External I2C/SMBus adapter drivers
848#
849# CONFIG_I2C_PARPORT_LIGHT is not set
850# CONFIG_I2C_TAOS_EVM is not set
851
852#
853# Other I2C/SMBus bus drivers
854#
855# CONFIG_I2C_PCA_PLATFORM is not set
856# CONFIG_I2C_STUB is not set
857# CONFIG_I2C_DEBUG_CORE is not set
858# CONFIG_I2C_DEBUG_ALGO is not set
859# CONFIG_I2C_DEBUG_BUS is not set
860# CONFIG_SPI is not set
861
862#
863# PPS support
864#
865# CONFIG_PPS is not set
866CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
867# CONFIG_GPIOLIB is not set
868# CONFIG_W1 is not set
869# CONFIG_POWER_SUPPLY is not set
870# CONFIG_HWMON is not set
871# CONFIG_THERMAL is not set
872# CONFIG_WATCHDOG is not set
873CONFIG_SSB_POSSIBLE=y
874
875#
876# Sonics Silicon Backplane
877#
878# CONFIG_SSB is not set
879
880#
881# Multifunction device drivers
882#
883# CONFIG_MFD_CORE is not set
884# CONFIG_MFD_88PM860X is not set
885CONFIG_MFD_SM501=y
886# CONFIG_HTC_PASIC3 is not set
887# CONFIG_TWL4030_CORE is not set
888# CONFIG_MFD_TMIO is not set
889# CONFIG_PMIC_DA903X is not set
890# CONFIG_PMIC_ADP5520 is not set
891# CONFIG_MFD_MAX8925 is not set
892# CONFIG_MFD_WM8400 is not set
893# CONFIG_MFD_WM831X is not set
894# CONFIG_MFD_WM8350_I2C is not set
895# CONFIG_MFD_WM8994 is not set
896# CONFIG_MFD_PCF50633 is not set
897# CONFIG_AB3100_CORE is not set
898# CONFIG_LPC_SCH is not set
899# CONFIG_REGULATOR is not set
900# CONFIG_MEDIA_SUPPORT is not set
901
902#
903# Graphics support
904#
905# CONFIG_AGP is not set
906CONFIG_VGA_ARB=y
907CONFIG_VGA_ARB_MAX_GPUS=16
908# CONFIG_DRM is not set
909# CONFIG_VGASTATE is not set
910CONFIG_VIDEO_OUTPUT_CONTROL=m
911CONFIG_FB=y
912# CONFIG_FIRMWARE_EDID is not set
913# CONFIG_FB_DDC is not set
914# CONFIG_FB_BOOT_VESA_SUPPORT is not set
915CONFIG_FB_CFB_FILLRECT=y
916CONFIG_FB_CFB_COPYAREA=y
917CONFIG_FB_CFB_IMAGEBLIT=y
918# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
919# CONFIG_FB_SYS_FILLRECT is not set
920# CONFIG_FB_SYS_COPYAREA is not set
921# CONFIG_FB_SYS_IMAGEBLIT is not set
922# CONFIG_FB_FOREIGN_ENDIAN is not set
923# CONFIG_FB_SYS_FOPS is not set
924# CONFIG_FB_SVGALIB is not set
925# CONFIG_FB_MACMODES is not set
926# CONFIG_FB_BACKLIGHT is not set
927# CONFIG_FB_MODE_HELPERS is not set
928# CONFIG_FB_TILEBLITTING is not set
929
930#
931# Frame buffer hardware drivers
932#
933# CONFIG_FB_CIRRUS is not set
934# CONFIG_FB_PM2 is not set
935# CONFIG_FB_CYBER2000 is not set
936# CONFIG_FB_OF is not set
937# CONFIG_FB_CT65550 is not set
938# CONFIG_FB_ASILIANT is not set
939# CONFIG_FB_IMSTT is not set
940# CONFIG_FB_VGA16 is not set
941# CONFIG_FB_UVESA is not set
942# CONFIG_FB_S1D13XXX is not set
943# CONFIG_FB_NVIDIA is not set
944# CONFIG_FB_RIVA is not set
945# CONFIG_FB_MATROX is not set
946# CONFIG_FB_RADEON is not set
947# CONFIG_FB_ATY128 is not set
948# CONFIG_FB_ATY is not set
949# CONFIG_FB_S3 is not set
950# CONFIG_FB_SAVAGE is not set
951# CONFIG_FB_SIS is not set
952# CONFIG_FB_VIA is not set
953# CONFIG_FB_NEOMAGIC is not set
954# CONFIG_FB_KYRO is not set
955# CONFIG_FB_3DFX is not set
956# CONFIG_FB_VOODOO1 is not set
957# CONFIG_FB_VT8623 is not set
958# CONFIG_FB_TRIDENT is not set
959# CONFIG_FB_ARK is not set
960# CONFIG_FB_PM3 is not set
961# CONFIG_FB_CARMINE is not set
962CONFIG_FB_SM501=y
963# CONFIG_FB_IBM_GXT4500 is not set
964# CONFIG_FB_VIRTUAL is not set
965# CONFIG_FB_METRONOME is not set
966# CONFIG_FB_MB862XX is not set
967# CONFIG_FB_BROADSHEET is not set
968# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
969
970#
971# Display device support
972#
973# CONFIG_DISPLAY_SUPPORT is not set
974
975#
976# Console display driver support
977#
978CONFIG_DUMMY_CONSOLE=y
979CONFIG_FRAMEBUFFER_CONSOLE=y
980# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
981# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
982# CONFIG_FONTS is not set
983CONFIG_FONT_8x8=y
984CONFIG_FONT_8x16=y
985CONFIG_LOGO=y
986# CONFIG_LOGO_LINUX_MONO is not set
987# CONFIG_LOGO_LINUX_VGA16 is not set
988CONFIG_LOGO_LINUX_CLUT224=y
989# CONFIG_SOUND is not set
990CONFIG_HID_SUPPORT=y
991CONFIG_HID=y
992# CONFIG_HIDRAW is not set
993# CONFIG_HID_PID is not set
994
995#
996# Special HID drivers
997#
998# CONFIG_USB_SUPPORT is not set
999# CONFIG_UWB is not set
1000# CONFIG_MMC is not set
1001# CONFIG_MEMSTICK is not set
1002# CONFIG_NEW_LEDS is not set
1003# CONFIG_ACCESSIBILITY is not set
1004# CONFIG_INFINIBAND is not set
1005# CONFIG_EDAC is not set
1006CONFIG_RTC_LIB=y
1007CONFIG_RTC_CLASS=y
1008CONFIG_RTC_HCTOSYS=y
1009CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
1010# CONFIG_RTC_DEBUG is not set
1011
1012#
1013# RTC interfaces
1014#
1015CONFIG_RTC_INTF_SYSFS=y
1016CONFIG_RTC_INTF_PROC=y
1017CONFIG_RTC_INTF_DEV=y
1018# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
1019# CONFIG_RTC_DRV_TEST is not set
1020
1021#
1022# I2C RTC drivers
1023#
1024CONFIG_RTC_DRV_DS1307=y
1025# CONFIG_RTC_DRV_DS1374 is not set
1026# CONFIG_RTC_DRV_DS1672 is not set
1027# CONFIG_RTC_DRV_MAX6900 is not set
1028# CONFIG_RTC_DRV_RS5C372 is not set
1029# CONFIG_RTC_DRV_ISL1208 is not set
1030# CONFIG_RTC_DRV_X1205 is not set
1031# CONFIG_RTC_DRV_PCF8563 is not set
1032# CONFIG_RTC_DRV_PCF8583 is not set
1033# CONFIG_RTC_DRV_M41T80 is not set
1034# CONFIG_RTC_DRV_BQ32K is not set
1035# CONFIG_RTC_DRV_S35390A is not set
1036# CONFIG_RTC_DRV_FM3130 is not set
1037# CONFIG_RTC_DRV_RX8581 is not set
1038# CONFIG_RTC_DRV_RX8025 is not set
1039
1040#
1041# SPI RTC drivers
1042#
1043
1044#
1045# Platform RTC drivers
1046#
1047# CONFIG_RTC_DRV_CMOS is not set
1048# CONFIG_RTC_DRV_DS1286 is not set
1049# CONFIG_RTC_DRV_DS1511 is not set
1050# CONFIG_RTC_DRV_DS1553 is not set
1051# CONFIG_RTC_DRV_DS1742 is not set
1052# CONFIG_RTC_DRV_STK17TA8 is not set
1053# CONFIG_RTC_DRV_M48T86 is not set
1054# CONFIG_RTC_DRV_M48T35 is not set
1055# CONFIG_RTC_DRV_M48T59 is not set
1056# CONFIG_RTC_DRV_MSM6242 is not set
1057# CONFIG_RTC_DRV_BQ4802 is not set
1058# CONFIG_RTC_DRV_RP5C01 is not set
1059# CONFIG_RTC_DRV_V3020 is not set
1060
1061#
1062# on-CPU RTC drivers
1063#
1064# CONFIG_RTC_DRV_GENERIC is not set
1065# CONFIG_DMADEVICES is not set
1066# CONFIG_AUXDISPLAY is not set
1067# CONFIG_UIO is not set
1068
1069#
1070# TI VLYNQ
1071#
1072# CONFIG_STAGING is not set
1073
1074#
1075# File systems
1076#
1077CONFIG_EXT2_FS=y
1078# CONFIG_EXT2_FS_XATTR is not set
1079# CONFIG_EXT2_FS_XIP is not set
1080CONFIG_EXT3_FS=y
1081# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
1082CONFIG_EXT3_FS_XATTR=y
1083# CONFIG_EXT3_FS_POSIX_ACL is not set
1084# CONFIG_EXT3_FS_SECURITY is not set
1085# CONFIG_EXT4_FS is not set
1086CONFIG_JBD=y
1087CONFIG_FS_MBCACHE=y
1088# CONFIG_REISERFS_FS is not set
1089# CONFIG_JFS_FS is not set
1090# CONFIG_FS_POSIX_ACL is not set
1091# CONFIG_XFS_FS is not set
1092# CONFIG_GFS2_FS is not set
1093# CONFIG_OCFS2_FS is not set
1094# CONFIG_BTRFS_FS is not set
1095# CONFIG_NILFS2_FS is not set
1096CONFIG_FILE_LOCKING=y
1097CONFIG_FSNOTIFY=y
1098CONFIG_DNOTIFY=y
1099CONFIG_INOTIFY=y
1100CONFIG_INOTIFY_USER=y
1101# CONFIG_QUOTA is not set
1102# CONFIG_AUTOFS_FS is not set
1103# CONFIG_AUTOFS4_FS is not set
1104# CONFIG_FUSE_FS is not set
1105
1106#
1107# Caches
1108#
1109# CONFIG_FSCACHE is not set
1110
1111#
1112# CD-ROM/DVD Filesystems
1113#
1114# CONFIG_ISO9660_FS is not set
1115# CONFIG_UDF_FS is not set
1116
1117#
1118# DOS/FAT/NT Filesystems
1119#
1120CONFIG_FAT_FS=y
1121# CONFIG_MSDOS_FS is not set
1122CONFIG_VFAT_FS=y
1123CONFIG_FAT_DEFAULT_CODEPAGE=437
1124CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1125# CONFIG_NTFS_FS is not set
1126
1127#
1128# Pseudo filesystems
1129#
1130CONFIG_PROC_FS=y
1131CONFIG_PROC_KCORE=y
1132CONFIG_PROC_SYSCTL=y
1133CONFIG_PROC_PAGE_MONITOR=y
1134CONFIG_SYSFS=y
1135CONFIG_TMPFS=y
1136# CONFIG_TMPFS_POSIX_ACL is not set
1137# CONFIG_HUGETLB_PAGE is not set
1138# CONFIG_CONFIGFS_FS is not set
1139CONFIG_MISC_FILESYSTEMS=y
1140# CONFIG_ADFS_FS is not set
1141# CONFIG_AFFS_FS is not set
1142# CONFIG_HFS_FS is not set
1143# CONFIG_HFSPLUS_FS is not set
1144# CONFIG_BEFS_FS is not set
1145# CONFIG_BFS_FS is not set
1146# CONFIG_EFS_FS is not set
1147# CONFIG_JFFS2_FS is not set
1148# CONFIG_YAFFS_FS is not set
1149# CONFIG_LOGFS is not set
1150CONFIG_CRAMFS=y
1151# CONFIG_SQUASHFS is not set
1152# CONFIG_VXFS_FS is not set
1153# CONFIG_MINIX_FS is not set
1154# CONFIG_OMFS_FS is not set
1155# CONFIG_HPFS_FS is not set
1156# CONFIG_QNX4FS_FS is not set
1157# CONFIG_ROMFS_FS is not set
1158# CONFIG_SYSV_FS is not set
1159# CONFIG_UFS_FS is not set
1160CONFIG_NETWORK_FILESYSTEMS=y
1161CONFIG_NFS_FS=y
1162CONFIG_NFS_V3=y
1163# CONFIG_NFS_V3_ACL is not set
1164# CONFIG_NFS_V4 is not set
1165CONFIG_ROOT_NFS=y
1166# CONFIG_NFSD is not set
1167CONFIG_LOCKD=y
1168CONFIG_LOCKD_V4=y
1169CONFIG_NFS_COMMON=y
1170CONFIG_SUNRPC=y
1171# CONFIG_RPCSEC_GSS_KRB5 is not set
1172# CONFIG_RPCSEC_GSS_SPKM3 is not set
1173# CONFIG_SMB_FS is not set
1174# CONFIG_CEPH_FS is not set
1175# CONFIG_CIFS is not set
1176# CONFIG_NCP_FS is not set
1177# CONFIG_CODA_FS is not set
1178# CONFIG_AFS_FS is not set
1179
1180#
1181# Partition Types
1182#
1183# CONFIG_PARTITION_ADVANCED is not set
1184CONFIG_MSDOS_PARTITION=y
1185CONFIG_NLS=y
1186CONFIG_NLS_DEFAULT="iso8859-1"
1187CONFIG_NLS_CODEPAGE_437=y
1188# CONFIG_NLS_CODEPAGE_737 is not set
1189# CONFIG_NLS_CODEPAGE_775 is not set
1190CONFIG_NLS_CODEPAGE_850=y
1191# CONFIG_NLS_CODEPAGE_852 is not set
1192# CONFIG_NLS_CODEPAGE_855 is not set
1193# CONFIG_NLS_CODEPAGE_857 is not set
1194# CONFIG_NLS_CODEPAGE_860 is not set
1195# CONFIG_NLS_CODEPAGE_861 is not set
1196# CONFIG_NLS_CODEPAGE_862 is not set
1197# CONFIG_NLS_CODEPAGE_863 is not set
1198# CONFIG_NLS_CODEPAGE_864 is not set
1199# CONFIG_NLS_CODEPAGE_865 is not set
1200# CONFIG_NLS_CODEPAGE_866 is not set
1201# CONFIG_NLS_CODEPAGE_869 is not set
1202# CONFIG_NLS_CODEPAGE_936 is not set
1203# CONFIG_NLS_CODEPAGE_950 is not set
1204# CONFIG_NLS_CODEPAGE_932 is not set
1205# CONFIG_NLS_CODEPAGE_949 is not set
1206# CONFIG_NLS_CODEPAGE_874 is not set
1207# CONFIG_NLS_ISO8859_8 is not set
1208# CONFIG_NLS_CODEPAGE_1250 is not set
1209# CONFIG_NLS_CODEPAGE_1251 is not set
1210# CONFIG_NLS_ASCII is not set
1211CONFIG_NLS_ISO8859_1=y
1212# CONFIG_NLS_ISO8859_2 is not set
1213# CONFIG_NLS_ISO8859_3 is not set
1214# CONFIG_NLS_ISO8859_4 is not set
1215# CONFIG_NLS_ISO8859_5 is not set
1216# CONFIG_NLS_ISO8859_6 is not set
1217# CONFIG_NLS_ISO8859_7 is not set
1218# CONFIG_NLS_ISO8859_9 is not set
1219# CONFIG_NLS_ISO8859_13 is not set
1220# CONFIG_NLS_ISO8859_14 is not set
1221CONFIG_NLS_ISO8859_15=y
1222# CONFIG_NLS_KOI8_R is not set
1223# CONFIG_NLS_KOI8_U is not set
1224# CONFIG_NLS_UTF8 is not set
1225# CONFIG_DLM is not set
1226# CONFIG_BINARY_PRINTF is not set
1227
1228#
1229# Library routines
1230#
1231CONFIG_BITREVERSE=y
1232CONFIG_GENERIC_FIND_LAST_BIT=y
1233# CONFIG_CRC_CCITT is not set
1234# CONFIG_CRC16 is not set
1235# CONFIG_CRC_T10DIF is not set
1236# CONFIG_CRC_ITU_T is not set
1237CONFIG_CRC32=y
1238# CONFIG_CRC7 is not set
1239# CONFIG_LIBCRC32C is not set
1240CONFIG_ZLIB_INFLATE=y
1241CONFIG_DECOMPRESS_GZIP=y
1242CONFIG_HAS_IOMEM=y
1243CONFIG_HAS_IOPORT=y
1244CONFIG_HAS_DMA=y
1245CONFIG_HAVE_LMB=y
1246CONFIG_NLATTR=y
1247CONFIG_GENERIC_ATOMIC64=y
1248
1249#
1250# Kernel hacking
1251#
1252# CONFIG_PRINTK_TIME is not set
1253CONFIG_ENABLE_WARN_DEPRECATED=y
1254CONFIG_ENABLE_MUST_CHECK=y
1255CONFIG_FRAME_WARN=1024
1256CONFIG_MAGIC_SYSRQ=y
1257# CONFIG_STRIP_ASM_SYMS is not set
1258# CONFIG_UNUSED_SYMBOLS is not set
1259# CONFIG_DEBUG_FS is not set
1260# CONFIG_HEADERS_CHECK is not set
1261CONFIG_DEBUG_KERNEL=y
1262# CONFIG_DEBUG_SHIRQ is not set
1263CONFIG_DETECT_SOFTLOCKUP=y
1264# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1265CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1266CONFIG_DETECT_HUNG_TASK=y
1267# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1268CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1269CONFIG_SCHED_DEBUG=y
1270# CONFIG_SCHEDSTATS is not set
1271# CONFIG_TIMER_STATS is not set
1272# CONFIG_DEBUG_OBJECTS is not set
1273# CONFIG_SLUB_DEBUG_ON is not set
1274# CONFIG_SLUB_STATS is not set
1275# CONFIG_DEBUG_KMEMLEAK is not set
1276# CONFIG_DEBUG_RT_MUTEXES is not set
1277# CONFIG_RT_MUTEX_TESTER is not set
1278# CONFIG_DEBUG_SPINLOCK is not set
1279# CONFIG_DEBUG_MUTEXES is not set
1280# CONFIG_DEBUG_LOCK_ALLOC is not set
1281# CONFIG_PROVE_LOCKING is not set
1282# CONFIG_LOCK_STAT is not set
1283# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1284# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1285# CONFIG_DEBUG_KOBJECT is not set
1286# CONFIG_DEBUG_HIGHMEM is not set
1287# CONFIG_DEBUG_BUGVERBOSE is not set
1288# CONFIG_DEBUG_INFO is not set
1289# CONFIG_DEBUG_VM is not set
1290# CONFIG_DEBUG_WRITECOUNT is not set
1291# CONFIG_DEBUG_MEMORY_INIT is not set
1292# CONFIG_DEBUG_LIST is not set
1293# CONFIG_DEBUG_SG is not set
1294# CONFIG_DEBUG_NOTIFIERS is not set
1295# CONFIG_DEBUG_CREDENTIALS is not set
1296# CONFIG_RCU_TORTURE_TEST is not set
1297# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1298# CONFIG_BACKTRACE_SELF_TEST is not set
1299# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1300# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1301# CONFIG_FAULT_INJECTION is not set
1302# CONFIG_LATENCYTOP is not set
1303CONFIG_SYSCTL_SYSCALL_CHECK=y
1304# CONFIG_DEBUG_PAGEALLOC is not set
1305CONFIG_HAVE_FUNCTION_TRACER=y
1306CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
1307CONFIG_HAVE_DYNAMIC_FTRACE=y
1308CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
1309CONFIG_TRACING_SUPPORT=y
1310CONFIG_FTRACE=y
1311# CONFIG_FUNCTION_TRACER is not set
1312# CONFIG_IRQSOFF_TRACER is not set
1313# CONFIG_SCHED_TRACER is not set
1314# CONFIG_ENABLE_DEFAULT_TRACERS is not set
1315# CONFIG_BOOT_TRACER is not set
1316CONFIG_BRANCH_PROFILE_NONE=y
1317# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1318# CONFIG_PROFILE_ALL_BRANCHES is not set
1319# CONFIG_STACK_TRACER is not set
1320# CONFIG_KMEMTRACE is not set
1321# CONFIG_WORKQUEUE_TRACER is not set
1322# CONFIG_BLK_DEV_IO_TRACE is not set
1323# CONFIG_DMA_API_DEBUG is not set
1324# CONFIG_SAMPLES is not set
1325CONFIG_HAVE_ARCH_KGDB=y
1326# CONFIG_KGDB is not set
1327# CONFIG_PPC_DISABLE_WERROR is not set
1328CONFIG_PPC_WERROR=y
1329CONFIG_PRINT_STACK_DEPTH=64
1330# CONFIG_DEBUG_STACKOVERFLOW is not set
1331# CONFIG_DEBUG_STACK_USAGE is not set
1332# CONFIG_CODE_PATCHING_SELFTEST is not set
1333# CONFIG_FTR_FIXUP_SELFTEST is not set
1334# CONFIG_MSI_BITMAP_SELFTEST is not set
1335# CONFIG_XMON is not set
1336# CONFIG_IRQSTACKS is not set
1337# CONFIG_BDI_SWITCH is not set
1338# CONFIG_PPC_EARLY_DEBUG is not set
1339
1340#
1341# Security options
1342#
1343# CONFIG_KEYS is not set
1344# CONFIG_SECURITY is not set
1345# CONFIG_SECURITYFS is not set
1346# CONFIG_DEFAULT_SECURITY_SELINUX is not set
1347# CONFIG_DEFAULT_SECURITY_SMACK is not set
1348# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
1349CONFIG_DEFAULT_SECURITY_DAC=y
1350CONFIG_DEFAULT_SECURITY=""
1351CONFIG_CRYPTO=y
1352
1353#
1354# Crypto core or helper
1355#
1356CONFIG_CRYPTO_ALGAPI=y
1357CONFIG_CRYPTO_ALGAPI2=y
1358CONFIG_CRYPTO_AEAD2=y
1359CONFIG_CRYPTO_BLKCIPHER=y
1360CONFIG_CRYPTO_BLKCIPHER2=y
1361CONFIG_CRYPTO_HASH=y
1362CONFIG_CRYPTO_HASH2=y
1363CONFIG_CRYPTO_RNG2=y
1364CONFIG_CRYPTO_PCOMP=y
1365CONFIG_CRYPTO_MANAGER=y
1366CONFIG_CRYPTO_MANAGER2=y
1367# CONFIG_CRYPTO_GF128MUL is not set
1368# CONFIG_CRYPTO_NULL is not set
1369CONFIG_CRYPTO_WORKQUEUE=y
1370# CONFIG_CRYPTO_CRYPTD is not set
1371# CONFIG_CRYPTO_AUTHENC is not set
1372# CONFIG_CRYPTO_TEST is not set
1373
1374#
1375# Authenticated Encryption with Associated Data
1376#
1377# CONFIG_CRYPTO_CCM is not set
1378# CONFIG_CRYPTO_GCM is not set
1379# CONFIG_CRYPTO_SEQIV is not set
1380
1381#
1382# Block modes
1383#
1384CONFIG_CRYPTO_CBC=y
1385# CONFIG_CRYPTO_CTR is not set
1386# CONFIG_CRYPTO_CTS is not set
1387CONFIG_CRYPTO_ECB=y
1388# CONFIG_CRYPTO_LRW is not set
1389CONFIG_CRYPTO_PCBC=y
1390# CONFIG_CRYPTO_XTS is not set
1391
1392#
1393# Hash modes
1394#
1395# CONFIG_CRYPTO_HMAC is not set
1396# CONFIG_CRYPTO_XCBC is not set
1397# CONFIG_CRYPTO_VMAC is not set
1398
1399#
1400# Digest
1401#
1402# CONFIG_CRYPTO_CRC32C is not set
1403# CONFIG_CRYPTO_GHASH is not set
1404# CONFIG_CRYPTO_MD4 is not set
1405CONFIG_CRYPTO_MD5=y
1406# CONFIG_CRYPTO_MICHAEL_MIC is not set
1407# CONFIG_CRYPTO_RMD128 is not set
1408# CONFIG_CRYPTO_RMD160 is not set
1409# CONFIG_CRYPTO_RMD256 is not set
1410# CONFIG_CRYPTO_RMD320 is not set
1411# CONFIG_CRYPTO_SHA1 is not set
1412# CONFIG_CRYPTO_SHA256 is not set
1413# CONFIG_CRYPTO_SHA512 is not set
1414# CONFIG_CRYPTO_TGR192 is not set
1415# CONFIG_CRYPTO_WP512 is not set
1416
1417#
1418# Ciphers
1419#
1420# CONFIG_CRYPTO_AES is not set
1421# CONFIG_CRYPTO_ANUBIS is not set
1422# CONFIG_CRYPTO_ARC4 is not set
1423# CONFIG_CRYPTO_BLOWFISH is not set
1424# CONFIG_CRYPTO_CAMELLIA is not set
1425# CONFIG_CRYPTO_CAST5 is not set
1426# CONFIG_CRYPTO_CAST6 is not set
1427CONFIG_CRYPTO_DES=y
1428# CONFIG_CRYPTO_FCRYPT is not set
1429# CONFIG_CRYPTO_KHAZAD is not set
1430# CONFIG_CRYPTO_SALSA20 is not set
1431# CONFIG_CRYPTO_SEED is not set
1432# CONFIG_CRYPTO_SERPENT is not set
1433# CONFIG_CRYPTO_TEA is not set
1434# CONFIG_CRYPTO_TWOFISH is not set
1435
1436#
1437# Compression
1438#
1439# CONFIG_CRYPTO_DEFLATE is not set
1440# CONFIG_CRYPTO_ZLIB is not set
1441# CONFIG_CRYPTO_LZO is not set
1442
1443#
1444# Random Number Generation
1445#
1446# CONFIG_CRYPTO_ANSI_CPRNG is not set
1447CONFIG_CRYPTO_HW=y
1448# CONFIG_CRYPTO_DEV_HIFN_795X is not set
1449# CONFIG_CRYPTO_DEV_PPC4XX is not set
1450# CONFIG_PPC_CLOCK is not set
1451# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index a9b91ed3d4b9..2048a6aeea91 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -21,6 +21,7 @@
21/* operations for longs and pointers */ 21/* operations for longs and pointers */
22#define PPC_LL stringify_in_c(ld) 22#define PPC_LL stringify_in_c(ld)
23#define PPC_STL stringify_in_c(std) 23#define PPC_STL stringify_in_c(std)
24#define PPC_STLU stringify_in_c(stdu)
24#define PPC_LCMPI stringify_in_c(cmpdi) 25#define PPC_LCMPI stringify_in_c(cmpdi)
25#define PPC_LONG stringify_in_c(.llong) 26#define PPC_LONG stringify_in_c(.llong)
26#define PPC_LONG_ALIGN stringify_in_c(.balign 8) 27#define PPC_LONG_ALIGN stringify_in_c(.balign 8)
@@ -44,6 +45,7 @@
44/* operations for longs and pointers */ 45/* operations for longs and pointers */
45#define PPC_LL stringify_in_c(lwz) 46#define PPC_LL stringify_in_c(lwz)
46#define PPC_STL stringify_in_c(stw) 47#define PPC_STL stringify_in_c(stw)
48#define PPC_STLU stringify_in_c(stwu)
47#define PPC_LCMPI stringify_in_c(cmpwi) 49#define PPC_LCMPI stringify_in_c(cmpwi)
48#define PPC_LONG stringify_in_c(.long) 50#define PPC_LONG stringify_in_c(.long)
49#define PPC_LONG_ALIGN stringify_in_c(.balign 4) 51#define PPC_LONG_ALIGN stringify_in_c(.balign 4)
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index 2c15212e1700..065c590c991d 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -85,12 +85,12 @@
85 } \ 85 } \
86} while (0) 86} while (0)
87 87
88#define __WARN() do { \ 88#define __WARN_TAINT(taint) do { \
89 __asm__ __volatile__( \ 89 __asm__ __volatile__( \
90 "1: twi 31,0,0\n" \ 90 "1: twi 31,0,0\n" \
91 _EMIT_BUG_ENTRY \ 91 _EMIT_BUG_ENTRY \
92 : : "i" (__FILE__), "i" (__LINE__), \ 92 : : "i" (__FILE__), "i" (__LINE__), \
93 "i" (BUGFLAG_WARNING), \ 93 "i" (BUGFLAG_TAINT(taint)), \
94 "i" (sizeof(struct bug_entry))); \ 94 "i" (sizeof(struct bug_entry))); \
95} while (0) 95} while (0)
96 96
@@ -104,7 +104,7 @@
104 "1: "PPC_TLNEI" %4,0\n" \ 104 "1: "PPC_TLNEI" %4,0\n" \
105 _EMIT_BUG_ENTRY \ 105 _EMIT_BUG_ENTRY \
106 : : "i" (__FILE__), "i" (__LINE__), \ 106 : : "i" (__FILE__), "i" (__LINE__), \
107 "i" (BUGFLAG_WARNING), \ 107 "i" (BUGFLAG_TAINT(TAINT_WARN)), \
108 "i" (sizeof(struct bug_entry)), \ 108 "i" (sizeof(struct bug_entry)), \
109 "r" (__ret_warn_on)); \ 109 "r" (__ret_warn_on)); \
110 } \ 110 } \
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 725634fc18c6..4b509411ad8a 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -42,7 +42,7 @@ extern struct ppc64_caches ppc64_caches;
42#endif /* __powerpc64__ && ! __ASSEMBLY__ */ 42#endif /* __powerpc64__ && ! __ASSEMBLY__ */
43 43
44#if !defined(__ASSEMBLY__) 44#if !defined(__ASSEMBLY__)
45#define __read_mostly __attribute__((__section__(".data.read_mostly"))) 45#define __read_mostly __attribute__((__section__(".data..read_mostly")))
46#endif 46#endif
47 47
48#endif /* __KERNEL__ */ 48#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index e3cba4e1eb34..b0b21134f61a 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -70,6 +70,7 @@ struct pt_regs;
70extern int machine_check_generic(struct pt_regs *regs); 70extern int machine_check_generic(struct pt_regs *regs);
71extern int machine_check_4xx(struct pt_regs *regs); 71extern int machine_check_4xx(struct pt_regs *regs);
72extern int machine_check_440A(struct pt_regs *regs); 72extern int machine_check_440A(struct pt_regs *regs);
73extern int machine_check_e500mc(struct pt_regs *regs);
73extern int machine_check_e500(struct pt_regs *regs); 74extern int machine_check_e500(struct pt_regs *regs);
74extern int machine_check_e200(struct pt_regs *regs); 75extern int machine_check_e200(struct pt_regs *regs);
75extern int machine_check_47x(struct pt_regs *regs); 76extern int machine_check_47x(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 6d94d27ed850..a3954e4fcbe2 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -10,9 +10,6 @@ struct dma_map_ops;
10struct device_node; 10struct device_node;
11 11
12struct dev_archdata { 12struct dev_archdata {
13 /* Optional pointer to an OF device node */
14 struct device_node *of_node;
15
16 /* DMA operations on that device */ 13 /* DMA operations on that device */
17 struct dma_map_ops *dma_ops; 14 struct dma_map_ops *dma_ops;
18 15
@@ -30,19 +27,8 @@ struct dev_archdata {
30#endif 27#endif
31}; 28};
32 29
33static inline void dev_archdata_set_node(struct dev_archdata *ad,
34 struct device_node *np)
35{
36 ad->of_node = np;
37}
38
39static inline struct device_node *
40dev_archdata_get_node(const struct dev_archdata *ad)
41{
42 return ad->of_node;
43}
44
45struct pdev_archdata { 30struct pdev_archdata {
31 u64 dma_mask;
46}; 32};
47 33
48#endif /* _ASM_POWERPC_DEVICE_H */ 34#endif /* _ASM_POWERPC_DEVICE_H */
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index a6ca6da1430b..2a9cd74a841e 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -2,6 +2,18 @@
2#define _ASM_POWERPC_KEXEC_H 2#define _ASM_POWERPC_KEXEC_H
3#ifdef __KERNEL__ 3#ifdef __KERNEL__
4 4
5#ifdef CONFIG_FSL_BOOKE
6
7/*
8 * On FSL-BookE we setup a 1:1 mapping which covers the first 2GiB of memory
9 * and therefore we can only deal with memory within this range
10 */
11#define KEXEC_SOURCE_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL)
12#define KEXEC_DESTINATION_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL)
13#define KEXEC_CONTROL_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL)
14
15#else
16
5/* 17/*
6 * Maximum page that is mapped directly into kernel memory. 18 * Maximum page that is mapped directly into kernel memory.
7 * XXX: Since we copy virt we can use any page we allocate 19 * XXX: Since we copy virt we can use any page we allocate
@@ -21,6 +33,7 @@
21/* TASK_SIZE, probably left over from use_mm ?? */ 33/* TASK_SIZE, probably left over from use_mm ?? */
22#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE 34#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
23#endif 35#endif
36#endif
24 37
25#define KEXEC_CONTROL_PAGE_SIZE 4096 38#define KEXEC_CONTROL_PAGE_SIZE 4096
26 39
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index 81f3b0b5601e..6c5547d82bbe 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -77,4 +77,14 @@ struct kvm_debug_exit_arch {
77struct kvm_guest_debug_arch { 77struct kvm_guest_debug_arch {
78}; 78};
79 79
80#define KVM_REG_MASK 0x001f
81#define KVM_REG_EXT_MASK 0xffe0
82#define KVM_REG_GPR 0x0000
83#define KVM_REG_FPR 0x0020
84#define KVM_REG_QPR 0x0040
85#define KVM_REG_FQPR 0x0060
86
87#define KVM_INTERRUPT_SET -1U
88#define KVM_INTERRUPT_UNSET -2U
89
80#endif /* __LINUX_KVM_POWERPC_H */ 90#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index aadf2dd6f84e..c5ea4cda34b3 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -88,6 +88,8 @@
88 88
89#define BOOK3S_HFLAG_DCBZ32 0x1 89#define BOOK3S_HFLAG_DCBZ32 0x1
90#define BOOK3S_HFLAG_SLB 0x2 90#define BOOK3S_HFLAG_SLB 0x2
91#define BOOK3S_HFLAG_PAIRED_SINGLE 0x4
92#define BOOK3S_HFLAG_NATIVE_PS 0x8
91 93
92#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */ 94#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
93#define RESUME_FLAG_HOST (1<<1) /* Resume host? */ 95#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index db7db0a96967..6f74d93725a0 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -22,46 +22,47 @@
22 22
23#include <linux/types.h> 23#include <linux/types.h>
24#include <linux/kvm_host.h> 24#include <linux/kvm_host.h>
25#include <asm/kvm_book3s_64_asm.h> 25#include <asm/kvm_book3s_asm.h>
26 26
27struct kvmppc_slb { 27struct kvmppc_slb {
28 u64 esid; 28 u64 esid;
29 u64 vsid; 29 u64 vsid;
30 u64 orige; 30 u64 orige;
31 u64 origv; 31 u64 origv;
32 bool valid; 32 bool valid : 1;
33 bool Ks; 33 bool Ks : 1;
34 bool Kp; 34 bool Kp : 1;
35 bool nx; 35 bool nx : 1;
36 bool large; /* PTEs are 16MB */ 36 bool large : 1; /* PTEs are 16MB */
37 bool tb; /* 1TB segment */ 37 bool tb : 1; /* 1TB segment */
38 bool class; 38 bool class : 1;
39}; 39};
40 40
41struct kvmppc_sr { 41struct kvmppc_sr {
42 u32 raw; 42 u32 raw;
43 u32 vsid; 43 u32 vsid;
44 bool Ks; 44 bool Ks : 1;
45 bool Kp; 45 bool Kp : 1;
46 bool nx; 46 bool nx : 1;
47 bool valid : 1;
47}; 48};
48 49
49struct kvmppc_bat { 50struct kvmppc_bat {
50 u64 raw; 51 u64 raw;
51 u32 bepi; 52 u32 bepi;
52 u32 bepi_mask; 53 u32 bepi_mask;
53 bool vs;
54 bool vp;
55 u32 brpn; 54 u32 brpn;
56 u8 wimg; 55 u8 wimg;
57 u8 pp; 56 u8 pp;
57 bool vs : 1;
58 bool vp : 1;
58}; 59};
59 60
60struct kvmppc_sid_map { 61struct kvmppc_sid_map {
61 u64 guest_vsid; 62 u64 guest_vsid;
62 u64 guest_esid; 63 u64 guest_esid;
63 u64 host_vsid; 64 u64 host_vsid;
64 bool valid; 65 bool valid : 1;
65}; 66};
66 67
67#define SID_MAP_BITS 9 68#define SID_MAP_BITS 9
@@ -70,7 +71,7 @@ struct kvmppc_sid_map {
70 71
71struct kvmppc_vcpu_book3s { 72struct kvmppc_vcpu_book3s {
72 struct kvm_vcpu vcpu; 73 struct kvm_vcpu vcpu;
73 struct kvmppc_book3s_shadow_vcpu shadow_vcpu; 74 struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
74 struct kvmppc_sid_map sid_map[SID_MAP_NUM]; 75 struct kvmppc_sid_map sid_map[SID_MAP_NUM];
75 struct kvmppc_slb slb[64]; 76 struct kvmppc_slb slb[64];
76 struct { 77 struct {
@@ -82,9 +83,10 @@ struct kvmppc_vcpu_book3s {
82 struct kvmppc_bat ibat[8]; 83 struct kvmppc_bat ibat[8];
83 struct kvmppc_bat dbat[8]; 84 struct kvmppc_bat dbat[8];
84 u64 hid[6]; 85 u64 hid[6];
86 u64 gqr[8];
85 int slb_nr; 87 int slb_nr;
88 u32 dsisr;
86 u64 sdr1; 89 u64 sdr1;
87 u64 dsisr;
88 u64 hior; 90 u64 hior;
89 u64 msr_mask; 91 u64 msr_mask;
90 u64 vsid_first; 92 u64 vsid_first;
@@ -98,15 +100,15 @@ struct kvmppc_vcpu_book3s {
98#define CONTEXT_GUEST 1 100#define CONTEXT_GUEST 1
99#define CONTEXT_GUEST_END 2 101#define CONTEXT_GUEST_END 2
100 102
101#define VSID_REAL 0xfffffffffff00000 103#define VSID_REAL 0x1fffffffffc00000ULL
102#define VSID_REAL_DR 0xffffffffffe00000 104#define VSID_BAT 0x1fffffffffb00000ULL
103#define VSID_REAL_IR 0xffffffffffd00000 105#define VSID_REAL_DR 0x2000000000000000ULL
104#define VSID_BAT 0xffffffffffc00000 106#define VSID_REAL_IR 0x4000000000000000ULL
105#define VSID_PR 0x8000000000000000 107#define VSID_PR 0x8000000000000000ULL
106 108
107extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 ea, u64 ea_mask); 109extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
108extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask); 110extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
109extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end); 111extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
110extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr); 112extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
111extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu); 113extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
112extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu); 114extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
@@ -114,11 +116,13 @@ extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
114extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr); 116extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
115extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu); 117extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
116extern struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data); 118extern struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data);
117extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr, bool data); 119extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
118extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr); 120extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
119extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec); 121extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
120extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, 122extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
121 bool upper, u32 val); 123 bool upper, u32 val);
124extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
125extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
122 126
123extern u32 kvmppc_trampoline_lowmem; 127extern u32 kvmppc_trampoline_lowmem;
124extern u32 kvmppc_trampoline_enter; 128extern u32 kvmppc_trampoline_enter;
@@ -126,6 +130,8 @@ extern void kvmppc_rmcall(ulong srr0, ulong srr1);
126extern void kvmppc_load_up_fpu(void); 130extern void kvmppc_load_up_fpu(void);
127extern void kvmppc_load_up_altivec(void); 131extern void kvmppc_load_up_altivec(void);
128extern void kvmppc_load_up_vsx(void); 132extern void kvmppc_load_up_vsx(void);
133extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
134extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
129 135
130static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) 136static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
131{ 137{
@@ -140,7 +146,108 @@ static inline ulong dsisr(void)
140} 146}
141 147
142extern void kvm_return_point(void); 148extern void kvm_return_point(void);
149static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu);
150
151static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
152{
153 if ( num < 14 ) {
154 to_svcpu(vcpu)->gpr[num] = val;
155 to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
156 } else
157 vcpu->arch.gpr[num] = val;
158}
159
160static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
161{
162 if ( num < 14 )
163 return to_svcpu(vcpu)->gpr[num];
164 else
165 return vcpu->arch.gpr[num];
166}
167
168static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
169{
170 to_svcpu(vcpu)->cr = val;
171 to_book3s(vcpu)->shadow_vcpu->cr = val;
172}
173
174static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
175{
176 return to_svcpu(vcpu)->cr;
177}
178
179static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
180{
181 to_svcpu(vcpu)->xer = val;
182 to_book3s(vcpu)->shadow_vcpu->xer = val;
183}
184
185static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
186{
187 return to_svcpu(vcpu)->xer;
188}
189
190static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
191{
192 to_svcpu(vcpu)->ctr = val;
193}
194
195static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
196{
197 return to_svcpu(vcpu)->ctr;
198}
199
200static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
201{
202 to_svcpu(vcpu)->lr = val;
203}
204
205static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
206{
207 return to_svcpu(vcpu)->lr;
208}
209
210static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
211{
212 to_svcpu(vcpu)->pc = val;
213}
214
215static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
216{
217 return to_svcpu(vcpu)->pc;
218}
219
220static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
221{
222 ulong pc = kvmppc_get_pc(vcpu);
223 struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
224
225 /* Load the instruction manually if it failed to do so in the
226 * exit path */
227 if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
228 kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
229
230 return svcpu->last_inst;
231}
232
233static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
234{
235 return to_svcpu(vcpu)->fault_dar;
236}
237
238/* Magic register values loaded into r3 and r4 before the 'sc' assembly
239 * instruction for the OSI hypercalls */
240#define OSI_SC_MAGIC_R3 0x113724FA
241#define OSI_SC_MAGIC_R4 0x77810F9B
143 242
144#define INS_DCBZ 0x7c0007ec 243#define INS_DCBZ 0x7c0007ec
145 244
245/* Also add subarch specific defines */
246
247#ifdef CONFIG_PPC_BOOK3S_32
248#include <asm/kvm_book3s_32.h>
249#else
250#include <asm/kvm_book3s_64.h>
251#endif
252
146#endif /* __ASM_KVM_BOOK3S_H__ */ 253#endif /* __ASM_KVM_BOOK3S_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h
new file mode 100644
index 000000000000..de604db135f5
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s_32.h
@@ -0,0 +1,42 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright SUSE Linux Products GmbH 2010
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20#ifndef __ASM_KVM_BOOK3S_32_H__
21#define __ASM_KVM_BOOK3S_32_H__
22
23static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
24{
25 return to_book3s(vcpu)->shadow_vcpu;
26}
27
28#define PTE_SIZE 12
29#define VSID_ALL 0
30#define SR_INVALID 0x00000001 /* VSID 1 should always be unused */
31#define SR_KP 0x20000000
32#define PTE_V 0x80000000
33#define PTE_SEC 0x00000040
34#define PTE_M 0x00000010
35#define PTE_R 0x00000100
36#define PTE_C 0x00000080
37
38#define SID_SHIFT 28
39#define ESID_MASK 0xf0000000
40#define VSID_MASK 0x00fffffff0000000ULL
41
42#endif /* __ASM_KVM_BOOK3S_32_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
new file mode 100644
index 000000000000..4cadd612d575
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -0,0 +1,28 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright SUSE Linux Products GmbH 2010
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20#ifndef __ASM_KVM_BOOK3S_64_H__
21#define __ASM_KVM_BOOK3S_64_H__
22
23static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
24{
25 return &get_paca()->shadow_vcpu;
26}
27
28#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 183461b48407..36fdb3aff30b 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -22,7 +22,7 @@
22 22
23#ifdef __ASSEMBLY__ 23#ifdef __ASSEMBLY__
24 24
25#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 25#ifdef CONFIG_KVM_BOOK3S_HANDLER
26 26
27#include <asm/kvm_asm.h> 27#include <asm/kvm_asm.h>
28 28
@@ -55,7 +55,7 @@ kvmppc_resume_\intno:
55.macro DO_KVM intno 55.macro DO_KVM intno
56.endm 56.endm
57 57
58#endif /* CONFIG_KVM_BOOK3S_64_HANDLER */ 58#endif /* CONFIG_KVM_BOOK3S_HANDLER */
59 59
60#else /*__ASSEMBLY__ */ 60#else /*__ASSEMBLY__ */
61 61
@@ -63,12 +63,33 @@ struct kvmppc_book3s_shadow_vcpu {
63 ulong gpr[14]; 63 ulong gpr[14];
64 u32 cr; 64 u32 cr;
65 u32 xer; 65 u32 xer;
66
67 u32 fault_dsisr;
68 u32 last_inst;
69 ulong ctr;
70 ulong lr;
71 ulong pc;
72 ulong shadow_srr1;
73 ulong fault_dar;
74
66 ulong host_r1; 75 ulong host_r1;
67 ulong host_r2; 76 ulong host_r2;
68 ulong handler; 77 ulong handler;
69 ulong scratch0; 78 ulong scratch0;
70 ulong scratch1; 79 ulong scratch1;
71 ulong vmhandler; 80 ulong vmhandler;
81 u8 in_guest;
82
83#ifdef CONFIG_PPC_BOOK3S_32
84 u32 sr[16]; /* Guest SRs */
85#endif
86#ifdef CONFIG_PPC_BOOK3S_64
87 u8 slb_max; /* highest used guest slb entry */
88 struct {
89 u64 esid;
90 u64 vsid;
91 } slb[64]; /* guest SLB */
92#endif
72}; 93};
73 94
74#endif /*__ASSEMBLY__ */ 95#endif /*__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
new file mode 100644
index 000000000000..9c9ba3d59b1b
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -0,0 +1,96 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright SUSE Linux Products GmbH 2010
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20#ifndef __ASM_KVM_BOOKE_H__
21#define __ASM_KVM_BOOKE_H__
22
23#include <linux/types.h>
24#include <linux/kvm_host.h>
25
26static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
27{
28 vcpu->arch.gpr[num] = val;
29}
30
31static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
32{
33 return vcpu->arch.gpr[num];
34}
35
36static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
37{
38 vcpu->arch.cr = val;
39}
40
41static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
42{
43 return vcpu->arch.cr;
44}
45
46static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
47{
48 vcpu->arch.xer = val;
49}
50
51static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
52{
53 return vcpu->arch.xer;
54}
55
56static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
57{
58 return vcpu->arch.last_inst;
59}
60
61static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
62{
63 vcpu->arch.ctr = val;
64}
65
66static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
67{
68 return vcpu->arch.ctr;
69}
70
71static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
72{
73 vcpu->arch.lr = val;
74}
75
76static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
77{
78 return vcpu->arch.lr;
79}
80
81static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
82{
83 vcpu->arch.pc = val;
84}
85
86static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
87{
88 return vcpu->arch.pc;
89}
90
91static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
92{
93 return vcpu->arch.fault_dear;
94}
95
96#endif /* __ASM_KVM_BOOKE_H__ */
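
Unlike the Book3S accessors (which, before this patch, mirrored GPRs 0-13 through the PACA shadow copy), these BookE variants read and write vcpu->arch directly. A minimal usage sketch, assuming the instruction fields have already been decoded elsewhere; the helper name is illustrative, not from the patch:

    /* Emulate "mr rT,rS" using only the accessors declared above. */
    static void example_emulate_mr(struct kvm_vcpu *vcpu, int rt, int rs)
    {
    	ulong val = kvmppc_get_gpr(vcpu, rs);		/* read source GPR   */

    	kvmppc_set_gpr(vcpu, rt, val);			/* write destination */
    	kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);	/* advance past insn */
    }
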
diff --git a/arch/powerpc/include/asm/kvm_fpu.h b/arch/powerpc/include/asm/kvm_fpu.h
new file mode 100644
index 000000000000..94f05de9ad04
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_fpu.h
@@ -0,0 +1,85 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright Novell Inc. 2010
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20#ifndef __ASM_KVM_FPU_H__
21#define __ASM_KVM_FPU_H__
22
23#include <linux/types.h>
24
25extern void fps_fres(struct thread_struct *t, u32 *dst, u32 *src1);
26extern void fps_frsqrte(struct thread_struct *t, u32 *dst, u32 *src1);
27extern void fps_fsqrts(struct thread_struct *t, u32 *dst, u32 *src1);
28
29extern void fps_fadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
30extern void fps_fdivs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
31extern void fps_fmuls(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
32extern void fps_fsubs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
33
34extern void fps_fmadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
35 u32 *src3);
36extern void fps_fmsubs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
37 u32 *src3);
38extern void fps_fnmadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
39 u32 *src3);
40extern void fps_fnmsubs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
41 u32 *src3);
42extern void fps_fsel(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
43 u32 *src3);
44
45#define FPD_ONE_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \
46 u64 *dst, u64 *src1);
47#define FPD_TWO_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \
48 u64 *dst, u64 *src1, u64 *src2);
49#define FPD_THREE_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \
50 u64 *dst, u64 *src1, u64 *src2, u64 *src3);
51
52extern void fpd_fcmpu(u64 *fpscr, u32 *cr, u64 *src1, u64 *src2);
53extern void fpd_fcmpo(u64 *fpscr, u32 *cr, u64 *src1, u64 *src2);
54
55FPD_ONE_IN(fsqrts)
56FPD_ONE_IN(frsqrtes)
57FPD_ONE_IN(fres)
58FPD_ONE_IN(frsp)
59FPD_ONE_IN(fctiw)
60FPD_ONE_IN(fctiwz)
61FPD_ONE_IN(fsqrt)
62FPD_ONE_IN(fre)
63FPD_ONE_IN(frsqrte)
64FPD_ONE_IN(fneg)
65FPD_ONE_IN(fabs)
66FPD_TWO_IN(fadds)
67FPD_TWO_IN(fsubs)
68FPD_TWO_IN(fdivs)
69FPD_TWO_IN(fmuls)
70FPD_TWO_IN(fcpsgn)
71FPD_TWO_IN(fdiv)
72FPD_TWO_IN(fadd)
73FPD_TWO_IN(fmul)
74FPD_TWO_IN(fsub)
75FPD_THREE_IN(fmsubs)
76FPD_THREE_IN(fmadds)
77FPD_THREE_IN(fnmsubs)
78FPD_THREE_IN(fnmadds)
79FPD_THREE_IN(fsel)
80FPD_THREE_IN(fmsub)
81FPD_THREE_IN(fmadd)
82FPD_THREE_IN(fnmsub)
83FPD_THREE_IN(fnmadd)
84
85#endif
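
The FPD_ONE_IN/TWO_IN/THREE_IN macros only stamp out extern prototypes for double-precision helpers implemented elsewhere in this series; for example, FPD_TWO_IN(fadd) expands to the declaration shown below. The call site is an illustrative sketch only (fpscr is u64 after the kvm_host.h change later in this patch); CR write-back is omitted for brevity:

    /* Expansion of FPD_TWO_IN(fadd): */
    extern void fpd_fadd(u64 *fpscr, u32 *cr, u64 *dst, u64 *src1, u64 *src2);

    /* Hypothetical caller, e.g. from FP instruction emulation: */
    static void example_fadd(struct kvm_vcpu *vcpu, int frt, int fra, int frb)
    {
    	u32 cr = kvmppc_get_cr(vcpu);

    	fpd_fadd(&vcpu->arch.fpscr, &cr,
    		 &vcpu->arch.fpr[frt], &vcpu->arch.fpr[fra],
    		 &vcpu->arch.fpr[frb]);
    }
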
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 5e5bae7e152f..0c9ad869decd 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -66,7 +66,7 @@ struct kvm_vcpu_stat {
66 u32 dec_exits; 66 u32 dec_exits;
67 u32 ext_intr_exits; 67 u32 ext_intr_exits;
68 u32 halt_wakeup; 68 u32 halt_wakeup;
69#ifdef CONFIG_PPC64 69#ifdef CONFIG_PPC_BOOK3S
70 u32 pf_storage; 70 u32 pf_storage;
71 u32 pf_instruc; 71 u32 pf_instruc;
72 u32 sp_storage; 72 u32 sp_storage;
@@ -124,12 +124,12 @@ struct kvm_arch {
124}; 124};
125 125
126struct kvmppc_pte { 126struct kvmppc_pte {
127 u64 eaddr; 127 ulong eaddr;
128 u64 vpage; 128 u64 vpage;
129 u64 raddr; 129 ulong raddr;
130 bool may_read; 130 bool may_read : 1;
131 bool may_write; 131 bool may_write : 1;
132 bool may_execute; 132 bool may_execute : 1;
133}; 133};
134 134
135struct kvmppc_mmu { 135struct kvmppc_mmu {
@@ -145,7 +145,7 @@ struct kvmppc_mmu {
145 int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data); 145 int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
146 void (*reset_msr)(struct kvm_vcpu *vcpu); 146 void (*reset_msr)(struct kvm_vcpu *vcpu);
147 void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large); 147 void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
148 int (*esid_to_vsid)(struct kvm_vcpu *vcpu, u64 esid, u64 *vsid); 148 int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
149 u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data); 149 u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
150 bool (*is_dcbz32)(struct kvm_vcpu *vcpu); 150 bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
151}; 151};
@@ -160,7 +160,7 @@ struct hpte_cache {
160struct kvm_vcpu_arch { 160struct kvm_vcpu_arch {
161 ulong host_stack; 161 ulong host_stack;
162 u32 host_pid; 162 u32 host_pid;
163#ifdef CONFIG_PPC64 163#ifdef CONFIG_PPC_BOOK3S
164 ulong host_msr; 164 ulong host_msr;
165 ulong host_r2; 165 ulong host_r2;
166 void *host_retip; 166 void *host_retip;
@@ -175,7 +175,7 @@ struct kvm_vcpu_arch {
175 ulong gpr[32]; 175 ulong gpr[32];
176 176
177 u64 fpr[32]; 177 u64 fpr[32];
178 u32 fpscr; 178 u64 fpscr;
179 179
180#ifdef CONFIG_ALTIVEC 180#ifdef CONFIG_ALTIVEC
181 vector128 vr[32]; 181 vector128 vr[32];
@@ -186,19 +186,23 @@ struct kvm_vcpu_arch {
186 u64 vsr[32]; 186 u64 vsr[32];
187#endif 187#endif
188 188
189#ifdef CONFIG_PPC_BOOK3S
190 /* For Gekko paired singles */
191 u32 qpr[32];
192#endif
193
194#ifdef CONFIG_BOOKE
189 ulong pc; 195 ulong pc;
190 ulong ctr; 196 ulong ctr;
191 ulong lr; 197 ulong lr;
192 198
193#ifdef CONFIG_BOOKE
194 ulong xer; 199 ulong xer;
195 u32 cr; 200 u32 cr;
196#endif 201#endif
197 202
198 ulong msr; 203 ulong msr;
199#ifdef CONFIG_PPC64 204#ifdef CONFIG_PPC_BOOK3S
200 ulong shadow_msr; 205 ulong shadow_msr;
201 ulong shadow_srr1;
202 ulong hflags; 206 ulong hflags;
203 ulong guest_owned_ext; 207 ulong guest_owned_ext;
204#endif 208#endif
@@ -253,20 +257,22 @@ struct kvm_vcpu_arch {
253 struct dentry *debugfs_exit_timing; 257 struct dentry *debugfs_exit_timing;
254#endif 258#endif
255 259
260#ifdef CONFIG_BOOKE
256 u32 last_inst; 261 u32 last_inst;
257#ifdef CONFIG_PPC64
258 ulong fault_dsisr;
259#endif
260 ulong fault_dear; 262 ulong fault_dear;
261 ulong fault_esr; 263 ulong fault_esr;
262 ulong queued_dear; 264 ulong queued_dear;
263 ulong queued_esr; 265 ulong queued_esr;
266#endif
264 gpa_t paddr_accessed; 267 gpa_t paddr_accessed;
265 268
266 u8 io_gpr; /* GPR used as IO source/target */ 269 u8 io_gpr; /* GPR used as IO source/target */
267 u8 mmio_is_bigendian; 270 u8 mmio_is_bigendian;
271 u8 mmio_sign_extend;
268 u8 dcr_needed; 272 u8 dcr_needed;
269 u8 dcr_is_write; 273 u8 dcr_is_write;
274 u8 osi_needed;
275 u8 osi_enabled;
270 276
271 u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */ 277 u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
272 278
@@ -275,7 +281,7 @@ struct kvm_vcpu_arch {
275 u64 dec_jiffies; 281 u64 dec_jiffies;
276 unsigned long pending_exceptions; 282 unsigned long pending_exceptions;
277 283
278#ifdef CONFIG_PPC64 284#ifdef CONFIG_PPC_BOOK3S
279 struct hpte_cache hpte_cache[HPTEG_CACHE_NUM]; 285 struct hpte_cache hpte_cache[HPTEG_CACHE_NUM];
280 int hpte_cache_offset; 286 int hpte_cache_offset;
281#endif 287#endif
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index e2642829e435..18d139ec2d22 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -30,6 +30,8 @@
30#include <linux/kvm_host.h> 30#include <linux/kvm_host.h>
31#ifdef CONFIG_PPC_BOOK3S 31#ifdef CONFIG_PPC_BOOK3S
32#include <asm/kvm_book3s.h> 32#include <asm/kvm_book3s.h>
33#else
34#include <asm/kvm_booke.h>
33#endif 35#endif
34 36
35enum emulation_result { 37enum emulation_result {
@@ -37,6 +39,7 @@ enum emulation_result {
37 EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ 39 EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
38 EMULATE_DO_DCR, /* kvm_run filled with DCR request */ 40 EMULATE_DO_DCR, /* kvm_run filled with DCR request */
39 EMULATE_FAIL, /* can't emulate this instruction */ 41 EMULATE_FAIL, /* can't emulate this instruction */
42 EMULATE_AGAIN, /* something went wrong. go again */
40}; 43};
41 44
42extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); 45extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
@@ -48,8 +51,11 @@ extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
48extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 51extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
49 unsigned int rt, unsigned int bytes, 52 unsigned int rt, unsigned int bytes,
50 int is_bigendian); 53 int is_bigendian);
54extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
55 unsigned int rt, unsigned int bytes,
56 int is_bigendian);
51extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 57extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
52 u32 val, unsigned int bytes, int is_bigendian); 58 u64 val, unsigned int bytes, int is_bigendian);
53 59
54extern int kvmppc_emulate_instruction(struct kvm_run *run, 60extern int kvmppc_emulate_instruction(struct kvm_run *run,
55 struct kvm_vcpu *vcpu); 61 struct kvm_vcpu *vcpu);
@@ -63,6 +69,7 @@ extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
63extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); 69extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
64extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); 70extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
65extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu); 71extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
72extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
66extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); 73extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
67extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); 74extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
68extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index, 75extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
@@ -88,6 +95,8 @@ extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
88extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu); 95extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
89extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, 96extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
90 struct kvm_interrupt *irq); 97 struct kvm_interrupt *irq);
98extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
99 struct kvm_interrupt *irq);
91 100
92extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, 101extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
93 unsigned int op, int *advance); 102 unsigned int op, int *advance);
@@ -99,81 +108,37 @@ extern void kvmppc_booke_exit(void);
99 108
100extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); 109extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
101 110
102#ifdef CONFIG_PPC_BOOK3S 111/*
103 112 * Cuts out inst bits with ordering according to spec.
104/* We assume we're always acting on the current vcpu */ 113 * That means the leftmost bit is zero. All given bits are included.
105 114 */
106static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) 115static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
107{
108 if ( num < 14 ) {
109 get_paca()->shadow_vcpu.gpr[num] = val;
110 to_book3s(vcpu)->shadow_vcpu.gpr[num] = val;
111 } else
112 vcpu->arch.gpr[num] = val;
113}
114
115static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
116{
117 if ( num < 14 )
118 return get_paca()->shadow_vcpu.gpr[num];
119 else
120 return vcpu->arch.gpr[num];
121}
122
123static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
124{
125 get_paca()->shadow_vcpu.cr = val;
126 to_book3s(vcpu)->shadow_vcpu.cr = val;
127}
128
129static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
130{
131 return get_paca()->shadow_vcpu.cr;
132}
133
134static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
135{
136 get_paca()->shadow_vcpu.xer = val;
137 to_book3s(vcpu)->shadow_vcpu.xer = val;
138}
139
140static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
141{ 116{
142 return get_paca()->shadow_vcpu.xer; 117 u32 r;
143} 118 u32 mask;
144 119
145#else 120 BUG_ON(msb > lsb);
146 121
147static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) 122 mask = (1 << (lsb - msb + 1)) - 1;
148{ 123 r = (inst >> (63 - lsb)) & mask;
149 vcpu->arch.gpr[num] = val;
150}
151 124
152static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) 125 return r;
153{
154 return vcpu->arch.gpr[num];
155} 126}
156 127
157static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) 128/*
129 * Replaces inst bits with ordering according to spec.
130 */
131static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
158{ 132{
159 vcpu->arch.cr = val; 133 u32 r;
160} 134 u32 mask;
161 135
162static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) 136 BUG_ON(msb > lsb);
163{
164 return vcpu->arch.cr;
165}
166 137
167static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) 138 mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
168{ 139 r = (inst & ~mask) | ((value << (63 - lsb)) & mask);
169 vcpu->arch.xer = val;
170}
171 140
172static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) 141 return r;
173{
174 return vcpu->arch.xer;
175} 142}
176 143
177#endif
178
179#endif /* __POWERPC_KVM_PPC_H__ */ 144#endif /* __POWERPC_KVM_PPC_H__ */
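
kvmppc_get_field()/kvmppc_set_field() use the Power ISA bit numbering over a 64-bit value (bit 0 is the most significant), so a 32-bit instruction held in the low word is addressed with bit positions 32-63. A worked example, purely illustrative:

    /* Primary opcode = ISA bits 32..37 of the 64-bit value, i.e. the top six
     * bits of the 32-bit instruction in the low word:
     *
     *   kvmppc_get_field(inst, 32, 37)
     *     mask = (1 << (37 - 32 + 1)) - 1   = 0x3f
     *     r    = (inst >> (63 - 37)) & mask = (inst >> 26) & 0x3f
     */
    u32 op = kvmppc_get_field(inst, 32, 37);

    /* kvmppc_set_field(inst, 32, 37, op) writes the same six-bit field back:
     *     mask = 0x3f << 26
     *     r    = (inst & ~mask) | ((op << 26) & mask)
     */
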
diff --git a/arch/powerpc/include/asm/macio.h b/arch/powerpc/include/asm/macio.h
index a062c57696d0..675e159b5ef4 100644
--- a/arch/powerpc/include/asm/macio.h
+++ b/arch/powerpc/include/asm/macio.h
@@ -108,7 +108,7 @@ static inline void* macio_get_drvdata(struct macio_dev *dev)
108 108
109static inline struct device_node *macio_get_of_node(struct macio_dev *mdev) 109static inline struct device_node *macio_get_of_node(struct macio_dev *mdev)
110{ 110{
111 return mdev->ofdev.node; 111 return mdev->ofdev.dev.of_node;
112} 112}
113 113
114#ifdef CONFIG_PCI 114#ifdef CONFIG_PCI
@@ -123,10 +123,6 @@ static inline struct pci_dev *macio_get_pci_dev(struct macio_dev *mdev)
123 */ 123 */
124struct macio_driver 124struct macio_driver
125{ 125{
126 char *name;
127 struct of_device_id *match_table;
128 struct module *owner;
129
130 int (*probe)(struct macio_dev* dev, const struct of_device_id *match); 126 int (*probe)(struct macio_dev* dev, const struct of_device_id *match);
131 int (*remove)(struct macio_dev* dev); 127 int (*remove)(struct macio_dev* dev);
132 128
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 26383e0778aa..81fb41289d6c 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -27,6 +27,8 @@ extern int __init_new_context(void);
27extern void __destroy_context(int context_id); 27extern void __destroy_context(int context_id);
28static inline void mmu_context_init(void) { } 28static inline void mmu_context_init(void) { }
29#else 29#else
30extern unsigned long __init_new_context(void);
31extern void __destroy_context(unsigned long context_id);
30extern void mmu_context_init(void); 32extern void mmu_context_init(void);
31#endif 33#endif
32 34
diff --git a/arch/powerpc/include/asm/mpc52xx_psc.h b/arch/powerpc/include/asm/mpc52xx_psc.h
index 42561f4f032d..ecc4fc69ac13 100644
--- a/arch/powerpc/include/asm/mpc52xx_psc.h
+++ b/arch/powerpc/include/asm/mpc52xx_psc.h
@@ -248,6 +248,7 @@ struct mpc52xx_psc_fifo {
248 u16 tflwfptr; /* PSC + 0x9e */ 248 u16 tflwfptr; /* PSC + 0x9e */
249}; 249};
250 250
251#define MPC512x_PSC_FIFO_EOF 0x100
251#define MPC512x_PSC_FIFO_RESET_SLICE 0x80 252#define MPC512x_PSC_FIFO_RESET_SLICE 0x80
252#define MPC512x_PSC_FIFO_ENABLE_SLICE 0x01 253#define MPC512x_PSC_FIFO_ENABLE_SLICE 0x01
253#define MPC512x_PSC_FIFO_ENABLE_DMA 0x04 254#define MPC512x_PSC_FIFO_ENABLE_DMA 0x04
diff --git a/arch/powerpc/include/asm/of_device.h b/arch/powerpc/include/asm/of_device.h
index a64debf177dc..444e97e2982e 100644
--- a/arch/powerpc/include/asm/of_device.h
+++ b/arch/powerpc/include/asm/of_device.h
@@ -12,9 +12,8 @@
12 */ 12 */
13struct of_device 13struct of_device
14{ 14{
15 struct device_node *node; /* to be obsoleted */
16 u64 dma_mask; /* DMA mask */
17 struct device dev; /* Generic device interface */ 15 struct device dev; /* Generic device interface */
16 struct pdev_archdata archdata;
18}; 17};
19 18
20extern struct of_device *of_device_alloc(struct device_node *np, 19extern struct of_device *of_device_alloc(struct device_node *np,
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 971dfa4815f0..8ce7963ad41d 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -23,7 +23,7 @@
23#include <asm/page.h> 23#include <asm/page.h>
24#include <asm/exception-64e.h> 24#include <asm/exception-64e.h>
25#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 25#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
26#include <asm/kvm_book3s_64_asm.h> 26#include <asm/kvm_book3s_asm.h>
27#endif 27#endif
28 28
29register struct paca_struct *local_paca asm("r13"); 29register struct paca_struct *local_paca asm("r13");
@@ -137,15 +137,9 @@ struct paca_struct {
137 u64 startpurr; /* PURR/TB value snapshot */ 137 u64 startpurr; /* PURR/TB value snapshot */
138 u64 startspurr; /* SPURR value snapshot */ 138 u64 startspurr; /* SPURR value snapshot */
139 139
140#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 140#ifdef CONFIG_KVM_BOOK3S_HANDLER
141 struct {
142 u64 esid;
143 u64 vsid;
144 } kvm_slb[64]; /* guest SLB */
145 /* We use this to store guest state in */ 141 /* We use this to store guest state in */
146 struct kvmppc_book3s_shadow_vcpu shadow_vcpu; 142 struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
147 u8 kvm_slb_max; /* highest used guest slb entry */
148 u8 kvm_in_guest; /* are we inside the guest? */
149#endif 143#endif
150}; 144};
151 145
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index bfc4e027e2ad..358ff14ea25e 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -162,14 +162,6 @@ do { \
162 162
163#endif /* !CONFIG_HUGETLB_PAGE */ 163#endif /* !CONFIG_HUGETLB_PAGE */
164 164
165#ifdef MODULE
166#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
167#else
168#define __page_aligned \
169 __attribute__((__aligned__(PAGE_SIZE), \
170 __section__(".data.page_aligned")))
171#endif
172
173#define VM_DATA_DEFAULT_FLAGS \ 165#define VM_DATA_DEFAULT_FLAGS \
174 (test_thread_flag(TIF_32BIT) ? \ 166 (test_thread_flag(TIF_32BIT) ? \
175 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64) 167 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 221ba6240464..7492fe8ad6e4 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -229,6 +229,9 @@ struct thread_struct {
229 unsigned long spefscr; /* SPE & eFP status */ 229 unsigned long spefscr; /* SPE & eFP status */
230 int used_spe; /* set if process has used spe */ 230 int used_spe; /* set if process has used spe */
231#endif /* CONFIG_SPE */ 231#endif /* CONFIG_SPE */
232#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
233 void* kvm_shadow_vcpu; /* KVM internal data */
234#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
232}; 235};
233 236
234#define ARCH_MIN_TASKALIGN 16 237#define ARCH_MIN_TASKALIGN 16
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index b68f025924a8..d62fdf4e504b 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -293,10 +293,12 @@
293#define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */ 293#define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */
294#define HID1_PS (1<<16) /* 750FX PLL selection */ 294#define HID1_PS (1<<16) /* 750FX PLL selection */
295#define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */ 295#define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */
296#define SPRN_HID2_GEKKO 0x398 /* Gekko HID2 Register */
296#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */ 297#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
297#define SPRN_IABR2 0x3FA /* 83xx */ 298#define SPRN_IABR2 0x3FA /* 83xx */
298#define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */ 299#define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */
299#define SPRN_HID4 0x3F4 /* 970 HID4 */ 300#define SPRN_HID4 0x3F4 /* 970 HID4 */
301#define SPRN_HID4_GEKKO 0x3F3 /* Gekko HID4 */
300#define SPRN_HID5 0x3F6 /* 970 HID5 */ 302#define SPRN_HID5 0x3F6 /* 970 HID5 */
301#define SPRN_HID6 0x3F9 /* BE HID 6 */ 303#define SPRN_HID6 0x3F9 /* BE HID 6 */
302#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */ 304#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */
@@ -465,6 +467,14 @@
465#define SPRN_VRSAVE 0x100 /* Vector Register Save Register */ 467#define SPRN_VRSAVE 0x100 /* Vector Register Save Register */
466#define SPRN_XER 0x001 /* Fixed Point Exception Register */ 468#define SPRN_XER 0x001 /* Fixed Point Exception Register */
467 469
470#define SPRN_MMCR0_GEKKO 0x3B8 /* Gekko Monitor Mode Control Register 0 */
471#define SPRN_MMCR1_GEKKO 0x3BC /* Gekko Monitor Mode Control Register 1 */
472#define SPRN_PMC1_GEKKO 0x3B9 /* Gekko Performance Monitor Control 1 */
473#define SPRN_PMC2_GEKKO 0x3BA /* Gekko Performance Monitor Control 2 */
474#define SPRN_PMC3_GEKKO 0x3BD /* Gekko Performance Monitor Control 3 */
475#define SPRN_PMC4_GEKKO 0x3BE /* Gekko Performance Monitor Control 4 */
476#define SPRN_WPAR_GEKKO 0x399 /* Gekko Write Pipe Address Register */
477
468#define SPRN_SCOMC 0x114 /* SCOM Access Control */ 478#define SPRN_SCOMC 0x114 /* SCOM Access Control */
469#define SPRN_SCOMD 0x115 /* SCOM Access DATA */ 479#define SPRN_SCOMD 0x115 /* SCOM Access DATA */
470 480
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 5304a37ba425..2360317179a9 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -4,6 +4,12 @@
4 * are not true Book E PowerPCs, they borrowed a number of features 4 * are not true Book E PowerPCs, they borrowed a number of features
5 * before Book E was finalized, and are included here as well. Unfortunatly, 5 * before Book E was finalized, and are included here as well. Unfortunatly,
6 * they sometimes used different locations than true Book E CPUs did. 6 * they sometimes used different locations than true Book E CPUs did.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation.
11 *
12 * Copyright 2009-2010 Freescale Semiconductor, Inc.
7 */ 13 */
8#ifdef __KERNEL__ 14#ifdef __KERNEL__
9#ifndef __ASM_POWERPC_REG_BOOKE_H__ 15#ifndef __ASM_POWERPC_REG_BOOKE_H__
@@ -88,6 +94,7 @@
88#define SPRN_IVOR35 0x213 /* Interrupt Vector Offset Register 35 */ 94#define SPRN_IVOR35 0x213 /* Interrupt Vector Offset Register 35 */
89#define SPRN_IVOR36 0x214 /* Interrupt Vector Offset Register 36 */ 95#define SPRN_IVOR36 0x214 /* Interrupt Vector Offset Register 36 */
90#define SPRN_IVOR37 0x215 /* Interrupt Vector Offset Register 37 */ 96#define SPRN_IVOR37 0x215 /* Interrupt Vector Offset Register 37 */
97#define SPRN_MCARU 0x239 /* Machine Check Address Register Upper */
91#define SPRN_MCSRR0 0x23A /* Machine Check Save and Restore Register 0 */ 98#define SPRN_MCSRR0 0x23A /* Machine Check Save and Restore Register 0 */
92#define SPRN_MCSRR1 0x23B /* Machine Check Save and Restore Register 1 */ 99#define SPRN_MCSRR1 0x23B /* Machine Check Save and Restore Register 1 */
93#define SPRN_MCSR 0x23C /* Machine Check Status Register */ 100#define SPRN_MCSR 0x23C /* Machine Check Status Register */
@@ -196,8 +203,11 @@
196#define PPC47x_MCSR_IPR 0x00400000 /* Imprecise Machine Check Exception */ 203#define PPC47x_MCSR_IPR 0x00400000 /* Imprecise Machine Check Exception */
197 204
198#ifdef CONFIG_E500 205#ifdef CONFIG_E500
206/* All e500 */
199#define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */ 207#define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */
200#define MCSR_ICPERR 0x40000000UL /* I-Cache Parity Error */ 208#define MCSR_ICPERR 0x40000000UL /* I-Cache Parity Error */
209
210/* e500v1/v2 */
201#define MCSR_DCP_PERR 0x20000000UL /* D-Cache Push Parity Error */ 211#define MCSR_DCP_PERR 0x20000000UL /* D-Cache Push Parity Error */
202#define MCSR_DCPERR 0x10000000UL /* D-Cache Parity Error */ 212#define MCSR_DCPERR 0x10000000UL /* D-Cache Parity Error */
203#define MCSR_BUS_IAERR 0x00000080UL /* Instruction Address Error */ 213#define MCSR_BUS_IAERR 0x00000080UL /* Instruction Address Error */
@@ -209,12 +219,20 @@
209#define MCSR_BUS_IPERR 0x00000002UL /* Instruction parity Error */ 219#define MCSR_BUS_IPERR 0x00000002UL /* Instruction parity Error */
210#define MCSR_BUS_RPERR 0x00000001UL /* Read parity Error */ 220#define MCSR_BUS_RPERR 0x00000001UL /* Read parity Error */
211 221
212/* e500 parts may set unused bits in MCSR; mask these off */ 222/* e500mc */
213#define MCSR_MASK (MCSR_MCP | MCSR_ICPERR | MCSR_DCP_PERR | \ 223#define MCSR_DCPERR_MC 0x20000000UL /* D-Cache Parity Error */
214 MCSR_DCPERR | MCSR_BUS_IAERR | MCSR_BUS_RAERR | \ 224#define MCSR_L2MMU_MHIT 0x04000000UL /* Hit on multiple TLB entries */
215 MCSR_BUS_WAERR | MCSR_BUS_IBERR | MCSR_BUS_RBERR | \ 225#define MCSR_NMI 0x00100000UL /* Non-Maskable Interrupt */
216 MCSR_BUS_WBERR | MCSR_BUS_IPERR | MCSR_BUS_RPERR) 226#define MCSR_MAV 0x00080000UL /* MCAR address valid */
227#define MCSR_MEA 0x00040000UL /* MCAR is effective address */
228#define MCSR_IF 0x00010000UL /* Instruction Fetch */
229#define MCSR_LD 0x00008000UL /* Load */
230#define MCSR_ST 0x00004000UL /* Store */
231#define MCSR_LDG 0x00002000UL /* Guarded Load */
232#define MCSR_TLBSYNC 0x00000002UL /* Multiple tlbsyncs detected */
233#define MCSR_BSL2_ERR 0x00000001UL /* Backside L2 cache error */
217#endif 234#endif
235
218#ifdef CONFIG_E200 236#ifdef CONFIG_E200
219#define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */ 237#define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */
220#define MCSR_CP_PERR 0x20000000UL /* Cache Push Parity Error */ 238#define MCSR_CP_PERR 0x20000000UL /* Cache Push Parity Error */
@@ -225,11 +243,6 @@
225#define MCSR_BUS_DRERR 0x00000008UL /* Read Bus Error on data load */ 243#define MCSR_BUS_DRERR 0x00000008UL /* Read Bus Error on data load */
226#define MCSR_BUS_WRERR 0x00000004UL /* Write Bus Error on buffered 244#define MCSR_BUS_WRERR 0x00000004UL /* Write Bus Error on buffered
227 store or cache line push */ 245 store or cache line push */
228
229/* e200 parts may set unused bits in MCSR; mask these off */
230#define MCSR_MASK (MCSR_MCP | MCSR_CP_PERR | MCSR_CPERR | \
231 MCSR_EXCP_ERR | MCSR_BUS_IRERR | MCSR_BUS_DRERR | \
232 MCSR_BUS_WRERR)
233#endif 246#endif
234 247
235/* Bit definitions for the DBSR. */ 248/* Bit definitions for the DBSR. */
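
With the new e500mc MCSR bits and SPRN_MCARU, a machine check handler can tell whether MCAR holds a valid address and whether that address is effective or physical. A hedged sketch of that decode; mfspr(), pr_err() and SPRN_MCAR/SPRN_MCSR are existing kernel definitions, and the function itself is illustrative, not from this patch:

    static void example_report_e500mc_mcheck(void)
    {
    	unsigned long mcsr = mfspr(SPRN_MCSR);

    	if (mcsr & MCSR_MAV) {			/* MCAR address valid? */
    		u64 addr = ((u64)mfspr(SPRN_MCARU) << 32) | mfspr(SPRN_MCAR);

    		pr_err("Machine check %s address: 0x%llx\n",
    		       (mcsr & MCSR_MEA) ? "effective" : "physical", addr);
    	}
    }
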
diff --git a/arch/powerpc/include/asm/scatterlist.h b/arch/powerpc/include/asm/scatterlist.h
index 912bf597870f..34cc78fd0ef4 100644
--- a/arch/powerpc/include/asm/scatterlist.h
+++ b/arch/powerpc/include/asm/scatterlist.h
@@ -9,38 +9,12 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#ifdef __KERNEL__
13#include <linux/types.h>
14#include <asm/dma.h> 12#include <asm/dma.h>
15 13#include <asm-generic/scatterlist.h>
16struct scatterlist {
17#ifdef CONFIG_DEBUG_SG
18 unsigned long sg_magic;
19#endif
20 unsigned long page_link;
21 unsigned int offset;
22 unsigned int length;
23
24 /* For TCE or SWIOTLB support */
25 dma_addr_t dma_address;
26 u32 dma_length;
27};
28
29/*
30 * These macros should be used after a dma_map_sg call has been done
31 * to get bus addresses of each of the SG entries and their lengths.
32 * You should only work with the number of sg entries pci_map_sg
33 * returns, or alternatively stop on the first sg_dma_len(sg) which
34 * is 0.
35 */
36#define sg_dma_address(sg) ((sg)->dma_address)
37#define sg_dma_len(sg) ((sg)->dma_length)
38 14
39#ifdef __powerpc64__ 15#ifdef __powerpc64__
40#define ISA_DMA_THRESHOLD (~0UL) 16#define ISA_DMA_THRESHOLD (~0UL)
41#endif 17#endif
42
43#define ARCH_HAS_SG_CHAIN 18#define ARCH_HAS_SG_CHAIN
44 19
45#endif /* __KERNEL__ */
46#endif /* _ASM_POWERPC_SCATTERLIST_H */ 20#endif /* _ASM_POWERPC_SCATTERLIST_H */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 877326320e74..58d0572de6f9 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -57,8 +57,12 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
57obj-$(CONFIG_E500) += idle_e500.o 57obj-$(CONFIG_E500) += idle_e500.o
58obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o 58obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
59obj-$(CONFIG_TAU) += tau_6xx.o 59obj-$(CONFIG_TAU) += tau_6xx.o
60obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o \ 60obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o
61 swsusp_$(CONFIG_WORD_SIZE).o 61ifeq ($(CONFIG_FSL_BOOKE),y)
62obj-$(CONFIG_HIBERNATION) += swsusp_booke.o
63else
64obj-$(CONFIG_HIBERNATION) += swsusp_$(CONFIG_WORD_SIZE).o
65endif
62obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o 66obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o
63obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o 67obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o
64obj-$(CONFIG_44x) += cpu_setup_44x.o 68obj-$(CONFIG_44x) += cpu_setup_44x.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 28a686fb269c..496cc5b3984f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -50,6 +50,9 @@
50#endif 50#endif
51#ifdef CONFIG_KVM 51#ifdef CONFIG_KVM
52#include <linux/kvm_host.h> 52#include <linux/kvm_host.h>
53#ifndef CONFIG_BOOKE
54#include <asm/kvm_book3s.h>
55#endif
53#endif 56#endif
54 57
55#ifdef CONFIG_PPC32 58#ifdef CONFIG_PPC32
@@ -105,6 +108,9 @@ int main(void)
105 DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe)); 108 DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
106#endif /* CONFIG_SPE */ 109#endif /* CONFIG_SPE */
107#endif /* CONFIG_PPC64 */ 110#endif /* CONFIG_PPC64 */
111#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
112 DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
113#endif
108 114
109 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 115 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
110 DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); 116 DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
@@ -191,33 +197,9 @@ int main(void)
191 DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset)); 197 DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
192 DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); 198 DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
193#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 199#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
194 DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest)); 200 DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
195 DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb)); 201 DEFINE(SVCPU_SLB, offsetof(struct kvmppc_book3s_shadow_vcpu, slb));
196 DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max)); 202 DEFINE(SVCPU_SLB_MAX, offsetof(struct kvmppc_book3s_shadow_vcpu, slb_max));
197 DEFINE(PACA_KVM_CR, offsetof(struct paca_struct, shadow_vcpu.cr));
198 DEFINE(PACA_KVM_XER, offsetof(struct paca_struct, shadow_vcpu.xer));
199 DEFINE(PACA_KVM_R0, offsetof(struct paca_struct, shadow_vcpu.gpr[0]));
200 DEFINE(PACA_KVM_R1, offsetof(struct paca_struct, shadow_vcpu.gpr[1]));
201 DEFINE(PACA_KVM_R2, offsetof(struct paca_struct, shadow_vcpu.gpr[2]));
202 DEFINE(PACA_KVM_R3, offsetof(struct paca_struct, shadow_vcpu.gpr[3]));
203 DEFINE(PACA_KVM_R4, offsetof(struct paca_struct, shadow_vcpu.gpr[4]));
204 DEFINE(PACA_KVM_R5, offsetof(struct paca_struct, shadow_vcpu.gpr[5]));
205 DEFINE(PACA_KVM_R6, offsetof(struct paca_struct, shadow_vcpu.gpr[6]));
206 DEFINE(PACA_KVM_R7, offsetof(struct paca_struct, shadow_vcpu.gpr[7]));
207 DEFINE(PACA_KVM_R8, offsetof(struct paca_struct, shadow_vcpu.gpr[8]));
208 DEFINE(PACA_KVM_R9, offsetof(struct paca_struct, shadow_vcpu.gpr[9]));
209 DEFINE(PACA_KVM_R10, offsetof(struct paca_struct, shadow_vcpu.gpr[10]));
210 DEFINE(PACA_KVM_R11, offsetof(struct paca_struct, shadow_vcpu.gpr[11]));
211 DEFINE(PACA_KVM_R12, offsetof(struct paca_struct, shadow_vcpu.gpr[12]));
212 DEFINE(PACA_KVM_R13, offsetof(struct paca_struct, shadow_vcpu.gpr[13]));
213 DEFINE(PACA_KVM_HOST_R1, offsetof(struct paca_struct, shadow_vcpu.host_r1));
214 DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2));
215 DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct,
216 shadow_vcpu.vmhandler));
217 DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct,
218 shadow_vcpu.scratch0));
219 DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct,
220 shadow_vcpu.scratch1));
221#endif 203#endif
222#endif /* CONFIG_PPC64 */ 204#endif /* CONFIG_PPC64 */
223 205
@@ -228,8 +210,8 @@ int main(void)
228 /* Interrupt register frame */ 210 /* Interrupt register frame */
229 DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD); 211 DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
230 DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE); 212 DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
231#ifdef CONFIG_PPC64
232 DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); 213 DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
214#ifdef CONFIG_PPC64
233 /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */ 215 /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
234 DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); 216 DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
235 DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); 217 DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
@@ -412,9 +394,6 @@ int main(void)
412 DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); 394 DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
413 DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); 395 DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
414 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); 396 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
415 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
416 DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
417 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
418 DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr)); 397 DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
419 DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4)); 398 DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
420 DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5)); 399 DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
@@ -422,27 +401,68 @@ int main(void)
422 DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7)); 401 DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
423 DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid)); 402 DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
424 403
425 DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); 404 /* book3s */
426 DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear)); 405#ifdef CONFIG_PPC_BOOK3S
427 DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
428
429 /* book3s_64 */
430#ifdef CONFIG_PPC64
431 DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
432 DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip)); 406 DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
433 DEFINE(VCPU_HOST_R2, offsetof(struct kvm_vcpu, arch.host_r2));
434 DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr)); 407 DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
435 DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); 408 DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
436 DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
437 DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem)); 409 DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
438 DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter)); 410 DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
439 DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler)); 411 DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
440 DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall)); 412 DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
441 DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags)); 413 DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
414 DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
415 offsetof(struct kvmppc_vcpu_book3s, vcpu));
416 DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr));
417 DEFINE(SVCPU_XER, offsetof(struct kvmppc_book3s_shadow_vcpu, xer));
418 DEFINE(SVCPU_CTR, offsetof(struct kvmppc_book3s_shadow_vcpu, ctr));
419 DEFINE(SVCPU_LR, offsetof(struct kvmppc_book3s_shadow_vcpu, lr));
420 DEFINE(SVCPU_PC, offsetof(struct kvmppc_book3s_shadow_vcpu, pc));
421 DEFINE(SVCPU_R0, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[0]));
422 DEFINE(SVCPU_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[1]));
423 DEFINE(SVCPU_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[2]));
424 DEFINE(SVCPU_R3, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[3]));
425 DEFINE(SVCPU_R4, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[4]));
426 DEFINE(SVCPU_R5, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[5]));
427 DEFINE(SVCPU_R6, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[6]));
428 DEFINE(SVCPU_R7, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[7]));
429 DEFINE(SVCPU_R8, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[8]));
430 DEFINE(SVCPU_R9, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[9]));
431 DEFINE(SVCPU_R10, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[10]));
432 DEFINE(SVCPU_R11, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[11]));
433 DEFINE(SVCPU_R12, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[12]));
434 DEFINE(SVCPU_R13, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[13]));
435 DEFINE(SVCPU_HOST_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r1));
436 DEFINE(SVCPU_HOST_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r2));
437 DEFINE(SVCPU_VMHANDLER, offsetof(struct kvmppc_book3s_shadow_vcpu,
438 vmhandler));
439 DEFINE(SVCPU_SCRATCH0, offsetof(struct kvmppc_book3s_shadow_vcpu,
440 scratch0));
441 DEFINE(SVCPU_SCRATCH1, offsetof(struct kvmppc_book3s_shadow_vcpu,
442 scratch1));
443 DEFINE(SVCPU_IN_GUEST, offsetof(struct kvmppc_book3s_shadow_vcpu,
444 in_guest));
445 DEFINE(SVCPU_FAULT_DSISR, offsetof(struct kvmppc_book3s_shadow_vcpu,
446 fault_dsisr));
447 DEFINE(SVCPU_FAULT_DAR, offsetof(struct kvmppc_book3s_shadow_vcpu,
448 fault_dar));
449 DEFINE(SVCPU_LAST_INST, offsetof(struct kvmppc_book3s_shadow_vcpu,
450 last_inst));
451 DEFINE(SVCPU_SHADOW_SRR1, offsetof(struct kvmppc_book3s_shadow_vcpu,
452 shadow_srr1));
453#ifdef CONFIG_PPC_BOOK3S_32
454 DEFINE(SVCPU_SR, offsetof(struct kvmppc_book3s_shadow_vcpu, sr));
455#endif
442#else 456#else
443 DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); 457 DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
444 DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); 458 DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
445#endif /* CONFIG_PPC64 */ 459 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
460 DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
461 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
462 DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
463 DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
464 DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
465#endif /* CONFIG_PPC_BOOK3S */
446#endif 466#endif
447#ifdef CONFIG_44x 467#ifdef CONFIG_44x
448 DEFINE(PGD_T_LOG2, PGD_T_LOG2); 468 DEFINE(PGD_T_LOG2, PGD_T_LOG2);
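
asm-offsets.c only turns these struct offsets into assembler-visible constants; the generated asm-offsets.h is what lets the KVM book3s assembly (e.g. book3s_rmhandlers.S, included from head_32.S/head_64.S below) address shadow-vcpu fields symbolically. DEFINE() is the usual kbuild helper, roughly as sketched here (the exact form may vary by kernel version), and the numeric offset shown is purely illustrative:

    /* Roughly how DEFINE() emits a marker the build turns into a #define: */
    #define DEFINE(sym, val) \
    	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    /* After post-processing, asm-offsets.h contains lines such as
     * (offset value illustrative only):
     *
     *   #define SVCPU_CR 64   // offsetof(struct kvmppc_book3s_shadow_vcpu, cr)
     *
     * which the assembly then uses instead of hard-coded offsets.
     */
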
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 9556be903e96..87aa0f3c6047 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1840,7 +1840,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1840 .oprofile_cpu_type = "ppc/e500mc", 1840 .oprofile_cpu_type = "ppc/e500mc",
1841 .oprofile_type = PPC_OPROFILE_FSL_EMB, 1841 .oprofile_type = PPC_OPROFILE_FSL_EMB,
1842 .cpu_setup = __setup_cpu_e500mc, 1842 .cpu_setup = __setup_cpu_e500mc,
1843 .machine_check = machine_check_e500, 1843 .machine_check = machine_check_e500mc,
1844 .platform = "ppce500mc", 1844 .platform = "ppce500mc",
1845 }, 1845 },
1846 { /* default match */ 1846 { /* default match */
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 8c066d6a8e4b..b46f2e09bd81 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -163,6 +163,7 @@ static void crash_kexec_prepare_cpus(int cpu)
163} 163}
164 164
165/* wait for all the CPUs to hit real mode but timeout if they don't come in */ 165/* wait for all the CPUs to hit real mode but timeout if they don't come in */
166#ifdef CONFIG_PPC_STD_MMU_64
166static void crash_kexec_wait_realmode(int cpu) 167static void crash_kexec_wait_realmode(int cpu)
167{ 168{
168 unsigned int msecs; 169 unsigned int msecs;
@@ -187,6 +188,7 @@ static void crash_kexec_wait_realmode(int cpu)
187 } 188 }
188 mb(); 189 mb();
189} 190}
191#endif
190 192
191/* 193/*
192 * This function will be called by secondary cpus or by kexec cpu 194 * This function will be called by secondary cpus or by kexec cpu
@@ -445,7 +447,9 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
445 crash_kexec_prepare_cpus(crashing_cpu); 447 crash_kexec_prepare_cpus(crashing_cpu);
446 cpu_set(crashing_cpu, cpus_in_crash); 448 cpu_set(crashing_cpu, cpus_in_crash);
447 crash_kexec_stop_spus(); 449 crash_kexec_stop_spus();
450#ifdef CONFIG_PPC_STD_MMU_64
448 crash_kexec_wait_realmode(crashing_cpu); 451 crash_kexec_wait_realmode(crashing_cpu);
452#endif
449 if (ppc_md.kexec_cpu_down) 453 if (ppc_md.kexec_cpu_down)
450 ppc_md.kexec_cpu_down(1, 0); 454 ppc_md.kexec_cpu_down(1, 0);
451} 455}
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 4ff4da2c238b..e7fe218b8697 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -39,8 +39,8 @@ struct dma_map_ops swiotlb_dma_ops = {
39 .dma_supported = swiotlb_dma_supported, 39 .dma_supported = swiotlb_dma_supported,
40 .map_page = swiotlb_map_page, 40 .map_page = swiotlb_map_page,
41 .unmap_page = swiotlb_unmap_page, 41 .unmap_page = swiotlb_unmap_page,
42 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, 42 .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
43 .sync_single_range_for_device = swiotlb_sync_single_range_for_device, 43 .sync_single_for_device = swiotlb_sync_single_for_device,
44 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, 44 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
45 .sync_sg_for_device = swiotlb_sync_sg_for_device, 45 .sync_sg_for_device = swiotlb_sync_sg_for_device,
46 .mapping_error = swiotlb_dma_mapping_error, 46 .mapping_error = swiotlb_dma_mapping_error,
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 6c1df5757cd6..8d1de6f31d5a 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -127,11 +127,11 @@ static inline void dma_direct_sync_sg(struct device *dev,
127 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); 127 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
128} 128}
129 129
130static inline void dma_direct_sync_single_range(struct device *dev, 130static inline void dma_direct_sync_single(struct device *dev,
131 dma_addr_t dma_handle, unsigned long offset, size_t size, 131 dma_addr_t dma_handle, size_t size,
132 enum dma_data_direction direction) 132 enum dma_data_direction direction)
133{ 133{
134 __dma_sync(bus_to_virt(dma_handle+offset), size, direction); 134 __dma_sync(bus_to_virt(dma_handle), size, direction);
135} 135}
136#endif 136#endif
137 137
@@ -144,8 +144,8 @@ struct dma_map_ops dma_direct_ops = {
144 .map_page = dma_direct_map_page, 144 .map_page = dma_direct_map_page,
145 .unmap_page = dma_direct_unmap_page, 145 .unmap_page = dma_direct_unmap_page,
146#ifdef CONFIG_NOT_COHERENT_CACHE 146#ifdef CONFIG_NOT_COHERENT_CACHE
147 .sync_single_range_for_cpu = dma_direct_sync_single_range, 147 .sync_single_for_cpu = dma_direct_sync_single,
148 .sync_single_range_for_device = dma_direct_sync_single_range, 148 .sync_single_for_device = dma_direct_sync_single,
149 .sync_sg_for_cpu = dma_direct_sync_sg, 149 .sync_sg_for_cpu = dma_direct_sync_sg,
150 .sync_sg_for_device = dma_direct_sync_sg, 150 .sync_sg_for_device = dma_direct_sync_sg,
151#endif 151#endif
diff --git a/arch/powerpc/kernel/fsl_booke_entry_mapping.S b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
new file mode 100644
index 000000000000..beb4d78a2304
--- /dev/null
+++ b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
@@ -0,0 +1,237 @@
1
2/* 1. Find the index of the entry we're executing in */
3 bl invstr /* Find our address */
4invstr: mflr r6 /* Make it accessible */
5 mfmsr r7
6 rlwinm r4,r7,27,31,31 /* extract MSR[IS] */
7 mfspr r7, SPRN_PID0
8 slwi r7,r7,16
9 or r7,r7,r4
10 mtspr SPRN_MAS6,r7
11 tlbsx 0,r6 /* search MSR[IS], SPID=PID0 */
12 mfspr r7,SPRN_MAS1
13 andis. r7,r7,MAS1_VALID@h
14 bne match_TLB
15
16 mfspr r7,SPRN_MMUCFG
17 rlwinm r7,r7,21,28,31 /* extract MMUCFG[NPIDS] */
18 cmpwi r7,3
19 bne match_TLB /* skip if NPIDS != 3 */
20
21 mfspr r7,SPRN_PID1
22 slwi r7,r7,16
23 or r7,r7,r4
24 mtspr SPRN_MAS6,r7
25 tlbsx 0,r6 /* search MSR[IS], SPID=PID1 */
26 mfspr r7,SPRN_MAS1
27 andis. r7,r7,MAS1_VALID@h
28 bne match_TLB
29 mfspr r7, SPRN_PID2
30 slwi r7,r7,16
31 or r7,r7,r4
32 mtspr SPRN_MAS6,r7
33 tlbsx 0,r6 /* Fall through, we had to match */
34
35match_TLB:
36 mfspr r7,SPRN_MAS0
37 rlwinm r3,r7,16,20,31 /* Extract MAS0(Entry) */
38
39 mfspr r7,SPRN_MAS1 /* Insure IPROT set */
40 oris r7,r7,MAS1_IPROT@h
41 mtspr SPRN_MAS1,r7
42 tlbwe
43
44/* 2. Invalidate all entries except the entry we're executing in */
45 mfspr r9,SPRN_TLB1CFG
46 andi. r9,r9,0xfff
47 li r6,0 /* Set Entry counter to 0 */
481: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
49 rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */
50 mtspr SPRN_MAS0,r7
51 tlbre
52 mfspr r7,SPRN_MAS1
53 rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */
54 cmpw r3,r6
55 beq skpinv /* Dont update the current execution TLB */
56 mtspr SPRN_MAS1,r7
57 tlbwe
58 isync
59skpinv: addi r6,r6,1 /* Increment */
60 cmpw r6,r9 /* Are we done? */
61 bne 1b /* If not, repeat */
62
63 /* Invalidate TLB0 */
64 li r6,0x04
65 tlbivax 0,r6
66 TLBSYNC
67 /* Invalidate TLB1 */
68 li r6,0x0c
69 tlbivax 0,r6
70 TLBSYNC
71
72/* 3. Setup a temp mapping and jump to it */
73 andi. r5, r3, 0x1 /* Find an entry not used and is non-zero */
74 addi r5, r5, 0x1
75 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
76 rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
77 mtspr SPRN_MAS0,r7
78 tlbre
79
80 /* grab and fixup the RPN */
81 mfspr r6,SPRN_MAS1 /* extract MAS1[SIZE] */
82 rlwinm r6,r6,25,27,31
83 li r8,-1
84 addi r6,r6,10
85 slw r6,r8,r6 /* convert to mask */
86
87 bl 1f /* Find our address */
881: mflr r7
89
90 mfspr r8,SPRN_MAS3
91#ifdef CONFIG_PHYS_64BIT
92 mfspr r23,SPRN_MAS7
93#endif
94 and r8,r6,r8
95 subfic r9,r6,-4096
96 and r9,r9,r7
97
98 or r25,r8,r9
99 ori r8,r25,(MAS3_SX|MAS3_SW|MAS3_SR)
100
101 /* Just modify the entry ID and EPN for the temp mapping */
102 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
103 rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
104 mtspr SPRN_MAS0,r7
105 xori r6,r4,1 /* Setup TMP mapping in the other Address space */
106 slwi r6,r6,12
107 oris r6,r6,(MAS1_VALID|MAS1_IPROT)@h
108 ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_4K))@l
109 mtspr SPRN_MAS1,r6
110 mfspr r6,SPRN_MAS2
111 li r7,0 /* temp EPN = 0 */
112 rlwimi r7,r6,0,20,31
113 mtspr SPRN_MAS2,r7
114 mtspr SPRN_MAS3,r8
115 tlbwe
116
117 xori r6,r4,1
118 slwi r6,r6,5 /* setup new context with other address space */
119 bl 1f /* Find our address */
1201: mflr r9
121 rlwimi r7,r9,0,20,31
122 addi r7,r7,(2f - 1b)
123 mtspr SPRN_SRR0,r7
124 mtspr SPRN_SRR1,r6
125 rfi
1262:
127/* 4. Clear out PIDs & Search info */
128 li r6,0
129 mtspr SPRN_MAS6,r6
130 mtspr SPRN_PID0,r6
131
132 mfspr r7,SPRN_MMUCFG
133 rlwinm r7,r7,21,28,31 /* extract MMUCFG[NPIDS] */
134 cmpwi r7,3
135 bne 2f /* skip if NPIDS != 3 */
136
137 mtspr SPRN_PID1,r6
138 mtspr SPRN_PID2,r6
139
140/* 5. Invalidate mapping we started in */
1412:
142 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
143 rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
144 mtspr SPRN_MAS0,r7
145 tlbre
146 mfspr r6,SPRN_MAS1
147 rlwinm r6,r6,0,2,0 /* clear IPROT */
148 mtspr SPRN_MAS1,r6
149 tlbwe
150 /* Invalidate TLB1 */
151 li r9,0x0c
152 tlbivax 0,r9
153 TLBSYNC
154
155/* The mapping only needs to be cache-coherent on SMP */
156#ifdef CONFIG_SMP
157#define M_IF_SMP MAS2_M
158#else
159#define M_IF_SMP 0
160#endif
161
162#if defined(ENTRY_MAPPING_BOOT_SETUP)
163
164/* 6. Setup KERNELBASE mapping in TLB1[0] */
165 lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
166 mtspr SPRN_MAS0,r6
167 lis r6,(MAS1_VALID|MAS1_IPROT)@h
168 ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l
169 mtspr SPRN_MAS1,r6
170 lis r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@h
171 ori r6,r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@l
172 mtspr SPRN_MAS2,r6
173 mtspr SPRN_MAS3,r8
174 tlbwe
175
176/* 7. Jump to KERNELBASE mapping */
177 lis r6,(KERNELBASE & ~0xfff)@h
178 ori r6,r6,(KERNELBASE & ~0xfff)@l
179
180#elif defined(ENTRY_MAPPING_KEXEC_SETUP)
181/*
182 * 6. Setup a 1:1 mapping in TLB1. Esel 0 is unused, 1 or 2 contains the tmp
183 * mapping so we start at 3. We setup 8 mappings, each 256MiB in size. This
184 * will cover the first 2GiB of memory.
185 */
186
187 lis r10, (MAS1_VALID|MAS1_IPROT)@h
188 ori r10,r10, (MAS1_TSIZE(BOOK3E_PAGESZ_256M))@l
189 li r11, 0
190 li r0, 8
191 mtctr r0
192
193next_tlb_setup:
194 addi r0, r11, 3
195 rlwinm r0, r0, 16, 4, 15 // Compute esel
196 rlwinm r9, r11, 28, 0, 3 // Compute [ER]PN
197 oris r0, r0, (MAS0_TLBSEL(1))@h
198 mtspr SPRN_MAS0,r0
199 mtspr SPRN_MAS1,r10
200 mtspr SPRN_MAS2,r9
201 ori r9, r9, (MAS3_SX|MAS3_SW|MAS3_SR)
202 mtspr SPRN_MAS3,r9
203 tlbwe
204 addi r11, r11, 1
205 bdnz+ next_tlb_setup
206
207/* 7. Jump to our 1:1 mapping */
208 li r6, 0
209
210#else
211 #error You need to specify the mapping or not use this at all.
212#endif
213
214 lis r7,MSR_KERNEL@h
215 ori r7,r7,MSR_KERNEL@l
216 bl 1f /* Find our address */
2171: mflr r9
218 rlwimi r6,r9,0,20,31
219 addi r6,r6,(2f - 1b)
220 add r6, r6, r25
221 mtspr SPRN_SRR0,r6
222 mtspr SPRN_SRR1,r7
223 rfi /* start execution out of TLB1[0] entry */
224
225/* 8. Clear out the temp mapping */
2262: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
227 rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
228 mtspr SPRN_MAS0,r7
229 tlbre
230 mfspr r8,SPRN_MAS1
231 rlwinm r8,r8,0,2,0 /* clear IPROT */
232 mtspr SPRN_MAS1,r8
233 tlbwe
234 /* Invalidate TLB1 */
235 li r9,0x0c
236 tlbivax 0,r9
237 TLBSYNC
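
For the kexec case, steps 6-7 above program eight IPROT-protected 256 MiB TLB1 entries starting at esel 3, giving a 1:1 mapping of the first 2 GiB. A host-side sketch of the MAS values that loop computes, purely illustrative; MAS0_TLBSEL and the MAS3 permission bits are the booke MMU macros already used above, and MAS1 is the same VALID|IPROT|TSIZE(256M) value for every entry:

    static void example_kexec_mapping_values(u32 mas0[8], u32 mas2[8], u32 mas3[8])
    {
    	int i;

    	for (i = 0; i < 8; i++) {
    		u32 esel = i + 3;	/* esel 0..2 may hold the temp mapping */
    		u32 addr = i << 28;	/* 1:1, 256 MiB per entry, 2 GiB total */

    		mas0[i] = MAS0_TLBSEL(1) | (esel << 16);	/* MAS0[ESEL]  */
    		mas2[i] = addr;					/* EPN         */
    		mas3[i] = addr | MAS3_SX | MAS3_SW | MAS3_SR;	/* RPN + perms */
    	}
    }
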
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index e025e89fe93e..98c4b29a56f4 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -33,6 +33,7 @@
33#include <asm/asm-offsets.h> 33#include <asm/asm-offsets.h>
34#include <asm/ptrace.h> 34#include <asm/ptrace.h>
35#include <asm/bug.h> 35#include <asm/bug.h>
36#include <asm/kvm_book3s_asm.h>
36 37
37/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */ 38/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
38#define LOAD_BAT(n, reg, RA, RB) \ 39#define LOAD_BAT(n, reg, RA, RB) \
@@ -303,6 +304,7 @@ __secondary_hold_acknowledge:
303 */ 304 */
304#define EXCEPTION(n, label, hdlr, xfer) \ 305#define EXCEPTION(n, label, hdlr, xfer) \
305 . = n; \ 306 . = n; \
307 DO_KVM n; \
306label: \ 308label: \
307 EXCEPTION_PROLOG; \ 309 EXCEPTION_PROLOG; \
308 addi r3,r1,STACK_FRAME_OVERHEAD; \ 310 addi r3,r1,STACK_FRAME_OVERHEAD; \
@@ -358,6 +360,7 @@ i##n: \
358 * -- paulus. 360 * -- paulus.
359 */ 361 */
360 . = 0x200 362 . = 0x200
363 DO_KVM 0x200
361 mtspr SPRN_SPRG_SCRATCH0,r10 364 mtspr SPRN_SPRG_SCRATCH0,r10
362 mtspr SPRN_SPRG_SCRATCH1,r11 365 mtspr SPRN_SPRG_SCRATCH1,r11
363 mfcr r10 366 mfcr r10
@@ -381,6 +384,7 @@ i##n: \
381 384
382/* Data access exception. */ 385/* Data access exception. */
383 . = 0x300 386 . = 0x300
387 DO_KVM 0x300
384DataAccess: 388DataAccess:
385 EXCEPTION_PROLOG 389 EXCEPTION_PROLOG
386 mfspr r10,SPRN_DSISR 390 mfspr r10,SPRN_DSISR
@@ -397,6 +401,7 @@ DataAccess:
397 401
398/* Instruction access exception. */ 402/* Instruction access exception. */
399 . = 0x400 403 . = 0x400
404 DO_KVM 0x400
400InstructionAccess: 405InstructionAccess:
401 EXCEPTION_PROLOG 406 EXCEPTION_PROLOG
402 andis. r0,r9,0x4000 /* no pte found? */ 407 andis. r0,r9,0x4000 /* no pte found? */
@@ -413,6 +418,7 @@ InstructionAccess:
413 418
414/* Alignment exception */ 419/* Alignment exception */
415 . = 0x600 420 . = 0x600
421 DO_KVM 0x600
416Alignment: 422Alignment:
417 EXCEPTION_PROLOG 423 EXCEPTION_PROLOG
418 mfspr r4,SPRN_DAR 424 mfspr r4,SPRN_DAR
@@ -427,6 +433,7 @@ Alignment:
427 433
428/* Floating-point unavailable */ 434/* Floating-point unavailable */
429 . = 0x800 435 . = 0x800
436 DO_KVM 0x800
430FPUnavailable: 437FPUnavailable:
431BEGIN_FTR_SECTION 438BEGIN_FTR_SECTION
432/* 439/*
@@ -450,6 +457,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
450 457
451/* System call */ 458/* System call */
452 . = 0xc00 459 . = 0xc00
460 DO_KVM 0xc00
453SystemCall: 461SystemCall:
454 EXCEPTION_PROLOG 462 EXCEPTION_PROLOG
455 EXC_XFER_EE_LITE(0xc00, DoSyscall) 463 EXC_XFER_EE_LITE(0xc00, DoSyscall)
@@ -467,9 +475,11 @@ SystemCall:
467 * by executing an altivec instruction. 475 * by executing an altivec instruction.
468 */ 476 */
469 . = 0xf00 477 . = 0xf00
478 DO_KVM 0xf00
470 b PerformanceMonitor 479 b PerformanceMonitor
471 480
472 . = 0xf20 481 . = 0xf20
482 DO_KVM 0xf20
473 b AltiVecUnavailable 483 b AltiVecUnavailable
474 484
475/* 485/*
@@ -882,6 +892,10 @@ __secondary_start:
882 RFI 892 RFI
883#endif /* CONFIG_SMP */ 893#endif /* CONFIG_SMP */
884 894
895#ifdef CONFIG_KVM_BOOK3S_HANDLER
896#include "../kvm/book3s_rmhandlers.S"
897#endif
898
885/* 899/*
886 * Those generic dummy functions are kept for CPUs not 900 * Those generic dummy functions are kept for CPUs not
887 * included in CONFIG_6xx 901 * included in CONFIG_6xx
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index bed9a29ee383..844a44b64472 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -37,7 +37,7 @@
37#include <asm/firmware.h> 37#include <asm/firmware.h>
38#include <asm/page_64.h> 38#include <asm/page_64.h>
39#include <asm/irqflags.h> 39#include <asm/irqflags.h>
40#include <asm/kvm_book3s_64_asm.h> 40#include <asm/kvm_book3s_asm.h>
41 41
42/* The physical memory is layed out such that the secondary processor 42/* The physical memory is layed out such that the secondary processor
43 * spin code sits at 0x0000...0x00ff. On server, the vectors follow 43 * spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -169,7 +169,7 @@ exception_marker:
169/* KVM trampoline code needs to be close to the interrupt handlers */ 169/* KVM trampoline code needs to be close to the interrupt handlers */
170 170
171#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 171#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
172#include "../kvm/book3s_64_rmhandlers.S" 172#include "../kvm/book3s_rmhandlers.S"
173#endif 173#endif
174 174
175_GLOBAL(generic_secondary_thread_init) 175_GLOBAL(generic_secondary_thread_init)
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index edd4a57fd29e..4faeba247854 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -94,204 +94,10 @@ _ENTRY(_start);
94 */ 94 */
95 95
96_ENTRY(__early_start) 96_ENTRY(__early_start)
97/* 1. Find the index of the entry we're executing in */
98 bl invstr /* Find our address */
99invstr: mflr r6 /* Make it accessible */
100 mfmsr r7
101 rlwinm r4,r7,27,31,31 /* extract MSR[IS] */
102 mfspr r7, SPRN_PID0
103 slwi r7,r7,16
104 or r7,r7,r4
105 mtspr SPRN_MAS6,r7
106 tlbsx 0,r6 /* search MSR[IS], SPID=PID0 */
107 mfspr r7,SPRN_MAS1
108 andis. r7,r7,MAS1_VALID@h
109 bne match_TLB
110
111 mfspr r7,SPRN_MMUCFG
112 rlwinm r7,r7,21,28,31 /* extract MMUCFG[NPIDS] */
113 cmpwi r7,3
114 bne match_TLB /* skip if NPIDS != 3 */
115
116 mfspr r7,SPRN_PID1
117 slwi r7,r7,16
118 or r7,r7,r4
119 mtspr SPRN_MAS6,r7
120 tlbsx 0,r6 /* search MSR[IS], SPID=PID1 */
121 mfspr r7,SPRN_MAS1
122 andis. r7,r7,MAS1_VALID@h
123 bne match_TLB
124 mfspr r7, SPRN_PID2
125 slwi r7,r7,16
126 or r7,r7,r4
127 mtspr SPRN_MAS6,r7
128 tlbsx 0,r6 /* Fall through, we had to match */
129
130match_TLB:
131 mfspr r7,SPRN_MAS0
132 rlwinm r3,r7,16,20,31 /* Extract MAS0(Entry) */
133
134 mfspr r7,SPRN_MAS1 /* Insure IPROT set */
135 oris r7,r7,MAS1_IPROT@h
136 mtspr SPRN_MAS1,r7
137 tlbwe
138
139/* 2. Invalidate all entries except the entry we're executing in */
140 mfspr r9,SPRN_TLB1CFG
141 andi. r9,r9,0xfff
142 li r6,0 /* Set Entry counter to 0 */
1431: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
144 rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */
145 mtspr SPRN_MAS0,r7
146 tlbre
147 mfspr r7,SPRN_MAS1
148 rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */
149 cmpw r3,r6
150 beq skpinv /* Dont update the current execution TLB */
151 mtspr SPRN_MAS1,r7
152 tlbwe
153 isync
154skpinv: addi r6,r6,1 /* Increment */
155 cmpw r6,r9 /* Are we done? */
156 bne 1b /* If not, repeat */
157
158 /* Invalidate TLB0 */
159 li r6,0x04
160 tlbivax 0,r6
161 TLBSYNC
162 /* Invalidate TLB1 */
163 li r6,0x0c
164 tlbivax 0,r6
165 TLBSYNC
166
167/* 3. Setup a temp mapping and jump to it */
168 andi. r5, r3, 0x1 /* Find an entry not used and is non-zero */
169 addi r5, r5, 0x1
170 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
171 rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
172 mtspr SPRN_MAS0,r7
173 tlbre
174
175 /* grab and fixup the RPN */
176 mfspr r6,SPRN_MAS1 /* extract MAS1[SIZE] */
177 rlwinm r6,r6,25,27,31
178 li r8,-1
179 addi r6,r6,10
180 slw r6,r8,r6 /* convert to mask */
181
182 bl 1f /* Find our address */
1831: mflr r7
184
185 mfspr r8,SPRN_MAS3
186#ifdef CONFIG_PHYS_64BIT
187 mfspr r23,SPRN_MAS7
188#endif
189 and r8,r6,r8
190 subfic r9,r6,-4096
191 and r9,r9,r7
192
193 or r25,r8,r9
194 ori r8,r25,(MAS3_SX|MAS3_SW|MAS3_SR)
195
196 /* Just modify the entry ID and EPN for the temp mapping */
197 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
198 rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
199 mtspr SPRN_MAS0,r7
200 xori r6,r4,1 /* Setup TMP mapping in the other Address space */
201 slwi r6,r6,12
202 oris r6,r6,(MAS1_VALID|MAS1_IPROT)@h
203 ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_4K))@l
204 mtspr SPRN_MAS1,r6
205 mfspr r6,SPRN_MAS2
206 li r7,0 /* temp EPN = 0 */
207 rlwimi r7,r6,0,20,31
208 mtspr SPRN_MAS2,r7
209 mtspr SPRN_MAS3,r8
210 tlbwe
211
212 xori r6,r4,1
213 slwi r6,r6,5 /* setup new context with other address space */
214 bl 1f /* Find our address */
2151: mflr r9
216 rlwimi r7,r9,0,20,31
217 addi r7,r7,(2f - 1b)
218 mtspr SPRN_SRR0,r7
219 mtspr SPRN_SRR1,r6
220 rfi
2212:
222/* 4. Clear out PIDs & Search info */
223 li r6,0
224 mtspr SPRN_MAS6,r6
225 mtspr SPRN_PID0,r6
226
227 mfspr r7,SPRN_MMUCFG
228 rlwinm r7,r7,21,28,31 /* extract MMUCFG[NPIDS] */
229 cmpwi r7,3
230 bne 2f /* skip if NPIDS != 3 */
231 97
232 mtspr SPRN_PID1,r6 98#define ENTRY_MAPPING_BOOT_SETUP
233 mtspr SPRN_PID2,r6 99#include "fsl_booke_entry_mapping.S"
234 100#undef ENTRY_MAPPING_BOOT_SETUP
235/* 5. Invalidate mapping we started in */
2362:
237 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
238 rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
239 mtspr SPRN_MAS0,r7
240 tlbre
241 mfspr r6,SPRN_MAS1
242 rlwinm r6,r6,0,2,0 /* clear IPROT */
243 mtspr SPRN_MAS1,r6
244 tlbwe
245 /* Invalidate TLB1 */
246 li r9,0x0c
247 tlbivax 0,r9
248 TLBSYNC
249
250/* The mapping only needs to be cache-coherent on SMP */
251#ifdef CONFIG_SMP
252#define M_IF_SMP MAS2_M
253#else
254#define M_IF_SMP 0
255#endif
256
257/* 6. Setup KERNELBASE mapping in TLB1[0] */
258 lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
259 mtspr SPRN_MAS0,r6
260 lis r6,(MAS1_VALID|MAS1_IPROT)@h
261 ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l
262 mtspr SPRN_MAS1,r6
263 lis r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@h
264 ori r6,r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@l
265 mtspr SPRN_MAS2,r6
266 mtspr SPRN_MAS3,r8
267 tlbwe
268
269/* 7. Jump to KERNELBASE mapping */
270 lis r6,(KERNELBASE & ~0xfff)@h
271 ori r6,r6,(KERNELBASE & ~0xfff)@l
272 lis r7,MSR_KERNEL@h
273 ori r7,r7,MSR_KERNEL@l
274 bl 1f /* Find our address */
2751: mflr r9
276 rlwimi r6,r9,0,20,31
277 addi r6,r6,(2f - 1b)
278 mtspr SPRN_SRR0,r6
279 mtspr SPRN_SRR1,r7
280 rfi /* start execution out of TLB1[0] entry */
281
282/* 8. Clear out the temp mapping */
2832: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
284 rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
285 mtspr SPRN_MAS0,r7
286 tlbre
287 mfspr r8,SPRN_MAS1
288 rlwinm r8,r8,0,2,0 /* clear IPROT */
289 mtspr SPRN_MAS1,r8
290 tlbwe
291 /* Invalidate TLB1 */
292 li r9,0x0c
293 tlbivax 0,r9
294 TLBSYNC
295 101
296 /* Establish the interrupt vector offsets */ 102 /* Establish the interrupt vector offsets */
297 SET_IVOR(0, CriticalInput); 103 SET_IVOR(0, CriticalInput);
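Editor's note: roughly two hundred lines of e500 boot-time TLB setup that used to be open-coded here move into fsl_booke_entry_mapping.S and are included under ENTRY_MAPPING_BOOT_SETUP; the kexec path in misc_32.S (later in this diff) includes the same file under ENTRY_MAPPING_KEXEC_SETUP. The idiom is a shared body that specialises itself on the guard macro its includer defines. A hedged, purely illustrative C sketch of the same pattern:

#include <stdio.h>

/* Illustrative only: the shared body picks its flavour from the macro the
 * includer defined, as fsl_booke_entry_mapping.S now does. */
static void entry_mapping_setup(void)
{
#if defined(ENTRY_MAPPING_BOOT_SETUP)
	puts("boot flavour: locate the TLB entry we are executing from");
#elif defined(ENTRY_MAPPING_KEXEC_SETUP)
	puts("kexec flavour: caller stashed r3-r5 before jumping here");
#else
	puts("no flavour selected by the includer");
#endif
	puts("common part: invalidate stale TLB1 entries, build the new map");
}

int main(void)
{
	entry_mapping_setup();
	return 0;
}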
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 71cf280da184..21266abfbda6 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -140,14 +140,14 @@ static struct dma_map_ops ibmebus_dma_ops = {
140 140
141static int ibmebus_match_path(struct device *dev, void *data) 141static int ibmebus_match_path(struct device *dev, void *data)
142{ 142{
143 struct device_node *dn = to_of_device(dev)->node; 143 struct device_node *dn = to_of_device(dev)->dev.of_node;
144 return (dn->full_name && 144 return (dn->full_name &&
145 (strcasecmp((char *)data, dn->full_name) == 0)); 145 (strcasecmp((char *)data, dn->full_name) == 0));
146} 146}
147 147
148static int ibmebus_match_node(struct device *dev, void *data) 148static int ibmebus_match_node(struct device *dev, void *data)
149{ 149{
150 return to_of_device(dev)->node == data; 150 return to_of_device(dev)->dev.of_node == data;
151} 151}
152 152
153static int ibmebus_create_device(struct device_node *dn) 153static int ibmebus_create_device(struct device_node *dn)
@@ -202,7 +202,7 @@ static int ibmebus_create_devices(const struct of_device_id *matches)
202int ibmebus_register_driver(struct of_platform_driver *drv) 202int ibmebus_register_driver(struct of_platform_driver *drv)
203{ 203{
204 /* If the driver uses devices that ibmebus doesn't know, add them */ 204 /* If the driver uses devices that ibmebus doesn't know, add them */
205 ibmebus_create_devices(drv->match_table); 205 ibmebus_create_devices(drv->driver.of_match_table);
206 206
207 return of_register_driver(drv, &ibmebus_bus_type); 207 return of_register_driver(drv, &ibmebus_bus_type);
208} 208}
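Editor's note: both ibmebus match callbacks now read the firmware node from the generic struct device (dev.of_node) instead of the of_device-private field, and the driver's match table moves under driver.of_match_table. A minimal sketch, assuming the dev->of_node pointer this series introduces, of looking a bus device up by node (not an existing kernel helper):

#include <linux/device.h>
#include <linux/of.h>

/* Minimal sketch: find a device on a bus by its device-tree node via the
 * new dev->of_node pointer. */
static int match_node(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static struct device *find_by_node(struct bus_type *bus, struct device_node *np)
{
	return bus_find_device(bus, NULL, np, match_node);
}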
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index c533525ca56a..bc47352deb1f 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -378,17 +378,6 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
378 * single-stepped a copy of the instruction. The address of this 378 * single-stepped a copy of the instruction. The address of this
379 * copy is p->ainsn.insn. 379 * copy is p->ainsn.insn.
380 */ 380 */
381static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
382{
383 int ret;
384 unsigned int insn = *p->ainsn.insn;
385
386 regs->nip = (unsigned long)p->addr;
387 ret = emulate_step(regs, insn);
388 if (ret == 0)
389 regs->nip = (unsigned long)p->addr + 4;
390}
391
392static int __kprobes post_kprobe_handler(struct pt_regs *regs) 381static int __kprobes post_kprobe_handler(struct pt_regs *regs)
393{ 382{
394 struct kprobe *cur = kprobe_running(); 383 struct kprobe *cur = kprobe_running();
@@ -406,7 +395,8 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
406 cur->post_handler(cur, regs, 0); 395 cur->post_handler(cur, regs, 0);
407 } 396 }
408 397
409 resume_execution(cur, regs); 398 /* Adjust nip to after the single-stepped instruction */
399 regs->nip = (unsigned long)cur->addr + 4;
410 regs->msr |= kcb->kprobe_saved_msr; 400 regs->msr |= kcb->kprobe_saved_msr;
411 401
412 /*Restore back the original saved kprobes variables and continue. */ 402 /*Restore back the original saved kprobes variables and continue. */

diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 8043d1b73cf0..dc66d52dcff5 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -711,6 +711,22 @@ relocate_new_kernel:
711 /* r4 = reboot_code_buffer */ 711 /* r4 = reboot_code_buffer */
712 /* r5 = start_address */ 712 /* r5 = start_address */
713 713
714#ifdef CONFIG_FSL_BOOKE
715
716 mr r29, r3
717 mr r30, r4
718 mr r31, r5
719
720#define ENTRY_MAPPING_KEXEC_SETUP
721#include "fsl_booke_entry_mapping.S"
722#undef ENTRY_MAPPING_KEXEC_SETUP
723
724 mr r3, r29
725 mr r4, r30
726 mr r5, r31
727
728 li r0, 0
729#else
714 li r0, 0 730 li r0, 0
715 731
716 /* 732 /*
@@ -727,6 +743,7 @@ relocate_new_kernel:
727 rfi 743 rfi
728 744
7291: 7451:
746#endif
730 /* from this point address translation is turned off */ 747 /* from this point address translation is turned off */
731 /* and interrupts are disabled */ 748 /* and interrupts are disabled */
732 749
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index a359cb08e900..df78e0236a02 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -13,7 +13,7 @@
13static void of_device_make_bus_id(struct of_device *dev) 13static void of_device_make_bus_id(struct of_device *dev)
14{ 14{
15 static atomic_t bus_no_reg_magic; 15 static atomic_t bus_no_reg_magic;
16 struct device_node *node = dev->node; 16 struct device_node *node = dev->dev.of_node;
17 const u32 *reg; 17 const u32 *reg;
18 u64 addr; 18 u64 addr;
19 int magic; 19 int magic;
@@ -69,11 +69,10 @@ struct of_device *of_device_alloc(struct device_node *np,
69 if (!dev) 69 if (!dev)
70 return NULL; 70 return NULL;
71 71
72 dev->node = of_node_get(np); 72 dev->dev.of_node = of_node_get(np);
73 dev->dev.dma_mask = &dev->dma_mask; 73 dev->dev.dma_mask = &dev->archdata.dma_mask;
74 dev->dev.parent = parent; 74 dev->dev.parent = parent;
75 dev->dev.release = of_release_dev; 75 dev->dev.release = of_release_dev;
76 dev->dev.archdata.of_node = np;
77 76
78 if (bus_id) 77 if (bus_id)
79 dev_set_name(&dev->dev, "%s", bus_id); 78 dev_set_name(&dev->dev, "%s", bus_id);
@@ -95,17 +94,17 @@ int of_device_uevent(struct device *dev, struct kobj_uevent_env *env)
95 94
96 ofdev = to_of_device(dev); 95 ofdev = to_of_device(dev);
97 96
98 if (add_uevent_var(env, "OF_NAME=%s", ofdev->node->name)) 97 if (add_uevent_var(env, "OF_NAME=%s", ofdev->dev.of_node->name))
99 return -ENOMEM; 98 return -ENOMEM;
100 99
101 if (add_uevent_var(env, "OF_TYPE=%s", ofdev->node->type)) 100 if (add_uevent_var(env, "OF_TYPE=%s", ofdev->dev.of_node->type))
102 return -ENOMEM; 101 return -ENOMEM;
103 102
104 /* Since the compatible field can contain pretty much anything 103 /* Since the compatible field can contain pretty much anything
105 * it's not really legal to split it out with commas. We split it 104 * it's not really legal to split it out with commas. We split it
106 * up using a number of environment variables instead. */ 105 * up using a number of environment variables instead. */
107 106
108 compat = of_get_property(ofdev->node, "compatible", &cplen); 107 compat = of_get_property(ofdev->dev.of_node, "compatible", &cplen);
109 while (compat && *compat && cplen > 0) { 108 while (compat && *compat && cplen > 0) {
110 if (add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat)) 109 if (add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat))
111 return -ENOMEM; 110 return -ENOMEM;
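Editor's note: of_device.c now goes through ofdev->dev.of_node everywhere and keeps the DMA mask in archdata. The uevent hunk above walks the NUL-separated "compatible" strings of that node; a hedged, free-standing sketch of the same walk:

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/string.h>

/* Hedged sketch of the "compatible" walk performed by of_device_uevent(). */
static void print_compatibles(struct device_node *np)
{
	const char *compat;
	int cplen;

	compat = of_get_property(np, "compatible", &cplen);
	while (compat && *compat && cplen > 0) {
		int l = strlen(compat) + 1;

		printk(KERN_INFO "compatible: %s\n", compat);
		compat += l;
		cplen -= l;
	}
}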
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index 6c1dfc3ff8bc..487a98851ba6 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -74,7 +74,7 @@ struct of_device* of_platform_device_create(struct device_node *np,
74 if (!dev) 74 if (!dev)
75 return NULL; 75 return NULL;
76 76
77 dev->dma_mask = 0xffffffffUL; 77 dev->archdata.dma_mask = 0xffffffffUL;
78 dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 78 dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
79 79
80 dev->dev.bus = &of_platform_bus_type; 80 dev->dev.bus = &of_platform_bus_type;
@@ -195,7 +195,7 @@ EXPORT_SYMBOL(of_platform_bus_probe);
195 195
196static int of_dev_node_match(struct device *dev, void *data) 196static int of_dev_node_match(struct device *dev, void *data)
197{ 197{
198 return to_of_device(dev)->node == data; 198 return to_of_device(dev)->dev.of_node == data;
199} 199}
200 200
201struct of_device *of_find_device_by_node(struct device_node *np) 201struct of_device *of_find_device_by_node(struct device_node *np)
@@ -213,7 +213,7 @@ EXPORT_SYMBOL(of_find_device_by_node);
213static int of_dev_phandle_match(struct device *dev, void *data) 213static int of_dev_phandle_match(struct device *dev, void *data)
214{ 214{
215 phandle *ph = data; 215 phandle *ph = data;
216 return to_of_device(dev)->node->phandle == *ph; 216 return to_of_device(dev)->dev.of_node->phandle == *ph;
217} 217}
218 218
219struct of_device *of_find_device_by_phandle(phandle ph) 219struct of_device *of_find_device_by_phandle(phandle ph)
@@ -246,10 +246,10 @@ static int __devinit of_pci_phb_probe(struct of_device *dev,
246 if (ppc_md.pci_setup_phb == NULL) 246 if (ppc_md.pci_setup_phb == NULL)
247 return -ENODEV; 247 return -ENODEV;
248 248
249 printk(KERN_INFO "Setting up PCI bus %s\n", dev->node->full_name); 249 pr_info("Setting up PCI bus %s\n", dev->dev.of_node->full_name);
250 250
251 /* Alloc and setup PHB data structure */ 251 /* Alloc and setup PHB data structure */
252 phb = pcibios_alloc_controller(dev->node); 252 phb = pcibios_alloc_controller(dev->dev.of_node);
253 if (!phb) 253 if (!phb)
254 return -ENODEV; 254 return -ENODEV;
255 255
@@ -263,19 +263,19 @@ static int __devinit of_pci_phb_probe(struct of_device *dev,
263 } 263 }
264 264
265 /* Process "ranges" property */ 265 /* Process "ranges" property */
266 pci_process_bridge_OF_ranges(phb, dev->node, 0); 266 pci_process_bridge_OF_ranges(phb, dev->dev.of_node, 0);
267 267
268 /* Init pci_dn data structures */ 268 /* Init pci_dn data structures */
269 pci_devs_phb_init_dynamic(phb); 269 pci_devs_phb_init_dynamic(phb);
270 270
271 /* Register devices with EEH */ 271 /* Register devices with EEH */
272#ifdef CONFIG_EEH 272#ifdef CONFIG_EEH
273 if (dev->node->child) 273 if (dev->dev.of_node->child)
274 eeh_add_device_tree_early(dev->node); 274 eeh_add_device_tree_early(dev->dev.of_node);
275#endif /* CONFIG_EEH */ 275#endif /* CONFIG_EEH */
276 276
277 /* Scan the bus */ 277 /* Scan the bus */
278 pcibios_scan_phb(phb, dev->node); 278 pcibios_scan_phb(phb, dev->dev.of_node);
279 if (phb->bus == NULL) 279 if (phb->bus == NULL)
280 return -ENXIO; 280 return -ENXIO;
281 281
@@ -306,10 +306,11 @@ static struct of_device_id of_pci_phb_ids[] = {
306}; 306};
307 307
308static struct of_platform_driver of_pci_phb_driver = { 308static struct of_platform_driver of_pci_phb_driver = {
309 .match_table = of_pci_phb_ids,
310 .probe = of_pci_phb_probe, 309 .probe = of_pci_phb_probe,
311 .driver = { 310 .driver = {
312 .name = "of-pci", 311 .name = "of-pci",
312 .owner = THIS_MODULE,
313 .of_match_table = of_pci_phb_ids,
313 }, 314 },
314}; 315};
315 316
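Editor's note: of_pci_phb_driver now declares its match table through the embedded struct device_driver (.driver.of_match_table) and gains an .owner. For comparison, a hypothetical driver declared in the same new style; "acme,foo", foo_probe() and the stub body are placeholders, not real bindings or kernel code:

#include <linux/module.h>
#include <linux/of_platform.h>

static int foo_probe(struct of_device *dev, const struct of_device_id *match)
{
	return 0;	/* a real driver would set the device up here */
}

static const struct of_device_id foo_ids[] = {
	{ .compatible = "acme,foo" },
	{ }
};

static struct of_platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= {
		.name		= "foo",
		.owner		= THIS_MODULE,
		.of_match_table	= foo_ids,
	},
};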
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 0c0567e58409..5b38f6ae2b29 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1097,8 +1097,8 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
1097 if (dev->is_added) 1097 if (dev->is_added)
1098 continue; 1098 continue;
1099 1099
1100 /* Setup OF node pointer in archdata */ 1100 /* Setup OF node pointer in the device */
1101 sd->of_node = pci_device_to_OF_node(dev); 1101 dev->dev.of_node = pci_device_to_OF_node(dev);
1102 1102
1103 /* Fixup NUMA node as it may not be setup yet by the generic 1103 /* Fixup NUMA node as it may not be setup yet by the generic
1104 * code and is needed by the DMA init 1104 * code and is needed by the DMA init
@@ -1309,6 +1309,7 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
1309 printk(KERN_WARNING "PCI: Cannot allocate resource region " 1309 printk(KERN_WARNING "PCI: Cannot allocate resource region "
1310 "%d of PCI bridge %d, will remap\n", i, bus->number); 1310 "%d of PCI bridge %d, will remap\n", i, bus->number);
1311clear_resource: 1311clear_resource:
1312 res->start = res->end = 0;
1312 res->flags = 0; 1313 res->flags = 0;
1313 } 1314 }
1314 1315
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index ab3e392ac63c..3b4dcc82a4c1 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -101,6 +101,10 @@ EXPORT_SYMBOL(pci_dram_offset);
101EXPORT_SYMBOL(start_thread); 101EXPORT_SYMBOL(start_thread);
102EXPORT_SYMBOL(kernel_thread); 102EXPORT_SYMBOL(kernel_thread);
103 103
104#ifdef CONFIG_PPC_FPU
105EXPORT_SYMBOL_GPL(cvt_df);
106EXPORT_SYMBOL_GPL(cvt_fd);
107#endif
104EXPORT_SYMBOL(giveup_fpu); 108EXPORT_SYMBOL(giveup_fpu);
105#ifdef CONFIG_ALTIVEC 109#ifdef CONFIG_ALTIVEC
106EXPORT_SYMBOL(giveup_altivec); 110EXPORT_SYMBOL(giveup_altivec);
diff --git a/arch/powerpc/kernel/swsusp_booke.S b/arch/powerpc/kernel/swsusp_booke.S
new file mode 100644
index 000000000000..11a39307dd71
--- /dev/null
+++ b/arch/powerpc/kernel/swsusp_booke.S
@@ -0,0 +1,193 @@
1/*
2 * Based on swsusp_32.S, modified for FSL BookE by
3 * Anton Vorontsov <avorontsov@ru.mvista.com>
4 * Copyright (c) 2009-2010 MontaVista Software, LLC.
5 */
6
7#include <linux/threads.h>
8#include <asm/processor.h>
9#include <asm/page.h>
10#include <asm/cputable.h>
11#include <asm/thread_info.h>
12#include <asm/ppc_asm.h>
13#include <asm/asm-offsets.h>
14#include <asm/mmu.h>
15
16/*
17 * Structure for storing CPU registers on the save area.
18 */
19#define SL_SP 0
20#define SL_PC 4
21#define SL_MSR 8
22#define SL_TCR 0xc
23#define SL_SPRG0 0x10
24#define SL_SPRG1 0x14
25#define SL_SPRG2 0x18
26#define SL_SPRG3 0x1c
27#define SL_SPRG4 0x20
28#define SL_SPRG5 0x24
29#define SL_SPRG6 0x28
30#define SL_SPRG7 0x2c
31#define SL_TBU 0x30
32#define SL_TBL 0x34
33#define SL_R2 0x38
34#define SL_CR 0x3c
35#define SL_LR 0x40
36#define SL_R12 0x44 /* r12 to r31 */
37#define SL_SIZE (SL_R12 + 80)
38
39 .section .data
40 .align 5
41
42_GLOBAL(swsusp_save_area)
43 .space SL_SIZE
44
45
46 .section .text
47 .align 5
48
49_GLOBAL(swsusp_arch_suspend)
50 lis r11,swsusp_save_area@h
51 ori r11,r11,swsusp_save_area@l
52
53 mflr r0
54 stw r0,SL_LR(r11)
55 mfcr r0
56 stw r0,SL_CR(r11)
57 stw r1,SL_SP(r11)
58 stw r2,SL_R2(r11)
59 stmw r12,SL_R12(r11)
60
61 /* Save MSR & TCR */
62 mfmsr r4
63 stw r4,SL_MSR(r11)
64 mfspr r4,SPRN_TCR
65 stw r4,SL_TCR(r11)
66
67 /* Get a stable timebase and save it */
681: mfspr r4,SPRN_TBRU
69 stw r4,SL_TBU(r11)
70 mfspr r5,SPRN_TBRL
71 stw r5,SL_TBL(r11)
72 mfspr r3,SPRN_TBRU
73 cmpw r3,r4
74 bne 1b
75
76 /* Save SPRGs */
77 mfsprg r4,0
78 stw r4,SL_SPRG0(r11)
79 mfsprg r4,1
80 stw r4,SL_SPRG1(r11)
81 mfsprg r4,2
82 stw r4,SL_SPRG2(r11)
83 mfsprg r4,3
84 stw r4,SL_SPRG3(r11)
85 mfsprg r4,4
86 stw r4,SL_SPRG4(r11)
87 mfsprg r4,5
88 stw r4,SL_SPRG5(r11)
89 mfsprg r4,6
90 stw r4,SL_SPRG6(r11)
91 mfsprg r4,7
92 stw r4,SL_SPRG7(r11)
93
94 /* Call the low level suspend stuff (we should probably have made
 95 * a stackframe...)
96 */
97 bl swsusp_save
98
99 /* Restore LR from the save area */
100 lis r11,swsusp_save_area@h
101 ori r11,r11,swsusp_save_area@l
102 lwz r0,SL_LR(r11)
103 mtlr r0
104
105 blr
106
107_GLOBAL(swsusp_arch_resume)
108 sync
109
110 /* Load ptr the list of pages to copy in r3 */
111 lis r11,(restore_pblist)@h
112 ori r11,r11,restore_pblist@l
113 lwz r3,0(r11)
114
115 /* Copy the pages. This is a very basic implementation, to
116 * be replaced by something more cache efficient */
1171:
118 li r0,256
119 mtctr r0
120 lwz r5,pbe_address(r3) /* source */
121 lwz r6,pbe_orig_address(r3) /* destination */
1222:
123 lwz r8,0(r5)
124 lwz r9,4(r5)
125 lwz r10,8(r5)
126 lwz r11,12(r5)
127 addi r5,r5,16
128 stw r8,0(r6)
129 stw r9,4(r6)
130 stw r10,8(r6)
131 stw r11,12(r6)
132 addi r6,r6,16
133 bdnz 2b
134 lwz r3,pbe_next(r3)
135 cmpwi 0,r3,0
136 bne 1b
137
138 bl flush_dcache_L1
139 bl flush_instruction_cache
140
141 lis r11,swsusp_save_area@h
142 ori r11,r11,swsusp_save_area@l
143
144 lwz r4,SL_SPRG0(r11)
145 mtsprg 0,r4
146 lwz r4,SL_SPRG1(r11)
147 mtsprg 1,r4
148 lwz r4,SL_SPRG2(r11)
149 mtsprg 2,r4
150 lwz r4,SL_SPRG3(r11)
151 mtsprg 3,r4
152 lwz r4,SL_SPRG4(r11)
153 mtsprg 4,r4
154 lwz r4,SL_SPRG5(r11)
155 mtsprg 5,r4
156 lwz r4,SL_SPRG6(r11)
157 mtsprg 6,r4
158 lwz r4,SL_SPRG7(r11)
159 mtsprg 7,r4
160
161 /* restore the MSR */
162 lwz r3,SL_MSR(r11)
163 mtmsr r3
164
165 /* Restore TB */
166 li r3,0
167 mtspr SPRN_TBWL,r3
168 lwz r3,SL_TBU(r11)
169 lwz r4,SL_TBL(r11)
170 mtspr SPRN_TBWU,r3
171 mtspr SPRN_TBWL,r4
172
173 /* Restore TCR and clear any pending bits in TSR. */
174 lwz r4,SL_TCR(r11)
175 mtspr SPRN_TCR,r4
176 lis r4, (TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS)@h
177 mtspr SPRN_TSR,r4
178
179 /* Kick decrementer */
180 li r0,1
181 mtdec r0
182
183 /* Restore the callee-saved registers and return */
184 lwz r0,SL_CR(r11)
185 mtcr r0
186 lwz r2,SL_R2(r11)
187 lmw r12,SL_R12(r11)
188 lwz r1,SL_SP(r11)
189 lwz r0,SL_LR(r11)
190 mtlr r0
191
192 li r3,0
193 blr
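Editor's note: swsusp_arch_suspend() snapshots the SPRGs, TCR and the timebase; the timebase is sampled as TBU/TBL/TBU and the read is retried until the upper word is stable, so the stored 64-bit value is consistent. A hedged C rendering of that loop (a sketch, not kernel code):

/* mftbu/mftb are the usual read mnemonics for the timebase registers. */
static unsigned long long read_timebase(void)
{
	unsigned int tbu, tbl, tbu2;

	do {
		asm volatile("mftbu %0" : "=r" (tbu));
		asm volatile("mftb %0" : "=r" (tbl));
		asm volatile("mftbu %0" : "=r" (tbu2));
	} while (tbu != tbu2);

	return ((unsigned long long)tbu << 32) | tbl;
}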
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 3031fc712ad0..25fc33984c2b 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 2 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
3 * Copyright 2007-2010 Freescale Semiconductor, Inc.
3 * 4 *
4 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 6 * modify it under the terms of the GNU General Public License
@@ -305,7 +306,7 @@ static inline int check_io_access(struct pt_regs *regs)
305#ifndef CONFIG_FSL_BOOKE 306#ifndef CONFIG_FSL_BOOKE
306#define get_mc_reason(regs) ((regs)->dsisr) 307#define get_mc_reason(regs) ((regs)->dsisr)
307#else 308#else
308#define get_mc_reason(regs) (mfspr(SPRN_MCSR) & MCSR_MASK) 309#define get_mc_reason(regs) (mfspr(SPRN_MCSR))
309#endif 310#endif
310#define REASON_FP ESR_FP 311#define REASON_FP ESR_FP
311#define REASON_ILLEGAL (ESR_PIL | ESR_PUO) 312#define REASON_ILLEGAL (ESR_PIL | ESR_PUO)
@@ -421,6 +422,91 @@ int machine_check_47x(struct pt_regs *regs)
421 return 0; 422 return 0;
422} 423}
423#elif defined(CONFIG_E500) 424#elif defined(CONFIG_E500)
425int machine_check_e500mc(struct pt_regs *regs)
426{
427 unsigned long mcsr = mfspr(SPRN_MCSR);
428 unsigned long reason = mcsr;
429 int recoverable = 1;
430
431 printk("Machine check in kernel mode.\n");
432 printk("Caused by (from MCSR=%lx): ", reason);
433
434 if (reason & MCSR_MCP)
435 printk("Machine Check Signal\n");
436
437 if (reason & MCSR_ICPERR) {
438 printk("Instruction Cache Parity Error\n");
439
440 /*
441 * This is recoverable by invalidating the i-cache.
442 */
443 mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
444 while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
445 ;
446
447 /*
448 * This will generally be accompanied by an instruction
449 * fetch error report -- only treat MCSR_IF as fatal
450 * if it wasn't due to an L1 parity error.
451 */
452 reason &= ~MCSR_IF;
453 }
454
455 if (reason & MCSR_DCPERR_MC) {
456 printk("Data Cache Parity Error\n");
457 recoverable = 0;
458 }
459
460 if (reason & MCSR_L2MMU_MHIT) {
461 printk("Hit on multiple TLB entries\n");
462 recoverable = 0;
463 }
464
465 if (reason & MCSR_NMI)
466 printk("Non-maskable interrupt\n");
467
468 if (reason & MCSR_IF) {
469 printk("Instruction Fetch Error Report\n");
470 recoverable = 0;
471 }
472
473 if (reason & MCSR_LD) {
474 printk("Load Error Report\n");
475 recoverable = 0;
476 }
477
478 if (reason & MCSR_ST) {
479 printk("Store Error Report\n");
480 recoverable = 0;
481 }
482
483 if (reason & MCSR_LDG) {
484 printk("Guarded Load Error Report\n");
485 recoverable = 0;
486 }
487
488 if (reason & MCSR_TLBSYNC)
489 printk("Simultaneous tlbsync operations\n");
490
491 if (reason & MCSR_BSL2_ERR) {
492 printk("Level 2 Cache Error\n");
493 recoverable = 0;
494 }
495
496 if (reason & MCSR_MAV) {
497 u64 addr;
498
499 addr = mfspr(SPRN_MCAR);
500 addr |= (u64)mfspr(SPRN_MCARU) << 32;
501
502 printk("Machine Check %s Address: %#llx\n",
503 reason & MCSR_MEA ? "Effective" : "Physical", addr);
504 }
505
506 mtspr(SPRN_MCSR, mcsr);
507 return mfspr(SPRN_MCSR) == 0 && recoverable;
508}
509
424int machine_check_e500(struct pt_regs *regs) 510int machine_check_e500(struct pt_regs *regs)
425{ 511{
426 unsigned long reason = get_mc_reason(regs); 512 unsigned long reason = get_mc_reason(regs);
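Editor's note: machine_check_e500mc() decodes MCSR bit by bit, treats an instruction-cache parity error as recoverable by flash-invalidating the L1 i-cache (L1CSR1[ICFI]), then acknowledges the status register before reporting. MCSR bits are write-one-to-clear, which is why the function writes back the value it read and re-reads it. A hedged sketch of that final step only:

#include <asm/reg.h>

/* Hedged sketch: writing the decoded bits back clears them; a non-zero
 * readback means a new or unhandled machine-check condition is pending. */
static int mc_ack_and_check(unsigned long mcsr, int recoverable)
{
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}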
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 9ce7b62dc3a4..00b9436f7652 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -707,7 +707,7 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
707 * Check to see that device has a DMA window and configure 707 * Check to see that device has a DMA window and configure
708 * entitlement for the device. 708 * entitlement for the device.
709 */ 709 */
710 if (of_get_property(viodev->dev.archdata.of_node, 710 if (of_get_property(viodev->dev.of_node,
711 "ibm,my-dma-window", NULL)) { 711 "ibm,my-dma-window", NULL)) {
712 /* Check that the driver is CMO enabled and get desired DMA */ 712 /* Check that the driver is CMO enabled and get desired DMA */
713 if (!viodrv->get_desired_dma) { 713 if (!viodrv->get_desired_dma) {
@@ -1054,7 +1054,7 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
1054 if (firmware_has_feature(FW_FEATURE_ISERIES)) 1054 if (firmware_has_feature(FW_FEATURE_ISERIES))
1055 return vio_build_iommu_table_iseries(dev); 1055 return vio_build_iommu_table_iseries(dev);
1056 1056
1057 dma_window = of_get_property(dev->dev.archdata.of_node, 1057 dma_window = of_get_property(dev->dev.of_node,
1058 "ibm,my-dma-window", NULL); 1058 "ibm,my-dma-window", NULL);
1059 if (!dma_window) 1059 if (!dma_window)
1060 return NULL; 1060 return NULL;
@@ -1063,7 +1063,7 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
1063 if (tbl == NULL) 1063 if (tbl == NULL)
1064 return NULL; 1064 return NULL;
1065 1065
1066 of_parse_dma_window(dev->dev.archdata.of_node, dma_window, 1066 of_parse_dma_window(dev->dev.of_node, dma_window,
1067 &tbl->it_index, &offset, &size); 1067 &tbl->it_index, &offset, &size);
1068 1068
1069 /* TCE table size - measured in tce entries */ 1069 /* TCE table size - measured in tce entries */
@@ -1091,7 +1091,7 @@ static const struct vio_device_id *vio_match_device(
1091{ 1091{
1092 while (ids->type[0] != '\0') { 1092 while (ids->type[0] != '\0') {
1093 if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) && 1093 if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
1094 of_device_is_compatible(dev->dev.archdata.of_node, 1094 of_device_is_compatible(dev->dev.of_node,
1095 ids->compat)) 1095 ids->compat))
1096 return ids; 1096 return ids;
1097 ids++; 1097 ids++;
@@ -1184,7 +1184,7 @@ EXPORT_SYMBOL(vio_unregister_driver);
1184static void __devinit vio_dev_release(struct device *dev) 1184static void __devinit vio_dev_release(struct device *dev)
1185{ 1185{
1186 /* XXX should free TCE table */ 1186 /* XXX should free TCE table */
1187 of_node_put(dev->archdata.of_node); 1187 of_node_put(dev->of_node);
1188 kfree(to_vio_dev(dev)); 1188 kfree(to_vio_dev(dev));
1189} 1189}
1190 1190
@@ -1235,7 +1235,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
1235 if (unit_address != NULL) 1235 if (unit_address != NULL)
1236 viodev->unit_address = *unit_address; 1236 viodev->unit_address = *unit_address;
1237 } 1237 }
1238 viodev->dev.archdata.of_node = of_node_get(of_node); 1238 viodev->dev.of_node = of_node_get(of_node);
1239 1239
1240 if (firmware_has_feature(FW_FEATURE_CMO)) 1240 if (firmware_has_feature(FW_FEATURE_CMO))
1241 vio_cmo_set_dma_ops(viodev); 1241 vio_cmo_set_dma_ops(viodev);
@@ -1320,7 +1320,7 @@ static ssize_t name_show(struct device *dev,
1320static ssize_t devspec_show(struct device *dev, 1320static ssize_t devspec_show(struct device *dev,
1321 struct device_attribute *attr, char *buf) 1321 struct device_attribute *attr, char *buf)
1322{ 1322{
1323 struct device_node *of_node = dev->archdata.of_node; 1323 struct device_node *of_node = dev->of_node;
1324 1324
1325 return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none"); 1325 return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
1326} 1326}
@@ -1332,7 +1332,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
1332 struct device_node *dn; 1332 struct device_node *dn;
1333 const char *cp; 1333 const char *cp;
1334 1334
1335 dn = dev->archdata.of_node; 1335 dn = dev->of_node;
1336 if (!dn) 1336 if (!dn)
1337 return -ENODEV; 1337 return -ENODEV;
1338 cp = of_get_property(dn, "compatible", NULL); 1338 cp = of_get_property(dn, "compatible", NULL);
@@ -1370,7 +1370,7 @@ static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
1370 struct device_node *dn; 1370 struct device_node *dn;
1371 const char *cp; 1371 const char *cp;
1372 1372
1373 dn = dev->archdata.of_node; 1373 dn = dev->of_node;
1374 if (!dn) 1374 if (!dn)
1375 return -ENODEV; 1375 return -ENODEV;
1376 cp = of_get_property(dn, "compatible", NULL); 1376 cp = of_get_property(dn, "compatible", NULL);
@@ -1402,7 +1402,7 @@ static struct bus_type vio_bus_type = {
1402*/ 1402*/
1403const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length) 1403const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
1404{ 1404{
1405 return of_get_property(vdev->dev.archdata.of_node, which, length); 1405 return of_get_property(vdev->dev.of_node, which, length);
1406} 1406}
1407EXPORT_SYMBOL(vio_get_attribute); 1407EXPORT_SYMBOL(vio_get_attribute);
1408 1408
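Editor's note: the VIO bus code drops dev.archdata.of_node in favour of the generic dev.of_node pointer. A minimal sketch mirroring vio_get_attribute() above under that change:

#include <linux/of.h>
#include <asm/vio.h>

/* Firmware properties are now reached through the generic dev.of_node. */
static const void *get_fw_attribute(struct vio_dev *vdev, const char *which,
				    int *length)
{
	return of_get_property(vdev->dev.of_node, which, length);
}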
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index dcd01c82e701..8a0deefac08d 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -223,19 +223,17 @@ SECTIONS
223#endif 223#endif
224 224
225 /* The initial task and kernel stack */ 225 /* The initial task and kernel stack */
226 .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) { 226 INIT_TASK_DATA_SECTION(THREAD_SIZE)
227 INIT_TASK_DATA(THREAD_SIZE)
228 }
229 227
230 .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) { 228 .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
231 PAGE_ALIGNED_DATA(PAGE_SIZE) 229 PAGE_ALIGNED_DATA(PAGE_SIZE)
232 } 230 }
233 231
234 .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) { 232 .data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
235 CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) 233 CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
236 } 234 }
237 235
238 .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { 236 .data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
239 READ_MOSTLY_DATA(L1_CACHE_BYTES) 237 READ_MOSTLY_DATA(L1_CACHE_BYTES)
240 } 238 }
241 239
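Editor's note: the linker script switches to the double-dot section names (.data..page_aligned and friends) and takes the init task block from INIT_TASK_DATA_SECTION(). On the C side nothing changes for users: data still lands in these sections through the usual annotations, roughly as below, assuming this era's linux/cache.h and asm/cache.h definitions.

#include <linux/cache.h>

/* Sketch only: annotations that place data into the renamed sections. */
static int polling_interval __read_mostly;	/* -> .data..read_mostly */

static struct {
	unsigned long count;
} hot_counters __cacheline_aligned;		/* -> .data..cacheline_aligned */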
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 689a57c2ac80..73c0a3f64ed1 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -147,7 +147,7 @@ static int __init kvmppc_44x_init(void)
147 if (r) 147 if (r)
148 return r; 148 return r;
149 149
150 return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), THIS_MODULE); 150 return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
151} 151}
152 152
153static void __exit kvmppc_44x_exit(void) 153static void __exit kvmppc_44x_exit(void)
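Editor's note: kvm_init() grows a vcpu-alignment argument in this series; 44x (and the other PowerPC flavours) pass 0 to keep the allocator's default alignment. A sketch of the updated call, isolated for clarity:

#include <linux/module.h>
#include <linux/kvm_host.h>
#include <asm/kvm_44x.h>

/* The new third argument is the required alignment of the vcpu structure;
 * 0 means "no special alignment needed". */
static int __init kvmppc_44x_register(void)
{
	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
}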
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 60624cc9f4d4..b7baff78f90c 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -22,12 +22,34 @@ config KVM
22 select ANON_INODES 22 select ANON_INODES
23 select KVM_MMIO 23 select KVM_MMIO
24 24
25config KVM_BOOK3S_HANDLER
26 bool
27
28config KVM_BOOK3S_32_HANDLER
29 bool
30 select KVM_BOOK3S_HANDLER
31
25config KVM_BOOK3S_64_HANDLER 32config KVM_BOOK3S_64_HANDLER
26 bool 33 bool
34 select KVM_BOOK3S_HANDLER
35
36config KVM_BOOK3S_32
37 tristate "KVM support for PowerPC book3s_32 processors"
38 depends on EXPERIMENTAL && PPC_BOOK3S_32 && !SMP && !PTE_64BIT
39 select KVM
40 select KVM_BOOK3S_32_HANDLER
41 ---help---
42 Support running unmodified book3s_32 guest kernels
43 in virtual machines on book3s_32 host processors.
44
45 This module provides access to the hardware capabilities through
46 a character device node named /dev/kvm.
47
48 If unsure, say N.
27 49
28config KVM_BOOK3S_64 50config KVM_BOOK3S_64
29 tristate "KVM support for PowerPC book3s_64 processors" 51 tristate "KVM support for PowerPC book3s_64 processors"
30 depends on EXPERIMENTAL && PPC64 52 depends on EXPERIMENTAL && PPC_BOOK3S_64
31 select KVM 53 select KVM
32 select KVM_BOOK3S_64_HANDLER 54 select KVM_BOOK3S_64_HANDLER
33 ---help--- 55 ---help---
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 56484d652377..ff436066bf77 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -14,7 +14,7 @@ CFLAGS_emulate.o := -I.
14 14
15common-objs-y += powerpc.o emulate.o 15common-objs-y += powerpc.o emulate.o
16obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o 16obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o
17obj-$(CONFIG_KVM_BOOK3S_64_HANDLER) += book3s_64_exports.o 17obj-$(CONFIG_KVM_BOOK3S_HANDLER) += book3s_exports.o
18 18
19AFLAGS_booke_interrupts.o := -I$(obj) 19AFLAGS_booke_interrupts.o := -I$(obj)
20 20
@@ -40,17 +40,31 @@ kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs)
40 40
41kvm-book3s_64-objs := \ 41kvm-book3s_64-objs := \
42 $(common-objs-y) \ 42 $(common-objs-y) \
43 fpu.o \
44 book3s_paired_singles.o \
43 book3s.o \ 45 book3s.o \
44 book3s_64_emulate.o \ 46 book3s_emulate.o \
45 book3s_64_interrupts.o \ 47 book3s_interrupts.o \
46 book3s_64_mmu_host.o \ 48 book3s_64_mmu_host.o \
47 book3s_64_mmu.o \ 49 book3s_64_mmu.o \
48 book3s_32_mmu.o 50 book3s_32_mmu.o
49kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-objs) 51kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-objs)
50 52
53kvm-book3s_32-objs := \
54 $(common-objs-y) \
55 fpu.o \
56 book3s_paired_singles.o \
57 book3s.o \
58 book3s_emulate.o \
59 book3s_interrupts.o \
60 book3s_32_mmu_host.o \
61 book3s_32_mmu.o
62kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
63
51kvm-objs := $(kvm-objs-m) $(kvm-objs-y) 64kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
52 65
53obj-$(CONFIG_KVM_440) += kvm.o 66obj-$(CONFIG_KVM_440) += kvm.o
54obj-$(CONFIG_KVM_E500) += kvm.o 67obj-$(CONFIG_KVM_E500) += kvm.o
55obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o 68obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
69obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
56 70
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 604af29b71ed..b998abf1a63d 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/kvm_host.h> 17#include <linux/kvm_host.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/slab.h>
19 20
20#include <asm/reg.h> 21#include <asm/reg.h>
21#include <asm/cputable.h> 22#include <asm/cputable.h>
@@ -29,6 +30,7 @@
29#include <linux/gfp.h> 30#include <linux/gfp.h>
30#include <linux/sched.h> 31#include <linux/sched.h>
31#include <linux/vmalloc.h> 32#include <linux/vmalloc.h>
33#include <linux/highmem.h>
32 34
33#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU 35#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
34 36
@@ -36,7 +38,15 @@
36/* #define EXIT_DEBUG_SIMPLE */ 38/* #define EXIT_DEBUG_SIMPLE */
37/* #define DEBUG_EXT */ 39/* #define DEBUG_EXT */
38 40
39static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); 41static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
42 ulong msr);
43
44/* Some compatibility defines */
45#ifdef CONFIG_PPC_BOOK3S_32
46#define MSR_USER32 MSR_USER
47#define MSR_USER64 MSR_USER
48#define HW_PAGE_SIZE PAGE_SIZE
49#endif
40 50
41struct kvm_stats_debugfs_item debugfs_entries[] = { 51struct kvm_stats_debugfs_item debugfs_entries[] = {
42 { "exits", VCPU_STAT(sum_exits) }, 52 { "exits", VCPU_STAT(sum_exits) },
@@ -69,18 +79,26 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
69 79
70void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 80void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
71{ 81{
72 memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb)); 82#ifdef CONFIG_PPC_BOOK3S_64
73 memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu, 83 memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
84 memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
74 sizeof(get_paca()->shadow_vcpu)); 85 sizeof(get_paca()->shadow_vcpu));
75 get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max; 86 to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
87#endif
88
89#ifdef CONFIG_PPC_BOOK3S_32
90 current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
91#endif
76} 92}
77 93
78void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) 94void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
79{ 95{
80 memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb)); 96#ifdef CONFIG_PPC_BOOK3S_64
81 memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu, 97 memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
98 memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
82 sizeof(get_paca()->shadow_vcpu)); 99 sizeof(get_paca()->shadow_vcpu));
83 to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max; 100 to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
101#endif
84 102
85 kvmppc_giveup_ext(vcpu, MSR_FP); 103 kvmppc_giveup_ext(vcpu, MSR_FP);
86 kvmppc_giveup_ext(vcpu, MSR_VEC); 104 kvmppc_giveup_ext(vcpu, MSR_VEC);
@@ -131,18 +149,22 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
131 } 149 }
132 } 150 }
133 151
134 if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) || 152 if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) !=
135 (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) { 153 (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
136 kvmppc_mmu_flush_segments(vcpu); 154 kvmppc_mmu_flush_segments(vcpu);
137 kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc); 155 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
138 } 156 }
157
158 /* Preload FPU if it's enabled */
159 if (vcpu->arch.msr & MSR_FP)
160 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
139} 161}
140 162
141void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) 163void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
142{ 164{
143 vcpu->arch.srr0 = vcpu->arch.pc; 165 vcpu->arch.srr0 = kvmppc_get_pc(vcpu);
144 vcpu->arch.srr1 = vcpu->arch.msr | flags; 166 vcpu->arch.srr1 = vcpu->arch.msr | flags;
145 vcpu->arch.pc = to_book3s(vcpu)->hior + vec; 167 kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
146 vcpu->arch.mmu.reset_msr(vcpu); 168 vcpu->arch.mmu.reset_msr(vcpu);
147} 169}
148 170
@@ -218,6 +240,12 @@ void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
218 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL); 240 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
219} 241}
220 242
243void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
244 struct kvm_interrupt *irq)
245{
246 kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
247}
248
221int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) 249int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
222{ 250{
223 int deliver = 1; 251 int deliver = 1;
@@ -302,7 +330,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
302 printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions); 330 printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
303#endif 331#endif
304 priority = __ffs(*pending); 332 priority = __ffs(*pending);
305 while (priority <= (sizeof(unsigned int) * 8)) { 333 while (priority < BOOK3S_IRQPRIO_MAX) {
306 if (kvmppc_book3s_irqprio_deliver(vcpu, priority) && 334 if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
307 (priority != BOOK3S_IRQPRIO_DECREMENTER)) { 335 (priority != BOOK3S_IRQPRIO_DECREMENTER)) {
308 /* DEC interrupts get cleared by mtdec */ 336 /* DEC interrupts get cleared by mtdec */
@@ -318,13 +346,18 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
318 346
319void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) 347void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
320{ 348{
349 u32 host_pvr;
350
321 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB; 351 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
322 vcpu->arch.pvr = pvr; 352 vcpu->arch.pvr = pvr;
353#ifdef CONFIG_PPC_BOOK3S_64
323 if ((pvr >= 0x330000) && (pvr < 0x70330000)) { 354 if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
324 kvmppc_mmu_book3s_64_init(vcpu); 355 kvmppc_mmu_book3s_64_init(vcpu);
325 to_book3s(vcpu)->hior = 0xfff00000; 356 to_book3s(vcpu)->hior = 0xfff00000;
326 to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL; 357 to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
327 } else { 358 } else
359#endif
360 {
328 kvmppc_mmu_book3s_32_init(vcpu); 361 kvmppc_mmu_book3s_32_init(vcpu);
329 to_book3s(vcpu)->hior = 0; 362 to_book3s(vcpu)->hior = 0;
330 to_book3s(vcpu)->msr_mask = 0xffffffffULL; 363 to_book3s(vcpu)->msr_mask = 0xffffffffULL;
@@ -337,6 +370,32 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
337 !strcmp(cur_cpu_spec->platform, "ppc970")) 370 !strcmp(cur_cpu_spec->platform, "ppc970"))
338 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; 371 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
339 372
373 /* Cell performs badly if MSR_FEx are set. So let's hope nobody
374 really needs them in a VM on Cell and force disable them. */
375 if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
376 to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
377
378#ifdef CONFIG_PPC_BOOK3S_32
379 /* 32 bit Book3S always has 32 byte dcbz */
380 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
381#endif
382
383 /* On some CPUs we can execute paired single operations natively */
384 asm ( "mfpvr %0" : "=r"(host_pvr));
385 switch (host_pvr) {
386 case 0x00080200: /* lonestar 2.0 */
387 case 0x00088202: /* lonestar 2.2 */
388 case 0x70000100: /* gekko 1.0 */
389 case 0x00080100: /* gekko 2.0 */
390 case 0x00083203: /* gekko 2.3a */
391 case 0x00083213: /* gekko 2.3b */
392 case 0x00083204: /* gekko 2.4 */
393 case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */
394 case 0x00087200: /* broadway */
395 vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
396 /* Enable HID2.PSE - in case we need it later */
397 mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
398 }
340} 399}
341 400
342/* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To 401/* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
@@ -350,34 +409,29 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
350 */ 409 */
351static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) 410static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
352{ 411{
353 bool touched = false; 412 struct page *hpage;
354 hva_t hpage; 413 u64 hpage_offset;
355 u32 *page; 414 u32 *page;
356 int i; 415 int i;
357 416
358 hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT); 417 hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
359 if (kvm_is_error_hva(hpage)) 418 if (is_error_page(hpage))
360 return; 419 return;
361 420
362 hpage |= pte->raddr & ~PAGE_MASK; 421 hpage_offset = pte->raddr & ~PAGE_MASK;
363 hpage &= ~0xFFFULL; 422 hpage_offset &= ~0xFFFULL;
364 423 hpage_offset /= 4;
365 page = vmalloc(HW_PAGE_SIZE);
366
367 if (copy_from_user(page, (void __user *)hpage, HW_PAGE_SIZE))
368 goto out;
369 424
370 for (i=0; i < HW_PAGE_SIZE / 4; i++) 425 get_page(hpage);
371 if ((page[i] & 0xff0007ff) == INS_DCBZ) { 426 page = kmap_atomic(hpage, KM_USER0);
372 page[i] &= 0xfffffff7; // reserved instruction, so we trap
373 touched = true;
374 }
375 427
376 if (touched) 428 /* patch dcbz into reserved instruction, so we trap */
377 copy_to_user((void __user *)hpage, page, HW_PAGE_SIZE); 429 for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
430 if ((page[i] & 0xff0007ff) == INS_DCBZ)
431 page[i] &= 0xfffffff7;
378 432
379out: 433 kunmap_atomic(page, KM_USER0);
380 vfree(page); 434 put_page(hpage);
381} 435}
382 436
383static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, 437static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
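Editor's note: kvmppc_patch_dcbz() no longer bounces the whole page through vmalloc() and copy_from_user()/copy_to_user(); it maps the guest page directly with gfn_to_page() + kmap_atomic() and rewrites only the dcbz opcodes in place. The general read-modify-write pattern, as a hedged sketch with a placeholder transformation:

#include <linux/highmem.h>
#include <linux/kvm_host.h>

static u32 patch_word(u32 w)
{
	return w;	/* placeholder: a real user would transform the word */
}

/* Map the guest page, patch words in place, unmap, drop the reference. */
static void patch_guest_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *hpage = gfn_to_page(kvm, gfn);
	u32 *p;
	int i;

	if (is_error_page(hpage))
		return;

	p = kmap_atomic(hpage, KM_USER0);
	for (i = 0; i < PAGE_SIZE / 4; i++)
		p[i] = patch_word(p[i]);
	kunmap_atomic(p, KM_USER0);
	put_page(hpage);
}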
@@ -391,15 +445,7 @@ static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
391 } else { 445 } else {
392 pte->eaddr = eaddr; 446 pte->eaddr = eaddr;
393 pte->raddr = eaddr & 0xffffffff; 447 pte->raddr = eaddr & 0xffffffff;
394 pte->vpage = eaddr >> 12; 448 pte->vpage = VSID_REAL | eaddr >> 12;
395 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
396 case 0:
397 pte->vpage |= VSID_REAL;
398 case MSR_DR:
399 pte->vpage |= VSID_REAL_DR;
400 case MSR_IR:
401 pte->vpage |= VSID_REAL_IR;
402 }
403 pte->may_read = true; 449 pte->may_read = true;
404 pte->may_write = true; 450 pte->may_write = true;
405 pte->may_execute = true; 451 pte->may_execute = true;
@@ -434,55 +480,55 @@ err:
434 return kvmppc_bad_hva(); 480 return kvmppc_bad_hva();
435} 481}
436 482
437int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr) 483int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
484 bool data)
438{ 485{
439 struct kvmppc_pte pte; 486 struct kvmppc_pte pte;
440 hva_t hva = eaddr;
441 487
442 vcpu->stat.st++; 488 vcpu->stat.st++;
443 489
444 if (kvmppc_xlate(vcpu, eaddr, false, &pte)) 490 if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
445 goto err; 491 return -ENOENT;
446 492
447 hva = kvmppc_pte_to_hva(vcpu, &pte, false); 493 *eaddr = pte.raddr;
448 if (kvm_is_error_hva(hva))
449 goto err;
450 494
451 if (copy_to_user((void __user *)hva, ptr, size)) { 495 if (!pte.may_write)
452 printk(KERN_INFO "kvmppc_st at 0x%lx failed\n", hva); 496 return -EPERM;
453 goto err;
454 }
455 497
456 return 0; 498 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
499 return EMULATE_DO_MMIO;
457 500
458err: 501 return EMULATE_DONE;
459 return -ENOENT;
460} 502}
461 503
462int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr, 504int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
463 bool data) 505 bool data)
464{ 506{
465 struct kvmppc_pte pte; 507 struct kvmppc_pte pte;
466 hva_t hva = eaddr; 508 hva_t hva = *eaddr;
467 509
468 vcpu->stat.ld++; 510 vcpu->stat.ld++;
469 511
470 if (kvmppc_xlate(vcpu, eaddr, data, &pte)) 512 if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
471 goto err; 513 goto nopte;
514
515 *eaddr = pte.raddr;
472 516
473 hva = kvmppc_pte_to_hva(vcpu, &pte, true); 517 hva = kvmppc_pte_to_hva(vcpu, &pte, true);
474 if (kvm_is_error_hva(hva)) 518 if (kvm_is_error_hva(hva))
475 goto err; 519 goto mmio;
476 520
477 if (copy_from_user(ptr, (void __user *)hva, size)) { 521 if (copy_from_user(ptr, (void __user *)hva, size)) {
478 printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva); 522 printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
479 goto err; 523 goto mmio;
480 } 524 }
481 525
482 return 0; 526 return EMULATE_DONE;
483 527
484err: 528nopte:
485 return -ENOENT; 529 return -ENOENT;
530mmio:
531 return EMULATE_DO_MMIO;
486} 532}
487 533
488static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 534static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
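Editor's note: kvmppc_st() and kvmppc_ld() now take the effective address by pointer (rewritten to the translated real address) and report through the EMULATE_* convention, with -ENOENT for a missing translation and, for stores, -EPERM when the page is not writable. A hypothetical caller under the new interface; the error handling shown is illustrative, not lifted from the kernel:

#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

static void fetch_guest_word(struct kvm_vcpu *vcpu)
{
	ulong addr = kvmppc_get_pc(vcpu);
	u32 word;

	switch (kvmppc_ld(vcpu, &addr, sizeof(word), &word, false)) {
	case EMULATE_DONE:
		/* word is valid; addr was rewritten to the real address */
		break;
	case -ENOENT:
		/* no guest translation: a caller would queue a storage
		 * interrupt, as kvmppc_read_inst() does further down */
		break;
	default:
		/* EMULATE_DO_MMIO: access must go through MMIO emulation */
		break;
	}
}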
@@ -499,12 +545,11 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
499 int page_found = 0; 545 int page_found = 0;
500 struct kvmppc_pte pte; 546 struct kvmppc_pte pte;
501 bool is_mmio = false; 547 bool is_mmio = false;
548 bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
549 bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
550 u64 vsid;
502 551
503 if ( vec == BOOK3S_INTERRUPT_DATA_STORAGE ) { 552 relocated = data ? dr : ir;
504 relocated = (vcpu->arch.msr & MSR_DR);
505 } else {
506 relocated = (vcpu->arch.msr & MSR_IR);
507 }
508 553
509 /* Resolve real address if translation turned on */ 554 /* Resolve real address if translation turned on */
510 if (relocated) { 555 if (relocated) {
@@ -516,14 +561,25 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
516 pte.raddr = eaddr & 0xffffffff; 561 pte.raddr = eaddr & 0xffffffff;
517 pte.eaddr = eaddr; 562 pte.eaddr = eaddr;
518 pte.vpage = eaddr >> 12; 563 pte.vpage = eaddr >> 12;
519 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { 564 }
520 case 0: 565
521 pte.vpage |= VSID_REAL; 566 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
522 case MSR_DR: 567 case 0:
523 pte.vpage |= VSID_REAL_DR; 568 pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
524 case MSR_IR: 569 break;
525 pte.vpage |= VSID_REAL_IR; 570 case MSR_DR:
526 } 571 case MSR_IR:
572 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
573
574 if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR)
575 pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
576 else
577 pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
578 pte.vpage |= vsid;
579
580 if (vsid == -1)
581 page_found = -EINVAL;
582 break;
527 } 583 }
528 584
529 if (vcpu->arch.mmu.is_dcbz32(vcpu) && 585 if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -538,20 +594,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
538 594
539 if (page_found == -ENOENT) { 595 if (page_found == -ENOENT) {
540 /* Page not found in guest PTE entries */ 596 /* Page not found in guest PTE entries */
541 vcpu->arch.dear = vcpu->arch.fault_dear; 597 vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
542 to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr; 598 to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
543 vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL); 599 vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
544 kvmppc_book3s_queue_irqprio(vcpu, vec); 600 kvmppc_book3s_queue_irqprio(vcpu, vec);
545 } else if (page_found == -EPERM) { 601 } else if (page_found == -EPERM) {
546 /* Storage protection */ 602 /* Storage protection */
547 vcpu->arch.dear = vcpu->arch.fault_dear; 603 vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
548 to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE; 604 to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
549 to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT; 605 to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
550 vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL); 606 vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
551 kvmppc_book3s_queue_irqprio(vcpu, vec); 607 kvmppc_book3s_queue_irqprio(vcpu, vec);
552 } else if (page_found == -EINVAL) { 608 } else if (page_found == -EINVAL) {
553 /* Page not found in guest SLB */ 609 /* Page not found in guest SLB */
554 vcpu->arch.dear = vcpu->arch.fault_dear; 610 vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
555 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); 611 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
556 } else if (!is_mmio && 612 } else if (!is_mmio &&
557 kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { 613 kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
@@ -583,11 +639,13 @@ static inline int get_fpr_index(int i)
583} 639}
584 640
585/* Give up external provider (FPU, Altivec, VSX) */ 641/* Give up external provider (FPU, Altivec, VSX) */
586static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) 642void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
587{ 643{
588 struct thread_struct *t = &current->thread; 644 struct thread_struct *t = &current->thread;
589 u64 *vcpu_fpr = vcpu->arch.fpr; 645 u64 *vcpu_fpr = vcpu->arch.fpr;
646#ifdef CONFIG_VSX
590 u64 *vcpu_vsx = vcpu->arch.vsr; 647 u64 *vcpu_vsx = vcpu->arch.vsr;
648#endif
591 u64 *thread_fpr = (u64*)t->fpr; 649 u64 *thread_fpr = (u64*)t->fpr;
592 int i; 650 int i;
593 651
@@ -629,21 +687,65 @@ static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
629 kvmppc_recalc_shadow_msr(vcpu); 687 kvmppc_recalc_shadow_msr(vcpu);
630} 688}
631 689
690static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
691{
692 ulong srr0 = kvmppc_get_pc(vcpu);
693 u32 last_inst = kvmppc_get_last_inst(vcpu);
694 int ret;
695
696 ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
697 if (ret == -ENOENT) {
698 vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1);
699 vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0);
700 vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
701 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
702 return EMULATE_AGAIN;
703 }
704
705 return EMULATE_DONE;
706}
707
708static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
709{
710
711 /* Need to do paired single emulation? */
712 if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
713 return EMULATE_DONE;
714
715 /* Read out the instruction */
716 if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
717 /* Need to emulate */
718 return EMULATE_FAIL;
719
720 return EMULATE_AGAIN;
721}
722
632/* Handle external providers (FPU, Altivec, VSX) */ 723/* Handle external providers (FPU, Altivec, VSX) */
633static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, 724static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
634 ulong msr) 725 ulong msr)
635{ 726{
636 struct thread_struct *t = &current->thread; 727 struct thread_struct *t = &current->thread;
637 u64 *vcpu_fpr = vcpu->arch.fpr; 728 u64 *vcpu_fpr = vcpu->arch.fpr;
729#ifdef CONFIG_VSX
638 u64 *vcpu_vsx = vcpu->arch.vsr; 730 u64 *vcpu_vsx = vcpu->arch.vsr;
731#endif
639 u64 *thread_fpr = (u64*)t->fpr; 732 u64 *thread_fpr = (u64*)t->fpr;
640 int i; 733 int i;
641 734
735 /* When we have paired singles, we emulate in software */
736 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
737 return RESUME_GUEST;
738
642 if (!(vcpu->arch.msr & msr)) { 739 if (!(vcpu->arch.msr & msr)) {
643 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 740 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
644 return RESUME_GUEST; 741 return RESUME_GUEST;
645 } 742 }
646 743
744 /* We already own the ext */
745 if (vcpu->arch.guest_owned_ext & msr) {
746 return RESUME_GUEST;
747 }
748
647#ifdef DEBUG_EXT 749#ifdef DEBUG_EXT
648 printk(KERN_INFO "Loading up ext 0x%lx\n", msr); 750 printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
649#endif 751#endif
@@ -696,21 +798,33 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
696 run->ready_for_interrupt_injection = 1; 798 run->ready_for_interrupt_injection = 1;
697#ifdef EXIT_DEBUG 799#ifdef EXIT_DEBUG
698 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n", 800 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n",
699 exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear, 801 exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
700 kvmppc_get_dec(vcpu), vcpu->arch.msr); 802 kvmppc_get_dec(vcpu), to_svcpu(vcpu)->shadow_srr1);
701#elif defined (EXIT_DEBUG_SIMPLE) 803#elif defined (EXIT_DEBUG_SIMPLE)
702 if ((exit_nr != 0x900) && (exit_nr != 0x500)) 804 if ((exit_nr != 0x900) && (exit_nr != 0x500))
703 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n", 805 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
704 exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear, 806 exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
705 vcpu->arch.msr); 807 vcpu->arch.msr);
706#endif 808#endif
707 kvm_resched(vcpu); 809 kvm_resched(vcpu);
708 switch (exit_nr) { 810 switch (exit_nr) {
709 case BOOK3S_INTERRUPT_INST_STORAGE: 811 case BOOK3S_INTERRUPT_INST_STORAGE:
710 vcpu->stat.pf_instruc++; 812 vcpu->stat.pf_instruc++;
813
814#ifdef CONFIG_PPC_BOOK3S_32
815 /* We set segments as unused segments when invalidating them. So
816 * treat the respective fault as segment fault. */
817 if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
818 == SR_INVALID) {
819 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
820 r = RESUME_GUEST;
821 break;
822 }
823#endif
824
711 /* only care about PTEG not found errors, but leave NX alone */ 825 /* only care about PTEG not found errors, but leave NX alone */
712 if (vcpu->arch.shadow_srr1 & 0x40000000) { 826 if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
713 r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr); 827 r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
714 vcpu->stat.sp_instruc++; 828 vcpu->stat.sp_instruc++;
715 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && 829 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
716 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { 830 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
@@ -719,37 +833,52 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
719 * so we can't use the NX bit inside the guest. Let's cross our fingers, 833 * so we can't use the NX bit inside the guest. Let's cross our fingers,
720 * that no guest that needs the dcbz hack does NX. 834 * that no guest that needs the dcbz hack does NX.
721 */ 835 */
722 kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL); 836 kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
837 r = RESUME_GUEST;
723 } else { 838 } else {
724 vcpu->arch.msr |= vcpu->arch.shadow_srr1 & 0x58000000; 839 vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
725 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 840 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
726 kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL); 841 kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
727 r = RESUME_GUEST; 842 r = RESUME_GUEST;
728 } 843 }
729 break; 844 break;
730 case BOOK3S_INTERRUPT_DATA_STORAGE: 845 case BOOK3S_INTERRUPT_DATA_STORAGE:
846 {
847 ulong dar = kvmppc_get_fault_dar(vcpu);
731 vcpu->stat.pf_storage++; 848 vcpu->stat.pf_storage++;
849
850#ifdef CONFIG_PPC_BOOK3S_32
851 /* We set segments as unused segments when invalidating them. So
852 * treat the respective fault as segment fault. */
853 if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
854 kvmppc_mmu_map_segment(vcpu, dar);
855 r = RESUME_GUEST;
856 break;
857 }
858#endif
859
732 /* The only case we need to handle is missing shadow PTEs */ 860 /* The only case we need to handle is missing shadow PTEs */
733 if (vcpu->arch.fault_dsisr & DSISR_NOHPTE) { 861 if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
734 r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.fault_dear, exit_nr); 862 r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
735 } else { 863 } else {
736 vcpu->arch.dear = vcpu->arch.fault_dear; 864 vcpu->arch.dear = dar;
737 to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr; 865 to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
738 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 866 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
739 kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL); 867 kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFUL);
740 r = RESUME_GUEST; 868 r = RESUME_GUEST;
741 } 869 }
742 break; 870 break;
871 }
743 case BOOK3S_INTERRUPT_DATA_SEGMENT: 872 case BOOK3S_INTERRUPT_DATA_SEGMENT:
744 if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.fault_dear) < 0) { 873 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
745 vcpu->arch.dear = vcpu->arch.fault_dear; 874 vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
746 kvmppc_book3s_queue_irqprio(vcpu, 875 kvmppc_book3s_queue_irqprio(vcpu,
747 BOOK3S_INTERRUPT_DATA_SEGMENT); 876 BOOK3S_INTERRUPT_DATA_SEGMENT);
748 } 877 }
749 r = RESUME_GUEST; 878 r = RESUME_GUEST;
750 break; 879 break;
751 case BOOK3S_INTERRUPT_INST_SEGMENT: 880 case BOOK3S_INTERRUPT_INST_SEGMENT:
752 if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc) < 0) { 881 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
753 kvmppc_book3s_queue_irqprio(vcpu, 882 kvmppc_book3s_queue_irqprio(vcpu,
754 BOOK3S_INTERRUPT_INST_SEGMENT); 883 BOOK3S_INTERRUPT_INST_SEGMENT);
755 } 884 }
@@ -764,18 +893,22 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
764 vcpu->stat.ext_intr_exits++; 893 vcpu->stat.ext_intr_exits++;
765 r = RESUME_GUEST; 894 r = RESUME_GUEST;
766 break; 895 break;
896 case BOOK3S_INTERRUPT_PERFMON:
897 r = RESUME_GUEST;
898 break;
767 case BOOK3S_INTERRUPT_PROGRAM: 899 case BOOK3S_INTERRUPT_PROGRAM:
768 { 900 {
769 enum emulation_result er; 901 enum emulation_result er;
770 ulong flags; 902 ulong flags;
771 903
772 flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; 904program_interrupt:
905 flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
773 906
774 if (vcpu->arch.msr & MSR_PR) { 907 if (vcpu->arch.msr & MSR_PR) {
775#ifdef EXIT_DEBUG 908#ifdef EXIT_DEBUG
776 printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", vcpu->arch.pc, vcpu->arch.last_inst); 909 printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
777#endif 910#endif
778 if ((vcpu->arch.last_inst & 0xff0007ff) != 911 if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
779 (INS_DCBZ & 0xfffffff7)) { 912 (INS_DCBZ & 0xfffffff7)) {
780 kvmppc_core_queue_program(vcpu, flags); 913 kvmppc_core_queue_program(vcpu, flags);
781 r = RESUME_GUEST; 914 r = RESUME_GUEST;
@@ -789,33 +922,80 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
789 case EMULATE_DONE: 922 case EMULATE_DONE:
790 r = RESUME_GUEST_NV; 923 r = RESUME_GUEST_NV;
791 break; 924 break;
925 case EMULATE_AGAIN:
926 r = RESUME_GUEST;
927 break;
792 case EMULATE_FAIL: 928 case EMULATE_FAIL:
793 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", 929 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
794 __func__, vcpu->arch.pc, vcpu->arch.last_inst); 930 __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
795 kvmppc_core_queue_program(vcpu, flags); 931 kvmppc_core_queue_program(vcpu, flags);
796 r = RESUME_GUEST; 932 r = RESUME_GUEST;
797 break; 933 break;
934 case EMULATE_DO_MMIO:
935 run->exit_reason = KVM_EXIT_MMIO;
936 r = RESUME_HOST_NV;
937 break;
798 default: 938 default:
799 BUG(); 939 BUG();
800 } 940 }
801 break; 941 break;
802 } 942 }
803 case BOOK3S_INTERRUPT_SYSCALL: 943 case BOOK3S_INTERRUPT_SYSCALL:
804#ifdef EXIT_DEBUG 944 // XXX make user settable
805 printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0)); 945 if (vcpu->arch.osi_enabled &&
806#endif 946 (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
807 vcpu->stat.syscall_exits++; 947 (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
808 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 948 u64 *gprs = run->osi.gprs;
809 r = RESUME_GUEST; 949 int i;
950
951 run->exit_reason = KVM_EXIT_OSI;
952 for (i = 0; i < 32; i++)
953 gprs[i] = kvmppc_get_gpr(vcpu, i);
954 vcpu->arch.osi_needed = 1;
955 r = RESUME_HOST_NV;
956
957 } else {
958 vcpu->stat.syscall_exits++;
959 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
960 r = RESUME_GUEST;
961 }
810 break; 962 break;
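
The OSI hypercall exit is completed in userspace: the handler above copies all 32 GPRs into the mmap'ed kvm_run area, flags osi_needed and returns to the host with KVM_EXIT_OSI, which suggests the (possibly modified) GPRs are written back into the vcpu when it is next run. A rough sketch of the userspace side, assuming the standard KVM run loop; the concrete OSI call ABI is outside this patch:

#include <linux/kvm.h>

/* Hypothetical handler, invoked after ioctl(vcpu_fd, KVM_RUN, 0) returns. */
static void handle_osi_exit(struct kvm_run *run)
{
        __u64 *gprs = run->osi.gprs;

        if (run->exit_reason != KVM_EXIT_OSI)
                return;

        /* r3/r4 still hold the magic values checked above, the remaining
         * GPRs carry the actual call.  Whatever is written here should end
         * up in the guest registers again once the vcpu is re-entered. */
        gprs[3] = 0;    /* e.g. report success - purely illustrative */
}
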
811 case BOOK3S_INTERRUPT_FP_UNAVAIL: 963 case BOOK3S_INTERRUPT_FP_UNAVAIL:
812 r = kvmppc_handle_ext(vcpu, exit_nr, MSR_FP);
813 break;
814 case BOOK3S_INTERRUPT_ALTIVEC: 964 case BOOK3S_INTERRUPT_ALTIVEC:
815 r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VEC);
816 break;
817 case BOOK3S_INTERRUPT_VSX: 965 case BOOK3S_INTERRUPT_VSX:
818 r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VSX); 966 {
967 int ext_msr = 0;
968
969 switch (exit_nr) {
970 case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP; break;
971 case BOOK3S_INTERRUPT_ALTIVEC: ext_msr = MSR_VEC; break;
972 case BOOK3S_INTERRUPT_VSX: ext_msr = MSR_VSX; break;
973 }
974
975 switch (kvmppc_check_ext(vcpu, exit_nr)) {
976 case EMULATE_DONE:
977 /* everything ok - let's enable the ext */
978 r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
979 break;
980 case EMULATE_FAIL:
981 /* we need to emulate this instruction */
982 goto program_interrupt;
983 break;
984 default:
985 /* nothing to worry about - go again */
986 break;
987 }
988 break;
989 }
990 case BOOK3S_INTERRUPT_ALIGNMENT:
991 if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
992 to_book3s(vcpu)->dsisr = kvmppc_alignment_dsisr(vcpu,
993 kvmppc_get_last_inst(vcpu));
994 vcpu->arch.dear = kvmppc_alignment_dar(vcpu,
995 kvmppc_get_last_inst(vcpu));
996 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
997 }
998 r = RESUME_GUEST;
819 break; 999 break;
820 case BOOK3S_INTERRUPT_MACHINE_CHECK: 1000 case BOOK3S_INTERRUPT_MACHINE_CHECK:
821 case BOOK3S_INTERRUPT_TRACE: 1001 case BOOK3S_INTERRUPT_TRACE:
@@ -825,7 +1005,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
825 default: 1005 default:
826 /* Ugh - bork here! What did we get? */ 1006 /* Ugh - bork here! What did we get? */
827 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", 1007 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
828 exit_nr, vcpu->arch.pc, vcpu->arch.shadow_srr1); 1008 exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
829 r = RESUME_HOST; 1009 r = RESUME_HOST;
830 BUG(); 1010 BUG();
831 break; 1011 break;
@@ -852,7 +1032,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
852 } 1032 }
853 1033
854#ifdef EXIT_DEBUG 1034#ifdef EXIT_DEBUG
855 printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, vcpu->arch.pc, r); 1035 printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, kvmppc_get_pc(vcpu), r);
856#endif 1036#endif
857 1037
858 return r; 1038 return r;
@@ -867,10 +1047,12 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
867{ 1047{
868 int i; 1048 int i;
869 1049
870 regs->pc = vcpu->arch.pc; 1050 vcpu_load(vcpu);
1051
1052 regs->pc = kvmppc_get_pc(vcpu);
871 regs->cr = kvmppc_get_cr(vcpu); 1053 regs->cr = kvmppc_get_cr(vcpu);
872 regs->ctr = vcpu->arch.ctr; 1054 regs->ctr = kvmppc_get_ctr(vcpu);
873 regs->lr = vcpu->arch.lr; 1055 regs->lr = kvmppc_get_lr(vcpu);
874 regs->xer = kvmppc_get_xer(vcpu); 1056 regs->xer = kvmppc_get_xer(vcpu);
875 regs->msr = vcpu->arch.msr; 1057 regs->msr = vcpu->arch.msr;
876 regs->srr0 = vcpu->arch.srr0; 1058 regs->srr0 = vcpu->arch.srr0;
@@ -887,6 +1069,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
887 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) 1069 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
888 regs->gpr[i] = kvmppc_get_gpr(vcpu, i); 1070 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
889 1071
1072 vcpu_put(vcpu);
1073
890 return 0; 1074 return 0;
891} 1075}
892 1076
@@ -894,10 +1078,12 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
894{ 1078{
895 int i; 1079 int i;
896 1080
897 vcpu->arch.pc = regs->pc; 1081 vcpu_load(vcpu);
1082
1083 kvmppc_set_pc(vcpu, regs->pc);
898 kvmppc_set_cr(vcpu, regs->cr); 1084 kvmppc_set_cr(vcpu, regs->cr);
899 vcpu->arch.ctr = regs->ctr; 1085 kvmppc_set_ctr(vcpu, regs->ctr);
900 vcpu->arch.lr = regs->lr; 1086 kvmppc_set_lr(vcpu, regs->lr);
901 kvmppc_set_xer(vcpu, regs->xer); 1087 kvmppc_set_xer(vcpu, regs->xer);
902 kvmppc_set_msr(vcpu, regs->msr); 1088 kvmppc_set_msr(vcpu, regs->msr);
903 vcpu->arch.srr0 = regs->srr0; 1089 vcpu->arch.srr0 = regs->srr0;
@@ -913,6 +1099,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
913 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) 1099 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
914 kvmppc_set_gpr(vcpu, i, regs->gpr[i]); 1100 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
915 1101
1102 vcpu_put(vcpu);
1103
916 return 0; 1104 return 0;
917} 1105}
918 1106
@@ -922,6 +1110,8 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
922 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 1110 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
923 int i; 1111 int i;
924 1112
1113 vcpu_load(vcpu);
1114
925 sregs->pvr = vcpu->arch.pvr; 1115 sregs->pvr = vcpu->arch.pvr;
926 1116
927 sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1; 1117 sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
@@ -940,6 +1130,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
940 sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw; 1130 sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
941 } 1131 }
942 } 1132 }
1133
1134 vcpu_put(vcpu);
1135
943 return 0; 1136 return 0;
944} 1137}
945 1138
@@ -949,6 +1142,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
949 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 1142 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
950 int i; 1143 int i;
951 1144
1145 vcpu_load(vcpu);
1146
952 kvmppc_set_pvr(vcpu, sregs->pvr); 1147 kvmppc_set_pvr(vcpu, sregs->pvr);
953 1148
954 vcpu3s->sdr1 = sregs->u.s.sdr1; 1149 vcpu3s->sdr1 = sregs->u.s.sdr1;
@@ -975,6 +1170,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
975 1170
976 /* Flush the MMU after messing with the segments */ 1171 /* Flush the MMU after messing with the segments */
977 kvmppc_mmu_pte_flush(vcpu, 0, 0); 1172 kvmppc_mmu_pte_flush(vcpu, 0, 0);
1173
1174 vcpu_put(vcpu);
1175
978 return 0; 1176 return 0;
979} 1177}
980 1178
@@ -1042,24 +1240,33 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
1042{ 1240{
1043 struct kvmppc_vcpu_book3s *vcpu_book3s; 1241 struct kvmppc_vcpu_book3s *vcpu_book3s;
1044 struct kvm_vcpu *vcpu; 1242 struct kvm_vcpu *vcpu;
1045 int err; 1243 int err = -ENOMEM;
1046 1244
1047 vcpu_book3s = (struct kvmppc_vcpu_book3s *)__get_free_pages( GFP_KERNEL | __GFP_ZERO, 1245 vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
1048 get_order(sizeof(struct kvmppc_vcpu_book3s))); 1246 if (!vcpu_book3s)
1049 if (!vcpu_book3s) {
1050 err = -ENOMEM;
1051 goto out; 1247 goto out;
1052 } 1248
1249 memset(vcpu_book3s, 0, sizeof(struct kvmppc_vcpu_book3s));
1250
1251 vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
1252 kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
1253 if (!vcpu_book3s->shadow_vcpu)
1254 goto free_vcpu;
1053 1255
1054 vcpu = &vcpu_book3s->vcpu; 1256 vcpu = &vcpu_book3s->vcpu;
1055 err = kvm_vcpu_init(vcpu, kvm, id); 1257 err = kvm_vcpu_init(vcpu, kvm, id);
1056 if (err) 1258 if (err)
1057 goto free_vcpu; 1259 goto free_shadow_vcpu;
1058 1260
1059 vcpu->arch.host_retip = kvm_return_point; 1261 vcpu->arch.host_retip = kvm_return_point;
1060 vcpu->arch.host_msr = mfmsr(); 1262 vcpu->arch.host_msr = mfmsr();
1263#ifdef CONFIG_PPC_BOOK3S_64
1061 /* default to book3s_64 (970fx) */ 1264 /* default to book3s_64 (970fx) */
1062 vcpu->arch.pvr = 0x3C0301; 1265 vcpu->arch.pvr = 0x3C0301;
1266#else
1267 /* default to book3s_32 (750) */
1268 vcpu->arch.pvr = 0x84202;
1269#endif
1063 kvmppc_set_pvr(vcpu, vcpu->arch.pvr); 1270 kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
1064 vcpu_book3s->slb_nr = 64; 1271 vcpu_book3s->slb_nr = 64;
1065 1272
@@ -1067,23 +1274,24 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
1067 vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem; 1274 vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
1068 vcpu->arch.trampoline_enter = kvmppc_trampoline_enter; 1275 vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
1069 vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem; 1276 vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
1277#ifdef CONFIG_PPC_BOOK3S_64
1070 vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall; 1278 vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
1279#else
1280 vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
1281#endif
1071 1282
1072 vcpu->arch.shadow_msr = MSR_USER64; 1283 vcpu->arch.shadow_msr = MSR_USER64;
1073 1284
1074 err = __init_new_context(); 1285 err = kvmppc_mmu_init(vcpu);
1075 if (err < 0) 1286 if (err < 0)
1076 goto free_vcpu; 1287 goto free_shadow_vcpu;
1077 vcpu_book3s->context_id = err;
1078
1079 vcpu_book3s->vsid_max = ((vcpu_book3s->context_id + 1) << USER_ESID_BITS) - 1;
1080 vcpu_book3s->vsid_first = vcpu_book3s->context_id << USER_ESID_BITS;
1081 vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
1082 1288
1083 return vcpu; 1289 return vcpu;
1084 1290
1291free_shadow_vcpu:
1292 kfree(vcpu_book3s->shadow_vcpu);
1085free_vcpu: 1293free_vcpu:
1086 free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s))); 1294 vfree(vcpu_book3s);
1087out: 1295out:
1088 return ERR_PTR(err); 1296 return ERR_PTR(err);
1089} 1297}
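
Two allocation changes are folded in here: the vcpu_book3s container has become too large for a reliable higher-order __get_free_pages() allocation, so it moves to vmalloc(), and the shadow vcpu is split out into its own kzalloc() buffer - presumably because it is touched with translation off and therefore has to live in the linear mapping, which vmalloc memory does not. A condensed sketch of just the allocate/free pairing that the new free_shadow_vcpu/free_vcpu error path implements:

#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/kvm_book3s.h>

static struct kvmppc_vcpu_book3s *vcpu_book3s_alloc(void)
{
        struct kvmppc_vcpu_book3s *v;

        v = vmalloc(sizeof(*v));        /* large, only virtually contiguous */
        if (!v)
                return NULL;
        memset(v, 0, sizeof(*v));

        v->shadow_vcpu = kzalloc(sizeof(*v->shadow_vcpu), GFP_KERNEL);
        if (!v->shadow_vcpu) {          /* small, linear-mapped */
                vfree(v);
                return NULL;
        }
        return v;
}

static void vcpu_book3s_free(struct kvmppc_vcpu_book3s *v)
{
        kfree(v->shadow_vcpu);          /* reverse order of allocation */
        vfree(v);
}
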
@@ -1092,9 +1300,9 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
1092{ 1300{
1093 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); 1301 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
1094 1302
1095 __destroy_context(vcpu_book3s->context_id);
1096 kvm_vcpu_uninit(vcpu); 1303 kvm_vcpu_uninit(vcpu);
1097 free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s))); 1304 kfree(vcpu_book3s->shadow_vcpu);
1305 vfree(vcpu_book3s);
1098} 1306}
1099 1307
1100extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); 1308extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
@@ -1102,8 +1310,12 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1102{ 1310{
1103 int ret; 1311 int ret;
1104 struct thread_struct ext_bkp; 1312 struct thread_struct ext_bkp;
1313#ifdef CONFIG_ALTIVEC
1105 bool save_vec = current->thread.used_vr; 1314 bool save_vec = current->thread.used_vr;
1315#endif
1316#ifdef CONFIG_VSX
1106 bool save_vsx = current->thread.used_vsr; 1317 bool save_vsx = current->thread.used_vsr;
1318#endif
1107 ulong ext_msr; 1319 ulong ext_msr;
1108 1320
1109 /* No need to go into the guest when all we do is going out */ 1321 /* No need to go into the guest when all we do is going out */
@@ -1144,6 +1356,10 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1144 /* XXX we get called with irq disabled - change that! */ 1356 /* XXX we get called with irq disabled - change that! */
1145 local_irq_enable(); 1357 local_irq_enable();
1146 1358
1359 /* Preload FPU if it's enabled */
1360 if (vcpu->arch.msr & MSR_FP)
1361 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
1362
1147 ret = __kvmppc_vcpu_entry(kvm_run, vcpu); 1363 ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
1148 1364
1149 local_irq_disable(); 1365 local_irq_disable();
@@ -1179,7 +1395,8 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1179 1395
1180static int kvmppc_book3s_init(void) 1396static int kvmppc_book3s_init(void)
1181{ 1397{
1182 return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), THIS_MODULE); 1398 return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
1399 THIS_MODULE);
1183} 1400}
1184 1401
1185static void kvmppc_book3s_exit(void) 1402static void kvmppc_book3s_exit(void)
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index faf99f20d993..0b10503c8a4a 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -37,7 +37,7 @@
37#define dprintk(X...) do { } while(0) 37#define dprintk(X...) do { } while(0)
38#endif 38#endif
39 39
40#ifdef DEBUG_PTE 40#ifdef DEBUG_MMU_PTE
41#define dprintk_pte(X...) printk(KERN_INFO X) 41#define dprintk_pte(X...) printk(KERN_INFO X)
42#else 42#else
43#define dprintk_pte(X...) do { } while(0) 43#define dprintk_pte(X...) do { } while(0)
@@ -45,6 +45,9 @@
45 45
46#define PTEG_FLAG_ACCESSED 0x00000100 46#define PTEG_FLAG_ACCESSED 0x00000100
47#define PTEG_FLAG_DIRTY 0x00000080 47#define PTEG_FLAG_DIRTY 0x00000080
48#ifndef SID_SHIFT
49#define SID_SHIFT 28
50#endif
48 51
49static inline bool check_debug_ip(struct kvm_vcpu *vcpu) 52static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
50{ 53{
@@ -57,6 +60,8 @@ static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
57 60
58static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, 61static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
59 struct kvmppc_pte *pte, bool data); 62 struct kvmppc_pte *pte, bool data);
63static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
64 u64 *vsid);
60 65
61static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr) 66static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr)
62{ 67{
@@ -66,13 +71,14 @@ static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t e
66static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, 71static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
67 bool data) 72 bool data)
68{ 73{
69 struct kvmppc_sr *sre = find_sr(to_book3s(vcpu), eaddr); 74 u64 vsid;
70 struct kvmppc_pte pte; 75 struct kvmppc_pte pte;
71 76
72 if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data)) 77 if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
73 return pte.vpage; 78 return pte.vpage;
74 79
75 return (((u64)eaddr >> 12) & 0xffff) | (((u64)sre->vsid) << 16); 80 kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
81 return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);
76} 82}
77 83
78static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu) 84static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
@@ -142,8 +148,13 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
142 bat->bepi_mask); 148 bat->bepi_mask);
143 } 149 }
144 if ((eaddr & bat->bepi_mask) == bat->bepi) { 150 if ((eaddr & bat->bepi_mask) == bat->bepi) {
151 u64 vsid;
152 kvmppc_mmu_book3s_32_esid_to_vsid(vcpu,
153 eaddr >> SID_SHIFT, &vsid);
154 vsid <<= 16;
155 pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid;
156
145 pte->raddr = bat->brpn | (eaddr & ~bat->bepi_mask); 157 pte->raddr = bat->brpn | (eaddr & ~bat->bepi_mask);
146 pte->vpage = (eaddr >> 12) | VSID_BAT;
147 pte->may_read = bat->pp; 158 pte->may_read = bat->pp;
148 pte->may_write = bat->pp > 1; 159 pte->may_write = bat->pp > 1;
149 pte->may_execute = true; 160 pte->may_execute = true;
@@ -172,7 +183,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
172 struct kvmppc_sr *sre; 183 struct kvmppc_sr *sre;
173 hva_t ptegp; 184 hva_t ptegp;
174 u32 pteg[16]; 185 u32 pteg[16];
175 u64 ptem = 0; 186 u32 ptem = 0;
176 int i; 187 int i;
177 int found = 0; 188 int found = 0;
178 189
@@ -302,6 +313,7 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
302 /* And then put in the new SR */ 313 /* And then put in the new SR */
303 sre->raw = value; 314 sre->raw = value;
304 sre->vsid = (value & 0x0fffffff); 315 sre->vsid = (value & 0x0fffffff);
316 sre->valid = (value & 0x80000000) ? false : true;
305 sre->Ks = (value & 0x40000000) ? true : false; 317 sre->Ks = (value & 0x40000000) ? true : false;
306 sre->Kp = (value & 0x20000000) ? true : false; 318 sre->Kp = (value & 0x20000000) ? true : false;
307 sre->nx = (value & 0x10000000) ? true : false; 319 sre->nx = (value & 0x10000000) ? true : false;
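
The mtsrin emulation simply unpacks the architected segment-register fields; the new valid flag keys off bit 0x80000000 (the T bit), so direct-store segments are treated as unusable for translation. A standalone decode using the same masks, with an arbitrary example value:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

struct sr_fields {
        uint32_t vsid;
        bool valid, ks, kp, nx;
};

static struct sr_fields decode_sr(uint32_t value)
{
        struct sr_fields sr = {
                .vsid  = value & 0x0fffffff,    /* same mask as above */
                .valid = !(value & 0x80000000), /* T=1 -> not usable */
                .ks    = value & 0x40000000,
                .kp    = value & 0x20000000,
                .nx    = value & 0x10000000,
        };
        return sr;
}

int main(void)
{
        struct sr_fields sr = decode_sr(0x20001234);    /* Kp set, VSID 0x1234 */

        printf("vsid=0x%x valid=%d Ks=%d Kp=%d N=%d\n",
               sr.vsid, sr.valid, sr.ks, sr.kp, sr.nx);
        return 0;
}
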
@@ -312,36 +324,48 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
312 324
313static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large) 325static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
314{ 326{
315 kvmppc_mmu_pte_flush(vcpu, ea, ~0xFFFULL); 327 kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000);
316} 328}
317 329
318static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid, 330static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
319 u64 *vsid) 331 u64 *vsid)
320{ 332{
333 ulong ea = esid << SID_SHIFT;
334 struct kvmppc_sr *sr;
335 u64 gvsid = esid;
336
337 if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
338 sr = find_sr(to_book3s(vcpu), ea);
339 if (sr->valid)
340 gvsid = sr->vsid;
341 }
342
321 /* In case we only have one of MSR_IR or MSR_DR set, let's put 343 /* In case we only have one of MSR_IR or MSR_DR set, let's put
322 that in the real-mode context (and hope RM doesn't access 344 that in the real-mode context (and hope RM doesn't access
323 high memory) */ 345 high memory) */
324 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { 346 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
325 case 0: 347 case 0:
326 *vsid = (VSID_REAL >> 16) | esid; 348 *vsid = VSID_REAL | esid;
327 break; 349 break;
328 case MSR_IR: 350 case MSR_IR:
329 *vsid = (VSID_REAL_IR >> 16) | esid; 351 *vsid = VSID_REAL_IR | gvsid;
330 break; 352 break;
331 case MSR_DR: 353 case MSR_DR:
332 *vsid = (VSID_REAL_DR >> 16) | esid; 354 *vsid = VSID_REAL_DR | gvsid;
333 break; 355 break;
334 case MSR_DR|MSR_IR: 356 case MSR_DR|MSR_IR:
335 { 357 if (!sr->valid)
336 ulong ea; 358 return -1;
337 ea = esid << SID_SHIFT; 359
338 *vsid = find_sr(to_book3s(vcpu), ea)->vsid; 360 *vsid = sr->vsid;
339 break; 361 break;
340 }
341 default: 362 default:
342 BUG(); 363 BUG();
343 } 364 }
344 365
366 if (vcpu->arch.msr & MSR_PR)
367 *vsid |= VSID_PR;
368
345 return 0; 369 return 0;
346} 370}
347 371
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
new file mode 100644
index 000000000000..0bb66005338f
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -0,0 +1,483 @@
1/*
2 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
3 *
4 * Authors:
5 * Alexander Graf <agraf@suse.de>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License, version 2, as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20
21#include <linux/kvm_host.h>
22
23#include <asm/kvm_ppc.h>
24#include <asm/kvm_book3s.h>
25#include <asm/mmu-hash32.h>
26#include <asm/machdep.h>
27#include <asm/mmu_context.h>
28#include <asm/hw_irq.h>
29
30/* #define DEBUG_MMU */
31/* #define DEBUG_SR */
32
33#ifdef DEBUG_MMU
34#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
35#else
36#define dprintk_mmu(a, ...) do { } while(0)
37#endif
38
39#ifdef DEBUG_SR
40#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
41#else
42#define dprintk_sr(a, ...) do { } while(0)
43#endif
44
45#if PAGE_SHIFT != 12
46#error Unknown page size
47#endif
48
49#ifdef CONFIG_SMP
50#error XXX need to grab mmu_hash_lock
51#endif
52
53#ifdef CONFIG_PTE_64BIT
54#error Only 32 bit pages are supported for now
55#endif
56
57static ulong htab;
58static u32 htabmask;
59
60static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
61{
62 volatile u32 *pteg;
63
64 dprintk_mmu("KVM: Flushing SPTE: 0x%llx (0x%llx) -> 0x%llx\n",
65 pte->pte.eaddr, pte->pte.vpage, pte->host_va);
66
67 pteg = (u32*)pte->slot;
68
69 pteg[0] = 0;
70 asm volatile ("sync");
71 asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
72 asm volatile ("sync");
73 asm volatile ("tlbsync");
74
75 pte->host_va = 0;
76
77 if (pte->pte.may_write)
78 kvm_release_pfn_dirty(pte->pfn);
79 else
80 kvm_release_pfn_clean(pte->pfn);
81}
82
83void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
84{
85 int i;
86
87 dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%x & 0x%x\n",
88 vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
89 BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
90
91 guest_ea &= ea_mask;
92 for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
93 struct hpte_cache *pte;
94
95 pte = &vcpu->arch.hpte_cache[i];
96 if (!pte->host_va)
97 continue;
98
99 if ((pte->pte.eaddr & ea_mask) == guest_ea) {
100 invalidate_pte(vcpu, pte);
101 }
102 }
103
104 /* Doing a complete flush -> start from scratch */
105 if (!ea_mask)
106 vcpu->arch.hpte_cache_offset = 0;
107}
108
109void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
110{
111 int i;
112
113 dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
114 vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
115 BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
116
117 guest_vp &= vp_mask;
118 for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
119 struct hpte_cache *pte;
120
121 pte = &vcpu->arch.hpte_cache[i];
122 if (!pte->host_va)
123 continue;
124
125 if ((pte->pte.vpage & vp_mask) == guest_vp) {
126 invalidate_pte(vcpu, pte);
127 }
128 }
129}
130
131void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
132{
133 int i;
134
135 dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n",
136 vcpu->arch.hpte_cache_offset, pa_start, pa_end);
137 BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
138
139 for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
140 struct hpte_cache *pte;
141
142 pte = &vcpu->arch.hpte_cache[i];
143 if (!pte->host_va)
144 continue;
145
146 if ((pte->pte.raddr >= pa_start) &&
147 (pte->pte.raddr < pa_end)) {
148 invalidate_pte(vcpu, pte);
149 }
150 }
151}
152
153struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data)
154{
155 int i;
156 u64 guest_vp;
157
158 guest_vp = vcpu->arch.mmu.ea_to_vp(vcpu, ea, false);
159 for (i=0; i<vcpu->arch.hpte_cache_offset; i++) {
160 struct hpte_cache *pte;
161
162 pte = &vcpu->arch.hpte_cache[i];
163 if (!pte->host_va)
164 continue;
165
166 if (pte->pte.vpage == guest_vp)
167 return &pte->pte;
168 }
169
170 return NULL;
171}
172
173static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
174{
175 if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
176 kvmppc_mmu_pte_flush(vcpu, 0, 0);
177
178 return vcpu->arch.hpte_cache_offset++;
179}
180
181/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
182 * a hash, so we don't waste cycles on looping */
183static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
184{
185 return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
186 ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
187 ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
188 ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
189 ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
190 ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
191 ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
192 ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
193}
194
195
196static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
197{
198 struct kvmppc_sid_map *map;
199 u16 sid_map_mask;
200
201 if (vcpu->arch.msr & MSR_PR)
202 gvsid |= VSID_PR;
203
204 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
205 map = &to_book3s(vcpu)->sid_map[sid_map_mask];
206 if (map->guest_vsid == gvsid) {
207 dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
208 gvsid, map->host_vsid);
209 return map;
210 }
211
212 map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
213 if (map->guest_vsid == gvsid) {
214 dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
215 gvsid, map->host_vsid);
216 return map;
217 }
218
219 dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
220 return NULL;
221}
222
223static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
224 bool primary)
225{
226 u32 page, hash;
227 ulong pteg = htab;
228
229 page = (eaddr & ~ESID_MASK) >> 12;
230
231 hash = ((vsid ^ page) << 6);
232 if (!primary)
233 hash = ~hash;
234
235 hash &= htabmask;
236
237 pteg |= hash;
238
239 dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
240 htab, hash, htabmask, pteg);
241
242 return (u32*)pteg;
243}
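
kvmppc_mmu_get_pteg() is the classic 32-bit hashed-page-table lookup: the primary hash is VSID XOR page index, the secondary hash is its one's complement, and htabmask (derived from SDR1 in kvmppc_mmu_init() at the end of this file) decides how much of the hash selects one of the 64-byte PTE groups. The same arithmetic as a standalone program, with made-up example values:

#include <stdint.h>
#include <stdio.h>

static uintptr_t get_pteg(uintptr_t htab, uint32_t htabmask,
                          uint32_t vsid, uint32_t eaddr, int primary)
{
        uint32_t page = (eaddr & 0x0fffffff) >> 12;     /* page index in segment */
        uint32_t hash = (vsid ^ page) << 6;             /* one PTEG = 64 bytes */

        if (!primary)
                hash = ~hash;                           /* secondary hash */

        return htab | (hash & htabmask);
}

int main(void)
{
        /* Example only: a 64k HTAB mapped at 0xc0300000, htabmask = 0xffc0,
         * which is what the SDR1 decoding below produces for that size. */
        uintptr_t htab = 0xc0300000;
        uint32_t htabmask = 0xffc0;

        printf("primary   PTEG at 0x%lx\n",
               (unsigned long)get_pteg(htab, htabmask, 0x123456, 0x00fe0000, 1));
        printf("secondary PTEG at 0x%lx\n",
               (unsigned long)get_pteg(htab, htabmask, 0x123456, 0x00fe0000, 0));
        return 0;
}
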
244
245extern char etext[];
246
247int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
248{
249 pfn_t hpaddr;
250 u64 va;
251 u64 vsid;
252 struct kvmppc_sid_map *map;
253 volatile u32 *pteg;
254 u32 eaddr = orig_pte->eaddr;
255 u32 pteg0, pteg1;
256 register int rr = 0;
257 bool primary = false;
258 bool evict = false;
259 int hpte_id;
260 struct hpte_cache *pte;
261
262 /* Get host physical address for gpa */
263 hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
264 if (kvm_is_error_hva(hpaddr)) {
265 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
266 orig_pte->eaddr);
267 return -EINVAL;
268 }
269 hpaddr <<= PAGE_SHIFT;
270
271 /* and write the mapping ea -> hpa into the pt */
272 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
273 map = find_sid_vsid(vcpu, vsid);
274 if (!map) {
275 kvmppc_mmu_map_segment(vcpu, eaddr);
276 map = find_sid_vsid(vcpu, vsid);
277 }
278 BUG_ON(!map);
279
280 vsid = map->host_vsid;
281 va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK);
282
283next_pteg:
284 if (rr == 16) {
285 primary = !primary;
286 evict = true;
287 rr = 0;
288 }
289
290 pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);
291
292 /* not evicting yet */
293 if (!evict && (pteg[rr] & PTE_V)) {
294 rr += 2;
295 goto next_pteg;
296 }
297
298 dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
299 dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
300 dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
301 dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
302 dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
303 dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
304 dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
305 dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
306 dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);
307
308 pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
309 (primary ? 0 : PTE_SEC);
310 pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;
311
312 if (orig_pte->may_write) {
313 pteg1 |= PP_RWRW;
314 mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
315 } else {
316 pteg1 |= PP_RWRX;
317 }
318
319 local_irq_disable();
320
321 if (pteg[rr]) {
322 pteg[rr] = 0;
323 asm volatile ("sync");
324 }
325 pteg[rr + 1] = pteg1;
326 pteg[rr] = pteg0;
327 asm volatile ("sync");
328
329 local_irq_enable();
330
331 dprintk_mmu("KVM: new PTEG: %p\n", pteg);
332 dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
333 dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
334 dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
335 dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
336 dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
337 dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
338 dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
339 dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);
340
341
342 /* Now tell our Shadow PTE code about the new page */
343
344 hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
345 pte = &vcpu->arch.hpte_cache[hpte_id];
346
347 dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
348 orig_pte->may_write ? 'w' : '-',
349 orig_pte->may_execute ? 'x' : '-',
350 orig_pte->eaddr, (ulong)pteg, va,
351 orig_pte->vpage, hpaddr);
352
353 pte->slot = (ulong)&pteg[rr];
354 pte->host_va = va;
355 pte->pte = *orig_pte;
356 pte->pfn = hpaddr >> PAGE_SHIFT;
357
358 return 0;
359}
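
The two 32-bit words written into the PTEG follow the architected HPTE layout: word 0 carries V, the 24-bit VSID, H (secondary-group flag) and the abbreviated page index, word 1 carries the real page number plus R, C, the WIMG bits and the 2-bit PP protection field. A worked sketch of the same composition; the PTE_* positions are the architected ones and are spelled out here only so the example is self-contained:

#include <stdint.h>
#include <stdio.h>

#define PTE_V   0x80000000      /* valid */
#define PTE_SEC 0x00000040      /* H: entry sits in the secondary PTEG */
#define PTE_R   0x00000100      /* referenced */
#define PTE_C   0x00000080      /* changed */
#define PTE_M   0x00000010      /* memory coherence (the M of WIMG) */

static void make_hpte(uint32_t eaddr, uint32_t vsid, uint32_t hpaddr,
                      int primary, uint32_t pp, uint32_t *w0, uint32_t *w1)
{
        *w0 = ((eaddr & 0x0fffffff) >> 22)      /* API: EA bits 4..9 */
            | (vsid << 7)
            | PTE_V
            | (primary ? 0 : PTE_SEC);
        /* pp is the raw 2-bit protection encoding (PP_RWRW/PP_RWRX above). */
        *w1 = (hpaddr & ~0xfffu) | PTE_M | PTE_R | PTE_C | (pp & 3);
}

int main(void)
{
        uint32_t w0, w1;

        /* PP = 2 is the architected "read/write in both states" encoding. */
        make_hpte(0x10345000, 0x123456, 0x07654000, 1, 2, &w0, &w1);
        printf("pteg0=0x%08x pteg1=0x%08x\n", w0, w1);
        return 0;
}
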
360
361static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
362{
363 struct kvmppc_sid_map *map;
364 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
365 u16 sid_map_mask;
366 static int backwards_map = 0;
367
368 if (vcpu->arch.msr & MSR_PR)
369 gvsid |= VSID_PR;
370
371 /* We might get collisions that trap in preceding order, so let's
372 map them differently */
373
374 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
375 if (backwards_map)
376 sid_map_mask = SID_MAP_MASK - sid_map_mask;
377
378 map = &to_book3s(vcpu)->sid_map[sid_map_mask];
379
380 /* Make sure we're taking the other map next time */
381 backwards_map = !backwards_map;
382
383 /* Uh-oh ... out of mappings. Let's flush! */
384 if (vcpu_book3s->vsid_next >= vcpu_book3s->vsid_max) {
385 vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
386 memset(vcpu_book3s->sid_map, 0,
387 sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
388 kvmppc_mmu_pte_flush(vcpu, 0, 0);
389 kvmppc_mmu_flush_segments(vcpu);
390 }
391 map->host_vsid = vcpu_book3s->vsid_next;
392
393 /* Would have to be 111 to be completely aligned with the rest of
394 Linux, but that is just way too little space! */
395 vcpu_book3s->vsid_next+=1;
396
397 map->guest_vsid = gvsid;
398 map->valid = true;
399
400 return map;
401}
402
403int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
404{
405 u32 esid = eaddr >> SID_SHIFT;
406 u64 gvsid;
407 u32 sr;
408 struct kvmppc_sid_map *map;
409 struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
410
411 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
412 /* Invalidate an entry */
413 svcpu->sr[esid] = SR_INVALID;
414 return -ENOENT;
415 }
416
417 map = find_sid_vsid(vcpu, gvsid);
418 if (!map)
419 map = create_sid_map(vcpu, gvsid);
420
421 map->guest_esid = esid;
422 sr = map->host_vsid | SR_KP;
423 svcpu->sr[esid] = sr;
424
425 dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);
426
427 return 0;
428}
429
430void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
431{
432 int i;
433 struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
434
435 dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
436 for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
437 svcpu->sr[i] = SR_INVALID;
438}
439
440void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
441{
442 kvmppc_mmu_pte_flush(vcpu, 0, 0);
443 preempt_disable();
444 __destroy_context(to_book3s(vcpu)->context_id);
445 preempt_enable();
446}
447
448/* From mm/mmu_context_hash32.c */
449#define CTX_TO_VSID(ctx) (((ctx) * (897 * 16)) & 0xffffff)
450
451int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
452{
453 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
454 int err;
455 ulong sdr1;
456
457 err = __init_new_context();
458 if (err < 0)
459 return -1;
460 vcpu3s->context_id = err;
461
462 vcpu3s->vsid_max = CTX_TO_VSID(vcpu3s->context_id + 1) - 1;
463 vcpu3s->vsid_first = CTX_TO_VSID(vcpu3s->context_id);
464
465#if 0 /* XXX still doesn't guarantee uniqueness */
466 /* We could collide with the Linux vsid space because the vsid
467 * wraps around at 24 bits. We're safe if we do our own space
468 * though, so let's always set the highest bit. */
469
470 vcpu3s->vsid_max |= 0x00800000;
471 vcpu3s->vsid_first |= 0x00800000;
472#endif
473 BUG_ON(vcpu3s->vsid_max < vcpu3s->vsid_first);
474
475 vcpu3s->vsid_next = vcpu3s->vsid_first;
476
477 /* Remember where the HTAB is */
478 asm ( "mfsdr1 %0" : "=r"(sdr1) );
479 htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
480 htab = (ulong)__va(sdr1 & 0xffff0000);
481
482 return 0;
483}
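
kvmppc_mmu_init() recovers the host hash table straight from SDR1 (HTABORG in the upper 16 bits, HTABMASK in the low 9) and carves its VSID range out of a freshly allocated MMU context using the CTX_TO_VSID formula borrowed from mm/mmu_context_hash32.c. Both computations as a standalone illustration; the SDR1 value and context id are examples, not real hardware state:

#include <stdint.h>
#include <stdio.h>

#define CTX_TO_VSID(ctx)  (((ctx) * (897 * 16)) & 0xffffff)

int main(void)
{
        uint32_t sdr1 = 0x00300003;     /* example: HTABORG=0x0030, HTABMASK=3 */
        uint32_t htabmask = ((sdr1 & 0x1ff) << 16) | 0xffc0;
        uint32_t htab_phys = sdr1 & 0xffff0000; /* the kernel then maps it with __va() */
        int ctx = 5;                    /* example id from __init_new_context() */

        printf("HTAB at phys 0x%08x, PTEG-select mask 0x%08x\n",
               htab_phys, htabmask);
        printf("vsid_first=0x%06x vsid_max=0x%06x\n",
               CTX_TO_VSID(ctx), CTX_TO_VSID(ctx + 1) - 1);
        return 0;
}
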
diff --git a/arch/powerpc/kvm/book3s_32_sr.S b/arch/powerpc/kvm/book3s_32_sr.S
new file mode 100644
index 000000000000..3608471ad2d8
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_32_sr.S
@@ -0,0 +1,143 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright SUSE Linux Products GmbH 2009
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20/******************************************************************************
21 * *
22 * Entry code *
23 * *
24 *****************************************************************************/
25
26.macro LOAD_GUEST_SEGMENTS
27
28 /* Required state:
29 *
30 * MSR = ~IR|DR
31 * R1 = host R1
32 * R2 = host R2
33 * R3 = shadow vcpu
34 * all other volatile GPRS = free
35 * SVCPU[CR] = guest CR
36 * SVCPU[XER] = guest XER
37 * SVCPU[CTR] = guest CTR
38 * SVCPU[LR] = guest LR
39 */
40
41#define XCHG_SR(n) lwz r9, (SVCPU_SR+(n*4))(r3); \
42 mtsr n, r9
43
44 XCHG_SR(0)
45 XCHG_SR(1)
46 XCHG_SR(2)
47 XCHG_SR(3)
48 XCHG_SR(4)
49 XCHG_SR(5)
50 XCHG_SR(6)
51 XCHG_SR(7)
52 XCHG_SR(8)
53 XCHG_SR(9)
54 XCHG_SR(10)
55 XCHG_SR(11)
56 XCHG_SR(12)
57 XCHG_SR(13)
58 XCHG_SR(14)
59 XCHG_SR(15)
60
61 /* Clear BATs. */
62
63#define KVM_KILL_BAT(n, reg) \
64 mtspr SPRN_IBAT##n##U,reg; \
65 mtspr SPRN_IBAT##n##L,reg; \
66 mtspr SPRN_DBAT##n##U,reg; \
67 mtspr SPRN_DBAT##n##L,reg; \
68
69 li r9, 0
70 KVM_KILL_BAT(0, r9)
71 KVM_KILL_BAT(1, r9)
72 KVM_KILL_BAT(2, r9)
73 KVM_KILL_BAT(3, r9)
74
75.endm
76
77/******************************************************************************
78 * *
79 * Exit code *
80 * *
81 *****************************************************************************/
82
83.macro LOAD_HOST_SEGMENTS
84
85 /* Register usage at this point:
86 *
87 * R1 = host R1
88 * R2 = host R2
89 * R12 = exit handler id
90 * R13 = shadow vcpu - SHADOW_VCPU_OFF
91 * SVCPU.* = guest *
92 * SVCPU[CR] = guest CR
93 * SVCPU[XER] = guest XER
94 * SVCPU[CTR] = guest CTR
95 * SVCPU[LR] = guest LR
96 *
97 */
98
99 /* Restore BATs */
100
 101 /* We only overwrite the upper part, so we only restore
 102 the upper part. */
103#define KVM_LOAD_BAT(n, reg, RA, RB) \
104 lwz RA,(n*16)+0(reg); \
105 lwz RB,(n*16)+4(reg); \
106 mtspr SPRN_IBAT##n##U,RA; \
107 mtspr SPRN_IBAT##n##L,RB; \
108 lwz RA,(n*16)+8(reg); \
109 lwz RB,(n*16)+12(reg); \
110 mtspr SPRN_DBAT##n##U,RA; \
111 mtspr SPRN_DBAT##n##L,RB; \
112
113 lis r9, BATS@ha
114 addi r9, r9, BATS@l
115 tophys(r9, r9)
116 KVM_LOAD_BAT(0, r9, r10, r11)
117 KVM_LOAD_BAT(1, r9, r10, r11)
118 KVM_LOAD_BAT(2, r9, r10, r11)
119 KVM_LOAD_BAT(3, r9, r10, r11)
120
121 /* Restore Segment Registers */
122
123 /* 0xc - 0xf */
124
125 li r0, 4
126 mtctr r0
127 LOAD_REG_IMMEDIATE(r3, 0x20000000 | (0x111 * 0xc))
128 lis r4, 0xc000
1293: mtsrin r3, r4
130 addi r3, r3, 0x111 /* increment VSID */
131 addis r4, r4, 0x1000 /* address of next segment */
132 bdnz 3b
133
134 /* 0x0 - 0xb */
135
136 /* 'current->mm' needs to be in r4 */
137 tophys(r4, r2)
138 lwz r4, MM(r4)
139 tophys(r4, r4)
140 /* This only clobbers r0, r3, r4 and r5 */
141 bl switch_mmu_context
142
143.endm
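
On exit, the four kernel segments (0xc-0xf) are rebuilt by hand before switch_mmu_context() reloads the user ones: each segment register gets Kp (0x20000000) and a VSID of 0x111 times the segment number, the same per-segment stride the 32-bit kernel's own early segment setup uses. A tiny C rendering of what the mtsrin loop above computes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t sr = 0x20000000 | (0x111 * 0xc);  /* Kp set, VSID = 0x111 * segment */
        uint32_t ea = 0xc0000000;
        int i;

        /* Segments 0xc..0xf of the kernel linear map, as in the loop above. */
        for (i = 0; i < 4; i++) {
                printf("SR[%x] (ea 0x%08x) = 0x%08x\n", ea >> 28, ea, sr);
                sr += 0x111;            /* addi r3, r3, 0x111 */
                ea += 0x10000000;       /* addis r4, r4, 0x1000 */
        }
        return 0;
}
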
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 512dcff77554..4025ea26b3c1 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -232,7 +232,7 @@ do_second:
232 } 232 }
233 233
234 dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx " 234 dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
235 "-> 0x%llx\n", 235 "-> 0x%lx\n",
236 eaddr, avpn, gpte->vpage, gpte->raddr); 236 eaddr, avpn, gpte->vpage, gpte->raddr);
237 found = true; 237 found = true;
238 break; 238 break;
@@ -383,7 +383,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
383 383
384 if (vcpu->arch.msr & MSR_IR) { 384 if (vcpu->arch.msr & MSR_IR) {
385 kvmppc_mmu_flush_segments(vcpu); 385 kvmppc_mmu_flush_segments(vcpu);
386 kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc); 386 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
387 } 387 }
388} 388}
389 389
@@ -439,37 +439,43 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
439 kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask); 439 kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
440} 440}
441 441
442static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid, 442static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
443 u64 *vsid) 443 u64 *vsid)
444{ 444{
445 ulong ea = esid << SID_SHIFT;
446 struct kvmppc_slb *slb;
447 u64 gvsid = esid;
448
449 if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
450 slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
451 if (slb)
452 gvsid = slb->vsid;
453 }
454
445 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { 455 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
446 case 0: 456 case 0:
447 *vsid = (VSID_REAL >> 16) | esid; 457 *vsid = VSID_REAL | esid;
448 break; 458 break;
449 case MSR_IR: 459 case MSR_IR:
450 *vsid = (VSID_REAL_IR >> 16) | esid; 460 *vsid = VSID_REAL_IR | gvsid;
451 break; 461 break;
452 case MSR_DR: 462 case MSR_DR:
453 *vsid = (VSID_REAL_DR >> 16) | esid; 463 *vsid = VSID_REAL_DR | gvsid;
454 break; 464 break;
455 case MSR_DR|MSR_IR: 465 case MSR_DR|MSR_IR:
456 { 466 if (!slb)
457 ulong ea;
458 struct kvmppc_slb *slb;
459 ea = esid << SID_SHIFT;
460 slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
461 if (slb)
462 *vsid = slb->vsid;
463 else
464 return -ENOENT; 467 return -ENOENT;
465 468
469 *vsid = gvsid;
466 break; 470 break;
467 }
468 default: 471 default:
469 BUG(); 472 BUG();
470 break; 473 break;
471 } 474 }
472 475
476 if (vcpu->arch.msr & MSR_PR)
477 *vsid |= VSID_PR;
478
473 return 0; 479 return 0;
474} 480}
475 481
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index f2899b297ffd..e4b5744977f6 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -48,21 +48,25 @@
48 48
49static void invalidate_pte(struct hpte_cache *pte) 49static void invalidate_pte(struct hpte_cache *pte)
50{ 50{
51 dprintk_mmu("KVM: Flushing SPT %d: 0x%llx (0x%llx) -> 0x%llx\n", 51 dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
52 i, pte->pte.eaddr, pte->pte.vpage, pte->host_va); 52 pte->pte.eaddr, pte->pte.vpage, pte->host_va);
53 53
54 ppc_md.hpte_invalidate(pte->slot, pte->host_va, 54 ppc_md.hpte_invalidate(pte->slot, pte->host_va,
55 MMU_PAGE_4K, MMU_SEGSIZE_256M, 55 MMU_PAGE_4K, MMU_SEGSIZE_256M,
56 false); 56 false);
57 pte->host_va = 0; 57 pte->host_va = 0;
58 kvm_release_pfn_dirty(pte->pfn); 58
59 if (pte->pte.may_write)
60 kvm_release_pfn_dirty(pte->pfn);
61 else
62 kvm_release_pfn_clean(pte->pfn);
59} 63}
60 64
61void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 guest_ea, u64 ea_mask) 65void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
62{ 66{
63 int i; 67 int i;
64 68
65 dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%llx & 0x%llx\n", 69 dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
66 vcpu->arch.hpte_cache_offset, guest_ea, ea_mask); 70 vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
67 BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM); 71 BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
68 72
@@ -106,12 +110,12 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
106 } 110 }
107} 111}
108 112
109void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end) 113void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
110{ 114{
111 int i; 115 int i;
112 116
113 dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n", 117 dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx & 0x%lx\n",
114 vcpu->arch.hpte_cache_offset, guest_pa, pa_mask); 118 vcpu->arch.hpte_cache_offset, pa_start, pa_end);
115 BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM); 119 BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
116 120
117 for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) { 121 for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
@@ -182,7 +186,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
182 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); 186 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
183 map = &to_book3s(vcpu)->sid_map[sid_map_mask]; 187 map = &to_book3s(vcpu)->sid_map[sid_map_mask];
184 if (map->guest_vsid == gvsid) { 188 if (map->guest_vsid == gvsid) {
185 dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n", 189 dprintk_slb("SLB: Searching: 0x%llx -> 0x%llx\n",
186 gvsid, map->host_vsid); 190 gvsid, map->host_vsid);
187 return map; 191 return map;
188 } 192 }
@@ -194,7 +198,8 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
194 return map; 198 return map;
195 } 199 }
196 200
197 dprintk_slb("SLB: Searching 0x%llx -> not found\n", gvsid); 201 dprintk_slb("SLB: Searching %d/%d: 0x%llx -> not found\n",
202 sid_map_mask, SID_MAP_MASK - sid_map_mask, gvsid);
198 return NULL; 203 return NULL;
199} 204}
200 205
@@ -212,7 +217,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
212 /* Get host physical address for gpa */ 217 /* Get host physical address for gpa */
213 hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); 218 hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
214 if (kvm_is_error_hva(hpaddr)) { 219 if (kvm_is_error_hva(hpaddr)) {
215 printk(KERN_INFO "Couldn't get guest page for gfn %llx!\n", orig_pte->eaddr); 220 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
216 return -EINVAL; 221 return -EINVAL;
217 } 222 }
218 hpaddr <<= PAGE_SHIFT; 223 hpaddr <<= PAGE_SHIFT;
@@ -227,10 +232,16 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
227 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); 232 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
228 map = find_sid_vsid(vcpu, vsid); 233 map = find_sid_vsid(vcpu, vsid);
229 if (!map) { 234 if (!map) {
230 kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr); 235 ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
236 WARN_ON(ret < 0);
231 map = find_sid_vsid(vcpu, vsid); 237 map = find_sid_vsid(vcpu, vsid);
232 } 238 }
233 BUG_ON(!map); 239 if (!map) {
240 printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
241 vsid, orig_pte->eaddr);
242 WARN_ON(true);
243 return -EINVAL;
244 }
234 245
235 vsid = map->host_vsid; 246 vsid = map->host_vsid;
236 va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M); 247 va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
@@ -257,26 +268,26 @@ map_again:
257 268
258 if (ret < 0) { 269 if (ret < 0) {
259 /* If we couldn't map a primary PTE, try a secondary */ 270 /* If we couldn't map a primary PTE, try a secondary */
260#ifdef USE_SECONDARY
261 hash = ~hash; 271 hash = ~hash;
272 vflags ^= HPTE_V_SECONDARY;
262 attempt++; 273 attempt++;
263 if (attempt % 2)
264 vflags = HPTE_V_SECONDARY;
265 else
266 vflags = 0;
267#else
268 attempt = 2;
269#endif
270 goto map_again; 274 goto map_again;
271 } else { 275 } else {
272 int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu); 276 int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
273 struct hpte_cache *pte = &vcpu->arch.hpte_cache[hpte_id]; 277 struct hpte_cache *pte = &vcpu->arch.hpte_cache[hpte_id];
274 278
275 dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%lx (0x%llx) -> %lx\n", 279 dprintk_mmu("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx\n",
276 ((rflags & HPTE_R_PP) == 3) ? '-' : 'w', 280 ((rflags & HPTE_R_PP) == 3) ? '-' : 'w',
277 (rflags & HPTE_R_N) ? '-' : 'x', 281 (rflags & HPTE_R_N) ? '-' : 'x',
278 orig_pte->eaddr, hpteg, va, orig_pte->vpage, hpaddr); 282 orig_pte->eaddr, hpteg, va, orig_pte->vpage, hpaddr);
279 283
284 /* The ppc_md code may give us a secondary entry even though we
285 asked for a primary. Fix up. */
286 if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
287 hash = ~hash;
288 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
289 }
290
280 pte->slot = hpteg + (ret & 7); 291 pte->slot = hpteg + (ret & 7);
281 pte->host_va = va; 292 pte->host_va = va;
282 pte->pte = *orig_pte; 293 pte->pte = *orig_pte;
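
Two things change in the 64-bit insert path: the secondary-hash retry is now unconditional (complement the hash, toggle HPTE_V_SECONDARY, try again), and since ppc_md.hpte_insert() may place the entry in the secondary group even when asked for the primary, the cached slot has to be re-derived from the return value. A compact sketch of that slot bookkeeping, with htab_hash_mask treated as given and _PTEIDX_SECONDARY assumed to be the secondary flag bit of the insert result:

#include <stdio.h>

#define HPTES_PER_GROUP         8
#define _PTEIDX_SECONDARY       0x8     /* assumed flag bit in hpte_insert()'s result */

static unsigned long hpte_slot(unsigned long hash, unsigned long htab_hash_mask,
                               long ret, int asked_secondary)
{
        if ((ret & _PTEIDX_SECONDARY) && !asked_secondary)
                hash = ~hash;           /* entry actually went to the other PTEG */

        return (hash & htab_hash_mask) * HPTES_PER_GROUP + (ret & 7);
}

int main(void)
{
        /* Asked for primary, got slot 3 of the secondary group. */
        printf("slot = %lu\n", hpte_slot(0x1234, 0xfffff, _PTEIDX_SECONDARY | 3, 0));
        return 0;
}
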
@@ -321,6 +332,9 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
321 map->guest_vsid = gvsid; 332 map->guest_vsid = gvsid;
322 map->valid = true; 333 map->valid = true;
323 334
335 dprintk_slb("SLB: New mapping at %d: 0x%llx -> 0x%llx\n",
336 sid_map_mask, gvsid, map->host_vsid);
337
324 return map; 338 return map;
325} 339}
326 340
@@ -331,14 +345,14 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
331 int found_inval = -1; 345 int found_inval = -1;
332 int r; 346 int r;
333 347
334 if (!get_paca()->kvm_slb_max) 348 if (!to_svcpu(vcpu)->slb_max)
335 get_paca()->kvm_slb_max = 1; 349 to_svcpu(vcpu)->slb_max = 1;
336 350
337 /* Are we overwriting? */ 351 /* Are we overwriting? */
338 for (i = 1; i < get_paca()->kvm_slb_max; i++) { 352 for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) {
339 if (!(get_paca()->kvm_slb[i].esid & SLB_ESID_V)) 353 if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V))
340 found_inval = i; 354 found_inval = i;
341 else if ((get_paca()->kvm_slb[i].esid & ESID_MASK) == esid) 355 else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid)
342 return i; 356 return i;
343 } 357 }
344 358
@@ -352,11 +366,11 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
352 max_slb_size = mmu_slb_size; 366 max_slb_size = mmu_slb_size;
353 367
354 /* Overflowing -> purge */ 368 /* Overflowing -> purge */
355 if ((get_paca()->kvm_slb_max) == max_slb_size) 369 if ((to_svcpu(vcpu)->slb_max) == max_slb_size)
356 kvmppc_mmu_flush_segments(vcpu); 370 kvmppc_mmu_flush_segments(vcpu);
357 371
358 r = get_paca()->kvm_slb_max; 372 r = to_svcpu(vcpu)->slb_max;
359 get_paca()->kvm_slb_max++; 373 to_svcpu(vcpu)->slb_max++;
360 374
361 return r; 375 return r;
362} 376}
@@ -374,7 +388,7 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
374 388
375 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { 389 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
376 /* Invalidate an entry */ 390 /* Invalidate an entry */
377 get_paca()->kvm_slb[slb_index].esid = 0; 391 to_svcpu(vcpu)->slb[slb_index].esid = 0;
378 return -ENOENT; 392 return -ENOENT;
379 } 393 }
380 394
@@ -388,8 +402,8 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
388 slb_vsid &= ~SLB_VSID_KP; 402 slb_vsid &= ~SLB_VSID_KP;
389 slb_esid |= slb_index; 403 slb_esid |= slb_index;
390 404
391 get_paca()->kvm_slb[slb_index].esid = slb_esid; 405 to_svcpu(vcpu)->slb[slb_index].esid = slb_esid;
392 get_paca()->kvm_slb[slb_index].vsid = slb_vsid; 406 to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid;
393 407
394 dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid); 408 dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid);
395 409
@@ -398,11 +412,29 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
398 412
399void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) 413void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
400{ 414{
401 get_paca()->kvm_slb_max = 1; 415 to_svcpu(vcpu)->slb_max = 1;
402 get_paca()->kvm_slb[0].esid = 0; 416 to_svcpu(vcpu)->slb[0].esid = 0;
403} 417}
404 418
405void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) 419void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
406{ 420{
407 kvmppc_mmu_pte_flush(vcpu, 0, 0); 421 kvmppc_mmu_pte_flush(vcpu, 0, 0);
422 __destroy_context(to_book3s(vcpu)->context_id);
423}
424
425int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
426{
427 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
428 int err;
429
430 err = __init_new_context();
431 if (err < 0)
432 return -1;
433 vcpu3s->context_id = err;
434
435 vcpu3s->vsid_max = ((vcpu3s->context_id + 1) << USER_ESID_BITS) - 1;
436 vcpu3s->vsid_first = vcpu3s->context_id << USER_ESID_BITS;
437 vcpu3s->vsid_next = vcpu3s->vsid_first;
438
439 return 0;
408} 440}
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 35b762722187..04e7d3bbfe8b 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -44,8 +44,7 @@ slb_exit_skip_ ## num:
44 * * 44 * *
45 *****************************************************************************/ 45 *****************************************************************************/
46 46
47.global kvmppc_handler_trampoline_enter 47.macro LOAD_GUEST_SEGMENTS
48kvmppc_handler_trampoline_enter:
49 48
50 /* Required state: 49 /* Required state:
51 * 50 *
@@ -53,20 +52,14 @@ kvmppc_handler_trampoline_enter:
53 * R13 = PACA 52 * R13 = PACA
54 * R1 = host R1 53 * R1 = host R1
55 * R2 = host R2 54 * R2 = host R2
56 * R9 = guest IP 55 * R3 = shadow vcpu
57 * R10 = guest MSR 56 * all other volatile GPRS = free
58 * all other GPRS = free 57 * SVCPU[CR] = guest CR
59 * PACA[KVM_CR] = guest CR 58 * SVCPU[XER] = guest XER
60 * PACA[KVM_XER] = guest XER 59 * SVCPU[CTR] = guest CTR
60 * SVCPU[LR] = guest LR
61 */ 61 */
62 62
63 mtsrr0 r9
64 mtsrr1 r10
65
66 /* Activate guest mode, so faults get handled by KVM */
67 li r11, KVM_GUEST_MODE_GUEST
68 stb r11, PACA_KVM_IN_GUEST(r13)
69
70 /* Remove LPAR shadow entries */ 63 /* Remove LPAR shadow entries */
71 64
72#if SLB_NUM_BOLTED == 3 65#if SLB_NUM_BOLTED == 3
@@ -101,14 +94,14 @@ kvmppc_handler_trampoline_enter:
101 94
102 /* Fill SLB with our shadow */ 95 /* Fill SLB with our shadow */
103 96
104 lbz r12, PACA_KVM_SLB_MAX(r13) 97 lbz r12, SVCPU_SLB_MAX(r3)
105 mulli r12, r12, 16 98 mulli r12, r12, 16
106 addi r12, r12, PACA_KVM_SLB 99 addi r12, r12, SVCPU_SLB
107 add r12, r12, r13 100 add r12, r12, r3
108 101
109 /* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */ 102 /* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
110 li r11, PACA_KVM_SLB 103 li r11, SVCPU_SLB
111 add r11, r11, r13 104 add r11, r11, r3
112 105
113slb_loop_enter: 106slb_loop_enter:
114 107
@@ -127,34 +120,7 @@ slb_loop_enter_skip:
127 120
128slb_do_enter: 121slb_do_enter:
129 122
130 /* Enter guest */ 123.endm
131
132 ld r0, (PACA_KVM_R0)(r13)
133 ld r1, (PACA_KVM_R1)(r13)
134 ld r2, (PACA_KVM_R2)(r13)
135 ld r3, (PACA_KVM_R3)(r13)
136 ld r4, (PACA_KVM_R4)(r13)
137 ld r5, (PACA_KVM_R5)(r13)
138 ld r6, (PACA_KVM_R6)(r13)
139 ld r7, (PACA_KVM_R7)(r13)
140 ld r8, (PACA_KVM_R8)(r13)
141 ld r9, (PACA_KVM_R9)(r13)
142 ld r10, (PACA_KVM_R10)(r13)
143 ld r12, (PACA_KVM_R12)(r13)
144
145 lwz r11, (PACA_KVM_CR)(r13)
146 mtcr r11
147
148 ld r11, (PACA_KVM_XER)(r13)
149 mtxer r11
150
151 ld r11, (PACA_KVM_R11)(r13)
152 ld r13, (PACA_KVM_R13)(r13)
153
154 RFI
155kvmppc_handler_trampoline_enter_end:
156
157
158 124
159/****************************************************************************** 125/******************************************************************************
160 * * 126 * *
@@ -162,99 +128,22 @@ kvmppc_handler_trampoline_enter_end:
162 * * 128 * *
163 *****************************************************************************/ 129 *****************************************************************************/
164 130
165.global kvmppc_handler_trampoline_exit 131.macro LOAD_HOST_SEGMENTS
166kvmppc_handler_trampoline_exit:
167 132
168 /* Register usage at this point: 133 /* Register usage at this point:
169 * 134 *
170 * SPRG_SCRATCH0 = guest R13 135 * R1 = host R1
171 * R12 = exit handler id 136 * R2 = host R2
172 * R13 = PACA 137 * R12 = exit handler id
173 * PACA.KVM.SCRATCH0 = guest R12 138 * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
174 * PACA.KVM.SCRATCH1 = guest CR 139 * SVCPU.* = guest *
140 * SVCPU[CR] = guest CR
141 * SVCPU[XER] = guest XER
142 * SVCPU[CTR] = guest CTR
143 * SVCPU[LR] = guest LR
175 * 144 *
176 */ 145 */
177 146
178 /* Save registers */
179
180 std r0, PACA_KVM_R0(r13)
181 std r1, PACA_KVM_R1(r13)
182 std r2, PACA_KVM_R2(r13)
183 std r3, PACA_KVM_R3(r13)
184 std r4, PACA_KVM_R4(r13)
185 std r5, PACA_KVM_R5(r13)
186 std r6, PACA_KVM_R6(r13)
187 std r7, PACA_KVM_R7(r13)
188 std r8, PACA_KVM_R8(r13)
189 std r9, PACA_KVM_R9(r13)
190 std r10, PACA_KVM_R10(r13)
191 std r11, PACA_KVM_R11(r13)
192
193 /* Restore R1/R2 so we can handle faults */
194 ld r1, PACA_KVM_HOST_R1(r13)
195 ld r2, PACA_KVM_HOST_R2(r13)
196
197 /* Save guest PC and MSR in GPRs */
198 mfsrr0 r3
199 mfsrr1 r4
200
201 /* Get scratch'ed off registers */
202 mfspr r9, SPRN_SPRG_SCRATCH0
203 std r9, PACA_KVM_R13(r13)
204
205 ld r8, PACA_KVM_SCRATCH0(r13)
206 std r8, PACA_KVM_R12(r13)
207
208 lwz r7, PACA_KVM_SCRATCH1(r13)
209 stw r7, PACA_KVM_CR(r13)
210
211 /* Save more register state */
212
213 mfxer r6
214 stw r6, PACA_KVM_XER(r13)
215
216 mfdar r5
217 mfdsisr r6
218
219 /*
220 * In order for us to easily get the last instruction,
221 * we got the #vmexit at, we exploit the fact that the
222 * virtual layout is still the same here, so we can just
223 * ld from the guest's PC address
224 */
225
226 /* We only load the last instruction when it's safe */
227 cmpwi r12, BOOK3S_INTERRUPT_DATA_STORAGE
228 beq ld_last_inst
229 cmpwi r12, BOOK3S_INTERRUPT_PROGRAM
230 beq ld_last_inst
231
232 b no_ld_last_inst
233
234ld_last_inst:
235 /* Save off the guest instruction we're at */
236
237 /* Set guest mode to 'jump over instruction' so if lwz faults
238 * we'll just continue at the next IP. */
239 li r9, KVM_GUEST_MODE_SKIP
240 stb r9, PACA_KVM_IN_GUEST(r13)
241
242 /* 1) enable paging for data */
243 mfmsr r9
244 ori r11, r9, MSR_DR /* Enable paging for data */
245 mtmsr r11
246 /* 2) fetch the instruction */
247 li r0, KVM_INST_FETCH_FAILED /* In case lwz faults */
248 lwz r0, 0(r3)
249 /* 3) disable paging again */
250 mtmsr r9
251
252no_ld_last_inst:
253
254 /* Unset guest mode */
255 li r9, KVM_GUEST_MODE_NONE
256 stb r9, PACA_KVM_IN_GUEST(r13)
257
258 /* Restore bolted entries from the shadow and fix it along the way */ 147 /* Restore bolted entries from the shadow and fix it along the way */
259 148
260 /* We don't store anything in entry 0, so we don't need to take care of it */ 149 /* We don't store anything in entry 0, so we don't need to take care of it */
@@ -275,28 +164,4 @@ no_ld_last_inst:
275 164
276slb_do_exit: 165slb_do_exit:
277 166
278 /* Register usage at this point: 167.endm
279 *
280 * R0 = guest last inst
281 * R1 = host R1
282 * R2 = host R2
283 * R3 = guest PC
284 * R4 = guest MSR
285 * R5 = guest DAR
286 * R6 = guest DSISR
287 * R12 = exit handler id
288 * R13 = PACA
289 * PACA.KVM.* = guest *
290 *
291 */
292
293 /* RFI into the highmem handler */
294 mfmsr r7
295 ori r7, r7, MSR_IR|MSR_DR|MSR_RI /* Enable paging */
296 mtsrr1 r7
297 ld r8, PACA_KVM_VMHANDLER(r13) /* Highmem handler address */
298 mtsrr0 r8
299
300 RFI
301kvmppc_handler_trampoline_exit_end:
302
diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 2b0ee7e040c9..c85f906038ce 100644
--- a/arch/powerpc/kvm/book3s_64_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -28,13 +28,16 @@
28#define OP_31_XOP_MFMSR 83 28#define OP_31_XOP_MFMSR 83
29#define OP_31_XOP_MTMSR 146 29#define OP_31_XOP_MTMSR 146
30#define OP_31_XOP_MTMSRD 178 30#define OP_31_XOP_MTMSRD 178
31#define OP_31_XOP_MTSR 210
31#define OP_31_XOP_MTSRIN 242 32#define OP_31_XOP_MTSRIN 242
32#define OP_31_XOP_TLBIEL 274 33#define OP_31_XOP_TLBIEL 274
33#define OP_31_XOP_TLBIE 306 34#define OP_31_XOP_TLBIE 306
34#define OP_31_XOP_SLBMTE 402 35#define OP_31_XOP_SLBMTE 402
35#define OP_31_XOP_SLBIE 434 36#define OP_31_XOP_SLBIE 434
36#define OP_31_XOP_SLBIA 498 37#define OP_31_XOP_SLBIA 498
38#define OP_31_XOP_MFSR 595
37#define OP_31_XOP_MFSRIN 659 39#define OP_31_XOP_MFSRIN 659
40#define OP_31_XOP_DCBA 758
38#define OP_31_XOP_SLBMFEV 851 41#define OP_31_XOP_SLBMFEV 851
39#define OP_31_XOP_EIOIO 854 42#define OP_31_XOP_EIOIO 854
40#define OP_31_XOP_SLBMFEE 915 43#define OP_31_XOP_SLBMFEE 915
@@ -42,6 +45,24 @@
42/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */ 45/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
43#define OP_31_XOP_DCBZ 1010 46#define OP_31_XOP_DCBZ 1010
44 47
48#define OP_LFS 48
49#define OP_LFD 50
50#define OP_STFS 52
51#define OP_STFD 54
52
53#define SPRN_GQR0 912
54#define SPRN_GQR1 913
55#define SPRN_GQR2 914
56#define SPRN_GQR3 915
57#define SPRN_GQR4 916
58#define SPRN_GQR5 917
59#define SPRN_GQR6 918
60#define SPRN_GQR7 919
61
62/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
63 * function pointers, so let's just disable the define. */
64#undef mfsrin
65
45int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, 66int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
46 unsigned int inst, int *advance) 67 unsigned int inst, int *advance)
47{ 68{
@@ -52,7 +73,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
52 switch (get_xop(inst)) { 73 switch (get_xop(inst)) {
53 case OP_19_XOP_RFID: 74 case OP_19_XOP_RFID:
54 case OP_19_XOP_RFI: 75 case OP_19_XOP_RFI:
55 vcpu->arch.pc = vcpu->arch.srr0; 76 kvmppc_set_pc(vcpu, vcpu->arch.srr0);
56 kvmppc_set_msr(vcpu, vcpu->arch.srr1); 77 kvmppc_set_msr(vcpu, vcpu->arch.srr1);
57 *advance = 0; 78 *advance = 0;
58 break; 79 break;
@@ -80,6 +101,18 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
80 case OP_31_XOP_MTMSR: 101 case OP_31_XOP_MTMSR:
81 kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst))); 102 kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
82 break; 103 break;
104 case OP_31_XOP_MFSR:
105 {
106 int srnum;
107
108 srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
109 if (vcpu->arch.mmu.mfsrin) {
110 u32 sr;
111 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
112 kvmppc_set_gpr(vcpu, get_rt(inst), sr);
113 }
114 break;
115 }
83 case OP_31_XOP_MFSRIN: 116 case OP_31_XOP_MFSRIN:
84 { 117 {
85 int srnum; 118 int srnum;
@@ -92,6 +125,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
92 } 125 }
93 break; 126 break;
94 } 127 }
128 case OP_31_XOP_MTSR:
129 vcpu->arch.mmu.mtsrin(vcpu,
130 (inst >> 16) & 0xf,
131 kvmppc_get_gpr(vcpu, get_rs(inst)));
132 break;
95 case OP_31_XOP_MTSRIN: 133 case OP_31_XOP_MTSRIN:
96 vcpu->arch.mmu.mtsrin(vcpu, 134 vcpu->arch.mmu.mtsrin(vcpu,
97 (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf, 135 (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
@@ -150,12 +188,17 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
150 kvmppc_set_gpr(vcpu, get_rt(inst), t); 188 kvmppc_set_gpr(vcpu, get_rt(inst), t);
151 } 189 }
152 break; 190 break;
191 case OP_31_XOP_DCBA:
192 /* Gets treated as NOP */
193 break;
153 case OP_31_XOP_DCBZ: 194 case OP_31_XOP_DCBZ:
154 { 195 {
155 ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst)); 196 ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
156 ulong ra = 0; 197 ulong ra = 0;
157 ulong addr; 198 ulong addr, vaddr;
158 u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 199 u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
200 u32 dsisr;
201 int r;
159 202
160 if (get_ra(inst)) 203 if (get_ra(inst))
161 ra = kvmppc_get_gpr(vcpu, get_ra(inst)); 204 ra = kvmppc_get_gpr(vcpu, get_ra(inst));
@@ -163,15 +206,25 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
163 addr = (ra + rb) & ~31ULL; 206 addr = (ra + rb) & ~31ULL;
164 if (!(vcpu->arch.msr & MSR_SF)) 207 if (!(vcpu->arch.msr & MSR_SF))
165 addr &= 0xffffffff; 208 addr &= 0xffffffff;
209 vaddr = addr;
210
211 r = kvmppc_st(vcpu, &addr, 32, zeros, true);
212 if ((r == -ENOENT) || (r == -EPERM)) {
213 *advance = 0;
214 vcpu->arch.dear = vaddr;
215 to_svcpu(vcpu)->fault_dar = vaddr;
216
217 dsisr = DSISR_ISSTORE;
218 if (r == -ENOENT)
219 dsisr |= DSISR_NOHPTE;
220 else if (r == -EPERM)
221 dsisr |= DSISR_PROTFAULT;
222
223 to_book3s(vcpu)->dsisr = dsisr;
224 to_svcpu(vcpu)->fault_dsisr = dsisr;
166 225
167 if (kvmppc_st(vcpu, addr, 32, zeros)) {
168 vcpu->arch.dear = addr;
169 vcpu->arch.fault_dear = addr;
170 to_book3s(vcpu)->dsisr = DSISR_PROTFAULT |
171 DSISR_ISSTORE;
172 kvmppc_book3s_queue_irqprio(vcpu, 226 kvmppc_book3s_queue_irqprio(vcpu,
173 BOOK3S_INTERRUPT_DATA_STORAGE); 227 BOOK3S_INTERRUPT_DATA_STORAGE);
174 kvmppc_mmu_pte_flush(vcpu, addr, ~0xFFFULL);
175 } 228 }
176 229
177 break; 230 break;
@@ -184,6 +237,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
184 emulated = EMULATE_FAIL; 237 emulated = EMULATE_FAIL;
185 } 238 }
186 239
240 if (emulated == EMULATE_FAIL)
241 emulated = kvmppc_emulate_paired_single(run, vcpu);
242
187 return emulated; 243 return emulated;
188} 244}
189 245
@@ -207,6 +263,34 @@ void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
207 } 263 }
208} 264}
209 265
266static u32 kvmppc_read_bat(struct kvm_vcpu *vcpu, int sprn)
267{
268 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
269 struct kvmppc_bat *bat;
270
271 switch (sprn) {
272 case SPRN_IBAT0U ... SPRN_IBAT3L:
273 bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
274 break;
275 case SPRN_IBAT4U ... SPRN_IBAT7L:
276 bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
277 break;
278 case SPRN_DBAT0U ... SPRN_DBAT3L:
279 bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
280 break;
281 case SPRN_DBAT4U ... SPRN_DBAT7L:
282 bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
283 break;
284 default:
285 BUG();
286 }
287
288 if (sprn % 2)
289 return bat->raw >> 32;
290 else
291 return bat->raw;
292}
293
210static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val) 294static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
211{ 295{
212 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); 296 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
@@ -217,13 +301,13 @@ static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
217 bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2]; 301 bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
218 break; 302 break;
219 case SPRN_IBAT4U ... SPRN_IBAT7L: 303 case SPRN_IBAT4U ... SPRN_IBAT7L:
220 bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT4U) / 2]; 304 bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
221 break; 305 break;
222 case SPRN_DBAT0U ... SPRN_DBAT3L: 306 case SPRN_DBAT0U ... SPRN_DBAT3L:
223 bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2]; 307 bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
224 break; 308 break;
225 case SPRN_DBAT4U ... SPRN_DBAT7L: 309 case SPRN_DBAT4U ... SPRN_DBAT7L:
226 bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT4U) / 2]; 310 bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
227 break; 311 break;
228 default: 312 default:
229 BUG(); 313 BUG();
@@ -258,6 +342,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
258 /* BAT writes happen so rarely that we're ok to flush 342 /* BAT writes happen so rarely that we're ok to flush
259 * everything here */ 343 * everything here */
260 kvmppc_mmu_pte_flush(vcpu, 0, 0); 344 kvmppc_mmu_pte_flush(vcpu, 0, 0);
345 kvmppc_mmu_flush_segments(vcpu);
261 break; 346 break;
262 case SPRN_HID0: 347 case SPRN_HID0:
263 to_book3s(vcpu)->hid[0] = spr_val; 348 to_book3s(vcpu)->hid[0] = spr_val;
@@ -268,7 +353,32 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
268 case SPRN_HID2: 353 case SPRN_HID2:
269 to_book3s(vcpu)->hid[2] = spr_val; 354 to_book3s(vcpu)->hid[2] = spr_val;
270 break; 355 break;
356 case SPRN_HID2_GEKKO:
357 to_book3s(vcpu)->hid[2] = spr_val;
358 /* HID2.PSE controls paired single on gekko */
359 switch (vcpu->arch.pvr) {
360 case 0x00080200: /* lonestar 2.0 */
361 case 0x00088202: /* lonestar 2.2 */
362 case 0x70000100: /* gekko 1.0 */
363 case 0x00080100: /* gekko 2.0 */
364 case 0x00083203: /* gekko 2.3a */
365 case 0x00083213: /* gekko 2.3b */
366 case 0x00083204: /* gekko 2.4 */
367 case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */
368 case 0x00087200: /* broadway */
369 if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
370 /* Native paired singles */
371 } else if (spr_val & (1 << 29)) { /* HID2.PSE */
372 vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
373 kvmppc_giveup_ext(vcpu, MSR_FP);
374 } else {
375 vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
376 }
377 break;
378 }
379 break;
271 case SPRN_HID4: 380 case SPRN_HID4:
381 case SPRN_HID4_GEKKO:
272 to_book3s(vcpu)->hid[4] = spr_val; 382 to_book3s(vcpu)->hid[4] = spr_val;
273 break; 383 break;
274 case SPRN_HID5: 384 case SPRN_HID5:
@@ -278,12 +388,30 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
278 (mfmsr() & MSR_HV)) 388 (mfmsr() & MSR_HV))
279 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; 389 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
280 break; 390 break;
391 case SPRN_GQR0:
392 case SPRN_GQR1:
393 case SPRN_GQR2:
394 case SPRN_GQR3:
395 case SPRN_GQR4:
396 case SPRN_GQR5:
397 case SPRN_GQR6:
398 case SPRN_GQR7:
399 to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
400 break;
281 case SPRN_ICTC: 401 case SPRN_ICTC:
282 case SPRN_THRM1: 402 case SPRN_THRM1:
283 case SPRN_THRM2: 403 case SPRN_THRM2:
284 case SPRN_THRM3: 404 case SPRN_THRM3:
285 case SPRN_CTRLF: 405 case SPRN_CTRLF:
286 case SPRN_CTRLT: 406 case SPRN_CTRLT:
407 case SPRN_L2CR:
408 case SPRN_MMCR0_GEKKO:
409 case SPRN_MMCR1_GEKKO:
410 case SPRN_PMC1_GEKKO:
411 case SPRN_PMC2_GEKKO:
412 case SPRN_PMC3_GEKKO:
413 case SPRN_PMC4_GEKKO:
414 case SPRN_WPAR_GEKKO:
287 break; 415 break;
288 default: 416 default:
289 printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn); 417 printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
@@ -301,6 +429,12 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
301 int emulated = EMULATE_DONE; 429 int emulated = EMULATE_DONE;
302 430
303 switch (sprn) { 431 switch (sprn) {
432 case SPRN_IBAT0U ... SPRN_IBAT3L:
433 case SPRN_IBAT4U ... SPRN_IBAT7L:
434 case SPRN_DBAT0U ... SPRN_DBAT3L:
435 case SPRN_DBAT4U ... SPRN_DBAT7L:
436 kvmppc_set_gpr(vcpu, rt, kvmppc_read_bat(vcpu, sprn));
437 break;
304 case SPRN_SDR1: 438 case SPRN_SDR1:
305 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1); 439 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
306 break; 440 break;
@@ -320,19 +454,40 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
320 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]); 454 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
321 break; 455 break;
322 case SPRN_HID2: 456 case SPRN_HID2:
457 case SPRN_HID2_GEKKO:
323 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]); 458 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
324 break; 459 break;
325 case SPRN_HID4: 460 case SPRN_HID4:
461 case SPRN_HID4_GEKKO:
326 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]); 462 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
327 break; 463 break;
328 case SPRN_HID5: 464 case SPRN_HID5:
329 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]); 465 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
330 break; 466 break;
467 case SPRN_GQR0:
468 case SPRN_GQR1:
469 case SPRN_GQR2:
470 case SPRN_GQR3:
471 case SPRN_GQR4:
472 case SPRN_GQR5:
473 case SPRN_GQR6:
474 case SPRN_GQR7:
475 kvmppc_set_gpr(vcpu, rt,
476 to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]);
477 break;
331 case SPRN_THRM1: 478 case SPRN_THRM1:
332 case SPRN_THRM2: 479 case SPRN_THRM2:
333 case SPRN_THRM3: 480 case SPRN_THRM3:
334 case SPRN_CTRLF: 481 case SPRN_CTRLF:
335 case SPRN_CTRLT: 482 case SPRN_CTRLT:
483 case SPRN_L2CR:
484 case SPRN_MMCR0_GEKKO:
485 case SPRN_MMCR1_GEKKO:
486 case SPRN_PMC1_GEKKO:
487 case SPRN_PMC2_GEKKO:
488 case SPRN_PMC3_GEKKO:
489 case SPRN_PMC4_GEKKO:
490 case SPRN_WPAR_GEKKO:
336 kvmppc_set_gpr(vcpu, rt, 0); 491 kvmppc_set_gpr(vcpu, rt, 0);
337 break; 492 break;
338 default: 493 default:
@@ -346,3 +501,73 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
346 return emulated; 501 return emulated;
347} 502}
348 503
504u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
505{
506 u32 dsisr = 0;
507
508 /*
509 * This is what the spec says about DSISR bits (not mentioned = 0):
510 *
511 * 12:13 [DS] Set to bits 30:31
512 * 15:16 [X] Set to bits 29:30
513 * 17 [X] Set to bit 25
514 * [D/DS] Set to bit 5
515 * 18:21 [X] Set to bits 21:24
516 * [D/DS] Set to bits 1:4
517 * 22:26 Set to bits 6:10 (RT/RS/FRT/FRS)
518 * 27:31 Set to bits 11:15 (RA)
519 */
520
521 switch (get_op(inst)) {
522 /* D-form */
523 case OP_LFS:
524 case OP_LFD:
525 case OP_STFD:
526 case OP_STFS:
527 dsisr |= (inst >> 12) & 0x4000; /* bit 17 */
528 dsisr |= (inst >> 17) & 0x3c00; /* bits 18:21 */
529 break;
530 /* X-form */
531 case 31:
532 dsisr |= (inst << 14) & 0x18000; /* bits 15:16 */
533 dsisr |= (inst << 8) & 0x04000; /* bit 17 */
534 dsisr |= (inst << 3) & 0x03c00; /* bits 18:21 */
535 break;
536 default:
537 printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
538 break;
539 }
540
541 dsisr |= (inst >> 16) & 0x03ff; /* bits 22:31 */
542
543 return dsisr;
544}
545
546ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
547{
548 ulong dar = 0;
549 ulong ra;
550
551 switch (get_op(inst)) {
552 case OP_LFS:
553 case OP_LFD:
554 case OP_STFD:
555 case OP_STFS:
556 ra = get_ra(inst);
557 if (ra)
558 dar = kvmppc_get_gpr(vcpu, ra);
559 dar += (s32)((s16)inst);
560 break;
561 case 31:
562 ra = get_ra(inst);
563 if (ra)
564 dar = kvmppc_get_gpr(vcpu, ra);
565 dar += kvmppc_get_gpr(vcpu, get_rb(inst));
566 break;
567 default:
568 printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
569 break;
570 }
571
572 return dar;
573}
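For context, a hedged sketch of how the two helpers above would typically be consumed when an alignment interrupt is reflected to the guest. The wrapper function and the BOOK3S_INTERRUPT_ALIGNMENT vector name are assumptions for illustration and are not part of this hunk; only kvmppc_alignment_dar()/kvmppc_alignment_dsisr() and the to_book3s()/queue_irqprio helpers come from the patch series itself:

	/* Illustrative caller (not from this patch): rebuild DAR/DSISR from the
	 * faulting instruction and hand the alignment interrupt to the guest. */
	static void example_reflect_alignment(struct kvm_vcpu *vcpu, u32 inst)
	{
		vcpu->arch.dear = kvmppc_alignment_dar(vcpu, inst);
		to_book3s(vcpu)->dsisr = kvmppc_alignment_dsisr(vcpu, inst);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_ALIGNMENT);
	}
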
diff --git a/arch/powerpc/kvm/book3s_64_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 1dd5a1ddfd0d..1dd5a1ddfd0d 100644
--- a/arch/powerpc/kvm/book3s_64_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index c1584d0cbce8..2f0bc928b08a 100644
--- a/arch/powerpc/kvm/book3s_64_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -24,36 +24,56 @@
24#include <asm/asm-offsets.h> 24#include <asm/asm-offsets.h>
25#include <asm/exception-64s.h> 25#include <asm/exception-64s.h>
26 26
27#define KVMPPC_HANDLE_EXIT .kvmppc_handle_exit 27#if defined(CONFIG_PPC_BOOK3S_64)
28#define ULONG_SIZE 8
29#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
30 28
31.macro DISABLE_INTERRUPTS 29#define ULONG_SIZE 8
32 mfmsr r0 30#define FUNC(name) GLUE(.,name)
33 rldicl r0,r0,48,1
34 rotldi r0,r0,16
35 mtmsrd r0,1
36.endm
37 31
32#define GET_SHADOW_VCPU(reg) \
33 addi reg, r13, PACA_KVM_SVCPU
34
35#define DISABLE_INTERRUPTS \
36 mfmsr r0; \
37 rldicl r0,r0,48,1; \
38 rotldi r0,r0,16; \
39 mtmsrd r0,1; \
40
41#elif defined(CONFIG_PPC_BOOK3S_32)
42
43#define ULONG_SIZE 4
44#define FUNC(name) name
45
46#define GET_SHADOW_VCPU(reg) \
47 lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2)
48
49#define DISABLE_INTERRUPTS \
50 mfmsr r0; \
51 rlwinm r0,r0,0,17,15; \
52 mtmsr r0; \
53
54#endif /* CONFIG_PPC_BOOK3S_XX */
55
56
57#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
38#define VCPU_LOAD_NVGPRS(vcpu) \ 58#define VCPU_LOAD_NVGPRS(vcpu) \
39 ld r14, VCPU_GPR(r14)(vcpu); \ 59 PPC_LL r14, VCPU_GPR(r14)(vcpu); \
40 ld r15, VCPU_GPR(r15)(vcpu); \ 60 PPC_LL r15, VCPU_GPR(r15)(vcpu); \
41 ld r16, VCPU_GPR(r16)(vcpu); \ 61 PPC_LL r16, VCPU_GPR(r16)(vcpu); \
42 ld r17, VCPU_GPR(r17)(vcpu); \ 62 PPC_LL r17, VCPU_GPR(r17)(vcpu); \
43 ld r18, VCPU_GPR(r18)(vcpu); \ 63 PPC_LL r18, VCPU_GPR(r18)(vcpu); \
44 ld r19, VCPU_GPR(r19)(vcpu); \ 64 PPC_LL r19, VCPU_GPR(r19)(vcpu); \
45 ld r20, VCPU_GPR(r20)(vcpu); \ 65 PPC_LL r20, VCPU_GPR(r20)(vcpu); \
46 ld r21, VCPU_GPR(r21)(vcpu); \ 66 PPC_LL r21, VCPU_GPR(r21)(vcpu); \
47 ld r22, VCPU_GPR(r22)(vcpu); \ 67 PPC_LL r22, VCPU_GPR(r22)(vcpu); \
48 ld r23, VCPU_GPR(r23)(vcpu); \ 68 PPC_LL r23, VCPU_GPR(r23)(vcpu); \
49 ld r24, VCPU_GPR(r24)(vcpu); \ 69 PPC_LL r24, VCPU_GPR(r24)(vcpu); \
50 ld r25, VCPU_GPR(r25)(vcpu); \ 70 PPC_LL r25, VCPU_GPR(r25)(vcpu); \
51 ld r26, VCPU_GPR(r26)(vcpu); \ 71 PPC_LL r26, VCPU_GPR(r26)(vcpu); \
52 ld r27, VCPU_GPR(r27)(vcpu); \ 72 PPC_LL r27, VCPU_GPR(r27)(vcpu); \
53 ld r28, VCPU_GPR(r28)(vcpu); \ 73 PPC_LL r28, VCPU_GPR(r28)(vcpu); \
54 ld r29, VCPU_GPR(r29)(vcpu); \ 74 PPC_LL r29, VCPU_GPR(r29)(vcpu); \
55 ld r30, VCPU_GPR(r30)(vcpu); \ 75 PPC_LL r30, VCPU_GPR(r30)(vcpu); \
56 ld r31, VCPU_GPR(r31)(vcpu); \ 76 PPC_LL r31, VCPU_GPR(r31)(vcpu); \
57 77
58/***************************************************************************** 78/*****************************************************************************
59 * * 79 * *
@@ -69,11 +89,11 @@ _GLOBAL(__kvmppc_vcpu_entry)
69 89
70kvm_start_entry: 90kvm_start_entry:
71 /* Write correct stack frame */ 91 /* Write correct stack frame */
72 mflr r0 92 mflr r0
73 std r0,16(r1) 93 PPC_STL r0,PPC_LR_STKOFF(r1)
74 94
75 /* Save host state to the stack */ 95 /* Save host state to the stack */
76 stdu r1, -SWITCH_FRAME_SIZE(r1) 96 PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)
77 97
78 /* Save r3 (kvm_run) and r4 (vcpu) */ 98 /* Save r3 (kvm_run) and r4 (vcpu) */
79 SAVE_2GPRS(3, r1) 99 SAVE_2GPRS(3, r1)
@@ -82,33 +102,28 @@ kvm_start_entry:
82 SAVE_NVGPRS(r1) 102 SAVE_NVGPRS(r1)
83 103
84 /* Save LR */ 104 /* Save LR */
85 std r0, _LINK(r1) 105 PPC_STL r0, _LINK(r1)
86 106
87 /* Load non-volatile guest state from the vcpu */ 107 /* Load non-volatile guest state from the vcpu */
88 VCPU_LOAD_NVGPRS(r4) 108 VCPU_LOAD_NVGPRS(r4)
89 109
110 GET_SHADOW_VCPU(r5)
111
90 /* Save R1/R2 in the PACA */ 112 /* Save R1/R2 in the PACA */
91 std r1, PACA_KVM_HOST_R1(r13) 113 PPC_STL r1, SVCPU_HOST_R1(r5)
92 std r2, PACA_KVM_HOST_R2(r13) 114 PPC_STL r2, SVCPU_HOST_R2(r5)
93 115
94 /* XXX swap in/out on load? */ 116 /* XXX swap in/out on load? */
95 ld r3, VCPU_HIGHMEM_HANDLER(r4) 117 PPC_LL r3, VCPU_HIGHMEM_HANDLER(r4)
96 std r3, PACA_KVM_VMHANDLER(r13) 118 PPC_STL r3, SVCPU_VMHANDLER(r5)
97 119
98kvm_start_lightweight: 120kvm_start_lightweight:
99 121
100 ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */ 122 PPC_LL r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
101 ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
102
103 /* Load some guest state in the respective registers */
104 ld r5, VCPU_CTR(r4) /* r5 = vcpu->arch.ctr */
105 /* will be swapped in by rmcall */
106
107 ld r3, VCPU_LR(r4) /* r3 = vcpu->arch.lr */
108 mtlr r3 /* LR = r3 */
109 123
110 DISABLE_INTERRUPTS 124 DISABLE_INTERRUPTS
111 125
126#ifdef CONFIG_PPC_BOOK3S_64
112 /* Some guests may need to have dcbz set to 32 byte length. 127 /* Some guests may need to have dcbz set to 32 byte length.
113 * 128 *
114 * Usually we ensure that by patching the guest's instructions 129 * Usually we ensure that by patching the guest's instructions
@@ -118,7 +133,7 @@ kvm_start_lightweight:
118 * because that's a lot faster. 133 * because that's a lot faster.
119 */ 134 */
120 135
121 ld r3, VCPU_HFLAGS(r4) 136 PPC_LL r3, VCPU_HFLAGS(r4)
122 rldicl. r3, r3, 0, 63 /* CR = ((r3 & 1) == 0) */ 137 rldicl. r3, r3, 0, 63 /* CR = ((r3 & 1) == 0) */
123 beq no_dcbz32_on 138 beq no_dcbz32_on
124 139
@@ -128,13 +143,15 @@ kvm_start_lightweight:
128 143
129no_dcbz32_on: 144no_dcbz32_on:
130 145
131 ld r6, VCPU_RMCALL(r4) 146#endif /* CONFIG_PPC_BOOK3S_64 */
147
148 PPC_LL r6, VCPU_RMCALL(r4)
132 mtctr r6 149 mtctr r6
133 150
134 ld r3, VCPU_TRAMPOLINE_ENTER(r4) 151 PPC_LL r3, VCPU_TRAMPOLINE_ENTER(r4)
135 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR)) 152 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
136 153
137 /* Jump to SLB patching handlder and into our guest */ 154 /* Jump to segment patching handler and into our guest */
138 bctr 155 bctr
139 156
140/* 157/*
@@ -149,31 +166,20 @@ kvmppc_handler_highmem:
149 /* 166 /*
150 * Register usage at this point: 167 * Register usage at this point:
151 * 168 *
152 * R0 = guest last inst 169 * R1 = host R1
153 * R1 = host R1 170 * R2 = host R2
154 * R2 = host R2 171 * R12 = exit handler id
155 * R3 = guest PC 172 * R13 = PACA
156 * R4 = guest MSR 173 * SVCPU.* = guest *
157 * R5 = guest DAR
158 * R6 = guest DSISR
159 * R13 = PACA
160 * PACA.KVM.* = guest *
161 * 174 *
162 */ 175 */
163 176
164 /* R7 = vcpu */ 177 /* R7 = vcpu */
165 ld r7, GPR4(r1) 178 PPC_LL r7, GPR4(r1)
166 179
167 /* Now save the guest state */ 180#ifdef CONFIG_PPC_BOOK3S_64
168 181
169 stw r0, VCPU_LAST_INST(r7) 182 PPC_LL r5, VCPU_HFLAGS(r7)
170
171 std r3, VCPU_PC(r7)
172 std r4, VCPU_SHADOW_SRR1(r7)
173 std r5, VCPU_FAULT_DEAR(r7)
174 std r6, VCPU_FAULT_DSISR(r7)
175
176 ld r5, VCPU_HFLAGS(r7)
177 rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */ 183 rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */
178 beq no_dcbz32_off 184 beq no_dcbz32_off
179 185
@@ -184,35 +190,29 @@ kvmppc_handler_highmem:
184 190
185no_dcbz32_off: 191no_dcbz32_off:
186 192
187 std r14, VCPU_GPR(r14)(r7) 193#endif /* CONFIG_PPC_BOOK3S_64 */
188 std r15, VCPU_GPR(r15)(r7) 194
189 std r16, VCPU_GPR(r16)(r7) 195 PPC_STL r14, VCPU_GPR(r14)(r7)
190 std r17, VCPU_GPR(r17)(r7) 196 PPC_STL r15, VCPU_GPR(r15)(r7)
191 std r18, VCPU_GPR(r18)(r7) 197 PPC_STL r16, VCPU_GPR(r16)(r7)
192 std r19, VCPU_GPR(r19)(r7) 198 PPC_STL r17, VCPU_GPR(r17)(r7)
193 std r20, VCPU_GPR(r20)(r7) 199 PPC_STL r18, VCPU_GPR(r18)(r7)
194 std r21, VCPU_GPR(r21)(r7) 200 PPC_STL r19, VCPU_GPR(r19)(r7)
195 std r22, VCPU_GPR(r22)(r7) 201 PPC_STL r20, VCPU_GPR(r20)(r7)
196 std r23, VCPU_GPR(r23)(r7) 202 PPC_STL r21, VCPU_GPR(r21)(r7)
197 std r24, VCPU_GPR(r24)(r7) 203 PPC_STL r22, VCPU_GPR(r22)(r7)
198 std r25, VCPU_GPR(r25)(r7) 204 PPC_STL r23, VCPU_GPR(r23)(r7)
199 std r26, VCPU_GPR(r26)(r7) 205 PPC_STL r24, VCPU_GPR(r24)(r7)
200 std r27, VCPU_GPR(r27)(r7) 206 PPC_STL r25, VCPU_GPR(r25)(r7)
201 std r28, VCPU_GPR(r28)(r7) 207 PPC_STL r26, VCPU_GPR(r26)(r7)
202 std r29, VCPU_GPR(r29)(r7) 208 PPC_STL r27, VCPU_GPR(r27)(r7)
203 std r30, VCPU_GPR(r30)(r7) 209 PPC_STL r28, VCPU_GPR(r28)(r7)
204 std r31, VCPU_GPR(r31)(r7) 210 PPC_STL r29, VCPU_GPR(r29)(r7)
205 211 PPC_STL r30, VCPU_GPR(r30)(r7)
206 /* Save guest CTR */ 212 PPC_STL r31, VCPU_GPR(r31)(r7)
207 mfctr r5
208 std r5, VCPU_CTR(r7)
209
210 /* Save guest LR */
211 mflr r5
212 std r5, VCPU_LR(r7)
213 213
214 /* Restore host msr -> SRR1 */ 214 /* Restore host msr -> SRR1 */
215 ld r6, VCPU_HOST_MSR(r7) 215 PPC_LL r6, VCPU_HOST_MSR(r7)
216 216
217 /* 217 /*
218 * For some interrupts, we need to call the real Linux 218 * For some interrupts, we need to call the real Linux
@@ -228,9 +228,12 @@ no_dcbz32_off:
228 beq call_linux_handler 228 beq call_linux_handler
229 cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER 229 cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER
230 beq call_linux_handler 230 beq call_linux_handler
231 cmpwi r12, BOOK3S_INTERRUPT_PERFMON
232 beq call_linux_handler
231 233
232 /* Back to EE=1 */ 234 /* Back to EE=1 */
233 mtmsr r6 235 mtmsr r6
236 sync
234 b kvm_return_point 237 b kvm_return_point
235 238
236call_linux_handler: 239call_linux_handler:
@@ -249,14 +252,14 @@ call_linux_handler:
249 */ 252 */
250 253
251 /* Restore host IP -> SRR0 */ 254 /* Restore host IP -> SRR0 */
252 ld r5, VCPU_HOST_RETIP(r7) 255 PPC_LL r5, VCPU_HOST_RETIP(r7)
253 256
254 /* XXX Better move to a safe function? 257 /* XXX Better move to a safe function?
255 * What if we get an HTAB flush in between mtsrr0 and mtsrr1? */ 258 * What if we get an HTAB flush in between mtsrr0 and mtsrr1? */
256 259
257 mtlr r12 260 mtlr r12
258 261
259 ld r4, VCPU_TRAMPOLINE_LOWMEM(r7) 262 PPC_LL r4, VCPU_TRAMPOLINE_LOWMEM(r7)
260 mtsrr0 r4 263 mtsrr0 r4
261 LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)) 264 LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
262 mtsrr1 r3 265 mtsrr1 r3
@@ -274,7 +277,7 @@ kvm_return_point:
274 277
275 /* Restore r3 (kvm_run) and r4 (vcpu) */ 278 /* Restore r3 (kvm_run) and r4 (vcpu) */
276 REST_2GPRS(3, r1) 279 REST_2GPRS(3, r1)
277 bl KVMPPC_HANDLE_EXIT 280 bl FUNC(kvmppc_handle_exit)
278 281
279 /* If RESUME_GUEST, get back in the loop */ 282 /* If RESUME_GUEST, get back in the loop */
280 cmpwi r3, RESUME_GUEST 283 cmpwi r3, RESUME_GUEST
@@ -285,7 +288,7 @@ kvm_return_point:
285 288
286kvm_exit_loop: 289kvm_exit_loop:
287 290
288 ld r4, _LINK(r1) 291 PPC_LL r4, _LINK(r1)
289 mtlr r4 292 mtlr r4
290 293
291 /* Restore non-volatile host registers (r14 - r31) */ 294 /* Restore non-volatile host registers (r14 - r31) */
@@ -296,8 +299,8 @@ kvm_exit_loop:
296 299
297kvm_loop_heavyweight: 300kvm_loop_heavyweight:
298 301
299 ld r4, _LINK(r1) 302 PPC_LL r4, _LINK(r1)
300 std r4, (16 + SWITCH_FRAME_SIZE)(r1) 303 PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)
301 304
302 /* Load vcpu and cpu_run */ 305 /* Load vcpu and cpu_run */
303 REST_2GPRS(3, r1) 306 REST_2GPRS(3, r1)
@@ -315,4 +318,3 @@ kvm_loop_lightweight:
315 318
316 /* Jump back into the beginning of this function */ 319 /* Jump back into the beginning of this function */
317 b kvm_start_lightweight 320 b kvm_start_lightweight
318
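The conversion above swaps the bare ld/std/stdu instructions for the PPC_LL/PPC_STL/PPC_STLU helpers so the same entry/exit code assembles for both 32-bit and 64-bit Book3S. A simplified sketch of the idea (the real definitions live in asm/asm-compat.h and are wrapped slightly differently; this is an illustration, not the kernel header):

	/* Pointer-sized load/store mnemonics, chosen by target word size. */
	#ifdef __powerpc64__
	#define PPC_LL		ld	/* 64-bit load   */
	#define PPC_STL		std	/* 64-bit store  */
	#define PPC_STLU	stdu	/* store w/ update */
	#else
	#define PPC_LL		lwz	/* 32-bit load   */
	#define PPC_STL		stw	/* 32-bit store  */
	#define PPC_STLU	stwu	/* store w/ update */
	#endif
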
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
new file mode 100644
index 000000000000..a9f66abafcb3
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -0,0 +1,1289 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright Novell Inc 2010
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20#include <asm/kvm.h>
21#include <asm/kvm_ppc.h>
22#include <asm/disassemble.h>
23#include <asm/kvm_book3s.h>
24#include <asm/kvm_fpu.h>
25#include <asm/reg.h>
26#include <asm/cacheflush.h>
27#include <linux/vmalloc.h>
28
29/* #define DEBUG */
30
31#ifdef DEBUG
32#define dprintk printk
33#else
34#define dprintk(...) do { } while(0);
35#endif
36
37#define OP_LFS 48
38#define OP_LFSU 49
39#define OP_LFD 50
40#define OP_LFDU 51
41#define OP_STFS 52
42#define OP_STFSU 53
43#define OP_STFD 54
44#define OP_STFDU 55
45#define OP_PSQ_L 56
46#define OP_PSQ_LU 57
47#define OP_PSQ_ST 60
48#define OP_PSQ_STU 61
49
50#define OP_31_LFSX 535
51#define OP_31_LFSUX 567
52#define OP_31_LFDX 599
53#define OP_31_LFDUX 631
54#define OP_31_STFSX 663
55#define OP_31_STFSUX 695
56#define OP_31_STFX 727
57#define OP_31_STFUX 759
58#define OP_31_LWIZX 887
59#define OP_31_STFIWX 983
60
61#define OP_59_FADDS 21
62#define OP_59_FSUBS 20
63#define OP_59_FSQRTS 22
64#define OP_59_FDIVS 18
65#define OP_59_FRES 24
66#define OP_59_FMULS 25
67#define OP_59_FRSQRTES 26
68#define OP_59_FMSUBS 28
69#define OP_59_FMADDS 29
70#define OP_59_FNMSUBS 30
71#define OP_59_FNMADDS 31
72
73#define OP_63_FCMPU 0
74#define OP_63_FCPSGN 8
75#define OP_63_FRSP 12
76#define OP_63_FCTIW 14
77#define OP_63_FCTIWZ 15
78#define OP_63_FDIV 18
79#define OP_63_FADD 21
80#define OP_63_FSQRT 22
81#define OP_63_FSEL 23
82#define OP_63_FRE 24
83#define OP_63_FMUL 25
84#define OP_63_FRSQRTE 26
85#define OP_63_FMSUB 28
86#define OP_63_FMADD 29
87#define OP_63_FNMSUB 30
88#define OP_63_FNMADD 31
89#define OP_63_FCMPO 32
90#define OP_63_MTFSB1 38 // XXX
91#define OP_63_FSUB 20
92#define OP_63_FNEG 40
93#define OP_63_MCRFS 64
94#define OP_63_MTFSB0 70
95#define OP_63_FMR 72
96#define OP_63_MTFSFI 134
97#define OP_63_FABS 264
98#define OP_63_MFFS 583
99#define OP_63_MTFSF 711
100
101#define OP_4X_PS_CMPU0 0
102#define OP_4X_PSQ_LX 6
103#define OP_4XW_PSQ_STX 7
104#define OP_4A_PS_SUM0 10
105#define OP_4A_PS_SUM1 11
106#define OP_4A_PS_MULS0 12
107#define OP_4A_PS_MULS1 13
108#define OP_4A_PS_MADDS0 14
109#define OP_4A_PS_MADDS1 15
110#define OP_4A_PS_DIV 18
111#define OP_4A_PS_SUB 20
112#define OP_4A_PS_ADD 21
113#define OP_4A_PS_SEL 23
114#define OP_4A_PS_RES 24
115#define OP_4A_PS_MUL 25
116#define OP_4A_PS_RSQRTE 26
117#define OP_4A_PS_MSUB 28
118#define OP_4A_PS_MADD 29
119#define OP_4A_PS_NMSUB 30
120#define OP_4A_PS_NMADD 31
121#define OP_4X_PS_CMPO0 32
122#define OP_4X_PSQ_LUX 38
123#define OP_4XW_PSQ_STUX 39
124#define OP_4X_PS_NEG 40
125#define OP_4X_PS_CMPU1 64
126#define OP_4X_PS_MR 72
127#define OP_4X_PS_CMPO1 96
128#define OP_4X_PS_NABS 136
129#define OP_4X_PS_ABS 264
130#define OP_4X_PS_MERGE00 528
131#define OP_4X_PS_MERGE01 560
132#define OP_4X_PS_MERGE10 592
133#define OP_4X_PS_MERGE11 624
134
135#define SCALAR_NONE 0
136#define SCALAR_HIGH (1 << 0)
137#define SCALAR_LOW (1 << 1)
138#define SCALAR_NO_PS0 (1 << 2)
139#define SCALAR_NO_PS1 (1 << 3)
140
141#define GQR_ST_TYPE_MASK 0x00000007
142#define GQR_ST_TYPE_SHIFT 0
143#define GQR_ST_SCALE_MASK 0x00003f00
144#define GQR_ST_SCALE_SHIFT 8
145#define GQR_LD_TYPE_MASK 0x00070000
146#define GQR_LD_TYPE_SHIFT 16
147#define GQR_LD_SCALE_MASK 0x3f000000
148#define GQR_LD_SCALE_SHIFT 24
149
150#define GQR_QUANTIZE_FLOAT 0
151#define GQR_QUANTIZE_U8 4
152#define GQR_QUANTIZE_U16 5
153#define GQR_QUANTIZE_S8 6
154#define GQR_QUANTIZE_S16 7
155
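The GQR masks above describe how each Gekko quantization register packs a store descriptor and a load descriptor (a 3-bit type plus a 6-bit scale each). A small decode helper built only from those masks might look like this (illustration only, not part of the patch):

	/* Split a GQR value into its load/store type and scale fields. */
	static inline void example_decode_gqr(u32 gqr, u32 *ld_type, u32 *ld_scale,
					      u32 *st_type, u32 *st_scale)
	{
		*st_type  = (gqr & GQR_ST_TYPE_MASK)  >> GQR_ST_TYPE_SHIFT;
		*st_scale = (gqr & GQR_ST_SCALE_MASK) >> GQR_ST_SCALE_SHIFT;
		*ld_type  = (gqr & GQR_LD_TYPE_MASK)  >> GQR_LD_TYPE_SHIFT;
		*ld_scale = (gqr & GQR_LD_SCALE_MASK) >> GQR_LD_SCALE_SHIFT;
	}
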
156#define FPU_LS_SINGLE 0
157#define FPU_LS_DOUBLE 1
158#define FPU_LS_SINGLE_LOW 2
159
160static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
161{
162 struct thread_struct t;
163
164 t.fpscr.val = vcpu->arch.fpscr;
165 cvt_df((double*)&vcpu->arch.fpr[rt], (float*)&vcpu->arch.qpr[rt], &t);
166}
167
168static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
169{
170 u64 dsisr;
171
172 vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 36, 0);
173 vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
174 vcpu->arch.dear = eaddr;
175 /* Page Fault */
176 dsisr = kvmppc_set_field(0, 33, 33, 1);
177 if (is_store)
178 to_book3s(vcpu)->dsisr = kvmppc_set_field(dsisr, 38, 38, 1);
179 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
180}
181
182static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
183 int rs, ulong addr, int ls_type)
184{
185 int emulated = EMULATE_FAIL;
186 struct thread_struct t;
187 int r;
188 char tmp[8];
189 int len = sizeof(u32);
190
191 if (ls_type == FPU_LS_DOUBLE)
192 len = sizeof(u64);
193
194 t.fpscr.val = vcpu->arch.fpscr;
195
196 /* read from memory */
197 r = kvmppc_ld(vcpu, &addr, len, tmp, true);
198 vcpu->arch.paddr_accessed = addr;
199
200 if (r < 0) {
201 kvmppc_inject_pf(vcpu, addr, false);
202 goto done_load;
203 } else if (r == EMULATE_DO_MMIO) {
204 emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, len, 1);
205 goto done_load;
206 }
207
208 emulated = EMULATE_DONE;
209
210 /* put in registers */
211 switch (ls_type) {
212 case FPU_LS_SINGLE:
213 cvt_fd((float*)tmp, (double*)&vcpu->arch.fpr[rs], &t);
214 vcpu->arch.qpr[rs] = *((u32*)tmp);
215 break;
216 case FPU_LS_DOUBLE:
217 vcpu->arch.fpr[rs] = *((u64*)tmp);
218 break;
219 }
220
221 dprintk(KERN_INFO "KVM: FPR_LD [0x%llx] at 0x%lx (%d)\n", *(u64*)tmp,
222 addr, len);
223
224done_load:
225 return emulated;
226}
227
228static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
229 int rs, ulong addr, int ls_type)
230{
231 int emulated = EMULATE_FAIL;
232 struct thread_struct t;
233 int r;
234 char tmp[8];
235 u64 val;
236 int len;
237
238 t.fpscr.val = vcpu->arch.fpscr;
239
240 switch (ls_type) {
241 case FPU_LS_SINGLE:
242 cvt_df((double*)&vcpu->arch.fpr[rs], (float*)tmp, &t);
243 val = *((u32*)tmp);
244 len = sizeof(u32);
245 break;
246 case FPU_LS_SINGLE_LOW:
247 *((u32*)tmp) = vcpu->arch.fpr[rs];
248 val = vcpu->arch.fpr[rs] & 0xffffffff;
249 len = sizeof(u32);
250 break;
251 case FPU_LS_DOUBLE:
252 *((u64*)tmp) = vcpu->arch.fpr[rs];
253 val = vcpu->arch.fpr[rs];
254 len = sizeof(u64);
255 break;
256 default:
257 val = 0;
258 len = 0;
259 }
260
261 r = kvmppc_st(vcpu, &addr, len, tmp, true);
262 vcpu->arch.paddr_accessed = addr;
263 if (r < 0) {
264 kvmppc_inject_pf(vcpu, addr, true);
265 } else if (r == EMULATE_DO_MMIO) {
266 emulated = kvmppc_handle_store(run, vcpu, val, len, 1);
267 } else {
268 emulated = EMULATE_DONE;
269 }
270
271 dprintk(KERN_INFO "KVM: FPR_ST [0x%llx] at 0x%lx (%d)\n",
272 val, addr, len);
273
274 return emulated;
275}
276
277static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
278 int rs, ulong addr, bool w, int i)
279{
280 int emulated = EMULATE_FAIL;
281 struct thread_struct t;
282 int r;
283 float one = 1.0;
284 u32 tmp[2];
285
286 t.fpscr.val = vcpu->arch.fpscr;
287
288 /* read from memory */
289 if (w) {
290 r = kvmppc_ld(vcpu, &addr, sizeof(u32), tmp, true);
291 memcpy(&tmp[1], &one, sizeof(u32));
292 } else {
293 r = kvmppc_ld(vcpu, &addr, sizeof(u32) * 2, tmp, true);
294 }
295 vcpu->arch.paddr_accessed = addr;
296 if (r < 0) {
297 kvmppc_inject_pf(vcpu, addr, false);
298 goto done_load;
299 } else if ((r == EMULATE_DO_MMIO) && w) {
300 emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, 4, 1);
301 vcpu->arch.qpr[rs] = tmp[1];
302 goto done_load;
303 } else if (r == EMULATE_DO_MMIO) {
304 emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FQPR | rs, 8, 1);
305 goto done_load;
306 }
307
308 emulated = EMULATE_DONE;
309
310 /* put in registers */
311 cvt_fd((float*)&tmp[0], (double*)&vcpu->arch.fpr[rs], &t);
312 vcpu->arch.qpr[rs] = tmp[1];
313
314 dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
315 tmp[1], addr, w ? 4 : 8);
316
317done_load:
318 return emulated;
319}
320
321static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
322 int rs, ulong addr, bool w, int i)
323{
324 int emulated = EMULATE_FAIL;
325 struct thread_struct t;
326 int r;
327 u32 tmp[2];
328 int len = w ? sizeof(u32) : sizeof(u64);
329
330 t.fpscr.val = vcpu->arch.fpscr;
331
332 cvt_df((double*)&vcpu->arch.fpr[rs], (float*)&tmp[0], &t);
333 tmp[1] = vcpu->arch.qpr[rs];
334
335 r = kvmppc_st(vcpu, &addr, len, tmp, true);
336 vcpu->arch.paddr_accessed = addr;
337 if (r < 0) {
338 kvmppc_inject_pf(vcpu, addr, true);
339 } else if ((r == EMULATE_DO_MMIO) && w) {
340 emulated = kvmppc_handle_store(run, vcpu, tmp[0], 4, 1);
341 } else if (r == EMULATE_DO_MMIO) {
342 u64 val = ((u64)tmp[0] << 32) | tmp[1];
343 emulated = kvmppc_handle_store(run, vcpu, val, 8, 1);
344 } else {
345 emulated = EMULATE_DONE;
346 }
347
348 dprintk(KERN_INFO "KVM: PSQ_ST [0x%x, 0x%x] at 0x%lx (%d)\n",
349 tmp[0], tmp[1], addr, len);
350
351 return emulated;
352}
353
354/*
355 * Cuts out inst bits with ordering according to spec.
356 * That means the leftmost bit is zero. All given bits are included.
357 */
358static inline u32 inst_get_field(u32 inst, int msb, int lsb)
359{
360 return kvmppc_get_field(inst, msb + 32, lsb + 32);
361}
362
363/*
364 * Replaces inst bits with ordering according to spec.
365 */
366static inline u32 inst_set_field(u32 inst, int msb, int lsb, int value)
367{
368 return kvmppc_set_field(inst, msb + 32, lsb + 32, value);
369}
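Both wrappers above use the architecture's big-endian bit numbering (bit 0 is the most significant bit of the 32-bit instruction word) and delegate to the 64-bit kvmppc_get_field/kvmppc_set_field helpers via the +32 offset. A standalone equivalent of the extraction, written without those helpers, would look roughly like this (illustration only, assuming an inclusive [msb, lsb] range in IBM numbering):

	/* Extract inclusive bit range [msb, lsb] of a 32-bit instruction where
	 * bit 0 is the MSB, matching what inst_get_field() returns.
	 * e.g. example_inst_get_field(inst, 6, 10) == (inst >> 21) & 0x1f. */
	static inline u32 example_inst_get_field(u32 inst, int msb, int lsb)
	{
		int width = lsb - msb + 1;
		u32 mask = (width == 32) ? ~0u : ((1u << width) - 1);

		return (inst >> (31 - lsb)) & mask;
	}
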
370
371bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst)
372{
373 if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
374 return false;
375
376 switch (get_op(inst)) {
377 case OP_PSQ_L:
378 case OP_PSQ_LU:
379 case OP_PSQ_ST:
380 case OP_PSQ_STU:
381 case OP_LFS:
382 case OP_LFSU:
383 case OP_LFD:
384 case OP_LFDU:
385 case OP_STFS:
386 case OP_STFSU:
387 case OP_STFD:
388 case OP_STFDU:
389 return true;
390 case 4:
391 /* X form */
392 switch (inst_get_field(inst, 21, 30)) {
393 case OP_4X_PS_CMPU0:
394 case OP_4X_PSQ_LX:
395 case OP_4X_PS_CMPO0:
396 case OP_4X_PSQ_LUX:
397 case OP_4X_PS_NEG:
398 case OP_4X_PS_CMPU1:
399 case OP_4X_PS_MR:
400 case OP_4X_PS_CMPO1:
401 case OP_4X_PS_NABS:
402 case OP_4X_PS_ABS:
403 case OP_4X_PS_MERGE00:
404 case OP_4X_PS_MERGE01:
405 case OP_4X_PS_MERGE10:
406 case OP_4X_PS_MERGE11:
407 return true;
408 }
409 /* XW form */
410 switch (inst_get_field(inst, 25, 30)) {
411 case OP_4XW_PSQ_STX:
412 case OP_4XW_PSQ_STUX:
413 return true;
414 }
415 /* A form */
416 switch (inst_get_field(inst, 26, 30)) {
417 case OP_4A_PS_SUM1:
418 case OP_4A_PS_SUM0:
419 case OP_4A_PS_MULS0:
420 case OP_4A_PS_MULS1:
421 case OP_4A_PS_MADDS0:
422 case OP_4A_PS_MADDS1:
423 case OP_4A_PS_DIV:
424 case OP_4A_PS_SUB:
425 case OP_4A_PS_ADD:
426 case OP_4A_PS_SEL:
427 case OP_4A_PS_RES:
428 case OP_4A_PS_MUL:
429 case OP_4A_PS_RSQRTE:
430 case OP_4A_PS_MSUB:
431 case OP_4A_PS_MADD:
432 case OP_4A_PS_NMSUB:
433 case OP_4A_PS_NMADD:
434 return true;
435 }
436 break;
437 case 59:
438 switch (inst_get_field(inst, 21, 30)) {
439 case OP_59_FADDS:
440 case OP_59_FSUBS:
441 case OP_59_FDIVS:
442 case OP_59_FRES:
443 case OP_59_FRSQRTES:
444 return true;
445 }
446 switch (inst_get_field(inst, 26, 30)) {
447 case OP_59_FMULS:
448 case OP_59_FMSUBS:
449 case OP_59_FMADDS:
450 case OP_59_FNMSUBS:
451 case OP_59_FNMADDS:
452 return true;
453 }
454 break;
455 case 63:
456 switch (inst_get_field(inst, 21, 30)) {
457 case OP_63_MTFSB0:
458 case OP_63_MTFSB1:
459 case OP_63_MTFSF:
460 case OP_63_MTFSFI:
461 case OP_63_MCRFS:
462 case OP_63_MFFS:
463 case OP_63_FCMPU:
464 case OP_63_FCMPO:
465 case OP_63_FNEG:
466 case OP_63_FMR:
467 case OP_63_FABS:
468 case OP_63_FRSP:
469 case OP_63_FDIV:
470 case OP_63_FADD:
471 case OP_63_FSUB:
472 case OP_63_FCTIW:
473 case OP_63_FCTIWZ:
474 case OP_63_FRSQRTE:
475 case OP_63_FCPSGN:
476 return true;
477 }
478 switch (inst_get_field(inst, 26, 30)) {
479 case OP_63_FMUL:
480 case OP_63_FSEL:
481 case OP_63_FMSUB:
482 case OP_63_FMADD:
483 case OP_63_FNMSUB:
484 case OP_63_FNMADD:
485 return true;
486 }
487 break;
488 case 31:
489 switch (inst_get_field(inst, 21, 30)) {
490 case OP_31_LFSX:
491 case OP_31_LFSUX:
492 case OP_31_LFDX:
493 case OP_31_LFDUX:
494 case OP_31_STFSX:
495 case OP_31_STFSUX:
496 case OP_31_STFX:
497 case OP_31_STFUX:
498 case OP_31_STFIWX:
499 return true;
500 }
501 break;
502 }
503
504 return false;
505}
506
507static int get_d_signext(u32 inst)
508{
509 int d = inst & 0x8ff;
510
511 if (d & 0x800)
512 return -(d & 0x7ff);
513
514 return (d & 0x7ff);
515}
516
517static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
518 int reg_out, int reg_in1, int reg_in2,
519 int reg_in3, int scalar,
520 void (*func)(struct thread_struct *t,
521 u32 *dst, u32 *src1,
522 u32 *src2, u32 *src3))
523{
524 u32 *qpr = vcpu->arch.qpr;
525 u64 *fpr = vcpu->arch.fpr;
526 u32 ps0_out;
527 u32 ps0_in1, ps0_in2, ps0_in3;
528 u32 ps1_in1, ps1_in2, ps1_in3;
529 struct thread_struct t;
530 t.fpscr.val = vcpu->arch.fpscr;
531
532 /* RC */
533 WARN_ON(rc);
534
535 /* PS0 */
536 cvt_df((double*)&fpr[reg_in1], (float*)&ps0_in1, &t);
537 cvt_df((double*)&fpr[reg_in2], (float*)&ps0_in2, &t);
538 cvt_df((double*)&fpr[reg_in3], (float*)&ps0_in3, &t);
539
540 if (scalar & SCALAR_LOW)
541 ps0_in2 = qpr[reg_in2];
542
543 func(&t, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
544
545 dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
546 ps0_in1, ps0_in2, ps0_in3, ps0_out);
547
548 if (!(scalar & SCALAR_NO_PS0))
549 cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
550
551 /* PS1 */
552 ps1_in1 = qpr[reg_in1];
553 ps1_in2 = qpr[reg_in2];
554 ps1_in3 = qpr[reg_in3];
555
556 if (scalar & SCALAR_HIGH)
557 ps1_in2 = ps0_in2;
558
559 if (!(scalar & SCALAR_NO_PS1))
560 func(&t, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
561
562 dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
563 ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]);
564
565 return EMULATE_DONE;
566}
567
568static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
569 int reg_out, int reg_in1, int reg_in2,
570 int scalar,
571 void (*func)(struct thread_struct *t,
572 u32 *dst, u32 *src1,
573 u32 *src2))
574{
575 u32 *qpr = vcpu->arch.qpr;
576 u64 *fpr = vcpu->arch.fpr;
577 u32 ps0_out;
578 u32 ps0_in1, ps0_in2;
579 u32 ps1_out;
580 u32 ps1_in1, ps1_in2;
581 struct thread_struct t;
582 t.fpscr.val = vcpu->arch.fpscr;
583
584 /* RC */
585 WARN_ON(rc);
586
587 /* PS0 */
588 cvt_df((double*)&fpr[reg_in1], (float*)&ps0_in1, &t);
589
590 if (scalar & SCALAR_LOW)
591 ps0_in2 = qpr[reg_in2];
592 else
593 cvt_df((double*)&fpr[reg_in2], (float*)&ps0_in2, &t);
594
595 func(&t, &ps0_out, &ps0_in1, &ps0_in2);
596
597 if (!(scalar & SCALAR_NO_PS0)) {
598 dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
599 ps0_in1, ps0_in2, ps0_out);
600
601 cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
602 }
603
604 /* PS1 */
605 ps1_in1 = qpr[reg_in1];
606 ps1_in2 = qpr[reg_in2];
607
608 if (scalar & SCALAR_HIGH)
609 ps1_in2 = ps0_in2;
610
611 func(&t, &ps1_out, &ps1_in1, &ps1_in2);
612
613 if (!(scalar & SCALAR_NO_PS1)) {
614 qpr[reg_out] = ps1_out;
615
616 dprintk(KERN_INFO "PS2 ps1 -> f(0x%x, 0x%x) = 0x%x\n",
617 ps1_in1, ps1_in2, qpr[reg_out]);
618 }
619
620 return EMULATE_DONE;
621}
622
623static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
624 int reg_out, int reg_in,
625 void (*func)(struct thread_struct *t,
626 u32 *dst, u32 *src1))
627{
628 u32 *qpr = vcpu->arch.qpr;
629 u64 *fpr = vcpu->arch.fpr;
630 u32 ps0_out, ps0_in;
631 u32 ps1_in;
632 struct thread_struct t;
633 t.fpscr.val = vcpu->arch.fpscr;
634
635 /* RC */
636 WARN_ON(rc);
637
638 /* PS0 */
639 cvt_df((double*)&fpr[reg_in], (float*)&ps0_in, &t);
640 func(&t, &ps0_out, &ps0_in);
641
642 dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
643 ps0_in, ps0_out);
644
645 cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
646
647 /* PS1 */
648 ps1_in = qpr[reg_in];
649 func(&t, &qpr[reg_out], &ps1_in);
650
651 dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n",
652 ps1_in, qpr[reg_out]);
653
654 return EMULATE_DONE;
655}
656
657int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
658{
659 u32 inst = kvmppc_get_last_inst(vcpu);
660 enum emulation_result emulated = EMULATE_DONE;
661
662 int ax_rd = inst_get_field(inst, 6, 10);
663 int ax_ra = inst_get_field(inst, 11, 15);
664 int ax_rb = inst_get_field(inst, 16, 20);
665 int ax_rc = inst_get_field(inst, 21, 25);
666 short full_d = inst_get_field(inst, 16, 31);
667
668 u64 *fpr_d = &vcpu->arch.fpr[ax_rd];
669 u64 *fpr_a = &vcpu->arch.fpr[ax_ra];
670 u64 *fpr_b = &vcpu->arch.fpr[ax_rb];
671 u64 *fpr_c = &vcpu->arch.fpr[ax_rc];
672
673 bool rcomp = (inst & 1) ? true : false;
674 u32 cr = kvmppc_get_cr(vcpu);
675 struct thread_struct t;
676#ifdef DEBUG
677 int i;
678#endif
679
680 t.fpscr.val = vcpu->arch.fpscr;
681
682 if (!kvmppc_inst_is_paired_single(vcpu, inst))
683 return EMULATE_FAIL;
684
685 if (!(vcpu->arch.msr & MSR_FP)) {
686 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
687 return EMULATE_AGAIN;
688 }
689
690 kvmppc_giveup_ext(vcpu, MSR_FP);
691 preempt_disable();
692 enable_kernel_fp();
693 /* Do we need to clear FE0 / FE1 here? Don't think so. */
694
695#ifdef DEBUG
696 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
697 u32 f;
698 cvt_df((double*)&vcpu->arch.fpr[i], (float*)&f, &t);
699 dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n",
700 i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]);
701 }
702#endif
703
704 switch (get_op(inst)) {
705 case OP_PSQ_L:
706 {
707 ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
708 bool w = inst_get_field(inst, 16, 16) ? true : false;
709 int i = inst_get_field(inst, 17, 19);
710
711 addr += get_d_signext(inst);
712 emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
713 break;
714 }
715 case OP_PSQ_LU:
716 {
717 ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
718 bool w = inst_get_field(inst, 16, 16) ? true : false;
719 int i = inst_get_field(inst, 17, 19);
720
721 addr += get_d_signext(inst);
722 emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
723
724 if (emulated == EMULATE_DONE)
725 kvmppc_set_gpr(vcpu, ax_ra, addr);
726 break;
727 }
728 case OP_PSQ_ST:
729 {
730 ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
731 bool w = inst_get_field(inst, 16, 16) ? true : false;
732 int i = inst_get_field(inst, 17, 19);
733
734 addr += get_d_signext(inst);
735 emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
736 break;
737 }
738 case OP_PSQ_STU:
739 {
740 ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
741 bool w = inst_get_field(inst, 16, 16) ? true : false;
742 int i = inst_get_field(inst, 17, 19);
743
744 addr += get_d_signext(inst);
745 emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
746
747 if (emulated == EMULATE_DONE)
748 kvmppc_set_gpr(vcpu, ax_ra, addr);
749 break;
750 }
751 case 4:
752 /* X form */
753 switch (inst_get_field(inst, 21, 30)) {
754 case OP_4X_PS_CMPU0:
755 /* XXX */
756 emulated = EMULATE_FAIL;
757 break;
758 case OP_4X_PSQ_LX:
759 {
760 ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
761 bool w = inst_get_field(inst, 21, 21) ? true : false;
762 int i = inst_get_field(inst, 22, 24);
763
764 addr += kvmppc_get_gpr(vcpu, ax_rb);
765 emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
766 break;
767 }
768 case OP_4X_PS_CMPO0:
769 /* XXX */
770 emulated = EMULATE_FAIL;
771 break;
772 case OP_4X_PSQ_LUX:
773 {
774 ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
775 bool w = inst_get_field(inst, 21, 21) ? true : false;
776 int i = inst_get_field(inst, 22, 24);
777
778 addr += kvmppc_get_gpr(vcpu, ax_rb);
779 emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
780
781 if (emulated == EMULATE_DONE)
782 kvmppc_set_gpr(vcpu, ax_ra, addr);
783 break;
784 }
785 case OP_4X_PS_NEG:
786 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
787 vcpu->arch.fpr[ax_rd] ^= 0x8000000000000000ULL;
788 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
789 vcpu->arch.qpr[ax_rd] ^= 0x80000000;
790 break;
791 case OP_4X_PS_CMPU1:
792 /* XXX */
793 emulated = EMULATE_FAIL;
794 break;
795 case OP_4X_PS_MR:
796 WARN_ON(rcomp);
797 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
798 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
799 break;
800 case OP_4X_PS_CMPO1:
801 /* XXX */
802 emulated = EMULATE_FAIL;
803 break;
804 case OP_4X_PS_NABS:
805 WARN_ON(rcomp);
806 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
807 vcpu->arch.fpr[ax_rd] |= 0x8000000000000000ULL;
808 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
809 vcpu->arch.qpr[ax_rd] |= 0x80000000;
810 break;
811 case OP_4X_PS_ABS:
812 WARN_ON(rcomp);
813 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
814 vcpu->arch.fpr[ax_rd] &= ~0x8000000000000000ULL;
815 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
816 vcpu->arch.qpr[ax_rd] &= ~0x80000000;
817 break;
818 case OP_4X_PS_MERGE00:
819 WARN_ON(rcomp);
820 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
821 /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
822 cvt_df((double*)&vcpu->arch.fpr[ax_rb],
823 (float*)&vcpu->arch.qpr[ax_rd], &t);
824 break;
825 case OP_4X_PS_MERGE01:
826 WARN_ON(rcomp);
827 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
828 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
829 break;
830 case OP_4X_PS_MERGE10:
831 WARN_ON(rcomp);
832 /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
833 cvt_fd((float*)&vcpu->arch.qpr[ax_ra],
834 (double*)&vcpu->arch.fpr[ax_rd], &t);
835 /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
836 cvt_df((double*)&vcpu->arch.fpr[ax_rb],
837 (float*)&vcpu->arch.qpr[ax_rd], &t);
838 break;
839 case OP_4X_PS_MERGE11:
840 WARN_ON(rcomp);
841 /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
842 cvt_fd((float*)&vcpu->arch.qpr[ax_ra],
843 (double*)&vcpu->arch.fpr[ax_rd], &t);
844 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
845 break;
846 }
847 /* XW form */
848 switch (inst_get_field(inst, 25, 30)) {
849 case OP_4XW_PSQ_STX:
850 {
851 ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
852 bool w = inst_get_field(inst, 21, 21) ? true : false;
853 int i = inst_get_field(inst, 22, 24);
854
855 addr += kvmppc_get_gpr(vcpu, ax_rb);
856 emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
857 break;
858 }
859 case OP_4XW_PSQ_STUX:
860 {
861 ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
862 bool w = inst_get_field(inst, 21, 21) ? true : false;
863 int i = inst_get_field(inst, 22, 24);
864
865 addr += kvmppc_get_gpr(vcpu, ax_rb);
866 emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
867
868 if (emulated == EMULATE_DONE)
869 kvmppc_set_gpr(vcpu, ax_ra, addr);
870 break;
871 }
872 }
873 /* A form */
874 switch (inst_get_field(inst, 26, 30)) {
875 case OP_4A_PS_SUM1:
876 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
877 ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds);
878 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rc];
879 break;
880 case OP_4A_PS_SUM0:
881 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
882 ax_ra, ax_rb, SCALAR_NO_PS1 | SCALAR_LOW, fps_fadds);
883 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rc];
884 break;
885 case OP_4A_PS_MULS0:
886 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
887 ax_ra, ax_rc, SCALAR_HIGH, fps_fmuls);
888 break;
889 case OP_4A_PS_MULS1:
890 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
891 ax_ra, ax_rc, SCALAR_LOW, fps_fmuls);
892 break;
893 case OP_4A_PS_MADDS0:
894 emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
895 ax_ra, ax_rc, ax_rb, SCALAR_HIGH, fps_fmadds);
896 break;
897 case OP_4A_PS_MADDS1:
898 emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
899 ax_ra, ax_rc, ax_rb, SCALAR_LOW, fps_fmadds);
900 break;
901 case OP_4A_PS_DIV:
902 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
903 ax_ra, ax_rb, SCALAR_NONE, fps_fdivs);
904 break;
905 case OP_4A_PS_SUB:
906 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
907 ax_ra, ax_rb, SCALAR_NONE, fps_fsubs);
908 break;
909 case OP_4A_PS_ADD:
910 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
911 ax_ra, ax_rb, SCALAR_NONE, fps_fadds);
912 break;
913 case OP_4A_PS_SEL:
914 emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
915 ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fsel);
916 break;
917 case OP_4A_PS_RES:
918 emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
919 ax_rb, fps_fres);
920 break;
921 case OP_4A_PS_MUL:
922 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
923 ax_ra, ax_rc, SCALAR_NONE, fps_fmuls);
924 break;
925 case OP_4A_PS_RSQRTE:
926 emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
927 ax_rb, fps_frsqrte);
928 break;
929 case OP_4A_PS_MSUB:
930 emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
931 ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmsubs);
932 break;
933 case OP_4A_PS_MADD:
934 emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
935 ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmadds);
936 break;
937 case OP_4A_PS_NMSUB:
938 emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
939 ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmsubs);
940 break;
941 case OP_4A_PS_NMADD:
942 emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
943 ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmadds);
944 break;
945 }
946 break;
947
948 /* Real FPU operations */
949
950 case OP_LFS:
951 {
952 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
953
954 emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
955 FPU_LS_SINGLE);
956 break;
957 }
958 case OP_LFSU:
959 {
960 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
961
962 emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
963 FPU_LS_SINGLE);
964
965 if (emulated == EMULATE_DONE)
966 kvmppc_set_gpr(vcpu, ax_ra, addr);
967 break;
968 }
969 case OP_LFD:
970 {
971 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
972
973 emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
974 FPU_LS_DOUBLE);
975 break;
976 }
977 case OP_LFDU:
978 {
979 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
980
981 emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
982 FPU_LS_DOUBLE);
983
984 if (emulated == EMULATE_DONE)
985 kvmppc_set_gpr(vcpu, ax_ra, addr);
986 break;
987 }
988 case OP_STFS:
989 {
990 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
991
992 emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
993 FPU_LS_SINGLE);
994 break;
995 }
996 case OP_STFSU:
997 {
998 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
999
1000 emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
1001 FPU_LS_SINGLE);
1002
1003 if (emulated == EMULATE_DONE)
1004 kvmppc_set_gpr(vcpu, ax_ra, addr);
1005 break;
1006 }
1007 case OP_STFD:
1008 {
1009 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
1010
1011 emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
1012 FPU_LS_DOUBLE);
1013 break;
1014 }
1015 case OP_STFDU:
1016 {
1017 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
1018
1019 emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
1020 FPU_LS_DOUBLE);
1021
1022 if (emulated == EMULATE_DONE)
1023 kvmppc_set_gpr(vcpu, ax_ra, addr);
1024 break;
1025 }
1026 case 31:
1027 switch (inst_get_field(inst, 21, 30)) {
1028 case OP_31_LFSX:
1029 {
1030 ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
1031
1032 addr += kvmppc_get_gpr(vcpu, ax_rb);
1033 emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
1034 addr, FPU_LS_SINGLE);
1035 break;
1036 }
1037 case OP_31_LFSUX:
1038 {
1039 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
1040 kvmppc_get_gpr(vcpu, ax_rb);
1041
1042 emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
1043 addr, FPU_LS_SINGLE);
1044
1045 if (emulated == EMULATE_DONE)
1046 kvmppc_set_gpr(vcpu, ax_ra, addr);
1047 break;
1048 }
1049 case OP_31_LFDX:
1050 {
1051 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
1052 kvmppc_get_gpr(vcpu, ax_rb);
1053
1054 emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
1055 addr, FPU_LS_DOUBLE);
1056 break;
1057 }
1058 case OP_31_LFDUX:
1059 {
1060 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
1061 kvmppc_get_gpr(vcpu, ax_rb);
1062
1063 emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
1064 addr, FPU_LS_DOUBLE);
1065
1066 if (emulated == EMULATE_DONE)
1067 kvmppc_set_gpr(vcpu, ax_ra, addr);
1068 break;
1069 }
1070 case OP_31_STFSX:
1071 {
1072 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
1073 kvmppc_get_gpr(vcpu, ax_rb);
1074
1075 emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
1076 addr, FPU_LS_SINGLE);
1077 break;
1078 }
1079 case OP_31_STFSUX:
1080 {
1081 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
1082 kvmppc_get_gpr(vcpu, ax_rb);
1083
1084 emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
1085 addr, FPU_LS_SINGLE);
1086
1087 if (emulated == EMULATE_DONE)
1088 kvmppc_set_gpr(vcpu, ax_ra, addr);
1089 break;
1090 }
1091 case OP_31_STFX:
1092 {
1093 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
1094 kvmppc_get_gpr(vcpu, ax_rb);
1095
1096 emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
1097 addr, FPU_LS_DOUBLE);
1098 break;
1099 }
1100 case OP_31_STFUX:
1101 {
1102 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
1103 kvmppc_get_gpr(vcpu, ax_rb);
1104
1105 emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
1106 addr, FPU_LS_DOUBLE);
1107
1108 if (emulated == EMULATE_DONE)
1109 kvmppc_set_gpr(vcpu, ax_ra, addr);
1110 break;
1111 }
1112 case OP_31_STFIWX:
1113 {
1114 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
1115 kvmppc_get_gpr(vcpu, ax_rb);
1116
1117 emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
1118 addr,
1119 FPU_LS_SINGLE_LOW);
1120 break;
1121 }
1122 break;
1123 }
1124 break;
1125 case 59:
1126 switch (inst_get_field(inst, 21, 30)) {
1127 case OP_59_FADDS:
1128 fpd_fadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1129 kvmppc_sync_qpr(vcpu, ax_rd);
1130 break;
1131 case OP_59_FSUBS:
1132 fpd_fsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1133 kvmppc_sync_qpr(vcpu, ax_rd);
1134 break;
1135 case OP_59_FDIVS:
1136 fpd_fdivs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1137 kvmppc_sync_qpr(vcpu, ax_rd);
1138 break;
1139 case OP_59_FRES:
1140 fpd_fres(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
1141 kvmppc_sync_qpr(vcpu, ax_rd);
1142 break;
1143 case OP_59_FRSQRTES:
1144 fpd_frsqrtes(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
1145 kvmppc_sync_qpr(vcpu, ax_rd);
1146 break;
1147 }
1148 switch (inst_get_field(inst, 26, 30)) {
1149 case OP_59_FMULS:
1150 fpd_fmuls(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
1151 kvmppc_sync_qpr(vcpu, ax_rd);
1152 break;
1153 case OP_59_FMSUBS:
1154 fpd_fmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1155 kvmppc_sync_qpr(vcpu, ax_rd);
1156 break;
1157 case OP_59_FMADDS:
1158 fpd_fmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1159 kvmppc_sync_qpr(vcpu, ax_rd);
1160 break;
1161 case OP_59_FNMSUBS:
1162 fpd_fnmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1163 kvmppc_sync_qpr(vcpu, ax_rd);
1164 break;
1165 case OP_59_FNMADDS:
1166 fpd_fnmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1167 kvmppc_sync_qpr(vcpu, ax_rd);
1168 break;
1169 }
1170 break;
1171 case 63:
1172 switch (inst_get_field(inst, 21, 30)) {
1173 case OP_63_MTFSB0:
1174 case OP_63_MTFSB1:
1175 case OP_63_MCRFS:
1176 case OP_63_MTFSFI:
1177 /* XXX need to implement */
1178 break;
1179 case OP_63_MFFS:
1180 /* XXX missing CR */
1181 *fpr_d = vcpu->arch.fpscr;
1182 break;
1183 case OP_63_MTFSF:
1184 /* XXX missing fm bits */
1185 /* XXX missing CR */
1186 vcpu->arch.fpscr = *fpr_b;
1187 break;
1188 case OP_63_FCMPU:
1189 {
1190 u32 tmp_cr;
1191 u32 cr0_mask = 0xf0000000;
1192 u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
1193
1194 fpd_fcmpu(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
1195 cr &= ~(cr0_mask >> cr_shift);
 1196 			cr |= (tmp_cr & cr0_mask) >> cr_shift;
1197 break;
1198 }
1199 case OP_63_FCMPO:
1200 {
1201 u32 tmp_cr;
1202 u32 cr0_mask = 0xf0000000;
1203 u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
1204
1205 fpd_fcmpo(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
1206 cr &= ~(cr0_mask >> cr_shift);
 1207 			cr |= (tmp_cr & cr0_mask) >> cr_shift;
1208 break;
1209 }
1210 case OP_63_FNEG:
1211 fpd_fneg(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
1212 break;
1213 case OP_63_FMR:
1214 *fpr_d = *fpr_b;
1215 break;
1216 case OP_63_FABS:
1217 fpd_fabs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
1218 break;
1219 case OP_63_FCPSGN:
1220 fpd_fcpsgn(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1221 break;
1222 case OP_63_FDIV:
1223 fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1224 break;
1225 case OP_63_FADD:
1226 fpd_fadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1227 break;
1228 case OP_63_FSUB:
1229 fpd_fsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1230 break;
1231 case OP_63_FCTIW:
1232 fpd_fctiw(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
1233 break;
1234 case OP_63_FCTIWZ:
1235 fpd_fctiwz(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
1236 break;
1237 case OP_63_FRSP:
1238 fpd_frsp(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
1239 kvmppc_sync_qpr(vcpu, ax_rd);
1240 break;
1241 case OP_63_FRSQRTE:
1242 {
1243 double one = 1.0f;
1244
1245 /* fD = sqrt(fB) */
1246 fpd_fsqrt(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
1247 /* fD = 1.0f / fD */
1248 fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
1249 break;
1250 }
1251 }
1252 switch (inst_get_field(inst, 26, 30)) {
1253 case OP_63_FMUL:
1254 fpd_fmul(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
1255 break;
1256 case OP_63_FSEL:
1257 fpd_fsel(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1258 break;
1259 case OP_63_FMSUB:
1260 fpd_fmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1261 break;
1262 case OP_63_FMADD:
1263 fpd_fmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1264 break;
1265 case OP_63_FNMSUB:
1266 fpd_fnmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1267 break;
1268 case OP_63_FNMADD:
1269 fpd_fnmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1270 break;
1271 }
1272 break;
1273 }
1274
1275#ifdef DEBUG
1276 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
1277 u32 f;
1278 cvt_df((double*)&vcpu->arch.fpr[i], (float*)&f, &t);
1279 dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
1280 }
1281#endif
1282
1283 if (rcomp)
1284 kvmppc_set_cr(vcpu, cr);
1285
1286 preempt_enable();
1287
1288 return emulated;
1289}
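
For reference, the fcmpu/fcmpo cases above drop the 4-bit compare result into the CR field selected by instruction bits 6-8 (BF), where field 0 is the most significant nibble of CR. A minimal stand-alone sketch of that placement; the helper name and the sample values are chosen purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Write a 4-bit condition result into CR field 'bf' (0 = most significant). */
    static uint32_t set_cr_field(uint32_t cr, unsigned int bf, uint32_t result)
    {
            uint32_t mask = 0xf0000000u >> (bf * 4);

            cr &= ~mask;                               /* clear the target field */
            cr |= (result & 0xf0000000u) >> (bf * 4);  /* result arrives in the top nibble */
            return cr;
    }

    int main(void)
    {
            /* compare outcome in the top nibble, as the fpd_fcmpu() output CR holds it */
            printf("%08x\n", set_cr_field(0, 3, 0x40000000u)); /* prints 00040000 */
            return 0;
    }
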
diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index c83c60ad96c5..506d5c316c96 100644
--- a/arch/powerpc/kvm/book3s_64_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -22,7 +22,10 @@
22#include <asm/reg.h> 22#include <asm/reg.h>
23#include <asm/page.h> 23#include <asm/page.h>
24#include <asm/asm-offsets.h> 24#include <asm/asm-offsets.h>
25
26#ifdef CONFIG_PPC_BOOK3S_64
25#include <asm/exception-64s.h> 27#include <asm/exception-64s.h>
28#endif
26 29
27/***************************************************************************** 30/*****************************************************************************
28 * * 31 * *
@@ -30,6 +33,39 @@
30 * * 33 * *
31 ****************************************************************************/ 34 ****************************************************************************/
32 35
36#if defined(CONFIG_PPC_BOOK3S_64)
37
38#define LOAD_SHADOW_VCPU(reg) \
39 mfspr reg, SPRN_SPRG_PACA
40
41#define SHADOW_VCPU_OFF PACA_KVM_SVCPU
42#define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR)
43#define FUNC(name) GLUE(.,name)
44
45#elif defined(CONFIG_PPC_BOOK3S_32)
46
47#define LOAD_SHADOW_VCPU(reg) \
48 mfspr reg, SPRN_SPRG_THREAD; \
49 lwz reg, THREAD_KVM_SVCPU(reg); \
50 /* PPC32 can have a NULL pointer - let's check for that */ \
51 mtspr SPRN_SPRG_SCRATCH1, r12; /* Save r12 */ \
52 mfcr r12; \
53 cmpwi reg, 0; \
54 bne 1f; \
55 mfspr reg, SPRN_SPRG_SCRATCH0; \
56 mtcr r12; \
57 mfspr r12, SPRN_SPRG_SCRATCH1; \
58 b kvmppc_resume_\intno; \
591:; \
60 mtcr r12; \
61 mfspr r12, SPRN_SPRG_SCRATCH1; \
62 tophys(reg, reg)
63
64#define SHADOW_VCPU_OFF 0
65#define MSR_NOIRQ MSR_KERNEL
66#define FUNC(name) name
67
68#endif
33 69
34.macro INTERRUPT_TRAMPOLINE intno 70.macro INTERRUPT_TRAMPOLINE intno
35 71
@@ -42,19 +78,19 @@ kvmppc_trampoline_\intno:
42 * First thing to do is to find out if we're coming 78 * First thing to do is to find out if we're coming
43 * from a KVM guest or a Linux process. 79 * from a KVM guest or a Linux process.
44 * 80 *
45 * To distinguish, we check a magic byte in the PACA 81 * To distinguish, we check a magic byte in the PACA/current
46 */ 82 */
47 mfspr r13, SPRN_SPRG_PACA /* r13 = PACA */ 83 LOAD_SHADOW_VCPU(r13)
48 std r12, PACA_KVM_SCRATCH0(r13) 84 PPC_STL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
49 mfcr r12 85 mfcr r12
50 stw r12, PACA_KVM_SCRATCH1(r13) 86 stw r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
51 lbz r12, PACA_KVM_IN_GUEST(r13) 87 lbz r12, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
52 cmpwi r12, KVM_GUEST_MODE_NONE 88 cmpwi r12, KVM_GUEST_MODE_NONE
53 bne ..kvmppc_handler_hasmagic_\intno 89 bne ..kvmppc_handler_hasmagic_\intno
54 /* No KVM guest? Then jump back to the Linux handler! */ 90 /* No KVM guest? Then jump back to the Linux handler! */
55 lwz r12, PACA_KVM_SCRATCH1(r13) 91 lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
56 mtcr r12 92 mtcr r12
57 ld r12, PACA_KVM_SCRATCH0(r13) 93 PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
58 mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */ 94 mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */
59 b kvmppc_resume_\intno /* Get back original handler */ 95 b kvmppc_resume_\intno /* Get back original handler */
60 96
@@ -76,9 +112,7 @@ kvmppc_trampoline_\intno:
76INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSTEM_RESET 112INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSTEM_RESET
77INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK 113INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK
78INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE 114INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE
79INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_SEGMENT
80INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE 115INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE
81INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_SEGMENT
82INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL 116INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL
83INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT 117INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT
84INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM 118INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM
@@ -88,7 +122,14 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSCALL
88INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_TRACE 122INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_TRACE
89INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PERFMON 123INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PERFMON
90INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC 124INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC
125
 126/* These are only available on 64-bit machines */
127
128#ifdef CONFIG_PPC_BOOK3S_64
129INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_SEGMENT
130INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_SEGMENT
91INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX 131INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
132#endif
92 133
93/* 134/*
94 * Bring us back to the faulting code, but skip the 135 * Bring us back to the faulting code, but skip the
@@ -99,11 +140,11 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
99 * 140 *
100 * Input Registers: 141 * Input Registers:
101 * 142 *
102 * R12 = free 143 * R12 = free
103 * R13 = PACA 144 * R13 = Shadow VCPU (PACA)
104 * PACA.KVM.SCRATCH0 = guest R12 145 * SVCPU.SCRATCH0 = guest R12
105 * PACA.KVM.SCRATCH1 = guest CR 146 * SVCPU.SCRATCH1 = guest CR
106 * SPRG_SCRATCH0 = guest R13 147 * SPRG_SCRATCH0 = guest R13
107 * 148 *
108 */ 149 */
109kvmppc_handler_skip_ins: 150kvmppc_handler_skip_ins:
@@ -114,9 +155,9 @@ kvmppc_handler_skip_ins:
114 mtsrr0 r12 155 mtsrr0 r12
115 156
116 /* Clean up all state */ 157 /* Clean up all state */
117 lwz r12, PACA_KVM_SCRATCH1(r13) 158 lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
118 mtcr r12 159 mtcr r12
119 ld r12, PACA_KVM_SCRATCH0(r13) 160 PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
120 mfspr r13, SPRN_SPRG_SCRATCH0 161 mfspr r13, SPRN_SPRG_SCRATCH0
121 162
122 /* And get back into the code */ 163 /* And get back into the code */
@@ -147,41 +188,48 @@ kvmppc_handler_lowmem_trampoline_end:
147 * 188 *
148 * R3 = function 189 * R3 = function
149 * R4 = MSR 190 * R4 = MSR
150 * R5 = CTR 191 * R5 = scratch register
151 * 192 *
152 */ 193 */
153_GLOBAL(kvmppc_rmcall) 194_GLOBAL(kvmppc_rmcall)
154 mtmsr r4 /* Disable relocation, so mtsrr 195 LOAD_REG_IMMEDIATE(r5, MSR_NOIRQ)
196 mtmsr r5 /* Disable relocation and interrupts, so mtsrr
155 doesn't get interrupted */ 197 doesn't get interrupted */
156 mtctr r5 198 sync
157 mtsrr0 r3 199 mtsrr0 r3
158 mtsrr1 r4 200 mtsrr1 r4
159 RFI 201 RFI
160 202
203#if defined(CONFIG_PPC_BOOK3S_32)
204#define STACK_LR INT_FRAME_SIZE+4
205#elif defined(CONFIG_PPC_BOOK3S_64)
206#define STACK_LR _LINK
207#endif
208
161/* 209/*
162 * Activate current's external feature (FPU/Altivec/VSX) 210 * Activate current's external feature (FPU/Altivec/VSX)
163 */ 211 */
164#define define_load_up(what) \ 212#define define_load_up(what) \
165 \ 213 \
166_GLOBAL(kvmppc_load_up_ ## what); \ 214_GLOBAL(kvmppc_load_up_ ## what); \
167 subi r1, r1, INT_FRAME_SIZE; \ 215 PPC_STLU r1, -INT_FRAME_SIZE(r1); \
168 mflr r3; \ 216 mflr r3; \
169 std r3, _LINK(r1); \ 217 PPC_STL r3, STACK_LR(r1); \
170 mfmsr r4; \ 218 PPC_STL r20, _NIP(r1); \
171 std r31, GPR3(r1); \ 219 mfmsr r20; \
172 mr r31, r4; \ 220 LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE); \
173 li r5, MSR_DR; \ 221 andc r3,r20,r3; /* Disable DR,EE */ \
174 oris r5, r5, MSR_EE@h; \ 222 mtmsr r3; \
175 andc r4, r4, r5; \ 223 sync; \
176 mtmsr r4; \ 224 \
177 \ 225 bl FUNC(load_up_ ## what); \
178 bl .load_up_ ## what; \ 226 \
179 \ 227 mtmsr r20; /* Enable DR,EE */ \
180 mtmsr r31; \ 228 sync; \
181 ld r3, _LINK(r1); \ 229 PPC_LL r3, STACK_LR(r1); \
182 ld r31, GPR3(r1); \ 230 PPC_LL r20, _NIP(r1); \
183 addi r1, r1, INT_FRAME_SIZE; \ 231 mtlr r3; \
184 mtlr r3; \ 232 addi r1, r1, INT_FRAME_SIZE; \
185 blr 233 blr
186 234
187define_load_up(fpu) 235define_load_up(fpu)
@@ -194,11 +242,10 @@ define_load_up(vsx)
194 242
195.global kvmppc_trampoline_lowmem 243.global kvmppc_trampoline_lowmem
196kvmppc_trampoline_lowmem: 244kvmppc_trampoline_lowmem:
197 .long kvmppc_handler_lowmem_trampoline - _stext 245 .long kvmppc_handler_lowmem_trampoline - CONFIG_KERNEL_START
198 246
199.global kvmppc_trampoline_enter 247.global kvmppc_trampoline_enter
200kvmppc_trampoline_enter: 248kvmppc_trampoline_enter:
201 .long kvmppc_handler_trampoline_enter - _stext 249 .long kvmppc_handler_trampoline_enter - CONFIG_KERNEL_START
202
203#include "book3s_64_slb.S"
204 250
251#include "book3s_segment.S"
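
The define_load_up() wrappers above give C code a way to fault in a host facility (FPU, Altivec, VSX) before handing it to the guest. A simplified, user-space-compilable sketch of how a caller might gate those calls; the guest_owned_ext bookkeeping, the handle_ext() name and the stub bodies are assumptions for illustration only:

    /* MSR facility bits, restated locally so the sketch is self-contained. */
    #define MSR_FP  (1ul << 13)
    #define MSR_VEC (1ul << 25)

    /* Stubs standing in for the assembly routines defined above. */
    static void kvmppc_load_up_fpu(void)     { }
    static void kvmppc_load_up_altivec(void) { }

    static unsigned long guest_owned_ext;

    /* Called when the guest traps with a "facility unavailable" interrupt. */
    static void handle_ext(unsigned long msr_bit)
    {
            if (guest_owned_ext & msr_bit)
                    return;                       /* guest already owns it */

            if (msr_bit == MSR_FP)
                    kvmppc_load_up_fpu();
            else if (msr_bit == MSR_VEC)
                    kvmppc_load_up_altivec();

            guest_owned_ext |= msr_bit;
    }

    int main(void) { handle_ext(MSR_FP); return 0; }
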
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
new file mode 100644
index 000000000000..7c52ed0b7051
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -0,0 +1,259 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright SUSE Linux Products GmbH 2010
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20/* Real mode helpers */
21
22#if defined(CONFIG_PPC_BOOK3S_64)
23
24#define GET_SHADOW_VCPU(reg) \
25 addi reg, r13, PACA_KVM_SVCPU
26
27#elif defined(CONFIG_PPC_BOOK3S_32)
28
29#define GET_SHADOW_VCPU(reg) \
30 tophys(reg, r2); \
31 lwz reg, (THREAD + THREAD_KVM_SVCPU)(reg); \
32 tophys(reg, reg)
33
34#endif
35
36/* Disable for nested KVM */
37#define USE_QUICK_LAST_INST
38
39
40/* Get helper functions for subarch specific functionality */
41
42#if defined(CONFIG_PPC_BOOK3S_64)
43#include "book3s_64_slb.S"
44#elif defined(CONFIG_PPC_BOOK3S_32)
45#include "book3s_32_sr.S"
46#endif
47
48/******************************************************************************
49 * *
50 * Entry code *
51 * *
52 *****************************************************************************/
53
54.global kvmppc_handler_trampoline_enter
55kvmppc_handler_trampoline_enter:
56
57 /* Required state:
58 *
59 * MSR = ~IR|DR
60 * R13 = PACA
61 * R1 = host R1
62 * R2 = host R2
63 * R10 = guest MSR
64 * all other volatile GPRS = free
65 * SVCPU[CR] = guest CR
66 * SVCPU[XER] = guest XER
67 * SVCPU[CTR] = guest CTR
68 * SVCPU[LR] = guest LR
69 */
70
71 /* r3 = shadow vcpu */
72 GET_SHADOW_VCPU(r3)
73
74 /* Move SRR0 and SRR1 into the respective regs */
75 PPC_LL r9, SVCPU_PC(r3)
76 mtsrr0 r9
77 mtsrr1 r10
78
79 /* Activate guest mode, so faults get handled by KVM */
80 li r11, KVM_GUEST_MODE_GUEST
81 stb r11, SVCPU_IN_GUEST(r3)
82
83 /* Switch to guest segment. This is subarch specific. */
84 LOAD_GUEST_SEGMENTS
85
86 /* Enter guest */
87
88 PPC_LL r4, (SVCPU_CTR)(r3)
89 PPC_LL r5, (SVCPU_LR)(r3)
90 lwz r6, (SVCPU_CR)(r3)
91 lwz r7, (SVCPU_XER)(r3)
92
93 mtctr r4
94 mtlr r5
95 mtcr r6
96 mtxer r7
97
98 PPC_LL r0, (SVCPU_R0)(r3)
99 PPC_LL r1, (SVCPU_R1)(r3)
100 PPC_LL r2, (SVCPU_R2)(r3)
101 PPC_LL r4, (SVCPU_R4)(r3)
102 PPC_LL r5, (SVCPU_R5)(r3)
103 PPC_LL r6, (SVCPU_R6)(r3)
104 PPC_LL r7, (SVCPU_R7)(r3)
105 PPC_LL r8, (SVCPU_R8)(r3)
106 PPC_LL r9, (SVCPU_R9)(r3)
107 PPC_LL r10, (SVCPU_R10)(r3)
108 PPC_LL r11, (SVCPU_R11)(r3)
109 PPC_LL r12, (SVCPU_R12)(r3)
110 PPC_LL r13, (SVCPU_R13)(r3)
111
112 PPC_LL r3, (SVCPU_R3)(r3)
113
114 RFI
115kvmppc_handler_trampoline_enter_end:
116
117
118
119/******************************************************************************
120 * *
121 * Exit code *
122 * *
123 *****************************************************************************/
124
125.global kvmppc_handler_trampoline_exit
126kvmppc_handler_trampoline_exit:
127
128 /* Register usage at this point:
129 *
130 * SPRG_SCRATCH0 = guest R13
131 * R12 = exit handler id
132 * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
133 * SVCPU.SCRATCH0 = guest R12
134 * SVCPU.SCRATCH1 = guest CR
135 *
136 */
137
138 /* Save registers */
139
140 PPC_STL r0, (SHADOW_VCPU_OFF + SVCPU_R0)(r13)
141 PPC_STL r1, (SHADOW_VCPU_OFF + SVCPU_R1)(r13)
142 PPC_STL r2, (SHADOW_VCPU_OFF + SVCPU_R2)(r13)
143 PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_R3)(r13)
144 PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_R4)(r13)
145 PPC_STL r5, (SHADOW_VCPU_OFF + SVCPU_R5)(r13)
146 PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_R6)(r13)
147 PPC_STL r7, (SHADOW_VCPU_OFF + SVCPU_R7)(r13)
148 PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R8)(r13)
149 PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R9)(r13)
150 PPC_STL r10, (SHADOW_VCPU_OFF + SVCPU_R10)(r13)
151 PPC_STL r11, (SHADOW_VCPU_OFF + SVCPU_R11)(r13)
152
153 /* Restore R1/R2 so we can handle faults */
154 PPC_LL r1, (SHADOW_VCPU_OFF + SVCPU_HOST_R1)(r13)
155 PPC_LL r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13)
156
157 /* Save guest PC and MSR */
158 mfsrr0 r3
159 mfsrr1 r4
160
161 PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13)
162 PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13)
163
164 /* Get scratch'ed off registers */
165 mfspr r9, SPRN_SPRG_SCRATCH0
166 PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
167 lwz r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
168
169 PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R13)(r13)
170 PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R12)(r13)
171 stw r7, (SHADOW_VCPU_OFF + SVCPU_CR)(r13)
172
173 /* Save more register state */
174
175 mfxer r5
176 mfdar r6
177 mfdsisr r7
178 mfctr r8
179 mflr r9
180
181 stw r5, (SHADOW_VCPU_OFF + SVCPU_XER)(r13)
182 PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_FAULT_DAR)(r13)
183 stw r7, (SHADOW_VCPU_OFF + SVCPU_FAULT_DSISR)(r13)
184 PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_CTR)(r13)
185 PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_LR)(r13)
186
187 /*
 188 * In order for us to easily get the last instruction
189 * we got the #vmexit at, we exploit the fact that the
190 * virtual layout is still the same here, so we can just
191 * ld from the guest's PC address
192 */
193
194 /* We only load the last instruction when it's safe */
195 cmpwi r12, BOOK3S_INTERRUPT_DATA_STORAGE
196 beq ld_last_inst
197 cmpwi r12, BOOK3S_INTERRUPT_PROGRAM
198 beq ld_last_inst
199 cmpwi r12, BOOK3S_INTERRUPT_ALIGNMENT
200 beq- ld_last_inst
201
202 b no_ld_last_inst
203
204ld_last_inst:
205 /* Save off the guest instruction we're at */
206
207 /* In case lwz faults */
208 li r0, KVM_INST_FETCH_FAILED
209
210#ifdef USE_QUICK_LAST_INST
211
212 /* Set guest mode to 'jump over instruction' so if lwz faults
213 * we'll just continue at the next IP. */
214 li r9, KVM_GUEST_MODE_SKIP
215 stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
216
217 /* 1) enable paging for data */
218 mfmsr r9
219 ori r11, r9, MSR_DR /* Enable paging for data */
220 mtmsr r11
221 sync
222 /* 2) fetch the instruction */
223 lwz r0, 0(r3)
224 /* 3) disable paging again */
225 mtmsr r9
226 sync
227
228#endif
229 stw r0, (SHADOW_VCPU_OFF + SVCPU_LAST_INST)(r13)
230
231no_ld_last_inst:
232
233 /* Unset guest mode */
234 li r9, KVM_GUEST_MODE_NONE
235 stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
236
237 /* Switch back to host MMU */
238 LOAD_HOST_SEGMENTS
239
240 /* Register usage at this point:
241 *
242 * R1 = host R1
243 * R2 = host R2
244 * R12 = exit handler id
245 * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
246 * SVCPU.* = guest *
247 *
248 */
249
250 /* RFI into the highmem handler */
251 mfmsr r7
252 ori r7, r7, MSR_IR|MSR_DR|MSR_RI|MSR_ME /* Enable paging */
253 mtsrr1 r7
254 /* Load highmem handler address */
255 PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_VMHANDLER)(r13)
256 mtsrr0 r8
257
258 RFI
259kvmppc_handler_trampoline_exit_end:
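
One detail worth calling out in the exit path above: before the guarded lwz from the guest PC, r0 is preloaded with KVM_INST_FETCH_FAILED, so if the load faults and is skipped, the stored last_inst still carries a recognisable sentinel instead of stale data. The same pattern in plain C; the sentinel value and the helper names here are stand-ins, not the kernel's definitions:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define INST_FETCH_FAILED 0xdeadbeefu   /* stand-in sentinel value */

    /* Pretend fetch that "faults": it returns without writing *out. */
    static bool load_guest_word(uint32_t pc, uint32_t *out)
    {
            (void)pc;
            (void)out;
            return false;
    }

    static uint32_t fetch_last_inst(uint32_t guest_pc)
    {
            uint32_t inst = INST_FETCH_FAILED;   /* preload the sentinel */

            load_guest_word(guest_pc, &inst);    /* a skipped fault leaves inst untouched */
            return inst;                         /* caller can detect the failed fetch */
    }

    int main(void)
    {
            printf("0x%08x\n", fetch_last_inst(0x1000));
            return 0;
    }
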
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 2a3a1953d4bd..a33ab8cc2ccc 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -133,6 +133,12 @@ void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
133 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL); 133 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
134} 134}
135 135
136void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
137 struct kvm_interrupt *irq)
138{
139 clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
140}
141
136/* Deliver the interrupt of the corresponding priority, if possible. */ 142/* Deliver the interrupt of the corresponding priority, if possible. */
137static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, 143static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
138 unsigned int priority) 144 unsigned int priority)
@@ -479,6 +485,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
479{ 485{
480 int i; 486 int i;
481 487
488 vcpu_load(vcpu);
489
482 regs->pc = vcpu->arch.pc; 490 regs->pc = vcpu->arch.pc;
483 regs->cr = kvmppc_get_cr(vcpu); 491 regs->cr = kvmppc_get_cr(vcpu);
484 regs->ctr = vcpu->arch.ctr; 492 regs->ctr = vcpu->arch.ctr;
@@ -499,6 +507,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
499 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) 507 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
500 regs->gpr[i] = kvmppc_get_gpr(vcpu, i); 508 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
501 509
510 vcpu_put(vcpu);
511
502 return 0; 512 return 0;
503} 513}
504 514
@@ -506,6 +516,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
506{ 516{
507 int i; 517 int i;
508 518
519 vcpu_load(vcpu);
520
509 vcpu->arch.pc = regs->pc; 521 vcpu->arch.pc = regs->pc;
510 kvmppc_set_cr(vcpu, regs->cr); 522 kvmppc_set_cr(vcpu, regs->cr);
511 vcpu->arch.ctr = regs->ctr; 523 vcpu->arch.ctr = regs->ctr;
@@ -525,6 +537,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
525 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) 537 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
526 kvmppc_set_gpr(vcpu, i, regs->gpr[i]); 538 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
527 539
540 vcpu_put(vcpu);
541
528 return 0; 542 return 0;
529} 543}
530 544
@@ -553,7 +567,12 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
553int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 567int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
554 struct kvm_translation *tr) 568 struct kvm_translation *tr)
555{ 569{
556 return kvmppc_core_vcpu_translate(vcpu, tr); 570 int r;
571
572 vcpu_load(vcpu);
573 r = kvmppc_core_vcpu_translate(vcpu, tr);
574 vcpu_put(vcpu);
575 return r;
557} 576}
558 577
559int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) 578int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 669a5c5fc7d7..e8a00b0c4449 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -161,10 +161,10 @@ static int __init kvmppc_e500_init(void)
161 flush_icache_range(kvmppc_booke_handlers, 161 flush_icache_range(kvmppc_booke_handlers,
162 kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); 162 kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
163 163
164 return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), THIS_MODULE); 164 return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
165} 165}
166 166
167static void __init kvmppc_e500_exit(void) 167static void __exit kvmppc_e500_exit(void)
168{ 168{
169 kvmppc_booke_exit(); 169 kvmppc_booke_exit();
170} 170}
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index cb72a65f4ecc..4568ec386c2a 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -38,10 +38,12 @@
38#define OP_31_XOP_LBZX 87 38#define OP_31_XOP_LBZX 87
39#define OP_31_XOP_STWX 151 39#define OP_31_XOP_STWX 151
40#define OP_31_XOP_STBX 215 40#define OP_31_XOP_STBX 215
41#define OP_31_XOP_LBZUX 119
41#define OP_31_XOP_STBUX 247 42#define OP_31_XOP_STBUX 247
42#define OP_31_XOP_LHZX 279 43#define OP_31_XOP_LHZX 279
43#define OP_31_XOP_LHZUX 311 44#define OP_31_XOP_LHZUX 311
44#define OP_31_XOP_MFSPR 339 45#define OP_31_XOP_MFSPR 339
46#define OP_31_XOP_LHAX 343
45#define OP_31_XOP_STHX 407 47#define OP_31_XOP_STHX 407
46#define OP_31_XOP_STHUX 439 48#define OP_31_XOP_STHUX 439
47#define OP_31_XOP_MTSPR 467 49#define OP_31_XOP_MTSPR 467
@@ -62,10 +64,12 @@
62#define OP_STBU 39 64#define OP_STBU 39
63#define OP_LHZ 40 65#define OP_LHZ 40
64#define OP_LHZU 41 66#define OP_LHZU 41
67#define OP_LHA 42
68#define OP_LHAU 43
65#define OP_STH 44 69#define OP_STH 44
66#define OP_STHU 45 70#define OP_STHU 45
67 71
68#ifdef CONFIG_PPC64 72#ifdef CONFIG_PPC_BOOK3S
69static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu) 73static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
70{ 74{
71 return 1; 75 return 1;
@@ -82,7 +86,7 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
82 unsigned long dec_nsec; 86 unsigned long dec_nsec;
83 87
84 pr_debug("mtDEC: %x\n", vcpu->arch.dec); 88 pr_debug("mtDEC: %x\n", vcpu->arch.dec);
85#ifdef CONFIG_PPC64 89#ifdef CONFIG_PPC_BOOK3S
86 /* mtdec lowers the interrupt line when positive. */ 90 /* mtdec lowers the interrupt line when positive. */
87 kvmppc_core_dequeue_dec(vcpu); 91 kvmppc_core_dequeue_dec(vcpu);
88 92
@@ -128,7 +132,7 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
128 * from opcode tables in the future. */ 132 * from opcode tables in the future. */
129int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) 133int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
130{ 134{
131 u32 inst = vcpu->arch.last_inst; 135 u32 inst = kvmppc_get_last_inst(vcpu);
132 u32 ea; 136 u32 ea;
133 int ra; 137 int ra;
134 int rb; 138 int rb;
@@ -143,13 +147,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
143 147
144 pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); 148 pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
145 149
146 /* Try again next time */
147 if (inst == KVM_INST_FETCH_FAILED)
148 return EMULATE_DONE;
149
150 switch (get_op(inst)) { 150 switch (get_op(inst)) {
151 case OP_TRAP: 151 case OP_TRAP:
152#ifdef CONFIG_PPC64 152#ifdef CONFIG_PPC_BOOK3S
153 case OP_TRAP_64: 153 case OP_TRAP_64:
154 kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); 154 kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
155#else 155#else
@@ -171,6 +171,19 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
171 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 171 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
172 break; 172 break;
173 173
174 case OP_31_XOP_LBZUX:
175 rt = get_rt(inst);
176 ra = get_ra(inst);
177 rb = get_rb(inst);
178
179 ea = kvmppc_get_gpr(vcpu, rb);
180 if (ra)
181 ea += kvmppc_get_gpr(vcpu, ra);
182
183 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
184 kvmppc_set_gpr(vcpu, ra, ea);
185 break;
186
174 case OP_31_XOP_STWX: 187 case OP_31_XOP_STWX:
175 rs = get_rs(inst); 188 rs = get_rs(inst);
176 emulated = kvmppc_handle_store(run, vcpu, 189 emulated = kvmppc_handle_store(run, vcpu,
@@ -200,6 +213,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
200 kvmppc_set_gpr(vcpu, rs, ea); 213 kvmppc_set_gpr(vcpu, rs, ea);
201 break; 214 break;
202 215
216 case OP_31_XOP_LHAX:
217 rt = get_rt(inst);
218 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
219 break;
220
203 case OP_31_XOP_LHZX: 221 case OP_31_XOP_LHZX:
204 rt = get_rt(inst); 222 rt = get_rt(inst);
205 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 223 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
@@ -450,6 +468,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
450 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 468 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
451 break; 469 break;
452 470
471 case OP_LHA:
472 rt = get_rt(inst);
473 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
474 break;
475
476 case OP_LHAU:
477 ra = get_ra(inst);
478 rt = get_rt(inst);
479 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
480 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
481 break;
482
453 case OP_STH: 483 case OP_STH:
454 rs = get_rs(inst); 484 rs = get_rs(inst);
455 emulated = kvmppc_handle_store(run, vcpu, 485 emulated = kvmppc_handle_store(run, vcpu,
@@ -472,7 +502,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
472 502
473 if (emulated == EMULATE_FAIL) { 503 if (emulated == EMULATE_FAIL) {
474 emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); 504 emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
475 if (emulated == EMULATE_FAIL) { 505 if (emulated == EMULATE_AGAIN) {
506 advance = 0;
507 } else if (emulated == EMULATE_FAIL) {
476 advance = 0; 508 advance = 0;
477 printk(KERN_ERR "Couldn't emulate instruction 0x%08x " 509 printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
478 "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); 510 "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
@@ -480,10 +512,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
480 } 512 }
481 } 513 }
482 514
483 trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated); 515 trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
484 516
517 /* Advance past emulated instruction. */
485 if (advance) 518 if (advance)
486 vcpu->arch.pc += 4; /* Advance past emulated instruction. */ 519 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
487 520
488 return emulated; 521 return emulated;
489} 522}
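
The new lha/lhau/lhax cases go through kvmppc_handle_loads(), which (as the powerpc.c hunks further down show) marks the pending MMIO load for sign extension so the completion path widens the value with a cast chain. A tiny stand-alone illustration of that widening for a 2-byte load:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t gpr = 0xff80;             /* 16-bit value as read from MMIO */

            gpr = (int64_t)(int16_t)gpr;       /* sign-extend, as the completion
                                                  handler does for 2-byte loads  */
            printf("0x%016llx\n", (unsigned long long)gpr); /* 0xffffffffffffff80 */
            return 0;
    }
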
diff --git a/arch/powerpc/kvm/fpu.S b/arch/powerpc/kvm/fpu.S
new file mode 100644
index 000000000000..2b340a3eee90
--- /dev/null
+++ b/arch/powerpc/kvm/fpu.S
@@ -0,0 +1,273 @@
1/*
2 * FPU helper code to use FPU operations from inside the kernel
3 *
4 * Copyright (C) 2010 Alexander Graf (agraf@suse.de)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <asm/reg.h>
14#include <asm/page.h>
15#include <asm/mmu.h>
16#include <asm/pgtable.h>
17#include <asm/cputable.h>
18#include <asm/cache.h>
19#include <asm/thread_info.h>
20#include <asm/ppc_asm.h>
21#include <asm/asm-offsets.h>
22
23/* Instructions operating on single parameters */
24
25/*
26 * Single operation with one input operand
27 *
28 * R3 = (double*)&fpscr
29 * R4 = (short*)&result
30 * R5 = (short*)&param1
31 */
32#define FPS_ONE_IN(name) \
33_GLOBAL(fps_ ## name); \
34 lfd 0,0(r3); /* load up fpscr value */ \
35 MTFSF_L(0); \
36 lfs 0,0(r5); \
37 \
38 name 0,0; \
39 \
40 stfs 0,0(r4); \
41 mffs 0; \
42 stfd 0,0(r3); /* save new fpscr value */ \
43 blr
44
45/*
46 * Single operation with two input operands
47 *
48 * R3 = (double*)&fpscr
49 * R4 = (short*)&result
50 * R5 = (short*)&param1
51 * R6 = (short*)&param2
52 */
53#define FPS_TWO_IN(name) \
54_GLOBAL(fps_ ## name); \
55 lfd 0,0(r3); /* load up fpscr value */ \
56 MTFSF_L(0); \
57 lfs 0,0(r5); \
58 lfs 1,0(r6); \
59 \
60 name 0,0,1; \
61 \
62 stfs 0,0(r4); \
63 mffs 0; \
64 stfd 0,0(r3); /* save new fpscr value */ \
65 blr
66
67/*
68 * Single operation with three input operands
69 *
70 * R3 = (double*)&fpscr
71 * R4 = (short*)&result
72 * R5 = (short*)&param1
73 * R6 = (short*)&param2
74 * R7 = (short*)&param3
75 */
76#define FPS_THREE_IN(name) \
77_GLOBAL(fps_ ## name); \
78 lfd 0,0(r3); /* load up fpscr value */ \
79 MTFSF_L(0); \
80 lfs 0,0(r5); \
81 lfs 1,0(r6); \
82 lfs 2,0(r7); \
83 \
84 name 0,0,1,2; \
85 \
86 stfs 0,0(r4); \
87 mffs 0; \
88 stfd 0,0(r3); /* save new fpscr value */ \
89 blr
90
91FPS_ONE_IN(fres)
92FPS_ONE_IN(frsqrte)
93FPS_ONE_IN(fsqrts)
94FPS_TWO_IN(fadds)
95FPS_TWO_IN(fdivs)
96FPS_TWO_IN(fmuls)
97FPS_TWO_IN(fsubs)
98FPS_THREE_IN(fmadds)
99FPS_THREE_IN(fmsubs)
100FPS_THREE_IN(fnmadds)
101FPS_THREE_IN(fnmsubs)
102FPS_THREE_IN(fsel)
103
104
105/* Instructions operating on double parameters */
106
107/*
108 * Beginning of double instruction processing
109 *
110 * R3 = (double*)&fpscr
111 * R4 = (u32*)&cr
112 * R5 = (double*)&result
113 * R6 = (double*)&param1
114 * R7 = (double*)&param2 [load_two]
115 * R8 = (double*)&param3 [load_three]
116 * LR = instruction call function
117 */
118fpd_load_three:
119 lfd 2,0(r8) /* load param3 */
120fpd_load_two:
121 lfd 1,0(r7) /* load param2 */
122fpd_load_one:
123 lfd 0,0(r6) /* load param1 */
124fpd_load_none:
125 lfd 3,0(r3) /* load up fpscr value */
126 MTFSF_L(3)
127 lwz r6, 0(r4) /* load cr */
128 mtcr r6
129 blr
130
131/*
132 * End of double instruction processing
133 *
134 * R3 = (double*)&fpscr
135 * R4 = (u32*)&cr
136 * R5 = (double*)&result
137 * LR = caller of instruction call function
138 */
139fpd_return:
140 mfcr r6
141 stfd 0,0(r5) /* save result */
142 mffs 0
143 stfd 0,0(r3) /* save new fpscr value */
144 stw r6,0(r4) /* save new cr value */
145 blr
146
147/*
148 * Double operation with no input operand
149 *
150 * R3 = (double*)&fpscr
151 * R4 = (u32*)&cr
152 * R5 = (double*)&result
153 */
154#define FPD_NONE_IN(name) \
155_GLOBAL(fpd_ ## name); \
156 mflr r12; \
157 bl fpd_load_none; \
158 mtlr r12; \
159 \
160 name. 0; /* call instruction */ \
161 b fpd_return
162
163/*
164 * Double operation with one input operand
165 *
166 * R3 = (double*)&fpscr
167 * R4 = (u32*)&cr
168 * R5 = (double*)&result
169 * R6 = (double*)&param1
170 */
171#define FPD_ONE_IN(name) \
172_GLOBAL(fpd_ ## name); \
173 mflr r12; \
174 bl fpd_load_one; \
175 mtlr r12; \
176 \
177 name. 0,0; /* call instruction */ \
178 b fpd_return
179
180/*
181 * Double operation with two input operands
182 *
183 * R3 = (double*)&fpscr
184 * R4 = (u32*)&cr
185 * R5 = (double*)&result
186 * R6 = (double*)&param1
187 * R7 = (double*)&param2
188 * R8 = (double*)&param3
189 */
190#define FPD_TWO_IN(name) \
191_GLOBAL(fpd_ ## name); \
192 mflr r12; \
193 bl fpd_load_two; \
194 mtlr r12; \
195 \
196 name. 0,0,1; /* call instruction */ \
197 b fpd_return
198
199/*
200 * CR Double operation with two input operands
201 *
202 * R3 = (double*)&fpscr
203 * R4 = (u32*)&cr
204 * R5 = (double*)&param1
205 * R6 = (double*)&param2
206 * R7 = (double*)&param3
207 */
208#define FPD_TWO_IN_CR(name) \
209_GLOBAL(fpd_ ## name); \
210 lfd 1,0(r6); /* load param2 */ \
211 lfd 0,0(r5); /* load param1 */ \
212 lfd 3,0(r3); /* load up fpscr value */ \
213 MTFSF_L(3); \
214 lwz r6, 0(r4); /* load cr */ \
215 mtcr r6; \
216 \
217 name 0,0,1; /* call instruction */ \
218 mfcr r6; \
219 mffs 0; \
220 stfd 0,0(r3); /* save new fpscr value */ \
221 stw r6,0(r4); /* save new cr value */ \
222 blr
223
224/*
225 * Double operation with three input operands
226 *
227 * R3 = (double*)&fpscr
228 * R4 = (u32*)&cr
229 * R5 = (double*)&result
230 * R6 = (double*)&param1
231 * R7 = (double*)&param2
232 * R8 = (double*)&param3
233 */
234#define FPD_THREE_IN(name) \
235_GLOBAL(fpd_ ## name); \
236 mflr r12; \
237 bl fpd_load_three; \
238 mtlr r12; \
239 \
240 name. 0,0,1,2; /* call instruction */ \
241 b fpd_return
242
243FPD_ONE_IN(fsqrts)
244FPD_ONE_IN(frsqrtes)
245FPD_ONE_IN(fres)
246FPD_ONE_IN(frsp)
247FPD_ONE_IN(fctiw)
248FPD_ONE_IN(fctiwz)
249FPD_ONE_IN(fsqrt)
250FPD_ONE_IN(fre)
251FPD_ONE_IN(frsqrte)
252FPD_ONE_IN(fneg)
253FPD_ONE_IN(fabs)
254FPD_TWO_IN(fadds)
255FPD_TWO_IN(fsubs)
256FPD_TWO_IN(fdivs)
257FPD_TWO_IN(fmuls)
258FPD_TWO_IN_CR(fcmpu)
259FPD_TWO_IN(fcpsgn)
260FPD_TWO_IN(fdiv)
261FPD_TWO_IN(fadd)
262FPD_TWO_IN(fmul)
263FPD_TWO_IN_CR(fcmpo)
264FPD_TWO_IN(fsub)
265FPD_THREE_IN(fmsubs)
266FPD_THREE_IN(fmadds)
267FPD_THREE_IN(fnmsubs)
268FPD_THREE_IN(fnmadds)
269FPD_THREE_IN(fsel)
270FPD_THREE_IN(fmsub)
271FPD_THREE_IN(fmadd)
272FPD_THREE_IN(fnmsub)
273FPD_THREE_IN(fnmadd)
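
The register comments above double as the C calling convention for these helpers: the fpscr pointer first, then the result and the single-precision operands. A rough sketch of driving one of them from C; the extern prototype is restated here from those comments and should be taken as an approximation of the real declarations in the kernel's kvm_fpu.h header:

    #include <linux/types.h>

    /* Approximate prototype, inferred from the R3..R6 comments above. */
    extern void fps_fadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);

    /* Add two single-precision values through the in-kernel FPU helper.
     * The FPU must already be usable here (e.g. after kvmppc_load_up_fpu()). */
    static u32 emulate_ps_add(u64 *fpscr, u32 a, u32 b)
    {
            u32 result;

            fps_fadds(fpscr, &result, &a, &b);
            return result;
    }
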
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 297fcd2ff7d0..9b8683f39e05 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -70,7 +70,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
70 case EMULATE_FAIL: 70 case EMULATE_FAIL:
71 /* XXX Deliver Program interrupt to guest. */ 71 /* XXX Deliver Program interrupt to guest. */
72 printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__, 72 printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
73 vcpu->arch.last_inst); 73 kvmppc_get_last_inst(vcpu));
74 r = RESUME_HOST; 74 r = RESUME_HOST;
75 break; 75 break;
76 default: 76 default:
@@ -148,6 +148,10 @@ int kvm_dev_ioctl_check_extension(long ext)
148 148
149 switch (ext) { 149 switch (ext) {
150 case KVM_CAP_PPC_SEGSTATE: 150 case KVM_CAP_PPC_SEGSTATE:
151 case KVM_CAP_PPC_PAIRED_SINGLES:
152 case KVM_CAP_PPC_UNSET_IRQ:
153 case KVM_CAP_ENABLE_CAP:
154 case KVM_CAP_PPC_OSI:
151 r = 1; 155 r = 1;
152 break; 156 break;
153 case KVM_CAP_COALESCED_MMIO: 157 case KVM_CAP_COALESCED_MMIO:
@@ -193,12 +197,17 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
193{ 197{
194 struct kvm_vcpu *vcpu; 198 struct kvm_vcpu *vcpu;
195 vcpu = kvmppc_core_vcpu_create(kvm, id); 199 vcpu = kvmppc_core_vcpu_create(kvm, id);
196 kvmppc_create_vcpu_debugfs(vcpu, id); 200 if (!IS_ERR(vcpu))
201 kvmppc_create_vcpu_debugfs(vcpu, id);
197 return vcpu; 202 return vcpu;
198} 203}
199 204
200void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) 205void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
201{ 206{
207 /* Make sure we're not using the vcpu anymore */
208 hrtimer_cancel(&vcpu->arch.dec_timer);
209 tasklet_kill(&vcpu->arch.tasklet);
210
202 kvmppc_remove_vcpu_debugfs(vcpu); 211 kvmppc_remove_vcpu_debugfs(vcpu);
203 kvmppc_core_vcpu_free(vcpu); 212 kvmppc_core_vcpu_free(vcpu);
204} 213}
@@ -278,7 +287,7 @@ static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
278static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, 287static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
279 struct kvm_run *run) 288 struct kvm_run *run)
280{ 289{
281 ulong gpr; 290 u64 gpr;
282 291
283 if (run->mmio.len > sizeof(gpr)) { 292 if (run->mmio.len > sizeof(gpr)) {
284 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); 293 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
@@ -287,6 +296,7 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
287 296
288 if (vcpu->arch.mmio_is_bigendian) { 297 if (vcpu->arch.mmio_is_bigendian) {
289 switch (run->mmio.len) { 298 switch (run->mmio.len) {
299 case 8: gpr = *(u64 *)run->mmio.data; break;
290 case 4: gpr = *(u32 *)run->mmio.data; break; 300 case 4: gpr = *(u32 *)run->mmio.data; break;
291 case 2: gpr = *(u16 *)run->mmio.data; break; 301 case 2: gpr = *(u16 *)run->mmio.data; break;
292 case 1: gpr = *(u8 *)run->mmio.data; break; 302 case 1: gpr = *(u8 *)run->mmio.data; break;
@@ -300,7 +310,43 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
300 } 310 }
301 } 311 }
302 312
313 if (vcpu->arch.mmio_sign_extend) {
314 switch (run->mmio.len) {
315#ifdef CONFIG_PPC64
316 case 4:
317 gpr = (s64)(s32)gpr;
318 break;
319#endif
320 case 2:
321 gpr = (s64)(s16)gpr;
322 break;
323 case 1:
324 gpr = (s64)(s8)gpr;
325 break;
326 }
327 }
328
303 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); 329 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
330
331 switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
332 case KVM_REG_GPR:
333 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
334 break;
335 case KVM_REG_FPR:
336 vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
337 break;
338#ifdef CONFIG_PPC_BOOK3S
339 case KVM_REG_QPR:
340 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
341 break;
342 case KVM_REG_FQPR:
343 vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
344 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
345 break;
346#endif
347 default:
348 BUG();
349 }
304} 350}
305 351
306int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 352int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
@@ -319,12 +365,25 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
319 vcpu->arch.mmio_is_bigendian = is_bigendian; 365 vcpu->arch.mmio_is_bigendian = is_bigendian;
320 vcpu->mmio_needed = 1; 366 vcpu->mmio_needed = 1;
321 vcpu->mmio_is_write = 0; 367 vcpu->mmio_is_write = 0;
368 vcpu->arch.mmio_sign_extend = 0;
322 369
323 return EMULATE_DO_MMIO; 370 return EMULATE_DO_MMIO;
324} 371}
325 372
373/* Same as above, but sign extends */
374int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
375 unsigned int rt, unsigned int bytes, int is_bigendian)
376{
377 int r;
378
379 r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
380 vcpu->arch.mmio_sign_extend = 1;
381
382 return r;
383}
384
326int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 385int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
327 u32 val, unsigned int bytes, int is_bigendian) 386 u64 val, unsigned int bytes, int is_bigendian)
328{ 387{
329 void *data = run->mmio.data; 388 void *data = run->mmio.data;
330 389
@@ -342,6 +401,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
342 /* Store the value at the lowest bytes in 'data'. */ 401 /* Store the value at the lowest bytes in 'data'. */
343 if (is_bigendian) { 402 if (is_bigendian) {
344 switch (bytes) { 403 switch (bytes) {
404 case 8: *(u64 *)data = val; break;
345 case 4: *(u32 *)data = val; break; 405 case 4: *(u32 *)data = val; break;
346 case 2: *(u16 *)data = val; break; 406 case 2: *(u16 *)data = val; break;
347 case 1: *(u8 *)data = val; break; 407 case 1: *(u8 *)data = val; break;
@@ -376,6 +436,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
376 if (!vcpu->arch.dcr_is_write) 436 if (!vcpu->arch.dcr_is_write)
377 kvmppc_complete_dcr_load(vcpu, run); 437 kvmppc_complete_dcr_load(vcpu, run);
378 vcpu->arch.dcr_needed = 0; 438 vcpu->arch.dcr_needed = 0;
439 } else if (vcpu->arch.osi_needed) {
440 u64 *gprs = run->osi.gprs;
441 int i;
442
443 for (i = 0; i < 32; i++)
444 kvmppc_set_gpr(vcpu, i, gprs[i]);
445 vcpu->arch.osi_needed = 0;
379 } 446 }
380 447
381 kvmppc_core_deliver_interrupts(vcpu); 448 kvmppc_core_deliver_interrupts(vcpu);
@@ -396,7 +463,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
396 463
397int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) 464int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
398{ 465{
399 kvmppc_core_queue_external(vcpu, irq); 466 if (irq->irq == KVM_INTERRUPT_UNSET)
467 kvmppc_core_dequeue_external(vcpu, irq);
468 else
469 kvmppc_core_queue_external(vcpu, irq);
400 470
401 if (waitqueue_active(&vcpu->wq)) { 471 if (waitqueue_active(&vcpu->wq)) {
402 wake_up_interruptible(&vcpu->wq); 472 wake_up_interruptible(&vcpu->wq);
@@ -406,6 +476,27 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
406 return 0; 476 return 0;
407} 477}
408 478
479static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
480 struct kvm_enable_cap *cap)
481{
482 int r;
483
484 if (cap->flags)
485 return -EINVAL;
486
487 switch (cap->cap) {
488 case KVM_CAP_PPC_OSI:
489 r = 0;
490 vcpu->arch.osi_enabled = true;
491 break;
492 default:
493 r = -EINVAL;
494 break;
495 }
496
497 return r;
498}
499
409int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 500int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
410 struct kvm_mp_state *mp_state) 501 struct kvm_mp_state *mp_state)
411{ 502{
@@ -434,6 +525,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
434 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); 525 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
435 break; 526 break;
436 } 527 }
528 case KVM_ENABLE_CAP:
529 {
530 struct kvm_enable_cap cap;
531 r = -EFAULT;
532 if (copy_from_user(&cap, argp, sizeof(cap)))
533 goto out;
534 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
535 break;
536 }
437 default: 537 default:
438 r = -EINVAL; 538 r = -EINVAL;
439 } 539 }
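
For completeness, the kvm_vcpu_ioctl_enable_cap() handler added above is what userspace reaches through the KVM_ENABLE_CAP vcpu ioctl; flags must be zero and, so far, only KVM_CAP_PPC_OSI is accepted. A minimal userspace sketch (error handling and VM/vcpu setup omitted):

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Enable OSI hypercall handling on an already-created vcpu fd. */
    static int enable_osi(int vcpu_fd)
    {
            struct kvm_enable_cap cap;

            memset(&cap, 0, sizeof(cap));
            cap.cap = KVM_CAP_PPC_OSI;   /* cap.flags stays zero, as the handler requires */

            return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
    }
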
diff --git a/arch/powerpc/mm/mmu_context_hash32.c b/arch/powerpc/mm/mmu_context_hash32.c
index 0dfba2bf7f31..d0ee554e86e4 100644
--- a/arch/powerpc/mm/mmu_context_hash32.c
+++ b/arch/powerpc/mm/mmu_context_hash32.c
@@ -60,11 +60,7 @@
60static unsigned long next_mmu_context; 60static unsigned long next_mmu_context;
61static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1]; 61static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
62 62
63 63unsigned long __init_new_context(void)
64/*
65 * Set up the context for a new address space.
66 */
67int init_new_context(struct task_struct *t, struct mm_struct *mm)
68{ 64{
69 unsigned long ctx = next_mmu_context; 65 unsigned long ctx = next_mmu_context;
70 66
@@ -74,19 +70,38 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
74 ctx = 0; 70 ctx = 0;
75 } 71 }
76 next_mmu_context = (ctx + 1) & LAST_CONTEXT; 72 next_mmu_context = (ctx + 1) & LAST_CONTEXT;
77 mm->context.id = ctx; 73
74 return ctx;
75}
76EXPORT_SYMBOL_GPL(__init_new_context);
77
78/*
79 * Set up the context for a new address space.
80 */
81int init_new_context(struct task_struct *t, struct mm_struct *mm)
82{
83 mm->context.id = __init_new_context();
78 84
79 return 0; 85 return 0;
80} 86}
81 87
82/* 88/*
89 * Free a context ID. Make sure to call this with preempt disabled!
90 */
91void __destroy_context(unsigned long ctx)
92{
93 clear_bit(ctx, context_map);
94}
95EXPORT_SYMBOL_GPL(__destroy_context);
96
97/*
83 * We're finished using the context for an address space. 98 * We're finished using the context for an address space.
84 */ 99 */
85void destroy_context(struct mm_struct *mm) 100void destroy_context(struct mm_struct *mm)
86{ 101{
87 preempt_disable(); 102 preempt_disable();
88 if (mm->context.id != NO_CONTEXT) { 103 if (mm->context.id != NO_CONTEXT) {
89 clear_bit(mm->context.id, context_map); 104 __destroy_context(mm->context.id);
90 mm->context.id = NO_CONTEXT; 105 mm->context.id = NO_CONTEXT;
91 } 106 }
92 preempt_enable(); 107 preempt_enable();
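
The newly exported __init_new_context()/__destroy_context() pair lets code outside the mm layer (32-bit Book3S KVM is the intended user) borrow context IDs from the same bitmap. A short sketch of that usage; the wrapper names are illustrative, but the preempt_disable() requirement comes straight from the comment above:

    #include <linux/preempt.h>

    extern unsigned long __init_new_context(void);
    extern void __destroy_context(unsigned long ctx);

    static unsigned long borrow_context(void)
    {
            return __init_new_context();    /* grab a free context ID */
    }

    static void return_context(unsigned long ctx)
    {
            preempt_disable();              /* __destroy_context() must run preempt-disabled */
            __destroy_context(ctx);
            preempt_enable();
    }
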
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index 2c9e52267292..7fd90d02d8c6 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -1077,7 +1077,7 @@ static int calculate_lfsr(int n)
1077 index = ENTRIES-1; 1077 index = ENTRIES-1;
1078 1078
1079 /* make sure index is valid */ 1079 /* make sure index is valid */
1080 if ((index > ENTRIES) || (index < 0)) 1080 if ((index >= ENTRIES) || (index < 0))
1081 index = ENTRIES-1; 1081 index = ENTRIES-1;
1082 1082
1083 return initial_lfsr[index]; 1083 return initial_lfsr[index];
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index eeba0a70e466..69d668c072ae 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -171,6 +171,17 @@ config ISS4xx
171 help 171 help
172 This option enables support for the IBM ISS simulation environment 172 This option enables support for the IBM ISS simulation environment
173 173
174config ICON
175 bool "Icon"
176 depends on 44x
177 default n
178 select PPC44x_SIMPLE
179 select 440SPe
180 select PCI
181 select PPC4xx_PCI_EXPRESS
182 help
 183 This option enables support for the Mosaix Technologies Icon board, based on the AMCC PPC440SPe.
184
174#config LUAN 185#config LUAN
175# bool "Luan" 186# bool "Luan"
176# depends on 44x 187# depends on 44x
diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c
index e8c23ccaa1fc..5f7a29d7f590 100644
--- a/arch/powerpc/platforms/44x/ppc44x_simple.c
+++ b/arch/powerpc/platforms/44x/ppc44x_simple.c
@@ -61,7 +61,8 @@ static char *board[] __initdata = {
61 "amcc,redwood", 61 "amcc,redwood",
62 "amcc,sequoia", 62 "amcc,sequoia",
63 "amcc,taishan", 63 "amcc,taishan",
64 "amcc,yosemite" 64 "amcc,yosemite",
65 "mosaixtech,icon"
65}; 66};
66 67
67static int __init ppc44x_probe(void) 68static int __init ppc44x_probe(void)
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index b7f518a60f03..707e572b7c40 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -22,6 +22,7 @@
22#include <asm/prom.h> 22#include <asm/prom.h>
23#include <asm/time.h> 23#include <asm/time.h>
24#include <asm/mpc5121.h> 24#include <asm/mpc5121.h>
25#include <asm/mpc52xx_psc.h>
25 26
26#include "mpc512x.h" 27#include "mpc512x.h"
27 28
@@ -95,9 +96,86 @@ void __init mpc512x_declare_of_platform_devices(void)
95 } 96 }
96} 97}
97 98
99#define DEFAULT_FIFO_SIZE 16
100
101static unsigned int __init get_fifo_size(struct device_node *np,
102 char *prop_name)
103{
104 const unsigned int *fp;
105
106 fp = of_get_property(np, prop_name, NULL);
107 if (fp)
108 return *fp;
109
110 pr_warning("no %s property in %s node, defaulting to %d\n",
111 prop_name, np->full_name, DEFAULT_FIFO_SIZE);
112
113 return DEFAULT_FIFO_SIZE;
114}
115
116#define FIFOC(_base) ((struct mpc512x_psc_fifo __iomem *) \
117 ((u32)(_base) + sizeof(struct mpc52xx_psc)))
118
119/* Init PSC FIFO space for TX and RX slices */
120void __init mpc512x_psc_fifo_init(void)
121{
122 struct device_node *np;
123 void __iomem *psc;
124 unsigned int tx_fifo_size;
125 unsigned int rx_fifo_size;
126 int fifobase = 0; /* current fifo address in 32 bit words */
127
128 for_each_compatible_node(np, NULL, "fsl,mpc5121-psc") {
129 tx_fifo_size = get_fifo_size(np, "fsl,tx-fifo-size");
130 rx_fifo_size = get_fifo_size(np, "fsl,rx-fifo-size");
131
132 /* size in register is in 4 byte units */
133 tx_fifo_size /= 4;
134 rx_fifo_size /= 4;
135 if (!tx_fifo_size)
136 tx_fifo_size = 1;
137 if (!rx_fifo_size)
138 rx_fifo_size = 1;
139
140 psc = of_iomap(np, 0);
141 if (!psc) {
142 pr_err("%s: Can't map %s device\n",
143 __func__, np->full_name);
144 continue;
145 }
146
147 /* FIFO space is 4KiB, check if requested size is available */
148 if ((fifobase + tx_fifo_size + rx_fifo_size) > 0x1000) {
149 pr_err("%s: no fifo space available for %s\n",
150 __func__, np->full_name);
151 iounmap(psc);
152 /*
153 * chances are that another device requests less
154 * fifo space, so we continue.
155 */
156 continue;
157 }
158
159 /* set tx and rx fifo size registers */
160 out_be32(&FIFOC(psc)->txsz, (fifobase << 16) | tx_fifo_size);
161 fifobase += tx_fifo_size;
162 out_be32(&FIFOC(psc)->rxsz, (fifobase << 16) | rx_fifo_size);
163 fifobase += rx_fifo_size;
164
165 /* reset and enable the slices */
166 out_be32(&FIFOC(psc)->txcmd, 0x80);
167 out_be32(&FIFOC(psc)->txcmd, 0x01);
168 out_be32(&FIFOC(psc)->rxcmd, 0x80);
169 out_be32(&FIFOC(psc)->rxcmd, 0x01);
170
171 iounmap(psc);
172 }
173}
174
98void __init mpc512x_init(void) 175void __init mpc512x_init(void)
99{ 176{
100 mpc512x_declare_of_platform_devices(); 177 mpc512x_declare_of_platform_devices();
101 mpc5121_clk_init(); 178 mpc5121_clk_init();
102 mpc512x_restart_init(); 179 mpc512x_restart_init();
180 mpc512x_psc_fifo_init();
103} 181}
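The new mpc512x_psc_fifo_init() above carves per-PSC TX and RX slices out of one block of FIFO RAM: sizes come from the fsl,tx-fifo-size/fsl,rx-fifo-size properties (defaulting to 16 bytes), are converted to 32-bit words, and each slice register packs the running base offset into the upper halfword and the size into the lower one. A minimal stand-alone sketch of that packing arithmetic (the helper and main() driver are illustrative, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    #define FIFO_SPACE 0x1000          /* capacity limit the patch checks against */

    /* Pack one slice the way the txsz/rxsz registers expect it:
     * base offset (in 32-bit words) in bits 31..16, size in bits 15..0. */
    static int alloc_slice(unsigned int *base, unsigned int bytes, uint32_t *regval)
    {
        unsigned int words = bytes / 4;        /* registers count 4-byte units */

        if (!words)
            words = 1;
        if (*base + words > FIFO_SPACE)
            return -1;                 /* no room left; the patch just skips the device */

        *regval = (*base << 16) | words;
        *base += words;
        return 0;
    }

    int main(void)
    {
        unsigned int base = 0;
        uint32_t tx, rx;

        /* one PSC with the 16-byte default for both directions */
        if (!alloc_slice(&base, 16, &tx) && !alloc_slice(&base, 16, &rx))
            printf("txsz=0x%08x rxsz=0x%08x next free word %u\n", tx, rx, base);
        return 0;
    }

After the sizes are programmed, each slice is reset and enabled by writing 0x80 and then 0x01 to the txcmd/rxcmd registers, as the loop above does.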
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
index fda7c2a18282..ca5305a5bd61 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
@@ -168,7 +168,7 @@ static int __devinit mpc52xx_wkup_gpiochip_probe(struct of_device *ofdev,
168 ofchip->gc.get = mpc52xx_wkup_gpio_get; 168 ofchip->gc.get = mpc52xx_wkup_gpio_get;
169 ofchip->gc.set = mpc52xx_wkup_gpio_set; 169 ofchip->gc.set = mpc52xx_wkup_gpio_set;
170 170
171 ret = of_mm_gpiochip_add(ofdev->node, &chip->mmchip); 171 ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip);
172 if (ret) 172 if (ret)
173 return ret; 173 return ret;
174 174
@@ -193,8 +193,11 @@ static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = {
193}; 193};
194 194
195static struct of_platform_driver mpc52xx_wkup_gpiochip_driver = { 195static struct of_platform_driver mpc52xx_wkup_gpiochip_driver = {
196 .name = "gpio_wkup", 196 .driver = {
197 .match_table = mpc52xx_wkup_gpiochip_match, 197 .name = "gpio_wkup",
198 .owner = THIS_MODULE,
199 .of_match_table = mpc52xx_wkup_gpiochip_match,
200 },
198 .probe = mpc52xx_wkup_gpiochip_probe, 201 .probe = mpc52xx_wkup_gpiochip_probe,
199 .remove = mpc52xx_gpiochip_remove, 202 .remove = mpc52xx_gpiochip_remove,
200}; 203};
@@ -329,7 +332,7 @@ static int __devinit mpc52xx_simple_gpiochip_probe(struct of_device *ofdev,
329 ofchip->gc.get = mpc52xx_simple_gpio_get; 332 ofchip->gc.get = mpc52xx_simple_gpio_get;
330 ofchip->gc.set = mpc52xx_simple_gpio_set; 333 ofchip->gc.set = mpc52xx_simple_gpio_set;
331 334
332 ret = of_mm_gpiochip_add(ofdev->node, &chip->mmchip); 335 ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip);
333 if (ret) 336 if (ret)
334 return ret; 337 return ret;
335 338
@@ -349,8 +352,11 @@ static const struct of_device_id mpc52xx_simple_gpiochip_match[] = {
349}; 352};
350 353
351static struct of_platform_driver mpc52xx_simple_gpiochip_driver = { 354static struct of_platform_driver mpc52xx_simple_gpiochip_driver = {
352 .name = "gpio", 355 .driver = {
353 .match_table = mpc52xx_simple_gpiochip_match, 356 .name = "gpio",
357 .owner = THIS_MODULE,
358 .of_match_table = mpc52xx_simple_gpiochip_match,
359 },
354 .probe = mpc52xx_simple_gpiochip_probe, 360 .probe = mpc52xx_simple_gpiochip_probe,
355 .remove = mpc52xx_gpiochip_remove, 361 .remove = mpc52xx_gpiochip_remove,
356}; 362};
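This hunk is one of many identical conversions in the patch: struct of_platform_driver no longer takes .name and .match_table at the top level; both move into the embedded struct device_driver, with the match table renamed .of_match_table and .owner set alongside. Condensed before/after of the registration pattern (the "foo" name and callbacks are placeholders):

    /* before: legacy top-level fields */
    static struct of_platform_driver foo_driver = {
        .name           = "foo",
        .match_table    = foo_match,
        .probe          = foo_probe,
        .remove         = foo_remove,
    };

    /* after: driver-core fields live in .driver */
    static struct of_platform_driver foo_driver = {
        .driver = {
            .name           = "foo",
            .owner          = THIS_MODULE,
            .of_match_table = foo_match,
        },
        .probe          = foo_probe,
        .remove         = foo_remove,
    };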
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index a60ee39d3b78..46c93578cbf0 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -734,8 +734,8 @@ static int __devinit mpc52xx_gpt_probe(struct of_device *ofdev,
734 734
735 spin_lock_init(&gpt->lock); 735 spin_lock_init(&gpt->lock);
736 gpt->dev = &ofdev->dev; 736 gpt->dev = &ofdev->dev;
737 gpt->ipb_freq = mpc5xxx_get_bus_frequency(ofdev->node); 737 gpt->ipb_freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
738 gpt->regs = of_iomap(ofdev->node, 0); 738 gpt->regs = of_iomap(ofdev->dev.of_node, 0);
739 if (!gpt->regs) { 739 if (!gpt->regs) {
740 kfree(gpt); 740 kfree(gpt);
741 return -ENOMEM; 741 return -ENOMEM;
@@ -743,21 +743,21 @@ static int __devinit mpc52xx_gpt_probe(struct of_device *ofdev,
743 743
744 dev_set_drvdata(&ofdev->dev, gpt); 744 dev_set_drvdata(&ofdev->dev, gpt);
745 745
746 mpc52xx_gpt_gpio_setup(gpt, ofdev->node); 746 mpc52xx_gpt_gpio_setup(gpt, ofdev->dev.of_node);
747 mpc52xx_gpt_irq_setup(gpt, ofdev->node); 747 mpc52xx_gpt_irq_setup(gpt, ofdev->dev.of_node);
748 748
749 mutex_lock(&mpc52xx_gpt_list_mutex); 749 mutex_lock(&mpc52xx_gpt_list_mutex);
750 list_add(&gpt->list, &mpc52xx_gpt_list); 750 list_add(&gpt->list, &mpc52xx_gpt_list);
751 mutex_unlock(&mpc52xx_gpt_list_mutex); 751 mutex_unlock(&mpc52xx_gpt_list_mutex);
752 752
753 /* check if this device could be a watchdog */ 753 /* check if this device could be a watchdog */
754 if (of_get_property(ofdev->node, "fsl,has-wdt", NULL) || 754 if (of_get_property(ofdev->dev.of_node, "fsl,has-wdt", NULL) ||
755 of_get_property(ofdev->node, "has-wdt", NULL)) { 755 of_get_property(ofdev->dev.of_node, "has-wdt", NULL)) {
756 const u32 *on_boot_wdt; 756 const u32 *on_boot_wdt;
757 757
758 gpt->wdt_mode = MPC52xx_GPT_CAN_WDT; 758 gpt->wdt_mode = MPC52xx_GPT_CAN_WDT;
759 on_boot_wdt = of_get_property(ofdev->node, "fsl,wdt-on-boot", 759 on_boot_wdt = of_get_property(ofdev->dev.of_node,
760 NULL); 760 "fsl,wdt-on-boot", NULL);
761 if (on_boot_wdt) { 761 if (on_boot_wdt) {
762 dev_info(gpt->dev, "used as watchdog\n"); 762 dev_info(gpt->dev, "used as watchdog\n");
763 gpt->wdt_mode |= MPC52xx_GPT_IS_WDT; 763 gpt->wdt_mode |= MPC52xx_GPT_IS_WDT;
@@ -784,8 +784,11 @@ static const struct of_device_id mpc52xx_gpt_match[] = {
784}; 784};
785 785
786static struct of_platform_driver mpc52xx_gpt_driver = { 786static struct of_platform_driver mpc52xx_gpt_driver = {
787 .name = "mpc52xx-gpt", 787 .driver = {
788 .match_table = mpc52xx_gpt_match, 788 .name = "mpc52xx-gpt",
789 .owner = THIS_MODULE,
790 .of_match_table = mpc52xx_gpt_match,
791 },
789 .probe = mpc52xx_gpt_probe, 792 .probe = mpc52xx_gpt_probe,
790 .remove = mpc52xx_gpt_remove, 793 .remove = mpc52xx_gpt_remove,
791}; 794};
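The companion change running through these files stops using the of_device-private node pointer: ofdev->node becomes ofdev->dev.of_node, since the device-tree node now hangs off the generic struct device. Probe code only swaps the accessor; a minimal sketch (the driver name and property string are hypothetical):

    static int __devinit foo_probe(struct of_device *ofdev,
                                   const struct of_device_id *match)
    {
        struct device_node *np = ofdev->dev.of_node;    /* was ofdev->node */
        void __iomem *regs = of_iomap(np, 0);

        if (!regs)
            return -ENOMEM;
        if (of_get_property(np, "example-property", NULL))    /* hypothetical */
            dev_info(&ofdev->dev, "optional property present\n");
        /* ... */
        return 0;
    }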
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
index d4f8be307cd5..e86aec644501 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -445,14 +445,14 @@ mpc52xx_lpbfifo_probe(struct of_device *op, const struct of_device_id *match)
445 if (lpbfifo.dev != NULL) 445 if (lpbfifo.dev != NULL)
446 return -ENOSPC; 446 return -ENOSPC;
447 447
448 lpbfifo.irq = irq_of_parse_and_map(op->node, 0); 448 lpbfifo.irq = irq_of_parse_and_map(op->dev.of_node, 0);
449 if (!lpbfifo.irq) 449 if (!lpbfifo.irq)
450 return -ENODEV; 450 return -ENODEV;
451 451
452 if (of_address_to_resource(op->node, 0, &res)) 452 if (of_address_to_resource(op->dev.of_node, 0, &res))
453 return -ENODEV; 453 return -ENODEV;
454 lpbfifo.regs_phys = res.start; 454 lpbfifo.regs_phys = res.start;
455 lpbfifo.regs = of_iomap(op->node, 0); 455 lpbfifo.regs = of_iomap(op->dev.of_node, 0);
456 if (!lpbfifo.regs) 456 if (!lpbfifo.regs)
457 return -ENOMEM; 457 return -ENOMEM;
458 458
@@ -537,9 +537,11 @@ static struct of_device_id mpc52xx_lpbfifo_match[] __devinitconst = {
537}; 537};
538 538
539static struct of_platform_driver mpc52xx_lpbfifo_driver = { 539static struct of_platform_driver mpc52xx_lpbfifo_driver = {
540 .owner = THIS_MODULE, 540 .driver = {
541 .name = "mpc52xx-lpbfifo", 541 .name = "mpc52xx-lpbfifo",
542 .match_table = mpc52xx_lpbfifo_match, 542 .owner = THIS_MODULE,
543 .of_match_table = mpc52xx_lpbfifo_match,
544 },
543 .probe = mpc52xx_lpbfifo_probe, 545 .probe = mpc52xx_lpbfifo_probe,
544 .remove = __devexit_p(mpc52xx_lpbfifo_remove), 546 .remove = __devexit_p(mpc52xx_lpbfifo_remove),
545}; 547};
diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c
index f21555d3395a..9f2e52b36f91 100644
--- a/arch/powerpc/platforms/82xx/ep8248e.c
+++ b/arch/powerpc/platforms/82xx/ep8248e.c
@@ -119,12 +119,12 @@ static int __devinit ep8248e_mdio_probe(struct of_device *ofdev,
119 struct device_node *node; 119 struct device_node *node;
120 int ret; 120 int ret;
121 121
122 node = of_get_parent(ofdev->node); 122 node = of_get_parent(ofdev->dev.of_node);
123 of_node_put(node); 123 of_node_put(node);
124 if (node != ep8248e_bcsr_node) 124 if (node != ep8248e_bcsr_node)
125 return -ENODEV; 125 return -ENODEV;
126 126
127 ret = of_address_to_resource(ofdev->node, 0, &res); 127 ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
128 if (ret) 128 if (ret)
129 return ret; 129 return ret;
130 130
@@ -142,7 +142,7 @@ static int __devinit ep8248e_mdio_probe(struct of_device *ofdev,
142 bus->parent = &ofdev->dev; 142 bus->parent = &ofdev->dev;
143 snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); 143 snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
144 144
145 ret = of_mdiobus_register(bus, ofdev->node); 145 ret = of_mdiobus_register(bus, ofdev->dev.of_node);
146 if (ret) 146 if (ret)
147 goto err_free_irq; 147 goto err_free_irq;
148 148
@@ -170,8 +170,9 @@ static const struct of_device_id ep8248e_mdio_match[] = {
170static struct of_platform_driver ep8248e_mdio_driver = { 170static struct of_platform_driver ep8248e_mdio_driver = {
171 .driver = { 171 .driver = {
172 .name = "ep8248e-mdio-bitbang", 172 .name = "ep8248e-mdio-bitbang",
173 .owner = THIS_MODULE,
174 .of_match_table = ep8248e_mdio_match,
173 }, 175 },
174 .match_table = ep8248e_mdio_match,
175 .probe = ep8248e_mdio_probe, 176 .probe = ep8248e_mdio_probe,
176 .remove = ep8248e_mdio_remove, 177 .remove = ep8248e_mdio_remove,
177}; 178};
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 43805348b81e..ebe6c3537209 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -321,7 +321,7 @@ static struct platform_suspend_ops mpc83xx_suspend_ops = {
321static int pmc_probe(struct of_device *ofdev, 321static int pmc_probe(struct of_device *ofdev,
322 const struct of_device_id *match) 322 const struct of_device_id *match)
323{ 323{
324 struct device_node *np = ofdev->node; 324 struct device_node *np = ofdev->dev.of_node;
325 struct resource res; 325 struct resource res;
326 struct pmc_type *type = match->data; 326 struct pmc_type *type = match->data;
327 int ret = 0; 327 int ret = 0;
@@ -423,8 +423,11 @@ static struct of_device_id pmc_match[] = {
423}; 423};
424 424
425static struct of_platform_driver pmc_driver = { 425static struct of_platform_driver pmc_driver = {
426 .name = "mpc83xx-pmc", 426 .driver = {
427 .match_table = pmc_match, 427 .name = "mpc83xx-pmc",
428 .owner = THIS_MODULE,
429 .of_match_table = pmc_match,
430 },
428 .probe = pmc_probe, 431 .probe = pmc_probe,
429 .remove = pmc_remove 432 .remove = pmc_remove
430}; 433};
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index f0684c8ac960..8fe87fc61485 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) Freescale Semicondutor, Inc. 2006-2007. All rights reserved. 2 * Copyright (C) Freescale Semicondutor, Inc. 2006-2010. All rights reserved.
3 * 3 *
4 * Author: Andy Fleming <afleming@freescale.com> 4 * Author: Andy Fleming <afleming@freescale.com>
5 * 5 *
@@ -154,6 +154,10 @@ static int mpc8568_mds_phy_fixups(struct phy_device *phydev)
154 * Setup the architecture 154 * Setup the architecture
155 * 155 *
156 */ 156 */
157#ifdef CONFIG_SMP
158extern void __init mpc85xx_smp_init(void);
159#endif
160
157static void __init mpc85xx_mds_setup_arch(void) 161static void __init mpc85xx_mds_setup_arch(void)
158{ 162{
159 struct device_node *np; 163 struct device_node *np;
@@ -194,6 +198,10 @@ static void __init mpc85xx_mds_setup_arch(void)
194 } 198 }
195#endif 199#endif
196 200
201#ifdef CONFIG_SMP
202 mpc85xx_smp_init();
203#endif
204
197#ifdef CONFIG_QUICC_ENGINE 205#ifdef CONFIG_QUICC_ENGINE
198 np = of_find_compatible_node(NULL, NULL, "fsl,qe"); 206 np = of_find_compatible_node(NULL, NULL, "fsl,qe");
199 if (!np) { 207 if (!np) {
@@ -271,9 +279,49 @@ static void __init mpc85xx_mds_setup_arch(void)
271 BCSR_UCC_RGMII, BCSR_UCC_RTBI); 279 BCSR_UCC_RGMII, BCSR_UCC_RTBI);
272 } 280 }
273 281
282 } else if (machine_is(p1021_mds)) {
283#define BCSR11_ENET_MICRST (0x1 << 5)
284 /* Reset Micrel PHY */
285 clrbits8(&bcsr_regs[11], BCSR11_ENET_MICRST);
286 setbits8(&bcsr_regs[11], BCSR11_ENET_MICRST);
274 } 287 }
288
275 iounmap(bcsr_regs); 289 iounmap(bcsr_regs);
276 } 290 }
291
292 if (machine_is(p1021_mds)) {
293#define MPC85xx_PMUXCR_OFFSET 0x60
294#define MPC85xx_PMUXCR_QE0 0x00008000
295#define MPC85xx_PMUXCR_QE3 0x00001000
296#define MPC85xx_PMUXCR_QE9 0x00000040
297#define MPC85xx_PMUXCR_QE12 0x00000008
298 static __be32 __iomem *pmuxcr;
299
300 np = of_find_node_by_name(NULL, "global-utilities");
301
302 if (np) {
303 pmuxcr = of_iomap(np, 0) + MPC85xx_PMUXCR_OFFSET;
304
305 if (!pmuxcr)
306 printk(KERN_EMERG "Error: Alternate function"
307 " signal multiplex control register not"
308 " mapped!\n");
309 else
310 /* P1021 has pins muxed for QE and other functions. To
311 * enable QE UEC mode, we need to set bit QE0 for UCC1
312 * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9
313 * and QE12 for QE MII management singals in PMUXCR
314 * register.
315 */
316 setbits32(pmuxcr, MPC85xx_PMUXCR_QE0 |
317 MPC85xx_PMUXCR_QE3 |
318 MPC85xx_PMUXCR_QE9 |
319 MPC85xx_PMUXCR_QE12);
320
321 of_node_put(np);
322 }
323
324 }
277#endif /* CONFIG_QUICC_ENGINE */ 325#endif /* CONFIG_QUICC_ENGINE */
278 326
279#ifdef CONFIG_SWIOTLB 327#ifdef CONFIG_SWIOTLB
@@ -330,6 +378,16 @@ static struct of_device_id mpc85xx_ids[] = {
330 {}, 378 {},
331}; 379};
332 380
381static struct of_device_id p1021_ids[] = {
382 { .type = "soc", },
383 { .compatible = "soc", },
384 { .compatible = "simple-bus", },
385 { .type = "qe", },
386 { .compatible = "fsl,qe", },
387 { .compatible = "gianfar", },
388 {},
389};
390
333static int __init mpc85xx_publish_devices(void) 391static int __init mpc85xx_publish_devices(void)
334{ 392{
335 if (machine_is(mpc8568_mds)) 393 if (machine_is(mpc8568_mds))
@@ -342,11 +400,22 @@ static int __init mpc85xx_publish_devices(void)
342 400
343 return 0; 401 return 0;
344} 402}
403
404static int __init p1021_publish_devices(void)
405{
406 /* Publish the QE devices */
407 of_platform_bus_probe(NULL, p1021_ids, NULL);
408
409 return 0;
410}
411
345machine_device_initcall(mpc8568_mds, mpc85xx_publish_devices); 412machine_device_initcall(mpc8568_mds, mpc85xx_publish_devices);
346machine_device_initcall(mpc8569_mds, mpc85xx_publish_devices); 413machine_device_initcall(mpc8569_mds, mpc85xx_publish_devices);
414machine_device_initcall(p1021_mds, p1021_publish_devices);
347 415
348machine_arch_initcall(mpc8568_mds, swiotlb_setup_bus_notifier); 416machine_arch_initcall(mpc8568_mds, swiotlb_setup_bus_notifier);
349machine_arch_initcall(mpc8569_mds, swiotlb_setup_bus_notifier); 417machine_arch_initcall(mpc8569_mds, swiotlb_setup_bus_notifier);
418machine_arch_initcall(p1021_mds, swiotlb_setup_bus_notifier);
350 419
351static void __init mpc85xx_mds_pic_init(void) 420static void __init mpc85xx_mds_pic_init(void)
352{ 421{
@@ -366,7 +435,7 @@ static void __init mpc85xx_mds_pic_init(void)
366 435
367 mpic = mpic_alloc(np, r.start, 436 mpic = mpic_alloc(np, r.start,
368 MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | 437 MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
369 MPIC_BROKEN_FRR_NIRQS, 438 MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
370 0, 256, " OpenPIC "); 439 0, 256, " OpenPIC ");
371 BUG_ON(mpic == NULL); 440 BUG_ON(mpic == NULL);
372 of_node_put(np); 441 of_node_put(np);
@@ -380,7 +449,11 @@ static void __init mpc85xx_mds_pic_init(void)
380 if (!np) 449 if (!np)
381 return; 450 return;
382 } 451 }
383 qe_ic_init(np, 0, qe_ic_cascade_muxed_mpic, NULL); 452 if (machine_is(p1021_mds))
453 qe_ic_init(np, 0, qe_ic_cascade_low_mpic,
454 qe_ic_cascade_high_mpic);
455 else
456 qe_ic_init(np, 0, qe_ic_cascade_muxed_mpic, NULL);
384 of_node_put(np); 457 of_node_put(np);
385#endif /* CONFIG_QUICC_ENGINE */ 458#endif /* CONFIG_QUICC_ENGINE */
386} 459}
@@ -426,3 +499,26 @@ define_machine(mpc8569_mds) {
426 .pcibios_fixup_bus = fsl_pcibios_fixup_bus, 499 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
427#endif 500#endif
428}; 501};
502
503static int __init p1021_mds_probe(void)
504{
505 unsigned long root = of_get_flat_dt_root();
506
507 return of_flat_dt_is_compatible(root, "fsl,P1021MDS");
508
509}
510
511define_machine(p1021_mds) {
512 .name = "P1021 MDS",
513 .probe = p1021_mds_probe,
514 .setup_arch = mpc85xx_mds_setup_arch,
515 .init_IRQ = mpc85xx_mds_pic_init,
516 .get_irq = mpic_get_irq,
517 .restart = fsl_rstcr_restart,
518 .calibrate_decr = generic_calibrate_decr,
519 .progress = udbg_progress,
520#ifdef CONFIG_PCI
521 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
522#endif
523};
524
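For the new P1021 MDS board, the QE pins are routed by OR-ing four enable bits into the guts PMUXCR register with setbits32(). Worked out from the #defines added above, the read-modify-write leaves every other mux field alone and sets only:

    /* QE0  (0x00008000): UCC1 in Ethernet mode (also needed, with QE3, for UCC5)
     * QE3  (0x00001000): UCC5 in Ethernet mode
     * QE9  (0x00000040) and QE12 (0x00000008): QE MII management signals
     *
     * combined mask: 0x00008000 | 0x00001000 | 0x00000040 | 0x00000008
     *              = 0x00009048
     * so setbits32(pmuxcr, mask) performs: new = old | 0x00009048
     */

The rest of the board support follows the usual pattern seen in the hunks above: a flat-device-tree compatible check for "fsl,P1021MDS" in the probe, a machine_device_initcall that publishes the QE child nodes through of_platform_bus_probe(), and a define_machine() block reusing the shared MDS callbacks.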
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 8efe48192f3f..6257e5378615 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -345,7 +345,7 @@ static int axon_msi_shutdown(struct of_device *device)
345static int axon_msi_probe(struct of_device *device, 345static int axon_msi_probe(struct of_device *device,
346 const struct of_device_id *device_id) 346 const struct of_device_id *device_id)
347{ 347{
348 struct device_node *dn = device->node; 348 struct device_node *dn = device->dev.of_node;
349 struct axon_msic *msic; 349 struct axon_msic *msic;
350 unsigned int virq; 350 unsigned int virq;
351 int dcr_base, dcr_len; 351 int dcr_base, dcr_len;
@@ -447,11 +447,12 @@ static const struct of_device_id axon_msi_device_id[] = {
447}; 447};
448 448
449static struct of_platform_driver axon_msi_driver = { 449static struct of_platform_driver axon_msi_driver = {
450 .match_table = axon_msi_device_id,
451 .probe = axon_msi_probe, 450 .probe = axon_msi_probe,
452 .shutdown = axon_msi_shutdown, 451 .shutdown = axon_msi_shutdown,
453 .driver = { 452 .driver = {
454 .name = "axon-msi" 453 .name = "axon-msi",
454 .owner = THIS_MODULE,
455 .of_match_table = axon_msi_device_id,
455 }, 456 },
456}; 457};
457 458
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index e3ec4976fae7..4326b737d913 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -545,7 +545,6 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev)
545{ 545{
546 struct iommu_window *window; 546 struct iommu_window *window;
547 struct cbe_iommu *iommu; 547 struct cbe_iommu *iommu;
548 struct dev_archdata *archdata = &dev->archdata;
549 548
550 /* Current implementation uses the first window available in that 549 /* Current implementation uses the first window available in that
551 * node's iommu. We -might- do something smarter later though it may 550 * node's iommu. We -might- do something smarter later though it may
@@ -554,7 +553,7 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev)
554 iommu = cell_iommu_for_node(dev_to_node(dev)); 553 iommu = cell_iommu_for_node(dev_to_node(dev));
555 if (iommu == NULL || list_empty(&iommu->windows)) { 554 if (iommu == NULL || list_empty(&iommu->windows)) {
556 printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n", 555 printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
557 archdata->of_node ? archdata->of_node->full_name : "?", 556 dev->of_node ? dev->of_node->full_name : "?",
558 dev_to_node(dev)); 557 dev_to_node(dev));
559 return NULL; 558 return NULL;
560 } 559 }
@@ -897,7 +896,7 @@ static u64 cell_iommu_get_fixed_address(struct device *dev)
897 const u32 *ranges = NULL; 896 const u32 *ranges = NULL;
898 int i, len, best, naddr, nsize, pna, range_size; 897 int i, len, best, naddr, nsize, pna, range_size;
899 898
900 np = of_node_get(dev->archdata.of_node); 899 np = of_node_get(dev->of_node);
901 while (1) { 900 while (1) {
902 naddr = of_n_addr_cells(np); 901 naddr = of_n_addr_cells(np);
903 nsize = of_n_size_cells(np); 902 nsize = of_n_size_cells(np);
@@ -1067,7 +1066,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
1067 fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT); 1066 fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
1068 fsize = lmb_phys_mem_size(); 1067 fsize = lmb_phys_mem_size();
1069 1068
1070 if ((fbase + fsize) <= 0x800000000) 1069 if ((fbase + fsize) <= 0x800000000ul)
1071 hbase = 0; /* use the device tree window */ 1070 hbase = 0; /* use the device tree window */
1072 else { 1071 else {
1073 /* If we're over 32 GB we need to cheat. We can't map all of 1072 /* If we're over 32 GB we need to cheat. We can't map all of
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 5c2808252516..1a40da92154c 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1849,8 +1849,7 @@ out:
1849 return ret; 1849 return ret;
1850} 1850}
1851 1851
1852static int spufs_mfc_fsync(struct file *file, struct dentry *dentry, 1852static int spufs_mfc_fsync(struct file *file, int datasync)
1853 int datasync)
1854{ 1853{
1855 return spufs_mfc_flush(file, NULL); 1854 return spufs_mfc_flush(file, NULL);
1856} 1855}
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index fc1b1c42b1dc..e5e5f823d687 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -251,7 +251,7 @@ const struct file_operations spufs_context_fops = {
251 .llseek = dcache_dir_lseek, 251 .llseek = dcache_dir_lseek,
252 .read = generic_read_dir, 252 .read = generic_read_dir,
253 .readdir = dcache_readdir, 253 .readdir = dcache_readdir,
254 .fsync = simple_sync_file, 254 .fsync = noop_fsync,
255}; 255};
256EXPORT_SYMBOL_GPL(spufs_context_fops); 256EXPORT_SYMBOL_GPL(spufs_context_fops);
257 257
diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c
index 0f881f64583e..627ee089e75d 100644
--- a/arch/powerpc/platforms/pasemi/gpio_mdio.c
+++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c
@@ -220,7 +220,7 @@ static int __devinit gpio_mdio_probe(struct of_device *ofdev,
220 const struct of_device_id *match) 220 const struct of_device_id *match)
221{ 221{
222 struct device *dev = &ofdev->dev; 222 struct device *dev = &ofdev->dev;
223 struct device_node *np = ofdev->node; 223 struct device_node *np = ofdev->dev.of_node;
224 struct mii_bus *new_bus; 224 struct mii_bus *new_bus;
225 struct gpio_priv *priv; 225 struct gpio_priv *priv;
226 const unsigned int *prop; 226 const unsigned int *prop;
@@ -301,11 +301,12 @@ MODULE_DEVICE_TABLE(of, gpio_mdio_match);
301 301
302static struct of_platform_driver gpio_mdio_driver = 302static struct of_platform_driver gpio_mdio_driver =
303{ 303{
304 .match_table = gpio_mdio_match,
305 .probe = gpio_mdio_probe, 304 .probe = gpio_mdio_probe,
306 .remove = gpio_mdio_remove, 305 .remove = gpio_mdio_remove,
307 .driver = { 306 .driver = {
308 .name = "gpio-mdio-bitbang", 307 .name = "gpio-mdio-bitbang",
308 .owner = THIS_MODULE,
309 .of_match_table = gpio_mdio_match,
309 }, 310 },
310}; 311};
311 312
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index ac6fdd973291..f372ec1691a3 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -360,10 +360,10 @@ static int pcmcia_notify(struct notifier_block *nb, unsigned long action,
360 /* We know electra_cf devices will always have of_node set, since 360 /* We know electra_cf devices will always have of_node set, since
361 * electra_cf is an of_platform driver. 361 * electra_cf is an of_platform driver.
362 */ 362 */
363 if (!parent->archdata.of_node) 363 if (!parent->of_node)
364 return 0; 364 return 0;
365 365
366 if (!of_device_is_compatible(parent->archdata.of_node, "electra-cf")) 366 if (!of_device_is_compatible(parent->of_node, "electra-cf"))
367 return 0; 367 return 0;
368 368
369 /* We use the direct ops for localbus */ 369 /* We use the direct ops for localbus */
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 6d09f5e3e7e4..23083c397528 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -766,7 +766,7 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
766 BUG(); 766 BUG();
767 }; 767 };
768 768
769 dev->core.archdata.of_node = NULL; 769 dev->core.of_node = NULL;
770 set_dev_node(&dev->core, 0); 770 set_dev_node(&dev->core, 0);
771 771
772 pr_debug("%s:%d add %s\n", __func__, __LINE__, dev_name(&dev->core)); 772 pr_debug("%s:%d add %s\n", __func__, __LINE__, dev_name(&dev->core));
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index 1fefae76e295..e19ff021e711 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -102,7 +102,7 @@ static const struct file_operations hcall_inst_seq_fops = {
102#define CPU_NAME_BUF_SIZE 32 102#define CPU_NAME_BUF_SIZE 32
103 103
104 104
105static void probe_hcall_entry(unsigned long opcode, unsigned long *args) 105static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long *args)
106{ 106{
107 struct hcall_stats *h; 107 struct hcall_stats *h;
108 108
@@ -114,7 +114,7 @@ static void probe_hcall_entry(unsigned long opcode, unsigned long *args)
114 h->purr_start = mfspr(SPRN_PURR); 114 h->purr_start = mfspr(SPRN_PURR);
115} 115}
116 116
117static void probe_hcall_exit(unsigned long opcode, unsigned long retval, 117static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long retval,
118 unsigned long *retbuf) 118 unsigned long *retbuf)
119{ 119{
120 struct hcall_stats *h; 120 struct hcall_stats *h;
@@ -140,11 +140,11 @@ static int __init hcall_inst_init(void)
140 if (!firmware_has_feature(FW_FEATURE_LPAR)) 140 if (!firmware_has_feature(FW_FEATURE_LPAR))
141 return 0; 141 return 0;
142 142
143 if (register_trace_hcall_entry(probe_hcall_entry)) 143 if (register_trace_hcall_entry(probe_hcall_entry, NULL))
144 return -EINVAL; 144 return -EINVAL;
145 145
146 if (register_trace_hcall_exit(probe_hcall_exit)) { 146 if (register_trace_hcall_exit(probe_hcall_exit, NULL)) {
147 unregister_trace_hcall_entry(probe_hcall_entry); 147 unregister_trace_hcall_entry(probe_hcall_entry, NULL);
148 return -EINVAL; 148 return -EINVAL;
149 } 149 }
150 150
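The hcall tracepoint probes gain a leading private-data argument, and registration/unregistration now pass that pointer through; this user keeps it NULL. Reduced to its essentials (the wrapper function is illustrative only):

    /* probes now receive the cookie supplied at registration time */
    static void probe_hcall_entry(void *cookie, unsigned long opcode,
                                  unsigned long *args)
    {
        /* per-opcode accounting as before; cookie is NULL for this user */
    }

    static int __init example_register(void)    /* illustrative wrapper */
    {
        if (register_trace_hcall_entry(probe_hcall_entry, NULL))
            return -EINVAL;
        return 0;
    }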
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 1a0000a4b6d6..d26182d42cbf 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -468,7 +468,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
468 468
469 pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev)); 469 pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));
470 470
471 dn = dev->dev.archdata.of_node; 471 dn = dev->dev.of_node;
472 472
473 /* If we're the direct child of a root bus, then we need to allocate 473 /* If we're the direct child of a root bus, then we need to allocate
474 * an iommu table ourselves. The bus setup code should have setup 474 * an iommu table ourselves. The bus setup code should have setup
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 88f4ae787832..402d2212162f 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -185,7 +185,7 @@ axon_ram_probe(struct of_device *device, const struct of_device_id *device_id)
185 axon_ram_bank_id++; 185 axon_ram_bank_id++;
186 186
187 dev_info(&device->dev, "Found memory controller on %s\n", 187 dev_info(&device->dev, "Found memory controller on %s\n",
188 device->node->full_name); 188 device->dev.of_node->full_name);
189 189
190 bank = kzalloc(sizeof(struct axon_ram_bank), GFP_KERNEL); 190 bank = kzalloc(sizeof(struct axon_ram_bank), GFP_KERNEL);
191 if (bank == NULL) { 191 if (bank == NULL) {
@@ -198,7 +198,7 @@ axon_ram_probe(struct of_device *device, const struct of_device_id *device_id)
198 198
199 bank->device = device; 199 bank->device = device;
200 200
201 if (of_address_to_resource(device->node, 0, &resource) != 0) { 201 if (of_address_to_resource(device->dev.of_node, 0, &resource) != 0) {
202 dev_err(&device->dev, "Cannot access device tree\n"); 202 dev_err(&device->dev, "Cannot access device tree\n");
203 rc = -EFAULT; 203 rc = -EFAULT;
204 goto failed; 204 goto failed;
@@ -253,7 +253,7 @@ axon_ram_probe(struct of_device *device, const struct of_device_id *device_id)
253 blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE); 253 blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
254 add_disk(bank->disk); 254 add_disk(bank->disk);
255 255
256 bank->irq_id = irq_of_parse_and_map(device->node, 0); 256 bank->irq_id = irq_of_parse_and_map(device->dev.of_node, 0);
257 if (bank->irq_id == NO_IRQ) { 257 if (bank->irq_id == NO_IRQ) {
258 dev_err(&device->dev, "Cannot access ECC interrupt ID\n"); 258 dev_err(&device->dev, "Cannot access ECC interrupt ID\n");
259 rc = -EFAULT; 259 rc = -EFAULT;
@@ -327,12 +327,12 @@ static struct of_device_id axon_ram_device_id[] = {
327}; 327};
328 328
329static struct of_platform_driver axon_ram_driver = { 329static struct of_platform_driver axon_ram_driver = {
330 .match_table = axon_ram_device_id,
331 .probe = axon_ram_probe, 330 .probe = axon_ram_probe,
332 .remove = axon_ram_remove, 331 .remove = axon_ram_remove,
333 .driver = { 332 .driver = {
334 .owner = THIS_MODULE, 333 .name = AXON_RAM_MODULE_NAME,
335 .name = AXON_RAM_MODULE_NAME, 334 .owner = THIS_MODULE,
335 .of_match_table = axon_ram_device_id,
336 }, 336 },
337}; 337};
338 338
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c
index 378ebd9aac18..a7c5c470af14 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm.c
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c
@@ -377,7 +377,7 @@ mpc52xx_bcom_probe(struct of_device *op, const struct of_device_id *match)
377 printk(KERN_INFO "DMA: MPC52xx BestComm driver\n"); 377 printk(KERN_INFO "DMA: MPC52xx BestComm driver\n");
378 378
379 /* Get the bestcomm node */ 379 /* Get the bestcomm node */
380 of_node_get(op->node); 380 of_node_get(op->dev.of_node);
381 381
382 /* Prepare SRAM */ 382 /* Prepare SRAM */
383 ofn_sram = of_find_matching_node(NULL, mpc52xx_sram_ids); 383 ofn_sram = of_find_matching_node(NULL, mpc52xx_sram_ids);
@@ -406,10 +406,10 @@ mpc52xx_bcom_probe(struct of_device *op, const struct of_device_id *match)
406 } 406 }
407 407
408 /* Save the node */ 408 /* Save the node */
409 bcom_eng->ofnode = op->node; 409 bcom_eng->ofnode = op->dev.of_node;
410 410
411 /* Get, reserve & map io */ 411 /* Get, reserve & map io */
412 if (of_address_to_resource(op->node, 0, &res_bcom)) { 412 if (of_address_to_resource(op->dev.of_node, 0, &res_bcom)) {
413 printk(KERN_ERR DRIVER_NAME ": " 413 printk(KERN_ERR DRIVER_NAME ": "
414 "Can't get resource\n"); 414 "Can't get resource\n");
415 rv = -EINVAL; 415 rv = -EINVAL;
@@ -453,7 +453,7 @@ error_sramclean:
453 kfree(bcom_eng); 453 kfree(bcom_eng);
454 bcom_sram_cleanup(); 454 bcom_sram_cleanup();
455error_ofput: 455error_ofput:
456 of_node_put(op->node); 456 of_node_put(op->dev.of_node);
457 457
458 printk(KERN_ERR "DMA: MPC52xx BestComm init failed !\n"); 458 printk(KERN_ERR "DMA: MPC52xx BestComm init failed !\n");
459 459
@@ -494,14 +494,12 @@ MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match);
494 494
495 495
496static struct of_platform_driver mpc52xx_bcom_of_platform_driver = { 496static struct of_platform_driver mpc52xx_bcom_of_platform_driver = {
497 .owner = THIS_MODULE,
498 .name = DRIVER_NAME,
499 .match_table = mpc52xx_bcom_of_match,
500 .probe = mpc52xx_bcom_probe, 497 .probe = mpc52xx_bcom_probe,
501 .remove = mpc52xx_bcom_remove, 498 .remove = mpc52xx_bcom_remove,
502 .driver = { 499 .driver = {
503 .name = DRIVER_NAME, 500 .name = DRIVER_NAME,
504 .owner = THIS_MODULE, 501 .owner = THIS_MODULE,
502 .of_match_table = mpc52xx_bcom_of_match,
505 }, 503 },
506}; 504};
507 505
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 3482e3fd89c0..962c2d8dd8d9 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2008 Freescale Semiconductor, Inc. All rights reserved. 2 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc.
3 * 3 *
4 * Author: Tony Li <tony.li@freescale.com> 4 * Author: Tony Li <tony.li@freescale.com>
5 * Jason Jin <Jason.jin@freescale.com> 5 * Jason Jin <Jason.jin@freescale.com>
@@ -22,14 +22,20 @@
22#include <asm/prom.h> 22#include <asm/prom.h>
23#include <asm/hw_irq.h> 23#include <asm/hw_irq.h>
24#include <asm/ppc-pci.h> 24#include <asm/ppc-pci.h>
25#include <asm/mpic.h>
25#include "fsl_msi.h" 26#include "fsl_msi.h"
26 27
28LIST_HEAD(msi_head);
29
27struct fsl_msi_feature { 30struct fsl_msi_feature {
28 u32 fsl_pic_ip; 31 u32 fsl_pic_ip;
29 u32 msiir_offset; 32 u32 msiir_offset;
30}; 33};
31 34
32static struct fsl_msi *fsl_msi; 35struct fsl_msi_cascade_data {
36 struct fsl_msi *msi_data;
37 int index;
38};
33 39
34static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg) 40static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg)
35{ 41{
@@ -54,10 +60,12 @@ static struct irq_chip fsl_msi_chip = {
54static int fsl_msi_host_map(struct irq_host *h, unsigned int virq, 60static int fsl_msi_host_map(struct irq_host *h, unsigned int virq,
55 irq_hw_number_t hw) 61 irq_hw_number_t hw)
56{ 62{
63 struct fsl_msi *msi_data = h->host_data;
57 struct irq_chip *chip = &fsl_msi_chip; 64 struct irq_chip *chip = &fsl_msi_chip;
58 65
59 irq_to_desc(virq)->status |= IRQ_TYPE_EDGE_FALLING; 66 irq_to_desc(virq)->status |= IRQ_TYPE_EDGE_FALLING;
60 67
68 set_irq_chip_data(virq, msi_data);
61 set_irq_chip_and_handler(virq, chip, handle_edge_irq); 69 set_irq_chip_and_handler(virq, chip, handle_edge_irq);
62 70
63 return 0; 71 return 0;
@@ -96,11 +104,12 @@ static int fsl_msi_check_device(struct pci_dev *pdev, int nvec, int type)
96static void fsl_teardown_msi_irqs(struct pci_dev *pdev) 104static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
97{ 105{
98 struct msi_desc *entry; 106 struct msi_desc *entry;
99 struct fsl_msi *msi_data = fsl_msi; 107 struct fsl_msi *msi_data;
100 108
101 list_for_each_entry(entry, &pdev->msi_list, list) { 109 list_for_each_entry(entry, &pdev->msi_list, list) {
102 if (entry->irq == NO_IRQ) 110 if (entry->irq == NO_IRQ)
103 continue; 111 continue;
112 msi_data = get_irq_data(entry->irq);
104 set_irq_msi(entry->irq, NULL); 113 set_irq_msi(entry->irq, NULL);
105 msi_bitmap_free_hwirqs(&msi_data->bitmap, 114 msi_bitmap_free_hwirqs(&msi_data->bitmap,
106 virq_to_hw(entry->irq), 1); 115 virq_to_hw(entry->irq), 1);
@@ -111,9 +120,10 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
111} 120}
112 121
113static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq, 122static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
114 struct msi_msg *msg) 123 struct msi_msg *msg,
124 struct fsl_msi *fsl_msi_data)
115{ 125{
116 struct fsl_msi *msi_data = fsl_msi; 126 struct fsl_msi *msi_data = fsl_msi_data;
117 struct pci_controller *hose = pci_bus_to_host(pdev->bus); 127 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
118 u32 base = 0; 128 u32 base = 0;
119 129
@@ -130,14 +140,19 @@ static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
130 140
131static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) 141static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
132{ 142{
133 int rc, hwirq; 143 int rc, hwirq = -ENOMEM;
134 unsigned int virq; 144 unsigned int virq;
135 struct msi_desc *entry; 145 struct msi_desc *entry;
136 struct msi_msg msg; 146 struct msi_msg msg;
137 struct fsl_msi *msi_data = fsl_msi; 147 struct fsl_msi *msi_data;
138 148
139 list_for_each_entry(entry, &pdev->msi_list, list) { 149 list_for_each_entry(entry, &pdev->msi_list, list) {
140 hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1); 150 list_for_each_entry(msi_data, &msi_head, list) {
151 hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
152 if (hwirq >= 0)
153 break;
154 }
155
141 if (hwirq < 0) { 156 if (hwirq < 0) {
142 rc = hwirq; 157 rc = hwirq;
143 pr_debug("%s: fail allocating msi interrupt\n", 158 pr_debug("%s: fail allocating msi interrupt\n",
@@ -154,25 +169,31 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
154 rc = -ENOSPC; 169 rc = -ENOSPC;
155 goto out_free; 170 goto out_free;
156 } 171 }
172 set_irq_data(virq, msi_data);
157 set_irq_msi(virq, entry); 173 set_irq_msi(virq, entry);
158 174
159 fsl_compose_msi_msg(pdev, hwirq, &msg); 175 fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
160 write_msi_msg(virq, &msg); 176 write_msi_msg(virq, &msg);
161 } 177 }
162 return 0; 178 return 0;
163 179
164out_free: 180out_free:
181 /* free by the caller of this function */
165 return rc; 182 return rc;
166} 183}
167 184
168static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) 185static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
169{ 186{
170 unsigned int cascade_irq; 187 unsigned int cascade_irq;
171 struct fsl_msi *msi_data = fsl_msi; 188 struct fsl_msi *msi_data;
172 int msir_index = -1; 189 int msir_index = -1;
173 u32 msir_value = 0; 190 u32 msir_value = 0;
174 u32 intr_index; 191 u32 intr_index;
175 u32 have_shift = 0; 192 u32 have_shift = 0;
193 struct fsl_msi_cascade_data *cascade_data;
194
195 cascade_data = (struct fsl_msi_cascade_data *)get_irq_data(irq);
196 msi_data = cascade_data->msi_data;
176 197
177 raw_spin_lock(&desc->lock); 198 raw_spin_lock(&desc->lock);
178 if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) { 199 if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
@@ -187,13 +208,13 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
187 if (unlikely(desc->status & IRQ_INPROGRESS)) 208 if (unlikely(desc->status & IRQ_INPROGRESS))
188 goto unlock; 209 goto unlock;
189 210
190 msir_index = (int)desc->handler_data; 211 msir_index = cascade_data->index;
191 212
192 if (msir_index >= NR_MSI_REG) 213 if (msir_index >= NR_MSI_REG)
193 cascade_irq = NO_IRQ; 214 cascade_irq = NO_IRQ;
194 215
195 desc->status |= IRQ_INPROGRESS; 216 desc->status |= IRQ_INPROGRESS;
196 switch (fsl_msi->feature & FSL_PIC_IP_MASK) { 217 switch (msi_data->feature & FSL_PIC_IP_MASK) {
197 case FSL_PIC_IP_MPIC: 218 case FSL_PIC_IP_MPIC:
198 msir_value = fsl_msi_read(msi_data->msi_regs, 219 msir_value = fsl_msi_read(msi_data->msi_regs,
199 msir_index * 0x10); 220 msir_index * 0x10);
@@ -229,6 +250,30 @@ unlock:
229 raw_spin_unlock(&desc->lock); 250 raw_spin_unlock(&desc->lock);
230} 251}
231 252
253static int fsl_of_msi_remove(struct of_device *ofdev)
254{
255 struct fsl_msi *msi = ofdev->dev.platform_data;
256 int virq, i;
257 struct fsl_msi_cascade_data *cascade_data;
258
259 if (msi->list.prev != NULL)
260 list_del(&msi->list);
261 for (i = 0; i < NR_MSI_REG; i++) {
262 virq = msi->msi_virqs[i];
263 if (virq != NO_IRQ) {
264 cascade_data = get_irq_data(virq);
265 kfree(cascade_data);
266 irq_dispose_mapping(virq);
267 }
268 }
269 if (msi->bitmap.bitmap)
270 msi_bitmap_free(&msi->bitmap);
271 iounmap(msi->msi_regs);
272 kfree(msi);
273
274 return 0;
275}
276
232static int __devinit fsl_of_msi_probe(struct of_device *dev, 277static int __devinit fsl_of_msi_probe(struct of_device *dev,
233 const struct of_device_id *match) 278 const struct of_device_id *match)
234{ 279{
@@ -239,17 +284,20 @@ static int __devinit fsl_of_msi_probe(struct of_device *dev,
239 int virt_msir; 284 int virt_msir;
240 const u32 *p; 285 const u32 *p;
241 struct fsl_msi_feature *features = match->data; 286 struct fsl_msi_feature *features = match->data;
287 struct fsl_msi_cascade_data *cascade_data = NULL;
288 int len;
289 u32 offset;
242 290
243 printk(KERN_DEBUG "Setting up Freescale MSI support\n"); 291 printk(KERN_DEBUG "Setting up Freescale MSI support\n");
244 292
245 msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL); 293 msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL);
246 if (!msi) { 294 if (!msi) {
247 dev_err(&dev->dev, "No memory for MSI structure\n"); 295 dev_err(&dev->dev, "No memory for MSI structure\n");
248 err = -ENOMEM; 296 return -ENOMEM;
249 goto error_out;
250 } 297 }
298 dev->dev.platform_data = msi;
251 299
252 msi->irqhost = irq_alloc_host(dev->node, IRQ_HOST_MAP_LINEAR, 300 msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_HOST_MAP_LINEAR,
253 NR_MSI_IRQS, &fsl_msi_host_ops, 0); 301 NR_MSI_IRQS, &fsl_msi_host_ops, 0);
254 302
255 if (msi->irqhost == NULL) { 303 if (msi->irqhost == NULL) {
@@ -259,10 +307,10 @@ static int __devinit fsl_of_msi_probe(struct of_device *dev,
259 } 307 }
260 308
261 /* Get the MSI reg base */ 309 /* Get the MSI reg base */
262 err = of_address_to_resource(dev->node, 0, &res); 310 err = of_address_to_resource(dev->dev.of_node, 0, &res);
263 if (err) { 311 if (err) {
264 dev_err(&dev->dev, "%s resource error!\n", 312 dev_err(&dev->dev, "%s resource error!\n",
265 dev->node->full_name); 313 dev->dev.of_node->full_name);
266 goto error_out; 314 goto error_out;
267 } 315 }
268 316
@@ -285,40 +333,60 @@ static int __devinit fsl_of_msi_probe(struct of_device *dev,
285 goto error_out; 333 goto error_out;
286 } 334 }
287 335
288 p = of_get_property(dev->node, "interrupts", &count); 336 p = of_get_property(dev->dev.of_node, "interrupts", &count);
289 if (!p) { 337 if (!p) {
290 dev_err(&dev->dev, "no interrupts property found on %s\n", 338 dev_err(&dev->dev, "no interrupts property found on %s\n",
291 dev->node->full_name); 339 dev->dev.of_node->full_name);
292 err = -ENODEV; 340 err = -ENODEV;
293 goto error_out; 341 goto error_out;
294 } 342 }
295 if (count % 8 != 0) { 343 if (count % 8 != 0) {
296 dev_err(&dev->dev, "Malformed interrupts property on %s\n", 344 dev_err(&dev->dev, "Malformed interrupts property on %s\n",
297 dev->node->full_name); 345 dev->dev.of_node->full_name);
298 err = -EINVAL; 346 err = -EINVAL;
299 goto error_out; 347 goto error_out;
300 } 348 }
349 offset = 0;
350 p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);
351 if (p)
352 offset = *p / IRQS_PER_MSI_REG;
301 353
302 count /= sizeof(u32); 354 count /= sizeof(u32);
303 for (i = 0; i < count / 2; i++) { 355 for (i = 0; i < min(count / 2, NR_MSI_REG); i++) {
304 if (i > NR_MSI_REG) 356 virt_msir = irq_of_parse_and_map(dev->dev.of_node, i);
305 break;
306 virt_msir = irq_of_parse_and_map(dev->node, i);
307 if (virt_msir != NO_IRQ) { 357 if (virt_msir != NO_IRQ) {
308 set_irq_data(virt_msir, (void *)i); 358 cascade_data = kzalloc(
359 sizeof(struct fsl_msi_cascade_data),
360 GFP_KERNEL);
361 if (!cascade_data) {
362 dev_err(&dev->dev,
363 "No memory for MSI cascade data\n");
364 err = -ENOMEM;
365 goto error_out;
366 }
367 msi->msi_virqs[i] = virt_msir;
368 cascade_data->index = i + offset;
369 cascade_data->msi_data = msi;
370 set_irq_data(virt_msir, (void *)cascade_data);
309 set_irq_chained_handler(virt_msir, fsl_msi_cascade); 371 set_irq_chained_handler(virt_msir, fsl_msi_cascade);
310 } 372 }
311 } 373 }
312 374
313 fsl_msi = msi; 375 list_add_tail(&msi->list, &msi_head);
314 376
315 WARN_ON(ppc_md.setup_msi_irqs); 377 /* The multiple setting ppc_md.setup_msi_irqs will not harm things */
316 ppc_md.setup_msi_irqs = fsl_setup_msi_irqs; 378 if (!ppc_md.setup_msi_irqs) {
317 ppc_md.teardown_msi_irqs = fsl_teardown_msi_irqs; 379 ppc_md.setup_msi_irqs = fsl_setup_msi_irqs;
318 ppc_md.msi_check_device = fsl_msi_check_device; 380 ppc_md.teardown_msi_irqs = fsl_teardown_msi_irqs;
381 ppc_md.msi_check_device = fsl_msi_check_device;
382 } else if (ppc_md.setup_msi_irqs != fsl_setup_msi_irqs) {
383 dev_err(&dev->dev, "Different MSI driver already installed!\n");
384 err = -ENODEV;
385 goto error_out;
386 }
319 return 0; 387 return 0;
320error_out: 388error_out:
321 kfree(msi); 389 fsl_of_msi_remove(dev);
322 return err; 390 return err;
323} 391}
324 392
@@ -345,9 +413,13 @@ static const struct of_device_id fsl_of_msi_ids[] = {
345}; 413};
346 414
347static struct of_platform_driver fsl_of_msi_driver = { 415static struct of_platform_driver fsl_of_msi_driver = {
348 .name = "fsl-msi", 416 .driver = {
349 .match_table = fsl_of_msi_ids, 417 .name = "fsl-msi",
418 .owner = THIS_MODULE,
419 .of_match_table = fsl_of_msi_ids,
420 },
350 .probe = fsl_of_msi_probe, 421 .probe = fsl_of_msi_probe,
422 .remove = fsl_of_msi_remove,
351}; 423};
352 424
353static __init int fsl_of_msi_init(void) 425static __init int fsl_of_msi_init(void)
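fsl_msi.c previously assumed a single MSI bank and kept it in a static pointer; with this change every probed bank is added to msi_head, interrupt allocation walks that list until a bank has a free hardware irq, and the chosen bank is attached to the virq so the teardown path can recover it. The core of the new allocation path, with error handling and the enclosing loop over the device's msi_list descriptors trimmed (the irq_create_mapping() call is unchanged context from the existing driver):

    struct fsl_msi *msi_data;
    int hwirq = -ENOMEM;
    unsigned int virq;

    /* try each registered MSI bank until one has a free hardware irq */
    list_for_each_entry(msi_data, &msi_head, list) {
        hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
        if (hwirq >= 0)
            break;
    }
    if (hwirq < 0)
        return hwirq;                   /* caller frees what was already set up */

    virq = irq_create_mapping(msi_data->irqhost, hwirq);
    set_irq_data(virq, msi_data);       /* found again by fsl_teardown_msi_irqs() */
    set_irq_msi(virq, entry);
    fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
    write_msi_msg(virq, &msg);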
diff --git a/arch/powerpc/sysdev/fsl_msi.h b/arch/powerpc/sysdev/fsl_msi.h
index 331c7e7025b7..624580c252d7 100644
--- a/arch/powerpc/sysdev/fsl_msi.h
+++ b/arch/powerpc/sysdev/fsl_msi.h
@@ -32,8 +32,11 @@ struct fsl_msi {
32 u32 msi_addr_hi; 32 u32 msi_addr_hi;
33 void __iomem *msi_regs; 33 void __iomem *msi_regs;
34 u32 feature; 34 u32 feature;
35 int msi_virqs[NR_MSI_REG];
35 36
36 struct msi_bitmap bitmap; 37 struct msi_bitmap bitmap;
38
39 struct list_head list; /* support multiple MSI banks */
37}; 40};
38 41
39#endif /* _POWERPC_SYSDEV_FSL_MSI_H */ 42#endif /* _POWERPC_SYSDEV_FSL_MSI_H */
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
index a7635a993dca..9082eb921ad9 100644
--- a/arch/powerpc/sysdev/fsl_pmc.c
+++ b/arch/powerpc/sysdev/fsl_pmc.c
@@ -60,7 +60,7 @@ static struct platform_suspend_ops pmc_suspend_ops = {
60 60
61static int pmc_probe(struct of_device *ofdev, const struct of_device_id *id) 61static int pmc_probe(struct of_device *ofdev, const struct of_device_id *id)
62{ 62{
63 pmc_regs = of_iomap(ofdev->node, 0); 63 pmc_regs = of_iomap(ofdev->dev.of_node, 0);
64 if (!pmc_regs) 64 if (!pmc_regs)
65 return -ENOMEM; 65 return -ENOMEM;
66 66
@@ -76,8 +76,11 @@ static const struct of_device_id pmc_ids[] = {
76}; 76};
77 77
78static struct of_platform_driver pmc_driver = { 78static struct of_platform_driver pmc_driver = {
79 .driver.name = "fsl-pmc", 79 .driver = {
80 .match_table = pmc_ids, 80 .name = "fsl-pmc",
81 .owner = THIS_MODULE,
82 .of_match_table = pmc_ids,
83 },
81 .probe = pmc_probe, 84 .probe = pmc_probe,
82}; 85};
83 86
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 71fba88f50db..30e1626b2e85 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -1,6 +1,15 @@
1/* 1/*
2 * Freescale MPC85xx/MPC86xx RapidIO support 2 * Freescale MPC85xx/MPC86xx RapidIO support
3 * 3 *
4 * Copyright 2009 Sysgo AG
5 * Thomas Moll <thomas.moll@sysgo.com>
6 * - fixed maintenance access routines, check for aligned access
7 *
8 * Copyright 2009 Integrated Device Technology, Inc.
9 * Alex Bounine <alexandre.bounine@idt.com>
10 * - Added Port-Write message handling
11 * - Added Machine Check exception handling
12 *
4 * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc. 13 * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc.
5 * Zhang Wei <wei.zhang@freescale.com> 14 * Zhang Wei <wei.zhang@freescale.com>
6 * 15 *
@@ -24,19 +33,30 @@
24#include <linux/of_platform.h> 33#include <linux/of_platform.h>
25#include <linux/delay.h> 34#include <linux/delay.h>
26#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/kfifo.h>
27 37
28#include <asm/io.h> 38#include <asm/io.h>
39#include <asm/machdep.h>
40#include <asm/uaccess.h>
41
42#undef DEBUG_PW /* Port-Write debugging */
29 43
30/* RapidIO definition irq, which read from OF-tree */ 44/* RapidIO definition irq, which read from OF-tree */
31#define IRQ_RIO_BELL(m) (((struct rio_priv *)(m->priv))->bellirq) 45#define IRQ_RIO_BELL(m) (((struct rio_priv *)(m->priv))->bellirq)
32#define IRQ_RIO_TX(m) (((struct rio_priv *)(m->priv))->txirq) 46#define IRQ_RIO_TX(m) (((struct rio_priv *)(m->priv))->txirq)
33#define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq) 47#define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq)
48#define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq)
34 49
35#define RIO_ATMU_REGS_OFFSET 0x10c00 50#define RIO_ATMU_REGS_OFFSET 0x10c00
36#define RIO_P_MSG_REGS_OFFSET 0x11000 51#define RIO_P_MSG_REGS_OFFSET 0x11000
37#define RIO_S_MSG_REGS_OFFSET 0x13000 52#define RIO_S_MSG_REGS_OFFSET 0x13000
38#define RIO_ESCSR 0x158 53#define RIO_ESCSR 0x158
39#define RIO_CCSR 0x15c 54#define RIO_CCSR 0x15c
55#define RIO_LTLEDCSR 0x0608
56#define RIO_LTLEDCSR_IER 0x80000000
57#define RIO_LTLEDCSR_PRT 0x01000000
58#define RIO_LTLEECSR 0x060c
59#define RIO_EPWISR 0x10010
40#define RIO_ISR_AACR 0x10120 60#define RIO_ISR_AACR 0x10120
41#define RIO_ISR_AACR_AA 0x1 /* Accept All ID */ 61#define RIO_ISR_AACR_AA 0x1 /* Accept All ID */
42#define RIO_MAINT_WIN_SIZE 0x400000 62#define RIO_MAINT_WIN_SIZE 0x400000
@@ -55,6 +75,18 @@
55#define RIO_MSG_ISR_QFI 0x00000010 75#define RIO_MSG_ISR_QFI 0x00000010
56#define RIO_MSG_ISR_DIQI 0x00000001 76#define RIO_MSG_ISR_DIQI 0x00000001
57 77
78#define RIO_IPWMR_SEN 0x00100000
79#define RIO_IPWMR_QFIE 0x00000100
80#define RIO_IPWMR_EIE 0x00000020
81#define RIO_IPWMR_CQ 0x00000002
82#define RIO_IPWMR_PWE 0x00000001
83
84#define RIO_IPWSR_QF 0x00100000
85#define RIO_IPWSR_TE 0x00000080
86#define RIO_IPWSR_QFI 0x00000010
87#define RIO_IPWSR_PWD 0x00000008
88#define RIO_IPWSR_PWB 0x00000004
89
58#define RIO_MSG_DESC_SIZE 32 90#define RIO_MSG_DESC_SIZE 32
59#define RIO_MSG_BUFFER_SIZE 4096 91#define RIO_MSG_BUFFER_SIZE 4096
60#define RIO_MIN_TX_RING_SIZE 2 92#define RIO_MIN_TX_RING_SIZE 2
@@ -121,7 +153,7 @@ struct rio_msg_regs {
121 u32 pad10[26]; 153 u32 pad10[26];
122 u32 pwmr; 154 u32 pwmr;
123 u32 pwsr; 155 u32 pwsr;
124 u32 pad11; 156 u32 epwqbar;
125 u32 pwqbar; 157 u32 pwqbar;
126}; 158};
127 159
@@ -160,6 +192,14 @@ struct rio_msg_rx_ring {
160 void *dev_id; 192 void *dev_id;
161}; 193};
162 194
195struct rio_port_write_msg {
196 void *virt;
197 dma_addr_t phys;
198 u32 msg_count;
199 u32 err_count;
200 u32 discard_count;
201};
202
163struct rio_priv { 203struct rio_priv {
164 struct device *dev; 204 struct device *dev;
165 void __iomem *regs_win; 205 void __iomem *regs_win;
@@ -172,11 +212,64 @@ struct rio_priv {
172 struct rio_dbell_ring dbell_ring; 212 struct rio_dbell_ring dbell_ring;
173 struct rio_msg_tx_ring msg_tx_ring; 213 struct rio_msg_tx_ring msg_tx_ring;
174 struct rio_msg_rx_ring msg_rx_ring; 214 struct rio_msg_rx_ring msg_rx_ring;
215 struct rio_port_write_msg port_write_msg;
175 int bellirq; 216 int bellirq;
176 int txirq; 217 int txirq;
177 int rxirq; 218 int rxirq;
219 int pwirq;
220 struct work_struct pw_work;
221 struct kfifo pw_fifo;
222 spinlock_t pw_fifo_lock;
178}; 223};
179 224
225#define __fsl_read_rio_config(x, addr, err, op) \
226 __asm__ __volatile__( \
227 "1: "op" %1,0(%2)\n" \
228 " eieio\n" \
229 "2:\n" \
230 ".section .fixup,\"ax\"\n" \
231 "3: li %1,-1\n" \
232 " li %0,%3\n" \
233 " b 2b\n" \
234 ".section __ex_table,\"a\"\n" \
235 " .align 2\n" \
236 " .long 1b,3b\n" \
237 ".text" \
238 : "=r" (err), "=r" (x) \
239 : "b" (addr), "i" (-EFAULT), "0" (err))
240
241static void __iomem *rio_regs_win;
242
243static int (*saved_mcheck_exception)(struct pt_regs *regs);
244
245static int fsl_rio_mcheck_exception(struct pt_regs *regs)
246{
247 const struct exception_table_entry *entry = NULL;
248 unsigned long reason = (mfspr(SPRN_MCSR) & MCSR_MASK);
249
250 if (reason & MCSR_BUS_RBERR) {
251 reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
252 if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) {
253 /* Check if we are prepared to handle this fault */
254 entry = search_exception_tables(regs->nip);
255 if (entry) {
256 pr_debug("RIO: %s - MC Exception handled\n",
257 __func__);
258 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
259 0);
260 regs->msr |= MSR_RI;
261 regs->nip = entry->fixup;
262 return 1;
263 }
264 }
265 }
266
267 if (saved_mcheck_exception)
268 return saved_mcheck_exception(regs);
269 else
270 return cur_cpu_spec->machine_check(regs);
271}
272
180/** 273/**
181 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message 274 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
182 * @mport: RapidIO master port info 275 * @mport: RapidIO master port info
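The maintenance-access fix above pairs an exception-table entry with a machine-check hook: each guarded load generated by __fsl_read_rio_config() records its address in __ex_table, and when a RapidIO access dies with a bus error, fsl_rio_mcheck_exception() checks LTLEDCSR, looks the faulting NIP up with search_exception_tables(), and resumes at the fixup with the value forced to -1 and the error code set, instead of letting the machine check be fatal. An illustrative caller-side wrapper (not in the patch) showing what the macro gives you:

    /* relies on fsl_rio_mcheck_exception() being hooked in as the
     * machine check handler, as the declarations above set up */
    static int guarded_cfg_read32(u8 *addr, u32 *val)
    {
        u32 v;
        int err = 0;

        __fsl_read_rio_config(v, addr, err, "lwz");
        *val = v;               /* -1 if the load faulted */
        return err;             /* 0, or -EFAULT on a bus error */
    }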
@@ -277,27 +370,44 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
277{ 370{
278 struct rio_priv *priv = mport->priv; 371 struct rio_priv *priv = mport->priv;
279 u8 *data; 372 u8 *data;
373 u32 rval, err = 0;
280 374
281 pr_debug 375 pr_debug
282 ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n", 376 ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n",
283 index, destid, hopcount, offset, len); 377 index, destid, hopcount, offset, len);
378
379 /* 16MB maintenance window possible */
380 /* allow only aligned access to maintenance registers */
381 if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
382 return -EINVAL;
383
284 out_be32(&priv->maint_atmu_regs->rowtar, 384 out_be32(&priv->maint_atmu_regs->rowtar,
285 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9)); 385 (destid << 22) | (hopcount << 12) | (offset >> 12));
386 out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
286 387
287 data = (u8 *) priv->maint_win + offset; 388 data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
288 switch (len) { 389 switch (len) {
289 case 1: 390 case 1:
290 *val = in_8((u8 *) data); 391 __fsl_read_rio_config(rval, data, err, "lbz");
291 break; 392 break;
292 case 2: 393 case 2:
293 *val = in_be16((u16 *) data); 394 __fsl_read_rio_config(rval, data, err, "lhz");
294 break; 395 break;
295 default: 396 case 4:
296 *val = in_be32((u32 *) data); 397 __fsl_read_rio_config(rval, data, err, "lwz");
297 break; 398 break;
399 default:
400 return -EINVAL;
298 } 401 }
299 402
300 return 0; 403 if (err) {
404 pr_debug("RIO: cfg_read error %d for %x:%x:%x\n",
405 err, destid, hopcount, offset);
406 }
407
408 *val = rval;
409
410 return err;
301} 411}
302 412
303/** 413/**
@@ -322,10 +432,17 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
322 pr_debug 432 pr_debug
323 ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n", 433 ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
324 index, destid, hopcount, offset, len, val); 434 index, destid, hopcount, offset, len, val);
435
436 /* 16MB maintenance windows possible */
437 /* allow only aligned access to maintenance registers */
438 if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
439 return -EINVAL;
440
325 out_be32(&priv->maint_atmu_regs->rowtar, 441 out_be32(&priv->maint_atmu_regs->rowtar,
326 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9)); 442 (destid << 22) | (hopcount << 12) | (offset >> 12));
443 out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
327 444
328 data = (u8 *) priv->maint_win + offset; 445 data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
329 switch (len) { 446 switch (len) {
330 case 1: 447 case 1:
331 out_8((u8 *) data, val); 448 out_8((u8 *) data, val);
@@ -333,9 +450,11 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
333 case 2: 450 case 2:
334 out_be16((u16 *) data, val); 451 out_be16((u16 *) data, val);
335 break; 452 break;
336 default: 453 case 4:
337 out_be32((u32 *) data, val); 454 out_be32((u32 *) data, val);
338 break; 455 break;
456 default:
457 return -EINVAL;
339 } 458 }
340 459
341 return 0; 460 return 0;
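Both maintenance accessors now reject unaligned accesses and anything past the 16 MB maintenance space, fold the offset into the ATMU translation register (with the new rowtear write carrying the upper destination-ID bits), and index the CPU-visible window with the offset masked to the window size. The address split, pulled out into an illustrative helper (not in the patch):

    static int maint_addr(u16 destid, u8 hopcount, u32 offset, int len,
                          void __iomem *maint_win, u32 *rowtar, u32 *rowtear,
                          u8 **data)
    {
        /* 16 MB maintenance space, naturally aligned accesses only */
        if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
            return -EINVAL;

        *rowtar  = (destid << 22) | (hopcount << 12) | (offset >> 12);
        *rowtear = destid >> 10;                  /* upper destination-ID bits */
        *data    = (u8 *)maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
        return 0;
    }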
@@ -930,6 +1049,223 @@ static int fsl_rio_doorbell_init(struct rio_mport *mport)
930 return rc; 1049 return rc;
931} 1050}
932 1051
1052/**
1053 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
1054 * @irq: Linux interrupt number
1055 * @dev_instance: Pointer to interrupt-specific data
1056 *
1057 * Handles port write interrupts. Parses a list of registered
1058 * port write event handlers and executes a matching event handler.
1059 */
1060static irqreturn_t
1061fsl_rio_port_write_handler(int irq, void *dev_instance)
1062{
1063 u32 ipwmr, ipwsr;
1064 struct rio_mport *port = (struct rio_mport *)dev_instance;
1065 struct rio_priv *priv = port->priv;
1066 u32 epwisr, tmp;
1067
1068 ipwmr = in_be32(&priv->msg_regs->pwmr);
1069 ipwsr = in_be32(&priv->msg_regs->pwsr);
1070
1071 epwisr = in_be32(priv->regs_win + RIO_EPWISR);
1072 if (epwisr & 0x80000000) {
1073 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
1074 pr_info("RIO_LTLEDCSR = 0x%x\n", tmp);
1075 out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
1076 }
1077
1078 if (!(epwisr & 0x00000001))
1079 return IRQ_HANDLED;
1080
1081#ifdef DEBUG_PW
1082 pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
1083 if (ipwsr & RIO_IPWSR_QF)
1084 pr_debug(" QF");
1085 if (ipwsr & RIO_IPWSR_TE)
1086 pr_debug(" TE");
1087 if (ipwsr & RIO_IPWSR_QFI)
1088 pr_debug(" QFI");
1089 if (ipwsr & RIO_IPWSR_PWD)
1090 pr_debug(" PWD");
1091 if (ipwsr & RIO_IPWSR_PWB)
1092 pr_debug(" PWB");
1093 pr_debug(" )\n");
1094#endif
1095 out_be32(&priv->msg_regs->pwsr,
1096 ipwsr & (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
1097
1098 if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
1099 priv->port_write_msg.err_count++;
1100 pr_info("RIO: Port-Write Transaction Err (%d)\n",
1101 priv->port_write_msg.err_count);
1102 }
1103 if (ipwsr & RIO_IPWSR_PWD) {
1104 priv->port_write_msg.discard_count++;
1105 pr_info("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
1106 priv->port_write_msg.discard_count);
1107 }
1108
1109 /* Schedule deferred processing if PW was received */
1110 if (ipwsr & RIO_IPWSR_QFI) {
1111 /* Save PW message (if there is room in FIFO),
1112 * otherwise discard it.
1113 */
1114 if (kfifo_avail(&priv->pw_fifo) >= RIO_PW_MSG_SIZE) {
1115 priv->port_write_msg.msg_count++;
1116 kfifo_in(&priv->pw_fifo, priv->port_write_msg.virt,
1117 RIO_PW_MSG_SIZE);
1118 } else {
1119 priv->port_write_msg.discard_count++;
1120 pr_info("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
1121 priv->port_write_msg.discard_count);
1122 }
1123 schedule_work(&priv->pw_work);
1124 }
1125
1126 /* Issue Clear Queue command. This allows another
1127 * port-write to be received.
1128 */
1129 out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
1130
1131 return IRQ_HANDLED;
1132}
1133
1134static void fsl_pw_dpc(struct work_struct *work)
1135{
1136 struct rio_priv *priv = container_of(work, struct rio_priv, pw_work);
1137 unsigned long flags;
1138 u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)];
1139
1140 /*
1141 * Process port-write messages
1142 */
1143 spin_lock_irqsave(&priv->pw_fifo_lock, flags);
1144 while (kfifo_out(&priv->pw_fifo, (unsigned char *)msg_buffer,
1145 RIO_PW_MSG_SIZE)) {
1146 /* Process one message */
1147 spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
1148#ifdef DEBUG_PW
1149 {
1150 u32 i;
1151 pr_debug("%s : Port-Write Message:", __func__);
1152 for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
1153 if ((i%4) == 0)
1154 pr_debug("\n0x%02x: 0x%08x", i*4,
1155 msg_buffer[i]);
1156 else
1157 pr_debug(" 0x%08x", msg_buffer[i]);
1158 }
1159 pr_debug("\n");
1160 }
1161#endif
1162 /* Pass the port-write message to RIO core for processing */
1163 rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
1164 spin_lock_irqsave(&priv->pw_fifo_lock, flags);
1165 }
1166 spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
1167}
1168
1169/**
 1170 * fsl_rio_pw_enable - enable/disable the port-write interface
1171 * @mport: Master port implementing the port write unit
1172 * @enable: 1=enable; 0=disable port-write message handling
1173 */
1174static int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
1175{
1176 struct rio_priv *priv = mport->priv;
1177 u32 rval;
1178
1179 rval = in_be32(&priv->msg_regs->pwmr);
1180
1181 if (enable)
1182 rval |= RIO_IPWMR_PWE;
1183 else
1184 rval &= ~RIO_IPWMR_PWE;
1185
1186 out_be32(&priv->msg_regs->pwmr, rval);
1187
1188 return 0;
1189}
1190
1191/**
1192 * fsl_rio_port_write_init - MPC85xx port write interface init
1193 * @mport: Master port implementing the port write unit
1194 *
1195 * Initializes port write unit hardware and DMA buffer
1196 * ring. Called from fsl_rio_setup(). Returns %0 on success
1197 * or %-ENOMEM on failure.
1198 */
1199static int fsl_rio_port_write_init(struct rio_mport *mport)
1200{
1201 struct rio_priv *priv = mport->priv;
1202 int rc = 0;
1203
1204 /* Following configurations require a disabled port write controller */
1205 out_be32(&priv->msg_regs->pwmr,
1206 in_be32(&priv->msg_regs->pwmr) & ~RIO_IPWMR_PWE);
1207
1208 /* Initialize port write */
1209 priv->port_write_msg.virt = dma_alloc_coherent(priv->dev,
1210 RIO_PW_MSG_SIZE,
1211 &priv->port_write_msg.phys, GFP_KERNEL);
1212 if (!priv->port_write_msg.virt) {
1213 pr_err("RIO: unable allocate port write queue\n");
1214 return -ENOMEM;
1215 }
1216
1217 priv->port_write_msg.err_count = 0;
1218 priv->port_write_msg.discard_count = 0;
1219
1220 /* Point dequeue/enqueue pointers at first entry */
1221 out_be32(&priv->msg_regs->epwqbar, 0);
1222 out_be32(&priv->msg_regs->pwqbar, (u32) priv->port_write_msg.phys);
1223
1224 pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
1225 in_be32(&priv->msg_regs->epwqbar),
1226 in_be32(&priv->msg_regs->pwqbar));
1227
1228 /* Clear interrupt status IPWSR */
1229 out_be32(&priv->msg_regs->pwsr,
1230 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
1231
 1232 /* Configure the port write controller: enable snooping and all
 1233 reporting, and clear the queue-full condition */
1234 out_be32(&priv->msg_regs->pwmr,
1235 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);
1236
1237
1238 /* Hook up port-write handler */
1239 rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, 0,
1240 "port-write", (void *)mport);
1241 if (rc < 0) {
1242 pr_err("MPC85xx RIO: unable to request inbound doorbell irq");
1243 goto err_out;
1244 }
1245
1246 INIT_WORK(&priv->pw_work, fsl_pw_dpc);
1247 spin_lock_init(&priv->pw_fifo_lock);
1248 if (kfifo_alloc(&priv->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
1249 pr_err("FIFO allocation failed\n");
1250 rc = -ENOMEM;
1251 goto err_out_irq;
1252 }
1253
1254 pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
1255 in_be32(&priv->msg_regs->pwmr),
1256 in_be32(&priv->msg_regs->pwsr));
1257
1258 return rc;
1259
1260err_out_irq:
1261 free_irq(IRQ_RIO_PW(mport), (void *)mport);
1262err_out:
1263 dma_free_coherent(priv->dev, RIO_PW_MSG_SIZE,
1264 priv->port_write_msg.virt,
1265 priv->port_write_msg.phys);
1266 return rc;
1267}
1268
933static char *cmdline = NULL; 1269static char *cmdline = NULL;
934 1270
935static int fsl_rio_get_hdid(int index) 1271static int fsl_rio_get_hdid(int index)
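
The port-write path added above follows the usual top-half/bottom-half split: the interrupt handler copies the latched message into a kfifo and schedules a work item, and fsl_pw_dpc() drains the kfifo outside interrupt context before handing each message to the RapidIO core. A stripped-down sketch of that pattern, with placeholder names and message size rather than the driver's own, and with setup (kfifo_alloc(), INIT_WORK(), spin_lock_init()) omitted:

#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#define DEMO_MSG_SIZE	64	/* placeholder for RIO_PW_MSG_SIZE */

struct demo_pw {
	struct kfifo fifo;
	spinlock_t lock;
	struct work_struct work;
};

/* top half: push one message if the FIFO has room, then defer */
static void demo_pw_isr_push(struct demo_pw *d, const void *msg)
{
	if (kfifo_avail(&d->fifo) >= DEMO_MSG_SIZE)
		kfifo_in(&d->fifo, msg, DEMO_MSG_SIZE);
	schedule_work(&d->work);
}

/* bottom half: drain the FIFO, dropping the lock while a message is handled */
static void demo_pw_work(struct work_struct *work)
{
	struct demo_pw *d = container_of(work, struct demo_pw, work);
	unsigned char buf[DEMO_MSG_SIZE];
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	while (kfifo_out(&d->fifo, buf, DEMO_MSG_SIZE)) {
		spin_unlock_irqrestore(&d->lock, flags);
		/* process one port-write message here */
		spin_lock_irqsave(&d->lock, flags);
	}
	spin_unlock_irqrestore(&d->lock, flags);
}
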
@@ -1015,41 +1351,41 @@ int fsl_rio_setup(struct of_device *dev)
1015 u64 law_start, law_size; 1351 u64 law_start, law_size;
1016 int paw, aw, sw; 1352 int paw, aw, sw;
1017 1353
1018 if (!dev->node) { 1354 if (!dev->dev.of_node) {
1019 dev_err(&dev->dev, "Device OF-Node is NULL"); 1355 dev_err(&dev->dev, "Device OF-Node is NULL");
1020 return -EFAULT; 1356 return -EFAULT;
1021 } 1357 }
1022 1358
1023 rc = of_address_to_resource(dev->node, 0, &regs); 1359 rc = of_address_to_resource(dev->dev.of_node, 0, &regs);
1024 if (rc) { 1360 if (rc) {
1025 dev_err(&dev->dev, "Can't get %s property 'reg'\n", 1361 dev_err(&dev->dev, "Can't get %s property 'reg'\n",
1026 dev->node->full_name); 1362 dev->dev.of_node->full_name);
1027 return -EFAULT; 1363 return -EFAULT;
1028 } 1364 }
1029 dev_info(&dev->dev, "Of-device full name %s\n", dev->node->full_name); 1365 dev_info(&dev->dev, "Of-device full name %s\n", dev->dev.of_node->full_name);
1030 dev_info(&dev->dev, "Regs: %pR\n", &regs); 1366 dev_info(&dev->dev, "Regs: %pR\n", &regs);
1031 1367
1032 dt_range = of_get_property(dev->node, "ranges", &rlen); 1368 dt_range = of_get_property(dev->dev.of_node, "ranges", &rlen);
1033 if (!dt_range) { 1369 if (!dt_range) {
1034 dev_err(&dev->dev, "Can't get %s property 'ranges'\n", 1370 dev_err(&dev->dev, "Can't get %s property 'ranges'\n",
1035 dev->node->full_name); 1371 dev->dev.of_node->full_name);
1036 return -EFAULT; 1372 return -EFAULT;
1037 } 1373 }
1038 1374
 1039 /* Get node address width */ 1375 /* Get node address width */
1040 cell = of_get_property(dev->node, "#address-cells", NULL); 1376 cell = of_get_property(dev->dev.of_node, "#address-cells", NULL);
1041 if (cell) 1377 if (cell)
1042 aw = *cell; 1378 aw = *cell;
1043 else 1379 else
1044 aw = of_n_addr_cells(dev->node); 1380 aw = of_n_addr_cells(dev->dev.of_node);
 1045 /* Get node size width */ 1381 /* Get node size width */
1046 cell = of_get_property(dev->node, "#size-cells", NULL); 1382 cell = of_get_property(dev->dev.of_node, "#size-cells", NULL);
1047 if (cell) 1383 if (cell)
1048 sw = *cell; 1384 sw = *cell;
1049 else 1385 else
1050 sw = of_n_size_cells(dev->node); 1386 sw = of_n_size_cells(dev->dev.of_node);
 1051 /* Get parent address width */ 1387 /* Get parent address width */
1052 paw = of_n_addr_cells(dev->node); 1388 paw = of_n_addr_cells(dev->dev.of_node);
1053 1389
1054 law_start = of_read_number(dt_range + aw, paw); 1390 law_start = of_read_number(dt_range + aw, paw);
1055 law_size = of_read_number(dt_range + aw + paw, sw); 1391 law_size = of_read_number(dt_range + aw + paw, sw);
@@ -1057,7 +1393,7 @@ int fsl_rio_setup(struct of_device *dev)
1057 dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n", 1393 dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n",
1058 law_start, law_size); 1394 law_start, law_size);
1059 1395
1060 ops = kmalloc(sizeof(struct rio_ops), GFP_KERNEL); 1396 ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL);
1061 if (!ops) { 1397 if (!ops) {
1062 rc = -ENOMEM; 1398 rc = -ENOMEM;
1063 goto err_ops; 1399 goto err_ops;
@@ -1067,6 +1403,7 @@ int fsl_rio_setup(struct of_device *dev)
1067 ops->cread = fsl_rio_config_read; 1403 ops->cread = fsl_rio_config_read;
1068 ops->cwrite = fsl_rio_config_write; 1404 ops->cwrite = fsl_rio_config_write;
1069 ops->dsend = fsl_rio_doorbell_send; 1405 ops->dsend = fsl_rio_doorbell_send;
1406 ops->pwenable = fsl_rio_pw_enable;
1070 1407
1071 port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); 1408 port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
1072 if (!port) { 1409 if (!port) {
@@ -1089,11 +1426,12 @@ int fsl_rio_setup(struct of_device *dev)
1089 port->iores.flags = IORESOURCE_MEM; 1426 port->iores.flags = IORESOURCE_MEM;
1090 port->iores.name = "rio_io_win"; 1427 port->iores.name = "rio_io_win";
1091 1428
1092 priv->bellirq = irq_of_parse_and_map(dev->node, 2); 1429 priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0);
1093 priv->txirq = irq_of_parse_and_map(dev->node, 3); 1430 priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2);
1094 priv->rxirq = irq_of_parse_and_map(dev->node, 4); 1431 priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3);
1095 dev_info(&dev->dev, "bellirq: %d, txirq: %d, rxirq %d\n", priv->bellirq, 1432 priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4);
1096 priv->txirq, priv->rxirq); 1433 dev_info(&dev->dev, "pwirq: %d, bellirq: %d, txirq: %d, rxirq %d\n",
1434 priv->pwirq, priv->bellirq, priv->txirq, priv->rxirq);
1097 1435
1098 rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); 1436 rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
1099 rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0); 1437 rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
@@ -1109,6 +1447,7 @@ int fsl_rio_setup(struct of_device *dev)
1109 rio_register_mport(port); 1447 rio_register_mport(port);
1110 1448
1111 priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); 1449 priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);
1450 rio_regs_win = priv->regs_win;
1112 1451
1113 /* Probe the master port phy type */ 1452 /* Probe the master port phy type */
1114 ccsr = in_be32(priv->regs_win + RIO_CCSR); 1453 ccsr = in_be32(priv->regs_win + RIO_CCSR);
@@ -1166,7 +1505,8 @@ int fsl_rio_setup(struct of_device *dev)
1166 1505
1167 /* Configure maintenance transaction window */ 1506 /* Configure maintenance transaction window */
1168 out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12); 1507 out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12);
1169 out_be32(&priv->maint_atmu_regs->rowar, 0x80077015); /* 4M */ 1508 out_be32(&priv->maint_atmu_regs->rowar,
1509 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1));
1170 1510
1171 priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE); 1511 priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE);
1172 1512
@@ -1175,6 +1515,12 @@ int fsl_rio_setup(struct of_device *dev)
1175 (law_start + RIO_MAINT_WIN_SIZE) >> 12); 1515 (law_start + RIO_MAINT_WIN_SIZE) >> 12);
1176 out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b); /* 4k */ 1516 out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b); /* 4k */
1177 fsl_rio_doorbell_init(port); 1517 fsl_rio_doorbell_init(port);
1518 fsl_rio_port_write_init(port);
1519
1520 saved_mcheck_exception = ppc_md.machine_check_exception;
1521 ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
1522 /* Ensure that RFXE is set */
1523 mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000));
1178 1524
1179 return 0; 1525 return 0;
1180err: 1526err:
@@ -1195,7 +1541,7 @@ static int __devinit fsl_of_rio_rpn_probe(struct of_device *dev,
1195{ 1541{
1196 int rc; 1542 int rc;
1197 printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n", 1543 printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n",
1198 dev->node->full_name); 1544 dev->dev.of_node->full_name);
1199 1545
1200 rc = fsl_rio_setup(dev); 1546 rc = fsl_rio_setup(dev);
1201 if (rc) 1547 if (rc)
@@ -1215,8 +1561,11 @@ static const struct of_device_id fsl_of_rio_rpn_ids[] = {
1215}; 1561};
1216 1562
1217static struct of_platform_driver fsl_of_rio_rpn_driver = { 1563static struct of_platform_driver fsl_of_rio_rpn_driver = {
1218 .name = "fsl-of-rio", 1564 .driver = {
1219 .match_table = fsl_of_rio_rpn_ids, 1565 .name = "fsl-of-rio",
1566 .owner = THIS_MODULE,
1567 .of_match_table = fsl_of_rio_rpn_ids,
1568 },
1220 .probe = fsl_of_rio_rpn_probe, 1569 .probe = fsl_of_rio_rpn_probe,
1221}; 1570};
1222 1571
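
The driver-registration change at the end of this file recurs in the pmi and qe_lib hunks below: the legacy top-level .name/.match_table fields of struct of_platform_driver move into the embedded struct device_driver as .name/.of_match_table, and .owner is set. A sketch of the resulting shape, using purely illustrative "example" names:

#include <linux/module.h>
#include <linux/of_platform.h>

static int example_probe(struct of_device *dev,
			 const struct of_device_id *match)
{
	return 0;	/* illustrative stub */
}

static const struct of_device_id example_ids[] = {
	{ .compatible = "example,device", },
	{},
};

static struct of_platform_driver example_driver = {
	.driver = {
		.name = "example",
		.owner = THIS_MODULE,
		.of_match_table = example_ids,
	},
	.probe = example_probe,
};
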
diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c
index 652652db4ce2..d07137a07d75 100644
--- a/arch/powerpc/sysdev/pmi.c
+++ b/arch/powerpc/sysdev/pmi.c
@@ -124,7 +124,7 @@ static void pmi_notify_handlers(struct work_struct *work)
124static int pmi_of_probe(struct of_device *dev, 124static int pmi_of_probe(struct of_device *dev,
125 const struct of_device_id *match) 125 const struct of_device_id *match)
126{ 126{
127 struct device_node *np = dev->node; 127 struct device_node *np = dev->dev.of_node;
128 int rc; 128 int rc;
129 129
130 if (data) { 130 if (data) {
@@ -206,11 +206,12 @@ static int pmi_of_remove(struct of_device *dev)
206} 206}
207 207
208static struct of_platform_driver pmi_of_platform_driver = { 208static struct of_platform_driver pmi_of_platform_driver = {
209 .match_table = pmi_match,
210 .probe = pmi_of_probe, 209 .probe = pmi_of_probe,
211 .remove = pmi_of_remove, 210 .remove = pmi_of_remove,
212 .driver = { 211 .driver = {
213 .name = "pmi", 212 .name = "pmi",
213 .owner = THIS_MODULE,
214 .of_match_table = pmi_match,
214 }, 215 },
215}; 216};
216 217
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index 106d767bf65b..156aa7d36258 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -974,6 +974,123 @@ static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
974 .setup_utl = ppc460ex_pciex_init_utl, 974 .setup_utl = ppc460ex_pciex_init_utl,
975}; 975};
976 976
977static int __init ppc460sx_pciex_core_init(struct device_node *np)
978{
979 /* HSS drive amplitude */
980 mtdcri(SDR0, PESDR0_460SX_HSSL0DAMP, 0xB9843211);
981 mtdcri(SDR0, PESDR0_460SX_HSSL1DAMP, 0xB9843211);
982 mtdcri(SDR0, PESDR0_460SX_HSSL2DAMP, 0xB9843211);
983 mtdcri(SDR0, PESDR0_460SX_HSSL3DAMP, 0xB9843211);
984 mtdcri(SDR0, PESDR0_460SX_HSSL4DAMP, 0xB9843211);
985 mtdcri(SDR0, PESDR0_460SX_HSSL5DAMP, 0xB9843211);
986 mtdcri(SDR0, PESDR0_460SX_HSSL6DAMP, 0xB9843211);
987 mtdcri(SDR0, PESDR0_460SX_HSSL7DAMP, 0xB9843211);
988
989 mtdcri(SDR0, PESDR1_460SX_HSSL0DAMP, 0xB9843211);
990 mtdcri(SDR0, PESDR1_460SX_HSSL1DAMP, 0xB9843211);
991 mtdcri(SDR0, PESDR1_460SX_HSSL2DAMP, 0xB9843211);
992 mtdcri(SDR0, PESDR1_460SX_HSSL3DAMP, 0xB9843211);
993
994 mtdcri(SDR0, PESDR2_460SX_HSSL0DAMP, 0xB9843211);
995 mtdcri(SDR0, PESDR2_460SX_HSSL1DAMP, 0xB9843211);
996 mtdcri(SDR0, PESDR2_460SX_HSSL2DAMP, 0xB9843211);
997 mtdcri(SDR0, PESDR2_460SX_HSSL3DAMP, 0xB9843211);
998
999 /* HSS TX pre-emphasis */
1000 mtdcri(SDR0, PESDR0_460SX_HSSL0COEFA, 0xDCB98987);
1001 mtdcri(SDR0, PESDR0_460SX_HSSL1COEFA, 0xDCB98987);
1002 mtdcri(SDR0, PESDR0_460SX_HSSL2COEFA, 0xDCB98987);
1003 mtdcri(SDR0, PESDR0_460SX_HSSL3COEFA, 0xDCB98987);
1004 mtdcri(SDR0, PESDR0_460SX_HSSL4COEFA, 0xDCB98987);
1005 mtdcri(SDR0, PESDR0_460SX_HSSL5COEFA, 0xDCB98987);
1006 mtdcri(SDR0, PESDR0_460SX_HSSL6COEFA, 0xDCB98987);
1007 mtdcri(SDR0, PESDR0_460SX_HSSL7COEFA, 0xDCB98987);
1008
1009 mtdcri(SDR0, PESDR1_460SX_HSSL0COEFA, 0xDCB98987);
1010 mtdcri(SDR0, PESDR1_460SX_HSSL1COEFA, 0xDCB98987);
1011 mtdcri(SDR0, PESDR1_460SX_HSSL2COEFA, 0xDCB98987);
1012 mtdcri(SDR0, PESDR1_460SX_HSSL3COEFA, 0xDCB98987);
1013
1014 mtdcri(SDR0, PESDR2_460SX_HSSL0COEFA, 0xDCB98987);
1015 mtdcri(SDR0, PESDR2_460SX_HSSL1COEFA, 0xDCB98987);
1016 mtdcri(SDR0, PESDR2_460SX_HSSL2COEFA, 0xDCB98987);
1017 mtdcri(SDR0, PESDR2_460SX_HSSL3COEFA, 0xDCB98987);
1018
1019 /* HSS TX calibration control */
1020 mtdcri(SDR0, PESDR0_460SX_HSSL1CALDRV, 0x22222222);
1021 mtdcri(SDR0, PESDR1_460SX_HSSL1CALDRV, 0x22220000);
1022 mtdcri(SDR0, PESDR2_460SX_HSSL1CALDRV, 0x22220000);
1023
1024 /* HSS TX slew control */
1025 mtdcri(SDR0, PESDR0_460SX_HSSSLEW, 0xFFFFFFFF);
1026 mtdcri(SDR0, PESDR1_460SX_HSSSLEW, 0xFFFF0000);
1027 mtdcri(SDR0, PESDR2_460SX_HSSSLEW, 0xFFFF0000);
1028
1029 udelay(100);
1030
1031 /* De-assert PLLRESET */
1032 dcri_clrset(SDR0, PESDR0_PLLLCT2, 0x00000100, 0);
1033
1034 /* Reset DL, UTL, GPL before configuration */
1035 mtdcri(SDR0, PESDR0_460SX_RCSSET,
1036 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1037 mtdcri(SDR0, PESDR1_460SX_RCSSET,
1038 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1039 mtdcri(SDR0, PESDR2_460SX_RCSSET,
1040 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1041
1042 udelay(100);
1043
1044 /*
1045 * If bifurcation is not enabled, u-boot would have disabled the
1046 * third PCIe port
1047 */
1048 if (((mfdcri(SDR0, PESDR1_460SX_HSSCTLSET) & 0x00000001) ==
1049 0x00000001)) {
 1050 printk(KERN_INFO "PCI: PCIE bifurcation set up successfully.\n");
1051 printk(KERN_INFO "PCI: Total 3 PCIE ports are present\n");
1052 return 3;
1053 }
1054
1055 printk(KERN_INFO "PCI: Total 2 PCIE ports are present\n");
1056 return 2;
1057}
1058
1059static int ppc460sx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1060{
1061
1062 if (port->endpoint)
1063 dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
1064 0x01000000, 0);
1065 else
1066 dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
1067 0, 0x01000000);
1068
 1069 /* Gen-1 */
1070 mtdcri(SDR0, port->sdr_base + PESDRn_460SX_RCEI, 0x08000000);
1071
1072 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
1073 (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL),
1074 PESDRx_RCSSET_RSTPYN);
1075
1076 port->has_ibpre = 1;
1077
1078 return 0;
1079}
1080
1081static int ppc460sx_pciex_init_utl(struct ppc4xx_pciex_port *port)
1082{
1083 /* Max 128 Bytes */
1084 out_be32 (port->utl_base + PEUTL_PBBSZ, 0x00000000);
1085 return 0;
1086}
1087
1088static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
1089 .core_init = ppc460sx_pciex_core_init,
1090 .port_init_hw = ppc460sx_pciex_init_port_hw,
1091 .setup_utl = ppc460sx_pciex_init_utl,
1092};
1093
977#endif /* CONFIG_44x */ 1094#endif /* CONFIG_44x */
978 1095
979#ifdef CONFIG_40x 1096#ifdef CONFIG_40x
@@ -1089,6 +1206,8 @@ static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
1089 } 1206 }
1090 if (of_device_is_compatible(np, "ibm,plb-pciex-460ex")) 1207 if (of_device_is_compatible(np, "ibm,plb-pciex-460ex"))
1091 ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops; 1208 ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops;
1209 if (of_device_is_compatible(np, "ibm,plb-pciex-460sx"))
1210 ppc4xx_pciex_hwops = &ppc460sx_pcie_hwops;
1092#endif /* CONFIG_44x */ 1211#endif /* CONFIG_44x */
1093#ifdef CONFIG_40x 1212#ifdef CONFIG_40x
1094 if (of_device_is_compatible(np, "ibm,plb-pciex-405ex")) 1213 if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
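
The new 460SX support plugs into the existing per-SoC dispatch: ppc4xx_pciex_check_core_init() matches the node's compatible string and points ppc4xx_pciex_hwops at the matching table of core_init/port_init_hw/setup_utl callbacks. A reduced sketch of that selection pattern, using placeholder structure and variable names rather than the driver's own:

#include <linux/of.h>

struct demo_pciex_hwops {
	int (*core_init)(struct device_node *np);
};

static struct demo_pciex_hwops demo_460sx_hwops;	/* placeholder table */
static struct demo_pciex_hwops *demo_hwops;

static void demo_pick_hwops(struct device_node *np)
{
	/* select the per-SoC callback table from the compatible string */
	if (of_device_is_compatible(np, "ibm,plb-pciex-460sx"))
		demo_hwops = &demo_460sx_hwops;
}
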
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.h b/arch/powerpc/sysdev/ppc4xx_pci.h
index d04e40b306fb..56d9e5deccbf 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.h
+++ b/arch/powerpc/sysdev/ppc4xx_pci.h
@@ -324,6 +324,64 @@
324#define PESDR0_460EX_IHS2 0x036D 324#define PESDR0_460EX_IHS2 0x036D
325 325
326/* 326/*
 327 * 460SX additional DCRs
328 */
329#define PESDRn_460SX_RCEI 0x02
330
331#define PESDR0_460SX_HSSL0DAMP 0x320
332#define PESDR0_460SX_HSSL1DAMP 0x321
333#define PESDR0_460SX_HSSL2DAMP 0x322
334#define PESDR0_460SX_HSSL3DAMP 0x323
335#define PESDR0_460SX_HSSL4DAMP 0x324
336#define PESDR0_460SX_HSSL5DAMP 0x325
337#define PESDR0_460SX_HSSL6DAMP 0x326
338#define PESDR0_460SX_HSSL7DAMP 0x327
339
340#define PESDR1_460SX_HSSL0DAMP 0x354
341#define PESDR1_460SX_HSSL1DAMP 0x355
342#define PESDR1_460SX_HSSL2DAMP 0x356
343#define PESDR1_460SX_HSSL3DAMP 0x357
344
345#define PESDR2_460SX_HSSL0DAMP 0x384
346#define PESDR2_460SX_HSSL1DAMP 0x385
347#define PESDR2_460SX_HSSL2DAMP 0x386
348#define PESDR2_460SX_HSSL3DAMP 0x387
349
350#define PESDR0_460SX_HSSL0COEFA 0x328
351#define PESDR0_460SX_HSSL1COEFA 0x329
352#define PESDR0_460SX_HSSL2COEFA 0x32A
353#define PESDR0_460SX_HSSL3COEFA 0x32B
354#define PESDR0_460SX_HSSL4COEFA 0x32C
355#define PESDR0_460SX_HSSL5COEFA 0x32D
356#define PESDR0_460SX_HSSL6COEFA 0x32E
357#define PESDR0_460SX_HSSL7COEFA 0x32F
358
359#define PESDR1_460SX_HSSL0COEFA 0x358
360#define PESDR1_460SX_HSSL1COEFA 0x359
361#define PESDR1_460SX_HSSL2COEFA 0x35A
362#define PESDR1_460SX_HSSL3COEFA 0x35B
363
364#define PESDR2_460SX_HSSL0COEFA 0x388
365#define PESDR2_460SX_HSSL1COEFA 0x389
366#define PESDR2_460SX_HSSL2COEFA 0x38A
367#define PESDR2_460SX_HSSL3COEFA 0x38B
368
369#define PESDR0_460SX_HSSL1CALDRV 0x339
370#define PESDR1_460SX_HSSL1CALDRV 0x361
371#define PESDR2_460SX_HSSL1CALDRV 0x391
372
373#define PESDR0_460SX_HSSSLEW 0x338
374#define PESDR1_460SX_HSSSLEW 0x360
375#define PESDR2_460SX_HSSSLEW 0x390
376
377#define PESDR0_460SX_HSSCTLSET 0x31E
378#define PESDR1_460SX_HSSCTLSET 0x352
379#define PESDR2_460SX_HSSCTLSET 0x382
380
381#define PESDR0_460SX_RCSSET 0x304
382#define PESDR1_460SX_RCSSET 0x344
383#define PESDR2_460SX_RCSSET 0x374
384/*
327 * Of the above, some are common offsets from the base 385 * Of the above, some are common offsets from the base
328 */ 386 */
329#define PESDRn_UTLSET1 0x00 387#define PESDRn_UTLSET1 0x00
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 149393c02c3f..093e0ae1a941 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -669,8 +669,11 @@ static const struct of_device_id qe_ids[] = {
669}; 669};
670 670
671static struct of_platform_driver qe_driver = { 671static struct of_platform_driver qe_driver = {
672 .driver.name = "fsl-qe", 672 .driver = {
673 .match_table = qe_ids, 673 .name = "fsl-qe",
674 .owner = THIS_MODULE,
675 .of_match_table = qe_ids,
676 },
674 .probe = qe_probe, 677 .probe = qe_probe,
675 .resume = qe_resume, 678 .resume = qe_resume,
676}; 679};