aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc')
-rw-r--r--arch/powerpc/Kconfig4
-rw-r--r--arch/powerpc/Kconfig.debug15
-rw-r--r--arch/powerpc/Makefile5
-rw-r--r--arch/powerpc/boot/dts/canyonlands.dts5
-rw-r--r--arch/powerpc/boot/dts/glacier.dts8
-rw-r--r--arch/powerpc/boot/dts/mpc8568mds.dts2
-rw-r--r--arch/powerpc/boot/dts/p1010rdb.dts280
-rw-r--r--arch/powerpc/boot/dts/p1010si.dtsi376
-rw-r--r--arch/powerpc/boot/dts/p1022ds.dts2
-rw-r--r--arch/powerpc/boot/dts/p1023rds.dts546
-rw-r--r--arch/powerpc/boot/dts/p2040rdb.dts166
-rw-r--r--arch/powerpc/boot/dts/p2040si.dtsi623
-rw-r--r--arch/powerpc/boot/dts/p3041ds.dts214
-rw-r--r--arch/powerpc/boot/dts/p3041si.dtsi660
-rw-r--r--arch/powerpc/boot/dts/p4080ds.dts533
-rw-r--r--arch/powerpc/boot/dts/p4080si.dtsi661
-rw-r--r--arch/powerpc/boot/dts/p5020ds.dts215
-rw-r--r--arch/powerpc/boot/dts/p5020si.dtsi652
-rw-r--r--arch/powerpc/boot/dts/sequoia.dts12
-rw-r--r--arch/powerpc/boot/dts/socrates.dts2
-rw-r--r--arch/powerpc/boot/dts/taishan.dts4
-rw-r--r--arch/powerpc/boot/dts/tqm8540.dts42
-rw-r--r--arch/powerpc/boot/dts/tqm8548-bigflash.dts2
-rw-r--r--arch/powerpc/boot/dts/tqm8548.dts2
-rw-r--r--arch/powerpc/boot/dts/tqm8560.dts2
-rw-r--r--arch/powerpc/boot/dts/xpedite5200.dts2
-rw-r--r--arch/powerpc/boot/dts/xpedite5200_xmon.dts2
-rw-r--r--arch/powerpc/boot/treeboot-iss4xx.c23
-rw-r--r--arch/powerpc/configs/44x/iss476-smp_defconfig6
-rw-r--r--arch/powerpc/configs/85xx/p1023rds_defconfig173
-rw-r--r--arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig5
-rw-r--r--arch/powerpc/configs/corenet32_smp_defconfig187
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig (renamed from arch/powerpc/configs/e55xx_smp_defconfig)0
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig12
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig10
-rw-r--r--arch/powerpc/configs/ppc64_defconfig16
-rw-r--r--arch/powerpc/configs/pseries_defconfig3
-rw-r--r--arch/powerpc/include/asm/8253pit.h3
-rw-r--r--arch/powerpc/include/asm/atomic.h19
-rw-r--r--arch/powerpc/include/asm/bitops.h5
-rw-r--r--arch/powerpc/include/asm/cputable.h14
-rw-r--r--arch/powerpc/include/asm/dbell.h2
-rw-r--r--arch/powerpc/include/asm/ehv_pic.h40
-rw-r--r--arch/powerpc/include/asm/elf.h4
-rw-r--r--arch/powerpc/include/asm/emulated_ops.h6
-rw-r--r--arch/powerpc/include/asm/epapr_hcalls.h502
-rw-r--r--arch/powerpc/include/asm/exception-64e.h52
-rw-r--r--arch/powerpc/include/asm/exception-64s.h136
-rw-r--r--arch/powerpc/include/asm/fsl_hcalls.h655
-rw-r--r--arch/powerpc/include/asm/hvcall.h5
-rw-r--r--arch/powerpc/include/asm/hvsi.h94
-rw-r--r--arch/powerpc/include/asm/hw_breakpoint.h2
-rw-r--r--arch/powerpc/include/asm/irq.h4
-rw-r--r--arch/powerpc/include/asm/jump_label.h47
-rw-r--r--arch/powerpc/include/asm/kvm.h15
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h4
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h196
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h4
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h41
-rw-r--r--arch/powerpc/include/asm/kvm_booke.h4
-rw-r--r--arch/powerpc/include/asm/kvm_e500.h30
-rw-r--r--arch/powerpc/include/asm/kvm_host.h169
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h41
-rw-r--r--arch/powerpc/include/asm/local.h2
-rw-r--r--arch/powerpc/include/asm/macio.h2
-rw-r--r--arch/powerpc/include/asm/mmu-hash64.h10
-rw-r--r--arch/powerpc/include/asm/mmu.h12
-rw-r--r--arch/powerpc/include/asm/pSeries_reconfig.h2
-rw-r--r--arch/powerpc/include/asm/paca.h10
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h29
-rw-r--r--arch/powerpc/include/asm/pci.h3
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64.h3
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h40
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h28
-rw-r--r--arch/powerpc/include/asm/processor.h5
-rw-r--r--arch/powerpc/include/asm/prom.h16
-rw-r--r--arch/powerpc/include/asm/reg.h29
-rw-r--r--arch/powerpc/include/asm/reg_booke.h1
-rw-r--r--arch/powerpc/include/asm/setup.h4
-rw-r--r--arch/powerpc/include/asm/smp.h3
-rw-r--r--arch/powerpc/include/asm/smu.h2
-rw-r--r--arch/powerpc/include/asm/system.h1
-rw-r--r--arch/powerpc/include/asm/udbg.h1
-rw-r--r--arch/powerpc/kernel/Makefile1
-rw-r--r--arch/powerpc/kernel/asm-offsets.c193
-rw-r--r--arch/powerpc/kernel/cpu_setup_power7.S22
-rw-r--r--arch/powerpc/kernel/cpu_setup_ppc970.S26
-rw-r--r--arch/powerpc/kernel/crash.c6
-rw-r--r--arch/powerpc/kernel/dma.c4
-rw-r--r--arch/powerpc/kernel/e500-pmu.c5
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S22
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S228
-rw-r--r--arch/powerpc/kernel/head_44x.S42
-rw-r--r--arch/powerpc/kernel/head_64.S2
-rw-r--r--arch/powerpc/kernel/head_booke.h42
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S57
-rw-r--r--arch/powerpc/kernel/idle_e500.S12
-rw-r--r--arch/powerpc/kernel/idle_power7.S2
-rw-r--r--arch/powerpc/kernel/irq.c50
-rw-r--r--arch/powerpc/kernel/jump_label.c23
-rw-r--r--arch/powerpc/kernel/machine_kexec.c4
-rw-r--r--arch/powerpc/kernel/misc_64.S4
-rw-r--r--arch/powerpc/kernel/module.c18
-rw-r--r--arch/powerpc/kernel/module_32.c11
-rw-r--r--arch/powerpc/kernel/module_64.c10
-rw-r--r--arch/powerpc/kernel/mpc7450-pmu.c7
-rw-r--r--arch/powerpc/kernel/of_platform.c2
-rw-r--r--arch/powerpc/kernel/paca.c2
-rw-r--r--arch/powerpc/kernel/pci-common.c31
-rw-r--r--arch/powerpc/kernel/pci_32.c169
-rw-r--r--arch/powerpc/kernel/pci_dn.c47
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c9
-rw-r--r--arch/powerpc/kernel/perf_event.c8
-rw-r--r--arch/powerpc/kernel/perf_event_fsl_emb.c6
-rw-r--r--arch/powerpc/kernel/power4-pmu.c7
-rw-r--r--arch/powerpc/kernel/power5+-pmu.c7
-rw-r--r--arch/powerpc/kernel/power5-pmu.c7
-rw-r--r--arch/powerpc/kernel/power6-pmu.c7
-rw-r--r--arch/powerpc/kernel/power7-pmu.c7
-rw-r--r--arch/powerpc/kernel/ppc970-pmu.c7
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c2
-rw-r--r--arch/powerpc/kernel/process.c8
-rw-r--r--arch/powerpc/kernel/prom.c16
-rw-r--r--arch/powerpc/kernel/ptrace.c4
-rw-r--r--arch/powerpc/kernel/rtas.c3
-rw-r--r--arch/powerpc/kernel/rtas_flash.c1
-rw-r--r--arch/powerpc/kernel/rtasd.c2
-rw-r--r--arch/powerpc/kernel/setup-common.c30
-rw-r--r--arch/powerpc/kernel/setup_32.c4
-rw-r--r--arch/powerpc/kernel/setup_64.c9
-rw-r--r--arch/powerpc/kernel/smp-tbsync.c2
-rw-r--r--arch/powerpc/kernel/smp.c33
-rw-r--r--arch/powerpc/kernel/time.c2
-rw-r--r--arch/powerpc/kernel/traps.c5
-rw-r--r--arch/powerpc/kernel/udbg.c5
-rw-r--r--arch/powerpc/kvm/44x_tlb.c4
-rw-r--r--arch/powerpc/kvm/Kconfig35
-rw-r--r--arch/powerpc/kvm/Makefile27
-rw-r--r--arch/powerpc/kvm/book3s.c1007
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c54
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c180
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c73
-rw-r--r--arch/powerpc/kvm/book3s_exports.c9
-rw-r--r--arch/powerpc/kvm/book3s_hv.c1269
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c155
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S166
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c370
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S1345
-rw-r--r--arch/powerpc/kvm/book3s_interrupts.S21
-rw-r--r--arch/powerpc/kvm/book3s_mmu_hpte.c71
-rw-r--r--arch/powerpc/kvm/book3s_pr.c1029
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S102
-rw-r--r--arch/powerpc/kvm/book3s_segment.S117
-rw-r--r--arch/powerpc/kvm/booke.c132
-rw-r--r--arch/powerpc/kvm/booke.h23
-rw-r--r--arch/powerpc/kvm/booke_interrupts.S66
-rw-r--r--arch/powerpc/kvm/e500.c7
-rw-r--r--arch/powerpc/kvm/e500_emulate.c4
-rw-r--r--arch/powerpc/kvm/e500_tlb.c800
-rw-r--r--arch/powerpc/kvm/e500_tlb.h13
-rw-r--r--arch/powerpc/kvm/powerpc.c78
-rw-r--r--arch/powerpc/kvm/timing.c9
-rw-r--r--arch/powerpc/kvm/trace.h4
-rw-r--r--arch/powerpc/mm/44x_mmu.c13
-rw-r--r--arch/powerpc/mm/fault.c6
-rw-r--r--arch/powerpc/mm/hash_native_64.c6
-rw-r--r--arch/powerpc/mm/init_32.c32
-rw-r--r--arch/powerpc/mm/init_64.c16
-rw-r--r--arch/powerpc/mm/mem.c35
-rw-r--r--arch/powerpc/mm/tlb_hash32.c4
-rw-r--r--arch/powerpc/mm/tlb_low_64e.S206
-rw-r--r--arch/powerpc/mm/tlb_nohash.c64
-rw-r--r--arch/powerpc/net/Makefile4
-rw-r--r--arch/powerpc/net/bpf_jit.h227
-rw-r--r--arch/powerpc/net/bpf_jit_64.S138
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c694
-rw-r--r--arch/powerpc/platforms/52xx/Kconfig8
-rw-r--r--arch/powerpc/platforms/52xx/Makefile1
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpio.c380
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pci.c8
-rw-r--r--arch/powerpc/platforms/83xx/km83xx.c4
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_mds.c4
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_itx.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_mds.c4
-rw-r--r--arch/powerpc/platforms/83xx/mpc836x_mds.c4
-rw-r--r--arch/powerpc/platforms/83xx/sbc834x.c2
-rw-r--r--arch/powerpc/platforms/83xx/usb.c2
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig31
-rw-r--r--arch/powerpc/platforms/85xx/Makefile3
-rw-r--r--arch/powerpc/platforms/85xx/corenet_ds.c41
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_cds.c2
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ds.c3
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c2
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_rdb.c5
-rw-r--r--arch/powerpc/platforms/85xx/p1010rdb.c122
-rw-r--r--arch/powerpc/platforms/85xx/p1022_ds.c18
-rw-r--r--arch/powerpc/platforms/85xx/p1023_rds.c162
-rw-r--r--arch/powerpc/platforms/85xx/p2040_rdb.c88
-rw-r--r--arch/powerpc/platforms/85xx/p3041_ds.c28
-rw-r--r--arch/powerpc/platforms/85xx/p4080_ds.c38
-rw-r--r--arch/powerpc/platforms/85xx/p5020_ds.c32
-rw-r--r--arch/powerpc/platforms/85xx/sbc8548.c2
-rw-r--r--arch/powerpc/platforms/85xx/sbc8560.c2
-rw-r--r--arch/powerpc/platforms/85xx/smp.c30
-rw-r--r--arch/powerpc/platforms/85xx/xes_mpc85xx.c2
-rw-r--r--arch/powerpc/platforms/86xx/mpc8610_hpcd.c107
-rw-r--r--arch/powerpc/platforms/Kconfig6
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype2
-rw-r--r--arch/powerpc/platforms/amigaone/Kconfig2
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc_epci.c8
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc_pciex.c2
-rw-r--r--arch/powerpc/platforms/cell/cpufreq_spudemand.c2
-rw-r--r--arch/powerpc/platforms/cell/smp.c2
-rw-r--r--arch/powerpc/platforms/cell/spu_manage.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/context.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c11
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c29
-rw-r--r--arch/powerpc/platforms/cell/spufs/spufs.h2
-rw-r--r--arch/powerpc/platforms/cell/spufs/syscalls.c22
-rw-r--r--arch/powerpc/platforms/chrp/Kconfig1
-rw-r--r--arch/powerpc/platforms/chrp/pci.c2
-rw-r--r--arch/powerpc/platforms/chrp/smp.c2
-rw-r--r--arch/powerpc/platforms/iseries/exception.S2
-rw-r--r--arch/powerpc/platforms/iseries/exception.h4
-rw-r--r--arch/powerpc/platforms/iseries/smp.c4
-rw-r--r--arch/powerpc/platforms/maple/setup.c41
-rw-r--r--arch/powerpc/platforms/pasemi/dma_lib.c2
-rw-r--r--arch/powerpc/platforms/powermac/backlight.c2
-rw-r--r--arch/powerpc/platforms/powermac/nvram.c4
-rw-r--r--arch/powerpc/platforms/powermac/pci.c9
-rw-r--r--arch/powerpc/platforms/powermac/setup.c3
-rw-r--r--arch/powerpc/platforms/powermac/smp.c4
-rw-r--r--arch/powerpc/platforms/powermac/time.c2
-rw-r--r--arch/powerpc/platforms/prep/Kconfig1
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig1
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c10
-rw-r--r--arch/powerpc/platforms/pseries/eeh.c2
-rw-r--r--arch/powerpc/platforms/pseries/eeh_cache.c2
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c10
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c46
-rw-r--r--arch/powerpc/platforms/pseries/hvconsole.c2
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c191
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h3
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c28
-rw-r--r--arch/powerpc/platforms/pseries/setup.c5
-rw-r--r--arch/powerpc/platforms/pseries/smp.c5
-rw-r--r--arch/powerpc/platforms/wsp/smp.c2
-rw-r--r--arch/powerpc/sysdev/Makefile1
-rw-r--r--arch/powerpc/sysdev/axonram.c2
-rw-r--r--arch/powerpc/sysdev/cpm1.c2
-rw-r--r--arch/powerpc/sysdev/cpm_common.c2
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c2
-rw-r--r--arch/powerpc/sysdev/ehv_pic.c302
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c2
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c95
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c2
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c29
-rw-r--r--arch/powerpc/sysdev/fsl_soc.h3
-rw-r--r--arch/powerpc/sysdev/ipic.c2
-rw-r--r--arch/powerpc/sysdev/mmio_nvram.c2
-rw-r--r--arch/powerpc/sysdev/mpc8xx_pic.c2
-rw-r--r--arch/powerpc/sysdev/mpic.c38
-rw-r--r--arch/powerpc/sysdev/mv64x60_udbg.c4
-rw-r--r--arch/powerpc/sysdev/ppc4xx_pci.c163
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_ic.c2
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_io.c2
-rw-r--r--arch/powerpc/sysdev/tsi108_dev.c2
-rw-r--r--arch/powerpc/sysdev/xics/icp-native.c11
268 files changed, 15996 insertions, 3910 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2729c6663d8a..374c475e56a3 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -134,6 +134,8 @@ config PPC
134 select GENERIC_IRQ_SHOW_LEVEL 134 select GENERIC_IRQ_SHOW_LEVEL
135 select HAVE_RCU_TABLE_FREE if SMP 135 select HAVE_RCU_TABLE_FREE if SMP
136 select HAVE_SYSCALL_TRACEPOINTS 136 select HAVE_SYSCALL_TRACEPOINTS
137 select HAVE_BPF_JIT if (PPC64 && NET)
138 select HAVE_ARCH_JUMP_LABEL
137 139
138config EARLY_PRINTK 140config EARLY_PRINTK
139 bool 141 bool
@@ -841,7 +843,7 @@ config LOWMEM_CAM_NUM
841 843
842config RELOCATABLE 844config RELOCATABLE
843 bool "Build a relocatable kernel (EXPERIMENTAL)" 845 bool "Build a relocatable kernel (EXPERIMENTAL)"
844 depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && FSL_BOOKE 846 depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && (FSL_BOOKE || PPC_47x)
845 help 847 help
846 This builds a kernel image that is capable of running at the 848 This builds a kernel image that is capable of running at the
847 location the kernel is loaded at (some alignment restrictions may 849 location the kernel is loaded at (some alignment restrictions may
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index e72dcf6a421d..067cb8480747 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -167,6 +167,13 @@ config PPC_EARLY_DEBUG_LPAR
167 Select this to enable early debugging for a machine with a HVC 167 Select this to enable early debugging for a machine with a HVC
168 console on vterm 0. 168 console on vterm 0.
169 169
170config PPC_EARLY_DEBUG_LPAR_HVSI
171 bool "LPAR HVSI Console"
172 depends on PPC_PSERIES
173 help
174 Select this to enable early debugging for a machine with a HVSI
175 console on a specified vterm.
176
170config PPC_EARLY_DEBUG_G5 177config PPC_EARLY_DEBUG_G5
171 bool "Apple G5" 178 bool "Apple G5"
172 depends on PPC_PMAC64 179 depends on PPC_PMAC64
@@ -253,6 +260,14 @@ config PPC_EARLY_DEBUG_WSP
253 260
254endchoice 261endchoice
255 262
263config PPC_EARLY_DEBUG_HVSI_VTERMNO
264 hex "vterm number to use with early debug HVSI"
265 depends on PPC_EARLY_DEBUG_LPAR_HVSI
266 default "0x30000000"
267 help
268 You probably want 0x30000000 for your first serial port and
269 0x30000001 for your second one
270
256config PPC_EARLY_DEBUG_44x_PHYSLOW 271config PPC_EARLY_DEBUG_44x_PHYSLOW
257 hex "Low 32 bits of early debug UART physical address" 272 hex "Low 32 bits of early debug UART physical address"
258 depends on PPC_EARLY_DEBUG_44x 273 depends on PPC_EARLY_DEBUG_44x
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index b7212b619c52..57af16edc192 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -67,7 +67,7 @@ LDFLAGS_vmlinux-yy := -Bstatic
67LDFLAGS_vmlinux-$(CONFIG_PPC64)$(CONFIG_RELOCATABLE) := -pie 67LDFLAGS_vmlinux-$(CONFIG_PPC64)$(CONFIG_RELOCATABLE) := -pie
68LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-yy) 68LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-yy)
69 69
70CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=none -mcall-aixdesc 70CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=no -mcall-aixdesc
71CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple 71CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
72KBUILD_CPPFLAGS += -Iarch/$(ARCH) 72KBUILD_CPPFLAGS += -Iarch/$(ARCH)
73KBUILD_AFLAGS += -Iarch/$(ARCH) 73KBUILD_AFLAGS += -Iarch/$(ARCH)
@@ -154,7 +154,8 @@ core-y += arch/powerpc/kernel/ \
154 arch/powerpc/lib/ \ 154 arch/powerpc/lib/ \
155 arch/powerpc/sysdev/ \ 155 arch/powerpc/sysdev/ \
156 arch/powerpc/platforms/ \ 156 arch/powerpc/platforms/ \
157 arch/powerpc/math-emu/ 157 arch/powerpc/math-emu/ \
158 arch/powerpc/net/
158core-$(CONFIG_XMON) += arch/powerpc/xmon/ 159core-$(CONFIG_XMON) += arch/powerpc/xmon/
159core-$(CONFIG_KVM) += arch/powerpc/kvm/ 160core-$(CONFIG_KVM) += arch/powerpc/kvm/
160 161
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index 22dd6ae84da0..3dc75deafbb3 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -143,6 +143,11 @@
143 interrupts = <0x1d 0x4>; 143 interrupts = <0x1d 0x4>;
144 }; 144 };
145 145
146 HWRNG: hwrng@110000 {
147 compatible = "amcc,ppc460ex-rng", "ppc4xx-rng";
148 reg = <4 0x00110000 0x50>;
149 };
150
146 MAL0: mcmal { 151 MAL0: mcmal {
147 compatible = "ibm,mcmal-460ex", "ibm,mcmal2"; 152 compatible = "ibm,mcmal-460ex", "ibm,mcmal2";
148 dcr-reg = <0x180 0x062>; 153 dcr-reg = <0x180 0x062>;
diff --git a/arch/powerpc/boot/dts/glacier.dts b/arch/powerpc/boot/dts/glacier.dts
index e618fc4cbc9e..2000060386d7 100644
--- a/arch/powerpc/boot/dts/glacier.dts
+++ b/arch/powerpc/boot/dts/glacier.dts
@@ -130,12 +130,18 @@
130 }; 130 };
131 131
132 CRYPTO: crypto@180000 { 132 CRYPTO: crypto@180000 {
133 compatible = "amcc,ppc460gt-crypto", "amcc,ppc4xx-crypto"; 133 compatible = "amcc,ppc460gt-crypto", "amcc,ppc460ex-crypto",
134 "amcc,ppc4xx-crypto";
134 reg = <4 0x00180000 0x80400>; 135 reg = <4 0x00180000 0x80400>;
135 interrupt-parent = <&UIC0>; 136 interrupt-parent = <&UIC0>;
136 interrupts = <0x1d 0x4>; 137 interrupts = <0x1d 0x4>;
137 }; 138 };
138 139
140 HWRNG: hwrng@110000 {
141 compatible = "amcc,ppc460ex-rng", "ppc4xx-rng";
142 reg = <4 0x00110000 0x50>;
143 };
144
139 MAL0: mcmal { 145 MAL0: mcmal {
140 compatible = "ibm,mcmal-460gt", "ibm,mcmal2"; 146 compatible = "ibm,mcmal-460gt", "ibm,mcmal2";
141 dcr-reg = <0x180 0x062>; 147 dcr-reg = <0x180 0x062>;
diff --git a/arch/powerpc/boot/dts/mpc8568mds.dts b/arch/powerpc/boot/dts/mpc8568mds.dts
index 30cf0e098bb9..647daf8e7291 100644
--- a/arch/powerpc/boot/dts/mpc8568mds.dts
+++ b/arch/powerpc/boot/dts/mpc8568mds.dts
@@ -60,6 +60,8 @@
60 compatible = "fsl,mpc8568-localbus", "fsl,pq3-localbus", 60 compatible = "fsl,mpc8568-localbus", "fsl,pq3-localbus",
61 "simple-bus"; 61 "simple-bus";
62 reg = <0xe0005000 0x1000>; 62 reg = <0xe0005000 0x1000>;
63 interrupt-parent = <&mpic>;
64 interrupts = <19 2>;
63 65
64 ranges = <0x0 0x0 0xfe000000 0x02000000 66 ranges = <0x0 0x0 0xfe000000 0x02000000
65 0x1 0x0 0xf8000000 0x00008000 67 0x1 0x0 0xf8000000 0x00008000
diff --git a/arch/powerpc/boot/dts/p1010rdb.dts b/arch/powerpc/boot/dts/p1010rdb.dts
new file mode 100644
index 000000000000..6b33b73a5ba0
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb.dts
@@ -0,0 +1,280 @@
1/*
2 * P1010 RDB Device Tree Source
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12/include/ "p1010si.dtsi"
13
14/ {
15 model = "fsl,P1010RDB";
16 compatible = "fsl,P1010RDB";
17
18 aliases {
19 serial0 = &serial0;
20 serial1 = &serial1;
21 ethernet0 = &enet0;
22 ethernet1 = &enet1;
23 ethernet2 = &enet2;
24 pci0 = &pci0;
25 pci1 = &pci1;
26 };
27
28 memory {
29 device_type = "memory";
30 };
31
32 ifc@ffe1e000 {
33 /* NOR, NAND Flashes and CPLD on board */
34 ranges = <0x0 0x0 0x0 0xee000000 0x02000000
35 0x1 0x0 0x0 0xff800000 0x00010000
36 0x3 0x0 0x0 0xffb00000 0x00000020>;
37
38 nor@0,0 {
39 #address-cells = <1>;
40 #size-cells = <1>;
41 compatible = "cfi-flash";
42 reg = <0x0 0x0 0x2000000>;
43 bank-width = <2>;
44 device-width = <1>;
45
46 partition@40000 {
47 /* 256KB for DTB Image */
48 reg = <0x00040000 0x00040000>;
49 label = "NOR DTB Image";
50 };
51
52 partition@80000 {
53 /* 7 MB for Linux Kernel Image */
54 reg = <0x00080000 0x00700000>;
55 label = "NOR Linux Kernel Image";
56 };
57
58 partition@800000 {
59 /* 20MB for JFFS2 based Root file System */
60 reg = <0x00800000 0x01400000>;
61 label = "NOR JFFS2 Root File System";
62 };
63
64 partition@1f00000 {
65 /* This location must not be altered */
66 /* 512KB for u-boot Bootloader Image */
67 /* 512KB for u-boot Environment Variables */
68 reg = <0x01f00000 0x00100000>;
69 label = "NOR U-Boot Image";
70 read-only;
71 };
72 };
73
74 nand@1,0 {
75 #address-cells = <1>;
76 #size-cells = <1>;
77 compatible = "fsl,ifc-nand";
78 reg = <0x1 0x0 0x10000>;
79
80 partition@0 {
81 /* This location must not be altered */
82 /* 1MB for u-boot Bootloader Image */
83 reg = <0x0 0x00100000>;
84 label = "NAND U-Boot Image";
85 read-only;
86 };
87
88 partition@100000 {
89 /* 1MB for DTB Image */
90 reg = <0x00100000 0x00100000>;
91 label = "NAND DTB Image";
92 };
93
94 partition@200000 {
95 /* 4MB for Linux Kernel Image */
96 reg = <0x00200000 0x00400000>;
97 label = "NAND Linux Kernel Image";
98 };
99
100 partition@600000 {
101 /* 4MB for Compressed Root file System Image */
102 reg = <0x00600000 0x00400000>;
103 label = "NAND Compressed RFS Image";
104 };
105
106 partition@a00000 {
107 /* 15MB for JFFS2 based Root file System */
108 reg = <0x00a00000 0x00f00000>;
109 label = "NAND JFFS2 Root File System";
110 };
111
112 partition@1900000 {
113 /* 7MB for User Area */
114 reg = <0x01900000 0x00700000>;
115 label = "NAND User area";
116 };
117 };
118
119 cpld@3,0 {
120 #address-cells = <1>;
121 #size-cells = <1>;
122 compatible = "fsl,p1010rdb-cpld";
123 reg = <0x3 0x0 0x0000020>;
124 bank-width = <1>;
125 device-width = <1>;
126 };
127 };
128
129 soc@ffe00000 {
130 spi@7000 {
131 flash@0 {
132 #address-cells = <1>;
133 #size-cells = <1>;
134 compatible = "spansion,s25sl12801";
135 reg = <0>;
136 spi-max-frequency = <50000000>;
137
138 partition@0 {
139 /* 1MB for u-boot Bootloader Image */
140 /* 1MB for Environment */
141 reg = <0x0 0x00100000>;
142 label = "SPI Flash U-Boot Image";
143 read-only;
144 };
145
146 partition@100000 {
147 /* 512KB for DTB Image */
148 reg = <0x00100000 0x00080000>;
149 label = "SPI Flash DTB Image";
150 };
151
152 partition@180000 {
153 /* 4MB for Linux Kernel Image */
154 reg = <0x00180000 0x00400000>;
155 label = "SPI Flash Linux Kernel Image";
156 };
157
158 partition@580000 {
159 /* 4MB for Compressed RFS Image */
160 reg = <0x00580000 0x00400000>;
161 label = "SPI Flash Compressed RFSImage";
162 };
163
164 partition@980000 {
165 /* 6.5MB for JFFS2 based RFS */
166 reg = <0x00980000 0x00680000>;
167 label = "SPI Flash JFFS2 RFS";
168 };
169 };
170 };
171
172 can0@1c000 {
173 fsl,flexcan-clock-source = "platform";
174 };
175
176 can1@1d000 {
177 fsl,flexcan-clock-source = "platform";
178 };
179
180 usb@22000 {
181 phy_type = "utmi";
182 };
183
184 mdio@24000 {
185 phy0: ethernet-phy@0 {
186 interrupt-parent = <&mpic>;
187 interrupts = <3 1>;
188 reg = <0x1>;
189 };
190
191 phy1: ethernet-phy@1 {
192 interrupt-parent = <&mpic>;
193 interrupts = <2 1>;
194 reg = <0x0>;
195 };
196
197 phy2: ethernet-phy@2 {
198 interrupt-parent = <&mpic>;
199 interrupts = <2 1>;
200 reg = <0x2>;
201 };
202 };
203
204 enet0: ethernet@b0000 {
205 phy-handle = <&phy0>;
206 phy-connection-type = "rgmii-id";
207 };
208
209 enet1: ethernet@b1000 {
210 phy-handle = <&phy1>;
211 tbi-handle = <&tbi0>;
212 phy-connection-type = "sgmii";
213 };
214
215 enet2: ethernet@b2000 {
216 phy-handle = <&phy2>;
217 tbi-handle = <&tbi1>;
218 phy-connection-type = "sgmii";
219 };
220 };
221
222 pci0: pcie@ffe09000 {
223 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
224 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
225 pcie@0 {
226 reg = <0x0 0x0 0x0 0x0 0x0>;
227 #interrupt-cells = <1>;
228 #size-cells = <2>;
229 #address-cells = <3>;
230 device_type = "pci";
231 interrupt-parent = <&mpic>;
232 interrupts = <16 2>;
233 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
234 interrupt-map = <
235 /* IDSEL 0x0 */
236 0000 0x0 0x0 0x1 &mpic 0x4 0x1
237 0000 0x0 0x0 0x2 &mpic 0x5 0x1
238 0000 0x0 0x0 0x3 &mpic 0x6 0x1
239 0000 0x0 0x0 0x4 &mpic 0x7 0x1
240 >;
241
242 ranges = <0x2000000 0x0 0xa0000000
243 0x2000000 0x0 0xa0000000
244 0x0 0x20000000
245
246 0x1000000 0x0 0x0
247 0x1000000 0x0 0x0
248 0x0 0x100000>;
249 };
250 };
251
252 pci1: pcie@ffe0a000 {
253 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
254 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
255 pcie@0 {
256 reg = <0x0 0x0 0x0 0x0 0x0>;
257 #interrupt-cells = <1>;
258 #size-cells = <2>;
259 #address-cells = <3>;
260 device_type = "pci";
261 interrupt-parent = <&mpic>;
262 interrupts = <16 2>;
263 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
264 interrupt-map = <
265 /* IDSEL 0x0 */
266 0000 0x0 0x0 0x1 &mpic 0x4 0x1
267 0000 0x0 0x0 0x2 &mpic 0x5 0x1
268 0000 0x0 0x0 0x3 &mpic 0x6 0x1
269 0000 0x0 0x0 0x4 &mpic 0x7 0x1
270 >;
271 ranges = <0x2000000 0x0 0x80000000
272 0x2000000 0x0 0x80000000
273 0x0 0x20000000
274
275 0x1000000 0x0 0x0
276 0x1000000 0x0 0x0
277 0x0 0x100000>;
278 };
279 };
280};
diff --git a/arch/powerpc/boot/dts/p1010si.dtsi b/arch/powerpc/boot/dts/p1010si.dtsi
new file mode 100644
index 000000000000..7f51104f2e36
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010si.dtsi
@@ -0,0 +1,376 @@
1/*
2 * P1010si Device Tree Source
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12/dts-v1/;
13/ {
14 compatible = "fsl,P1010";
15 #address-cells = <2>;
16 #size-cells = <2>;
17
18 cpus {
19 #address-cells = <1>;
20 #size-cells = <0>;
21
22 PowerPC,P1010@0 {
23 device_type = "cpu";
24 reg = <0x0>;
25 next-level-cache = <&L2>;
26 };
27 };
28
29 ifc@ffe1e000 {
30 #address-cells = <2>;
31 #size-cells = <1>;
32 compatible = "fsl,ifc", "simple-bus";
33 reg = <0x0 0xffe1e000 0 0x2000>;
34 interrupts = <16 2 19 2>;
35 interrupt-parent = <&mpic>;
36 };
37
38 soc@ffe00000 {
39 #address-cells = <1>;
40 #size-cells = <1>;
41 device_type = "soc";
42 compatible = "fsl,p1010-immr", "simple-bus";
43 ranges = <0x0 0x0 0xffe00000 0x100000>;
44 bus-frequency = <0>; // Filled out by uboot.
45
46 ecm-law@0 {
47 compatible = "fsl,ecm-law";
48 reg = <0x0 0x1000>;
49 fsl,num-laws = <12>;
50 };
51
52 ecm@1000 {
53 compatible = "fsl,p1010-ecm", "fsl,ecm";
54 reg = <0x1000 0x1000>;
55 interrupts = <16 2>;
56 interrupt-parent = <&mpic>;
57 };
58
59 memory-controller@2000 {
60 compatible = "fsl,p1010-memory-controller";
61 reg = <0x2000 0x1000>;
62 interrupt-parent = <&mpic>;
63 interrupts = <16 2>;
64 };
65
66 i2c@3000 {
67 #address-cells = <1>;
68 #size-cells = <0>;
69 cell-index = <0>;
70 compatible = "fsl-i2c";
71 reg = <0x3000 0x100>;
72 interrupts = <43 2>;
73 interrupt-parent = <&mpic>;
74 dfsrr;
75 };
76
77 i2c@3100 {
78 #address-cells = <1>;
79 #size-cells = <0>;
80 cell-index = <1>;
81 compatible = "fsl-i2c";
82 reg = <0x3100 0x100>;
83 interrupts = <43 2>;
84 interrupt-parent = <&mpic>;
85 dfsrr;
86 };
87
88 serial0: serial@4500 {
89 cell-index = <0>;
90 device_type = "serial";
91 compatible = "ns16550";
92 reg = <0x4500 0x100>;
93 clock-frequency = <0>;
94 interrupts = <42 2>;
95 interrupt-parent = <&mpic>;
96 };
97
98 serial1: serial@4600 {
99 cell-index = <1>;
100 device_type = "serial";
101 compatible = "ns16550";
102 reg = <0x4600 0x100>;
103 clock-frequency = <0>;
104 interrupts = <42 2>;
105 interrupt-parent = <&mpic>;
106 };
107
108 spi@7000 {
109 #address-cells = <1>;
110 #size-cells = <0>;
111 compatible = "fsl,mpc8536-espi";
112 reg = <0x7000 0x1000>;
113 interrupts = <59 0x2>;
114 interrupt-parent = <&mpic>;
115 fsl,espi-num-chipselects = <1>;
116 };
117
118 gpio: gpio-controller@f000 {
119 #gpio-cells = <2>;
120 compatible = "fsl,mpc8572-gpio";
121 reg = <0xf000 0x100>;
122 interrupts = <47 0x2>;
123 interrupt-parent = <&mpic>;
124 gpio-controller;
125 };
126
127 sata@18000 {
128 compatible = "fsl,pq-sata-v2";
129 reg = <0x18000 0x1000>;
130 cell-index = <1>;
131 interrupts = <74 0x2>;
132 interrupt-parent = <&mpic>;
133 };
134
135 sata@19000 {
136 compatible = "fsl,pq-sata-v2";
137 reg = <0x19000 0x1000>;
138 cell-index = <2>;
139 interrupts = <41 0x2>;
140 interrupt-parent = <&mpic>;
141 };
142
143 can0@1c000 {
144 compatible = "fsl,flexcan-v1.0";
145 reg = <0x1c000 0x1000>;
146 interrupts = <48 0x2>;
147 interrupt-parent = <&mpic>;
148 fsl,flexcan-clock-divider = <2>;
149 };
150
151 can1@1d000 {
152 compatible = "fsl,flexcan-v1.0";
153 reg = <0x1d000 0x1000>;
154 interrupts = <61 0x2>;
155 interrupt-parent = <&mpic>;
156 fsl,flexcan-clock-divider = <2>;
157 };
158
159 L2: l2-cache-controller@20000 {
160 compatible = "fsl,p1010-l2-cache-controller",
161 "fsl,p1014-l2-cache-controller";
162 reg = <0x20000 0x1000>;
163 cache-line-size = <32>; // 32 bytes
164 cache-size = <0x40000>; // L2,256K
165 interrupt-parent = <&mpic>;
166 interrupts = <16 2>;
167 };
168
169 dma@21300 {
170 #address-cells = <1>;
171 #size-cells = <1>;
172 compatible = "fsl,p1010-dma", "fsl,eloplus-dma";
173 reg = <0x21300 0x4>;
174 ranges = <0x0 0x21100 0x200>;
175 cell-index = <0>;
176 dma-channel@0 {
177 compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel";
178 reg = <0x0 0x80>;
179 cell-index = <0>;
180 interrupt-parent = <&mpic>;
181 interrupts = <20 2>;
182 };
183 dma-channel@80 {
184 compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel";
185 reg = <0x80 0x80>;
186 cell-index = <1>;
187 interrupt-parent = <&mpic>;
188 interrupts = <21 2>;
189 };
190 dma-channel@100 {
191 compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel";
192 reg = <0x100 0x80>;
193 cell-index = <2>;
194 interrupt-parent = <&mpic>;
195 interrupts = <22 2>;
196 };
197 dma-channel@180 {
198 compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel";
199 reg = <0x180 0x80>;
200 cell-index = <3>;
201 interrupt-parent = <&mpic>;
202 interrupts = <23 2>;
203 };
204 };
205
206 usb@22000 {
207 compatible = "fsl-usb2-dr";
208 reg = <0x22000 0x1000>;
209 #address-cells = <1>;
210 #size-cells = <0>;
211 interrupt-parent = <&mpic>;
212 interrupts = <28 0x2>;
213 dr_mode = "host";
214 };
215
216 mdio@24000 {
217 #address-cells = <1>;
218 #size-cells = <0>;
219 compatible = "fsl,etsec2-mdio";
220 reg = <0x24000 0x1000 0xb0030 0x4>;
221 };
222
223 mdio@25000 {
224 #address-cells = <1>;
225 #size-cells = <0>;
226 compatible = "fsl,etsec2-tbi";
227 reg = <0x25000 0x1000 0xb1030 0x4>;
228 tbi0: tbi-phy@11 {
229 reg = <0x11>;
230 device_type = "tbi-phy";
231 };
232 };
233
234 mdio@26000 {
235 #address-cells = <1>;
236 #size-cells = <0>;
237 compatible = "fsl,etsec2-tbi";
238 reg = <0x26000 0x1000 0xb1030 0x4>;
239 tbi1: tbi-phy@11 {
240 reg = <0x11>;
241 device_type = "tbi-phy";
242 };
243 };
244
245 sdhci@2e000 {
246 compatible = "fsl,esdhc";
247 reg = <0x2e000 0x1000>;
248 interrupts = <72 0x8>;
249 interrupt-parent = <&mpic>;
250 /* Filled in by U-Boot */
251 clock-frequency = <0>;
252 fsl,sdhci-auto-cmd12;
253 };
254
255 enet0: ethernet@b0000 {
256 #address-cells = <1>;
257 #size-cells = <1>;
258 device_type = "network";
259 model = "eTSEC";
260 compatible = "fsl,etsec2";
261 fsl,num_rx_queues = <0x8>;
262 fsl,num_tx_queues = <0x8>;
263 local-mac-address = [ 00 00 00 00 00 00 ];
264 interrupt-parent = <&mpic>;
265
266 queue-group@0 {
267 #address-cells = <1>;
268 #size-cells = <1>;
269 reg = <0xb0000 0x1000>;
270 fsl,rx-bit-map = <0xff>;
271 fsl,tx-bit-map = <0xff>;
272 interrupts = <29 2 30 2 34 2>;
273 };
274
275 };
276
277 enet1: ethernet@b1000 {
278 #address-cells = <1>;
279 #size-cells = <1>;
280 device_type = "network";
281 model = "eTSEC";
282 compatible = "fsl,etsec2";
283 fsl,num_rx_queues = <0x8>;
284 fsl,num_tx_queues = <0x8>;
285 local-mac-address = [ 00 00 00 00 00 00 ];
286 interrupt-parent = <&mpic>;
287
288 queue-group@0 {
289 #address-cells = <1>;
290 #size-cells = <1>;
291 reg = <0xb1000 0x1000>;
292 fsl,rx-bit-map = <0xff>;
293 fsl,tx-bit-map = <0xff>;
294 interrupts = <35 2 36 2 40 2>;
295 };
296
297 };
298
299 enet2: ethernet@b2000 {
300 #address-cells = <1>;
301 #size-cells = <1>;
302 device_type = "network";
303 model = "eTSEC";
304 compatible = "fsl,etsec2";
305 fsl,num_rx_queues = <0x8>;
306 fsl,num_tx_queues = <0x8>;
307 local-mac-address = [ 00 00 00 00 00 00 ];
308 interrupt-parent = <&mpic>;
309
310 queue-group@0 {
311 #address-cells = <1>;
312 #size-cells = <1>;
313 reg = <0xb2000 0x1000>;
314 fsl,rx-bit-map = <0xff>;
315 fsl,tx-bit-map = <0xff>;
316 interrupts = <31 2 32 2 33 2>;
317 };
318
319 };
320
321 mpic: pic@40000 {
322 interrupt-controller;
323 #address-cells = <0>;
324 #interrupt-cells = <2>;
325 reg = <0x40000 0x40000>;
326 compatible = "chrp,open-pic";
327 device_type = "open-pic";
328 };
329
330 msi@41600 {
331 compatible = "fsl,p1010-msi", "fsl,mpic-msi";
332 reg = <0x41600 0x80>;
333 msi-available-ranges = <0 0x100>;
334 interrupts = <
335 0xe0 0
336 0xe1 0
337 0xe2 0
338 0xe3 0
339 0xe4 0
340 0xe5 0
341 0xe6 0
342 0xe7 0>;
343 interrupt-parent = <&mpic>;
344 };
345
346 global-utilities@e0000 { //global utilities block
347 compatible = "fsl,p1010-guts";
348 reg = <0xe0000 0x1000>;
349 fsl,has-rstcr;
350 };
351 };
352
353 pci0: pcie@ffe09000 {
354 compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3", "fsl,qoriq-pcie-v2.2";
355 device_type = "pci";
356 #size-cells = <2>;
357 #address-cells = <3>;
358 reg = <0 0xffe09000 0 0x1000>;
359 bus-range = <0 255>;
360 clock-frequency = <33333333>;
361 interrupt-parent = <&mpic>;
362 interrupts = <16 2>;
363 };
364
365 pci1: pcie@ffe0a000 {
366 compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3", "fsl,qoriq-pcie-v2.2";
367 device_type = "pci";
368 #size-cells = <2>;
369 #address-cells = <3>;
370 reg = <0 0xffe0a000 0 0x1000>;
371 bus-range = <0 255>;
372 clock-frequency = <33333333>;
373 interrupt-parent = <&mpic>;
374 interrupts = <16 2>;
375 };
376};
diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts
index 98d9426d4b85..1be9743ab5e0 100644
--- a/arch/powerpc/boot/dts/p1022ds.dts
+++ b/arch/powerpc/boot/dts/p1022ds.dts
@@ -412,7 +412,6 @@
412 fsl,magic-packet; 412 fsl,magic-packet;
413 fsl,wake-on-filer; 413 fsl,wake-on-filer;
414 local-mac-address = [ 00 00 00 00 00 00 ]; 414 local-mac-address = [ 00 00 00 00 00 00 ];
415 fixed-link = <1 1 1000 0 0>;
416 phy-handle = <&phy0>; 415 phy-handle = <&phy0>;
417 phy-connection-type = "rgmii-id"; 416 phy-connection-type = "rgmii-id";
418 queue-group@0{ 417 queue-group@0{
@@ -439,7 +438,6 @@
439 fsl,num_rx_queues = <0x8>; 438 fsl,num_rx_queues = <0x8>;
440 fsl,num_tx_queues = <0x8>; 439 fsl,num_tx_queues = <0x8>;
441 local-mac-address = [ 00 00 00 00 00 00 ]; 440 local-mac-address = [ 00 00 00 00 00 00 ];
442 fixed-link = <1 1 1000 0 0>;
443 phy-handle = <&phy1>; 441 phy-handle = <&phy1>;
444 phy-connection-type = "rgmii-id"; 442 phy-connection-type = "rgmii-id";
445 queue-group@0{ 443 queue-group@0{
diff --git a/arch/powerpc/boot/dts/p1023rds.dts b/arch/powerpc/boot/dts/p1023rds.dts
new file mode 100644
index 000000000000..bfa96aa8f2ca
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1023rds.dts
@@ -0,0 +1,546 @@
1/*
2 * P1023 RDS Device Tree Source
3 *
4 * Copyright 2010-2011 Freescale Semiconductor Inc.
5 *
6 * Author: Roy Zang <tie-fei.zang@freescale.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * * Neither the name of Freescale Semiconductor nor the
16 * names of its contributors may be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 *
20 * ALTERNATIVELY, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") as published by the Free Software
22 * Foundation, either version 2 of that License or (at your option) any
23 * later version.
24 *
25 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
26 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
27 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
29 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
30 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
32 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
33 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 */
36
37/dts-v1/;
38
39/ {
40 model = "fsl,P1023";
41 compatible = "fsl,P1023RDS";
42 #address-cells = <2>;
43 #size-cells = <2>;
44
45 aliases {
46 serial0 = &serial0;
47 serial1 = &serial1;
48 pci0 = &pci0;
49 pci1 = &pci1;
50 pci2 = &pci2;
51
52 crypto = &crypto;
53 sec_jr0 = &sec_jr0;
54 sec_jr1 = &sec_jr1;
55 sec_jr2 = &sec_jr2;
56 sec_jr3 = &sec_jr3;
57 rtic_a = &rtic_a;
58 rtic_b = &rtic_b;
59 rtic_c = &rtic_c;
60 rtic_d = &rtic_d;
61 };
62
63 cpus {
64 #address-cells = <1>;
65 #size-cells = <0>;
66
67 cpu0: PowerPC,P1023@0 {
68 device_type = "cpu";
69 reg = <0x0>;
70 next-level-cache = <&L2>;
71 };
72
73 cpu1: PowerPC,P1023@1 {
74 device_type = "cpu";
75 reg = <0x1>;
76 next-level-cache = <&L2>;
77 };
78 };
79
80 memory {
81 device_type = "memory";
82 };
83
84 soc@ff600000 {
85 #address-cells = <1>;
86 #size-cells = <1>;
87 device_type = "soc";
88 compatible = "fsl,p1023-immr", "simple-bus";
89 ranges = <0x0 0x0 0xff600000 0x200000>;
90 bus-frequency = <0>; // Filled out by uboot.
91
92 ecm-law@0 {
93 compatible = "fsl,ecm-law";
94 reg = <0x0 0x1000>;
95 fsl,num-laws = <12>;
96 };
97
98 ecm@1000 {
99 compatible = "fsl,p1023-ecm", "fsl,ecm";
100 reg = <0x1000 0x1000>;
101 interrupts = <16 2>;
102 interrupt-parent = <&mpic>;
103 };
104
105 memory-controller@2000 {
106 compatible = "fsl,p1023-memory-controller";
107 reg = <0x2000 0x1000>;
108 interrupt-parent = <&mpic>;
109 interrupts = <16 2>;
110 };
111
112 i2c@3000 {
113 #address-cells = <1>;
114 #size-cells = <0>;
115 cell-index = <0>;
116 compatible = "fsl-i2c";
117 reg = <0x3000 0x100>;
118 interrupts = <43 2>;
119 interrupt-parent = <&mpic>;
120 dfsrr;
121 rtc@68 {
122 compatible = "dallas,ds1374";
123 reg = <0x68>;
124 };
125 };
126
127 i2c@3100 {
128 #address-cells = <1>;
129 #size-cells = <0>;
130 cell-index = <1>;
131 compatible = "fsl-i2c";
132 reg = <0x3100 0x100>;
133 interrupts = <43 2>;
134 interrupt-parent = <&mpic>;
135 dfsrr;
136 };
137
138 serial0: serial@4500 {
139 cell-index = <0>;
140 device_type = "serial";
141 compatible = "ns16550";
142 reg = <0x4500 0x100>;
143 clock-frequency = <0>;
144 interrupts = <42 2>;
145 interrupt-parent = <&mpic>;
146 };
147
148 serial1: serial@4600 {
149 cell-index = <1>;
150 device_type = "serial";
151 compatible = "ns16550";
152 reg = <0x4600 0x100>;
153 clock-frequency = <0>;
154 interrupts = <42 2>;
155 interrupt-parent = <&mpic>;
156 };
157
158 spi@7000 {
159 cell-index = <0>;
160 #address-cells = <1>;
161 #size-cells = <0>;
162 compatible = "fsl,p1023-espi", "fsl,mpc8536-espi";
163 reg = <0x7000 0x1000>;
164 interrupts = <59 0x2>;
165 interrupt-parent = <&mpic>;
166 fsl,espi-num-chipselects = <4>;
167
168 fsl_dataflash@0 {
169 #address-cells = <1>;
170 #size-cells = <1>;
171 compatible = "atmel,at45db081d";
172 reg = <0>;
173 spi-max-frequency = <40000000>; /* input clock */
174 partition@u-boot {
175 /* 512KB for u-boot Bootloader Image */
176 label = "u-boot-spi";
177 reg = <0x00000000 0x00080000>;
178 read-only;
179 };
180 partition@dtb {
181 /* 512KB for DTB Image */
182 label = "dtb-spi";
183 reg = <0x00080000 0x00080000>;
184 read-only;
185 };
186 };
187 };
188
189 gpio: gpio-controller@f000 {
190 #gpio-cells = <2>;
191 compatible = "fsl,qoriq-gpio";
192 reg = <0xf000 0x100>;
193 interrupts = <47 0x2>;
194 interrupt-parent = <&mpic>;
195 gpio-controller;
196 };
197
198 L2: l2-cache-controller@20000 {
199 compatible = "fsl,p1023-l2-cache-controller";
200 reg = <0x20000 0x1000>;
201 cache-line-size = <32>; // 32 bytes
202 cache-size = <0x40000>; // L2,256K
203 interrupt-parent = <&mpic>;
204 interrupts = <16 2>;
205 };
206
207 dma@21300 {
208 #address-cells = <1>;
209 #size-cells = <1>;
210 compatible = "fsl,eloplus-dma";
211 reg = <0x21300 0x4>;
212 ranges = <0x0 0x21100 0x200>;
213 cell-index = <0>;
214 dma-channel@0 {
215 compatible = "fsl,eloplus-dma-channel";
216 reg = <0x0 0x80>;
217 cell-index = <0>;
218 interrupt-parent = <&mpic>;
219 interrupts = <20 2>;
220 };
221 dma-channel@80 {
222 compatible = "fsl,eloplus-dma-channel";
223 reg = <0x80 0x80>;
224 cell-index = <1>;
225 interrupt-parent = <&mpic>;
226 interrupts = <21 2>;
227 };
228 dma-channel@100 {
229 compatible = "fsl,eloplus-dma-channel";
230 reg = <0x100 0x80>;
231 cell-index = <2>;
232 interrupt-parent = <&mpic>;
233 interrupts = <22 2>;
234 };
235 dma-channel@180 {
236 compatible = "fsl,eloplus-dma-channel";
237 reg = <0x180 0x80>;
238 cell-index = <3>;
239 interrupt-parent = <&mpic>;
240 interrupts = <23 2>;
241 };
242 };
243
244 usb@22000 {
245 #address-cells = <1>;
246 #size-cells = <0>;
247 compatible = "fsl-usb2-dr";
248 reg = <0x22000 0x1000>;
249 interrupt-parent = <&mpic>;
250 interrupts = <28 0x2>;
251 dr_mode = "host";
252 phy_type = "ulpi";
253 };
254
255 crypto: crypto@300000 {
256 compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
257 #address-cells = <1>;
258 #size-cells = <1>;
259 reg = <0x30000 0x10000>;
260 ranges = <0 0x30000 0x10000>;
261 interrupt-parent = <&mpic>;
262 interrupts = <58 2>;
263
264 sec_jr0: jr@1000 {
265 compatible = "fsl,sec-v4.2-job-ring",
266 "fsl,sec-v4.0-job-ring";
267 reg = <0x1000 0x1000>;
268 interrupts = <45 2>;
269 };
270
271 sec_jr1: jr@2000 {
272 compatible = "fsl,sec-v4.2-job-ring",
273 "fsl,sec-v4.0-job-ring";
274 reg = <0x2000 0x1000>;
275 interrupts = <45 2>;
276 };
277
278 sec_jr2: jr@3000 {
279 compatible = "fsl,sec-v4.2-job-ring",
280 "fsl,sec-v4.0-job-ring";
281 reg = <0x3000 0x1000>;
282 interrupts = <57 2>;
283 };
284
285 sec_jr3: jr@4000 {
286 compatible = "fsl,sec-v4.2-job-ring",
287 "fsl,sec-v4.0-job-ring";
288 reg = <0x4000 0x1000>;
289 interrupts = <57 2>;
290 };
291
292 rtic@6000 {
293 compatible = "fsl,sec-v4.2-rtic",
294 "fsl,sec-v4.0-rtic";
295 #address-cells = <1>;
296 #size-cells = <1>;
297 reg = <0x6000 0x100>;
298 ranges = <0x0 0x6100 0xe00>;
299
300 rtic_a: rtic-a@0 {
301 compatible = "fsl,sec-v4.2-rtic-memory",
302 "fsl,sec-v4.0-rtic-memory";
303 reg = <0x00 0x20 0x100 0x80>;
304 };
305
306 rtic_b: rtic-b@20 {
307 compatible = "fsl,sec-v4.2-rtic-memory",
308 "fsl,sec-v4.0-rtic-memory";
309 reg = <0x20 0x20 0x200 0x80>;
310 };
311
312 rtic_c: rtic-c@40 {
313 compatible = "fsl,sec-v4.2-rtic-memory",
314 "fsl,sec-v4.0-rtic-memory";
315 reg = <0x40 0x20 0x300 0x80>;
316 };
317
318 rtic_d: rtic-d@60 {
319 compatible = "fsl,sec-v4.2-rtic-memory",
320 "fsl,sec-v4.0-rtic-memory";
321 reg = <0x60 0x20 0x500 0x80>;
322 };
323 };
324 };
325
326 power@e0070{
327 compatible = "fsl,mpc8536-pmc", "fsl,mpc8548-pmc",
328 "fsl,p1022-pmc";
329 reg = <0xe0070 0x20>;
330 etsec1_clk: soc-clk@B0{
331 fsl,pmcdr-mask = <0x00000080>;
332 };
333 etsec2_clk: soc-clk@B1{
334 fsl,pmcdr-mask = <0x00000040>;
335 };
336 etsec3_clk: soc-clk@B2{
337 fsl,pmcdr-mask = <0x00000020>;
338 };
339 };
340
341 mpic: pic@40000 {
342 interrupt-controller;
343 #address-cells = <0>;
344 #interrupt-cells = <2>;
345 reg = <0x40000 0x40000>;
346 compatible = "chrp,open-pic";
347 device_type = "open-pic";
348 };
349
350 msi@41600 {
351 compatible = "fsl,p1023-msi", "fsl,mpic-msi";
352 reg = <0x41600 0x80>;
353 msi-available-ranges = <0 0x100>;
354 interrupts = <
355 0xe0 0
356 0xe1 0
357 0xe2 0
358 0xe3 0
359 0xe4 0
360 0xe5 0
361 0xe6 0
362 0xe7 0>;
363 interrupt-parent = <&mpic>;
364 };
365
366 global-utilities@e0000 { //global utilities block
367 compatible = "fsl,p1023-guts";
368 reg = <0xe0000 0x1000>;
369 fsl,has-rstcr;
370 };
371 };
372
373 localbus@ff605000 {
374 #address-cells = <2>;
375 #size-cells = <1>;
376 compatible = "fsl,p1023-elbc", "fsl,elbc", "simple-bus";
377 reg = <0 0xff605000 0 0x1000>;
378 interrupts = <19 2>;
379 interrupt-parent = <&mpic>;
380
381 /* NOR Flash, BCSR */
382 ranges = <0x0 0x0 0x0 0xee000000 0x02000000
383 0x1 0x0 0x0 0xe0000000 0x00008000>;
384
385 nor@0,0 {
386 #address-cells = <1>;
387 #size-cells = <1>;
388 compatible = "cfi-flash";
389 reg = <0x0 0x0 0x02000000>;
390 bank-width = <1>;
391 device-width = <1>;
392 partition@0 {
393 label = "ramdisk";
394 reg = <0x00000000 0x01c00000>;
395 };
396 partition@1c00000 {
397 label = "kernel";
398 reg = <0x01c00000 0x002e0000>;
399 };
400 partiton@1ee0000 {
401 label = "dtb";
402 reg = <0x01ee0000 0x00020000>;
403 };
404 partition@1f00000 {
405 label = "firmware";
406 reg = <0x01f00000 0x00080000>;
407 read-only;
408 };
409 partition@1f80000 {
410 label = "u-boot";
411 reg = <0x01f80000 0x00080000>;
412 read-only;
413 };
414 };
415
416 fpga@1,0 {
417 #address-cells = <1>;
418 #size-cells = <1>;
419 compatible = "fsl,p1023rds-fpga";
420 reg = <1 0 0x8000>;
421 ranges = <0 1 0 0x8000>;
422
423 bcsr@20 {
424 compatible = "fsl,p1023rds-bcsr";
425 reg = <0x20 0x20>;
426 };
427 };
428 };
429
430 pci0: pcie@ff60a000 {
431 compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2";
432 cell-index = <1>;
433 device_type = "pci";
434 #size-cells = <2>;
435 #address-cells = <3>;
436 reg = <0 0xff60a000 0 0x1000>;
437 bus-range = <0 255>;
438 ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
439 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
440 clock-frequency = <33333333>;
441 interrupt-parent = <&mpic>;
442 interrupts = <16 2>;
443 pcie@0 {
444 reg = <0x0 0x0 0x0 0x0 0x0>;
445 #interrupt-cells = <1>;
446 #size-cells = <2>;
447 #address-cells = <3>;
448 device_type = "pci";
449 interrupt-parent = <&mpic>;
450 interrupts = <16 2>;
451 interrupt-map-mask = <0xf800 0 0 7>;
452 interrupt-map = <
453 /* IDSEL 0x0 */
454 0000 0 0 1 &mpic 0 1
455 0000 0 0 2 &mpic 1 1
456 0000 0 0 3 &mpic 2 1
457 0000 0 0 4 &mpic 3 1
458 >;
459 ranges = <0x2000000 0x0 0xc0000000
460 0x2000000 0x0 0xc0000000
461 0x0 0x20000000
462
463 0x1000000 0x0 0x0
464 0x1000000 0x0 0x0
465 0x0 0x100000>;
466 };
467 };
468
469 pci1: pcie@ff609000 {
470 compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2";
471 cell-index = <2>;
472 device_type = "pci";
473 #size-cells = <2>;
474 #address-cells = <3>;
475 reg = <0 0xff609000 0 0x1000>;
476 bus-range = <0 255>;
477 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
478 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
479 clock-frequency = <33333333>;
480 interrupt-parent = <&mpic>;
481 interrupts = <16 2>;
482 pcie@0 {
483 reg = <0x0 0x0 0x0 0x0 0x0>;
484 #interrupt-cells = <1>;
485 #size-cells = <2>;
486 #address-cells = <3>;
487 device_type = "pci";
488 interrupt-parent = <&mpic>;
489 interrupts = <16 2>;
490 interrupt-map-mask = <0xf800 0 0 7>;
491 interrupt-map = <
492 /* IDSEL 0x0 */
493 0000 0 0 1 &mpic 4 1
494 0000 0 0 2 &mpic 5 1
495 0000 0 0 3 &mpic 6 1
496 0000 0 0 4 &mpic 7 1
497 >;
498 ranges = <0x2000000 0x0 0xa0000000
499 0x2000000 0x0 0xa0000000
500 0x0 0x20000000
501
502 0x1000000 0x0 0x0
503 0x1000000 0x0 0x0
504 0x0 0x100000>;
505 };
506 };
507
508 pci2: pcie@ff60b000 {
509 cell-index = <3>;
510 compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2";
511 device_type = "pci";
512 #size-cells = <2>;
513 #address-cells = <3>;
514 reg = <0 0xff60b000 0 0x1000>;
515 bus-range = <0 255>;
516 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
517 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
518 clock-frequency = <33333333>;
519 interrupt-parent = <&mpic>;
520 interrupts = <16 2>;
521 pcie@0 {
522 reg = <0x0 0x0 0x0 0x0 0x0>;
523 #interrupt-cells = <1>;
524 #size-cells = <2>;
525 #address-cells = <3>;
526 device_type = "pci";
527 interrupt-parent = <&mpic>;
528 interrupts = <16 2>;
529 interrupt-map-mask = <0xf800 0 0 7>;
530 interrupt-map = <
531 /* IDSEL 0x0 */
532 0000 0 0 1 &mpic 8 1
533 0000 0 0 2 &mpic 9 1
534 0000 0 0 3 &mpic 10 1
535 0000 0 0 4 &mpic 11 1
536 >;
537 ranges = <0x2000000 0x0 0x80000000
538 0x2000000 0x0 0x80000000
539 0x0 0x20000000
540
541 0x1000000 0x0 0x0
542 0x1000000 0x0 0x0
543 0x0 0x100000>;
544 };
545 };
546};
diff --git a/arch/powerpc/boot/dts/p2040rdb.dts b/arch/powerpc/boot/dts/p2040rdb.dts
new file mode 100644
index 000000000000..7d84e391c632
--- /dev/null
+++ b/arch/powerpc/boot/dts/p2040rdb.dts
@@ -0,0 +1,166 @@
1/*
2 * P2040RDB Device Tree Source
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/include/ "p2040si.dtsi"
36
37/ {
38 model = "fsl,P2040RDB";
39 compatible = "fsl,P2040RDB";
40 #address-cells = <2>;
41 #size-cells = <2>;
42 interrupt-parent = <&mpic>;
43
44 memory {
45 device_type = "memory";
46 };
47
48 soc: soc@ffe000000 {
49 spi@110000 {
50 flash@0 {
51 #address-cells = <1>;
52 #size-cells = <1>;
53 compatible = "spansion,s25sl12801";
54 reg = <0>;
55 spi-max-frequency = <40000000>; /* input clock */
56 partition@u-boot {
57 label = "u-boot";
58 reg = <0x00000000 0x00100000>;
59 read-only;
60 };
61 partition@kernel {
62 label = "kernel";
63 reg = <0x00100000 0x00500000>;
64 read-only;
65 };
66 partition@dtb {
67 label = "dtb";
68 reg = <0x00600000 0x00100000>;
69 read-only;
70 };
71 partition@fs {
72 label = "file system";
73 reg = <0x00700000 0x00900000>;
74 };
75 };
76 };
77
78 i2c@118000 {
79 lm75b@48 {
80 compatible = "nxp,lm75a";
81 reg = <0x48>;
82 };
83 eeprom@50 {
84 compatible = "at24,24c256";
85 reg = <0x50>;
86 };
87 rtc@68 {
88 compatible = "pericom,pt7c4338";
89 reg = <0x68>;
90 };
91 };
92
93 i2c@118100 {
94 eeprom@50 {
95 compatible = "at24,24c256";
96 reg = <0x50>;
97 };
98 };
99
100 usb0: usb@210000 {
101 phy_type = "utmi";
102 };
103
104 usb1: usb@211000 {
105 dr_mode = "host";
106 phy_type = "utmi";
107 };
108 };
109
110 localbus@ffe124000 {
111 reg = <0xf 0xfe124000 0 0x1000>;
112 ranges = <0 0 0xf 0xe8000000 0x08000000>;
113
114 flash@0,0 {
115 compatible = "cfi-flash";
116 reg = <0 0 0x08000000>;
117 bank-width = <2>;
118 device-width = <2>;
119 };
120 };
121
122 pci0: pcie@ffe200000 {
123 reg = <0xf 0xfe200000 0 0x1000>;
124 ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
125 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
126 pcie@0 {
127 ranges = <0x02000000 0 0xe0000000
128 0x02000000 0 0xe0000000
129 0 0x20000000
130
131 0x01000000 0 0x00000000
132 0x01000000 0 0x00000000
133 0 0x00010000>;
134 };
135 };
136
137 pci1: pcie@ffe201000 {
138 reg = <0xf 0xfe201000 0 0x1000>;
139 ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
140 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
141 pcie@0 {
142 ranges = <0x02000000 0 0xe0000000
143 0x02000000 0 0xe0000000
144 0 0x20000000
145
146 0x01000000 0 0x00000000
147 0x01000000 0 0x00000000
148 0 0x00010000>;
149 };
150 };
151
152 pci2: pcie@ffe202000 {
153 reg = <0xf 0xfe202000 0 0x1000>;
154 ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
155 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
156 pcie@0 {
157 ranges = <0x02000000 0 0xe0000000
158 0x02000000 0 0xe0000000
159 0 0x20000000
160
161 0x01000000 0 0x00000000
162 0x01000000 0 0x00000000
163 0 0x00010000>;
164 };
165 };
166};
diff --git a/arch/powerpc/boot/dts/p2040si.dtsi b/arch/powerpc/boot/dts/p2040si.dtsi
new file mode 100644
index 000000000000..5fdbb24c0763
--- /dev/null
+++ b/arch/powerpc/boot/dts/p2040si.dtsi
@@ -0,0 +1,623 @@
1/*
2 * P2040 Silicon Device Tree Source
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/dts-v1/;
36
37/ {
38 compatible = "fsl,P2040";
39 #address-cells = <2>;
40 #size-cells = <2>;
41 interrupt-parent = <&mpic>;
42
43 aliases {
44 ccsr = &soc;
45
46 serial0 = &serial0;
47 serial1 = &serial1;
48 serial2 = &serial2;
49 serial3 = &serial3;
50 pci0 = &pci0;
51 pci1 = &pci1;
52 pci2 = &pci2;
53 usb0 = &usb0;
54 usb1 = &usb1;
55 dma0 = &dma0;
56 dma1 = &dma1;
57 sdhc = &sdhc;
58 msi0 = &msi0;
59 msi1 = &msi1;
60 msi2 = &msi2;
61
62 crypto = &crypto;
63 sec_jr0 = &sec_jr0;
64 sec_jr1 = &sec_jr1;
65 sec_jr2 = &sec_jr2;
66 sec_jr3 = &sec_jr3;
67 rtic_a = &rtic_a;
68 rtic_b = &rtic_b;
69 rtic_c = &rtic_c;
70 rtic_d = &rtic_d;
71 sec_mon = &sec_mon;
72 };
73
74 cpus {
75 #address-cells = <1>;
76 #size-cells = <0>;
77
78 cpu0: PowerPC,e500mc@0 {
79 device_type = "cpu";
80 reg = <0>;
81 next-level-cache = <&L2_0>;
82 L2_0: l2-cache {
83 next-level-cache = <&cpc>;
84 };
85 };
86 cpu1: PowerPC,e500mc@1 {
87 device_type = "cpu";
88 reg = <1>;
89 next-level-cache = <&L2_1>;
90 L2_1: l2-cache {
91 next-level-cache = <&cpc>;
92 };
93 };
94 cpu2: PowerPC,e500mc@2 {
95 device_type = "cpu";
96 reg = <2>;
97 next-level-cache = <&L2_2>;
98 L2_2: l2-cache {
99 next-level-cache = <&cpc>;
100 };
101 };
102 cpu3: PowerPC,e500mc@3 {
103 device_type = "cpu";
104 reg = <3>;
105 next-level-cache = <&L2_3>;
106 L2_3: l2-cache {
107 next-level-cache = <&cpc>;
108 };
109 };
110 };
111
112 soc: soc@ffe000000 {
113 #address-cells = <1>;
114 #size-cells = <1>;
115 device_type = "soc";
116 compatible = "simple-bus";
117 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
118 reg = <0xf 0xfe000000 0 0x00001000>;
119
120 soc-sram-error {
121 compatible = "fsl,soc-sram-error";
122 interrupts = <16 2 1 29>;
123 };
124
125 corenet-law@0 {
126 compatible = "fsl,corenet-law";
127 reg = <0x0 0x1000>;
128 fsl,num-laws = <32>;
129 };
130
131 memory-controller@8000 {
132 compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
133 reg = <0x8000 0x1000>;
134 interrupts = <16 2 1 23>;
135 };
136
137 cpc: l3-cache-controller@10000 {
138 compatible = "fsl,p2040-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache";
139 reg = <0x10000 0x1000>;
140 interrupts = <16 2 1 27>;
141 };
142
143 corenet-cf@18000 {
144 compatible = "fsl,corenet-cf";
145 reg = <0x18000 0x1000>;
146 interrupts = <16 2 1 31>;
147 fsl,ccf-num-csdids = <32>;
148 fsl,ccf-num-snoopids = <32>;
149 };
150
151 iommu@20000 {
152 compatible = "fsl,pamu-v1.0", "fsl,pamu";
153 reg = <0x20000 0x4000>;
154 interrupts = <
155 24 2 0 0
156 16 2 1 30>;
157 };
158
159 mpic: pic@40000 {
160 clock-frequency = <0>;
161 interrupt-controller;
162 #address-cells = <0>;
163 #interrupt-cells = <4>;
164 reg = <0x40000 0x40000>;
165 compatible = "fsl,mpic", "chrp,open-pic";
166 device_type = "open-pic";
167 };
168
169 msi0: msi@41600 {
170 compatible = "fsl,mpic-msi";
171 reg = <0x41600 0x200>;
172 msi-available-ranges = <0 0x100>;
173 interrupts = <
174 0xe0 0 0 0
175 0xe1 0 0 0
176 0xe2 0 0 0
177 0xe3 0 0 0
178 0xe4 0 0 0
179 0xe5 0 0 0
180 0xe6 0 0 0
181 0xe7 0 0 0>;
182 };
183
184 msi1: msi@41800 {
185 compatible = "fsl,mpic-msi";
186 reg = <0x41800 0x200>;
187 msi-available-ranges = <0 0x100>;
188 interrupts = <
189 0xe8 0 0 0
190 0xe9 0 0 0
191 0xea 0 0 0
192 0xeb 0 0 0
193 0xec 0 0 0
194 0xed 0 0 0
195 0xee 0 0 0
196 0xef 0 0 0>;
197 };
198
199 msi2: msi@41a00 {
200 compatible = "fsl,mpic-msi";
201 reg = <0x41a00 0x200>;
202 msi-available-ranges = <0 0x100>;
203 interrupts = <
204 0xf0 0 0 0
205 0xf1 0 0 0
206 0xf2 0 0 0
207 0xf3 0 0 0
208 0xf4 0 0 0
209 0xf5 0 0 0
210 0xf6 0 0 0
211 0xf7 0 0 0>;
212 };
213
214 guts: global-utilities@e0000 {
215 compatible = "fsl,qoriq-device-config-1.0";
216 reg = <0xe0000 0xe00>;
217 fsl,has-rstcr;
218 #sleep-cells = <1>;
219 fsl,liodn-bits = <12>;
220 };
221
222 pins: global-utilities@e0e00 {
223 compatible = "fsl,qoriq-pin-control-1.0";
224 reg = <0xe0e00 0x200>;
225 #sleep-cells = <2>;
226 };
227
228 clockgen: global-utilities@e1000 {
229 compatible = "fsl,p2040-clockgen", "fsl,qoriq-clockgen-1.0";
230 reg = <0xe1000 0x1000>;
231 clock-frequency = <0>;
232 };
233
234 rcpm: global-utilities@e2000 {
235 compatible = "fsl,qoriq-rcpm-1.0";
236 reg = <0xe2000 0x1000>;
237 #sleep-cells = <1>;
238 };
239
240 sfp: sfp@e8000 {
241 compatible = "fsl,p2040-sfp", "fsl,qoriq-sfp-1.0";
242 reg = <0xe8000 0x1000>;
243 };
244
245 serdes: serdes@ea000 {
246 compatible = "fsl,p2040-serdes";
247 reg = <0xea000 0x1000>;
248 };
249
250 dma0: dma@100300 {
251 #address-cells = <1>;
252 #size-cells = <1>;
253 compatible = "fsl,p2040-dma", "fsl,eloplus-dma";
254 reg = <0x100300 0x4>;
255 ranges = <0x0 0x100100 0x200>;
256 cell-index = <0>;
257 dma-channel@0 {
258 compatible = "fsl,p2040-dma-channel",
259 "fsl,eloplus-dma-channel";
260 reg = <0x0 0x80>;
261 cell-index = <0>;
262 interrupts = <28 2 0 0>;
263 };
264 dma-channel@80 {
265 compatible = "fsl,p2040-dma-channel",
266 "fsl,eloplus-dma-channel";
267 reg = <0x80 0x80>;
268 cell-index = <1>;
269 interrupts = <29 2 0 0>;
270 };
271 dma-channel@100 {
272 compatible = "fsl,p2040-dma-channel",
273 "fsl,eloplus-dma-channel";
274 reg = <0x100 0x80>;
275 cell-index = <2>;
276 interrupts = <30 2 0 0>;
277 };
278 dma-channel@180 {
279 compatible = "fsl,p2040-dma-channel",
280 "fsl,eloplus-dma-channel";
281 reg = <0x180 0x80>;
282 cell-index = <3>;
283 interrupts = <31 2 0 0>;
284 };
285 };
286
287 dma1: dma@101300 {
288 #address-cells = <1>;
289 #size-cells = <1>;
290 compatible = "fsl,p2040-dma", "fsl,eloplus-dma";
291 reg = <0x101300 0x4>;
292 ranges = <0x0 0x101100 0x200>;
293 cell-index = <1>;
294 dma-channel@0 {
295 compatible = "fsl,p2040-dma-channel",
296 "fsl,eloplus-dma-channel";
297 reg = <0x0 0x80>;
298 cell-index = <0>;
299 interrupts = <32 2 0 0>;
300 };
301 dma-channel@80 {
302 compatible = "fsl,p2040-dma-channel",
303 "fsl,eloplus-dma-channel";
304 reg = <0x80 0x80>;
305 cell-index = <1>;
306 interrupts = <33 2 0 0>;
307 };
308 dma-channel@100 {
309 compatible = "fsl,p2040-dma-channel",
310 "fsl,eloplus-dma-channel";
311 reg = <0x100 0x80>;
312 cell-index = <2>;
313 interrupts = <34 2 0 0>;
314 };
315 dma-channel@180 {
316 compatible = "fsl,p2040-dma-channel",
317 "fsl,eloplus-dma-channel";
318 reg = <0x180 0x80>;
319 cell-index = <3>;
320 interrupts = <35 2 0 0>;
321 };
322 };
323
324 spi@110000 {
325 #address-cells = <1>;
326 #size-cells = <0>;
327 compatible = "fsl,p2040-espi", "fsl,mpc8536-espi";
328 reg = <0x110000 0x1000>;
329 interrupts = <53 0x2 0 0>;
330 fsl,espi-num-chipselects = <4>;
331
332 };
333
334 sdhc: sdhc@114000 {
335 compatible = "fsl,p2040-esdhc", "fsl,esdhc";
336 reg = <0x114000 0x1000>;
337 interrupts = <48 2 0 0>;
338 sdhci,auto-cmd12;
339 clock-frequency = <0>;
340 };
341
342
343 i2c@118000 {
344 #address-cells = <1>;
345 #size-cells = <0>;
346 cell-index = <0>;
347 compatible = "fsl-i2c";
348 reg = <0x118000 0x100>;
349 interrupts = <38 2 0 0>;
350 dfsrr;
351 };
352
353 i2c@118100 {
354 #address-cells = <1>;
355 #size-cells = <0>;
356 cell-index = <1>;
357 compatible = "fsl-i2c";
358 reg = <0x118100 0x100>;
359 interrupts = <38 2 0 0>;
360 dfsrr;
361 };
362
363 i2c@119000 {
364 #address-cells = <1>;
365 #size-cells = <0>;
366 cell-index = <2>;
367 compatible = "fsl-i2c";
368 reg = <0x119000 0x100>;
369 interrupts = <39 2 0 0>;
370 dfsrr;
371 };
372
373 i2c@119100 {
374 #address-cells = <1>;
375 #size-cells = <0>;
376 cell-index = <3>;
377 compatible = "fsl-i2c";
378 reg = <0x119100 0x100>;
379 interrupts = <39 2 0 0>;
380 dfsrr;
381 };
382
383 serial0: serial@11c500 {
384 cell-index = <0>;
385 device_type = "serial";
386 compatible = "ns16550";
387 reg = <0x11c500 0x100>;
388 clock-frequency = <0>;
389 interrupts = <36 2 0 0>;
390 };
391
392 serial1: serial@11c600 {
393 cell-index = <1>;
394 device_type = "serial";
395 compatible = "ns16550";
396 reg = <0x11c600 0x100>;
397 clock-frequency = <0>;
398 interrupts = <36 2 0 0>;
399 };
400
401 serial2: serial@11d500 {
402 cell-index = <2>;
403 device_type = "serial";
404 compatible = "ns16550";
405 reg = <0x11d500 0x100>;
406 clock-frequency = <0>;
407 interrupts = <37 2 0 0>;
408 };
409
410 serial3: serial@11d600 {
411 cell-index = <3>;
412 device_type = "serial";
413 compatible = "ns16550";
414 reg = <0x11d600 0x100>;
415 clock-frequency = <0>;
416 interrupts = <37 2 0 0>;
417 };
418
419 gpio0: gpio@130000 {
420 compatible = "fsl,p2040-gpio", "fsl,qoriq-gpio";
421 reg = <0x130000 0x1000>;
422 interrupts = <55 2 0 0>;
423 #gpio-cells = <2>;
424 gpio-controller;
425 };
426
427 usb0: usb@210000 {
428 compatible = "fsl,p2040-usb2-mph",
429 "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
430 reg = <0x210000 0x1000>;
431 #address-cells = <1>;
432 #size-cells = <0>;
433 interrupts = <44 0x2 0 0>;
434 port0;
435 };
436
437 usb1: usb@211000 {
438 compatible = "fsl,p2040-usb2-dr",
439 "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
440 reg = <0x211000 0x1000>;
441 #address-cells = <1>;
442 #size-cells = <0>;
443 interrupts = <45 0x2 0 0>;
444 };
445
446 sata@220000 {
447 compatible = "fsl,p2040-sata", "fsl,pq-sata-v2";
448 reg = <0x220000 0x1000>;
449 interrupts = <68 0x2 0 0>;
450 };
451
452 sata@221000 {
453 compatible = "fsl,p2040-sata", "fsl,pq-sata-v2";
454 reg = <0x221000 0x1000>;
455 interrupts = <69 0x2 0 0>;
456 };
457
458 crypto: crypto@300000 {
459 compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
460 #address-cells = <1>;
461 #size-cells = <1>;
462 reg = <0x300000 0x10000>;
463 ranges = <0 0x300000 0x10000>;
464 interrupts = <92 2 0 0>;
465
466 sec_jr0: jr@1000 {
467 compatible = "fsl,sec-v4.2-job-ring",
468 "fsl,sec-v4.0-job-ring";
469 reg = <0x1000 0x1000>;
470 interrupts = <88 2 0 0>;
471 };
472
473 sec_jr1: jr@2000 {
474 compatible = "fsl,sec-v4.2-job-ring",
475 "fsl,sec-v4.0-job-ring";
476 reg = <0x2000 0x1000>;
477 interrupts = <89 2 0 0>;
478 };
479
480 sec_jr2: jr@3000 {
481 compatible = "fsl,sec-v4.2-job-ring",
482 "fsl,sec-v4.0-job-ring";
483 reg = <0x3000 0x1000>;
484 interrupts = <90 2 0 0>;
485 };
486
487 sec_jr3: jr@4000 {
488 compatible = "fsl,sec-v4.2-job-ring",
489 "fsl,sec-v4.0-job-ring";
490 reg = <0x4000 0x1000>;
491 interrupts = <91 2 0 0>;
492 };
493
494 rtic@6000 {
495 compatible = "fsl,sec-v4.2-rtic",
496 "fsl,sec-v4.0-rtic";
497 #address-cells = <1>;
498 #size-cells = <1>;
499 reg = <0x6000 0x100>;
500 ranges = <0x0 0x6100 0xe00>;
501
502 rtic_a: rtic-a@0 {
503 compatible = "fsl,sec-v4.2-rtic-memory",
504 "fsl,sec-v4.0-rtic-memory";
505 reg = <0x00 0x20 0x100 0x80>;
506 };
507
508 rtic_b: rtic-b@20 {
509 compatible = "fsl,sec-v4.2-rtic-memory",
510 "fsl,sec-v4.0-rtic-memory";
511 reg = <0x20 0x20 0x200 0x80>;
512 };
513
514 rtic_c: rtic-c@40 {
515 compatible = "fsl,sec-v4.2-rtic-memory",
516 "fsl,sec-v4.0-rtic-memory";
517 reg = <0x40 0x20 0x300 0x80>;
518 };
519
520 rtic_d: rtic-d@60 {
521 compatible = "fsl,sec-v4.2-rtic-memory",
522 "fsl,sec-v4.0-rtic-memory";
523 reg = <0x60 0x20 0x500 0x80>;
524 };
525 };
526 };
527
528 sec_mon: sec_mon@314000 {
529 compatible = "fsl,sec-v4.2-mon", "fsl,sec-v4.0-mon";
530 reg = <0x314000 0x1000>;
531 interrupts = <93 2 0 0>;
532 };
533
534 };
535
536 localbus@ffe124000 {
537 compatible = "fsl,p2040-elbc", "fsl,elbc", "simple-bus";
538 interrupts = <25 2 0 0>;
539 #address-cells = <2>;
540 #size-cells = <1>;
541 };
542
543 pci0: pcie@ffe200000 {
544 compatible = "fsl,p2040-pcie", "fsl,qoriq-pcie-v2.2";
545 device_type = "pci";
546 #size-cells = <2>;
547 #address-cells = <3>;
548 bus-range = <0x0 0xff>;
549 clock-frequency = <0x1fca055>;
550 fsl,msi = <&msi0>;
551 interrupts = <16 2 1 15>;
552 pcie@0 {
553 reg = <0 0 0 0 0>;
554 #interrupt-cells = <1>;
555 #size-cells = <2>;
556 #address-cells = <3>;
557 device_type = "pci";
558 interrupts = <16 2 1 15>;
559 interrupt-map-mask = <0xf800 0 0 7>;
560 interrupt-map = <
561 /* IDSEL 0x0 */
562 0000 0 0 1 &mpic 40 1 0 0
563 0000 0 0 2 &mpic 1 1 0 0
564 0000 0 0 3 &mpic 2 1 0 0
565 0000 0 0 4 &mpic 3 1 0 0
566 >;
567 };
568 };
569
570 pci1: pcie@ffe201000 {
571 compatible = "fsl,p2040-pcie", "fsl,qoriq-pcie-v2.2";
572 device_type = "pci";
573 #size-cells = <2>;
574 #address-cells = <3>;
575 bus-range = <0 0xff>;
576 clock-frequency = <0x1fca055>;
577 fsl,msi = <&msi1>;
578 interrupts = <16 2 1 14>;
579 pcie@0 {
580 reg = <0 0 0 0 0>;
581 #interrupt-cells = <1>;
582 #size-cells = <2>;
583 #address-cells = <3>;
584 device_type = "pci";
585 interrupts = <16 2 1 14>;
586 interrupt-map-mask = <0xf800 0 0 7>;
587 interrupt-map = <
588 /* IDSEL 0x0 */
589 0000 0 0 1 &mpic 41 1 0 0
590 0000 0 0 2 &mpic 5 1 0 0
591 0000 0 0 3 &mpic 6 1 0 0
592 0000 0 0 4 &mpic 7 1 0 0
593 >;
594 };
595 };
596
597 pci2: pcie@ffe202000 {
598 compatible = "fsl,p2040-pcie", "fsl,qoriq-pcie-v2.2";
599 device_type = "pci";
600 #size-cells = <2>;
601 #address-cells = <3>;
602 bus-range = <0x0 0xff>;
603 clock-frequency = <0x1fca055>;
604 fsl,msi = <&msi2>;
605 interrupts = <16 2 1 13>;
606 pcie@0 {
607 reg = <0 0 0 0 0>;
608 #interrupt-cells = <1>;
609 #size-cells = <2>;
610 #address-cells = <3>;
611 device_type = "pci";
612 interrupts = <16 2 1 13>;
613 interrupt-map-mask = <0xf800 0 0 7>;
614 interrupt-map = <
615 /* IDSEL 0x0 */
616 0000 0 0 1 &mpic 42 1 0 0
617 0000 0 0 2 &mpic 9 1 0 0
618 0000 0 0 3 &mpic 10 1 0 0
619 0000 0 0 4 &mpic 11 1 0 0
620 >;
621 };
622 };
623};
diff --git a/arch/powerpc/boot/dts/p3041ds.dts b/arch/powerpc/boot/dts/p3041ds.dts
new file mode 100644
index 000000000000..69cae674f396
--- /dev/null
+++ b/arch/powerpc/boot/dts/p3041ds.dts
@@ -0,0 +1,214 @@
1/*
2 * P3041DS Device Tree Source
3 *
4 * Copyright 2010-2011 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/include/ "p3041si.dtsi"
36
37/ {
38 model = "fsl,P3041DS";
39 compatible = "fsl,P3041DS";
40 #address-cells = <2>;
41 #size-cells = <2>;
42 interrupt-parent = <&mpic>;
43
44 memory {
45 device_type = "memory";
46 };
47
48 soc: soc@ffe000000 {
49 spi@110000 {
50 flash@0 {
51 #address-cells = <1>;
52 #size-cells = <1>;
53 compatible = "spansion,s25sl12801";
54 reg = <0>;
55 spi-max-frequency = <40000000>; /* input clock */
56 partition@u-boot {
57 label = "u-boot";
58 reg = <0x00000000 0x00100000>;
59 read-only;
60 };
61 partition@kernel {
62 label = "kernel";
63 reg = <0x00100000 0x00500000>;
64 read-only;
65 };
66 partition@dtb {
67 label = "dtb";
68 reg = <0x00600000 0x00100000>;
69 read-only;
70 };
71 partition@fs {
72 label = "file system";
73 reg = <0x00700000 0x00900000>;
74 };
75 };
76 };
77
78 i2c@118100 {
79 eeprom@51 {
80 compatible = "at24,24c256";
81 reg = <0x51>;
82 };
83 eeprom@52 {
84 compatible = "at24,24c256";
85 reg = <0x52>;
86 };
87 };
88
89 i2c@119100 {
90 rtc@68 {
91 compatible = "dallas,ds3232";
92 reg = <0x68>;
93 interrupts = <0x1 0x1 0 0>;
94 };
95 };
96 };
97
98 localbus@ffe124000 {
99 reg = <0xf 0xfe124000 0 0x1000>;
100 ranges = <0 0 0xf 0xe8000000 0x08000000
101 2 0 0xf 0xffa00000 0x00040000
102 3 0 0xf 0xffdf0000 0x00008000>;
103
104 flash@0,0 {
105 compatible = "cfi-flash";
106 reg = <0 0 0x08000000>;
107 bank-width = <2>;
108 device-width = <2>;
109 };
110
111 nand@2,0 {
112 #address-cells = <1>;
113 #size-cells = <1>;
114 compatible = "fsl,elbc-fcm-nand";
115 reg = <0x2 0x0 0x40000>;
116
117 partition@0 {
118 label = "NAND U-Boot Image";
119 reg = <0x0 0x02000000>;
120 read-only;
121 };
122
123 partition@2000000 {
124 label = "NAND Root File System";
125 reg = <0x02000000 0x10000000>;
126 };
127
128 partition@12000000 {
129 label = "NAND Compressed RFS Image";
130 reg = <0x12000000 0x08000000>;
131 };
132
133 partition@1a000000 {
134 label = "NAND Linux Kernel Image";
135 reg = <0x1a000000 0x04000000>;
136 };
137
138 partition@1e000000 {
139 label = "NAND DTB Image";
140 reg = <0x1e000000 0x01000000>;
141 };
142
143 partition@1f000000 {
144 label = "NAND Writable User area";
145 reg = <0x1f000000 0x21000000>;
146 };
147 };
148
149 board-control@3,0 {
150 compatible = "fsl,p3041ds-pixis";
151 reg = <3 0 0x20>;
152 };
153 };
154
155 pci0: pcie@ffe200000 {
156 reg = <0xf 0xfe200000 0 0x1000>;
157 ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
158 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
159 pcie@0 {
160 ranges = <0x02000000 0 0xe0000000
161 0x02000000 0 0xe0000000
162 0 0x20000000
163
164 0x01000000 0 0x00000000
165 0x01000000 0 0x00000000
166 0 0x00010000>;
167 };
168 };
169
170 pci1: pcie@ffe201000 {
171 reg = <0xf 0xfe201000 0 0x1000>;
172 ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
173 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
174 pcie@0 {
175 ranges = <0x02000000 0 0xe0000000
176 0x02000000 0 0xe0000000
177 0 0x20000000
178
179 0x01000000 0 0x00000000
180 0x01000000 0 0x00000000
181 0 0x00010000>;
182 };
183 };
184
185 pci2: pcie@ffe202000 {
186 reg = <0xf 0xfe202000 0 0x1000>;
187 ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
188 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
189 pcie@0 {
190 ranges = <0x02000000 0 0xe0000000
191 0x02000000 0 0xe0000000
192 0 0x20000000
193
194 0x01000000 0 0x00000000
195 0x01000000 0 0x00000000
196 0 0x00010000>;
197 };
198 };
199
200 pci3: pcie@ffe203000 {
201 reg = <0xf 0xfe203000 0 0x1000>;
202 ranges = <0x02000000 0 0xe0000000 0xc 0x60000000 0 0x20000000
203 0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>;
204 pcie@0 {
205 ranges = <0x02000000 0 0xe0000000
206 0x02000000 0 0xe0000000
207 0 0x20000000
208
209 0x01000000 0 0x00000000
210 0x01000000 0 0x00000000
211 0 0x00010000>;
212 };
213 };
214};
diff --git a/arch/powerpc/boot/dts/p3041si.dtsi b/arch/powerpc/boot/dts/p3041si.dtsi
new file mode 100644
index 000000000000..8b695801f505
--- /dev/null
+++ b/arch/powerpc/boot/dts/p3041si.dtsi
@@ -0,0 +1,660 @@
1/*
2 * P3041 Silicon Device Tree Source
3 *
4 * Copyright 2010-2011 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/dts-v1/;
36
37/ {
38 compatible = "fsl,P3041";
39 #address-cells = <2>;
40 #size-cells = <2>;
41 interrupt-parent = <&mpic>;
42
43 aliases {
44 ccsr = &soc;
45
46 serial0 = &serial0;
47 serial1 = &serial1;
48 serial2 = &serial2;
49 serial3 = &serial3;
50 pci0 = &pci0;
51 pci1 = &pci1;
52 pci2 = &pci2;
53 pci3 = &pci3;
54 usb0 = &usb0;
55 usb1 = &usb1;
56 dma0 = &dma0;
57 dma1 = &dma1;
58 sdhc = &sdhc;
59 msi0 = &msi0;
60 msi1 = &msi1;
61 msi2 = &msi2;
62
63 crypto = &crypto;
64 sec_jr0 = &sec_jr0;
65 sec_jr1 = &sec_jr1;
66 sec_jr2 = &sec_jr2;
67 sec_jr3 = &sec_jr3;
68 rtic_a = &rtic_a;
69 rtic_b = &rtic_b;
70 rtic_c = &rtic_c;
71 rtic_d = &rtic_d;
72 sec_mon = &sec_mon;
73
74/*
75 rio0 = &rapidio0;
76 */
77 };
78
79 cpus {
80 #address-cells = <1>;
81 #size-cells = <0>;
82
83 cpu0: PowerPC,e500mc@0 {
84 device_type = "cpu";
85 reg = <0>;
86 next-level-cache = <&L2_0>;
87 L2_0: l2-cache {
88 next-level-cache = <&cpc>;
89 };
90 };
91 cpu1: PowerPC,e500mc@1 {
92 device_type = "cpu";
93 reg = <1>;
94 next-level-cache = <&L2_1>;
95 L2_1: l2-cache {
96 next-level-cache = <&cpc>;
97 };
98 };
99 cpu2: PowerPC,e500mc@2 {
100 device_type = "cpu";
101 reg = <2>;
102 next-level-cache = <&L2_2>;
103 L2_2: l2-cache {
104 next-level-cache = <&cpc>;
105 };
106 };
107 cpu3: PowerPC,e500mc@3 {
108 device_type = "cpu";
109 reg = <3>;
110 next-level-cache = <&L2_3>;
111 L2_3: l2-cache {
112 next-level-cache = <&cpc>;
113 };
114 };
115 };
116
117 soc: soc@ffe000000 {
118 #address-cells = <1>;
119 #size-cells = <1>;
120 device_type = "soc";
121 compatible = "simple-bus";
122 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
123 reg = <0xf 0xfe000000 0 0x00001000>;
124
125 soc-sram-error {
126 compatible = "fsl,soc-sram-error";
127 interrupts = <16 2 1 29>;
128 };
129
130 corenet-law@0 {
131 compatible = "fsl,corenet-law";
132 reg = <0x0 0x1000>;
133 fsl,num-laws = <32>;
134 };
135
136 memory-controller@8000 {
137 compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
138 reg = <0x8000 0x1000>;
139 interrupts = <16 2 1 23>;
140 };
141
142 cpc: l3-cache-controller@10000 {
143 compatible = "fsl,p3041-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache";
144 reg = <0x10000 0x1000>;
145 interrupts = <16 2 1 27>;
146 };
147
148 corenet-cf@18000 {
149 compatible = "fsl,corenet-cf";
150 reg = <0x18000 0x1000>;
151 interrupts = <16 2 1 31>;
152 fsl,ccf-num-csdids = <32>;
153 fsl,ccf-num-snoopids = <32>;
154 };
155
156 iommu@20000 {
157 compatible = "fsl,pamu-v1.0", "fsl,pamu";
158 reg = <0x20000 0x4000>;
159 interrupts = <
160 24 2 0 0
161 16 2 1 30>;
162 };
163
164 mpic: pic@40000 {
165 clock-frequency = <0>;
166 interrupt-controller;
167 #address-cells = <0>;
168 #interrupt-cells = <4>;
169 reg = <0x40000 0x40000>;
170 compatible = "fsl,mpic", "chrp,open-pic";
171 device_type = "open-pic";
172 };
173
174 msi0: msi@41600 {
175 compatible = "fsl,mpic-msi";
176 reg = <0x41600 0x200>;
177 msi-available-ranges = <0 0x100>;
178 interrupts = <
179 0xe0 0 0 0
180 0xe1 0 0 0
181 0xe2 0 0 0
182 0xe3 0 0 0
183 0xe4 0 0 0
184 0xe5 0 0 0
185 0xe6 0 0 0
186 0xe7 0 0 0>;
187 };
188
189 msi1: msi@41800 {
190 compatible = "fsl,mpic-msi";
191 reg = <0x41800 0x200>;
192 msi-available-ranges = <0 0x100>;
193 interrupts = <
194 0xe8 0 0 0
195 0xe9 0 0 0
196 0xea 0 0 0
197 0xeb 0 0 0
198 0xec 0 0 0
199 0xed 0 0 0
200 0xee 0 0 0
201 0xef 0 0 0>;
202 };
203
204 msi2: msi@41a00 {
205 compatible = "fsl,mpic-msi";
206 reg = <0x41a00 0x200>;
207 msi-available-ranges = <0 0x100>;
208 interrupts = <
209 0xf0 0 0 0
210 0xf1 0 0 0
211 0xf2 0 0 0
212 0xf3 0 0 0
213 0xf4 0 0 0
214 0xf5 0 0 0
215 0xf6 0 0 0
216 0xf7 0 0 0>;
217 };
218
219 guts: global-utilities@e0000 {
220 compatible = "fsl,qoriq-device-config-1.0";
221 reg = <0xe0000 0xe00>;
222 fsl,has-rstcr;
223 #sleep-cells = <1>;
224 fsl,liodn-bits = <12>;
225 };
226
227 pins: global-utilities@e0e00 {
228 compatible = "fsl,qoriq-pin-control-1.0";
229 reg = <0xe0e00 0x200>;
230 #sleep-cells = <2>;
231 };
232
233 clockgen: global-utilities@e1000 {
234 compatible = "fsl,p3041-clockgen", "fsl,qoriq-clockgen-1.0";
235 reg = <0xe1000 0x1000>;
236 clock-frequency = <0>;
237 };
238
239 rcpm: global-utilities@e2000 {
240 compatible = "fsl,qoriq-rcpm-1.0";
241 reg = <0xe2000 0x1000>;
242 #sleep-cells = <1>;
243 };
244
245 sfp: sfp@e8000 {
246 compatible = "fsl,p3041-sfp", "fsl,qoriq-sfp-1.0";
247 reg = <0xe8000 0x1000>;
248 };
249
250 serdes: serdes@ea000 {
251 compatible = "fsl,p3041-serdes";
252 reg = <0xea000 0x1000>;
253 };
254
255 dma0: dma@100300 {
256 #address-cells = <1>;
257 #size-cells = <1>;
258 compatible = "fsl,p3041-dma", "fsl,eloplus-dma";
259 reg = <0x100300 0x4>;
260 ranges = <0x0 0x100100 0x200>;
261 cell-index = <0>;
262 dma-channel@0 {
263 compatible = "fsl,p3041-dma-channel",
264 "fsl,eloplus-dma-channel";
265 reg = <0x0 0x80>;
266 cell-index = <0>;
267 interrupts = <28 2 0 0>;
268 };
269 dma-channel@80 {
270 compatible = "fsl,p3041-dma-channel",
271 "fsl,eloplus-dma-channel";
272 reg = <0x80 0x80>;
273 cell-index = <1>;
274 interrupts = <29 2 0 0>;
275 };
276 dma-channel@100 {
277 compatible = "fsl,p3041-dma-channel",
278 "fsl,eloplus-dma-channel";
279 reg = <0x100 0x80>;
280 cell-index = <2>;
281 interrupts = <30 2 0 0>;
282 };
283 dma-channel@180 {
284 compatible = "fsl,p3041-dma-channel",
285 "fsl,eloplus-dma-channel";
286 reg = <0x180 0x80>;
287 cell-index = <3>;
288 interrupts = <31 2 0 0>;
289 };
290 };
291
292 dma1: dma@101300 {
293 #address-cells = <1>;
294 #size-cells = <1>;
295 compatible = "fsl,p3041-dma", "fsl,eloplus-dma";
296 reg = <0x101300 0x4>;
297 ranges = <0x0 0x101100 0x200>;
298 cell-index = <1>;
299 dma-channel@0 {
300 compatible = "fsl,p3041-dma-channel",
301 "fsl,eloplus-dma-channel";
302 reg = <0x0 0x80>;
303 cell-index = <0>;
304 interrupts = <32 2 0 0>;
305 };
306 dma-channel@80 {
307 compatible = "fsl,p3041-dma-channel",
308 "fsl,eloplus-dma-channel";
309 reg = <0x80 0x80>;
310 cell-index = <1>;
311 interrupts = <33 2 0 0>;
312 };
313 dma-channel@100 {
314 compatible = "fsl,p3041-dma-channel",
315 "fsl,eloplus-dma-channel";
316 reg = <0x100 0x80>;
317 cell-index = <2>;
318 interrupts = <34 2 0 0>;
319 };
320 dma-channel@180 {
321 compatible = "fsl,p3041-dma-channel",
322 "fsl,eloplus-dma-channel";
323 reg = <0x180 0x80>;
324 cell-index = <3>;
325 interrupts = <35 2 0 0>;
326 };
327 };
328
329 spi@110000 {
330 #address-cells = <1>;
331 #size-cells = <0>;
332 compatible = "fsl,p3041-espi", "fsl,mpc8536-espi";
333 reg = <0x110000 0x1000>;
334 interrupts = <53 0x2 0 0>;
335 fsl,espi-num-chipselects = <4>;
336 };
337
338 sdhc: sdhc@114000 {
339 compatible = "fsl,p3041-esdhc", "fsl,esdhc";
340 reg = <0x114000 0x1000>;
341 interrupts = <48 2 0 0>;
342 sdhci,auto-cmd12;
343 clock-frequency = <0>;
344 };
345
346 i2c@118000 {
347 #address-cells = <1>;
348 #size-cells = <0>;
349 cell-index = <0>;
350 compatible = "fsl-i2c";
351 reg = <0x118000 0x100>;
352 interrupts = <38 2 0 0>;
353 dfsrr;
354 };
355
356 i2c@118100 {
357 #address-cells = <1>;
358 #size-cells = <0>;
359 cell-index = <1>;
360 compatible = "fsl-i2c";
361 reg = <0x118100 0x100>;
362 interrupts = <38 2 0 0>;
363 dfsrr;
364 };
365
366 i2c@119000 {
367 #address-cells = <1>;
368 #size-cells = <0>;
369 cell-index = <2>;
370 compatible = "fsl-i2c";
371 reg = <0x119000 0x100>;
372 interrupts = <39 2 0 0>;
373 dfsrr;
374 };
375
376 i2c@119100 {
377 #address-cells = <1>;
378 #size-cells = <0>;
379 cell-index = <3>;
380 compatible = "fsl-i2c";
381 reg = <0x119100 0x100>;
382 interrupts = <39 2 0 0>;
383 dfsrr;
384 };
385
386 serial0: serial@11c500 {
387 cell-index = <0>;
388 device_type = "serial";
389 compatible = "ns16550";
390 reg = <0x11c500 0x100>;
391 clock-frequency = <0>;
392 interrupts = <36 2 0 0>;
393 };
394
395 serial1: serial@11c600 {
396 cell-index = <1>;
397 device_type = "serial";
398 compatible = "ns16550";
399 reg = <0x11c600 0x100>;
400 clock-frequency = <0>;
401 interrupts = <36 2 0 0>;
402 };
403
404 serial2: serial@11d500 {
405 cell-index = <2>;
406 device_type = "serial";
407 compatible = "ns16550";
408 reg = <0x11d500 0x100>;
409 clock-frequency = <0>;
410 interrupts = <37 2 0 0>;
411 };
412
413 serial3: serial@11d600 {
414 cell-index = <3>;
415 device_type = "serial";
416 compatible = "ns16550";
417 reg = <0x11d600 0x100>;
418 clock-frequency = <0>;
419 interrupts = <37 2 0 0>;
420 };
421
422 gpio0: gpio@130000 {
423 compatible = "fsl,p3041-gpio", "fsl,qoriq-gpio";
424 reg = <0x130000 0x1000>;
425 interrupts = <55 2 0 0>;
426 #gpio-cells = <2>;
427 gpio-controller;
428 };
429
430 usb0: usb@210000 {
431 compatible = "fsl,p3041-usb2-mph",
432 "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
433 reg = <0x210000 0x1000>;
434 #address-cells = <1>;
435 #size-cells = <0>;
436 interrupts = <44 0x2 0 0>;
437 phy_type = "utmi";
438 port0;
439 };
440
441 usb1: usb@211000 {
442 compatible = "fsl,p3041-usb2-dr",
443 "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
444 reg = <0x211000 0x1000>;
445 #address-cells = <1>;
446 #size-cells = <0>;
447 interrupts = <45 0x2 0 0>;
448 dr_mode = "host";
449 phy_type = "utmi";
450 };
451
452 sata@220000 {
453 compatible = "fsl,p3041-sata", "fsl,pq-sata-v2";
454 reg = <0x220000 0x1000>;
455 interrupts = <68 0x2 0 0>;
456 };
457
458 sata@221000 {
459 compatible = "fsl,p3041-sata", "fsl,pq-sata-v2";
460 reg = <0x221000 0x1000>;
461 interrupts = <69 0x2 0 0>;
462 };
463
464 crypto: crypto@300000 {
465 compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
466 #address-cells = <1>;
467 #size-cells = <1>;
468 reg = <0x300000 0x10000>;
469 ranges = <0 0x300000 0x10000>;
470 interrupts = <92 2 0 0>;
471
472 sec_jr0: jr@1000 {
473 compatible = "fsl,sec-v4.2-job-ring",
474 "fsl,sec-v4.0-job-ring";
475 reg = <0x1000 0x1000>;
476 interrupts = <88 2 0 0>;
477 };
478
479 sec_jr1: jr@2000 {
480 compatible = "fsl,sec-v4.2-job-ring",
481 "fsl,sec-v4.0-job-ring";
482 reg = <0x2000 0x1000>;
483 interrupts = <89 2 0 0>;
484 };
485
486 sec_jr2: jr@3000 {
487 compatible = "fsl,sec-v4.2-job-ring",
488 "fsl,sec-v4.0-job-ring";
489 reg = <0x3000 0x1000>;
490 interrupts = <90 2 0 0>;
491 };
492
493 sec_jr3: jr@4000 {
494 compatible = "fsl,sec-v4.2-job-ring",
495 "fsl,sec-v4.0-job-ring";
496 reg = <0x4000 0x1000>;
497 interrupts = <91 2 0 0>;
498 };
499
500 rtic@6000 {
501 compatible = "fsl,sec-v4.2-rtic",
502 "fsl,sec-v4.0-rtic";
503 #address-cells = <1>;
504 #size-cells = <1>;
505 reg = <0x6000 0x100>;
506 ranges = <0x0 0x6100 0xe00>;
507
508 rtic_a: rtic-a@0 {
509 compatible = "fsl,sec-v4.2-rtic-memory",
510 "fsl,sec-v4.0-rtic-memory";
511 reg = <0x00 0x20 0x100 0x80>;
512 };
513
514 rtic_b: rtic-b@20 {
515 compatible = "fsl,sec-v4.2-rtic-memory",
516 "fsl,sec-v4.0-rtic-memory";
517 reg = <0x20 0x20 0x200 0x80>;
518 };
519
520 rtic_c: rtic-c@40 {
521 compatible = "fsl,sec-v4.2-rtic-memory",
522 "fsl,sec-v4.0-rtic-memory";
523 reg = <0x40 0x20 0x300 0x80>;
524 };
525
526 rtic_d: rtic-d@60 {
527 compatible = "fsl,sec-v4.2-rtic-memory",
528 "fsl,sec-v4.0-rtic-memory";
529 reg = <0x60 0x20 0x500 0x80>;
530 };
531 };
532 };
533
534 sec_mon: sec_mon@314000 {
535 compatible = "fsl,sec-v4.2-mon", "fsl,sec-v4.0-mon";
536 reg = <0x314000 0x1000>;
537 interrupts = <93 2 0 0>;
538 };
539 };
540
541/*
542 rapidio0: rapidio@ffe0c0000
543*/
544
545 localbus@ffe124000 {
546 compatible = "fsl,p3041-elbc", "fsl,elbc", "simple-bus";
547 interrupts = <25 2 0 0>;
548 #address-cells = <2>;
549 #size-cells = <1>;
550 };
551
552 pci0: pcie@ffe200000 {
553 compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
554 device_type = "pci";
555 #size-cells = <2>;
556 #address-cells = <3>;
557 bus-range = <0x0 0xff>;
558 clock-frequency = <0x1fca055>;
559 fsl,msi = <&msi0>;
560 interrupts = <16 2 1 15>;
561
562 pcie@0 {
563 reg = <0 0 0 0 0>;
564 #interrupt-cells = <1>;
565 #size-cells = <2>;
566 #address-cells = <3>;
567 device_type = "pci";
568 interrupts = <16 2 1 15>;
569 interrupt-map-mask = <0xf800 0 0 7>;
570 interrupt-map = <
571 /* IDSEL 0x0 */
572 0000 0 0 1 &mpic 40 1 0 0
573 0000 0 0 2 &mpic 1 1 0 0
574 0000 0 0 3 &mpic 2 1 0 0
575 0000 0 0 4 &mpic 3 1 0 0
576 >;
577 };
578 };
579
580 pci1: pcie@ffe201000 {
581 compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
582 device_type = "pci";
583 #size-cells = <2>;
584 #address-cells = <3>;
585 bus-range = <0 0xff>;
586 clock-frequency = <0x1fca055>;
587 fsl,msi = <&msi1>;
588 interrupts = <16 2 1 14>;
589 pcie@0 {
590 reg = <0 0 0 0 0>;
591 #interrupt-cells = <1>;
592 #size-cells = <2>;
593 #address-cells = <3>;
594 device_type = "pci";
595 interrupts = <16 2 1 14>;
596 interrupt-map-mask = <0xf800 0 0 7>;
597 interrupt-map = <
598 /* IDSEL 0x0 */
599 0000 0 0 1 &mpic 41 1 0 0
600 0000 0 0 2 &mpic 5 1 0 0
601 0000 0 0 3 &mpic 6 1 0 0
602 0000 0 0 4 &mpic 7 1 0 0
603 >;
604 };
605 };
606
607 pci2: pcie@ffe202000 {
608 compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
609 device_type = "pci";
610 #size-cells = <2>;
611 #address-cells = <3>;
612 bus-range = <0x0 0xff>;
613 clock-frequency = <0x1fca055>;
614 fsl,msi = <&msi2>;
615 interrupts = <16 2 1 13>;
616 pcie@0 {
617 reg = <0 0 0 0 0>;
618 #interrupt-cells = <1>;
619 #size-cells = <2>;
620 #address-cells = <3>;
621 device_type = "pci";
622 interrupts = <16 2 1 13>;
623 interrupt-map-mask = <0xf800 0 0 7>;
624 interrupt-map = <
625 /* IDSEL 0x0 */
626 0000 0 0 1 &mpic 42 1 0 0
627 0000 0 0 2 &mpic 9 1 0 0
628 0000 0 0 3 &mpic 10 1 0 0
629 0000 0 0 4 &mpic 11 1 0 0
630 >;
631 };
632 };
633
634 pci3: pcie@ffe203000 {
635 compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
636 device_type = "pci";
637 #size-cells = <2>;
638 #address-cells = <3>;
639 bus-range = <0x0 0xff>;
640 clock-frequency = <0x1fca055>;
641 fsl,msi = <&msi2>;
642 interrupts = <16 2 1 12>;
643 pcie@0 {
644 reg = <0 0 0 0 0>;
645 #interrupt-cells = <1>;
646 #size-cells = <2>;
647 #address-cells = <3>;
648 device_type = "pci";
649 interrupts = <16 2 1 12>;
650 interrupt-map-mask = <0xf800 0 0 7>;
651 interrupt-map = <
652 /* IDSEL 0x0 */
653 0000 0 0 1 &mpic 43 1 0 0
654 0000 0 0 2 &mpic 0 1 0 0
655 0000 0 0 3 &mpic 4 1 0 0
656 0000 0 0 4 &mpic 8 1 0 0
657 >;
658 };
659 };
660};
diff --git a/arch/powerpc/boot/dts/p4080ds.dts b/arch/powerpc/boot/dts/p4080ds.dts
index 927f94d16e9b..eb11098bb687 100644
--- a/arch/powerpc/boot/dts/p4080ds.dts
+++ b/arch/powerpc/boot/dts/p4080ds.dts
@@ -3,258 +3,50 @@
3 * 3 *
4 * Copyright 2009-2011 Freescale Semiconductor Inc. 4 * Copyright 2009-2011 Freescale Semiconductor Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * Redistribution and use in source and binary forms, with or without
7 * under the terms of the GNU General Public License as published by the 7 * modification, are permitted provided that the following conditions are met:
8 * Free Software Foundation; either version 2 of the License, or (at your 8 * * Redistributions of source code must retain the above copyright
9 * option) any later version. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10 */ 33 */
11 34
12/dts-v1/; 35/include/ "p4080si.dtsi"
13 36
14/ { 37/ {
15 model = "fsl,P4080DS"; 38 model = "fsl,P4080DS";
16 compatible = "fsl,P4080DS"; 39 compatible = "fsl,P4080DS";
17 #address-cells = <2>; 40 #address-cells = <2>;
18 #size-cells = <2>; 41 #size-cells = <2>;
19 42 interrupt-parent = <&mpic>;
20 aliases {
21 ccsr = &soc;
22
23 serial0 = &serial0;
24 serial1 = &serial1;
25 serial2 = &serial2;
26 serial3 = &serial3;
27 pci0 = &pci0;
28 pci1 = &pci1;
29 pci2 = &pci2;
30 usb0 = &usb0;
31 usb1 = &usb1;
32 dma0 = &dma0;
33 dma1 = &dma1;
34 sdhc = &sdhc;
35
36 crypto = &crypto;
37 sec_jr0 = &sec_jr0;
38 sec_jr1 = &sec_jr1;
39 sec_jr2 = &sec_jr2;
40 sec_jr3 = &sec_jr3;
41 rtic_a = &rtic_a;
42 rtic_b = &rtic_b;
43 rtic_c = &rtic_c;
44 rtic_d = &rtic_d;
45 sec_mon = &sec_mon;
46
47 rio0 = &rapidio0;
48 };
49
50 cpus {
51 #address-cells = <1>;
52 #size-cells = <0>;
53
54 cpu0: PowerPC,4080@0 {
55 device_type = "cpu";
56 reg = <0>;
57 next-level-cache = <&L2_0>;
58 L2_0: l2-cache {
59 };
60 };
61 cpu1: PowerPC,4080@1 {
62 device_type = "cpu";
63 reg = <1>;
64 next-level-cache = <&L2_1>;
65 L2_1: l2-cache {
66 };
67 };
68 cpu2: PowerPC,4080@2 {
69 device_type = "cpu";
70 reg = <2>;
71 next-level-cache = <&L2_2>;
72 L2_2: l2-cache {
73 };
74 };
75 cpu3: PowerPC,4080@3 {
76 device_type = "cpu";
77 reg = <3>;
78 next-level-cache = <&L2_3>;
79 L2_3: l2-cache {
80 };
81 };
82 cpu4: PowerPC,4080@4 {
83 device_type = "cpu";
84 reg = <4>;
85 next-level-cache = <&L2_4>;
86 L2_4: l2-cache {
87 };
88 };
89 cpu5: PowerPC,4080@5 {
90 device_type = "cpu";
91 reg = <5>;
92 next-level-cache = <&L2_5>;
93 L2_5: l2-cache {
94 };
95 };
96 cpu6: PowerPC,4080@6 {
97 device_type = "cpu";
98 reg = <6>;
99 next-level-cache = <&L2_6>;
100 L2_6: l2-cache {
101 };
102 };
103 cpu7: PowerPC,4080@7 {
104 device_type = "cpu";
105 reg = <7>;
106 next-level-cache = <&L2_7>;
107 L2_7: l2-cache {
108 };
109 };
110 };
111 43
112 memory { 44 memory {
113 device_type = "memory"; 45 device_type = "memory";
114 }; 46 };
115 47
116 soc: soc@ffe000000 { 48 soc: soc@ffe000000 {
117 #address-cells = <1>;
118 #size-cells = <1>;
119 device_type = "soc";
120 compatible = "simple-bus";
121 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
122 reg = <0xf 0xfe000000 0 0x00001000>;
123
124 corenet-law@0 {
125 compatible = "fsl,corenet-law";
126 reg = <0x0 0x1000>;
127 fsl,num-laws = <32>;
128 };
129
130 memory-controller@8000 {
131 compatible = "fsl,p4080-memory-controller";
132 reg = <0x8000 0x1000>;
133 interrupt-parent = <&mpic>;
134 interrupts = <0x12 2>;
135 };
136
137 memory-controller@9000 {
138 compatible = "fsl,p4080-memory-controller";
139 reg = <0x9000 0x1000>;
140 interrupt-parent = <&mpic>;
141 interrupts = <0x12 2>;
142 };
143
144 corenet-cf@18000 {
145 compatible = "fsl,corenet-cf";
146 reg = <0x18000 0x1000>;
147 fsl,ccf-num-csdids = <32>;
148 fsl,ccf-num-snoopids = <32>;
149 };
150
151 iommu@20000 {
152 compatible = "fsl,p4080-pamu";
153 reg = <0x20000 0x10000>;
154 interrupts = <24 2>;
155 interrupt-parent = <&mpic>;
156 };
157
158 mpic: pic@40000 {
159 interrupt-controller;
160 #address-cells = <0>;
161 #interrupt-cells = <2>;
162 reg = <0x40000 0x40000>;
163 compatible = "chrp,open-pic";
164 device_type = "open-pic";
165 };
166
167 dma0: dma@100300 {
168 #address-cells = <1>;
169 #size-cells = <1>;
170 compatible = "fsl,p4080-dma", "fsl,eloplus-dma";
171 reg = <0x100300 0x4>;
172 ranges = <0x0 0x100100 0x200>;
173 cell-index = <0>;
174 dma-channel@0 {
175 compatible = "fsl,p4080-dma-channel",
176 "fsl,eloplus-dma-channel";
177 reg = <0x0 0x80>;
178 cell-index = <0>;
179 interrupt-parent = <&mpic>;
180 interrupts = <28 2>;
181 };
182 dma-channel@80 {
183 compatible = "fsl,p4080-dma-channel",
184 "fsl,eloplus-dma-channel";
185 reg = <0x80 0x80>;
186 cell-index = <1>;
187 interrupt-parent = <&mpic>;
188 interrupts = <29 2>;
189 };
190 dma-channel@100 {
191 compatible = "fsl,p4080-dma-channel",
192 "fsl,eloplus-dma-channel";
193 reg = <0x100 0x80>;
194 cell-index = <2>;
195 interrupt-parent = <&mpic>;
196 interrupts = <30 2>;
197 };
198 dma-channel@180 {
199 compatible = "fsl,p4080-dma-channel",
200 "fsl,eloplus-dma-channel";
201 reg = <0x180 0x80>;
202 cell-index = <3>;
203 interrupt-parent = <&mpic>;
204 interrupts = <31 2>;
205 };
206 };
207
208 dma1: dma@101300 {
209 #address-cells = <1>;
210 #size-cells = <1>;
211 compatible = "fsl,p4080-dma", "fsl,eloplus-dma";
212 reg = <0x101300 0x4>;
213 ranges = <0x0 0x101100 0x200>;
214 cell-index = <1>;
215 dma-channel@0 {
216 compatible = "fsl,p4080-dma-channel",
217 "fsl,eloplus-dma-channel";
218 reg = <0x0 0x80>;
219 cell-index = <0>;
220 interrupt-parent = <&mpic>;
221 interrupts = <32 2>;
222 };
223 dma-channel@80 {
224 compatible = "fsl,p4080-dma-channel",
225 "fsl,eloplus-dma-channel";
226 reg = <0x80 0x80>;
227 cell-index = <1>;
228 interrupt-parent = <&mpic>;
229 interrupts = <33 2>;
230 };
231 dma-channel@100 {
232 compatible = "fsl,p4080-dma-channel",
233 "fsl,eloplus-dma-channel";
234 reg = <0x100 0x80>;
235 cell-index = <2>;
236 interrupt-parent = <&mpic>;
237 interrupts = <34 2>;
238 };
239 dma-channel@180 {
240 compatible = "fsl,p4080-dma-channel",
241 "fsl,eloplus-dma-channel";
242 reg = <0x180 0x80>;
243 cell-index = <3>;
244 interrupt-parent = <&mpic>;
245 interrupts = <35 2>;
246 };
247 };
248
249 spi@110000 { 49 spi@110000 {
250 #address-cells = <1>;
251 #size-cells = <0>;
252 compatible = "fsl,p4080-espi", "fsl,mpc8536-espi";
253 reg = <0x110000 0x1000>;
254 interrupts = <53 0x2>;
255 interrupt-parent = <&mpic>;
256 fsl,espi-num-chipselects = <4>;
257
258 flash@0 { 50 flash@0 {
259 #address-cells = <1>; 51 #address-cells = <1>;
260 #size-cells = <1>; 52 #size-cells = <1>;
@@ -283,35 +75,7 @@
283 }; 75 };
284 }; 76 };
285 77
286 sdhc: sdhc@114000 {
287 compatible = "fsl,p4080-esdhc", "fsl,esdhc";
288 reg = <0x114000 0x1000>;
289 interrupts = <48 2>;
290 interrupt-parent = <&mpic>;
291 voltage-ranges = <3300 3300>;
292 sdhci,auto-cmd12;
293 };
294
295 i2c@118000 {
296 #address-cells = <1>;
297 #size-cells = <0>;
298 cell-index = <0>;
299 compatible = "fsl-i2c";
300 reg = <0x118000 0x100>;
301 interrupts = <38 2>;
302 interrupt-parent = <&mpic>;
303 dfsrr;
304 };
305
306 i2c@118100 { 78 i2c@118100 {
307 #address-cells = <1>;
308 #size-cells = <0>;
309 cell-index = <1>;
310 compatible = "fsl-i2c";
311 reg = <0x118100 0x100>;
312 interrupts = <38 2>;
313 interrupt-parent = <&mpic>;
314 dfsrr;
315 eeprom@51 { 79 eeprom@51 {
316 compatible = "at24,24c256"; 80 compatible = "at24,24c256";
317 reg = <0x51>; 81 reg = <0x51>;
@@ -323,198 +87,27 @@
323 rtc@68 { 87 rtc@68 {
324 compatible = "dallas,ds3232"; 88 compatible = "dallas,ds3232";
325 reg = <0x68>; 89 reg = <0x68>;
326 interrupts = <0 0x1>; 90 interrupts = <0x1 0x1 0 0>;
327 interrupt-parent = <&mpic>;
328 }; 91 };
329 }; 92 };
330 93
331 i2c@119000 {
332 #address-cells = <1>;
333 #size-cells = <0>;
334 cell-index = <2>;
335 compatible = "fsl-i2c";
336 reg = <0x119000 0x100>;
337 interrupts = <39 2>;
338 interrupt-parent = <&mpic>;
339 dfsrr;
340 };
341
342 i2c@119100 {
343 #address-cells = <1>;
344 #size-cells = <0>;
345 cell-index = <3>;
346 compatible = "fsl-i2c";
347 reg = <0x119100 0x100>;
348 interrupts = <39 2>;
349 interrupt-parent = <&mpic>;
350 dfsrr;
351 };
352
353 serial0: serial@11c500 {
354 cell-index = <0>;
355 device_type = "serial";
356 compatible = "ns16550";
357 reg = <0x11c500 0x100>;
358 clock-frequency = <0>;
359 interrupts = <36 2>;
360 interrupt-parent = <&mpic>;
361 };
362
363 serial1: serial@11c600 {
364 cell-index = <1>;
365 device_type = "serial";
366 compatible = "ns16550";
367 reg = <0x11c600 0x100>;
368 clock-frequency = <0>;
369 interrupts = <36 2>;
370 interrupt-parent = <&mpic>;
371 };
372
373 serial2: serial@11d500 {
374 cell-index = <2>;
375 device_type = "serial";
376 compatible = "ns16550";
377 reg = <0x11d500 0x100>;
378 clock-frequency = <0>;
379 interrupts = <37 2>;
380 interrupt-parent = <&mpic>;
381 };
382
383 serial3: serial@11d600 {
384 cell-index = <3>;
385 device_type = "serial";
386 compatible = "ns16550";
387 reg = <0x11d600 0x100>;
388 clock-frequency = <0>;
389 interrupts = <37 2>;
390 interrupt-parent = <&mpic>;
391 };
392
393 gpio0: gpio@130000 {
394 compatible = "fsl,p4080-gpio";
395 reg = <0x130000 0x1000>;
396 interrupts = <55 2>;
397 interrupt-parent = <&mpic>;
398 #gpio-cells = <2>;
399 gpio-controller;
400 };
401
402 usb0: usb@210000 { 94 usb0: usb@210000 {
403 compatible = "fsl,p4080-usb2-mph",
404 "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
405 reg = <0x210000 0x1000>;
406 #address-cells = <1>;
407 #size-cells = <0>;
408 interrupt-parent = <&mpic>;
409 interrupts = <44 0x2>;
410 phy_type = "ulpi"; 95 phy_type = "ulpi";
411 }; 96 };
412 97
413 usb1: usb@211000 { 98 usb1: usb@211000 {
414 compatible = "fsl,p4080-usb2-dr",
415 "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
416 reg = <0x211000 0x1000>;
417 #address-cells = <1>;
418 #size-cells = <0>;
419 interrupt-parent = <&mpic>;
420 interrupts = <45 0x2>;
421 dr_mode = "host"; 99 dr_mode = "host";
422 phy_type = "ulpi"; 100 phy_type = "ulpi";
423 }; 101 };
424
425 crypto: crypto@300000 {
426 compatible = "fsl,sec-v4.0";
427 #address-cells = <1>;
428 #size-cells = <1>;
429 reg = <0x300000 0x10000>;
430 ranges = <0 0x300000 0x10000>;
431 interrupt-parent = <&mpic>;
432 interrupts = <92 2>;
433
434 sec_jr0: jr@1000 {
435 compatible = "fsl,sec-v4.0-job-ring";
436 reg = <0x1000 0x1000>;
437 interrupt-parent = <&mpic>;
438 interrupts = <88 2>;
439 };
440
441 sec_jr1: jr@2000 {
442 compatible = "fsl,sec-v4.0-job-ring";
443 reg = <0x2000 0x1000>;
444 interrupt-parent = <&mpic>;
445 interrupts = <89 2>;
446 };
447
448 sec_jr2: jr@3000 {
449 compatible = "fsl,sec-v4.0-job-ring";
450 reg = <0x3000 0x1000>;
451 interrupt-parent = <&mpic>;
452 interrupts = <90 2>;
453 };
454
455 sec_jr3: jr@4000 {
456 compatible = "fsl,sec-v4.0-job-ring";
457 reg = <0x4000 0x1000>;
458 interrupt-parent = <&mpic>;
459 interrupts = <91 2>;
460 };
461
462 rtic@6000 {
463 compatible = "fsl,sec-v4.0-rtic";
464 #address-cells = <1>;
465 #size-cells = <1>;
466 reg = <0x6000 0x100>;
467 ranges = <0x0 0x6100 0xe00>;
468
469 rtic_a: rtic-a@0 {
470 compatible = "fsl,sec-v4.0-rtic-memory";
471 reg = <0x00 0x20 0x100 0x80>;
472 };
473
474 rtic_b: rtic-b@20 {
475 compatible = "fsl,sec-v4.0-rtic-memory";
476 reg = <0x20 0x20 0x200 0x80>;
477 };
478
479 rtic_c: rtic-c@40 {
480 compatible = "fsl,sec-v4.0-rtic-memory";
481 reg = <0x40 0x20 0x300 0x80>;
482 };
483
484 rtic_d: rtic-d@60 {
485 compatible = "fsl,sec-v4.0-rtic-memory";
486 reg = <0x60 0x20 0x500 0x80>;
487 };
488 };
489 };
490
491 sec_mon: sec_mon@314000 {
492 compatible = "fsl,sec-v4.0-mon";
493 reg = <0x314000 0x1000>;
494 interrupt-parent = <&mpic>;
495 interrupts = <93 2>;
496 };
497 }; 102 };
498 103
499 rapidio0: rapidio@ffe0c0000 { 104 rapidio0: rapidio@ffe0c0000 {
500 #address-cells = <2>;
501 #size-cells = <2>;
502 compatible = "fsl,rapidio-delta";
503 reg = <0xf 0xfe0c0000 0 0x20000>; 105 reg = <0xf 0xfe0c0000 0 0x20000>;
504 ranges = <0 0 0xf 0xf5000000 0 0x01000000>; 106 ranges = <0 0 0xc 0x20000000 0 0x01000000>;
505 interrupt-parent = <&mpic>;
506 /* err_irq bell_outb_irq bell_inb_irq
507 msg1_tx_irq msg1_rx_irq msg2_tx_irq msg2_rx_irq */
508 interrupts = <16 2 56 2 57 2 60 2 61 2 62 2 63 2>;
509 }; 107 };
510 108
511 localbus@ffe124000 { 109 localbus@ffe124000 {
512 compatible = "fsl,p4080-elbc", "fsl,elbc", "simple-bus";
513 reg = <0xf 0xfe124000 0 0x1000>; 110 reg = <0xf 0xfe124000 0 0x1000>;
514 interrupts = <25 2>;
515 #address-cells = <2>;
516 #size-cells = <1>;
517
518 ranges = <0 0 0xf 0xe8000000 0x08000000>; 111 ranges = <0 0 0xf 0xe8000000 0x08000000>;
519 112
520 flash@0,0 { 113 flash@0,0 {
@@ -526,32 +119,10 @@
526 }; 119 };
527 120
528 pci0: pcie@ffe200000 { 121 pci0: pcie@ffe200000 {
529 compatible = "fsl,p4080-pcie";
530 device_type = "pci";
531 #interrupt-cells = <1>;
532 #size-cells = <2>;
533 #address-cells = <3>;
534 reg = <0xf 0xfe200000 0 0x1000>; 122 reg = <0xf 0xfe200000 0 0x1000>;
535 bus-range = <0x0 0xff>;
536 ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000 123 ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
537 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>; 124 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
538 clock-frequency = <0x1fca055>;
539 interrupt-parent = <&mpic>;
540 interrupts = <16 2>;
541
542 interrupt-map-mask = <0xf800 0 0 7>;
543 interrupt-map = <
544 /* IDSEL 0x0 */
545 0000 0 0 1 &mpic 40 1
546 0000 0 0 2 &mpic 1 1
547 0000 0 0 3 &mpic 2 1
548 0000 0 0 4 &mpic 3 1
549 >;
550 pcie@0 { 125 pcie@0 {
551 reg = <0 0 0 0 0>;
552 #size-cells = <2>;
553 #address-cells = <3>;
554 device_type = "pci";
555 ranges = <0x02000000 0 0xe0000000 126 ranges = <0x02000000 0 0xe0000000
556 0x02000000 0 0xe0000000 127 0x02000000 0 0xe0000000
557 0 0x20000000 128 0 0x20000000
@@ -563,31 +134,10 @@
563 }; 134 };
564 135
565 pci1: pcie@ffe201000 { 136 pci1: pcie@ffe201000 {
566 compatible = "fsl,p4080-pcie";
567 device_type = "pci";
568 #interrupt-cells = <1>;
569 #size-cells = <2>;
570 #address-cells = <3>;
571 reg = <0xf 0xfe201000 0 0x1000>; 137 reg = <0xf 0xfe201000 0 0x1000>;
572 bus-range = <0 0xff>;
573 ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000 138 ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
574 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>; 139 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
575 clock-frequency = <0x1fca055>;
576 interrupt-parent = <&mpic>;
577 interrupts = <16 2>;
578 interrupt-map-mask = <0xf800 0 0 7>;
579 interrupt-map = <
580 /* IDSEL 0x0 */
581 0000 0 0 1 &mpic 41 1
582 0000 0 0 2 &mpic 5 1
583 0000 0 0 3 &mpic 6 1
584 0000 0 0 4 &mpic 7 1
585 >;
586 pcie@0 { 140 pcie@0 {
587 reg = <0 0 0 0 0>;
588 #size-cells = <2>;
589 #address-cells = <3>;
590 device_type = "pci";
591 ranges = <0x02000000 0 0xe0000000 141 ranges = <0x02000000 0 0xe0000000
592 0x02000000 0 0xe0000000 142 0x02000000 0 0xe0000000
593 0 0x20000000 143 0 0x20000000
@@ -599,31 +149,10 @@
599 }; 149 };
600 150
601 pci2: pcie@ffe202000 { 151 pci2: pcie@ffe202000 {
602 compatible = "fsl,p4080-pcie";
603 device_type = "pci";
604 #interrupt-cells = <1>;
605 #size-cells = <2>;
606 #address-cells = <3>;
607 reg = <0xf 0xfe202000 0 0x1000>; 152 reg = <0xf 0xfe202000 0 0x1000>;
608 bus-range = <0x0 0xff>;
609 ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000 153 ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
610 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>; 154 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
611 clock-frequency = <0x1fca055>;
612 interrupt-parent = <&mpic>;
613 interrupts = <16 2>;
614 interrupt-map-mask = <0xf800 0 0 7>;
615 interrupt-map = <
616 /* IDSEL 0x0 */
617 0000 0 0 1 &mpic 42 1
618 0000 0 0 2 &mpic 9 1
619 0000 0 0 3 &mpic 10 1
620 0000 0 0 4 &mpic 11 1
621 >;
622 pcie@0 { 155 pcie@0 {
623 reg = <0 0 0 0 0>;
624 #size-cells = <2>;
625 #address-cells = <3>;
626 device_type = "pci";
627 ranges = <0x02000000 0 0xe0000000 156 ranges = <0x02000000 0 0xe0000000
628 0x02000000 0 0xe0000000 157 0x02000000 0 0xe0000000
629 0 0x20000000 158 0 0x20000000
diff --git a/arch/powerpc/boot/dts/p4080si.dtsi b/arch/powerpc/boot/dts/p4080si.dtsi
new file mode 100644
index 000000000000..b71051f506c1
--- /dev/null
+++ b/arch/powerpc/boot/dts/p4080si.dtsi
@@ -0,0 +1,661 @@
1/*
2 * P4080 Silicon Device Tree Source
3 *
4 * Copyright 2009-2011 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/dts-v1/;
36
37/ {
38 compatible = "fsl,P4080";
39 #address-cells = <2>;
40 #size-cells = <2>;
41 interrupt-parent = <&mpic>;
42
43 aliases {
44 ccsr = &soc;
45
46 serial0 = &serial0;
47 serial1 = &serial1;
48 serial2 = &serial2;
49 serial3 = &serial3;
50 pci0 = &pci0;
51 pci1 = &pci1;
52 pci2 = &pci2;
53 usb0 = &usb0;
54 usb1 = &usb1;
55 dma0 = &dma0;
56 dma1 = &dma1;
57 sdhc = &sdhc;
58 msi0 = &msi0;
59 msi1 = &msi1;
60 msi2 = &msi2;
61
62 crypto = &crypto;
63 sec_jr0 = &sec_jr0;
64 sec_jr1 = &sec_jr1;
65 sec_jr2 = &sec_jr2;
66 sec_jr3 = &sec_jr3;
67 rtic_a = &rtic_a;
68 rtic_b = &rtic_b;
69 rtic_c = &rtic_c;
70 rtic_d = &rtic_d;
71 sec_mon = &sec_mon;
72
73 rio0 = &rapidio0;
74 };
75
76 cpus {
77 #address-cells = <1>;
78 #size-cells = <0>;
79
80 cpu0: PowerPC,4080@0 {
81 device_type = "cpu";
82 reg = <0>;
83 next-level-cache = <&L2_0>;
84 L2_0: l2-cache {
85 next-level-cache = <&cpc>;
86 };
87 };
88 cpu1: PowerPC,4080@1 {
89 device_type = "cpu";
90 reg = <1>;
91 next-level-cache = <&L2_1>;
92 L2_1: l2-cache {
93 next-level-cache = <&cpc>;
94 };
95 };
96 cpu2: PowerPC,4080@2 {
97 device_type = "cpu";
98 reg = <2>;
99 next-level-cache = <&L2_2>;
100 L2_2: l2-cache {
101 next-level-cache = <&cpc>;
102 };
103 };
104 cpu3: PowerPC,4080@3 {
105 device_type = "cpu";
106 reg = <3>;
107 next-level-cache = <&L2_3>;
108 L2_3: l2-cache {
109 next-level-cache = <&cpc>;
110 };
111 };
112 cpu4: PowerPC,4080@4 {
113 device_type = "cpu";
114 reg = <4>;
115 next-level-cache = <&L2_4>;
116 L2_4: l2-cache {
117 next-level-cache = <&cpc>;
118 };
119 };
120 cpu5: PowerPC,4080@5 {
121 device_type = "cpu";
122 reg = <5>;
123 next-level-cache = <&L2_5>;
124 L2_5: l2-cache {
125 next-level-cache = <&cpc>;
126 };
127 };
128 cpu6: PowerPC,4080@6 {
129 device_type = "cpu";
130 reg = <6>;
131 next-level-cache = <&L2_6>;
132 L2_6: l2-cache {
133 next-level-cache = <&cpc>;
134 };
135 };
136 cpu7: PowerPC,4080@7 {
137 device_type = "cpu";
138 reg = <7>;
139 next-level-cache = <&L2_7>;
140 L2_7: l2-cache {
141 next-level-cache = <&cpc>;
142 };
143 };
144 };
145
146 soc: soc@ffe000000 {
147 #address-cells = <1>;
148 #size-cells = <1>;
149 device_type = "soc";
150 compatible = "simple-bus";
151 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
152 reg = <0xf 0xfe000000 0 0x00001000>;
153
154 soc-sram-error {
155 compatible = "fsl,soc-sram-error";
156 interrupts = <16 2 1 29>;
157 };
158
159 corenet-law@0 {
160 compatible = "fsl,corenet-law";
161 reg = <0x0 0x1000>;
162 fsl,num-laws = <32>;
163 };
164
165 memory-controller@8000 {
166 compatible = "fsl,qoriq-memory-controller-v4.4", "fsl,qoriq-memory-controller";
167 reg = <0x8000 0x1000>;
168 interrupts = <16 2 1 23>;
169 };
170
171 memory-controller@9000 {
172 compatible = "fsl,qoriq-memory-controller-v4.4","fsl,qoriq-memory-controller";
173 reg = <0x9000 0x1000>;
174 interrupts = <16 2 1 22>;
175 };
176
177 cpc: l3-cache-controller@10000 {
178 compatible = "fsl,p4080-l3-cache-controller", "cache";
179 reg = <0x10000 0x1000
180 0x11000 0x1000>;
181 interrupts = <16 2 1 27
182 16 2 1 26>;
183 };
184
185 corenet-cf@18000 {
186 compatible = "fsl,corenet-cf";
187 reg = <0x18000 0x1000>;
188 interrupts = <16 2 1 31>;
189 fsl,ccf-num-csdids = <32>;
190 fsl,ccf-num-snoopids = <32>;
191 };
192
193 iommu@20000 {
194 compatible = "fsl,pamu-v1.0", "fsl,pamu";
195 reg = <0x20000 0x5000>;
196 interrupts = <
197 24 2 0 0
198 16 2 1 30>;
199 };
200
201 mpic: pic@40000 {
202 clock-frequency = <0>;
203 interrupt-controller;
204 #address-cells = <0>;
205 #interrupt-cells = <4>;
206 reg = <0x40000 0x40000>;
207 compatible = "fsl,mpic", "chrp,open-pic";
208 device_type = "open-pic";
209 };
210
211 msi0: msi@41600 {
212 compatible = "fsl,mpic-msi";
213 reg = <0x41600 0x200>;
214 msi-available-ranges = <0 0x100>;
215 interrupts = <
216 0xe0 0 0 0
217 0xe1 0 0 0
218 0xe2 0 0 0
219 0xe3 0 0 0
220 0xe4 0 0 0
221 0xe5 0 0 0
222 0xe6 0 0 0
223 0xe7 0 0 0>;
224 };
225
226 msi1: msi@41800 {
227 compatible = "fsl,mpic-msi";
228 reg = <0x41800 0x200>;
229 msi-available-ranges = <0 0x100>;
230 interrupts = <
231 0xe8 0 0 0
232 0xe9 0 0 0
233 0xea 0 0 0
234 0xeb 0 0 0
235 0xec 0 0 0
236 0xed 0 0 0
237 0xee 0 0 0
238 0xef 0 0 0>;
239 };
240
241 msi2: msi@41a00 {
242 compatible = "fsl,mpic-msi";
243 reg = <0x41a00 0x200>;
244 msi-available-ranges = <0 0x100>;
245 interrupts = <
246 0xf0 0 0 0
247 0xf1 0 0 0
248 0xf2 0 0 0
249 0xf3 0 0 0
250 0xf4 0 0 0
251 0xf5 0 0 0
252 0xf6 0 0 0
253 0xf7 0 0 0>;
254 };
255
256 guts: global-utilities@e0000 {
257 compatible = "fsl,qoriq-device-config-1.0";
258 reg = <0xe0000 0xe00>;
259 fsl,has-rstcr;
260 #sleep-cells = <1>;
261 fsl,liodn-bits = <12>;
262 };
263
264 pins: global-utilities@e0e00 {
265 compatible = "fsl,qoriq-pin-control-1.0";
266 reg = <0xe0e00 0x200>;
267 #sleep-cells = <2>;
268 };
269
270 clockgen: global-utilities@e1000 {
271 compatible = "fsl,p4080-clockgen", "fsl,qoriq-clockgen-1.0";
272 reg = <0xe1000 0x1000>;
273 clock-frequency = <0>;
274 };
275
276 rcpm: global-utilities@e2000 {
277 compatible = "fsl,qoriq-rcpm-1.0";
278 reg = <0xe2000 0x1000>;
279 #sleep-cells = <1>;
280 };
281
282 sfp: sfp@e8000 {
283 compatible = "fsl,p4080-sfp", "fsl,qoriq-sfp-1.0";
284 reg = <0xe8000 0x1000>;
285 };
286
287 serdes: serdes@ea000 {
288 compatible = "fsl,p4080-serdes";
289 reg = <0xea000 0x1000>;
290 };
291
292 dma0: dma@100300 {
293 #address-cells = <1>;
294 #size-cells = <1>;
295 compatible = "fsl,p4080-dma", "fsl,eloplus-dma";
296 reg = <0x100300 0x4>;
297 ranges = <0x0 0x100100 0x200>;
298 cell-index = <0>;
299 dma-channel@0 {
300 compatible = "fsl,p4080-dma-channel",
301 "fsl,eloplus-dma-channel";
302 reg = <0x0 0x80>;
303 cell-index = <0>;
304 interrupts = <28 2 0 0>;
305 };
306 dma-channel@80 {
307 compatible = "fsl,p4080-dma-channel",
308 "fsl,eloplus-dma-channel";
309 reg = <0x80 0x80>;
310 cell-index = <1>;
311 interrupts = <29 2 0 0>;
312 };
313 dma-channel@100 {
314 compatible = "fsl,p4080-dma-channel",
315 "fsl,eloplus-dma-channel";
316 reg = <0x100 0x80>;
317 cell-index = <2>;
318 interrupts = <30 2 0 0>;
319 };
320 dma-channel@180 {
321 compatible = "fsl,p4080-dma-channel",
322 "fsl,eloplus-dma-channel";
323 reg = <0x180 0x80>;
324 cell-index = <3>;
325 interrupts = <31 2 0 0>;
326 };
327 };
328
329 dma1: dma@101300 {
330 #address-cells = <1>;
331 #size-cells = <1>;
332 compatible = "fsl,p4080-dma", "fsl,eloplus-dma";
333 reg = <0x101300 0x4>;
334 ranges = <0x0 0x101100 0x200>;
335 cell-index = <1>;
336 dma-channel@0 {
337 compatible = "fsl,p4080-dma-channel",
338 "fsl,eloplus-dma-channel";
339 reg = <0x0 0x80>;
340 cell-index = <0>;
341 interrupts = <32 2 0 0>;
342 };
343 dma-channel@80 {
344 compatible = "fsl,p4080-dma-channel",
345 "fsl,eloplus-dma-channel";
346 reg = <0x80 0x80>;
347 cell-index = <1>;
348 interrupts = <33 2 0 0>;
349 };
350 dma-channel@100 {
351 compatible = "fsl,p4080-dma-channel",
352 "fsl,eloplus-dma-channel";
353 reg = <0x100 0x80>;
354 cell-index = <2>;
355 interrupts = <34 2 0 0>;
356 };
357 dma-channel@180 {
358 compatible = "fsl,p4080-dma-channel",
359 "fsl,eloplus-dma-channel";
360 reg = <0x180 0x80>;
361 cell-index = <3>;
362 interrupts = <35 2 0 0>;
363 };
364 };
365
366 spi@110000 {
367 #address-cells = <1>;
368 #size-cells = <0>;
369 compatible = "fsl,p4080-espi", "fsl,mpc8536-espi";
370 reg = <0x110000 0x1000>;
371 interrupts = <53 0x2 0 0>;
372 fsl,espi-num-chipselects = <4>;
373 };
374
375 sdhc: sdhc@114000 {
376 compatible = "fsl,p4080-esdhc", "fsl,esdhc";
377 reg = <0x114000 0x1000>;
378 interrupts = <48 2 0 0>;
379 voltage-ranges = <3300 3300>;
380 sdhci,auto-cmd12;
381 clock-frequency = <0>;
382 };
383
384 i2c@118000 {
385 #address-cells = <1>;
386 #size-cells = <0>;
387 cell-index = <0>;
388 compatible = "fsl-i2c";
389 reg = <0x118000 0x100>;
390 interrupts = <38 2 0 0>;
391 dfsrr;
392 };
393
394 i2c@118100 {
395 #address-cells = <1>;
396 #size-cells = <0>;
397 cell-index = <1>;
398 compatible = "fsl-i2c";
399 reg = <0x118100 0x100>;
400 interrupts = <38 2 0 0>;
401 dfsrr;
402 };
403
404 i2c@119000 {
405 #address-cells = <1>;
406 #size-cells = <0>;
407 cell-index = <2>;
408 compatible = "fsl-i2c";
409 reg = <0x119000 0x100>;
410 interrupts = <39 2 0 0>;
411 dfsrr;
412 };
413
414 i2c@119100 {
415 #address-cells = <1>;
416 #size-cells = <0>;
417 cell-index = <3>;
418 compatible = "fsl-i2c";
419 reg = <0x119100 0x100>;
420 interrupts = <39 2 0 0>;
421 dfsrr;
422 };
423
424 serial0: serial@11c500 {
425 cell-index = <0>;
426 device_type = "serial";
427 compatible = "ns16550";
428 reg = <0x11c500 0x100>;
429 clock-frequency = <0>;
430 interrupts = <36 2 0 0>;
431 };
432
433 serial1: serial@11c600 {
434 cell-index = <1>;
435 device_type = "serial";
436 compatible = "ns16550";
437 reg = <0x11c600 0x100>;
438 clock-frequency = <0>;
439 interrupts = <36 2 0 0>;
440 };
441
442 serial2: serial@11d500 {
443 cell-index = <2>;
444 device_type = "serial";
445 compatible = "ns16550";
446 reg = <0x11d500 0x100>;
447 clock-frequency = <0>;
448 interrupts = <37 2 0 0>;
449 };
450
451 serial3: serial@11d600 {
452 cell-index = <3>;
453 device_type = "serial";
454 compatible = "ns16550";
455 reg = <0x11d600 0x100>;
456 clock-frequency = <0>;
457 interrupts = <37 2 0 0>;
458 };
459
460 gpio0: gpio@130000 {
461 compatible = "fsl,p4080-gpio", "fsl,qoriq-gpio";
462 reg = <0x130000 0x1000>;
463 interrupts = <55 2 0 0>;
464 #gpio-cells = <2>;
465 gpio-controller;
466 };
467
468 usb0: usb@210000 {
469 compatible = "fsl,p4080-usb2-mph",
470 "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
471 reg = <0x210000 0x1000>;
472 #address-cells = <1>;
473 #size-cells = <0>;
474 interrupts = <44 0x2 0 0>;
475 };
476
477 usb1: usb@211000 {
478 compatible = "fsl,p4080-usb2-dr",
479 "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
480 reg = <0x211000 0x1000>;
481 #address-cells = <1>;
482 #size-cells = <0>;
483 interrupts = <45 0x2 0 0>;
484 };
485
486 crypto: crypto@300000 {
487 compatible = "fsl,sec-v4.0";
488 #address-cells = <1>;
489 #size-cells = <1>;
490 reg = <0x300000 0x10000>;
491 ranges = <0 0x300000 0x10000>;
492 interrupt-parent = <&mpic>;
493 interrupts = <92 2 0 0>;
494
495 sec_jr0: jr@1000 {
496 compatible = "fsl,sec-v4.0-job-ring";
497 reg = <0x1000 0x1000>;
498 interrupt-parent = <&mpic>;
499 interrupts = <88 2 0 0>;
500 };
501
502 sec_jr1: jr@2000 {
503 compatible = "fsl,sec-v4.0-job-ring";
504 reg = <0x2000 0x1000>;
505 interrupt-parent = <&mpic>;
506 interrupts = <89 2 0 0>;
507 };
508
509 sec_jr2: jr@3000 {
510 compatible = "fsl,sec-v4.0-job-ring";
511 reg = <0x3000 0x1000>;
512 interrupt-parent = <&mpic>;
513 interrupts = <90 2 0 0>;
514 };
515
516 sec_jr3: jr@4000 {
517 compatible = "fsl,sec-v4.0-job-ring";
518 reg = <0x4000 0x1000>;
519 interrupt-parent = <&mpic>;
520 interrupts = <91 2 0 0>;
521 };
522
523 rtic@6000 {
524 compatible = "fsl,sec-v4.0-rtic";
525 #address-cells = <1>;
526 #size-cells = <1>;
527 reg = <0x6000 0x100>;
528 ranges = <0x0 0x6100 0xe00>;
529
530 rtic_a: rtic-a@0 {
531 compatible = "fsl,sec-v4.0-rtic-memory";
532 reg = <0x00 0x20 0x100 0x80>;
533 };
534
535 rtic_b: rtic-b@20 {
536 compatible = "fsl,sec-v4.0-rtic-memory";
537 reg = <0x20 0x20 0x200 0x80>;
538 };
539
540 rtic_c: rtic-c@40 {
541 compatible = "fsl,sec-v4.0-rtic-memory";
542 reg = <0x40 0x20 0x300 0x80>;
543 };
544
545 rtic_d: rtic-d@60 {
546 compatible = "fsl,sec-v4.0-rtic-memory";
547 reg = <0x60 0x20 0x500 0x80>;
548 };
549 };
550 };
551
552 sec_mon: sec_mon@314000 {
553 compatible = "fsl,sec-v4.0-mon";
554 reg = <0x314000 0x1000>;
555 interrupt-parent = <&mpic>;
556 interrupts = <93 2 0 0>;
557 };
558 };
559
560 rapidio0: rapidio@ffe0c0000 {
561 #address-cells = <2>;
562 #size-cells = <2>;
563 compatible = "fsl,rapidio-delta";
564 interrupts = <
565 16 2 1 11 /* err_irq */
566 56 2 0 0 /* bell_outb_irq */
567 57 2 0 0 /* bell_inb_irq */
568 60 2 0 0 /* msg1_tx_irq */
569 61 2 0 0 /* msg1_rx_irq */
570 62 2 0 0 /* msg2_tx_irq */
571 63 2 0 0>; /* msg2_rx_irq */
572 };
573
574 localbus@ffe124000 {
575 compatible = "fsl,p4080-elbc", "fsl,elbc", "simple-bus";
576 interrupts = <25 2 0 0>;
577 #address-cells = <2>;
578 #size-cells = <1>;
579 };
580
581 pci0: pcie@ffe200000 {
582 compatible = "fsl,p4080-pcie";
583 device_type = "pci";
584 #size-cells = <2>;
585 #address-cells = <3>;
586 bus-range = <0x0 0xff>;
587 clock-frequency = <0x1fca055>;
588 fsl,msi = <&msi0>;
589 interrupts = <16 2 1 15>;
590 pcie@0 {
591 reg = <0 0 0 0 0>;
592 #interrupt-cells = <1>;
593 #size-cells = <2>;
594 #address-cells = <3>;
595 device_type = "pci";
596 interrupts = <16 2 1 15>;
597 interrupt-map-mask = <0xf800 0 0 7>;
598 interrupt-map = <
599 /* IDSEL 0x0 */
600 0000 0 0 1 &mpic 40 1 0 0
601 0000 0 0 2 &mpic 1 1 0 0
602 0000 0 0 3 &mpic 2 1 0 0
603 0000 0 0 4 &mpic 3 1 0 0
604 >;
605 };
606 };
607
608 pci1: pcie@ffe201000 {
609 compatible = "fsl,p4080-pcie";
610 device_type = "pci";
611 #size-cells = <2>;
612 #address-cells = <3>;
613 bus-range = <0 0xff>;
614 clock-frequency = <0x1fca055>;
615 fsl,msi = <&msi1>;
616 interrupts = <16 2 1 14>;
617 pcie@0 {
618 reg = <0 0 0 0 0>;
619 #interrupt-cells = <1>;
620 #size-cells = <2>;
621 #address-cells = <3>;
622 device_type = "pci";
623 interrupts = <16 2 1 14>;
624 interrupt-map-mask = <0xf800 0 0 7>;
625 interrupt-map = <
626 /* IDSEL 0x0 */
627 0000 0 0 1 &mpic 41 1 0 0
628 0000 0 0 2 &mpic 5 1 0 0
629 0000 0 0 3 &mpic 6 1 0 0
630 0000 0 0 4 &mpic 7 1 0 0
631 >;
632 };
633 };
634
635 pci2: pcie@ffe202000 {
636 compatible = "fsl,p4080-pcie";
637 device_type = "pci";
638 #size-cells = <2>;
639 #address-cells = <3>;
640 bus-range = <0x0 0xff>;
641 clock-frequency = <0x1fca055>;
642 fsl,msi = <&msi2>;
643 interrupts = <16 2 1 13>;
644 pcie@0 {
645 reg = <0 0 0 0 0>;
646 #interrupt-cells = <1>;
647 #size-cells = <2>;
648 #address-cells = <3>;
649 device_type = "pci";
650 interrupts = <16 2 1 13>;
651 interrupt-map-mask = <0xf800 0 0 7>;
652 interrupt-map = <
653 /* IDSEL 0x0 */
654 0000 0 0 1 &mpic 42 1 0 0
655 0000 0 0 2 &mpic 9 1 0 0
656 0000 0 0 3 &mpic 10 1 0 0
657 0000 0 0 4 &mpic 11 1 0 0
658 >;
659 };
660 };
661};
diff --git a/arch/powerpc/boot/dts/p5020ds.dts b/arch/powerpc/boot/dts/p5020ds.dts
new file mode 100644
index 000000000000..8366e2fd2fba
--- /dev/null
+++ b/arch/powerpc/boot/dts/p5020ds.dts
@@ -0,0 +1,215 @@
1/*
2 * P5020DS Device Tree Source
3 *
4 * Copyright 2010-2011 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/include/ "p5020si.dtsi"
36
37/ {
38 model = "fsl,P5020DS";
39 compatible = "fsl,P5020DS";
40 #address-cells = <2>;
41 #size-cells = <2>;
42 interrupt-parent = <&mpic>;
43
44 memory {
45 device_type = "memory";
46 };
47
48 soc: soc@ffe000000 {
49 spi@110000 {
50 flash@0 {
51 #address-cells = <1>;
52 #size-cells = <1>;
53 compatible = "spansion,s25sl12801";
54 reg = <0>;
55 spi-max-frequency = <40000000>; /* input clock */
56 partition@u-boot {
57 label = "u-boot";
58 reg = <0x00000000 0x00100000>;
59 read-only;
60 };
61 partition@kernel {
62 label = "kernel";
63 reg = <0x00100000 0x00500000>;
64 read-only;
65 };
66 partition@dtb {
67 label = "dtb";
68 reg = <0x00600000 0x00100000>;
69 read-only;
70 };
71 partition@fs {
72 label = "file system";
73 reg = <0x00700000 0x00900000>;
74 };
75 };
76 };
77
78 i2c@118100 {
79 eeprom@51 {
80 compatible = "at24,24c256";
81 reg = <0x51>;
82 };
83 eeprom@52 {
84 compatible = "at24,24c256";
85 reg = <0x52>;
86 };
87 };
88
89 i2c@119100 {
90 rtc@68 {
91 compatible = "dallas,ds3232";
92 reg = <0x68>;
93 interrupts = <0x1 0x1 0 0>;
94 };
95 };
96 };
97
98 localbus@ffe124000 {
99 reg = <0xf 0xfe124000 0 0x1000>;
100 ranges = <0 0 0xf 0xe8000000 0x08000000
101 2 0 0xf 0xffa00000 0x00040000
102 3 0 0xf 0xffdf0000 0x00008000>;
103
104 flash@0,0 {
105 compatible = "cfi-flash";
106 reg = <0 0 0x08000000>;
107 bank-width = <2>;
108 device-width = <2>;
109 };
110
111 nand@2,0 {
112 #address-cells = <1>;
113 #size-cells = <1>;
114 compatible = "fsl,elbc-fcm-nand";
115 reg = <0x2 0x0 0x40000>;
116
117 partition@0 {
118 label = "NAND U-Boot Image";
119 reg = <0x0 0x02000000>;
120 read-only;
121 };
122
123 partition@2000000 {
124 label = "NAND Root File System";
125 reg = <0x02000000 0x10000000>;
126 };
127
128 partition@12000000 {
129 label = "NAND Compressed RFS Image";
130 reg = <0x12000000 0x08000000>;
131 };
132
133 partition@1a000000 {
134 label = "NAND Linux Kernel Image";
135 reg = <0x1a000000 0x04000000>;
136 };
137
138 partition@1e000000 {
139 label = "NAND DTB Image";
140 reg = <0x1e000000 0x01000000>;
141 };
142
143 partition@1f000000 {
144 label = "NAND Writable User area";
145 reg = <0x1f000000 0x21000000>;
146 };
147 };
148
149 board-control@3,0 {
150 compatible = "fsl,p5020ds-pixis";
151 reg = <3 0 0x20>;
152 };
153 };
154
155 pci0: pcie@ffe200000 {
156 reg = <0xf 0xfe200000 0 0x1000>;
157 ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
158 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
159
160 pcie@0 {
161 ranges = <0x02000000 0 0xe0000000
162 0x02000000 0 0xe0000000
163 0 0x20000000
164
165 0x01000000 0 0x00000000
166 0x01000000 0 0x00000000
167 0 0x00010000>;
168 };
169 };
170
171 pci1: pcie@ffe201000 {
172 reg = <0xf 0xfe201000 0 0x1000>;
173 ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
174 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
175 pcie@0 {
176 ranges = <0x02000000 0 0xe0000000
177 0x02000000 0 0xe0000000
178 0 0x20000000
179
180 0x01000000 0 0x00000000
181 0x01000000 0 0x00000000
182 0 0x00010000>;
183 };
184 };
185
186 pci2: pcie@ffe202000 {
187 reg = <0xf 0xfe202000 0 0x1000>;
188 ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
189 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
190 pcie@0 {
191 ranges = <0x02000000 0 0xe0000000
192 0x02000000 0 0xe0000000
193 0 0x20000000
194
195 0x01000000 0 0x00000000
196 0x01000000 0 0x00000000
197 0 0x00010000>;
198 };
199 };
200
201 pci3: pcie@ffe203000 {
202 reg = <0xf 0xfe203000 0 0x1000>;
203 ranges = <0x02000000 0 0xe0000000 0xc 0x60000000 0 0x20000000
204 0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>;
205 pcie@0 {
206 ranges = <0x02000000 0 0xe0000000
207 0x02000000 0 0xe0000000
208 0 0x20000000
209
210 0x01000000 0 0x00000000
211 0x01000000 0 0x00000000
212 0 0x00010000>;
213 };
214 };
215};
diff --git a/arch/powerpc/boot/dts/p5020si.dtsi b/arch/powerpc/boot/dts/p5020si.dtsi
new file mode 100644
index 000000000000..5e6048ec55bb
--- /dev/null
+++ b/arch/powerpc/boot/dts/p5020si.dtsi
@@ -0,0 +1,652 @@
1/*
2 * P5020 Silicon Device Tree Source
3 *
4 * Copyright 2010-2011 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/dts-v1/;
36
37/ {
38 compatible = "fsl,P5020";
39 #address-cells = <2>;
40 #size-cells = <2>;
41 interrupt-parent = <&mpic>;
42
43 aliases {
44 ccsr = &soc;
45
46 serial0 = &serial0;
47 serial1 = &serial1;
48 serial2 = &serial2;
49 serial3 = &serial3;
50 pci0 = &pci0;
51 pci1 = &pci1;
52 pci2 = &pci2;
53 pci3 = &pci3;
54 usb0 = &usb0;
55 usb1 = &usb1;
56 dma0 = &dma0;
57 dma1 = &dma1;
58 sdhc = &sdhc;
59 msi0 = &msi0;
60 msi1 = &msi1;
61 msi2 = &msi2;
62
63 crypto = &crypto;
64 sec_jr0 = &sec_jr0;
65 sec_jr1 = &sec_jr1;
66 sec_jr2 = &sec_jr2;
67 sec_jr3 = &sec_jr3;
68 rtic_a = &rtic_a;
69 rtic_b = &rtic_b;
70 rtic_c = &rtic_c;
71 rtic_d = &rtic_d;
72 sec_mon = &sec_mon;
73
74/*
75 rio0 = &rapidio0;
76 */
77 };
78
79 cpus {
80 #address-cells = <1>;
81 #size-cells = <0>;
82
83 cpu0: PowerPC,e5500@0 {
84 device_type = "cpu";
85 reg = <0>;
86 next-level-cache = <&L2_0>;
87 L2_0: l2-cache {
88 next-level-cache = <&cpc>;
89 };
90 };
91 cpu1: PowerPC,e5500@1 {
92 device_type = "cpu";
93 reg = <1>;
94 next-level-cache = <&L2_1>;
95 L2_1: l2-cache {
96 next-level-cache = <&cpc>;
97 };
98 };
99 };
100
101 soc: soc@ffe000000 {
102 #address-cells = <1>;
103 #size-cells = <1>;
104 device_type = "soc";
105 compatible = "simple-bus";
106 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
107 reg = <0xf 0xfe000000 0 0x00001000>;
108
109 soc-sram-error {
110 compatible = "fsl,soc-sram-error";
111 interrupts = <16 2 1 29>;
112 };
113
114 corenet-law@0 {
115 compatible = "fsl,corenet-law";
116 reg = <0x0 0x1000>;
117 fsl,num-laws = <32>;
118 };
119
120 memory-controller@8000 {
121 compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
122 reg = <0x8000 0x1000>;
123 interrupts = <16 2 1 23>;
124 };
125
126 memory-controller@9000 {
127 compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
128 reg = <0x9000 0x1000>;
129 interrupts = <16 2 1 22>;
130 };
131
132 cpc: l3-cache-controller@10000 {
133 compatible = "fsl,p5020-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache";
134 reg = <0x10000 0x1000
135 0x11000 0x1000>;
136 interrupts = <16 2 1 27
137 16 2 1 26>;
138 };
139
140 corenet-cf@18000 {
141 compatible = "fsl,corenet-cf";
142 reg = <0x18000 0x1000>;
143 interrupts = <16 2 1 31>;
144 fsl,ccf-num-csdids = <32>;
145 fsl,ccf-num-snoopids = <32>;
146 };
147
148 iommu@20000 {
149 compatible = "fsl,pamu-v1.0", "fsl,pamu";
150 reg = <0x20000 0x4000>;
151 interrupts = <
152 24 2 0 0
153 16 2 1 30>;
154 };
155
156 mpic: pic@40000 {
157 clock-frequency = <0>;
158 interrupt-controller;
159 #address-cells = <0>;
160 #interrupt-cells = <4>;
161 reg = <0x40000 0x40000>;
162 compatible = "fsl,mpic", "chrp,open-pic";
163 device_type = "open-pic";
164 };
165
166 msi0: msi@41600 {
167 compatible = "fsl,mpic-msi";
168 reg = <0x41600 0x200>;
169 msi-available-ranges = <0 0x100>;
170 interrupts = <
171 0xe0 0 0 0
172 0xe1 0 0 0
173 0xe2 0 0 0
174 0xe3 0 0 0
175 0xe4 0 0 0
176 0xe5 0 0 0
177 0xe6 0 0 0
178 0xe7 0 0 0>;
179 };
180
181 msi1: msi@41800 {
182 compatible = "fsl,mpic-msi";
183 reg = <0x41800 0x200>;
184 msi-available-ranges = <0 0x100>;
185 interrupts = <
186 0xe8 0 0 0
187 0xe9 0 0 0
188 0xea 0 0 0
189 0xeb 0 0 0
190 0xec 0 0 0
191 0xed 0 0 0
192 0xee 0 0 0
193 0xef 0 0 0>;
194 };
195
196 msi2: msi@41a00 {
197 compatible = "fsl,mpic-msi";
198 reg = <0x41a00 0x200>;
199 msi-available-ranges = <0 0x100>;
200 interrupts = <
201 0xf0 0 0 0
202 0xf1 0 0 0
203 0xf2 0 0 0
204 0xf3 0 0 0
205 0xf4 0 0 0
206 0xf5 0 0 0
207 0xf6 0 0 0
208 0xf7 0 0 0>;
209 };
210
211 guts: global-utilities@e0000 {
212 compatible = "fsl,qoriq-device-config-1.0";
213 reg = <0xe0000 0xe00>;
214 fsl,has-rstcr;
215 #sleep-cells = <1>;
216 fsl,liodn-bits = <12>;
217 };
218
219 pins: global-utilities@e0e00 {
220 compatible = "fsl,qoriq-pin-control-1.0";
221 reg = <0xe0e00 0x200>;
222 #sleep-cells = <2>;
223 };
224
225 clockgen: global-utilities@e1000 {
226 compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0";
227 reg = <0xe1000 0x1000>;
228 clock-frequency = <0>;
229 };
230
231 rcpm: global-utilities@e2000 {
232 compatible = "fsl,qoriq-rcpm-1.0";
233 reg = <0xe2000 0x1000>;
234 #sleep-cells = <1>;
235 };
236
237 sfp: sfp@e8000 {
238 compatible = "fsl,p5020-sfp", "fsl,qoriq-sfp-1.0";
239 reg = <0xe8000 0x1000>;
240 };
241
242 serdes: serdes@ea000 {
243 compatible = "fsl,p5020-serdes";
244 reg = <0xea000 0x1000>;
245 };
246
247 dma0: dma@100300 {
248 #address-cells = <1>;
249 #size-cells = <1>;
250 compatible = "fsl,p5020-dma", "fsl,eloplus-dma";
251 reg = <0x100300 0x4>;
252 ranges = <0x0 0x100100 0x200>;
253 cell-index = <0>;
254 dma-channel@0 {
255 compatible = "fsl,p5020-dma-channel",
256 "fsl,eloplus-dma-channel";
257 reg = <0x0 0x80>;
258 cell-index = <0>;
259 interrupts = <28 2 0 0>;
260 };
261 dma-channel@80 {
262 compatible = "fsl,p5020-dma-channel",
263 "fsl,eloplus-dma-channel";
264 reg = <0x80 0x80>;
265 cell-index = <1>;
266 interrupts = <29 2 0 0>;
267 };
268 dma-channel@100 {
269 compatible = "fsl,p5020-dma-channel",
270 "fsl,eloplus-dma-channel";
271 reg = <0x100 0x80>;
272 cell-index = <2>;
273 interrupts = <30 2 0 0>;
274 };
275 dma-channel@180 {
276 compatible = "fsl,p5020-dma-channel",
277 "fsl,eloplus-dma-channel";
278 reg = <0x180 0x80>;
279 cell-index = <3>;
280 interrupts = <31 2 0 0>;
281 };
282 };
283
284 dma1: dma@101300 {
285 #address-cells = <1>;
286 #size-cells = <1>;
287 compatible = "fsl,p5020-dma", "fsl,eloplus-dma";
288 reg = <0x101300 0x4>;
289 ranges = <0x0 0x101100 0x200>;
290 cell-index = <1>;
291 dma-channel@0 {
292 compatible = "fsl,p5020-dma-channel",
293 "fsl,eloplus-dma-channel";
294 reg = <0x0 0x80>;
295 cell-index = <0>;
296 interrupts = <32 2 0 0>;
297 };
298 dma-channel@80 {
299 compatible = "fsl,p5020-dma-channel",
300 "fsl,eloplus-dma-channel";
301 reg = <0x80 0x80>;
302 cell-index = <1>;
303 interrupts = <33 2 0 0>;
304 };
305 dma-channel@100 {
306 compatible = "fsl,p5020-dma-channel",
307 "fsl,eloplus-dma-channel";
308 reg = <0x100 0x80>;
309 cell-index = <2>;
310 interrupts = <34 2 0 0>;
311 };
312 dma-channel@180 {
313 compatible = "fsl,p5020-dma-channel",
314 "fsl,eloplus-dma-channel";
315 reg = <0x180 0x80>;
316 cell-index = <3>;
317 interrupts = <35 2 0 0>;
318 };
319 };
320
321 spi@110000 {
322 #address-cells = <1>;
323 #size-cells = <0>;
324 compatible = "fsl,p5020-espi", "fsl,mpc8536-espi";
325 reg = <0x110000 0x1000>;
326 interrupts = <53 0x2 0 0>;
327 fsl,espi-num-chipselects = <4>;
328 };
329
330 sdhc: sdhc@114000 {
331 compatible = "fsl,p5020-esdhc", "fsl,esdhc";
332 reg = <0x114000 0x1000>;
333 interrupts = <48 2 0 0>;
334 sdhci,auto-cmd12;
335 clock-frequency = <0>;
336 };
337
338 i2c@118000 {
339 #address-cells = <1>;
340 #size-cells = <0>;
341 cell-index = <0>;
342 compatible = "fsl-i2c";
343 reg = <0x118000 0x100>;
344 interrupts = <38 2 0 0>;
345 dfsrr;
346 };
347
348 i2c@118100 {
349 #address-cells = <1>;
350 #size-cells = <0>;
351 cell-index = <1>;
352 compatible = "fsl-i2c";
353 reg = <0x118100 0x100>;
354 interrupts = <38 2 0 0>;
355 dfsrr;
356 };
357
358 i2c@119000 {
359 #address-cells = <1>;
360 #size-cells = <0>;
361 cell-index = <2>;
362 compatible = "fsl-i2c";
363 reg = <0x119000 0x100>;
364 interrupts = <39 2 0 0>;
365 dfsrr;
366 };
367
368 i2c@119100 {
369 #address-cells = <1>;
370 #size-cells = <0>;
371 cell-index = <3>;
372 compatible = "fsl-i2c";
373 reg = <0x119100 0x100>;
374 interrupts = <39 2 0 0>;
375 dfsrr;
376 };
377
378 serial0: serial@11c500 {
379 cell-index = <0>;
380 device_type = "serial";
381 compatible = "ns16550";
382 reg = <0x11c500 0x100>;
383 clock-frequency = <0>;
384 interrupts = <36 2 0 0>;
385 };
386
387 serial1: serial@11c600 {
388 cell-index = <1>;
389 device_type = "serial";
390 compatible = "ns16550";
391 reg = <0x11c600 0x100>;
392 clock-frequency = <0>;
393 interrupts = <36 2 0 0>;
394 };
395
396 serial2: serial@11d500 {
397 cell-index = <2>;
398 device_type = "serial";
399 compatible = "ns16550";
400 reg = <0x11d500 0x100>;
401 clock-frequency = <0>;
402 interrupts = <37 2 0 0>;
403 };
404
405 serial3: serial@11d600 {
406 cell-index = <3>;
407 device_type = "serial";
408 compatible = "ns16550";
409 reg = <0x11d600 0x100>;
410 clock-frequency = <0>;
411 interrupts = <37 2 0 0>;
412 };
413
414 gpio0: gpio@130000 {
415 compatible = "fsl,p5020-gpio", "fsl,qoriq-gpio";
416 reg = <0x130000 0x1000>;
417 interrupts = <55 2 0 0>;
418 #gpio-cells = <2>;
419 gpio-controller;
420 };
421
422 usb0: usb@210000 {
423 compatible = "fsl,p5020-usb2-mph",
424 "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
425 reg = <0x210000 0x1000>;
426 #address-cells = <1>;
427 #size-cells = <0>;
428 interrupts = <44 0x2 0 0>;
429 phy_type = "utmi";
430 port0;
431 };
432
433 usb1: usb@211000 {
434 compatible = "fsl,p5020-usb2-dr",
435 "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
436 reg = <0x211000 0x1000>;
437 #address-cells = <1>;
438 #size-cells = <0>;
439 interrupts = <45 0x2 0 0>;
440 dr_mode = "host";
441 phy_type = "utmi";
442 };
443
444 sata@220000 {
445 compatible = "fsl,p5020-sata", "fsl,pq-sata-v2";
446 reg = <0x220000 0x1000>;
447 interrupts = <68 0x2 0 0>;
448 };
449
450 sata@221000 {
451 compatible = "fsl,p5020-sata", "fsl,pq-sata-v2";
452 reg = <0x221000 0x1000>;
453 interrupts = <69 0x2 0 0>;
454 };
455
456 crypto: crypto@300000 {
457 compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
458 #address-cells = <1>;
459 #size-cells = <1>;
460 reg = <0x300000 0x10000>;
461 ranges = <0 0x300000 0x10000>;
462 interrupts = <92 2 0 0>;
463
464 sec_jr0: jr@1000 {
465 compatible = "fsl,sec-v4.2-job-ring",
466 "fsl,sec-v4.0-job-ring";
467 reg = <0x1000 0x1000>;
468 interrupts = <88 2 0 0>;
469 };
470
471 sec_jr1: jr@2000 {
472 compatible = "fsl,sec-v4.2-job-ring",
473 "fsl,sec-v4.0-job-ring";
474 reg = <0x2000 0x1000>;
475 interrupts = <89 2 0 0>;
476 };
477
478 sec_jr2: jr@3000 {
479 compatible = "fsl,sec-v4.2-job-ring",
480 "fsl,sec-v4.0-job-ring";
481 reg = <0x3000 0x1000>;
482 interrupts = <90 2 0 0>;
483 };
484
485 sec_jr3: jr@4000 {
486 compatible = "fsl,sec-v4.2-job-ring",
487 "fsl,sec-v4.0-job-ring";
488 reg = <0x4000 0x1000>;
489 interrupts = <91 2 0 0>;
490 };
491
492 rtic@6000 {
493 compatible = "fsl,sec-v4.2-rtic",
494 "fsl,sec-v4.0-rtic";
495 #address-cells = <1>;
496 #size-cells = <1>;
497 reg = <0x6000 0x100>;
498 ranges = <0x0 0x6100 0xe00>;
499
500 rtic_a: rtic-a@0 {
501 compatible = "fsl,sec-v4.2-rtic-memory",
502 "fsl,sec-v4.0-rtic-memory";
503 reg = <0x00 0x20 0x100 0x80>;
504 };
505
506 rtic_b: rtic-b@20 {
507 compatible = "fsl,sec-v4.2-rtic-memory",
508 "fsl,sec-v4.0-rtic-memory";
509 reg = <0x20 0x20 0x200 0x80>;
510 };
511
512 rtic_c: rtic-c@40 {
513 compatible = "fsl,sec-v4.2-rtic-memory",
514 "fsl,sec-v4.0-rtic-memory";
515 reg = <0x40 0x20 0x300 0x80>;
516 };
517
518 rtic_d: rtic-d@60 {
519 compatible = "fsl,sec-v4.2-rtic-memory",
520 "fsl,sec-v4.0-rtic-memory";
521 reg = <0x60 0x20 0x500 0x80>;
522 };
523 };
524 };
525
526 sec_mon: sec_mon@314000 {
527 compatible = "fsl,sec-v4.2-mon", "fsl,sec-v4.0-mon";
528 reg = <0x314000 0x1000>;
529 interrupts = <93 2 0 0>;
530 };
531 };
532
533/*
534 rapidio0: rapidio@ffe0c0000
535*/
536
537 localbus@ffe124000 {
538 compatible = "fsl,p5020-elbc", "fsl,elbc", "simple-bus";
539 interrupts = <25 2 0 0>;
540 #address-cells = <2>;
541 #size-cells = <1>;
542 };
543
544 pci0: pcie@ffe200000 {
545 compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
546 device_type = "pci";
547 #size-cells = <2>;
548 #address-cells = <3>;
549 bus-range = <0x0 0xff>;
550 clock-frequency = <0x1fca055>;
551 fsl,msi = <&msi0>;
552 interrupts = <16 2 1 15>;
553
554 pcie@0 {
555 reg = <0 0 0 0 0>;
556 #interrupt-cells = <1>;
557 #size-cells = <2>;
558 #address-cells = <3>;
559 device_type = "pci";
560 interrupts = <16 2 1 15>;
561 interrupt-map-mask = <0xf800 0 0 7>;
562 interrupt-map = <
563 /* IDSEL 0x0 */
564 0000 0 0 1 &mpic 40 1 0 0
565 0000 0 0 2 &mpic 1 1 0 0
566 0000 0 0 3 &mpic 2 1 0 0
567 0000 0 0 4 &mpic 3 1 0 0
568 >;
569 };
570 };
571
572 pci1: pcie@ffe201000 {
573 compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
574 device_type = "pci";
575 #size-cells = <2>;
576 #address-cells = <3>;
577 bus-range = <0 0xff>;
578 clock-frequency = <0x1fca055>;
579 fsl,msi = <&msi1>;
580 interrupts = <16 2 1 14>;
581 pcie@0 {
582 reg = <0 0 0 0 0>;
583 #interrupt-cells = <1>;
584 #size-cells = <2>;
585 #address-cells = <3>;
586 device_type = "pci";
587 interrupts = <16 2 1 14>;
588 interrupt-map-mask = <0xf800 0 0 7>;
589 interrupt-map = <
590 /* IDSEL 0x0 */
591 0000 0 0 1 &mpic 41 1 0 0
592 0000 0 0 2 &mpic 5 1 0 0
593 0000 0 0 3 &mpic 6 1 0 0
594 0000 0 0 4 &mpic 7 1 0 0
595 >;
596 };
597 };
598
599 pci2: pcie@ffe202000 {
600 compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
601 device_type = "pci";
602 #size-cells = <2>;
603 #address-cells = <3>;
604 bus-range = <0x0 0xff>;
605 clock-frequency = <0x1fca055>;
606 fsl,msi = <&msi2>;
607 interrupts = <16 2 1 13>;
608 pcie@0 {
609 reg = <0 0 0 0 0>;
610 #interrupt-cells = <1>;
611 #size-cells = <2>;
612 #address-cells = <3>;
613 device_type = "pci";
614 interrupts = <16 2 1 13>;
615 interrupt-map-mask = <0xf800 0 0 7>;
616 interrupt-map = <
617 /* IDSEL 0x0 */
618 0000 0 0 1 &mpic 42 1 0 0
619 0000 0 0 2 &mpic 9 1 0 0
620 0000 0 0 3 &mpic 10 1 0 0
621 0000 0 0 4 &mpic 11 1 0 0
622 >;
623 };
624 };
625
626 pci3: pcie@ffe203000 {
627 compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
628 device_type = "pci";
629 #size-cells = <2>;
630 #address-cells = <3>;
631 bus-range = <0x0 0xff>;
632 clock-frequency = <0x1fca055>;
633 fsl,msi = <&msi2>;
634 interrupts = <16 2 1 12>;
635 pcie@0 {
636 reg = <0 0 0 0 0>;
637 #interrupt-cells = <1>;
638 #size-cells = <2>;
639 #address-cells = <3>;
640 device_type = "pci";
641 interrupts = <16 2 1 12>;
642 interrupt-map-mask = <0xf800 0 0 7>;
643 interrupt-map = <
644 /* IDSEL 0x0 */
645 0000 0 0 1 &mpic 43 1 0 0
646 0000 0 0 2 &mpic 0 1 0 0
647 0000 0 0 3 &mpic 4 1 0 0
648 0000 0 0 4 &mpic 8 1 0 0
649 >;
650 };
651 };
652};
diff --git a/arch/powerpc/boot/dts/sequoia.dts b/arch/powerpc/boot/dts/sequoia.dts
index 739dd0da2416..b1d329246b08 100644
--- a/arch/powerpc/boot/dts/sequoia.dts
+++ b/arch/powerpc/boot/dts/sequoia.dts
@@ -110,6 +110,18 @@
110 dcr-reg = <0x010 0x002>; 110 dcr-reg = <0x010 0x002>;
111 }; 111 };
112 112
113 CRYPTO: crypto@e0100000 {
114 compatible = "amcc,ppc440epx-crypto","amcc,ppc4xx-crypto";
115 reg = <0 0xE0100000 0x80400>;
116 interrupt-parent = <&UIC0>;
117 interrupts = <0x17 0x4>;
118 };
119
120 rng@e0120000 {
121 compatible = "amcc,ppc440epx-rng","amcc,ppc4xx-rng";
122 reg = <0 0xE0120000 0x150>;
123 };
124
113 DMA0: dma { 125 DMA0: dma {
114 compatible = "ibm,dma-440epx", "ibm,dma-4xx"; 126 compatible = "ibm,dma-440epx", "ibm,dma-4xx";
115 dcr-reg = <0x100 0x027>; 127 dcr-reg = <0x100 0x027>;
diff --git a/arch/powerpc/boot/dts/socrates.dts b/arch/powerpc/boot/dts/socrates.dts
index feb4ef6bd144..38c35404bdc3 100644
--- a/arch/powerpc/boot/dts/socrates.dts
+++ b/arch/powerpc/boot/dts/socrates.dts
@@ -240,6 +240,8 @@
240 #address-cells = <2>; 240 #address-cells = <2>;
241 #size-cells = <1>; 241 #size-cells = <1>;
242 reg = <0xe0005000 0x40>; 242 reg = <0xe0005000 0x40>;
243 interrupt-parent = <&mpic>;
244 interrupts = <19 2>;
243 245
244 ranges = <0 0 0xfc000000 0x04000000 246 ranges = <0 0 0xfc000000 0x04000000
245 2 0 0xc8000000 0x04000000 247 2 0 0xc8000000 0x04000000
diff --git a/arch/powerpc/boot/dts/taishan.dts b/arch/powerpc/boot/dts/taishan.dts
index 058438f9629b..1657ad0bf8a6 100644
--- a/arch/powerpc/boot/dts/taishan.dts
+++ b/arch/powerpc/boot/dts/taishan.dts
@@ -337,7 +337,7 @@
337 rx-fifo-size = <4096>; 337 rx-fifo-size = <4096>;
338 tx-fifo-size = <2048>; 338 tx-fifo-size = <2048>;
339 phy-mode = "rgmii"; 339 phy-mode = "rgmii";
340 phy-map = <0x00000001>; 340 phy-address = <1>;
341 rgmii-device = <&RGMII0>; 341 rgmii-device = <&RGMII0>;
342 rgmii-channel = <0>; 342 rgmii-channel = <0>;
343 zmii-device = <&ZMII0>; 343 zmii-device = <&ZMII0>;
@@ -361,7 +361,7 @@
361 rx-fifo-size = <4096>; 361 rx-fifo-size = <4096>;
362 tx-fifo-size = <2048>; 362 tx-fifo-size = <2048>;
363 phy-mode = "rgmii"; 363 phy-mode = "rgmii";
364 phy-map = <0x00000003>; 364 phy-address = <3>;
365 rgmii-device = <&RGMII0>; 365 rgmii-device = <&RGMII0>;
366 rgmii-channel = <1>; 366 rgmii-channel = <1>;
367 zmii-device = <&ZMII0>; 367 zmii-device = <&ZMII0>;
diff --git a/arch/powerpc/boot/dts/tqm8540.dts b/arch/powerpc/boot/dts/tqm8540.dts
index 15ca731bc24e..0a4cedbdcb55 100644
--- a/arch/powerpc/boot/dts/tqm8540.dts
+++ b/arch/powerpc/boot/dts/tqm8540.dts
@@ -277,6 +277,48 @@
277 }; 277 };
278 }; 278 };
279 279
280 localbus@e0005000 {
281 #address-cells = <2>;
282 #size-cells = <1>;
283 compatible = "fsl,mpc8540-localbus", "fsl,pq3-localbus",
284 "simple-bus";
285 reg = <0xe0005000 0x1000>;
286 interrupt-parent = <&mpic>;
287 interrupts = <19 2>;
288
289 ranges = <0x0 0x0 0xfe000000 0x02000000>;
290
291 nor@0,0 {
292 #address-cells = <1>;
293 #size-cells = <1>;
294 compatible = "cfi-flash";
295 reg = <0x0 0x0 0x02000000>;
296 bank-width = <4>;
297 device-width = <2>;
298 partition@0 {
299 label = "kernel";
300 reg = <0x00000000 0x00180000>;
301 };
302 partition@180000 {
303 label = "root";
304 reg = <0x00180000 0x01dc0000>;
305 };
306 partition@1f40000 {
307 label = "env1";
308 reg = <0x01f40000 0x00040000>;
309 };
310 partition@1f80000 {
311 label = "env2";
312 reg = <0x01f80000 0x00040000>;
313 };
314 partition@1fc0000 {
315 label = "u-boot";
316 reg = <0x01fc0000 0x00040000>;
317 read-only;
318 };
319 };
320 };
321
280 pci0: pci@e0008000 { 322 pci0: pci@e0008000 {
281 #interrupt-cells = <1>; 323 #interrupt-cells = <1>;
282 #size-cells = <2>; 324 #size-cells = <2>;
diff --git a/arch/powerpc/boot/dts/tqm8548-bigflash.dts b/arch/powerpc/boot/dts/tqm8548-bigflash.dts
index 5dbb36edb038..9452c3c05114 100644
--- a/arch/powerpc/boot/dts/tqm8548-bigflash.dts
+++ b/arch/powerpc/boot/dts/tqm8548-bigflash.dts
@@ -346,6 +346,8 @@
346 #address-cells = <2>; 346 #address-cells = <2>;
347 #size-cells = <1>; 347 #size-cells = <1>;
348 reg = <0xa0005000 0x100>; // BRx, ORx, etc. 348 reg = <0xa0005000 0x100>; // BRx, ORx, etc.
349 interrupt-parent = <&mpic>;
350 interrupts = <19 2>;
349 351
350 ranges = < 352 ranges = <
351 0 0x0 0xfc000000 0x04000000 // NOR FLASH bank 1 353 0 0x0 0xfc000000 0x04000000 // NOR FLASH bank 1
diff --git a/arch/powerpc/boot/dts/tqm8548.dts b/arch/powerpc/boot/dts/tqm8548.dts
index a050ae427108..619776f72c90 100644
--- a/arch/powerpc/boot/dts/tqm8548.dts
+++ b/arch/powerpc/boot/dts/tqm8548.dts
@@ -346,6 +346,8 @@
346 #address-cells = <2>; 346 #address-cells = <2>;
347 #size-cells = <1>; 347 #size-cells = <1>;
348 reg = <0xe0005000 0x100>; // BRx, ORx, etc. 348 reg = <0xe0005000 0x100>; // BRx, ORx, etc.
349 interrupt-parent = <&mpic>;
350 interrupts = <19 2>;
349 351
350 ranges = < 352 ranges = <
351 0 0x0 0xfc000000 0x04000000 // NOR FLASH bank 1 353 0 0x0 0xfc000000 0x04000000 // NOR FLASH bank 1
diff --git a/arch/powerpc/boot/dts/tqm8560.dts b/arch/powerpc/boot/dts/tqm8560.dts
index 22ec39b5beeb..7665a16a8b9a 100644
--- a/arch/powerpc/boot/dts/tqm8560.dts
+++ b/arch/powerpc/boot/dts/tqm8560.dts
@@ -312,6 +312,8 @@
312 #address-cells = <2>; 312 #address-cells = <2>;
313 #size-cells = <1>; 313 #size-cells = <1>;
314 reg = <0xe0005000 0x100>; // BRx, ORx, etc. 314 reg = <0xe0005000 0x100>; // BRx, ORx, etc.
315 interrupt-parent = <&mpic>;
316 interrupts = <19 2>;
315 317
316 ranges = < 318 ranges = <
317 0 0x0 0xfc000000 0x04000000 // NOR FLASH bank 1 319 0 0x0 0xfc000000 0x04000000 // NOR FLASH bank 1
diff --git a/arch/powerpc/boot/dts/xpedite5200.dts b/arch/powerpc/boot/dts/xpedite5200.dts
index a0cf53fbd55c..c41a80c55e47 100644
--- a/arch/powerpc/boot/dts/xpedite5200.dts
+++ b/arch/powerpc/boot/dts/xpedite5200.dts
@@ -374,6 +374,8 @@
374 #address-cells = <2>; 374 #address-cells = <2>;
375 #size-cells = <1>; 375 #size-cells = <1>;
376 reg = <0xef005000 0x100>; // BRx, ORx, etc. 376 reg = <0xef005000 0x100>; // BRx, ORx, etc.
377 interrupt-parent = <&mpic>;
378 interrupts = <19 2>;
377 379
378 ranges = < 380 ranges = <
379 0 0x0 0xfc000000 0x04000000 // NOR boot flash 381 0 0x0 0xfc000000 0x04000000 // NOR boot flash
diff --git a/arch/powerpc/boot/dts/xpedite5200_xmon.dts b/arch/powerpc/boot/dts/xpedite5200_xmon.dts
index c5b29752651a..c0efcbb45137 100644
--- a/arch/powerpc/boot/dts/xpedite5200_xmon.dts
+++ b/arch/powerpc/boot/dts/xpedite5200_xmon.dts
@@ -378,6 +378,8 @@
378 #address-cells = <2>; 378 #address-cells = <2>;
379 #size-cells = <1>; 379 #size-cells = <1>;
380 reg = <0xef005000 0x100>; // BRx, ORx, etc. 380 reg = <0xef005000 0x100>; // BRx, ORx, etc.
381 interrupt-parent = <&mpic>;
382 interrupts = <19 2>;
381 383
382 ranges = < 384 ranges = <
383 0 0x0 0xf8000000 0x08000000 // NOR boot flash 385 0 0x0 0xf8000000 0x08000000 // NOR boot flash
diff --git a/arch/powerpc/boot/treeboot-iss4xx.c b/arch/powerpc/boot/treeboot-iss4xx.c
index fcc44952874e..329e710feda2 100644
--- a/arch/powerpc/boot/treeboot-iss4xx.c
+++ b/arch/powerpc/boot/treeboot-iss4xx.c
@@ -34,9 +34,29 @@
34 34
35BSS_STACK(4096); 35BSS_STACK(4096);
36 36
37static u32 ibm4xx_memstart;
38
37static void iss_4xx_fixups(void) 39static void iss_4xx_fixups(void)
38{ 40{
39 ibm4xx_sdram_fixup_memsize(); 41 void *memory;
42 u32 reg[3];
43
44 memory = finddevice("/memory");
45 if (!memory)
46 fatal("Can't find memory node\n");
47 /* This assumes #address-cells = 2, #size-cells =1 and that */
48 getprop(memory, "reg", reg, sizeof(reg));
49 if (reg[2])
50 /* If the device tree specifies the memory range, use it */
51 ibm4xx_memstart = reg[1];
52 else
53 /* othersize, read it from the SDRAM controller */
54 ibm4xx_sdram_fixup_memsize();
55}
56
57static void *iss_4xx_vmlinux_alloc(unsigned long size)
58{
59 return (void *)ibm4xx_memstart;
40} 60}
41 61
42#define SPRN_PIR 0x11E /* Processor Indentification Register */ 62#define SPRN_PIR 0x11E /* Processor Indentification Register */
@@ -48,6 +68,7 @@ void platform_init(void)
48 68
49 simple_alloc_init(_end, avail_ram, 128, 64); 69 simple_alloc_init(_end, avail_ram, 128, 64);
50 platform_ops.fixups = iss_4xx_fixups; 70 platform_ops.fixups = iss_4xx_fixups;
71 platform_ops.vmlinux_alloc = iss_4xx_vmlinux_alloc;
51 platform_ops.exit = ibm44x_dbcr_reset; 72 platform_ops.exit = ibm44x_dbcr_reset;
52 pir_reg = mfspr(SPRN_PIR); 73 pir_reg = mfspr(SPRN_PIR);
53 fdt_set_boot_cpuid_phys(_dtb_start, pir_reg); 74 fdt_set_boot_cpuid_phys(_dtb_start, pir_reg);
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
index 92f863ac8443..a6eb6ad05b2d 100644
--- a/arch/powerpc/configs/44x/iss476-smp_defconfig
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -3,8 +3,8 @@ CONFIG_SMP=y
3CONFIG_EXPERIMENTAL=y 3CONFIG_EXPERIMENTAL=y
4CONFIG_SYSVIPC=y 4CONFIG_SYSVIPC=y
5CONFIG_POSIX_MQUEUE=y 5CONFIG_POSIX_MQUEUE=y
6CONFIG_SPARSE_IRQ=y
6CONFIG_LOG_BUF_SHIFT=14 7CONFIG_LOG_BUF_SHIFT=14
7CONFIG_SYSFS_DEPRECATED_V2=y
8CONFIG_BLK_DEV_INITRD=y 8CONFIG_BLK_DEV_INITRD=y
9# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 9# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
10CONFIG_EXPERT=y 10CONFIG_EXPERT=y
@@ -21,10 +21,11 @@ CONFIG_ISS4xx=y
21CONFIG_HZ_100=y 21CONFIG_HZ_100=y
22CONFIG_MATH_EMULATION=y 22CONFIG_MATH_EMULATION=y
23CONFIG_IRQ_ALL_CPUS=y 23CONFIG_IRQ_ALL_CPUS=y
24CONFIG_SPARSE_IRQ=y
25CONFIG_CMDLINE_BOOL=y 24CONFIG_CMDLINE_BOOL=y
26CONFIG_CMDLINE="root=/dev/issblk0" 25CONFIG_CMDLINE="root=/dev/issblk0"
27# CONFIG_PCI is not set 26# CONFIG_PCI is not set
27CONFIG_ADVANCED_OPTIONS=y
28CONFIG_RELOCATABLE=y
28CONFIG_NET=y 29CONFIG_NET=y
29CONFIG_PACKET=y 30CONFIG_PACKET=y
30CONFIG_UNIX=y 31CONFIG_UNIX=y
@@ -67,7 +68,6 @@ CONFIG_EXT3_FS=y
67# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 68# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
68CONFIG_EXT3_FS_POSIX_ACL=y 69CONFIG_EXT3_FS_POSIX_ACL=y
69CONFIG_EXT3_FS_SECURITY=y 70CONFIG_EXT3_FS_SECURITY=y
70CONFIG_INOTIFY=y
71CONFIG_PROC_KCORE=y 71CONFIG_PROC_KCORE=y
72CONFIG_TMPFS=y 72CONFIG_TMPFS=y
73CONFIG_CRAMFS=y 73CONFIG_CRAMFS=y
diff --git a/arch/powerpc/configs/85xx/p1023rds_defconfig b/arch/powerpc/configs/85xx/p1023rds_defconfig
new file mode 100644
index 000000000000..980ff8f61fd4
--- /dev/null
+++ b/arch/powerpc/configs/85xx/p1023rds_defconfig
@@ -0,0 +1,173 @@
1CONFIG_PPC_85xx=y
2CONFIG_SMP=y
3CONFIG_NR_CPUS=2
4CONFIG_EXPERIMENTAL=y
5CONFIG_SYSVIPC=y
6CONFIG_POSIX_MQUEUE=y
7CONFIG_BSD_PROCESS_ACCT=y
8CONFIG_AUDIT=y
9CONFIG_SPARSE_IRQ=y
10CONFIG_IKCONFIG=y
11CONFIG_IKCONFIG_PROC=y
12CONFIG_LOG_BUF_SHIFT=14
13CONFIG_BLK_DEV_INITRD=y
14# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
15CONFIG_KALLSYMS_ALL=y
16CONFIG_KALLSYMS_EXTRA_PASS=y
17CONFIG_EMBEDDED=y
18CONFIG_MODULES=y
19CONFIG_MODULE_UNLOAD=y
20CONFIG_MODULE_FORCE_UNLOAD=y
21CONFIG_MODVERSIONS=y
22# CONFIG_BLK_DEV_BSG is not set
23CONFIG_P1023_RDS=y
24CONFIG_QUICC_ENGINE=y
25CONFIG_QE_GPIO=y
26CONFIG_CPM2=y
27CONFIG_MPC8xxx_GPIO=y
28CONFIG_HIGHMEM=y
29CONFIG_NO_HZ=y
30CONFIG_HIGH_RES_TIMERS=y
31# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
32CONFIG_BINFMT_MISC=m
33CONFIG_MATH_EMULATION=y
34CONFIG_SWIOTLB=y
35CONFIG_PCI=y
36CONFIG_PCIEPORTBUS=y
37# CONFIG_PCIEAER is not set
38# CONFIG_PCIEASPM is not set
39CONFIG_PCI_MSI=y
40CONFIG_NET=y
41CONFIG_PACKET=y
42CONFIG_UNIX=y
43CONFIG_XFRM_USER=y
44CONFIG_NET_KEY=y
45CONFIG_INET=y
46CONFIG_IP_MULTICAST=y
47CONFIG_IP_ADVANCED_ROUTER=y
48CONFIG_IP_MULTIPLE_TABLES=y
49CONFIG_IP_ROUTE_MULTIPATH=y
50CONFIG_IP_ROUTE_VERBOSE=y
51CONFIG_IP_PNP=y
52CONFIG_IP_PNP_DHCP=y
53CONFIG_IP_PNP_BOOTP=y
54CONFIG_IP_PNP_RARP=y
55CONFIG_NET_IPIP=y
56CONFIG_IP_MROUTE=y
57CONFIG_IP_PIMSM_V1=y
58CONFIG_IP_PIMSM_V2=y
59CONFIG_ARPD=y
60CONFIG_INET_ESP=y
61# CONFIG_INET_XFRM_MODE_BEET is not set
62# CONFIG_INET_LRO is not set
63CONFIG_IPV6=y
64CONFIG_IP_SCTP=m
65CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
66CONFIG_PROC_DEVICETREE=y
67CONFIG_BLK_DEV_LOOP=y
68CONFIG_BLK_DEV_RAM=y
69CONFIG_BLK_DEV_RAM_SIZE=131072
70CONFIG_MISC_DEVICES=y
71CONFIG_EEPROM_LEGACY=y
72CONFIG_BLK_DEV_SD=y
73CONFIG_CHR_DEV_ST=y
74CONFIG_BLK_DEV_SR=y
75CONFIG_CHR_DEV_SG=y
76CONFIG_SCSI_MULTI_LUN=y
77CONFIG_SCSI_LOGGING=y
78CONFIG_ATA=y
79CONFIG_SATA_FSL=y
80CONFIG_SATA_SIL24=y
81CONFIG_NETDEVICES=y
82CONFIG_DUMMY=y
83CONFIG_MARVELL_PHY=y
84CONFIG_DAVICOM_PHY=y
85CONFIG_CICADA_PHY=y
86CONFIG_VITESSE_PHY=y
87CONFIG_FIXED_PHY=y
88CONFIG_NET_ETHERNET=y
89CONFIG_FS_ENET=y
90CONFIG_E1000E=y
91CONFIG_FSL_PQ_MDIO=y
92CONFIG_INPUT_FF_MEMLESS=m
93# CONFIG_INPUT_MOUSEDEV is not set
94# CONFIG_INPUT_KEYBOARD is not set
95# CONFIG_INPUT_MOUSE is not set
96CONFIG_SERIO_LIBPS2=y
97CONFIG_SERIAL_8250=y
98CONFIG_SERIAL_8250_CONSOLE=y
99CONFIG_SERIAL_8250_NR_UARTS=2
100CONFIG_SERIAL_8250_RUNTIME_UARTS=2
101CONFIG_SERIAL_8250_EXTENDED=y
102CONFIG_SERIAL_8250_MANY_PORTS=y
103CONFIG_SERIAL_8250_DETECT_IRQ=y
104CONFIG_SERIAL_8250_RSA=y
105CONFIG_SERIAL_QE=m
106CONFIG_HW_RANDOM=y
107CONFIG_NVRAM=y
108CONFIG_I2C=y
109CONFIG_I2C_CPM=m
110CONFIG_I2C_MPC=y
111# CONFIG_HWMON is not set
112CONFIG_VIDEO_OUTPUT_CONTROL=y
113CONFIG_SOUND=y
114CONFIG_SND=y
115CONFIG_SND_MIXER_OSS=y
116CONFIG_SND_PCM_OSS=y
117# CONFIG_SND_SUPPORT_OLD_API is not set
118CONFIG_EDAC=y
119CONFIG_EDAC_MM_EDAC=y
120CONFIG_RTC_CLASS=y
121CONFIG_RTC_DRV_CMOS=y
122CONFIG_DMADEVICES=y
123CONFIG_FSL_DMA=y
124# CONFIG_NET_DMA is not set
125CONFIG_STAGING=y
126# CONFIG_STAGING_EXCLUDE_BUILD is not set
127CONFIG_EXT2_FS=y
128CONFIG_EXT3_FS=y
129# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
130CONFIG_ISO9660_FS=m
131CONFIG_JOLIET=y
132CONFIG_ZISOFS=y
133CONFIG_UDF_FS=m
134CONFIG_MSDOS_FS=m
135CONFIG_VFAT_FS=y
136CONFIG_NTFS_FS=y
137CONFIG_PROC_KCORE=y
138CONFIG_TMPFS=y
139CONFIG_ADFS_FS=m
140CONFIG_AFFS_FS=m
141CONFIG_HFS_FS=m
142CONFIG_HFSPLUS_FS=m
143CONFIG_BEFS_FS=m
144CONFIG_BFS_FS=m
145CONFIG_EFS_FS=m
146CONFIG_CRAMFS=y
147CONFIG_VXFS_FS=m
148CONFIG_HPFS_FS=m
149CONFIG_QNX4FS_FS=m
150CONFIG_SYSV_FS=m
151CONFIG_UFS_FS=m
152CONFIG_NFS_FS=y
153CONFIG_NFS_V3=y
154CONFIG_NFS_V4=y
155CONFIG_ROOT_NFS=y
156CONFIG_NFSD=y
157CONFIG_PARTITION_ADVANCED=y
158CONFIG_MAC_PARTITION=y
159CONFIG_CRC_T10DIF=y
160CONFIG_FRAME_WARN=8092
161CONFIG_DEBUG_FS=y
162CONFIG_DEBUG_KERNEL=y
163CONFIG_DETECT_HUNG_TASK=y
164# CONFIG_DEBUG_BUGVERBOSE is not set
165CONFIG_DEBUG_INFO=y
166# CONFIG_RCU_CPU_STALL_DETECTOR is not set
167CONFIG_SYSCTL_SYSCALL_CHECK=y
168CONFIG_VIRQ_DEBUG=y
169CONFIG_CRYPTO_PCBC=m
170CONFIG_CRYPTO_SHA256=y
171CONFIG_CRYPTO_SHA512=y
172CONFIG_CRYPTO_AES=y
173# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
index 036bfb2d18cd..0db9ba0423ff 100644
--- a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
+++ b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
@@ -89,6 +89,11 @@ CONFIG_I2C_MPC=y
89CONFIG_VIDEO_OUTPUT_CONTROL=y 89CONFIG_VIDEO_OUTPUT_CONTROL=y
90CONFIG_FB=y 90CONFIG_FB=y
91CONFIG_FB_FSL_DIU=y 91CONFIG_FB_FSL_DIU=y
92CONFIG_VGACON_SOFT_SCROLLBACK=y
93CONFIG_FRAMEBUFFER_CONSOLE=y
94CONFIG_FONTS=y
95CONFIG_FONT_8x8=y
96CONFIG_FONT_8x16=y
92CONFIG_SOUND=y 97CONFIG_SOUND=y
93CONFIG_SND=y 98CONFIG_SND=y
94CONFIG_SND_MIXER_OSS=y 99CONFIG_SND_MIXER_OSS=y
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
new file mode 100644
index 000000000000..10562a5c65b9
--- /dev/null
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -0,0 +1,187 @@
1CONFIG_PPC_85xx=y
2CONFIG_SMP=y
3CONFIG_NR_CPUS=8
4CONFIG_EXPERIMENTAL=y
5CONFIG_SYSVIPC=y
6CONFIG_POSIX_MQUEUE=y
7CONFIG_BSD_PROCESS_ACCT=y
8CONFIG_AUDIT=y
9CONFIG_SPARSE_IRQ=y
10CONFIG_RCU_TRACE=y
11CONFIG_IKCONFIG=y
12CONFIG_IKCONFIG_PROC=y
13CONFIG_LOG_BUF_SHIFT=14
14CONFIG_BLK_DEV_INITRD=y
15# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
16CONFIG_KALLSYMS_ALL=y
17CONFIG_KALLSYMS_EXTRA_PASS=y
18CONFIG_EMBEDDED=y
19CONFIG_PERF_EVENTS=y
20CONFIG_SLAB=y
21CONFIG_MODULES=y
22CONFIG_MODULE_UNLOAD=y
23CONFIG_MODULE_FORCE_UNLOAD=y
24CONFIG_MODVERSIONS=y
25# CONFIG_BLK_DEV_BSG is not set
26CONFIG_P2040_RDB=y
27CONFIG_P3041_DS=y
28CONFIG_P4080_DS=y
29CONFIG_P5020_DS=y
30CONFIG_HIGHMEM=y
31CONFIG_NO_HZ=y
32CONFIG_HIGH_RES_TIMERS=y
33# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
34CONFIG_BINFMT_MISC=m
35CONFIG_KEXEC=y
36CONFIG_FORCE_MAX_ZONEORDER=13
37CONFIG_FSL_LBC=y
38CONFIG_PCI=y
39CONFIG_PCIEPORTBUS=y
40# CONFIG_PCIEASPM is not set
41CONFIG_NET=y
42CONFIG_PACKET=y
43CONFIG_UNIX=y
44CONFIG_XFRM_USER=y
45CONFIG_XFRM_SUB_POLICY=y
46CONFIG_XFRM_STATISTICS=y
47CONFIG_NET_KEY=y
48CONFIG_NET_KEY_MIGRATE=y
49CONFIG_INET=y
50CONFIG_IP_MULTICAST=y
51CONFIG_IP_ADVANCED_ROUTER=y
52CONFIG_IP_MULTIPLE_TABLES=y
53CONFIG_IP_ROUTE_MULTIPATH=y
54CONFIG_IP_ROUTE_VERBOSE=y
55CONFIG_IP_PNP=y
56CONFIG_IP_PNP_DHCP=y
57CONFIG_IP_PNP_BOOTP=y
58CONFIG_IP_PNP_RARP=y
59CONFIG_NET_IPIP=y
60CONFIG_IP_MROUTE=y
61CONFIG_IP_PIMSM_V1=y
62CONFIG_IP_PIMSM_V2=y
63CONFIG_ARPD=y
64CONFIG_INET_AH=y
65CONFIG_INET_ESP=y
66CONFIG_INET_IPCOMP=y
67# CONFIG_INET_LRO is not set
68CONFIG_IPV6=y
69CONFIG_IP_SCTP=m
70CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
71CONFIG_MTD=y
72CONFIG_MTD_PARTITIONS=y
73CONFIG_MTD_CMDLINE_PARTS=y
74CONFIG_MTD_CHAR=y
75CONFIG_MTD_BLOCK=y
76CONFIG_MTD_CFI=y
77CONFIG_MTD_CFI_AMDSTD=y
78CONFIG_MTD_PHYSMAP_OF=y
79CONFIG_MTD_M25P80=y
80CONFIG_PROC_DEVICETREE=y
81CONFIG_BLK_DEV_LOOP=y
82CONFIG_BLK_DEV_RAM=y
83CONFIG_BLK_DEV_RAM_SIZE=131072
84CONFIG_MISC_DEVICES=y
85CONFIG_BLK_DEV_SD=y
86CONFIG_CHR_DEV_ST=y
87CONFIG_BLK_DEV_SR=y
88CONFIG_CHR_DEV_SG=y
89CONFIG_SCSI_MULTI_LUN=y
90CONFIG_SCSI_LOGGING=y
91CONFIG_SCSI_SYM53C8XX_2=y
92CONFIG_ATA=y
93CONFIG_SATA_AHCI=y
94CONFIG_SATA_FSL=y
95CONFIG_SATA_SIL24=y
96CONFIG_SATA_SIL=y
97CONFIG_PATA_SIL680=y
98CONFIG_NETDEVICES=y
99CONFIG_VITESSE_PHY=y
100CONFIG_FIXED_PHY=y
101CONFIG_NET_ETHERNET=y
102CONFIG_E1000=y
103CONFIG_E1000E=y
104CONFIG_FSL_PQ_MDIO=y
105# CONFIG_INPUT_MOUSEDEV is not set
106# CONFIG_INPUT_KEYBOARD is not set
107# CONFIG_INPUT_MOUSE is not set
108CONFIG_SERIO_LIBPS2=y
109# CONFIG_LEGACY_PTYS is not set
110CONFIG_PPC_EPAPR_HV_BYTECHAN=y
111CONFIG_SERIAL_8250=y
112CONFIG_SERIAL_8250_CONSOLE=y
113CONFIG_SERIAL_8250_EXTENDED=y
114CONFIG_SERIAL_8250_MANY_PORTS=y
115CONFIG_SERIAL_8250_DETECT_IRQ=y
116CONFIG_SERIAL_8250_RSA=y
117CONFIG_HW_RANDOM=y
118CONFIG_NVRAM=y
119CONFIG_I2C=y
120CONFIG_I2C_MPC=y
121CONFIG_SPI=y
122CONFIG_SPI_GPIO=y
123CONFIG_SPI_FSL_SPI=y
124CONFIG_SPI_FSL_ESPI=y
125# CONFIG_HWMON is not set
126CONFIG_VIDEO_OUTPUT_CONTROL=y
127CONFIG_USB_HID=m
128CONFIG_USB=y
129CONFIG_USB_DEVICEFS=y
130CONFIG_USB_MON=y
131CONFIG_USB_EHCI_HCD=y
132CONFIG_USB_EHCI_FSL=y
133CONFIG_USB_OHCI_HCD=y
134CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
135CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
136CONFIG_USB_STORAGE=y
137CONFIG_MMC=y
138CONFIG_MMC_SDHCI=y
139CONFIG_MMC_SDHCI_OF=y
140CONFIG_MMC_SDHCI_OF_ESDHC=y
141CONFIG_EDAC=y
142CONFIG_EDAC_MM_EDAC=y
143CONFIG_EDAC_MPC85XX=y
144CONFIG_RTC_CLASS=y
145CONFIG_RTC_DRV_DS3232=y
146CONFIG_RTC_DRV_CMOS=y
147CONFIG_UIO=y
148CONFIG_STAGING=y
149# CONFIG_STAGING_EXCLUDE_BUILD is not set
150CONFIG_VIRT_DRIVERS=y
151CONFIG_FSL_HV_MANAGER=y
152CONFIG_EXT2_FS=y
153CONFIG_EXT3_FS=y
154# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
155CONFIG_ISO9660_FS=m
156CONFIG_JOLIET=y
157CONFIG_ZISOFS=y
158CONFIG_UDF_FS=m
159CONFIG_MSDOS_FS=m
160CONFIG_VFAT_FS=y
161CONFIG_NTFS_FS=y
162CONFIG_PROC_KCORE=y
163CONFIG_TMPFS=y
164CONFIG_JFFS2_FS=y
165CONFIG_CRAMFS=y
166CONFIG_NFS_FS=y
167CONFIG_NFS_V3=y
168CONFIG_NFS_V4=y
169CONFIG_ROOT_NFS=y
170CONFIG_NFSD=m
171CONFIG_PARTITION_ADVANCED=y
172CONFIG_MAC_PARTITION=y
173CONFIG_NLS_ISO8859_1=y
174CONFIG_NLS_UTF8=m
175CONFIG_MAGIC_SYSRQ=y
176CONFIG_DEBUG_KERNEL=y
177CONFIG_DEBUG_SHIRQ=y
178CONFIG_DETECT_HUNG_TASK=y
179CONFIG_DEBUG_INFO=y
180CONFIG_SYSCTL_SYSCALL_CHECK=y
181CONFIG_CRYPTO_NULL=y
182CONFIG_CRYPTO_PCBC=m
183CONFIG_CRYPTO_MD4=y
184CONFIG_CRYPTO_SHA256=y
185CONFIG_CRYPTO_SHA512=y
186CONFIG_CRYPTO_AES=y
187# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/e55xx_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index d32283555b53..d32283555b53 100644
--- a/arch/powerpc/configs/e55xx_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 96b89df7752a..fcd85d2c72dc 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -5,6 +5,7 @@ CONFIG_SYSVIPC=y
5CONFIG_POSIX_MQUEUE=y 5CONFIG_POSIX_MQUEUE=y
6CONFIG_BSD_PROCESS_ACCT=y 6CONFIG_BSD_PROCESS_ACCT=y
7CONFIG_AUDIT=y 7CONFIG_AUDIT=y
8CONFIG_SPARSE_IRQ=y
8CONFIG_IKCONFIG=y 9CONFIG_IKCONFIG=y
9CONFIG_IKCONFIG_PROC=y 10CONFIG_IKCONFIG_PROC=y
10CONFIG_LOG_BUF_SHIFT=14 11CONFIG_LOG_BUF_SHIFT=14
@@ -25,7 +26,9 @@ CONFIG_MPC85xx_MDS=y
25CONFIG_MPC8536_DS=y 26CONFIG_MPC8536_DS=y
26CONFIG_MPC85xx_DS=y 27CONFIG_MPC85xx_DS=y
27CONFIG_MPC85xx_RDB=y 28CONFIG_MPC85xx_RDB=y
29CONFIG_P1010_RDB=y
28CONFIG_P1022_DS=y 30CONFIG_P1022_DS=y
31CONFIG_P1023_RDS=y
29CONFIG_SOCRATES=y 32CONFIG_SOCRATES=y
30CONFIG_KSI8560=y 33CONFIG_KSI8560=y
31CONFIG_XES_MPC85xx=y 34CONFIG_XES_MPC85xx=y
@@ -44,7 +47,6 @@ CONFIG_NO_HZ=y
44CONFIG_HIGH_RES_TIMERS=y 47CONFIG_HIGH_RES_TIMERS=y
45CONFIG_BINFMT_MISC=m 48CONFIG_BINFMT_MISC=m
46CONFIG_MATH_EMULATION=y 49CONFIG_MATH_EMULATION=y
47CONFIG_SPARSE_IRQ=y
48CONFIG_FORCE_MAX_ZONEORDER=12 50CONFIG_FORCE_MAX_ZONEORDER=12
49CONFIG_PCI=y 51CONFIG_PCI=y
50CONFIG_PCI_MSI=y 52CONFIG_PCI_MSI=y
@@ -65,8 +67,6 @@ CONFIG_IP_PNP_DHCP=y
65CONFIG_IP_PNP_BOOTP=y 67CONFIG_IP_PNP_BOOTP=y
66CONFIG_IP_PNP_RARP=y 68CONFIG_IP_PNP_RARP=y
67CONFIG_NET_IPIP=y 69CONFIG_NET_IPIP=y
68CONFIG_NET_IPGRE=y
69CONFIG_NET_IPGRE_BROADCAST=y
70CONFIG_IP_MROUTE=y 70CONFIG_IP_MROUTE=y
71CONFIG_IP_PIMSM_V1=y 71CONFIG_IP_PIMSM_V1=y
72CONFIG_IP_PIMSM_V2=y 72CONFIG_IP_PIMSM_V2=y
@@ -128,6 +128,10 @@ CONFIG_VIDEO_OUTPUT_CONTROL=y
128CONFIG_FB=y 128CONFIG_FB=y
129CONFIG_FB_FSL_DIU=y 129CONFIG_FB_FSL_DIU=y
130# CONFIG_VGA_CONSOLE is not set 130# CONFIG_VGA_CONSOLE is not set
131CONFIG_FRAMEBUFFER_CONSOLE=y
132CONFIG_FONTS=y
133CONFIG_FONT_8x8=y
134CONFIG_FONT_8x16=y
131CONFIG_SOUND=y 135CONFIG_SOUND=y
132CONFIG_SND=y 136CONFIG_SND=y
133# CONFIG_SND_SUPPORT_OLD_API is not set 137# CONFIG_SND_SUPPORT_OLD_API is not set
@@ -170,7 +174,6 @@ CONFIG_FSL_DMA=y
170CONFIG_EXT2_FS=y 174CONFIG_EXT2_FS=y
171CONFIG_EXT3_FS=y 175CONFIG_EXT3_FS=y
172# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 176# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
173CONFIG_INOTIFY=y
174CONFIG_ISO9660_FS=m 177CONFIG_ISO9660_FS=m
175CONFIG_JOLIET=y 178CONFIG_JOLIET=y
176CONFIG_ZISOFS=y 179CONFIG_ZISOFS=y
@@ -205,7 +208,6 @@ CONFIG_DEBUG_FS=y
205CONFIG_DEBUG_KERNEL=y 208CONFIG_DEBUG_KERNEL=y
206CONFIG_DETECT_HUNG_TASK=y 209CONFIG_DETECT_HUNG_TASK=y
207CONFIG_DEBUG_INFO=y 210CONFIG_DEBUG_INFO=y
208# CONFIG_RCU_CPU_STALL_DETECTOR is not set
209CONFIG_SYSCTL_SYSCALL_CHECK=y 211CONFIG_SYSCTL_SYSCALL_CHECK=y
210CONFIG_VIRQ_DEBUG=y 212CONFIG_VIRQ_DEBUG=y
211CONFIG_CRYPTO_PCBC=m 213CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index de65841aa04e..908c941fc24c 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -7,6 +7,7 @@ CONFIG_SYSVIPC=y
7CONFIG_POSIX_MQUEUE=y 7CONFIG_POSIX_MQUEUE=y
8CONFIG_BSD_PROCESS_ACCT=y 8CONFIG_BSD_PROCESS_ACCT=y
9CONFIG_AUDIT=y 9CONFIG_AUDIT=y
10CONFIG_SPARSE_IRQ=y
10CONFIG_IKCONFIG=y 11CONFIG_IKCONFIG=y
11CONFIG_IKCONFIG_PROC=y 12CONFIG_IKCONFIG_PROC=y
12CONFIG_LOG_BUF_SHIFT=14 13CONFIG_LOG_BUF_SHIFT=14
@@ -28,6 +29,7 @@ CONFIG_MPC8536_DS=y
28CONFIG_MPC85xx_DS=y 29CONFIG_MPC85xx_DS=y
29CONFIG_MPC85xx_RDB=y 30CONFIG_MPC85xx_RDB=y
30CONFIG_P1022_DS=y 31CONFIG_P1022_DS=y
32CONFIG_P1023_RDS=y
31CONFIG_SOCRATES=y 33CONFIG_SOCRATES=y
32CONFIG_KSI8560=y 34CONFIG_KSI8560=y
33CONFIG_XES_MPC85xx=y 35CONFIG_XES_MPC85xx=y
@@ -46,7 +48,6 @@ CONFIG_NO_HZ=y
46CONFIG_HIGH_RES_TIMERS=y 48CONFIG_HIGH_RES_TIMERS=y
47CONFIG_BINFMT_MISC=m 49CONFIG_BINFMT_MISC=m
48CONFIG_MATH_EMULATION=y 50CONFIG_MATH_EMULATION=y
49CONFIG_SPARSE_IRQ=y
50CONFIG_FORCE_MAX_ZONEORDER=12 51CONFIG_FORCE_MAX_ZONEORDER=12
51CONFIG_PCI=y 52CONFIG_PCI=y
52CONFIG_PCI_MSI=y 53CONFIG_PCI_MSI=y
@@ -67,8 +68,6 @@ CONFIG_IP_PNP_DHCP=y
67CONFIG_IP_PNP_BOOTP=y 68CONFIG_IP_PNP_BOOTP=y
68CONFIG_IP_PNP_RARP=y 69CONFIG_IP_PNP_RARP=y
69CONFIG_NET_IPIP=y 70CONFIG_NET_IPIP=y
70CONFIG_NET_IPGRE=y
71CONFIG_NET_IPGRE_BROADCAST=y
72CONFIG_IP_MROUTE=y 71CONFIG_IP_MROUTE=y
73CONFIG_IP_PIMSM_V1=y 72CONFIG_IP_PIMSM_V1=y
74CONFIG_IP_PIMSM_V2=y 73CONFIG_IP_PIMSM_V2=y
@@ -130,6 +129,10 @@ CONFIG_VIDEO_OUTPUT_CONTROL=y
130CONFIG_FB=y 129CONFIG_FB=y
131CONFIG_FB_FSL_DIU=y 130CONFIG_FB_FSL_DIU=y
132# CONFIG_VGA_CONSOLE is not set 131# CONFIG_VGA_CONSOLE is not set
132CONFIG_FRAMEBUFFER_CONSOLE=y
133CONFIG_FONTS=y
134CONFIG_FONT_8x8=y
135CONFIG_FONT_8x16=y
133CONFIG_SOUND=y 136CONFIG_SOUND=y
134CONFIG_SND=y 137CONFIG_SND=y
135# CONFIG_SND_SUPPORT_OLD_API is not set 138# CONFIG_SND_SUPPORT_OLD_API is not set
@@ -172,7 +175,6 @@ CONFIG_FSL_DMA=y
172CONFIG_EXT2_FS=y 175CONFIG_EXT2_FS=y
173CONFIG_EXT3_FS=y 176CONFIG_EXT3_FS=y
174# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 177# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
175CONFIG_INOTIFY=y
176CONFIG_ISO9660_FS=m 178CONFIG_ISO9660_FS=m
177CONFIG_JOLIET=y 179CONFIG_JOLIET=y
178CONFIG_ZISOFS=y 180CONFIG_ZISOFS=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 76736017cd34..84a685a505fe 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -176,12 +176,19 @@ CONFIG_CHR_DEV_SG=y
176CONFIG_SCSI_MULTI_LUN=y 176CONFIG_SCSI_MULTI_LUN=y
177CONFIG_SCSI_CONSTANTS=y 177CONFIG_SCSI_CONSTANTS=y
178CONFIG_SCSI_FC_ATTRS=y 178CONFIG_SCSI_FC_ATTRS=y
179CONFIG_SCSI_SAS_ATTRS=m
180CONFIG_SCSI_CXGB3_ISCSI=m
181CONFIG_SCSI_CXGB4_ISCSI=m
182CONFIG_SCSI_BNX2_ISCSI=m
183CONFIG_BE2ISCSI=m
184CONFIG_SCSI_MPT2SAS=m
179CONFIG_SCSI_IBMVSCSI=y 185CONFIG_SCSI_IBMVSCSI=y
180CONFIG_SCSI_IBMVFC=m 186CONFIG_SCSI_IBMVFC=m
181CONFIG_SCSI_SYM53C8XX_2=y 187CONFIG_SCSI_SYM53C8XX_2=y
182CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 188CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
183CONFIG_SCSI_IPR=y 189CONFIG_SCSI_IPR=y
184CONFIG_SCSI_QLA_FC=m 190CONFIG_SCSI_QLA_FC=m
191CONFIG_SCSI_QLA_ISCSI=m
185CONFIG_SCSI_LPFC=m 192CONFIG_SCSI_LPFC=m
186CONFIG_ATA=y 193CONFIG_ATA=y
187CONFIG_SATA_SIL24=y 194CONFIG_SATA_SIL24=y
@@ -235,11 +242,13 @@ CONFIG_ACENIC_OMIT_TIGON_I=y
235CONFIG_E1000=y 242CONFIG_E1000=y
236CONFIG_E1000E=y 243CONFIG_E1000E=y
237CONFIG_TIGON3=y 244CONFIG_TIGON3=y
245CONFIG_BNX2=m
238CONFIG_SPIDER_NET=m 246CONFIG_SPIDER_NET=m
239CONFIG_GELIC_NET=m 247CONFIG_GELIC_NET=m
240CONFIG_GELIC_WIRELESS=y 248CONFIG_GELIC_WIRELESS=y
241CONFIG_CHELSIO_T1=m 249CONFIG_CHELSIO_T1=m
242CONFIG_CHELSIO_T3=m 250CONFIG_CHELSIO_T3=m
251CONFIG_CHELSIO_T4=m
243CONFIG_EHEA=m 252CONFIG_EHEA=m
244CONFIG_IXGBE=m 253CONFIG_IXGBE=m
245CONFIG_IXGB=m 254CONFIG_IXGB=m
@@ -248,6 +257,8 @@ CONFIG_MYRI10GE=m
248CONFIG_NETXEN_NIC=m 257CONFIG_NETXEN_NIC=m
249CONFIG_PASEMI_MAC=y 258CONFIG_PASEMI_MAC=y
250CONFIG_MLX4_EN=m 259CONFIG_MLX4_EN=m
260CONFIG_QLGE=m
261CONFIG_BE2NET=m
251CONFIG_ISERIES_VETH=m 262CONFIG_ISERIES_VETH=m
252CONFIG_PPP=m 263CONFIG_PPP=m
253CONFIG_PPP_ASYNC=m 264CONFIG_PPP_ASYNC=m
@@ -330,6 +341,8 @@ CONFIG_INFINIBAND_USER_MAD=m
330CONFIG_INFINIBAND_USER_ACCESS=m 341CONFIG_INFINIBAND_USER_ACCESS=m
331CONFIG_INFINIBAND_MTHCA=m 342CONFIG_INFINIBAND_MTHCA=m
332CONFIG_INFINIBAND_EHCA=m 343CONFIG_INFINIBAND_EHCA=m
344CONFIG_INFINIBAND_CXGB3=m
345CONFIG_INFINIBAND_CXGB4=m
333CONFIG_MLX4_INFINIBAND=m 346CONFIG_MLX4_INFINIBAND=m
334CONFIG_INFINIBAND_IPOIB=m 347CONFIG_INFINIBAND_IPOIB=m
335CONFIG_INFINIBAND_IPOIB_CM=y 348CONFIG_INFINIBAND_IPOIB_CM=y
@@ -430,11 +443,12 @@ CONFIG_NLS_KOI8_U=m
430CONFIG_CRC_T10DIF=y 443CONFIG_CRC_T10DIF=y
431CONFIG_MAGIC_SYSRQ=y 444CONFIG_MAGIC_SYSRQ=y
432CONFIG_DEBUG_KERNEL=y 445CONFIG_DEBUG_KERNEL=y
446CONFIG_LOCKUP_DETECTOR=y
447CONFIG_DETECT_HUNG_TASK=y
433CONFIG_DEBUG_MUTEXES=y 448CONFIG_DEBUG_MUTEXES=y
434# CONFIG_RCU_CPU_STALL_DETECTOR is not set 449# CONFIG_RCU_CPU_STALL_DETECTOR is not set
435CONFIG_LATENCYTOP=y 450CONFIG_LATENCYTOP=y
436CONFIG_SYSCTL_SYSCALL_CHECK=y 451CONFIG_SYSCTL_SYSCALL_CHECK=y
437CONFIG_IRQSOFF_TRACER=y
438CONFIG_SCHED_TRACER=y 452CONFIG_SCHED_TRACER=y
439CONFIG_BLK_DEV_IO_TRACE=y 453CONFIG_BLK_DEV_IO_TRACE=y
440CONFIG_DEBUG_STACKOVERFLOW=y 454CONFIG_DEBUG_STACKOVERFLOW=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 80bc5de7ee1d..96a58b709705 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -149,6 +149,7 @@ CONFIG_SCSI_CXGB3_ISCSI=m
149CONFIG_SCSI_CXGB4_ISCSI=m 149CONFIG_SCSI_CXGB4_ISCSI=m
150CONFIG_SCSI_BNX2_ISCSI=m 150CONFIG_SCSI_BNX2_ISCSI=m
151CONFIG_BE2ISCSI=m 151CONFIG_BE2ISCSI=m
152CONFIG_SCSI_MPT2SAS=m
152CONFIG_SCSI_IBMVSCSI=y 153CONFIG_SCSI_IBMVSCSI=y
153CONFIG_SCSI_IBMVFC=m 154CONFIG_SCSI_IBMVFC=m
154CONFIG_SCSI_SYM53C8XX_2=y 155CONFIG_SCSI_SYM53C8XX_2=y
@@ -320,6 +321,8 @@ CONFIG_NLS_ISO8859_1=y
320CONFIG_CRC_T10DIF=y 321CONFIG_CRC_T10DIF=y
321CONFIG_MAGIC_SYSRQ=y 322CONFIG_MAGIC_SYSRQ=y
322CONFIG_DEBUG_KERNEL=y 323CONFIG_DEBUG_KERNEL=y
324CONFIG_LOCKUP_DETECTOR=y
325CONFIG_DETECT_HUNG_TASK=y
323# CONFIG_RCU_CPU_STALL_DETECTOR is not set 326# CONFIG_RCU_CPU_STALL_DETECTOR is not set
324CONFIG_LATENCYTOP=y 327CONFIG_LATENCYTOP=y
325CONFIG_SYSCTL_SYSCALL_CHECK=y 328CONFIG_SYSCTL_SYSCALL_CHECK=y
diff --git a/arch/powerpc/include/asm/8253pit.h b/arch/powerpc/include/asm/8253pit.h
deleted file mode 100644
index a71c9c1455a7..000000000000
--- a/arch/powerpc/include/asm/8253pit.h
+++ /dev/null
@@ -1,3 +0,0 @@
1/*
2 * 8253/8254 Programmable Interval Timer
3 */
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index b8f152ece025..e2a4c26ad377 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -181,21 +181,21 @@ static __inline__ int atomic_dec_return(atomic_t *v)
181#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 181#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
182 182
183/** 183/**
184 * atomic_add_unless - add unless the number is a given value 184 * __atomic_add_unless - add unless the number is a given value
185 * @v: pointer of type atomic_t 185 * @v: pointer of type atomic_t
186 * @a: the amount to add to v... 186 * @a: the amount to add to v...
187 * @u: ...unless v is equal to u. 187 * @u: ...unless v is equal to u.
188 * 188 *
189 * Atomically adds @a to @v, so long as it was not @u. 189 * Atomically adds @a to @v, so long as it was not @u.
190 * Returns non-zero if @v was not @u, and zero otherwise. 190 * Returns the old value of @v.
191 */ 191 */
192static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) 192static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
193{ 193{
194 int t; 194 int t;
195 195
196 __asm__ __volatile__ ( 196 __asm__ __volatile__ (
197 PPC_RELEASE_BARRIER 197 PPC_RELEASE_BARRIER
198"1: lwarx %0,0,%1 # atomic_add_unless\n\ 198"1: lwarx %0,0,%1 # __atomic_add_unless\n\
199 cmpw 0,%0,%3 \n\ 199 cmpw 0,%0,%3 \n\
200 beq- 2f \n\ 200 beq- 2f \n\
201 add %0,%2,%0 \n" 201 add %0,%2,%0 \n"
@@ -209,10 +209,9 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
209 : "r" (&v->counter), "r" (a), "r" (u) 209 : "r" (&v->counter), "r" (a), "r" (u)
210 : "cc", "memory"); 210 : "cc", "memory");
211 211
212 return t != u; 212 return t;
213} 213}
214 214
215#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
216 215
217#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0) 216#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
218#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0) 217#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
@@ -444,7 +443,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
444 * @u: ...unless v is equal to u. 443 * @u: ...unless v is equal to u.
445 * 444 *
446 * Atomically adds @a to @v, so long as it was not @u. 445 * Atomically adds @a to @v, so long as it was not @u.
447 * Returns non-zero if @v was not @u, and zero otherwise. 446 * Returns the old value of @v.
448 */ 447 */
449static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) 448static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
450{ 449{
@@ -452,7 +451,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
452 451
453 __asm__ __volatile__ ( 452 __asm__ __volatile__ (
454 PPC_RELEASE_BARRIER 453 PPC_RELEASE_BARRIER
455"1: ldarx %0,0,%1 # atomic_add_unless\n\ 454"1: ldarx %0,0,%1 # __atomic_add_unless\n\
456 cmpd 0,%0,%3 \n\ 455 cmpd 0,%0,%3 \n\
457 beq- 2f \n\ 456 beq- 2f \n\
458 add %0,%2,%0 \n" 457 add %0,%2,%0 \n"
@@ -470,11 +469,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
470 469
471#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 470#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
472 471
473#else /* __powerpc64__ */
474#include <asm-generic/atomic64.h>
475
476#endif /* __powerpc64__ */ 472#endif /* __powerpc64__ */
477 473
478#include <asm-generic/atomic-long.h>
479#endif /* __KERNEL__ */ 474#endif /* __KERNEL__ */
480#endif /* _ASM_POWERPC_ATOMIC_H_ */ 475#endif /* _ASM_POWERPC_ATOMIC_H_ */
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index f18c6d9b9510..e137afcc10fa 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -327,10 +327,7 @@ unsigned long find_next_bit_le(const void *addr,
327 unsigned long size, unsigned long offset); 327 unsigned long size, unsigned long offset);
328/* Bitmap functions for the ext2 filesystem */ 328/* Bitmap functions for the ext2 filesystem */
329 329
330#define ext2_set_bit_atomic(lock, nr, addr) \ 330#include <asm-generic/bitops/ext2-atomic-setbit.h>
331 test_and_set_bit_le((nr), (unsigned long*)addr)
332#define ext2_clear_bit_atomic(lock, nr, addr) \
333 test_and_clear_bit_le((nr), (unsigned long*)addr)
334 331
335#include <asm-generic/bitops/sched.h> 332#include <asm-generic/bitops/sched.h>
336 333
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index c0d842cfd012..e30442c539ce 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -179,8 +179,9 @@ extern const char *powerpc_base_platform;
179#define LONG_ASM_CONST(x) 0 179#define LONG_ASM_CONST(x) 0
180#endif 180#endif
181 181
182 182#define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000200000000)
183#define CPU_FTR_HVMODE_206 LONG_ASM_CONST(0x0000000800000000) 183#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000400000000)
184#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000800000000)
184#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000001000000000) 185#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000001000000000)
185#define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000) 186#define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000)
186#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000) 187#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000)
@@ -401,9 +402,10 @@ extern const char *powerpc_base_platform;
401 CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ | \ 402 CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ | \
402 CPU_FTR_STCX_CHECKS_ADDRESS) 403 CPU_FTR_STCX_CHECKS_ADDRESS)
403#define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 404#define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
404 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 405 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \
405 CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ 406 CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
406 CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS) 407 CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \
408 CPU_FTR_HVMODE)
407#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 409#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
408 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 410 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
409 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 411 CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -417,13 +419,13 @@ extern const char *powerpc_base_platform;
417 CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \ 419 CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
418 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR) 420 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR)
419#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 421#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
420 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_HVMODE_206 |\ 422 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
421 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 423 CPU_FTR_MMCRA | CPU_FTR_SMT | \
422 CPU_FTR_COHERENT_ICACHE | \ 424 CPU_FTR_COHERENT_ICACHE | \
423 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ 425 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
424 CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ 426 CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
425 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ 427 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
426 CPU_FTR_ICSWX | CPU_FTR_CFAR) 428 CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE)
427#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 429#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
428 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 430 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
429 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ 431 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 9c70d0ca96d4..efa74ac44a35 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -18,7 +18,7 @@
18#include <asm/ppc-opcode.h> 18#include <asm/ppc-opcode.h>
19 19
20#define PPC_DBELL_MSG_BRDCAST (0x04000000) 20#define PPC_DBELL_MSG_BRDCAST (0x04000000)
21#define PPC_DBELL_TYPE(x) (((x) & 0xf) << 28) 21#define PPC_DBELL_TYPE(x) (((x) & 0xf) << (63-36))
22enum ppc_dbell { 22enum ppc_dbell {
23 PPC_DBELL = 0, /* doorbell */ 23 PPC_DBELL = 0, /* doorbell */
24 PPC_DBELL_CRIT = 1, /* critical doorbell */ 24 PPC_DBELL_CRIT = 1, /* critical doorbell */
diff --git a/arch/powerpc/include/asm/ehv_pic.h b/arch/powerpc/include/asm/ehv_pic.h
new file mode 100644
index 000000000000..a9e1f4f796f6
--- /dev/null
+++ b/arch/powerpc/include/asm/ehv_pic.h
@@ -0,0 +1,40 @@
1/*
2 * EHV_PIC private definitions and structure.
3 *
4 * Copyright 2008-2010 Freescale Semiconductor, Inc.
5 *
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
9 */
10#ifndef __EHV_PIC_H__
11#define __EHV_PIC_H__
12
13#include <linux/irq.h>
14
15#define NR_EHV_PIC_INTS 1024
16
17#define EHV_PIC_INFO(name) EHV_PIC_##name
18
19#define EHV_PIC_VECPRI_POLARITY_NEGATIVE 0
20#define EHV_PIC_VECPRI_POLARITY_POSITIVE 1
21#define EHV_PIC_VECPRI_SENSE_EDGE 0
22#define EHV_PIC_VECPRI_SENSE_LEVEL 0x2
23#define EHV_PIC_VECPRI_POLARITY_MASK 0x1
24#define EHV_PIC_VECPRI_SENSE_MASK 0x2
25
26struct ehv_pic {
27 /* The remapper for this EHV_PIC */
28 struct irq_host *irqhost;
29
30 /* The "linux" controller struct */
31 struct irq_chip hc_irq;
32
33 /* core int flag */
34 int coreint_flag;
35};
36
37void ehv_pic_init(void);
38unsigned int ehv_pic_get_irq(void);
39
40#endif /* __EHV_PIC_H__ */
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 2b917c69ed15..3bf9cca35147 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -267,7 +267,7 @@ extern int ucache_bsize;
267struct linux_binprm; 267struct linux_binprm;
268extern int arch_setup_additional_pages(struct linux_binprm *bprm, 268extern int arch_setup_additional_pages(struct linux_binprm *bprm,
269 int uses_interp); 269 int uses_interp);
270#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b); 270#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b)
271 271
272/* 1GB for 64bit, 8MB for 32bit */ 272/* 1GB for 64bit, 8MB for 32bit */
273#define STACK_RND_MASK (is_32bit_task() ? \ 273#define STACK_RND_MASK (is_32bit_task() ? \
@@ -298,7 +298,7 @@ do { \
298 NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \ 298 NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \
299 NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \ 299 NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \
300 NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \ 300 NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \
301 VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base) \ 301 VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base); \
302} while (0) 302} while (0)
303 303
304/* PowerPC64 relocations defined by the ABIs */ 304/* PowerPC64 relocations defined by the ABIs */
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
index 45921672b97a..63f2a22e9954 100644
--- a/arch/powerpc/include/asm/emulated_ops.h
+++ b/arch/powerpc/include/asm/emulated_ops.h
@@ -18,7 +18,7 @@
18#ifndef _ASM_POWERPC_EMULATED_OPS_H 18#ifndef _ASM_POWERPC_EMULATED_OPS_H
19#define _ASM_POWERPC_EMULATED_OPS_H 19#define _ASM_POWERPC_EMULATED_OPS_H
20 20
21#include <asm/atomic.h> 21#include <linux/atomic.h>
22#include <linux/perf_event.h> 22#include <linux/perf_event.h>
23 23
24 24
@@ -78,14 +78,14 @@ extern void ppc_warn_emulated_print(const char *type);
78#define PPC_WARN_EMULATED(type, regs) \ 78#define PPC_WARN_EMULATED(type, regs) \
79 do { \ 79 do { \
80 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \ 80 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \
81 1, 0, regs, 0); \ 81 1, regs, 0); \
82 __PPC_WARN_EMULATED(type); \ 82 __PPC_WARN_EMULATED(type); \
83 } while (0) 83 } while (0)
84 84
85#define PPC_WARN_ALIGNMENT(type, regs) \ 85#define PPC_WARN_ALIGNMENT(type, regs) \
86 do { \ 86 do { \
87 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \ 87 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \
88 1, 0, regs, regs->dar); \ 88 1, regs, regs->dar); \
89 __PPC_WARN_EMULATED(type); \ 89 __PPC_WARN_EMULATED(type); \
90 } while (0) 90 } while (0)
91 91
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
new file mode 100644
index 000000000000..f3b0c2cc9fea
--- /dev/null
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -0,0 +1,502 @@
1/*
2 * ePAPR hcall interface
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 *
6 * Author: Timur Tabi <timur@freescale.com>
7 *
8 * This file is provided under a dual BSD/GPL license. When using or
9 * redistributing this file, you may do so under either license.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions are met:
13 * * Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * * Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * * Neither the name of Freescale Semiconductor nor the
19 * names of its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written permission.
21 *
22 *
23 * ALTERNATIVELY, this software may be distributed under the terms of the
24 * GNU General Public License ("GPL") as published by the Free Software
25 * Foundation, either version 2 of that License or (at your option) any
26 * later version.
27 *
28 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
29 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
30 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
31 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
33 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
34 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
37 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 */
39
 40/* A "hypercall" is an "sc 1" instruction. This header file provides C
 41 * wrapper functions for the ePAPR hypervisor interface. It is intended
42 * for use by Linux device drivers and other operating systems.
43 *
44 * The hypercalls are implemented as inline assembly, rather than assembly
45 * language functions in a .S file, for optimization. It allows
46 * the caller to issue the hypercall instruction directly, improving both
47 * performance and memory footprint.
48 */
49
50#ifndef _EPAPR_HCALLS_H
51#define _EPAPR_HCALLS_H
52
53#include <linux/types.h>
54#include <linux/errno.h>
55#include <asm/byteorder.h>
56
57#define EV_BYTE_CHANNEL_SEND 1
58#define EV_BYTE_CHANNEL_RECEIVE 2
59#define EV_BYTE_CHANNEL_POLL 3
60#define EV_INT_SET_CONFIG 4
61#define EV_INT_GET_CONFIG 5
62#define EV_INT_SET_MASK 6
63#define EV_INT_GET_MASK 7
64#define EV_INT_IACK 9
65#define EV_INT_EOI 10
66#define EV_INT_SEND_IPI 11
67#define EV_INT_SET_TASK_PRIORITY 12
68#define EV_INT_GET_TASK_PRIORITY 13
69#define EV_DOORBELL_SEND 14
70#define EV_MSGSND 15
71#define EV_IDLE 16
72
73/* vendor ID: epapr */
74#define EV_LOCAL_VENDOR_ID 0 /* for private use */
75#define EV_EPAPR_VENDOR_ID 1
76#define EV_FSL_VENDOR_ID 2 /* Freescale Semiconductor */
77#define EV_IBM_VENDOR_ID 3 /* IBM */
78#define EV_GHS_VENDOR_ID 4 /* Green Hills Software */
79#define EV_ENEA_VENDOR_ID 5 /* Enea */
80#define EV_WR_VENDOR_ID 6 /* Wind River Systems */
81#define EV_AMCC_VENDOR_ID 7 /* Applied Micro Circuits */
82#define EV_KVM_VENDOR_ID 42 /* KVM */
83
84/* The max number of bytes that a byte channel can send or receive per call */
85#define EV_BYTE_CHANNEL_MAX_BYTES 16
86
87
88#define _EV_HCALL_TOKEN(id, num) (((id) << 16) | (num))
89#define EV_HCALL_TOKEN(hcall_num) _EV_HCALL_TOKEN(EV_EPAPR_VENDOR_ID, hcall_num)
90
91/* epapr error codes */
92#define EV_EPERM 1 /* Operation not permitted */
93#define EV_ENOENT 2 /* Entry Not Found */
 94#define EV_EIO 3 /* I/O error occurred */
95#define EV_EAGAIN 4 /* The operation had insufficient
96 * resources to complete and should be
97 * retried
98 */
99#define EV_ENOMEM 5 /* There was insufficient memory to
100 * complete the operation */
101#define EV_EFAULT 6 /* Bad guest address */
102#define EV_ENODEV 7 /* No such device */
103#define EV_EINVAL 8 /* An argument supplied to the hcall
104 was out of range or invalid */
 105#define EV_INTERNAL 9 /* An internal error occurred */
106#define EV_CONFIG 10 /* A configuration error was detected */
107#define EV_INVALID_STATE 11 /* The object is in an invalid state */
108#define EV_UNIMPLEMENTED 12 /* Unimplemented hypercall */
109#define EV_BUFFER_OVERFLOW 13 /* Caller-supplied buffer too small */
110
111/*
112 * Hypercall register clobber list
113 *
114 * These macros are used to define the list of clobbered registers during a
115 * hypercall. Technically, registers r0 and r3-r12 are always clobbered,
116 * but the gcc inline assembly syntax does not allow us to specify registers
117 * on the clobber list that are also on the input/output list. Therefore,
118 * the lists of clobbered registers depends on the number of register
 119 * parameters ("+r" and "=r") passed to the hypercall.
120 *
121 * Each assembly block should use one of the HCALL_CLOBBERSx macros. As a
122 * general rule, 'x' is the number of parameters passed to the assembly
123 * block *except* for r11.
124 *
125 * If you're not sure, just use the smallest value of 'x' that does not
126 * generate a compilation error. Because these are static inline functions,
127 * the compiler will only check the clobber list for a function if you
128 * compile code that calls that function.
129 *
130 * r3 and r11 are not included in any clobbers list because they are always
131 * listed as output registers.
132 *
133 * XER, CTR, and LR are currently listed as clobbers because it's uncertain
134 * whether they will be clobbered.
135 *
136 * Note that r11 can be used as an output parameter.
137*/
138
139/* List of common clobbered registers. Do not use this macro. */
140#define EV_HCALL_CLOBBERS "r0", "r12", "xer", "ctr", "lr", "cc"
141
142#define EV_HCALL_CLOBBERS8 EV_HCALL_CLOBBERS
143#define EV_HCALL_CLOBBERS7 EV_HCALL_CLOBBERS8, "r10"
144#define EV_HCALL_CLOBBERS6 EV_HCALL_CLOBBERS7, "r9"
145#define EV_HCALL_CLOBBERS5 EV_HCALL_CLOBBERS6, "r8"
146#define EV_HCALL_CLOBBERS4 EV_HCALL_CLOBBERS5, "r7"
147#define EV_HCALL_CLOBBERS3 EV_HCALL_CLOBBERS4, "r6"
148#define EV_HCALL_CLOBBERS2 EV_HCALL_CLOBBERS3, "r5"
149#define EV_HCALL_CLOBBERS1 EV_HCALL_CLOBBERS2, "r4"
150
151
152/*
153 * We use "uintptr_t" to define a register because it's guaranteed to be a
154 * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit
155 * platform.
156 *
157 * All registers are either input/output or output only. Registers that are
158 * initialized before making the hypercall are input/output. All
159 * input/output registers are represented with "+r". Output-only registers
160 * are represented with "=r". Do not specify any unused registers. The
161 * clobber list will tell the compiler that the hypercall modifies those
162 * registers, which is good enough.
163 */
164
165/**
166 * ev_int_set_config - configure the specified interrupt
167 * @interrupt: the interrupt number
168 * @config: configuration for this interrupt
169 * @priority: interrupt priority
170 * @destination: destination CPU number
171 *
172 * Returns 0 for success, or an error code.
173 */
174static inline unsigned int ev_int_set_config(unsigned int interrupt,
175 uint32_t config, unsigned int priority, uint32_t destination)
176{
177 register uintptr_t r11 __asm__("r11");
178 register uintptr_t r3 __asm__("r3");
179 register uintptr_t r4 __asm__("r4");
180 register uintptr_t r5 __asm__("r5");
181 register uintptr_t r6 __asm__("r6");
182
183 r11 = EV_HCALL_TOKEN(EV_INT_SET_CONFIG);
184 r3 = interrupt;
185 r4 = config;
186 r5 = priority;
187 r6 = destination;
188
189 __asm__ __volatile__ ("sc 1"
190 : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6)
191 : : EV_HCALL_CLOBBERS4
192 );
193
194 return r3;
195}
196
197/**
198 * ev_int_get_config - return the config of the specified interrupt
199 * @interrupt: the interrupt number
200 * @config: returned configuration for this interrupt
201 * @priority: returned interrupt priority
202 * @destination: returned destination CPU number
203 *
204 * Returns 0 for success, or an error code.
205 */
206static inline unsigned int ev_int_get_config(unsigned int interrupt,
207 uint32_t *config, unsigned int *priority, uint32_t *destination)
208{
209 register uintptr_t r11 __asm__("r11");
210 register uintptr_t r3 __asm__("r3");
211 register uintptr_t r4 __asm__("r4");
212 register uintptr_t r5 __asm__("r5");
213 register uintptr_t r6 __asm__("r6");
214
215 r11 = EV_HCALL_TOKEN(EV_INT_GET_CONFIG);
216 r3 = interrupt;
217
218 __asm__ __volatile__ ("sc 1"
219 : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5), "=r" (r6)
220 : : EV_HCALL_CLOBBERS4
221 );
222
223 *config = r4;
224 *priority = r5;
225 *destination = r6;
226
227 return r3;
228}
229
230/**
231 * ev_int_set_mask - sets the mask for the specified interrupt source
232 * @interrupt: the interrupt number
233 * @mask: 0=enable interrupts, 1=disable interrupts
234 *
235 * Returns 0 for success, or an error code.
236 */
237static inline unsigned int ev_int_set_mask(unsigned int interrupt,
238 unsigned int mask)
239{
240 register uintptr_t r11 __asm__("r11");
241 register uintptr_t r3 __asm__("r3");
242 register uintptr_t r4 __asm__("r4");
243
244 r11 = EV_HCALL_TOKEN(EV_INT_SET_MASK);
245 r3 = interrupt;
246 r4 = mask;
247
248 __asm__ __volatile__ ("sc 1"
249 : "+r" (r11), "+r" (r3), "+r" (r4)
250 : : EV_HCALL_CLOBBERS2
251 );
252
253 return r3;
254}
255
256/**
257 * ev_int_get_mask - returns the mask for the specified interrupt source
258 * @interrupt: the interrupt number
259 * @mask: returned mask for this interrupt (0=enabled, 1=disabled)
260 *
261 * Returns 0 for success, or an error code.
262 */
263static inline unsigned int ev_int_get_mask(unsigned int interrupt,
264 unsigned int *mask)
265{
266 register uintptr_t r11 __asm__("r11");
267 register uintptr_t r3 __asm__("r3");
268 register uintptr_t r4 __asm__("r4");
269
270 r11 = EV_HCALL_TOKEN(EV_INT_GET_MASK);
271 r3 = interrupt;
272
273 __asm__ __volatile__ ("sc 1"
274 : "+r" (r11), "+r" (r3), "=r" (r4)
275 : : EV_HCALL_CLOBBERS2
276 );
277
278 *mask = r4;
279
280 return r3;
281}
282
283/**
284 * ev_int_eoi - signal the end of interrupt processing
285 * @interrupt: the interrupt number
286 *
 287 * This function signals the end of processing for the specified
288 * interrupt, which must be the interrupt currently in service. By
289 * definition, this is also the highest-priority interrupt.
290 *
291 * Returns 0 for success, or an error code.
292 */
293static inline unsigned int ev_int_eoi(unsigned int interrupt)
294{
295 register uintptr_t r11 __asm__("r11");
296 register uintptr_t r3 __asm__("r3");
297
298 r11 = EV_HCALL_TOKEN(EV_INT_EOI);
299 r3 = interrupt;
300
301 __asm__ __volatile__ ("sc 1"
302 : "+r" (r11), "+r" (r3)
303 : : EV_HCALL_CLOBBERS1
304 );
305
306 return r3;
307}
308
309/**
310 * ev_byte_channel_send - send characters to a byte stream
311 * @handle: byte stream handle
312 * @count: (input) num of chars to send, (output) num chars sent
313 * @buffer: pointer to a 16-byte buffer
314 *
315 * @buffer must be at least 16 bytes long, because all 16 bytes will be
316 * read from memory into registers, even if count < 16.
317 *
318 * Returns 0 for success, or an error code.
319 */
320static inline unsigned int ev_byte_channel_send(unsigned int handle,
321 unsigned int *count, const char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
322{
323 register uintptr_t r11 __asm__("r11");
324 register uintptr_t r3 __asm__("r3");
325 register uintptr_t r4 __asm__("r4");
326 register uintptr_t r5 __asm__("r5");
327 register uintptr_t r6 __asm__("r6");
328 register uintptr_t r7 __asm__("r7");
329 register uintptr_t r8 __asm__("r8");
330 const uint32_t *p = (const uint32_t *) buffer;
331
332 r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_SEND);
333 r3 = handle;
334 r4 = *count;
335 r5 = be32_to_cpu(p[0]);
336 r6 = be32_to_cpu(p[1]);
337 r7 = be32_to_cpu(p[2]);
338 r8 = be32_to_cpu(p[3]);
339
340 __asm__ __volatile__ ("sc 1"
341 : "+r" (r11), "+r" (r3),
342 "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), "+r" (r8)
343 : : EV_HCALL_CLOBBERS6
344 );
345
346 *count = r4;
347
348 return r3;
349}
350
351/**
352 * ev_byte_channel_receive - fetch characters from a byte channel
353 * @handle: byte channel handle
354 * @count: (input) max num of chars to receive, (output) num chars received
355 * @buffer: pointer to a 16-byte buffer
356 *
357 * The size of @buffer must be at least 16 bytes, even if you request fewer
358 * than 16 characters, because we always write 16 bytes to @buffer. This is
359 * for performance reasons.
360 *
361 * Returns 0 for success, or an error code.
362 */
363static inline unsigned int ev_byte_channel_receive(unsigned int handle,
364 unsigned int *count, char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
365{
366 register uintptr_t r11 __asm__("r11");
367 register uintptr_t r3 __asm__("r3");
368 register uintptr_t r4 __asm__("r4");
369 register uintptr_t r5 __asm__("r5");
370 register uintptr_t r6 __asm__("r6");
371 register uintptr_t r7 __asm__("r7");
372 register uintptr_t r8 __asm__("r8");
373 uint32_t *p = (uint32_t *) buffer;
374
375 r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_RECEIVE);
376 r3 = handle;
377 r4 = *count;
378
379 __asm__ __volatile__ ("sc 1"
380 : "+r" (r11), "+r" (r3), "+r" (r4),
381 "=r" (r5), "=r" (r6), "=r" (r7), "=r" (r8)
382 : : EV_HCALL_CLOBBERS6
383 );
384
385 *count = r4;
386 p[0] = cpu_to_be32(r5);
387 p[1] = cpu_to_be32(r6);
388 p[2] = cpu_to_be32(r7);
389 p[3] = cpu_to_be32(r8);
390
391 return r3;
392}
393
394/**
395 * ev_byte_channel_poll - returns the status of the byte channel buffers
396 * @handle: byte channel handle
397 * @rx_count: returned count of bytes in receive queue
398 * @tx_count: returned count of free space in transmit queue
399 *
400 * This function reports the amount of data in the receive queue (i.e. the
401 * number of bytes you can read), and the amount of free space in the transmit
402 * queue (i.e. the number of bytes you can write).
403 *
404 * Returns 0 for success, or an error code.
405 */
406static inline unsigned int ev_byte_channel_poll(unsigned int handle,
407 unsigned int *rx_count, unsigned int *tx_count)
408{
409 register uintptr_t r11 __asm__("r11");
410 register uintptr_t r3 __asm__("r3");
411 register uintptr_t r4 __asm__("r4");
412 register uintptr_t r5 __asm__("r5");
413
414 r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_POLL);
415 r3 = handle;
416
417 __asm__ __volatile__ ("sc 1"
418 : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5)
419 : : EV_HCALL_CLOBBERS3
420 );
421
422 *rx_count = r4;
423 *tx_count = r5;
424
425 return r3;
426}
427
428/**
429 * ev_int_iack - acknowledge an interrupt
430 * @handle: handle to the target interrupt controller
431 * @vector: returned interrupt vector
432 *
433 * If handle is zero, the function returns the next interrupt source
434 * number to be handled irrespective of the hierarchy or cascading
435 * of interrupt controllers. If non-zero, specifies a handle to the
436 * interrupt controller that is the target of the acknowledge.
437 *
438 * Returns 0 for success, or an error code.
439 */
440static inline unsigned int ev_int_iack(unsigned int handle,
441 unsigned int *vector)
442{
443 register uintptr_t r11 __asm__("r11");
444 register uintptr_t r3 __asm__("r3");
445 register uintptr_t r4 __asm__("r4");
446
447 r11 = EV_HCALL_TOKEN(EV_INT_IACK);
448 r3 = handle;
449
450 __asm__ __volatile__ ("sc 1"
451 : "+r" (r11), "+r" (r3), "=r" (r4)
452 : : EV_HCALL_CLOBBERS2
453 );
454
455 *vector = r4;
456
457 return r3;
458}
459
460/**
461 * ev_doorbell_send - send a doorbell to another partition
462 * @handle: doorbell send handle
463 *
464 * Returns 0 for success, or an error code.
465 */
466static inline unsigned int ev_doorbell_send(unsigned int handle)
467{
468 register uintptr_t r11 __asm__("r11");
469 register uintptr_t r3 __asm__("r3");
470
471 r11 = EV_HCALL_TOKEN(EV_DOORBELL_SEND);
472 r3 = handle;
473
474 __asm__ __volatile__ ("sc 1"
475 : "+r" (r11), "+r" (r3)
476 : : EV_HCALL_CLOBBERS1
477 );
478
479 return r3;
480}
481
482/**
483 * ev_idle -- wait for next interrupt on this core
484 *
485 * Returns 0 for success, or an error code.
486 */
487static inline unsigned int ev_idle(void)
488{
489 register uintptr_t r11 __asm__("r11");
490 register uintptr_t r3 __asm__("r3");
491
492 r11 = EV_HCALL_TOKEN(EV_IDLE);
493
494 __asm__ __volatile__ ("sc 1"
495 : "+r" (r11), "=r" (r3)
496 : : EV_HCALL_CLOBBERS1
497 );
498
499 return r3;
500}
501
502#endif
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index 6d53f311d942..ac13addb8495 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -48,30 +48,33 @@
48#define EX_R14 (4 * 8) 48#define EX_R14 (4 * 8)
49#define EX_R15 (5 * 8) 49#define EX_R15 (5 * 8)
50 50
51/* The TLB miss exception uses different slots */ 51/*
52 * The TLB miss exception uses different slots.
53 *
54 * The bolted variant uses only the first six fields,
55 * which in combination with pgd and kernel_pgd fits in
56 * one 64-byte cache line.
57 */
52 58
53#define EX_TLB_R10 ( 0 * 8) 59#define EX_TLB_R10 ( 0 * 8)
54#define EX_TLB_R11 ( 1 * 8) 60#define EX_TLB_R11 ( 1 * 8)
55#define EX_TLB_R12 ( 2 * 8) 61#define EX_TLB_R14 ( 2 * 8)
56#define EX_TLB_R13 ( 3 * 8) 62#define EX_TLB_R15 ( 3 * 8)
57#define EX_TLB_R14 ( 4 * 8) 63#define EX_TLB_R16 ( 4 * 8)
58#define EX_TLB_R15 ( 5 * 8) 64#define EX_TLB_CR ( 5 * 8)
59#define EX_TLB_R16 ( 6 * 8) 65#define EX_TLB_R12 ( 6 * 8)
60#define EX_TLB_CR ( 7 * 8) 66#define EX_TLB_R13 ( 7 * 8)
61#define EX_TLB_DEAR ( 8 * 8) /* Level 0 and 2 only */ 67#define EX_TLB_DEAR ( 8 * 8) /* Level 0 and 2 only */
62#define EX_TLB_ESR ( 9 * 8) /* Level 0 and 2 only */ 68#define EX_TLB_ESR ( 9 * 8) /* Level 0 and 2 only */
63#define EX_TLB_SRR0 (10 * 8) 69#define EX_TLB_SRR0 (10 * 8)
64#define EX_TLB_SRR1 (11 * 8) 70#define EX_TLB_SRR1 (11 * 8)
65#define EX_TLB_MMUCR0 (12 * 8) /* Level 0 */
66#define EX_TLB_MAS1 (12 * 8) /* Level 0 */
67#define EX_TLB_MAS2 (13 * 8) /* Level 0 */
68#ifdef CONFIG_BOOK3E_MMU_TLB_STATS 71#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
69#define EX_TLB_R8 (14 * 8) 72#define EX_TLB_R8 (12 * 8)
70#define EX_TLB_R9 (15 * 8) 73#define EX_TLB_R9 (13 * 8)
71#define EX_TLB_LR (16 * 8) 74#define EX_TLB_LR (14 * 8)
72#define EX_TLB_SIZE (17 * 8) 75#define EX_TLB_SIZE (15 * 8)
73#else 76#else
74#define EX_TLB_SIZE (14 * 8) 77#define EX_TLB_SIZE (12 * 8)
75#endif 78#endif
76 79
77#define START_EXCEPTION(label) \ 80#define START_EXCEPTION(label) \
@@ -168,6 +171,16 @@ exc_##label##_book3e:
168 ld r9,EX_TLB_R9(r12); \ 171 ld r9,EX_TLB_R9(r12); \
169 ld r8,EX_TLB_R8(r12); \ 172 ld r8,EX_TLB_R8(r12); \
170 mtlr r16; 173 mtlr r16;
174#define TLB_MISS_PROLOG_STATS_BOLTED \
175 mflr r10; \
176 std r8,PACA_EXTLB+EX_TLB_R8(r13); \
177 std r9,PACA_EXTLB+EX_TLB_R9(r13); \
178 std r10,PACA_EXTLB+EX_TLB_LR(r13);
179#define TLB_MISS_RESTORE_STATS_BOLTED \
180 ld r16,PACA_EXTLB+EX_TLB_LR(r13); \
181 ld r9,PACA_EXTLB+EX_TLB_R9(r13); \
182 ld r8,PACA_EXTLB+EX_TLB_R8(r13); \
183 mtlr r16;
171#define TLB_MISS_STATS_D(name) \ 184#define TLB_MISS_STATS_D(name) \
172 addi r9,r13,MMSTAT_DSTATS+name; \ 185 addi r9,r13,MMSTAT_DSTATS+name; \
173 bl .tlb_stat_inc; 186 bl .tlb_stat_inc;
@@ -183,17 +196,20 @@ exc_##label##_book3e:
18361: addi r9,r13,MMSTAT_ISTATS+name; \ 19661: addi r9,r13,MMSTAT_ISTATS+name; \
18462: bl .tlb_stat_inc; 19762: bl .tlb_stat_inc;
185#define TLB_MISS_STATS_SAVE_INFO \ 198#define TLB_MISS_STATS_SAVE_INFO \
186 std r14,EX_TLB_ESR(r12); /* save ESR */ \ 199 std r14,EX_TLB_ESR(r12); /* save ESR */
187 200#define TLB_MISS_STATS_SAVE_INFO_BOLTED \
188 201 std r14,PACA_EXTLB+EX_TLB_ESR(r13); /* save ESR */
189#else 202#else
190#define TLB_MISS_PROLOG_STATS 203#define TLB_MISS_PROLOG_STATS
191#define TLB_MISS_RESTORE_STATS 204#define TLB_MISS_RESTORE_STATS
205#define TLB_MISS_PROLOG_STATS_BOLTED
206#define TLB_MISS_RESTORE_STATS_BOLTED
192#define TLB_MISS_STATS_D(name) 207#define TLB_MISS_STATS_D(name)
193#define TLB_MISS_STATS_I(name) 208#define TLB_MISS_STATS_I(name)
194#define TLB_MISS_STATS_X(name) 209#define TLB_MISS_STATS_X(name)
195#define TLB_MISS_STATS_Y(name) 210#define TLB_MISS_STATS_Y(name)
196#define TLB_MISS_STATS_SAVE_INFO 211#define TLB_MISS_STATS_SAVE_INFO
212#define TLB_MISS_STATS_SAVE_INFO_BOLTED
197#endif 213#endif
198 214
199#define SET_IVOR(vector_number, vector_offset) \ 215#define SET_IVOR(vector_number, vector_offset) \
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index f5dfe3411f64..8057f4f6980f 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -61,19 +61,22 @@
61#define EXC_HV H 61#define EXC_HV H
62#define EXC_STD 62#define EXC_STD
63 63
64#define EXCEPTION_PROLOG_1(area) \ 64#define __EXCEPTION_PROLOG_1(area, extra, vec) \
65 GET_PACA(r13); \ 65 GET_PACA(r13); \
66 std r9,area+EX_R9(r13); /* save r9 - r12 */ \ 66 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
67 std r10,area+EX_R10(r13); \ 67 std r10,area+EX_R10(r13); \
68 std r11,area+EX_R11(r13); \
69 std r12,area+EX_R12(r13); \
70 BEGIN_FTR_SECTION_NESTED(66); \ 68 BEGIN_FTR_SECTION_NESTED(66); \
71 mfspr r10,SPRN_CFAR; \ 69 mfspr r10,SPRN_CFAR; \
72 std r10,area+EX_CFAR(r13); \ 70 std r10,area+EX_CFAR(r13); \
73 END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \ 71 END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
74 GET_SCRATCH0(r9); \ 72 mfcr r9; \
75 std r9,area+EX_R13(r13); \ 73 extra(vec); \
76 mfcr r9 74 std r11,area+EX_R11(r13); \
75 std r12,area+EX_R12(r13); \
76 GET_SCRATCH0(r10); \
77 std r10,area+EX_R13(r13)
78#define EXCEPTION_PROLOG_1(area, extra, vec) \
79 __EXCEPTION_PROLOG_1(area, extra, vec)
77 80
78#define __EXCEPTION_PROLOG_PSERIES_1(label, h) \ 81#define __EXCEPTION_PROLOG_PSERIES_1(label, h) \
79 ld r12,PACAKBASE(r13); /* get high part of &label */ \ 82 ld r12,PACAKBASE(r13); /* get high part of &label */ \
@@ -85,13 +88,65 @@
85 mtspr SPRN_##h##SRR1,r10; \ 88 mtspr SPRN_##h##SRR1,r10; \
86 h##rfid; \ 89 h##rfid; \
87 b . /* prevent speculative execution */ 90 b . /* prevent speculative execution */
88#define EXCEPTION_PROLOG_PSERIES_1(label, h) \ 91#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
89 __EXCEPTION_PROLOG_PSERIES_1(label, h) 92 __EXCEPTION_PROLOG_PSERIES_1(label, h)
90 93
91#define EXCEPTION_PROLOG_PSERIES(area, label, h) \ 94#define EXCEPTION_PROLOG_PSERIES(area, label, h, extra, vec) \
92 EXCEPTION_PROLOG_1(area); \ 95 EXCEPTION_PROLOG_1(area, extra, vec); \
93 EXCEPTION_PROLOG_PSERIES_1(label, h); 96 EXCEPTION_PROLOG_PSERIES_1(label, h);
94 97
98#define __KVMTEST(n) \
99 lbz r10,HSTATE_IN_GUEST(r13); \
100 cmpwi r10,0; \
101 bne do_kvm_##n
102
103#define __KVM_HANDLER(area, h, n) \
104do_kvm_##n: \
105 ld r10,area+EX_R10(r13); \
106 stw r9,HSTATE_SCRATCH1(r13); \
107 ld r9,area+EX_R9(r13); \
108 std r12,HSTATE_SCRATCH0(r13); \
109 li r12,n; \
110 b kvmppc_interrupt
111
112#define __KVM_HANDLER_SKIP(area, h, n) \
113do_kvm_##n: \
114 cmpwi r10,KVM_GUEST_MODE_SKIP; \
115 ld r10,area+EX_R10(r13); \
116 beq 89f; \
117 stw r9,HSTATE_SCRATCH1(r13); \
118 ld r9,area+EX_R9(r13); \
119 std r12,HSTATE_SCRATCH0(r13); \
120 li r12,n; \
121 b kvmppc_interrupt; \
12289: mtocrf 0x80,r9; \
123 ld r9,area+EX_R9(r13); \
124 b kvmppc_skip_##h##interrupt
125
126#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
127#define KVMTEST(n) __KVMTEST(n)
128#define KVM_HANDLER(area, h, n) __KVM_HANDLER(area, h, n)
129#define KVM_HANDLER_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n)
130
131#else
132#define KVMTEST(n)
133#define KVM_HANDLER(area, h, n)
134#define KVM_HANDLER_SKIP(area, h, n)
135#endif
136
137#ifdef CONFIG_KVM_BOOK3S_PR
138#define KVMTEST_PR(n) __KVMTEST(n)
139#define KVM_HANDLER_PR(area, h, n) __KVM_HANDLER(area, h, n)
140#define KVM_HANDLER_PR_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n)
141
142#else
143#define KVMTEST_PR(n)
144#define KVM_HANDLER_PR(area, h, n)
145#define KVM_HANDLER_PR_SKIP(area, h, n)
146#endif
147
148#define NOTEST(n)
149
95/* 150/*
96 * The common exception prolog is used for all except a few exceptions 151 * The common exception prolog is used for all except a few exceptions
97 * such as a segment miss on a kernel address. We have to be prepared 152 * such as a segment miss on a kernel address. We have to be prepared
@@ -164,57 +219,58 @@
164 .globl label##_pSeries; \ 219 .globl label##_pSeries; \
165label##_pSeries: \ 220label##_pSeries: \
166 HMT_MEDIUM; \ 221 HMT_MEDIUM; \
167 DO_KVM vec; \
168 SET_SCRATCH0(r13); /* save r13 */ \ 222 SET_SCRATCH0(r13); /* save r13 */ \
169 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, EXC_STD) 223 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
224 EXC_STD, KVMTEST_PR, vec)
170 225
171#define STD_EXCEPTION_HV(loc, vec, label) \ 226#define STD_EXCEPTION_HV(loc, vec, label) \
172 . = loc; \ 227 . = loc; \
173 .globl label##_hv; \ 228 .globl label##_hv; \
174label##_hv: \ 229label##_hv: \
175 HMT_MEDIUM; \ 230 HMT_MEDIUM; \
176 DO_KVM vec; \ 231 SET_SCRATCH0(r13); /* save r13 */ \
177 SET_SCRATCH0(r13); /* save r13 */ \ 232 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
178 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, EXC_HV) 233 EXC_HV, KVMTEST, vec)
179 234
180#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h) \ 235#define __SOFTEN_TEST(h) \
181 HMT_MEDIUM; \
182 DO_KVM vec; \
183 SET_SCRATCH0(r13); /* save r13 */ \
184 GET_PACA(r13); \
185 std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \
186 std r10,PACA_EXGEN+EX_R10(r13); \
187 lbz r10,PACASOFTIRQEN(r13); \ 236 lbz r10,PACASOFTIRQEN(r13); \
188 mfcr r9; \
189 cmpwi r10,0; \ 237 cmpwi r10,0; \
190 beq masked_##h##interrupt; \ 238 beq masked_##h##interrupt
191 GET_SCRATCH0(r10); \ 239#define _SOFTEN_TEST(h) __SOFTEN_TEST(h)
192 std r10,PACA_EXGEN+EX_R13(r13); \ 240
193 std r11,PACA_EXGEN+EX_R11(r13); \ 241#define SOFTEN_TEST_PR(vec) \
194 std r12,PACA_EXGEN+EX_R12(r13); \ 242 KVMTEST_PR(vec); \
195 ld r12,PACAKBASE(r13); /* get high part of &label */ \ 243 _SOFTEN_TEST(EXC_STD)
196 ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \ 244
197 mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \ 245#define SOFTEN_TEST_HV(vec) \
198 LOAD_HANDLER(r12,label##_common) \ 246 KVMTEST(vec); \
199 mtspr SPRN_##h##SRR0,r12; \ 247 _SOFTEN_TEST(EXC_HV)
200 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ 248
201 mtspr SPRN_##h##SRR1,r10; \ 249#define SOFTEN_TEST_HV_201(vec) \
202 h##rfid; \ 250 KVMTEST(vec); \
203 b . /* prevent speculative execution */ 251 _SOFTEN_TEST(EXC_STD)
204#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h) \ 252
205 __MASKABLE_EXCEPTION_PSERIES(vec, label, h) 253#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
254 HMT_MEDIUM; \
255 SET_SCRATCH0(r13); /* save r13 */ \
256 __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
257 EXCEPTION_PROLOG_PSERIES_1(label##_common, h);
258#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
259 __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)
206 260
207#define MASKABLE_EXCEPTION_PSERIES(loc, vec, label) \ 261#define MASKABLE_EXCEPTION_PSERIES(loc, vec, label) \
208 . = loc; \ 262 . = loc; \
209 .globl label##_pSeries; \ 263 .globl label##_pSeries; \
210label##_pSeries: \ 264label##_pSeries: \
211 _MASKABLE_EXCEPTION_PSERIES(vec, label, EXC_STD) 265 _MASKABLE_EXCEPTION_PSERIES(vec, label, \
266 EXC_STD, SOFTEN_TEST_PR)
212 267
213#define MASKABLE_EXCEPTION_HV(loc, vec, label) \ 268#define MASKABLE_EXCEPTION_HV(loc, vec, label) \
214 . = loc; \ 269 . = loc; \
215 .globl label##_hv; \ 270 .globl label##_hv; \
216label##_hv: \ 271label##_hv: \
217 _MASKABLE_EXCEPTION_PSERIES(vec, label, EXC_HV) 272 _MASKABLE_EXCEPTION_PSERIES(vec, label, \
273 EXC_HV, SOFTEN_TEST_HV)
218 274
219#ifdef CONFIG_PPC_ISERIES 275#ifdef CONFIG_PPC_ISERIES
220#define DISABLE_INTS \ 276#define DISABLE_INTS \
diff --git a/arch/powerpc/include/asm/fsl_hcalls.h b/arch/powerpc/include/asm/fsl_hcalls.h
new file mode 100644
index 000000000000..922d9b5fe3d5
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_hcalls.h
@@ -0,0 +1,655 @@
1/*
2 * Freescale hypervisor call interface
3 *
4 * Copyright 2008-2010 Freescale Semiconductor, Inc.
5 *
6 * Author: Timur Tabi <timur@freescale.com>
7 *
8 * This file is provided under a dual BSD/GPL license. When using or
9 * redistributing this file, you may do so under either license.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions are met:
13 * * Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * * Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * * Neither the name of Freescale Semiconductor nor the
19 * names of its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written permission.
21 *
22 *
23 * ALTERNATIVELY, this software may be distributed under the terms of the
24 * GNU General Public License ("GPL") as published by the Free Software
25 * Foundation, either version 2 of that License or (at your option) any
26 * later version.
27 *
28 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
29 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
30 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
31 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
33 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
34 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
37 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 */
39
40#ifndef _FSL_HCALLS_H
41#define _FSL_HCALLS_H
42
43#include <linux/types.h>
44#include <linux/errno.h>
45#include <asm/byteorder.h>
46#include <asm/epapr_hcalls.h>
47
48#define FH_API_VERSION 1
49
50#define FH_ERR_GET_INFO 1
51#define FH_PARTITION_GET_DTPROP 2
52#define FH_PARTITION_SET_DTPROP 3
53#define FH_PARTITION_RESTART 4
54#define FH_PARTITION_GET_STATUS 5
55#define FH_PARTITION_START 6
56#define FH_PARTITION_STOP 7
57#define FH_PARTITION_MEMCPY 8
58#define FH_DMA_ENABLE 9
59#define FH_DMA_DISABLE 10
60#define FH_SEND_NMI 11
61#define FH_VMPIC_GET_MSIR 12
62#define FH_SYSTEM_RESET 13
63#define FH_GET_CORE_STATE 14
64#define FH_ENTER_NAP 15
65#define FH_EXIT_NAP 16
66#define FH_CLAIM_DEVICE 17
67#define FH_PARTITION_STOP_DMA 18
68
69/* vendor ID: Freescale Semiconductor */
70#define FH_HCALL_TOKEN(num) _EV_HCALL_TOKEN(EV_FSL_VENDOR_ID, num)
71
72/*
73 * We use "uintptr_t" to define a register because it's guaranteed to be a
74 * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit
75 * platform.
76 *
77 * All registers are either input/output or output only. Registers that are
78 * initialized before making the hypercall are input/output. All
79 * input/output registers are represented with "+r". Output-only registers
80 * are represented with "=r". Do not specify any unused registers. The
81 * clobber list will tell the compiler that the hypercall modifies those
82 * registers, which is good enough.
83 */
84
85/**
86 * fh_send_nmi - send NMI to virtual cpu(s).
87 * @vcpu_mask: send NMI to virtual cpu(s) specified by this mask.
88 *
89 * Returns 0 for success, or EINVAL for invalid vcpu_mask.
90 */
91static inline unsigned int fh_send_nmi(unsigned int vcpu_mask)
92{
93 register uintptr_t r11 __asm__("r11");
94 register uintptr_t r3 __asm__("r3");
95
96 r11 = FH_HCALL_TOKEN(FH_SEND_NMI);
97 r3 = vcpu_mask;
98
99 __asm__ __volatile__ ("sc 1"
100 : "+r" (r11), "+r" (r3)
101 : : EV_HCALL_CLOBBERS1
102 );
103
104 return r3;
105}
106
107/* Arbitrary limits to avoid excessive memory allocation in hypervisor */
108#define FH_DTPROP_MAX_PATHLEN 4096
109#define FH_DTPROP_MAX_PROPLEN 32768
110
111/**
112 * fh_partition_get_dtprop - get a property from a guest device tree.
113 * @handle: handle of partition whose device tree is to be accessed
114 * @dtpath_addr: physical address of device tree path to access
115 * @propname_addr: physical address of name of property
116 * @propvalue_addr: physical address of property value buffer
117 * @propvalue_len: length of buffer on entry, length of property on return
118 *
119 * Returns zero on success, non-zero on error.
120 */
121static inline unsigned int fh_partition_get_dtprop(int handle,
122 uint64_t dtpath_addr,
123 uint64_t propname_addr,
124 uint64_t propvalue_addr,
125 uint32_t *propvalue_len)
126{
127 register uintptr_t r11 __asm__("r11");
128 register uintptr_t r3 __asm__("r3");
129 register uintptr_t r4 __asm__("r4");
130 register uintptr_t r5 __asm__("r5");
131 register uintptr_t r6 __asm__("r6");
132 register uintptr_t r7 __asm__("r7");
133 register uintptr_t r8 __asm__("r8");
134 register uintptr_t r9 __asm__("r9");
135 register uintptr_t r10 __asm__("r10");
136
137 r11 = FH_HCALL_TOKEN(FH_PARTITION_GET_DTPROP);
138 r3 = handle;
139
140#ifdef CONFIG_PHYS_64BIT
141 r4 = dtpath_addr >> 32;
142 r6 = propname_addr >> 32;
143 r8 = propvalue_addr >> 32;
144#else
145 r4 = 0;
146 r6 = 0;
147 r8 = 0;
148#endif
149 r5 = (uint32_t)dtpath_addr;
150 r7 = (uint32_t)propname_addr;
151 r9 = (uint32_t)propvalue_addr;
152 r10 = *propvalue_len;
153
154 __asm__ __volatile__ ("sc 1"
155 : "+r" (r11),
156 "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7),
157 "+r" (r8), "+r" (r9), "+r" (r10)
158 : : EV_HCALL_CLOBBERS8
159 );
160
161 *propvalue_len = r4; /* hypervisor returns the actual property length in r4 */
162 return r3;
163}
164
165/**
166 * Set a property in a guest device tree.
167 * @handle: handle of partition whose device tree is to be accessed
168 * @dtpath_addr: physical address of device tree path to access
169 * @propname_addr: physical address of name of property
170 * @propvalue_addr: physical address of property value
171 * @propvalue_len: length of property
172 *
173 * Returns zero on success, non-zero on error.
174 */
175static inline unsigned int fh_partition_set_dtprop(int handle,
176 uint64_t dtpath_addr,
177 uint64_t propname_addr,
178 uint64_t propvalue_addr,
179 uint32_t propvalue_len)
180{
181 register uintptr_t r11 __asm__("r11");
182 register uintptr_t r3 __asm__("r3");
183 register uintptr_t r4 __asm__("r4");
184 register uintptr_t r6 __asm__("r6");
185 register uintptr_t r8 __asm__("r8");
186 register uintptr_t r5 __asm__("r5");
187 register uintptr_t r7 __asm__("r7");
188 register uintptr_t r9 __asm__("r9");
189 register uintptr_t r10 __asm__("r10");
190
191 r11 = FH_HCALL_TOKEN(FH_PARTITION_SET_DTPROP);
192 r3 = handle;
193
194#ifdef CONFIG_PHYS_64BIT
195 r4 = dtpath_addr >> 32;
196 r6 = propname_addr >> 32;
197 r8 = propvalue_addr >> 32;
198#else
199 r4 = 0;
200 r6 = 0;
201 r8 = 0;
202#endif
203 r5 = (uint32_t)dtpath_addr;
204 r7 = (uint32_t)propname_addr;
205 r9 = (uint32_t)propvalue_addr;
206 r10 = propvalue_len;
207
208 __asm__ __volatile__ ("sc 1"
209 : "+r" (r11),
210 "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7),
211 "+r" (r8), "+r" (r9), "+r" (r10)
212 : : EV_HCALL_CLOBBERS8
213 );
214
215 return r3;
216}
217
218/**
219 * fh_partition_restart - reboot the current partition
220 * @partition: partition ID
221 *
222 * Returns an error code if reboot failed. Does not return if it succeeds.
223 */
224static inline unsigned int fh_partition_restart(unsigned int partition)
225{
226 register uintptr_t r11 __asm__("r11");
227 register uintptr_t r3 __asm__("r3");
228
229 r11 = FH_HCALL_TOKEN(FH_PARTITION_RESTART);
230 r3 = partition;
231
232 __asm__ __volatile__ ("sc 1"
233 : "+r" (r11), "+r" (r3)
234 : : EV_HCALL_CLOBBERS1
235 );
236
237 return r3;
238}
239
240#define FH_PARTITION_STOPPED 0
241#define FH_PARTITION_RUNNING 1
242#define FH_PARTITION_STARTING 2
243#define FH_PARTITION_STOPPING 3
244#define FH_PARTITION_PAUSING 4
245#define FH_PARTITION_PAUSED 5
246#define FH_PARTITION_RESUMING 6
247
248/**
249 * fh_partition_get_status - gets the status of a partition
250 * @partition: partition ID
251 * @status: returned status code
252 *
253 * Returns 0 for success, or an error code.
254 */
255static inline unsigned int fh_partition_get_status(unsigned int partition,
256 unsigned int *status)
257{
258 register uintptr_t r11 __asm__("r11");
259 register uintptr_t r3 __asm__("r3");
260 register uintptr_t r4 __asm__("r4");
261
262 r11 = FH_HCALL_TOKEN(FH_PARTITION_GET_STATUS);
263 r3 = partition;
264
265 __asm__ __volatile__ ("sc 1"
266 : "+r" (r11), "+r" (r3), "=r" (r4)
267 : : EV_HCALL_CLOBBERS2
268 );
269
270 *status = r4;
271
272 return r3;
273}
274
275/**
276 * fh_partition_start - boots and starts execution of the specified partition
277 * @partition: partition ID
278 * @entry_point: guest physical address to start execution
279 *
280 * The hypervisor creates a 1-to-1 virtual/physical IMA mapping, so at boot
281 * time, guest physical address are the same as guest virtual addresses.
282 *
283 * Returns 0 for success, or an error code.
284 */
285static inline unsigned int fh_partition_start(unsigned int partition,
286 uint32_t entry_point, int load)
287{
288 register uintptr_t r11 __asm__("r11");
289 register uintptr_t r3 __asm__("r3");
290 register uintptr_t r4 __asm__("r4");
291 register uintptr_t r5 __asm__("r5");
292
293 r11 = FH_HCALL_TOKEN(FH_PARTITION_START);
294 r3 = partition;
295 r4 = entry_point;
296 r5 = load;
297
298 __asm__ __volatile__ ("sc 1"
299 : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5)
300 : : EV_HCALL_CLOBBERS3
301 );
302
303 return r3;
304}
305
306/**
307 * fh_partition_stop - stops another partition
308 * @partition: partition ID
309 *
310 * Returns 0 for success, or an error code.
311 */
312static inline unsigned int fh_partition_stop(unsigned int partition)
313{
314 register uintptr_t r11 __asm__("r11");
315 register uintptr_t r3 __asm__("r3");
316
317 r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP);
318 r3 = partition;
319
320 __asm__ __volatile__ ("sc 1"
321 : "+r" (r11), "+r" (r3)
322 : : EV_HCALL_CLOBBERS1
323 );
324
325 return r3;
326}
327
328/**
329 * struct fh_sg_list: definition of the fh_partition_memcpy S/G list
330 * @source: guest physical address to copy from
331 * @target: guest physical address to copy to
332 * @size: number of bytes to copy
333 * @reserved: reserved, must be zero
334 *
335 * The scatter/gather list for fh_partition_memcpy() is an array of these
336 * structures. The array must be guest physically contiguous.
337 *
338 * This structure must be aligned on 32-byte boundary, so that no single
339 * structure can span two pages.
340 */
341struct fh_sg_list {
342 uint64_t source; /**< guest physical address to copy from */
343 uint64_t target; /**< guest physical address to copy to */
344 uint64_t size; /**< number of bytes to copy */
345 uint64_t reserved; /**< reserved, must be zero */
346} __attribute__ ((aligned(32)));
347
348/**
349 * fh_partition_memcpy - copies data from one guest to another
350 * @source: the ID of the partition to copy from
351 * @target: the ID of the partition to copy to
352 * @sg_list: guest physical address of an array of &fh_sg_list structures
353 * @count: the number of entries in @sg_list
354 *
355 * Returns 0 for success, or an error code.
356 */
357static inline unsigned int fh_partition_memcpy(unsigned int source,
358 unsigned int target, phys_addr_t sg_list, unsigned int count)
359{
360 register uintptr_t r11 __asm__("r11");
361 register uintptr_t r3 __asm__("r3");
362 register uintptr_t r4 __asm__("r4");
363 register uintptr_t r5 __asm__("r5");
364 register uintptr_t r6 __asm__("r6");
365 register uintptr_t r7 __asm__("r7");
366
367 r11 = FH_HCALL_TOKEN(FH_PARTITION_MEMCPY);
368 r3 = source;
369 r4 = target;
370 r5 = (uint32_t) sg_list;
371
372#ifdef CONFIG_PHYS_64BIT
373 r6 = sg_list >> 32;
374#else
375 r6 = 0;
376#endif
377 r7 = count;
378
379 __asm__ __volatile__ ("sc 1"
380 : "+r" (r11),
381 "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7)
382 : : EV_HCALL_CLOBBERS5
383 );
384
385 return r3;
386}
387
388/**
389 * fh_dma_enable - enable DMA for the specified device
390 * @liodn: the LIODN of the I/O device for which to enable DMA
391 *
392 * Returns 0 for success, or an error code.
393 */
394static inline unsigned int fh_dma_enable(unsigned int liodn)
395{
396 register uintptr_t r11 __asm__("r11");
397 register uintptr_t r3 __asm__("r3");
398
399 r11 = FH_HCALL_TOKEN(FH_DMA_ENABLE);
400 r3 = liodn;
401
402 __asm__ __volatile__ ("sc 1"
403 : "+r" (r11), "+r" (r3)
404 : : EV_HCALL_CLOBBERS1
405 );
406
407 return r3;
408}
409
410/**
411 * fh_dma_disable - disable DMA for the specified device
412 * @liodn: the LIODN of the I/O device for which to disable DMA
413 *
414 * Returns 0 for success, or an error code.
415 */
416static inline unsigned int fh_dma_disable(unsigned int liodn)
417{
418 register uintptr_t r11 __asm__("r11");
419 register uintptr_t r3 __asm__("r3");
420
421 r11 = FH_HCALL_TOKEN(FH_DMA_DISABLE);
422 r3 = liodn;
423
424 __asm__ __volatile__ ("sc 1"
425 : "+r" (r11), "+r" (r3)
426 : : EV_HCALL_CLOBBERS1
427 );
428
429 return r3;
430}
431
432
433/**
434 * fh_vmpic_get_msir - returns the MPIC-MSI register value
435 * @interrupt: the interrupt number
436 * @msir_val: returned MPIC-MSI register value
437 *
438 * Returns 0 for success, or an error code.
439 */
440static inline unsigned int fh_vmpic_get_msir(unsigned int interrupt,
441 unsigned int *msir_val)
442{
443 register uintptr_t r11 __asm__("r11");
444 register uintptr_t r3 __asm__("r3");
445 register uintptr_t r4 __asm__("r4");
446
447 r11 = FH_HCALL_TOKEN(FH_VMPIC_GET_MSIR);
448 r3 = interrupt;
449
450 __asm__ __volatile__ ("sc 1"
451 : "+r" (r11), "+r" (r3), "=r" (r4)
452 : : EV_HCALL_CLOBBERS2
453 );
454
455 *msir_val = r4;
456
457 return r3;
458}
459
460/**
461 * fh_system_reset - reset the system
462 *
463 * Returns 0 for success, or an error code.
464 */
465static inline unsigned int fh_system_reset(void)
466{
467 register uintptr_t r11 __asm__("r11");
468 register uintptr_t r3 __asm__("r3");
469
470 r11 = FH_HCALL_TOKEN(FH_SYSTEM_RESET);
471
472 __asm__ __volatile__ ("sc 1"
473 : "+r" (r11), "=r" (r3)
474 : : EV_HCALL_CLOBBERS1
475 );
476
477 return r3;
478}
479
480
481/**
482 * fh_err_get_info - get platform error information
483 * @queue: which error event queue to read:
484 *	0 for guest error event queue
485 *	1 for global error event queue
486 * @bufsize: on entry, the size of the caller's buffer; on return, updated
487 *	with the value the hypervisor passes back in r4 (presumably the size
488 *	of the error data -- confirm against the Freescale hypervisor API)
489 * @addr_hi: high 32 bits of the physical address of the error data buffer
490 * @addr_lo: low 32 bits of the physical address of the error data buffer
491 * @peek: NOTE(review): looks like non-zero reads the event without
492 *	consuming it -- verify against the hypervisor documentation
493 *
494 * Returns 0 for success, or an error code.
495 */
496static inline unsigned int fh_err_get_info(int queue, uint32_t *bufsize,
497 uint32_t addr_hi, uint32_t addr_lo, int peek)
498{
499 register uintptr_t r11 __asm__("r11");
500 register uintptr_t r3 __asm__("r3");
501 register uintptr_t r4 __asm__("r4");
502 register uintptr_t r5 __asm__("r5");
503 register uintptr_t r6 __asm__("r6");
504 register uintptr_t r7 __asm__("r7");
505
506 r11 = FH_HCALL_TOKEN(FH_ERR_GET_INFO);
507 r3 = queue;
508 r4 = *bufsize;
509 r5 = addr_hi;
510 r6 = addr_lo;
511 r7 = peek;
512
513 __asm__ __volatile__ ("sc 1"
514 : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6),
515 "+r" (r7)
516 : : EV_HCALL_CLOBBERS5
517 );
518
519 *bufsize = r4;
520
521 return r3;
522}
519
520
521#define FH_VCPU_RUN 0
522#define FH_VCPU_IDLE 1
523#define FH_VCPU_NAP 2
524
525/**
526 * fh_get_core_state - get the state of a vcpu
527 *
528 * @handle: handle of partition containing the vcpu
529 * @vcpu: vcpu number within the partition
530 * @state:the current state of the vcpu, see FH_VCPU_*
531 *
532 * Returns 0 for success, or an error code.
533 */
534static inline unsigned int fh_get_core_state(unsigned int handle,
535 unsigned int vcpu, unsigned int *state)
536{
537 register uintptr_t r11 __asm__("r11");
538 register uintptr_t r3 __asm__("r3");
539 register uintptr_t r4 __asm__("r4");
540
541 r11 = FH_HCALL_TOKEN(FH_GET_CORE_STATE);
542 r3 = handle;
543 r4 = vcpu;
544
545 __asm__ __volatile__ ("sc 1"
546 : "+r" (r11), "+r" (r3), "+r" (r4)
547 : : EV_HCALL_CLOBBERS2
548 );
549
550 *state = r4;
551 return r3;
552}
553
554/**
555 * fh_enter_nap - enter nap on a vcpu
556 *
557 * Note that though the API supports entering nap on a vcpu other
558 * than the caller, this may not be implemented and may return EINVAL.
559 *
560 * @handle: handle of partition containing the vcpu
561 * @vcpu: vcpu number within the partition
562 *
563 * Returns 0 for success, or an error code.
564 */
565static inline unsigned int fh_enter_nap(unsigned int handle, unsigned int vcpu)
566{
567 register uintptr_t r11 __asm__("r11");
568 register uintptr_t r3 __asm__("r3");
569 register uintptr_t r4 __asm__("r4");
570
571 r11 = FH_HCALL_TOKEN(FH_ENTER_NAP);
572 r3 = handle;
573 r4 = vcpu;
574
575 __asm__ __volatile__ ("sc 1"
576 : "+r" (r11), "+r" (r3), "+r" (r4)
577 : : EV_HCALL_CLOBBERS2
578 );
579
580 return r3;
581}
582
583/**
584 * fh_exit_nap - exit nap on a vcpu
585 * @handle: handle of partition containing the vcpu
586 * @vcpu: vcpu number within the partition
587 *
588 * Returns 0 for success, or an error code.
589 */
590static inline unsigned int fh_exit_nap(unsigned int handle, unsigned int vcpu)
591{
592 register uintptr_t r11 __asm__("r11");
593 register uintptr_t r3 __asm__("r3");
594 register uintptr_t r4 __asm__("r4");
595
596 r11 = FH_HCALL_TOKEN(FH_EXIT_NAP);
597 r3 = handle;
598 r4 = vcpu;
599
600 __asm__ __volatile__ ("sc 1"
601 : "+r" (r11), "+r" (r3), "+r" (r4)
602 : : EV_HCALL_CLOBBERS2
603 );
604
605 return r3;
606}
607/**
608 * fh_claim_device - claim a "claimable" shared device
609 * @handle: fsl,hv-device-handle of node to claim
610 *
611 * Returns 0 for success, or an error code.
612 */
613static inline unsigned int fh_claim_device(unsigned int handle)
614{
615 register uintptr_t r11 __asm__("r11");
616 register uintptr_t r3 __asm__("r3");
617
618 r11 = FH_HCALL_TOKEN(FH_CLAIM_DEVICE);
619 r3 = handle;
620
621 __asm__ __volatile__ ("sc 1"
622 : "+r" (r11), "+r" (r3)
623 : : EV_HCALL_CLOBBERS1
624 );
625
626 return r3;
627}
628
629/**
630 * Run deferred DMA disabling on a partition's private devices
631 *
632 * This applies to devices which a partition owns either privately,
633 * or which are claimable and still actively owned by that partition,
634 * and which do not have the no-dma-disable property.
635 *
636 * @handle: partition (must be stopped) whose DMA is to be disabled
637 *
638 * Returns 0 for success, or an error code.
639 */
640static inline unsigned int fh_partition_stop_dma(unsigned int handle)
641{
642 register uintptr_t r11 __asm__("r11");
643 register uintptr_t r3 __asm__("r3");
644
645 r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP_DMA);
646 r3 = handle;
647
648 __asm__ __volatile__ ("sc 1"
649 : "+r" (r11), "+r" (r3)
650 : : EV_HCALL_CLOBBERS1
651 );
652
653 return r3;
654}
655#endif
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index fd8201dddd4b..1c324ff55ea8 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -29,6 +29,10 @@
29#define H_LONG_BUSY_ORDER_100_SEC 9905 /* Long busy, hint that 100sec \ 29#define H_LONG_BUSY_ORDER_100_SEC 9905 /* Long busy, hint that 100sec \
30 is a good time to retry */ 30 is a good time to retry */
31#define H_LONG_BUSY_END_RANGE 9905 /* End of long busy range */ 31#define H_LONG_BUSY_END_RANGE 9905 /* End of long busy range */
32
33/* Internal value used in book3s_hv kvm support; not returned to guests */
34#define H_TOO_HARD 9999
35
32#define H_HARDWARE -1 /* Hardware error */ 36#define H_HARDWARE -1 /* Hardware error */
33#define H_FUNCTION -2 /* Function not supported */ 37#define H_FUNCTION -2 /* Function not supported */
34#define H_PRIVILEGE -3 /* Caller not privileged */ 38#define H_PRIVILEGE -3 /* Caller not privileged */
@@ -100,6 +104,7 @@
100#define H_PAGE_SET_ACTIVE H_PAGE_STATE_CHANGE 104#define H_PAGE_SET_ACTIVE H_PAGE_STATE_CHANGE
101#define H_AVPN (1UL<<(63-32)) /* An avpn is provided as a sanity test */ 105#define H_AVPN (1UL<<(63-32)) /* An avpn is provided as a sanity test */
102#define H_ANDCOND (1UL<<(63-33)) 106#define H_ANDCOND (1UL<<(63-33))
107#define H_LOCAL (1UL<<(63-35))
103#define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */ 108#define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */
104#define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages */ 109#define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages */
105#define H_COALESCE_CAND (1UL<<(63-42)) /* page is a good candidate for coalescing */ 110#define H_COALESCE_CAND (1UL<<(63-42)) /* page is a good candidate for coalescing */
diff --git a/arch/powerpc/include/asm/hvsi.h b/arch/powerpc/include/asm/hvsi.h
new file mode 100644
index 000000000000..d3f64f361814
--- /dev/null
+++ b/arch/powerpc/include/asm/hvsi.h
@@ -0,0 +1,94 @@
1#ifndef _HVSI_H
2#define _HVSI_H
3
4#define VS_DATA_PACKET_HEADER 0xff
5#define VS_CONTROL_PACKET_HEADER 0xfe
6#define VS_QUERY_PACKET_HEADER 0xfd
7#define VS_QUERY_RESPONSE_PACKET_HEADER 0xfc
8
9/* control verbs */
10#define VSV_SET_MODEM_CTL 1 /* to service processor only */
11#define VSV_MODEM_CTL_UPDATE 2 /* from service processor only */
12#define VSV_CLOSE_PROTOCOL 3
13
14/* query verbs */
15#define VSV_SEND_VERSION_NUMBER 1
16#define VSV_SEND_MODEM_CTL_STATUS 2
17
18/* yes, these masks are not consecutive. */
19#define HVSI_TSDTR 0x01
20#define HVSI_TSCD 0x20
21
22#define HVSI_MAX_OUTGOING_DATA 12
23#define HVSI_VERSION 1
24
25struct hvsi_header {
26 uint8_t type;
27 uint8_t len;
28 uint16_t seqno;
29} __attribute__((packed));
30
31struct hvsi_data {
32 struct hvsi_header hdr;
33 uint8_t data[HVSI_MAX_OUTGOING_DATA];
34} __attribute__((packed));
35
36struct hvsi_control {
37 struct hvsi_header hdr;
38 uint16_t verb;
39 /* optional depending on verb: */
40 uint32_t word;
41 uint32_t mask;
42} __attribute__((packed));
43
44struct hvsi_query {
45 struct hvsi_header hdr;
46 uint16_t verb;
47} __attribute__((packed));
48
49struct hvsi_query_response {
50 struct hvsi_header hdr;
51 uint16_t verb;
52 uint16_t query_seqno;
53 union {
54 uint8_t version;
55 uint32_t mctrl_word;
56 } u;
57} __attribute__((packed));
58
59/* hvsi lib struct definitions */
60#define HVSI_INBUF_SIZE 255
61struct tty_struct;
62struct hvsi_priv {
63 unsigned int inbuf_len; /* data in input buffer */
64 unsigned char inbuf[HVSI_INBUF_SIZE];
65 unsigned int inbuf_cur; /* Cursor in input buffer */
66 unsigned int inbuf_pktlen; /* packet length from cursor */
67 atomic_t seqno; /* packet sequence number */
68 unsigned int opened:1; /* driver opened */
69 unsigned int established:1; /* protocol established */
70 unsigned int is_console:1; /* used as a kernel console device */
71 unsigned int mctrl_update:1; /* modem control updated */
72 unsigned short mctrl; /* modem control */
73 struct tty_struct *tty; /* tty structure */
74 int (*get_chars)(uint32_t termno, char *buf, int count); /* low-level read hook (see hvsilib_init) */
75 int (*put_chars)(uint32_t termno, const char *buf, int count); /* low-level write hook (see hvsilib_init) */
76 uint32_t termno; /* terminal number passed to the hooks */
77};
78
79/* hvsi lib functions */
80struct hvc_struct;
81extern void hvsilib_init(struct hvsi_priv *pv,
82 int (*get_chars)(uint32_t termno, char *buf, int count),
83 int (*put_chars)(uint32_t termno, const char *buf,
84 int count),
85 int termno, int is_console);
86extern int hvsilib_open(struct hvsi_priv *pv, struct hvc_struct *hp);
87extern void hvsilib_close(struct hvsi_priv *pv, struct hvc_struct *hp);
88extern int hvsilib_read_mctrl(struct hvsi_priv *pv);
89extern int hvsilib_write_mctrl(struct hvsi_priv *pv, int dtr);
90extern void hvsilib_establish(struct hvsi_priv *pv);
91extern int hvsilib_get_chars(struct hvsi_priv *pv, char *buf, int count);
92extern int hvsilib_put_chars(struct hvsi_priv *pv, const char *buf, int count);
93
94#endif /* _HVSI_H */
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 1c33ec17ca36..80fd4d2b4a62 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -57,7 +57,7 @@ void hw_breakpoint_pmu_read(struct perf_event *bp);
57extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); 57extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
58 58
59extern struct pmu perf_ops_bp; 59extern struct pmu perf_ops_bp;
60extern void ptrace_triggered(struct perf_event *bp, int nmi, 60extern void ptrace_triggered(struct perf_event *bp,
61 struct perf_sample_data *data, struct pt_regs *regs); 61 struct perf_sample_data *data, struct pt_regs *regs);
62static inline void hw_breakpoint_disable(void) 62static inline void hw_breakpoint_disable(void)
63{ 63{
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 1bff591f7f72..c0e1bc319e35 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -14,7 +14,7 @@
14#include <linux/radix-tree.h> 14#include <linux/radix-tree.h>
15 15
16#include <asm/types.h> 16#include <asm/types.h>
17#include <asm/atomic.h> 17#include <linux/atomic.h>
18 18
19 19
20/* Define a way to iterate across irqs. */ 20/* Define a way to iterate across irqs. */
@@ -330,5 +330,7 @@ extern int call_handle_irq(int irq, void *p1,
330 struct thread_info *tp, void *func); 330 struct thread_info *tp, void *func);
331extern void do_IRQ(struct pt_regs *regs); 331extern void do_IRQ(struct pt_regs *regs);
332 332
333int irq_choose_cpu(const struct cpumask *mask);
334
333#endif /* _ASM_IRQ_H */ 335#endif /* _ASM_IRQ_H */
334#endif /* __KERNEL__ */ 336#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
new file mode 100644
index 000000000000..1f780b95c0f0
--- /dev/null
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -0,0 +1,47 @@
1#ifndef _ASM_POWERPC_JUMP_LABEL_H
2#define _ASM_POWERPC_JUMP_LABEL_H
3
4/*
5 * Copyright 2010 Michael Ellerman, IBM Corp.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/types.h>
14
15#include <asm/feature-fixups.h>
16
17#define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG)
18#define JUMP_LABEL_NOP_SIZE 4
19
20static __always_inline bool arch_static_branch(struct jump_label_key *key)
21{
22 asm goto("1:\n\t"
23 "nop\n\t"
24 ".pushsection __jump_table, \"aw\"\n\t"
25 ".align 4\n\t"
26 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
27 ".popsection \n\t"
28 : : "i" (key) : : l_yes);
29 return false;
30l_yes:
31 return true;
32}
33
34#ifdef CONFIG_PPC64
35typedef u64 jump_label_t;
36#else
37typedef u32 jump_label_t;
38#endif
39
40struct jump_entry {
41 jump_label_t code;
42 jump_label_t target;
43 jump_label_t key;
44 jump_label_t pad;
45};
46
47#endif /* _ASM_POWERPC_JUMP_LABEL_H */
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index d2ca5ed3877b..a4f6c85431f8 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -22,6 +22,10 @@
22 22
23#include <linux/types.h> 23#include <linux/types.h>
24 24
25/* Select powerpc specific features in <linux/kvm.h> */
26#define __KVM_HAVE_SPAPR_TCE
27#define __KVM_HAVE_PPC_SMT
28
25struct kvm_regs { 29struct kvm_regs {
26 __u64 pc; 30 __u64 pc;
27 __u64 cr; 31 __u64 cr;
@@ -272,4 +276,15 @@ struct kvm_guest_debug_arch {
272#define KVM_INTERRUPT_UNSET -2U 276#define KVM_INTERRUPT_UNSET -2U
273#define KVM_INTERRUPT_SET_LEVEL -3U 277#define KVM_INTERRUPT_SET_LEVEL -3U
274 278
279/* for KVM_CAP_SPAPR_TCE */
280struct kvm_create_spapr_tce {
281 __u64 liobn;
282 __u32 window_size;
283};
284
285/* for KVM_ALLOCATE_RMA */
286struct kvm_allocate_rma {
287 __u64 rma_size;
288};
289
275#endif /* __LINUX_KVM_POWERPC_H */ 290#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 0951b17f4eb5..7b1f0e0fc653 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -64,8 +64,12 @@
64#define BOOK3S_INTERRUPT_PROGRAM 0x700 64#define BOOK3S_INTERRUPT_PROGRAM 0x700
65#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800 65#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800
66#define BOOK3S_INTERRUPT_DECREMENTER 0x900 66#define BOOK3S_INTERRUPT_DECREMENTER 0x900
67#define BOOK3S_INTERRUPT_HV_DECREMENTER 0x980
67#define BOOK3S_INTERRUPT_SYSCALL 0xc00 68#define BOOK3S_INTERRUPT_SYSCALL 0xc00
68#define BOOK3S_INTERRUPT_TRACE 0xd00 69#define BOOK3S_INTERRUPT_TRACE 0xd00
70#define BOOK3S_INTERRUPT_H_DATA_STORAGE 0xe00
71#define BOOK3S_INTERRUPT_H_INST_STORAGE 0xe20
72#define BOOK3S_INTERRUPT_H_EMUL_ASSIST 0xe40
69#define BOOK3S_INTERRUPT_PERFMON 0xf00 73#define BOOK3S_INTERRUPT_PERFMON 0xf00
70#define BOOK3S_INTERRUPT_ALTIVEC 0xf20 74#define BOOK3S_INTERRUPT_ALTIVEC 0xf20
71#define BOOK3S_INTERRUPT_VSX 0xf40 75#define BOOK3S_INTERRUPT_VSX 0xf40
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index d62e703f1214..98da010252a3 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -24,20 +24,6 @@
24#include <linux/kvm_host.h> 24#include <linux/kvm_host.h>
25#include <asm/kvm_book3s_asm.h> 25#include <asm/kvm_book3s_asm.h>
26 26
27struct kvmppc_slb {
28 u64 esid;
29 u64 vsid;
30 u64 orige;
31 u64 origv;
32 bool valid : 1;
33 bool Ks : 1;
34 bool Kp : 1;
35 bool nx : 1;
36 bool large : 1; /* PTEs are 16MB */
37 bool tb : 1; /* 1TB segment */
38 bool class : 1;
39};
40
41struct kvmppc_bat { 27struct kvmppc_bat {
42 u64 raw; 28 u64 raw;
43 u32 bepi; 29 u32 bepi;
@@ -67,11 +53,22 @@ struct kvmppc_sid_map {
67#define VSID_POOL_SIZE (SID_CONTEXTS * 16) 53#define VSID_POOL_SIZE (SID_CONTEXTS * 16)
68#endif 54#endif
69 55
56struct hpte_cache {
57 struct hlist_node list_pte;
58 struct hlist_node list_pte_long;
59 struct hlist_node list_vpte;
60 struct hlist_node list_vpte_long;
61 struct rcu_head rcu_head;
62 u64 host_va;
63 u64 pfn;
64 ulong slot;
65 struct kvmppc_pte pte;
66};
67
70struct kvmppc_vcpu_book3s { 68struct kvmppc_vcpu_book3s {
71 struct kvm_vcpu vcpu; 69 struct kvm_vcpu vcpu;
72 struct kvmppc_book3s_shadow_vcpu *shadow_vcpu; 70 struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
73 struct kvmppc_sid_map sid_map[SID_MAP_NUM]; 71 struct kvmppc_sid_map sid_map[SID_MAP_NUM];
74 struct kvmppc_slb slb[64];
75 struct { 72 struct {
76 u64 esid; 73 u64 esid;
77 u64 vsid; 74 u64 vsid;
@@ -81,7 +78,6 @@ struct kvmppc_vcpu_book3s {
81 struct kvmppc_bat dbat[8]; 78 struct kvmppc_bat dbat[8];
82 u64 hid[6]; 79 u64 hid[6];
83 u64 gqr[8]; 80 u64 gqr[8];
84 int slb_nr;
85 u64 sdr1; 81 u64 sdr1;
86 u64 hior; 82 u64 hior;
87 u64 msr_mask; 83 u64 msr_mask;
@@ -93,7 +89,13 @@ struct kvmppc_vcpu_book3s {
93 u64 vsid_max; 89 u64 vsid_max;
94#endif 90#endif
95 int context_id[SID_CONTEXTS]; 91 int context_id[SID_CONTEXTS];
96 ulong prog_flags; /* flags to inject when giving a 700 trap */ 92
93 struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
94 struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
95 struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
96 struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
97 int hpte_cache_count;
98 spinlock_t mmu_lock;
97}; 99};
98 100
99#define CONTEXT_HOST 0 101#define CONTEXT_HOST 0
@@ -110,8 +112,10 @@ extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask)
110extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask); 112extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
111extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end); 113extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
112extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr); 114extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
115extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
113extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu); 116extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
114extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu); 117extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
118extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
115extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte); 119extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
116extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr); 120extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
117extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu); 121extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
@@ -123,19 +127,22 @@ extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
123extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte); 127extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
124extern int kvmppc_mmu_hpte_sysinit(void); 128extern int kvmppc_mmu_hpte_sysinit(void);
125extern void kvmppc_mmu_hpte_sysexit(void); 129extern void kvmppc_mmu_hpte_sysexit(void);
130extern int kvmppc_mmu_hv_init(void);
126 131
127extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data); 132extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
128extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data); 133extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
129extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec); 134extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
135extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
130extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, 136extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
131 bool upper, u32 val); 137 bool upper, u32 val);
132extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); 138extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
133extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu); 139extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
134extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); 140extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
135 141
136extern ulong kvmppc_trampoline_lowmem; 142extern void kvmppc_handler_lowmem_trampoline(void);
137extern ulong kvmppc_trampoline_enter; 143extern void kvmppc_handler_trampoline_enter(void);
138extern void kvmppc_rmcall(ulong srr0, ulong srr1); 144extern void kvmppc_rmcall(ulong srr0, ulong srr1);
145extern void kvmppc_hv_entry_trampoline(void);
139extern void kvmppc_load_up_fpu(void); 146extern void kvmppc_load_up_fpu(void);
140extern void kvmppc_load_up_altivec(void); 147extern void kvmppc_load_up_altivec(void);
141extern void kvmppc_load_up_vsx(void); 148extern void kvmppc_load_up_vsx(void);
@@ -147,15 +154,32 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
147 return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu); 154 return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
148} 155}
149 156
150static inline ulong dsisr(void) 157extern void kvm_return_point(void);
158
159/* Also add subarch specific defines */
160
161#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
162#include <asm/kvm_book3s_32.h>
163#endif
164#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
165#include <asm/kvm_book3s_64.h>
166#endif
167
168#ifdef CONFIG_KVM_BOOK3S_PR
169
170static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
151{ 171{
152 ulong r; 172 return to_book3s(vcpu)->hior;
153 asm ( "mfdsisr %0 " : "=r" (r) );
154 return r;
155} 173}
156 174
157extern void kvm_return_point(void); 175static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
158static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu); 176 unsigned long pending_now, unsigned long old_pending)
177{
178 if (pending_now)
179 vcpu->arch.shared->int_pending = 1;
180 else if (old_pending)
181 vcpu->arch.shared->int_pending = 0;
182}
159 183
160static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) 184static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
161{ 185{
@@ -244,6 +268,120 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
244 return to_svcpu(vcpu)->fault_dar; 268 return to_svcpu(vcpu)->fault_dar;
245} 269}
246 270
271static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
272{
273 ulong crit_raw = vcpu->arch.shared->critical;
274 ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
275 bool crit;
276
277 /* Truncate crit indicators in 32 bit mode */
278 if (!(vcpu->arch.shared->msr & MSR_SF)) {
279 crit_raw &= 0xffffffff;
280 crit_r1 &= 0xffffffff;
281 }
282
283 /* Critical section when crit == r1 */
284 crit = (crit_raw == crit_r1);
285 /* ... and we're in supervisor mode */
286 crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
287
288 return crit;
289}
290#else /* CONFIG_KVM_BOOK3S_PR */
291
292static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
293{
294 return 0;
295}
296
297static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
298 unsigned long pending_now, unsigned long old_pending)
299{
300}
301
302static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
303{
304 vcpu->arch.gpr[num] = val;
305}
306
307static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
308{
309 return vcpu->arch.gpr[num];
310}
311
312static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
313{
314 vcpu->arch.cr = val;
315}
316
317static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
318{
319 return vcpu->arch.cr;
320}
321
322static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
323{
324 vcpu->arch.xer = val;
325}
326
327static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
328{
329 return vcpu->arch.xer;
330}
331
332static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
333{
334 vcpu->arch.ctr = val;
335}
336
337static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
338{
339 return vcpu->arch.ctr;
340}
341
342static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
343{
344 vcpu->arch.lr = val;
345}
346
347static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
348{
349 return vcpu->arch.lr;
350}
351
352static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
353{
354 vcpu->arch.pc = val;
355}
356
357static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
358{
359 return vcpu->arch.pc;
360}
361
362static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
363{
364 ulong pc = kvmppc_get_pc(vcpu);
365
366 /* Load the instruction manually if it failed to do so in the
367 * exit path */
368 if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
369 kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
370
371 return vcpu->arch.last_inst;
372}
373
374static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
375{
376 return vcpu->arch.fault_dar;
377}
378
379static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
380{
381 return false;
382}
383#endif
384
247/* Magic register values loaded into r3 and r4 before the 'sc' assembly 385/* Magic register values loaded into r3 and r4 before the 'sc' assembly
248 * instruction for the OSI hypercalls */ 386 * instruction for the OSI hypercalls */
249#define OSI_SC_MAGIC_R3 0x113724FA 387#define OSI_SC_MAGIC_R3 0x113724FA
@@ -251,12 +389,4 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
251 389
252#define INS_DCBZ 0x7c0007ec 390#define INS_DCBZ 0x7c0007ec
253 391
254/* Also add subarch specific defines */
255
256#ifdef CONFIG_PPC_BOOK3S_32
257#include <asm/kvm_book3s_32.h>
258#else
259#include <asm/kvm_book3s_64.h>
260#endif
261
262#endif /* __ASM_KVM_BOOK3S_H__ */ 392#endif /* __ASM_KVM_BOOK3S_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 4cadd612d575..e43fe42b9875 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -20,9 +20,13 @@
20#ifndef __ASM_KVM_BOOK3S_64_H__ 20#ifndef __ASM_KVM_BOOK3S_64_H__
21#define __ASM_KVM_BOOK3S_64_H__ 21#define __ASM_KVM_BOOK3S_64_H__
22 22
23#ifdef CONFIG_KVM_BOOK3S_PR
23static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu) 24static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
24{ 25{
25 return &get_paca()->shadow_vcpu; 26 return &get_paca()->shadow_vcpu;
26} 27}
28#endif
29
30#define SPAPR_TCE_SHIFT 12
27 31
28#endif /* __ASM_KVM_BOOK3S_64_H__ */ 32#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index d5a8a3861635..ef7b3688c3b6 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -60,6 +60,36 @@ kvmppc_resume_\intno:
60 60
61#else /*__ASSEMBLY__ */ 61#else /*__ASSEMBLY__ */
62 62
63/*
64 * This struct goes in the PACA on 64-bit processors. It is used
65 * to store host state that needs to be saved when we enter a guest
66 * and restored when we exit, but isn't specific to any particular
67 * guest or vcpu. It also has some scratch fields used by the guest
68 * exit code.
69 */
70struct kvmppc_host_state {
71 ulong host_r1;
72 ulong host_r2;
73 ulong host_msr;
74 ulong vmhandler;
75 ulong scratch0;
76 ulong scratch1;
77 u8 in_guest;
78
79#ifdef CONFIG_KVM_BOOK3S_64_HV
80 struct kvm_vcpu *kvm_vcpu;
81 struct kvmppc_vcore *kvm_vcore;
82 unsigned long xics_phys;
83 u64 dabr;
84 u64 host_mmcr[3];
85 u32 host_pmc[8];
86 u64 host_purr;
87 u64 host_spurr;
88 u64 host_dscr;
89 u64 dec_expires;
90#endif
91};
92
63struct kvmppc_book3s_shadow_vcpu { 93struct kvmppc_book3s_shadow_vcpu {
64 ulong gpr[14]; 94 ulong gpr[14];
65 u32 cr; 95 u32 cr;
@@ -73,17 +103,12 @@ struct kvmppc_book3s_shadow_vcpu {
73 ulong shadow_srr1; 103 ulong shadow_srr1;
74 ulong fault_dar; 104 ulong fault_dar;
75 105
76 ulong host_r1;
77 ulong host_r2;
78 ulong handler;
79 ulong scratch0;
80 ulong scratch1;
81 ulong vmhandler;
82 u8 in_guest;
83
84#ifdef CONFIG_PPC_BOOK3S_32 106#ifdef CONFIG_PPC_BOOK3S_32
85 u32 sr[16]; /* Guest SRs */ 107 u32 sr[16]; /* Guest SRs */
108
109 struct kvmppc_host_state hstate;
86#endif 110#endif
111
87#ifdef CONFIG_PPC_BOOK3S_64 112#ifdef CONFIG_PPC_BOOK3S_64
88 u8 slb_max; /* highest used guest slb entry */ 113 u8 slb_max; /* highest used guest slb entry */
89 struct { 114 struct {
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index 9c9ba3d59b1b..a90e09188777 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -93,4 +93,8 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
93 return vcpu->arch.fault_dear; 93 return vcpu->arch.fault_dear;
94} 94}
95 95
96static inline ulong kvmppc_get_msr(struct kvm_vcpu *vcpu)
97{
98 return vcpu->arch.shared->msr;
99}
96#endif /* __ASM_KVM_BOOKE_H__ */ 100#endif /* __ASM_KVM_BOOKE_H__ */
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
index 7a2a565f88c4..adbfca9dd100 100644
--- a/arch/powerpc/include/asm/kvm_e500.h
+++ b/arch/powerpc/include/asm/kvm_e500.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved. 2 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
3 * 3 *
4 * Author: Yu Liu, <yu.liu@freescale.com> 4 * Author: Yu Liu, <yu.liu@freescale.com>
5 * 5 *
@@ -29,17 +29,25 @@ struct tlbe{
29 u32 mas7; 29 u32 mas7;
30}; 30};
31 31
32#define E500_TLB_VALID 1
33#define E500_TLB_DIRTY 2
34
35struct tlbe_priv {
36 pfn_t pfn;
37 unsigned int flags; /* E500_TLB_* */
38};
39
40struct vcpu_id_table;
41
32struct kvmppc_vcpu_e500 { 42struct kvmppc_vcpu_e500 {
33 /* Unmodified copy of the guest's TLB. */ 43 /* Unmodified copy of the guest's TLB. */
34 struct tlbe *guest_tlb[E500_TLB_NUM]; 44 struct tlbe *gtlb_arch[E500_TLB_NUM];
35 /* TLB that's actually used when the guest is running. */
36 struct tlbe *shadow_tlb[E500_TLB_NUM];
37 /* Pages which are referenced in the shadow TLB. */
38 struct page **shadow_pages[E500_TLB_NUM];
39 45
40 unsigned int guest_tlb_size[E500_TLB_NUM]; 46 /* KVM internal information associated with each guest TLB entry */
41 unsigned int shadow_tlb_size[E500_TLB_NUM]; 47 struct tlbe_priv *gtlb_priv[E500_TLB_NUM];
42 unsigned int guest_tlb_nv[E500_TLB_NUM]; 48
49 unsigned int gtlb_size[E500_TLB_NUM];
50 unsigned int gtlb_nv[E500_TLB_NUM];
43 51
44 u32 host_pid[E500_PID_NUM]; 52 u32 host_pid[E500_PID_NUM];
45 u32 pid[E500_PID_NUM]; 53 u32 pid[E500_PID_NUM];
@@ -53,6 +61,10 @@ struct kvmppc_vcpu_e500 {
53 u32 mas5; 61 u32 mas5;
54 u32 mas6; 62 u32 mas6;
55 u32 mas7; 63 u32 mas7;
64
65 /* vcpu id table */
66 struct vcpu_id_table *idt;
67
56 u32 l1csr0; 68 u32 l1csr0;
57 u32 l1csr1; 69 u32 l1csr1;
58 u32 hid0; 70 u32 hid0;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 186f150b9b89..cc22b282d755 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -25,15 +25,23 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/kvm_types.h> 27#include <linux/kvm_types.h>
28#include <linux/threads.h>
29#include <linux/spinlock.h>
28#include <linux/kvm_para.h> 30#include <linux/kvm_para.h>
31#include <linux/list.h>
32#include <linux/atomic.h>
29#include <asm/kvm_asm.h> 33#include <asm/kvm_asm.h>
34#include <asm/processor.h>
30 35
31#define KVM_MAX_VCPUS 1 36#define KVM_MAX_VCPUS NR_CPUS
37#define KVM_MAX_VCORES NR_CPUS
32#define KVM_MEMORY_SLOTS 32 38#define KVM_MEMORY_SLOTS 32
33/* memory slots that does not exposed to userspace */ 39/* memory slots that does not exposed to userspace */
34#define KVM_PRIVATE_MEM_SLOTS 4 40#define KVM_PRIVATE_MEM_SLOTS 4
35 41
42#ifdef CONFIG_KVM_MMIO
36#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 43#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
44#endif
37 45
38/* We don't currently support large pages. */ 46/* We don't currently support large pages. */
39#define KVM_HPAGE_GFN_SHIFT(x) 0 47#define KVM_HPAGE_GFN_SHIFT(x) 0
@@ -57,6 +65,10 @@ struct kvm;
57struct kvm_run; 65struct kvm_run;
58struct kvm_vcpu; 66struct kvm_vcpu;
59 67
68struct lppaca;
69struct slb_shadow;
70struct dtl;
71
60struct kvm_vm_stat { 72struct kvm_vm_stat {
61 u32 remote_tlb_flush; 73 u32 remote_tlb_flush;
62}; 74};
@@ -133,9 +145,74 @@ struct kvmppc_exit_timing {
133 }; 145 };
134}; 146};
135 147
148struct kvmppc_pginfo {
149 unsigned long pfn;
150 atomic_t refcnt;
151};
152
153struct kvmppc_spapr_tce_table {
154 struct list_head list;
155 struct kvm *kvm;
156 u64 liobn;
157 u32 window_size;
158 struct page *pages[0];
159};
160
161struct kvmppc_rma_info {
162 void *base_virt;
163 unsigned long base_pfn;
164 unsigned long npages;
165 struct list_head list;
166 atomic_t use_count;
167};
168
136struct kvm_arch { 169struct kvm_arch {
170#ifdef CONFIG_KVM_BOOK3S_64_HV
171 unsigned long hpt_virt;
172 unsigned long ram_npages;
173 unsigned long ram_psize;
174 unsigned long ram_porder;
175 struct kvmppc_pginfo *ram_pginfo;
176 unsigned int lpid;
177 unsigned int host_lpid;
178 unsigned long host_lpcr;
179 unsigned long sdr1;
180 unsigned long host_sdr1;
181 int tlbie_lock;
182 int n_rma_pages;
183 unsigned long lpcr;
184 unsigned long rmor;
185 struct kvmppc_rma_info *rma;
186 struct list_head spapr_tce_tables;
187 unsigned short last_vcpu[NR_CPUS];
188 struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
189#endif /* CONFIG_KVM_BOOK3S_64_HV */
137}; 190};
138 191
192/*
193 * Struct for a virtual core.
194 * Note: entry_exit_count combines an entry count in the bottom 8 bits
195 * and an exit count in the next 8 bits. This is so that we can
196 * atomically increment the entry count iff the exit count is 0
197 * without taking the lock.
198 */
199struct kvmppc_vcore {
200 int n_runnable;
201 int n_blocked;
202 int num_threads;
203 int entry_exit_count;
204 int n_woken;
205 int nap_count;
206 u16 pcpu;
207 u8 vcore_running;
208 u8 in_guest;
209 struct list_head runnable_threads;
210 spinlock_t lock;
211};
212
213#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
214#define VCORE_EXIT_COUNT(vc) ((vc)->entry_exit_count >> 8)
215
139struct kvmppc_pte { 216struct kvmppc_pte {
140 ulong eaddr; 217 ulong eaddr;
141 u64 vpage; 218 u64 vpage;
@@ -163,16 +240,18 @@ struct kvmppc_mmu {
163 bool (*is_dcbz32)(struct kvm_vcpu *vcpu); 240 bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
164}; 241};
165 242
166struct hpte_cache { 243struct kvmppc_slb {
167 struct hlist_node list_pte; 244 u64 esid;
168 struct hlist_node list_pte_long; 245 u64 vsid;
169 struct hlist_node list_vpte; 246 u64 orige;
170 struct hlist_node list_vpte_long; 247 u64 origv;
171 struct rcu_head rcu_head; 248 bool valid : 1;
172 u64 host_va; 249 bool Ks : 1;
173 u64 pfn; 250 bool Kp : 1;
174 ulong slot; 251 bool nx : 1;
175 struct kvmppc_pte pte; 252 bool large : 1; /* PTEs are 16MB */
253 bool tb : 1; /* 1TB segment */
254 bool class : 1;
176}; 255};
177 256
178struct kvm_vcpu_arch { 257struct kvm_vcpu_arch {
@@ -187,6 +266,9 @@ struct kvm_vcpu_arch {
187 ulong highmem_handler; 266 ulong highmem_handler;
188 ulong rmcall; 267 ulong rmcall;
189 ulong host_paca_phys; 268 ulong host_paca_phys;
269 struct kvmppc_slb slb[64];
270 int slb_max; /* 1 + index of last valid entry in slb[] */
271 int slb_nr; /* total number of entries in SLB */
190 struct kvmppc_mmu mmu; 272 struct kvmppc_mmu mmu;
191#endif 273#endif
192 274
@@ -195,13 +277,19 @@ struct kvm_vcpu_arch {
195 u64 fpr[32]; 277 u64 fpr[32];
196 u64 fpscr; 278 u64 fpscr;
197 279
280#ifdef CONFIG_SPE
281 ulong evr[32];
282 ulong spefscr;
283 ulong host_spefscr;
284 u64 acc;
285#endif
198#ifdef CONFIG_ALTIVEC 286#ifdef CONFIG_ALTIVEC
199 vector128 vr[32]; 287 vector128 vr[32];
200 vector128 vscr; 288 vector128 vscr;
201#endif 289#endif
202 290
203#ifdef CONFIG_VSX 291#ifdef CONFIG_VSX
204 u64 vsr[32]; 292 u64 vsr[64];
205#endif 293#endif
206 294
207#ifdef CONFIG_PPC_BOOK3S 295#ifdef CONFIG_PPC_BOOK3S
@@ -209,22 +297,27 @@ struct kvm_vcpu_arch {
209 u32 qpr[32]; 297 u32 qpr[32];
210#endif 298#endif
211 299
212#ifdef CONFIG_BOOKE
213 ulong pc; 300 ulong pc;
214 ulong ctr; 301 ulong ctr;
215 ulong lr; 302 ulong lr;
216 303
217 ulong xer; 304 ulong xer;
218 u32 cr; 305 u32 cr;
219#endif
220 306
221#ifdef CONFIG_PPC_BOOK3S 307#ifdef CONFIG_PPC_BOOK3S
222 ulong shadow_msr;
223 ulong hflags; 308 ulong hflags;
224 ulong guest_owned_ext; 309 ulong guest_owned_ext;
310 ulong purr;
311 ulong spurr;
312 ulong dscr;
313 ulong amr;
314 ulong uamor;
315 u32 ctrl;
316 ulong dabr;
225#endif 317#endif
226 u32 vrsave; /* also USPRG0 */ 318 u32 vrsave; /* also USPRG0 */
227 u32 mmucr; 319 u32 mmucr;
320 ulong shadow_msr;
228 ulong sprg4; 321 ulong sprg4;
229 ulong sprg5; 322 ulong sprg5;
230 ulong sprg6; 323 ulong sprg6;
@@ -249,6 +342,7 @@ struct kvm_vcpu_arch {
249 u32 pvr; 342 u32 pvr;
250 343
251 u32 shadow_pid; 344 u32 shadow_pid;
345 u32 shadow_pid1;
252 u32 pid; 346 u32 pid;
253 u32 swap_pid; 347 u32 swap_pid;
254 348
@@ -258,6 +352,9 @@ struct kvm_vcpu_arch {
258 u32 dbcr1; 352 u32 dbcr1;
259 u32 dbsr; 353 u32 dbsr;
260 354
355 u64 mmcr[3];
356 u32 pmc[8];
357
261#ifdef CONFIG_KVM_EXIT_TIMING 358#ifdef CONFIG_KVM_EXIT_TIMING
262 struct mutex exit_timing_lock; 359 struct mutex exit_timing_lock;
263 struct kvmppc_exit_timing timing_exit; 360 struct kvmppc_exit_timing timing_exit;
@@ -272,8 +369,12 @@ struct kvm_vcpu_arch {
272 struct dentry *debugfs_exit_timing; 369 struct dentry *debugfs_exit_timing;
273#endif 370#endif
274 371
372#ifdef CONFIG_PPC_BOOK3S
373 ulong fault_dar;
374 u32 fault_dsisr;
375#endif
376
275#ifdef CONFIG_BOOKE 377#ifdef CONFIG_BOOKE
276 u32 last_inst;
277 ulong fault_dear; 378 ulong fault_dear;
278 ulong fault_esr; 379 ulong fault_esr;
279 ulong queued_dear; 380 ulong queued_dear;
@@ -288,25 +389,47 @@ struct kvm_vcpu_arch {
288 u8 dcr_is_write; 389 u8 dcr_is_write;
289 u8 osi_needed; 390 u8 osi_needed;
290 u8 osi_enabled; 391 u8 osi_enabled;
392 u8 hcall_needed;
291 393
292 u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */ 394 u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
293 395
294 struct hrtimer dec_timer; 396 struct hrtimer dec_timer;
295 struct tasklet_struct tasklet; 397 struct tasklet_struct tasklet;
296 u64 dec_jiffies; 398 u64 dec_jiffies;
399 u64 dec_expires;
297 unsigned long pending_exceptions; 400 unsigned long pending_exceptions;
401 u16 last_cpu;
402 u8 ceded;
403 u8 prodded;
404 u32 last_inst;
405
406 struct lppaca *vpa;
407 struct slb_shadow *slb_shadow;
408 struct dtl *dtl;
409 struct dtl *dtl_end;
410
411 struct kvmppc_vcore *vcore;
412 int ret;
413 int trap;
414 int state;
415 int ptid;
416 wait_queue_head_t cpu_run;
417
298 struct kvm_vcpu_arch_shared *shared; 418 struct kvm_vcpu_arch_shared *shared;
299 unsigned long magic_page_pa; /* phys addr to map the magic page to */ 419 unsigned long magic_page_pa; /* phys addr to map the magic page to */
300 unsigned long magic_page_ea; /* effect. addr to map the magic page to */ 420 unsigned long magic_page_ea; /* effect. addr to map the magic page to */
301 421
302#ifdef CONFIG_PPC_BOOK3S 422#ifdef CONFIG_KVM_BOOK3S_64_HV
303 struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE]; 423 struct kvm_vcpu_arch_shared shregs;
304 struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG]; 424
305 struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE]; 425 struct list_head run_list;
306 struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG]; 426 struct task_struct *run_task;
307 int hpte_cache_count; 427 struct kvm_run *kvm_run;
308 spinlock_t mmu_lock;
309#endif 428#endif
310}; 429};
311 430
431#define KVMPPC_VCPU_BUSY_IN_HOST 0
432#define KVMPPC_VCPU_BLOCKED 1
433#define KVMPPC_VCPU_RUNNABLE 2
434
312#endif /* __POWERPC_KVM_HOST_H__ */ 435#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9345238edecf..d121f49d62b8 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -33,6 +33,9 @@
33#else 33#else
34#include <asm/kvm_booke.h> 34#include <asm/kvm_booke.h>
35#endif 35#endif
36#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
37#include <asm/paca.h>
38#endif
36 39
37enum emulation_result { 40enum emulation_result {
38 EMULATE_DONE, /* no further processing */ 41 EMULATE_DONE, /* no further processing */
@@ -42,6 +45,7 @@ enum emulation_result {
42 EMULATE_AGAIN, /* something went wrong. go again */ 45 EMULATE_AGAIN, /* something went wrong. go again */
43}; 46};
44 47
48extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
45extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); 49extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
46extern char kvmppc_handlers_start[]; 50extern char kvmppc_handlers_start[];
47extern unsigned long kvmppc_handler_len; 51extern unsigned long kvmppc_handler_len;
@@ -109,6 +113,27 @@ extern void kvmppc_booke_exit(void);
109 113
110extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); 114extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
111extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu); 115extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
116extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
117
118extern long kvmppc_alloc_hpt(struct kvm *kvm);
119extern void kvmppc_free_hpt(struct kvm *kvm);
120extern long kvmppc_prepare_vrma(struct kvm *kvm,
121 struct kvm_userspace_memory_region *mem);
122extern void kvmppc_map_vrma(struct kvm *kvm,
123 struct kvm_userspace_memory_region *mem);
124extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
125extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
126 struct kvm_create_spapr_tce *args);
127extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
128 struct kvm_allocate_rma *rma);
129extern struct kvmppc_rma_info *kvm_alloc_rma(void);
130extern void kvm_release_rma(struct kvmppc_rma_info *ri);
131extern int kvmppc_core_init_vm(struct kvm *kvm);
132extern void kvmppc_core_destroy_vm(struct kvm *kvm);
133extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
134 struct kvm_userspace_memory_region *mem);
135extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
136 struct kvm_userspace_memory_region *mem);
112 137
113/* 138/*
114 * Cuts out inst bits with ordering according to spec. 139 * Cuts out inst bits with ordering according to spec.
@@ -151,4 +176,20 @@ int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
151 176
152void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid); 177void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
153 178
179#ifdef CONFIG_KVM_BOOK3S_64_HV
180static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
181{
182 paca[cpu].kvm_hstate.xics_phys = addr;
183}
184
185extern void kvm_rma_init(void);
186
187#else
188static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
189{}
190
191static inline void kvm_rma_init(void)
192{}
193#endif
194
154#endif /* __POWERPC_KVM_PPC_H__ */ 195#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index c2410af6bfd9..b8da91363864 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -2,7 +2,7 @@
2#define _ARCH_POWERPC_LOCAL_H 2#define _ARCH_POWERPC_LOCAL_H
3 3
4#include <linux/percpu.h> 4#include <linux/percpu.h>
5#include <asm/atomic.h> 5#include <linux/atomic.h>
6 6
7typedef struct 7typedef struct
8{ 8{
diff --git a/arch/powerpc/include/asm/macio.h b/arch/powerpc/include/asm/macio.h
index 7ab82c825a03..27af7f8bbb8d 100644
--- a/arch/powerpc/include/asm/macio.h
+++ b/arch/powerpc/include/asm/macio.h
@@ -76,7 +76,7 @@ static inline unsigned long macio_resource_len(struct macio_dev *dev, int resour
76 struct resource *res = &dev->resource[resource_no]; 76 struct resource *res = &dev->resource[resource_no];
77 if (res->start == 0 || res->end == 0 || res->end < res->start) 77 if (res->start == 0 || res->end == 0 || res->end < res->start)
78 return 0; 78 return 0;
79 return res->end - res->start + 1; 79 return resource_size(res);
80} 80}
81 81
82extern int macio_enable_devres(struct macio_dev *dev); 82extern int macio_enable_devres(struct macio_dev *dev);
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index d865bd909c7d..b445e0af4c2b 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -90,13 +90,19 @@ extern char initial_stab[];
90 90
91#define HPTE_R_PP0 ASM_CONST(0x8000000000000000) 91#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
92#define HPTE_R_TS ASM_CONST(0x4000000000000000) 92#define HPTE_R_TS ASM_CONST(0x4000000000000000)
93#define HPTE_R_KEY_HI ASM_CONST(0x3000000000000000)
93#define HPTE_R_RPN_SHIFT 12 94#define HPTE_R_RPN_SHIFT 12
94#define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000) 95#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)
95#define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff)
96#define HPTE_R_PP ASM_CONST(0x0000000000000003) 96#define HPTE_R_PP ASM_CONST(0x0000000000000003)
97#define HPTE_R_N ASM_CONST(0x0000000000000004) 97#define HPTE_R_N ASM_CONST(0x0000000000000004)
98#define HPTE_R_G ASM_CONST(0x0000000000000008)
99#define HPTE_R_M ASM_CONST(0x0000000000000010)
100#define HPTE_R_I ASM_CONST(0x0000000000000020)
101#define HPTE_R_W ASM_CONST(0x0000000000000040)
102#define HPTE_R_WIMG ASM_CONST(0x0000000000000078)
98#define HPTE_R_C ASM_CONST(0x0000000000000080) 103#define HPTE_R_C ASM_CONST(0x0000000000000080)
99#define HPTE_R_R ASM_CONST(0x0000000000000100) 104#define HPTE_R_R ASM_CONST(0x0000000000000100)
105#define HPTE_R_KEY_LO ASM_CONST(0x0000000000000e00)
100 106
101#define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000) 107#define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000)
102#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000) 108#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000)
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 4138b21ae80a..698b30638681 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -115,14 +115,24 @@
115#ifndef __ASSEMBLY__ 115#ifndef __ASSEMBLY__
116#include <asm/cputable.h> 116#include <asm/cputable.h>
117 117
118#ifdef CONFIG_PPC_FSL_BOOK3E
119#include <asm/percpu.h>
120DECLARE_PER_CPU(int, next_tlbcam_idx);
121#endif
122
118static inline int mmu_has_feature(unsigned long feature) 123static inline int mmu_has_feature(unsigned long feature)
119{ 124{
120 return (cur_cpu_spec->mmu_features & feature); 125 return (cur_cpu_spec->mmu_features & feature);
121} 126}
122 127
128static inline void mmu_clear_feature(unsigned long feature)
129{
130 cur_cpu_spec->mmu_features &= ~feature;
131}
132
123extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup; 133extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
124 134
125/* MMU initialization (64-bit only fo now) */ 135/* MMU initialization */
126extern void early_init_mmu(void); 136extern void early_init_mmu(void);
127extern void early_init_mmu_secondary(void); 137extern void early_init_mmu_secondary(void);
128 138
diff --git a/arch/powerpc/include/asm/pSeries_reconfig.h b/arch/powerpc/include/asm/pSeries_reconfig.h
index 89d2f99c1bf4..23cd6cc30bcf 100644
--- a/arch/powerpc/include/asm/pSeries_reconfig.h
+++ b/arch/powerpc/include/asm/pSeries_reconfig.h
@@ -17,7 +17,7 @@
17#ifdef CONFIG_PPC_PSERIES 17#ifdef CONFIG_PPC_PSERIES
18extern int pSeries_reconfig_notifier_register(struct notifier_block *); 18extern int pSeries_reconfig_notifier_register(struct notifier_block *);
19extern void pSeries_reconfig_notifier_unregister(struct notifier_block *); 19extern void pSeries_reconfig_notifier_unregister(struct notifier_block *);
20extern struct blocking_notifier_head pSeries_reconfig_chain; 20extern int pSeries_reconfig_notify(unsigned long action, void *p);
21/* Not the best place to put this, will be fixed when we move some 21/* Not the best place to put this, will be fixed when we move some
22 * of the rtas suspend-me stuff to pseries */ 22 * of the rtas suspend-me stuff to pseries */
23extern void pSeries_coalesce_init(void); 23extern void pSeries_coalesce_init(void);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 74126765106a..516bfb3f47d9 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -103,11 +103,12 @@ struct paca_struct {
103#endif /* CONFIG_PPC_STD_MMU_64 */ 103#endif /* CONFIG_PPC_STD_MMU_64 */
104 104
105#ifdef CONFIG_PPC_BOOK3E 105#ifdef CONFIG_PPC_BOOK3E
106 pgd_t *pgd; /* Current PGD */
107 pgd_t *kernel_pgd; /* Kernel PGD */
108 u64 exgen[8] __attribute__((aligned(0x80))); 106 u64 exgen[8] __attribute__((aligned(0x80)));
107 /* Keep pgd in the same cacheline as the start of extlb */
108 pgd_t *pgd __attribute__((aligned(0x80))); /* Current PGD */
109 pgd_t *kernel_pgd; /* Kernel PGD */
109 /* We can have up to 3 levels of reentrancy in the TLB miss handler */ 110 /* We can have up to 3 levels of reentrancy in the TLB miss handler */
110 u64 extlb[3][EX_TLB_SIZE / sizeof(u64)] __attribute__((aligned(0x80))); 111 u64 extlb[3][EX_TLB_SIZE / sizeof(u64)];
111 u64 exmc[8]; /* used for machine checks */ 112 u64 exmc[8]; /* used for machine checks */
112 u64 excrit[8]; /* used for crit interrupts */ 113 u64 excrit[8]; /* used for crit interrupts */
113 u64 exdbg[8]; /* used for debug interrupts */ 114 u64 exdbg[8]; /* used for debug interrupts */
@@ -147,9 +148,12 @@ struct paca_struct {
147 struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */ 148 struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */
148 149
149#ifdef CONFIG_KVM_BOOK3S_HANDLER 150#ifdef CONFIG_KVM_BOOK3S_HANDLER
151#ifdef CONFIG_KVM_BOOK3S_PR
150 /* We use this to store guest state in */ 152 /* We use this to store guest state in */
151 struct kvmppc_book3s_shadow_vcpu shadow_vcpu; 153 struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
152#endif 154#endif
155 struct kvmppc_host_state kvm_hstate;
156#endif
153}; 157};
154 158
155extern struct paca_struct *paca; 159extern struct paca_struct *paca;
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 8a9cb9f3ed02..56b879ab3a40 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -123,15 +123,9 @@ static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
123 123
124#ifndef CONFIG_PPC64 124#ifndef CONFIG_PPC64
125 125
126static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) 126extern int pci_device_from_OF_node(struct device_node *node,
127{ 127 u8 *bus, u8 *devfn);
128 struct pci_controller *host; 128extern void pci_create_OF_bus_map(void);
129
130 if (bus->self)
131 return pci_device_to_OF_node(bus->self);
132 host = pci_bus_to_host(bus);
133 return host ? host->dn : NULL;
134}
135 129
136static inline int isa_vaddr_is_ioport(void __iomem *address) 130static inline int isa_vaddr_is_ioport(void __iomem *address)
137{ 131{
@@ -175,17 +169,8 @@ struct pci_dn {
175/* Get the pointer to a device_node's pci_dn */ 169/* Get the pointer to a device_node's pci_dn */
176#define PCI_DN(dn) ((struct pci_dn *) (dn)->data) 170#define PCI_DN(dn) ((struct pci_dn *) (dn)->data)
177 171
178extern struct device_node *fetch_dev_dn(struct pci_dev *dev);
179extern void * update_dn_pci_info(struct device_node *dn, void *data); 172extern void * update_dn_pci_info(struct device_node *dn, void *data);
180 173
181/* Get a device_node from a pci_dev. This code must be fast except
182 * in the case where the sysdata is incorrect and needs to be fixed
183 * up (this will only happen once). */
184static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev)
185{
186 return dev->dev.of_node ? dev->dev.of_node : fetch_dev_dn(dev);
187}
188
189static inline int pci_device_from_OF_node(struct device_node *np, 174static inline int pci_device_from_OF_node(struct device_node *np,
190 u8 *bus, u8 *devfn) 175 u8 *bus, u8 *devfn)
191{ 176{
@@ -196,14 +181,6 @@ static inline int pci_device_from_OF_node(struct device_node *np,
196 return 0; 181 return 0;
197} 182}
198 183
199static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
200{
201 if (bus->self)
202 return pci_device_to_OF_node(bus->self);
203 else
204 return bus->dev.of_node; /* Must be root bus (PHB) */
205}
206
207/** Find the bus corresponding to the indicated device node */ 184/** Find the bus corresponding to the indicated device node */
208extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn); 185extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn);
209 186
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index b9a40faca93f..49c3de582be0 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -179,8 +179,7 @@ extern int remove_phb_dynamic(struct pci_controller *phb);
179extern struct pci_dev *of_create_pci_dev(struct device_node *node, 179extern struct pci_dev *of_create_pci_dev(struct device_node *node,
180 struct pci_bus *bus, int devfn); 180 struct pci_bus *bus, int devfn);
181 181
182extern void of_scan_pci_bridge(struct device_node *node, 182extern void of_scan_pci_bridge(struct pci_dev *dev);
183 struct pci_dev *dev);
184 183
185extern void of_scan_bus(struct device_node *node, struct pci_bus *bus); 184extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
186extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus); 185extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 81576ee0cfb1..c4205616dfb5 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -357,7 +357,8 @@ void pgtable_cache_init(void);
357/* 357/*
358 * find_linux_pte returns the address of a linux pte for a given 358 * find_linux_pte returns the address of a linux pte for a given
359 * effective address and directory. If not found, it returns zero. 359 * effective address and directory. If not found, it returns zero.
360 */static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea) 360 */
361static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
361{ 362{
362 pgd_t *pg; 363 pgd_t *pg;
363 pud_t *pu; 364 pud_t *pu;
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index e472659d906c..e980faae4225 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -71,6 +71,42 @@
71#define PPC_INST_ERATSX 0x7c000126 71#define PPC_INST_ERATSX 0x7c000126
72#define PPC_INST_ERATSX_DOT 0x7c000127 72#define PPC_INST_ERATSX_DOT 0x7c000127
73 73
74/* Misc instructions for BPF compiler */
75#define PPC_INST_LD 0xe8000000
76#define PPC_INST_LHZ 0xa0000000
77#define PPC_INST_LWZ 0x80000000
78#define PPC_INST_STD 0xf8000000
79#define PPC_INST_STDU 0xf8000001
80#define PPC_INST_MFLR 0x7c0802a6
81#define PPC_INST_MTLR 0x7c0803a6
82#define PPC_INST_CMPWI 0x2c000000
83#define PPC_INST_CMPDI 0x2c200000
84#define PPC_INST_CMPLW 0x7c000040
85#define PPC_INST_CMPLWI 0x28000000
86#define PPC_INST_ADDI 0x38000000
87#define PPC_INST_ADDIS 0x3c000000
88#define PPC_INST_ADD 0x7c000214
89#define PPC_INST_SUB 0x7c000050
90#define PPC_INST_BLR 0x4e800020
91#define PPC_INST_BLRL 0x4e800021
92#define PPC_INST_MULLW 0x7c0001d6
93#define PPC_INST_MULHWU 0x7c000016
94#define PPC_INST_MULLI 0x1c000000
95#define PPC_INST_DIVWU 0x7c0003d6
96#define PPC_INST_RLWINM 0x54000000
97#define PPC_INST_RLDICR 0x78000004
98#define PPC_INST_SLW 0x7c000030
99#define PPC_INST_SRW 0x7c000430
100#define PPC_INST_AND 0x7c000038
101#define PPC_INST_ANDDOT 0x7c000039
102#define PPC_INST_OR 0x7c000378
103#define PPC_INST_ANDI 0x70000000
104#define PPC_INST_ORI 0x60000000
105#define PPC_INST_ORIS 0x64000000
106#define PPC_INST_NEG 0x7c0000d0
107#define PPC_INST_BRANCH 0x48000000
108#define PPC_INST_BRANCH_COND 0x40800000
109
74/* macros to insert fields into opcodes */ 110/* macros to insert fields into opcodes */
75#define __PPC_RA(a) (((a) & 0x1f) << 16) 111#define __PPC_RA(a) (((a) & 0x1f) << 16)
76#define __PPC_RB(b) (((b) & 0x1f) << 11) 112#define __PPC_RB(b) (((b) & 0x1f) << 11)
@@ -83,6 +119,10 @@
83#define __PPC_T_TLB(t) (((t) & 0x3) << 21) 119#define __PPC_T_TLB(t) (((t) & 0x3) << 21)
84#define __PPC_WC(w) (((w) & 0x3) << 21) 120#define __PPC_WC(w) (((w) & 0x3) << 21)
85#define __PPC_WS(w) (((w) & 0x1f) << 11) 121#define __PPC_WS(w) (((w) & 0x1f) << 11)
122#define __PPC_SH(s) __PPC_WS(s)
123#define __PPC_MB(s) (((s) & 0x1f) << 6)
124#define __PPC_ME(s) (((s) & 0x1f) << 1)
125#define __PPC_BI(s) (((s) & 0x1f) << 16)
86 126
87/* 127/*
88 * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a 128 * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 1b422381fc16..368f72f79808 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -150,18 +150,22 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
150#define REST_16VSRSU(n,b,base) REST_8VSRSU(n,b,base); REST_8VSRSU(n+8,b,base) 150#define REST_16VSRSU(n,b,base) REST_8VSRSU(n,b,base); REST_8VSRSU(n+8,b,base)
151#define REST_32VSRSU(n,b,base) REST_16VSRSU(n,b,base); REST_16VSRSU(n+16,b,base) 151#define REST_32VSRSU(n,b,base) REST_16VSRSU(n,b,base); REST_16VSRSU(n+16,b,base)
152 152
153#define SAVE_EVR(n,s,base) evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base) 153/*
154#define SAVE_2EVRS(n,s,base) SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base) 154 * b = base register for addressing, o = base offset from register of 1st EVR
155#define SAVE_4EVRS(n,s,base) SAVE_2EVRS(n,s,base); SAVE_2EVRS(n+2,s,base) 155 * n = first EVR, s = scratch
156#define SAVE_8EVRS(n,s,base) SAVE_4EVRS(n,s,base); SAVE_4EVRS(n+4,s,base) 156 */
157#define SAVE_16EVRS(n,s,base) SAVE_8EVRS(n,s,base); SAVE_8EVRS(n+8,s,base) 157#define SAVE_EVR(n,s,b,o) evmergehi s,s,n; stw s,o+4*(n)(b)
158#define SAVE_32EVRS(n,s,base) SAVE_16EVRS(n,s,base); SAVE_16EVRS(n+16,s,base) 158#define SAVE_2EVRS(n,s,b,o) SAVE_EVR(n,s,b,o); SAVE_EVR(n+1,s,b,o)
159#define REST_EVR(n,s,base) lwz s,THREAD_EVR0+4*(n)(base); evmergelo n,s,n 159#define SAVE_4EVRS(n,s,b,o) SAVE_2EVRS(n,s,b,o); SAVE_2EVRS(n+2,s,b,o)
160#define REST_2EVRS(n,s,base) REST_EVR(n,s,base); REST_EVR(n+1,s,base) 160#define SAVE_8EVRS(n,s,b,o) SAVE_4EVRS(n,s,b,o); SAVE_4EVRS(n+4,s,b,o)
161#define REST_4EVRS(n,s,base) REST_2EVRS(n,s,base); REST_2EVRS(n+2,s,base) 161#define SAVE_16EVRS(n,s,b,o) SAVE_8EVRS(n,s,b,o); SAVE_8EVRS(n+8,s,b,o)
162#define REST_8EVRS(n,s,base) REST_4EVRS(n,s,base); REST_4EVRS(n+4,s,base) 162#define SAVE_32EVRS(n,s,b,o) SAVE_16EVRS(n,s,b,o); SAVE_16EVRS(n+16,s,b,o)
163#define REST_16EVRS(n,s,base) REST_8EVRS(n,s,base); REST_8EVRS(n+8,s,base) 163#define REST_EVR(n,s,b,o) lwz s,o+4*(n)(b); evmergelo n,s,n
164#define REST_32EVRS(n,s,base) REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base) 164#define REST_2EVRS(n,s,b,o) REST_EVR(n,s,b,o); REST_EVR(n+1,s,b,o)
165#define REST_4EVRS(n,s,b,o) REST_2EVRS(n,s,b,o); REST_2EVRS(n+2,s,b,o)
166#define REST_8EVRS(n,s,b,o) REST_4EVRS(n,s,b,o); REST_4EVRS(n+4,s,b,o)
167#define REST_16EVRS(n,s,b,o) REST_8EVRS(n,s,b,o); REST_8EVRS(n+8,s,b,o)
168#define REST_32EVRS(n,s,b,o) REST_16EVRS(n,s,b,o); REST_16EVRS(n+16,s,b,o)
165 169
166/* Macros to adjust thread priority for hardware multithreading */ 170/* Macros to adjust thread priority for hardware multithreading */
167#define HMT_VERY_LOW or 31,31,31 # very low priority 171#define HMT_VERY_LOW or 31,31,31 # very low priority
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index d50c2b6d9bc3..eb11a446720e 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -20,6 +20,7 @@
20 20
21#ifndef __ASSEMBLY__ 21#ifndef __ASSEMBLY__
22#include <linux/compiler.h> 22#include <linux/compiler.h>
23#include <linux/cache.h>
23#include <asm/ptrace.h> 24#include <asm/ptrace.h>
24#include <asm/types.h> 25#include <asm/types.h>
25 26
@@ -156,6 +157,10 @@ struct thread_struct {
156#endif 157#endif
157 struct pt_regs *regs; /* Pointer to saved register state */ 158 struct pt_regs *regs; /* Pointer to saved register state */
158 mm_segment_t fs; /* for get_fs() validation */ 159 mm_segment_t fs; /* for get_fs() validation */
160#ifdef CONFIG_BOOKE
161 /* BookE base exception scratch space; align on cacheline */
162 unsigned long normsave[8] ____cacheline_aligned;
163#endif
159#ifdef CONFIG_PPC32 164#ifdef CONFIG_PPC32
160 void *pgdir; /* root of page-table tree */ 165 void *pgdir; /* root of page-table tree */
161#endif 166#endif
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index c189aa5fe1f4..b5c91901e384 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -18,24 +18,10 @@
18 */ 18 */
19#include <linux/types.h> 19#include <linux/types.h>
20#include <asm/irq.h> 20#include <asm/irq.h>
21#include <asm/atomic.h> 21#include <linux/atomic.h>
22 22
23#define HAVE_ARCH_DEVTREE_FIXUPS 23#define HAVE_ARCH_DEVTREE_FIXUPS
24 24
25#ifdef CONFIG_PPC32
26/*
27 * PCI <-> OF matching functions
28 * (XXX should these be here?)
29 */
30struct pci_bus;
31struct pci_dev;
32extern int pci_device_from_OF_node(struct device_node *node,
33 u8* bus, u8* devfn);
34extern struct device_node* pci_busdev_to_OF_node(struct pci_bus *, int);
35extern struct device_node* pci_device_to_OF_node(struct pci_dev *);
36extern void pci_create_OF_bus_map(void);
37#endif
38
39/* 25/*
40 * OF address retreival & translation 26 * OF address retreival & translation
41 */ 27 */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c5cae0dd176c..e8aaf6fce38b 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -189,6 +189,9 @@
189#define SPRN_CTR 0x009 /* Count Register */ 189#define SPRN_CTR 0x009 /* Count Register */
190#define SPRN_DSCR 0x11 190#define SPRN_DSCR 0x11
191#define SPRN_CFAR 0x1c /* Come From Address Register */ 191#define SPRN_CFAR 0x1c /* Come From Address Register */
192#define SPRN_AMR 0x1d /* Authority Mask Register */
193#define SPRN_UAMOR 0x9d /* User Authority Mask Override Register */
194#define SPRN_AMOR 0x15d /* Authority Mask Override Register */
192#define SPRN_ACOP 0x1F /* Available Coprocessor Register */ 195#define SPRN_ACOP 0x1F /* Available Coprocessor Register */
193#define SPRN_CTRLF 0x088 196#define SPRN_CTRLF 0x088
194#define SPRN_CTRLT 0x098 197#define SPRN_CTRLT 0x098
@@ -232,22 +235,28 @@
232#define LPCR_VPM0 (1ul << (63-0)) 235#define LPCR_VPM0 (1ul << (63-0))
233#define LPCR_VPM1 (1ul << (63-1)) 236#define LPCR_VPM1 (1ul << (63-1))
234#define LPCR_ISL (1ul << (63-2)) 237#define LPCR_ISL (1ul << (63-2))
238#define LPCR_VC_SH (63-2)
235#define LPCR_DPFD_SH (63-11) 239#define LPCR_DPFD_SH (63-11)
236#define LPCR_VRMA_L (1ul << (63-12)) 240#define LPCR_VRMA_L (1ul << (63-12))
237#define LPCR_VRMA_LP0 (1ul << (63-15)) 241#define LPCR_VRMA_LP0 (1ul << (63-15))
238#define LPCR_VRMA_LP1 (1ul << (63-16)) 242#define LPCR_VRMA_LP1 (1ul << (63-16))
243#define LPCR_VRMASD_SH (63-16)
239#define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */ 244#define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */
245#define LPCR_RMLS_SH (63-37)
240#define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */ 246#define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */
241#define LPCR_PECE 0x00007000 /* powersave exit cause enable */ 247#define LPCR_PECE 0x00007000 /* powersave exit cause enable */
242#define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */ 248#define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */
243#define LPCR_PECE1 0x00002000 /* decrementer can cause exit */ 249#define LPCR_PECE1 0x00002000 /* decrementer can cause exit */
244#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */ 250#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */
245#define LPCR_MER 0x00000800 /* Mediated External Exception */ 251#define LPCR_MER 0x00000800 /* Mediated External Exception */
252#define LPCR_LPES 0x0000000c
246#define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */ 253#define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */
247#define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */ 254#define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */
255#define LPCR_LPES_SH 2
248#define LPCR_RMI 0x00000002 /* real mode is cache inhibit */ 256#define LPCR_RMI 0x00000002 /* real mode is cache inhibit */
249#define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */ 257#define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */
250#define SPRN_LPID 0x13F /* Logical Partition Identifier */ 258#define SPRN_LPID 0x13F /* Logical Partition Identifier */
259#define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */
251#define SPRN_HMER 0x150 /* Hardware m? error recovery */ 260#define SPRN_HMER 0x150 /* Hardware m? error recovery */
252#define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */ 261#define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */
253#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */ 262#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
@@ -298,6 +307,7 @@
298#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */ 307#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */
299#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Resgister */ 308#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Resgister */
300#define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */ 309#define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */
310#define HID0_HDICE_SH (63 - 23) /* 970 HDEC interrupt enable */
301#define HID0_EMCP (1<<31) /* Enable Machine Check pin */ 311#define HID0_EMCP (1<<31) /* Enable Machine Check pin */
302#define HID0_EBA (1<<29) /* Enable Bus Address Parity */ 312#define HID0_EBA (1<<29) /* Enable Bus Address Parity */
303#define HID0_EBD (1<<28) /* Enable Bus Data Parity */ 313#define HID0_EBD (1<<28) /* Enable Bus Data Parity */
@@ -353,6 +363,13 @@
353#define SPRN_IABR2 0x3FA /* 83xx */ 363#define SPRN_IABR2 0x3FA /* 83xx */
354#define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */ 364#define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */
355#define SPRN_HID4 0x3F4 /* 970 HID4 */ 365#define SPRN_HID4 0x3F4 /* 970 HID4 */
366#define HID4_LPES0 (1ul << (63-0)) /* LPAR env. sel. bit 0 */
367#define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */
368#define HID4_LPID5_SH (63 - 6) /* partition ID bottom 4 bits */
369#define HID4_RMOR_SH (63 - 22) /* real mode offset (16 bits) */
370#define HID4_LPES1 (1 << (63-57)) /* LPAR env. sel. bit 1 */
371#define HID4_RMLS0_SH (63 - 58) /* Real mode limit top bit */
372#define HID4_LPID1_SH 0 /* partition ID top 2 bits */
356#define SPRN_HID4_GEKKO 0x3F3 /* Gekko HID4 */ 373#define SPRN_HID4_GEKKO 0x3F3 /* Gekko HID4 */
357#define SPRN_HID5 0x3F6 /* 970 HID5 */ 374#define SPRN_HID5 0x3F6 /* 970 HID5 */
358#define SPRN_HID6 0x3F9 /* BE HID 6 */ 375#define SPRN_HID6 0x3F9 /* BE HID 6 */
@@ -802,28 +819,28 @@
802 mfspr rX,SPRN_SPRG_PACA; \ 819 mfspr rX,SPRN_SPRG_PACA; \
803 FTR_SECTION_ELSE_NESTED(66); \ 820 FTR_SECTION_ELSE_NESTED(66); \
804 mfspr rX,SPRN_SPRG_HPACA; \ 821 mfspr rX,SPRN_SPRG_HPACA; \
805 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) 822 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
806 823
807#define SET_PACA(rX) \ 824#define SET_PACA(rX) \
808 BEGIN_FTR_SECTION_NESTED(66); \ 825 BEGIN_FTR_SECTION_NESTED(66); \
809 mtspr SPRN_SPRG_PACA,rX; \ 826 mtspr SPRN_SPRG_PACA,rX; \
810 FTR_SECTION_ELSE_NESTED(66); \ 827 FTR_SECTION_ELSE_NESTED(66); \
811 mtspr SPRN_SPRG_HPACA,rX; \ 828 mtspr SPRN_SPRG_HPACA,rX; \
812 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) 829 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
813 830
814#define GET_SCRATCH0(rX) \ 831#define GET_SCRATCH0(rX) \
815 BEGIN_FTR_SECTION_NESTED(66); \ 832 BEGIN_FTR_SECTION_NESTED(66); \
816 mfspr rX,SPRN_SPRG_SCRATCH0; \ 833 mfspr rX,SPRN_SPRG_SCRATCH0; \
817 FTR_SECTION_ELSE_NESTED(66); \ 834 FTR_SECTION_ELSE_NESTED(66); \
818 mfspr rX,SPRN_SPRG_HSCRATCH0; \ 835 mfspr rX,SPRN_SPRG_HSCRATCH0; \
819 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) 836 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
820 837
821#define SET_SCRATCH0(rX) \ 838#define SET_SCRATCH0(rX) \
822 BEGIN_FTR_SECTION_NESTED(66); \ 839 BEGIN_FTR_SECTION_NESTED(66); \
823 mtspr SPRN_SPRG_SCRATCH0,rX; \ 840 mtspr SPRN_SPRG_SCRATCH0,rX; \
824 FTR_SECTION_ELSE_NESTED(66); \ 841 FTR_SECTION_ELSE_NESTED(66); \
825 mtspr SPRN_SPRG_HSCRATCH0,rX; \ 842 mtspr SPRN_SPRG_HSCRATCH0,rX; \
826 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) 843 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
827 844
828#else /* CONFIG_PPC_BOOK3S_64 */ 845#else /* CONFIG_PPC_BOOK3S_64 */
829#define GET_SCRATCH0(rX) mfspr rX,SPRN_SPRG_SCRATCH0 846#define GET_SCRATCH0(rX) mfspr rX,SPRN_SPRG_SCRATCH0
@@ -872,8 +889,8 @@
872#define SPRN_SPRG_WSCRATCH2 SPRN_SPRG4W 889#define SPRN_SPRG_WSCRATCH2 SPRN_SPRG4W
873#define SPRN_SPRG_RSCRATCH3 SPRN_SPRG5R 890#define SPRN_SPRG_RSCRATCH3 SPRN_SPRG5R
874#define SPRN_SPRG_WSCRATCH3 SPRN_SPRG5W 891#define SPRN_SPRG_WSCRATCH3 SPRN_SPRG5W
875#define SPRN_SPRG_RSCRATCH_MC SPRN_SPRG6R 892#define SPRN_SPRG_RSCRATCH_MC SPRN_SPRG1
876#define SPRN_SPRG_WSCRATCH_MC SPRN_SPRG6W 893#define SPRN_SPRG_WSCRATCH_MC SPRN_SPRG1
877#define SPRN_SPRG_RSCRATCH4 SPRN_SPRG7R 894#define SPRN_SPRG_RSCRATCH4 SPRN_SPRG7R
878#define SPRN_SPRG_WSCRATCH4 SPRN_SPRG7W 895#define SPRN_SPRG_WSCRATCH4 SPRN_SPRG7W
879#ifdef CONFIG_E200 896#ifdef CONFIG_E200
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 0f0ad9fa01c1..9ec0b39f9ddc 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -318,6 +318,7 @@
318#define ESR_ILK 0x00100000 /* Instr. Cache Locking */ 318#define ESR_ILK 0x00100000 /* Instr. Cache Locking */
319#define ESR_PUO 0x00040000 /* Unimplemented Operation exception */ 319#define ESR_PUO 0x00040000 /* Unimplemented Operation exception */
320#define ESR_BO 0x00020000 /* Byte Ordering */ 320#define ESR_BO 0x00020000 /* Byte Ordering */
321#define ESR_SPV 0x00000080 /* Signal Processing operation */
321 322
322/* Bit definitions related to the DBCR0. */ 323/* Bit definitions related to the DBCR0. */
323#if defined(CONFIG_40x) 324#if defined(CONFIG_40x)
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index dae19342f0b9..186e0fb835bd 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -3,4 +3,8 @@
3 3
4#include <asm-generic/setup.h> 4#include <asm-generic/setup.h>
5 5
6#ifndef __ASSEMBLY__
7extern void ppc_printk_progress(char *s, unsigned short hex);
8#endif
9
6#endif /* _ASM_POWERPC_SETUP_H */ 10#endif /* _ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 11eb404b5606..15a70b7f638b 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -30,7 +30,7 @@
30#include <asm/percpu.h> 30#include <asm/percpu.h>
31 31
32extern int boot_cpuid; 32extern int boot_cpuid;
33extern int boot_cpu_count; 33extern int spinning_secondaries;
34 34
35extern void cpu_die(void); 35extern void cpu_die(void);
36 36
@@ -119,7 +119,6 @@ extern const char *smp_ipi_name[];
119/* for irq controllers with only a single ipi */ 119/* for irq controllers with only a single ipi */
120extern void smp_muxed_ipi_set_data(int cpu, unsigned long data); 120extern void smp_muxed_ipi_set_data(int cpu, unsigned long data);
121extern void smp_muxed_ipi_message_pass(int cpu, int msg); 121extern void smp_muxed_ipi_message_pass(int cpu, int msg);
122extern void smp_muxed_ipi_resend(void);
123extern irqreturn_t smp_ipi_demux(void); 122extern irqreturn_t smp_ipi_demux(void);
124 123
125void smp_init_iSeries(void); 124void smp_init_iSeries(void);
diff --git a/arch/powerpc/include/asm/smu.h b/arch/powerpc/include/asm/smu.h
index e3bdada8c542..ae20ce1af4c7 100644
--- a/arch/powerpc/include/asm/smu.h
+++ b/arch/powerpc/include/asm/smu.h
@@ -547,7 +547,7 @@ struct smu_sdbp_header {
547 * (currently, afaik, this concerns only the FVT partition 547 * (currently, afaik, this concerns only the FVT partition
548 * (0x12) 548 * (0x12)
549 */ 549 */
550#define SMU_U16_MIX(x) le16_to_cpu(x); 550#define SMU_U16_MIX(x) le16_to_cpu(x)
551#define SMU_U32_MIX(x) ((((x) & 0xff00ff00u) >> 8)|(((x) & 0x00ff00ffu) << 8)) 551#define SMU_U32_MIX(x) ((((x) & 0xff00ff00u) >> 8)|(((x) & 0x00ff00ffu) << 8))
552 552
553 553
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index 2dc595dda03b..e30a13d1ee76 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -120,7 +120,6 @@ extern void do_dabr(struct pt_regs *regs, unsigned long address,
120 unsigned long error_code); 120 unsigned long error_code);
121#endif 121#endif
122extern void print_backtrace(unsigned long *); 122extern void print_backtrace(unsigned long *);
123extern void show_regs(struct pt_regs * regs);
124extern void flush_instruction_cache(void); 123extern void flush_instruction_cache(void);
125extern void hard_reset_now(void); 124extern void hard_reset_now(void);
126extern void poweroff_now(void); 125extern void poweroff_now(void);
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index 58580e94a2bb..93e05d1b34b2 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -40,6 +40,7 @@ extern void udbg_adb_init_early(void);
40 40
41extern void __init udbg_early_init(void); 41extern void __init udbg_early_init(void);
42extern void __init udbg_init_debug_lpar(void); 42extern void __init udbg_init_debug_lpar(void);
43extern void __init udbg_init_debug_lpar_hvsi(void);
43extern void __init udbg_init_pmac_realmode(void); 44extern void __init udbg_init_pmac_realmode(void);
44extern void __init udbg_init_maple_realmode(void); 45extern void __init udbg_init_maple_realmode(void);
45extern void __init udbg_init_pas_realmode(void); 46extern void __init udbg_init_pas_realmode(void);
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index e8b981897d44..ce4f7f179117 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o
76obj-$(CONFIG_44x) += cpu_setup_44x.o 76obj-$(CONFIG_44x) += cpu_setup_44x.o
77obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o dbell.o 77obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o dbell.o
78obj-$(CONFIG_PPC_BOOK3E_64) += dbell.o 78obj-$(CONFIG_PPC_BOOK3E_64) += dbell.o
79obj-$(CONFIG_JUMP_LABEL) += jump_label.o
79 80
80extra-y := head_$(CONFIG_WORD_SIZE).o 81extra-y := head_$(CONFIG_WORD_SIZE).o
81extra-$(CONFIG_40x) := head_40x.o 82extra-$(CONFIG_40x) := head_40x.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 36e1c8a29be8..5f078bc2063e 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -82,6 +82,9 @@ int main(void)
82 DEFINE(KSP, offsetof(struct thread_struct, ksp)); 82 DEFINE(KSP, offsetof(struct thread_struct, ksp));
83 DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit)); 83 DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
84 DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); 84 DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
85#ifdef CONFIG_BOOKE
86 DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
87#endif
85 DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode)); 88 DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
86 DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0])); 89 DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
87 DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr)); 90 DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
@@ -128,6 +131,7 @@ int main(void)
128 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page)); 131 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
129 /* paca */ 132 /* paca */
130 DEFINE(PACA_SIZE, sizeof(struct paca_struct)); 133 DEFINE(PACA_SIZE, sizeof(struct paca_struct));
134 DEFINE(PACA_LOCK_TOKEN, offsetof(struct paca_struct, lock_token));
131 DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index)); 135 DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
132 DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start)); 136 DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
133 DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack)); 137 DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
@@ -187,7 +191,9 @@ int main(void)
187 DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1)); 191 DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
188 DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int)); 192 DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
189 DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int)); 193 DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
194 DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use));
190 DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx)); 195 DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
196 DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count));
191 DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx)); 197 DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
192#endif /* CONFIG_PPC_STD_MMU_64 */ 198#endif /* CONFIG_PPC_STD_MMU_64 */
193 DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); 199 DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
@@ -198,11 +204,6 @@ int main(void)
198 DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); 204 DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
199 DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); 205 DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
200 DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); 206 DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
201#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
202 DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
203 DEFINE(SVCPU_SLB, offsetof(struct kvmppc_book3s_shadow_vcpu, slb));
204 DEFINE(SVCPU_SLB_MAX, offsetof(struct kvmppc_book3s_shadow_vcpu, slb_max));
205#endif
206#endif /* CONFIG_PPC64 */ 207#endif /* CONFIG_PPC64 */
207 208
208 /* RTAS */ 209 /* RTAS */
@@ -397,67 +398,160 @@ int main(void)
397 DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); 398 DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
398 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); 399 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
399 DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave)); 400 DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
401 DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
402 DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
403#ifdef CONFIG_ALTIVEC
404 DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
405 DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
406#endif
407#ifdef CONFIG_VSX
408 DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
409#endif
410 DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
411 DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
412 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
413 DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
414 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
415#ifdef CONFIG_KVM_BOOK3S_64_HV
416 DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
417 DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
418 DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));
419 DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0));
420 DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1));
421 DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
422 DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
423#endif
400 DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4)); 424 DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
401 DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5)); 425 DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
402 DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6)); 426 DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
403 DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7)); 427 DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
404 DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid)); 428 DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
429 DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1));
405 DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared)); 430 DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
406 DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr)); 431 DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
432 DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
407 433
408 /* book3s */ 434 /* book3s */
435#ifdef CONFIG_KVM_BOOK3S_64_HV
436 DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
437 DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
438 DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
439 DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
440 DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
441 DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
442 DEFINE(KVM_ONLINE_CPUS, offsetof(struct kvm, online_vcpus.counter));
443 DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu));
444 DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
445 DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
446 DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
447 DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
448#endif
409#ifdef CONFIG_PPC_BOOK3S 449#ifdef CONFIG_PPC_BOOK3S
450 DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
451 DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
410 DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip)); 452 DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
411 DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr)); 453 DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
412 DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); 454 DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
455 DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
456 DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
457 DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
458 DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
459 DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
460 DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
413 DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem)); 461 DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
414 DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter)); 462 DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
415 DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler)); 463 DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
416 DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall)); 464 DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
417 DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags)); 465 DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
466 DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
467 DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
468 DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions));
469 DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa));
470 DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
471 DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
472 DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
473 DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
474 DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
475 DEFINE(VCPU_LAST_CPU, offsetof(struct kvm_vcpu, arch.last_cpu));
476 DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
477 DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
478 DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
479 DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
480 DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
481 DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
482 DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
483 DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
418 DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) - 484 DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
419 offsetof(struct kvmppc_vcpu_book3s, vcpu)); 485 offsetof(struct kvmppc_vcpu_book3s, vcpu));
420 DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr)); 486 DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
421 DEFINE(SVCPU_XER, offsetof(struct kvmppc_book3s_shadow_vcpu, xer)); 487 DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
422 DEFINE(SVCPU_CTR, offsetof(struct kvmppc_book3s_shadow_vcpu, ctr)); 488 DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
423 DEFINE(SVCPU_LR, offsetof(struct kvmppc_book3s_shadow_vcpu, lr)); 489
424 DEFINE(SVCPU_PC, offsetof(struct kvmppc_book3s_shadow_vcpu, pc)); 490#ifdef CONFIG_PPC_BOOK3S_64
425 DEFINE(SVCPU_R0, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[0])); 491#ifdef CONFIG_KVM_BOOK3S_PR
426 DEFINE(SVCPU_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[1])); 492# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
427 DEFINE(SVCPU_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[2])); 493#else
428 DEFINE(SVCPU_R3, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[3])); 494# define SVCPU_FIELD(x, f)
429 DEFINE(SVCPU_R4, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[4])); 495#endif
430 DEFINE(SVCPU_R5, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[5])); 496# define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
431 DEFINE(SVCPU_R6, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[6])); 497#else /* 32-bit */
432 DEFINE(SVCPU_R7, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[7])); 498# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
433 DEFINE(SVCPU_R8, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[8])); 499# define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.f))
434 DEFINE(SVCPU_R9, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[9])); 500#endif
435 DEFINE(SVCPU_R10, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[10])); 501
436 DEFINE(SVCPU_R11, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[11])); 502 SVCPU_FIELD(SVCPU_CR, cr);
437 DEFINE(SVCPU_R12, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[12])); 503 SVCPU_FIELD(SVCPU_XER, xer);
438 DEFINE(SVCPU_R13, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[13])); 504 SVCPU_FIELD(SVCPU_CTR, ctr);
439 DEFINE(SVCPU_HOST_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r1)); 505 SVCPU_FIELD(SVCPU_LR, lr);
440 DEFINE(SVCPU_HOST_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r2)); 506 SVCPU_FIELD(SVCPU_PC, pc);
441 DEFINE(SVCPU_VMHANDLER, offsetof(struct kvmppc_book3s_shadow_vcpu, 507 SVCPU_FIELD(SVCPU_R0, gpr[0]);
442 vmhandler)); 508 SVCPU_FIELD(SVCPU_R1, gpr[1]);
443 DEFINE(SVCPU_SCRATCH0, offsetof(struct kvmppc_book3s_shadow_vcpu, 509 SVCPU_FIELD(SVCPU_R2, gpr[2]);
444 scratch0)); 510 SVCPU_FIELD(SVCPU_R3, gpr[3]);
445 DEFINE(SVCPU_SCRATCH1, offsetof(struct kvmppc_book3s_shadow_vcpu, 511 SVCPU_FIELD(SVCPU_R4, gpr[4]);
446 scratch1)); 512 SVCPU_FIELD(SVCPU_R5, gpr[5]);
447 DEFINE(SVCPU_IN_GUEST, offsetof(struct kvmppc_book3s_shadow_vcpu, 513 SVCPU_FIELD(SVCPU_R6, gpr[6]);
448 in_guest)); 514 SVCPU_FIELD(SVCPU_R7, gpr[7]);
449 DEFINE(SVCPU_FAULT_DSISR, offsetof(struct kvmppc_book3s_shadow_vcpu, 515 SVCPU_FIELD(SVCPU_R8, gpr[8]);
450 fault_dsisr)); 516 SVCPU_FIELD(SVCPU_R9, gpr[9]);
451 DEFINE(SVCPU_FAULT_DAR, offsetof(struct kvmppc_book3s_shadow_vcpu, 517 SVCPU_FIELD(SVCPU_R10, gpr[10]);
452 fault_dar)); 518 SVCPU_FIELD(SVCPU_R11, gpr[11]);
453 DEFINE(SVCPU_LAST_INST, offsetof(struct kvmppc_book3s_shadow_vcpu, 519 SVCPU_FIELD(SVCPU_R12, gpr[12]);
454 last_inst)); 520 SVCPU_FIELD(SVCPU_R13, gpr[13]);
455 DEFINE(SVCPU_SHADOW_SRR1, offsetof(struct kvmppc_book3s_shadow_vcpu, 521 SVCPU_FIELD(SVCPU_FAULT_DSISR, fault_dsisr);
456 shadow_srr1)); 522 SVCPU_FIELD(SVCPU_FAULT_DAR, fault_dar);
523 SVCPU_FIELD(SVCPU_LAST_INST, last_inst);
524 SVCPU_FIELD(SVCPU_SHADOW_SRR1, shadow_srr1);
457#ifdef CONFIG_PPC_BOOK3S_32 525#ifdef CONFIG_PPC_BOOK3S_32
458 DEFINE(SVCPU_SR, offsetof(struct kvmppc_book3s_shadow_vcpu, sr)); 526 SVCPU_FIELD(SVCPU_SR, sr);
459#endif 527#endif
460#else 528#ifdef CONFIG_PPC64
529 SVCPU_FIELD(SVCPU_SLB, slb);
530 SVCPU_FIELD(SVCPU_SLB_MAX, slb_max);
531#endif
532
533 HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
534 HSTATE_FIELD(HSTATE_HOST_R2, host_r2);
535 HSTATE_FIELD(HSTATE_HOST_MSR, host_msr);
536 HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
537 HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
538 HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
539 HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
540
541#ifdef CONFIG_KVM_BOOK3S_64_HV
542 HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
543 HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
544 HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
545 HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
546 HSTATE_FIELD(HSTATE_PMC, host_pmc);
547 HSTATE_FIELD(HSTATE_PURR, host_purr);
548 HSTATE_FIELD(HSTATE_SPURR, host_spurr);
549 HSTATE_FIELD(HSTATE_DSCR, host_dscr);
550 HSTATE_FIELD(HSTATE_DABR, dabr);
551 HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
552#endif /* CONFIG_KVM_BOOK3S_64_HV */
553
554#else /* CONFIG_PPC_BOOK3S */
461 DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); 555 DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
462 DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); 556 DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
463 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); 557 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
@@ -467,7 +561,7 @@ int main(void)
467 DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear)); 561 DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
468 DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr)); 562 DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
469#endif /* CONFIG_PPC_BOOK3S */ 563#endif /* CONFIG_PPC_BOOK3S */
470#endif 564#endif /* CONFIG_KVM */
471 565
472#ifdef CONFIG_KVM_GUEST 566#ifdef CONFIG_KVM_GUEST
473 DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared, 567 DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared,
@@ -497,6 +591,13 @@ int main(void)
497 DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7)); 591 DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
498#endif 592#endif
499 593
594#if defined(CONFIG_KVM) && defined(CONFIG_SPE)
595 DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0]));
596 DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc));
597 DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr));
598 DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
599#endif
600
500#ifdef CONFIG_KVM_EXIT_TIMING 601#ifdef CONFIG_KVM_EXIT_TIMING
501 DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu, 602 DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
502 arch.timing_exit.tv32.tbu)); 603 arch.timing_exit.tv32.tbu));
diff --git a/arch/powerpc/kernel/cpu_setup_power7.S b/arch/powerpc/kernel/cpu_setup_power7.S
index 4f9a93fcfe07..76797c5105d6 100644
--- a/arch/powerpc/kernel/cpu_setup_power7.S
+++ b/arch/powerpc/kernel/cpu_setup_power7.S
@@ -45,12 +45,12 @@ _GLOBAL(__restore_cpu_power7)
45 blr 45 blr
46 46
47__init_hvmode_206: 47__init_hvmode_206:
48 /* Disable CPU_FTR_HVMODE_206 and exit if MSR:HV is not set */ 48 /* Disable CPU_FTR_HVMODE and exit if MSR:HV is not set */
49 mfmsr r3 49 mfmsr r3
50 rldicl. r0,r3,4,63 50 rldicl. r0,r3,4,63
51 bnelr 51 bnelr
52 ld r5,CPU_SPEC_FEATURES(r4) 52 ld r5,CPU_SPEC_FEATURES(r4)
53 LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE_206) 53 LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE)
54 xor r5,r5,r6 54 xor r5,r5,r6
55 std r5,CPU_SPEC_FEATURES(r4) 55 std r5,CPU_SPEC_FEATURES(r4)
56 blr 56 blr
@@ -61,19 +61,23 @@ __init_LPCR:
61 * LPES = 0b01 (HSRR0/1 used for 0x500) 61 * LPES = 0b01 (HSRR0/1 used for 0x500)
62 * PECE = 0b111 62 * PECE = 0b111
63 * DPFD = 4 63 * DPFD = 4
64 * HDICE = 0
65 * VC = 0b100 (VPM0=1, VPM1=0, ISL=0)
66 * VRMASD = 0b10000 (L=1, LP=00)
64 * 67 *
65 * Other bits untouched for now 68 * Other bits untouched for now
66 */ 69 */
67 mfspr r3,SPRN_LPCR 70 mfspr r3,SPRN_LPCR
68 ori r3,r3,(LPCR_LPES0|LPCR_LPES1) 71 li r5,1
69 xori r3,r3, LPCR_LPES0 72 rldimi r3,r5, LPCR_LPES_SH, 64-LPCR_LPES_SH-2
70 ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2) 73 ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2)
71 li r5,7
72 sldi r5,r5,LPCR_DPFD_SH
73 andc r3,r3,r5
74 li r5,4 74 li r5,4
75 sldi r5,r5,LPCR_DPFD_SH 75 rldimi r3,r5, LPCR_DPFD_SH, 64-LPCR_DPFD_SH-3
76 or r3,r3,r5 76 clrrdi r3,r3,1 /* clear HDICE */
77 li r5,4
78 rldimi r3,r5, LPCR_VC_SH, 0
79 li r5,0x10
80 rldimi r3,r5, LPCR_VRMASD_SH, 64-LPCR_VRMASD_SH-5
77 mtspr SPRN_LPCR,r3 81 mtspr SPRN_LPCR,r3
78 isync 82 isync
79 blr 83 blr
diff --git a/arch/powerpc/kernel/cpu_setup_ppc970.S b/arch/powerpc/kernel/cpu_setup_ppc970.S
index 27f2507279d8..12fac8df01c5 100644
--- a/arch/powerpc/kernel/cpu_setup_ppc970.S
+++ b/arch/powerpc/kernel/cpu_setup_ppc970.S
@@ -76,7 +76,7 @@ _GLOBAL(__setup_cpu_ppc970)
76 /* Do nothing if not running in HV mode */ 76 /* Do nothing if not running in HV mode */
77 mfmsr r0 77 mfmsr r0
78 rldicl. r0,r0,4,63 78 rldicl. r0,r0,4,63
79 beqlr 79 beq no_hv_mode
80 80
81 mfspr r0,SPRN_HID0 81 mfspr r0,SPRN_HID0
82 li r11,5 /* clear DOZE and SLEEP */ 82 li r11,5 /* clear DOZE and SLEEP */
@@ -90,7 +90,7 @@ _GLOBAL(__setup_cpu_ppc970MP)
90 /* Do nothing if not running in HV mode */ 90 /* Do nothing if not running in HV mode */
91 mfmsr r0 91 mfmsr r0
92 rldicl. r0,r0,4,63 92 rldicl. r0,r0,4,63
93 beqlr 93 beq no_hv_mode
94 94
95 mfspr r0,SPRN_HID0 95 mfspr r0,SPRN_HID0
96 li r11,0x15 /* clear DOZE and SLEEP */ 96 li r11,0x15 /* clear DOZE and SLEEP */
@@ -109,6 +109,14 @@ load_hids:
109 sync 109 sync
110 isync 110 isync
111 111
112 /* Try to set LPES = 01 in HID4 */
113 mfspr r0,SPRN_HID4
114 clrldi r0,r0,1 /* clear LPES0 */
115 ori r0,r0,HID4_LPES1 /* set LPES1 */
116 sync
117 mtspr SPRN_HID4,r0
118 isync
119
112 /* Save away cpu state */ 120 /* Save away cpu state */
113 LOAD_REG_ADDR(r5,cpu_state_storage) 121 LOAD_REG_ADDR(r5,cpu_state_storage)
114 122
@@ -117,11 +125,21 @@ load_hids:
117 std r3,CS_HID0(r5) 125 std r3,CS_HID0(r5)
118 mfspr r3,SPRN_HID1 126 mfspr r3,SPRN_HID1
119 std r3,CS_HID1(r5) 127 std r3,CS_HID1(r5)
120 mfspr r3,SPRN_HID4 128 mfspr r4,SPRN_HID4
121 std r3,CS_HID4(r5) 129 std r4,CS_HID4(r5)
122 mfspr r3,SPRN_HID5 130 mfspr r3,SPRN_HID5
123 std r3,CS_HID5(r5) 131 std r3,CS_HID5(r5)
124 132
133 /* See if we successfully set LPES1 to 1; if not we are in Apple mode */
134 andi. r4,r4,HID4_LPES1
135 bnelr
136
137no_hv_mode:
138 /* Disable CPU_FTR_HVMODE and exit, since we don't have HV mode */
139 ld r5,CPU_SPEC_FEATURES(r4)
140 LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE)
141 andc r5,r5,r6
142 std r5,CPU_SPEC_FEATURES(r4)
125 blr 143 blr
126 144
127/* Called with no MMU context (typically MSR:IR/DR off) to 145/* Called with no MMU context (typically MSR:IR/DR off) to
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 4e6ee944495a..cc6a9d5d69ab 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -242,12 +242,8 @@ static void crash_kexec_wait_realmode(int cpu)
242 242
243 while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) { 243 while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
244 barrier(); 244 barrier();
245 if (!cpu_possible(i)) { 245 if (!cpu_possible(i) || !cpu_online(i) || (msecs <= 0))
246 break; 246 break;
247 }
248 if (!cpu_online(i)) {
249 break;
250 }
251 msecs--; 247 msecs--;
252 mdelay(1); 248 mdelay(1);
253 } 249 }
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index d238c082c3c5..4f0959fbfbee 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -161,9 +161,7 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
161 161
162 if (ppc_md.dma_set_mask) 162 if (ppc_md.dma_set_mask)
163 return ppc_md.dma_set_mask(dev, dma_mask); 163 return ppc_md.dma_set_mask(dev, dma_mask);
164 if (unlikely(dma_ops == NULL)) 164 if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
165 return -EIO;
166 if (dma_ops->set_dma_mask != NULL)
167 return dma_ops->set_dma_mask(dev, dma_mask); 165 return dma_ops->set_dma_mask(dev, dma_mask);
168 if (!dev->dma_mask || !dma_supported(dev, dma_mask)) 166 if (!dev->dma_mask || !dma_supported(dev, dma_mask))
169 return -EIO; 167 return -EIO;
diff --git a/arch/powerpc/kernel/e500-pmu.c b/arch/powerpc/kernel/e500-pmu.c
index b150b510510f..cb2e2949c8d1 100644
--- a/arch/powerpc/kernel/e500-pmu.c
+++ b/arch/powerpc/kernel/e500-pmu.c
@@ -75,6 +75,11 @@ static int e500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
75 [C(OP_WRITE)] = { -1, -1 }, 75 [C(OP_WRITE)] = { -1, -1 },
76 [C(OP_PREFETCH)] = { -1, -1 }, 76 [C(OP_PREFETCH)] = { -1, -1 },
77 }, 77 },
78 [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
79 [C(OP_READ)] = { -1, -1 },
80 [C(OP_WRITE)] = { -1, -1 },
81 [C(OP_PREFETCH)] = { -1, -1 },
82 },
78}; 83};
79 84
80static int num_events = 128; 85static int num_events = 128;
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index d24d4400cc79..429983c06f91 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -120,6 +120,12 @@
120 std r14,PACA_EXMC+EX_R14(r13); \ 120 std r14,PACA_EXMC+EX_R14(r13); \
121 std r15,PACA_EXMC+EX_R15(r13) 121 std r15,PACA_EXMC+EX_R15(r13)
122 122
123#define PROLOG_ADDITION_DOORBELL_GEN \
124 lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
125 cmpwi cr0,r11,0; /* yes -> go out of line */ \
126 beq masked_doorbell_book3e
127
128
123/* Core exception code for all exceptions except TLB misses. 129/* Core exception code for all exceptions except TLB misses.
124 * XXX: Needs to make SPRN_SPRG_GEN depend on exception type 130 * XXX: Needs to make SPRN_SPRG_GEN depend on exception type
125 */ 131 */
@@ -522,7 +528,13 @@ kernel_dbg_exc:
522 MASKABLE_EXCEPTION(0x260, perfmon, .performance_monitor_exception, ACK_NONE) 528 MASKABLE_EXCEPTION(0x260, perfmon, .performance_monitor_exception, ACK_NONE)
523 529
524/* Doorbell interrupt */ 530/* Doorbell interrupt */
525 MASKABLE_EXCEPTION(0x2070, doorbell, .doorbell_exception, ACK_NONE) 531 START_EXCEPTION(doorbell)
532 NORMAL_EXCEPTION_PROLOG(0x2070, PROLOG_ADDITION_DOORBELL)
533 EXCEPTION_COMMON(0x2070, PACA_EXGEN, INTS_DISABLE_ALL)
534 CHECK_NAPPING()
535 addi r3,r1,STACK_FRAME_OVERHEAD
536 bl .doorbell_exception
537 b .ret_from_except_lite
526 538
527/* Doorbell critical Interrupt */ 539/* Doorbell critical Interrupt */
528 START_EXCEPTION(doorbell_crit); 540 START_EXCEPTION(doorbell_crit);
@@ -545,8 +557,16 @@ kernel_dbg_exc:
545 * An interrupt came in while soft-disabled; clear EE in SRR1, 557 * An interrupt came in while soft-disabled; clear EE in SRR1,
546 * clear paca->hard_enabled and return. 558 * clear paca->hard_enabled and return.
547 */ 559 */
560masked_doorbell_book3e:
561 mtcr r10
562 /* Resend the doorbell to fire again when ints enabled */
563 mfspr r10,SPRN_PIR
564 PPC_MSGSND(r10)
565 b masked_interrupt_book3e_common
566
548masked_interrupt_book3e: 567masked_interrupt_book3e:
549 mtcr r10 568 mtcr r10
569masked_interrupt_book3e_common:
550 stb r11,PACAHARDIRQEN(r13) 570 stb r11,PACAHARDIRQEN(r13)
551 mfspr r10,SPRN_SRR1 571 mfspr r10,SPRN_SRR1
552 rldicl r11,r10,48,1 /* clear MSR_EE */ 572 rldicl r11,r10,48,1 /* clear MSR_EE */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a85f4874cba7..41b02c792aa3 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -40,7 +40,6 @@ __start_interrupts:
40 .globl system_reset_pSeries; 40 .globl system_reset_pSeries;
41system_reset_pSeries: 41system_reset_pSeries:
42 HMT_MEDIUM; 42 HMT_MEDIUM;
43 DO_KVM 0x100;
44 SET_SCRATCH0(r13) 43 SET_SCRATCH0(r13)
45#ifdef CONFIG_PPC_P7_NAP 44#ifdef CONFIG_PPC_P7_NAP
46BEGIN_FTR_SECTION 45BEGIN_FTR_SECTION
@@ -50,82 +49,73 @@ BEGIN_FTR_SECTION
50 * state loss at this time. 49 * state loss at this time.
51 */ 50 */
52 mfspr r13,SPRN_SRR1 51 mfspr r13,SPRN_SRR1
53 rlwinm r13,r13,47-31,30,31 52 rlwinm. r13,r13,47-31,30,31
54 cmpwi cr0,r13,1 53 beq 9f
55 bne 1f 54
56 b .power7_wakeup_noloss 55 /* waking up from powersave (nap) state */
571: cmpwi cr0,r13,2 56 cmpwi cr1,r13,2
58 bne 1f
59 b .power7_wakeup_loss
60 /* Total loss of HV state is fatal, we could try to use the 57 /* Total loss of HV state is fatal, we could try to use the
61 * PIR to locate a PACA, then use an emergency stack etc... 58 * PIR to locate a PACA, then use an emergency stack etc...
62 * but for now, let's just stay stuck here 59 * but for now, let's just stay stuck here
63 */ 60 */
641: cmpwi cr0,r13,3 61 bgt cr1,.
65 beq . 62 GET_PACA(r13)
66END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206) 63
64#ifdef CONFIG_KVM_BOOK3S_64_HV
65 lbz r0,PACAPROCSTART(r13)
66 cmpwi r0,0x80
67 bne 1f
68 li r0,0
69 stb r0,PACAPROCSTART(r13)
70 b kvm_start_guest
711:
72#endif
73
74 beq cr1,2f
75 b .power7_wakeup_noloss
762: b .power7_wakeup_loss
779:
78END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
67#endif /* CONFIG_PPC_P7_NAP */ 79#endif /* CONFIG_PPC_P7_NAP */
68 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD) 80 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
81 NOTEST, 0x100)
69 82
70 . = 0x200 83 . = 0x200
71_machine_check_pSeries: 84machine_check_pSeries_1:
72 HMT_MEDIUM 85 /* This is moved out of line as it can be patched by FW, but
73 DO_KVM 0x200 86 * some code path might still want to branch into the original
74 SET_SCRATCH0(r13) 87 * vector
75 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD) 88 */
89 b machine_check_pSeries
76 90
77 . = 0x300 91 . = 0x300
78 .globl data_access_pSeries 92 .globl data_access_pSeries
79data_access_pSeries: 93data_access_pSeries:
80 HMT_MEDIUM 94 HMT_MEDIUM
81 DO_KVM 0x300
82 SET_SCRATCH0(r13) 95 SET_SCRATCH0(r13)
96#ifndef CONFIG_POWER4_ONLY
83BEGIN_FTR_SECTION 97BEGIN_FTR_SECTION
84 GET_PACA(r13) 98 b data_access_check_stab
85 std r9,PACA_EXSLB+EX_R9(r13) 99data_access_not_stab:
86 std r10,PACA_EXSLB+EX_R10(r13) 100END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
87 mfspr r10,SPRN_DAR 101#endif
88 mfspr r9,SPRN_DSISR 102 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
89 srdi r10,r10,60 103 KVMTEST_PR, 0x300)
90 rlwimi r10,r9,16,0x20
91 mfcr r9
92 cmpwi r10,0x2c
93 beq do_stab_bolted_pSeries
94 ld r10,PACA_EXSLB+EX_R10(r13)
95 std r11,PACA_EXGEN+EX_R11(r13)
96 ld r11,PACA_EXSLB+EX_R9(r13)
97 std r12,PACA_EXGEN+EX_R12(r13)
98 GET_SCRATCH0(r12)
99 std r10,PACA_EXGEN+EX_R10(r13)
100 std r11,PACA_EXGEN+EX_R9(r13)
101 std r12,PACA_EXGEN+EX_R13(r13)
102 EXCEPTION_PROLOG_PSERIES_1(data_access_common, EXC_STD)
103FTR_SECTION_ELSE
104 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD)
105ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
106 104
107 . = 0x380 105 . = 0x380
108 .globl data_access_slb_pSeries 106 .globl data_access_slb_pSeries
109data_access_slb_pSeries: 107data_access_slb_pSeries:
110 HMT_MEDIUM 108 HMT_MEDIUM
111 DO_KVM 0x380
112 SET_SCRATCH0(r13) 109 SET_SCRATCH0(r13)
113 GET_PACA(r13) 110 EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
114 std r3,PACA_EXSLB+EX_R3(r13) 111 std r3,PACA_EXSLB+EX_R3(r13)
115 mfspr r3,SPRN_DAR 112 mfspr r3,SPRN_DAR
116 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
117 mfcr r9
118#ifdef __DISABLED__ 113#ifdef __DISABLED__
119 /* Keep that around for when we re-implement dynamic VSIDs */ 114 /* Keep that around for when we re-implement dynamic VSIDs */
120 cmpdi r3,0 115 cmpdi r3,0
121 bge slb_miss_user_pseries 116 bge slb_miss_user_pseries
122#endif /* __DISABLED__ */ 117#endif /* __DISABLED__ */
123 std r10,PACA_EXSLB+EX_R10(r13) 118 mfspr r12,SPRN_SRR1
124 std r11,PACA_EXSLB+EX_R11(r13)
125 std r12,PACA_EXSLB+EX_R12(r13)
126 GET_SCRATCH0(r10)
127 std r10,PACA_EXSLB+EX_R13(r13)
128 mfspr r12,SPRN_SRR1 /* and SRR1 */
129#ifndef CONFIG_RELOCATABLE 119#ifndef CONFIG_RELOCATABLE
130 b .slb_miss_realmode 120 b .slb_miss_realmode
131#else 121#else
@@ -147,24 +137,16 @@ data_access_slb_pSeries:
147 .globl instruction_access_slb_pSeries 137 .globl instruction_access_slb_pSeries
148instruction_access_slb_pSeries: 138instruction_access_slb_pSeries:
149 HMT_MEDIUM 139 HMT_MEDIUM
150 DO_KVM 0x480
151 SET_SCRATCH0(r13) 140 SET_SCRATCH0(r13)
152 GET_PACA(r13) 141 EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
153 std r3,PACA_EXSLB+EX_R3(r13) 142 std r3,PACA_EXSLB+EX_R3(r13)
154 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ 143 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
155 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
156 mfcr r9
157#ifdef __DISABLED__ 144#ifdef __DISABLED__
158 /* Keep that around for when we re-implement dynamic VSIDs */ 145 /* Keep that around for when we re-implement dynamic VSIDs */
159 cmpdi r3,0 146 cmpdi r3,0
160 bge slb_miss_user_pseries 147 bge slb_miss_user_pseries
161#endif /* __DISABLED__ */ 148#endif /* __DISABLED__ */
162 std r10,PACA_EXSLB+EX_R10(r13) 149 mfspr r12,SPRN_SRR1
163 std r11,PACA_EXSLB+EX_R11(r13)
164 std r12,PACA_EXSLB+EX_R12(r13)
165 GET_SCRATCH0(r10)
166 std r10,PACA_EXSLB+EX_R13(r13)
167 mfspr r12,SPRN_SRR1 /* and SRR1 */
168#ifndef CONFIG_RELOCATABLE 150#ifndef CONFIG_RELOCATABLE
169 b .slb_miss_realmode 151 b .slb_miss_realmode
170#else 152#else
@@ -184,26 +166,46 @@ instruction_access_slb_pSeries:
184hardware_interrupt_pSeries: 166hardware_interrupt_pSeries:
185hardware_interrupt_hv: 167hardware_interrupt_hv:
186 BEGIN_FTR_SECTION 168 BEGIN_FTR_SECTION
187 _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD) 169 _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
170 EXC_HV, SOFTEN_TEST_HV)
171 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
188 FTR_SECTION_ELSE 172 FTR_SECTION_ELSE
189 _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV) 173 _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
190 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_HVMODE_206) 174 EXC_STD, SOFTEN_TEST_HV_201)
175 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
176 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
191 177
192 STD_EXCEPTION_PSERIES(0x600, 0x600, alignment) 178 STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
179 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
180
193 STD_EXCEPTION_PSERIES(0x700, 0x700, program_check) 181 STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
182 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
183
194 STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable) 184 STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
185 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
195 186
196 MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer) 187 MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
197 MASKABLE_EXCEPTION_HV(0x980, 0x980, decrementer) 188 MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)
198 189
199 STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a) 190 STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
191 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
192
200 STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b) 193 STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
194 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
201 195
202 . = 0xc00 196 . = 0xc00
203 .globl system_call_pSeries 197 .globl system_call_pSeries
204system_call_pSeries: 198system_call_pSeries:
205 HMT_MEDIUM 199 HMT_MEDIUM
206 DO_KVM 0xc00 200#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
201 SET_SCRATCH0(r13)
202 GET_PACA(r13)
203 std r9,PACA_EXGEN+EX_R9(r13)
204 std r10,PACA_EXGEN+EX_R10(r13)
205 mfcr r9
206 KVMTEST(0xc00)
207 GET_SCRATCH0(r13)
208#endif
207BEGIN_FTR_SECTION 209BEGIN_FTR_SECTION
208 cmpdi r0,0x1ebe 210 cmpdi r0,0x1ebe
209 beq- 1f 211 beq- 1f
@@ -220,6 +222,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
220 rfid 222 rfid
221 b . /* prevent speculative execution */ 223 b . /* prevent speculative execution */
222 224
225 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
226
223/* Fast LE/BE switch system call */ 227/* Fast LE/BE switch system call */
2241: mfspr r12,SPRN_SRR1 2281: mfspr r12,SPRN_SRR1
225 xori r12,r12,MSR_LE 229 xori r12,r12,MSR_LE
@@ -228,6 +232,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
228 b . 232 b .
229 233
230 STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step) 234 STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
235 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
231 236
232 /* At 0xe??? we have a bunch of hypervisor exceptions, we branch 237 /* At 0xe??? we have a bunch of hypervisor exceptions, we branch
233 * out of line to handle them 238 * out of line to handle them
@@ -262,30 +267,93 @@ vsx_unavailable_pSeries_1:
262 267
263#ifdef CONFIG_CBE_RAS 268#ifdef CONFIG_CBE_RAS
264 STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error) 269 STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
270 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
265#endif /* CONFIG_CBE_RAS */ 271#endif /* CONFIG_CBE_RAS */
272
266 STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint) 273 STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
274 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
275
267#ifdef CONFIG_CBE_RAS 276#ifdef CONFIG_CBE_RAS
268 STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance) 277 STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
278 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
269#endif /* CONFIG_CBE_RAS */ 279#endif /* CONFIG_CBE_RAS */
280
270 STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist) 281 STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
282 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
283
271#ifdef CONFIG_CBE_RAS 284#ifdef CONFIG_CBE_RAS
272 STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal) 285 STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
286 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
273#endif /* CONFIG_CBE_RAS */ 287#endif /* CONFIG_CBE_RAS */
274 288
275 . = 0x3000 289 . = 0x3000
276 290
277/*** Out of line interrupts support ***/ 291/*** Out of line interrupts support ***/
278 292
293 /* moved from 0x200 */
294machine_check_pSeries:
295 .globl machine_check_fwnmi
296machine_check_fwnmi:
297 HMT_MEDIUM
298 SET_SCRATCH0(r13) /* save r13 */
299 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
300 EXC_STD, KVMTEST, 0x200)
301 KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
302
303#ifndef CONFIG_POWER4_ONLY
304 /* moved from 0x300 */
305data_access_check_stab:
306 GET_PACA(r13)
307 std r9,PACA_EXSLB+EX_R9(r13)
308 std r10,PACA_EXSLB+EX_R10(r13)
309 mfspr r10,SPRN_DAR
310 mfspr r9,SPRN_DSISR
311 srdi r10,r10,60
312 rlwimi r10,r9,16,0x20
313#ifdef CONFIG_KVM_BOOK3S_PR
314 lbz r9,HSTATE_IN_GUEST(r13)
315 rlwimi r10,r9,8,0x300
316#endif
317 mfcr r9
318 cmpwi r10,0x2c
319 beq do_stab_bolted_pSeries
320 mtcrf 0x80,r9
321 ld r9,PACA_EXSLB+EX_R9(r13)
322 ld r10,PACA_EXSLB+EX_R10(r13)
323 b data_access_not_stab
324do_stab_bolted_pSeries:
325 std r11,PACA_EXSLB+EX_R11(r13)
326 std r12,PACA_EXSLB+EX_R12(r13)
327 GET_SCRATCH0(r10)
328 std r10,PACA_EXSLB+EX_R13(r13)
329 EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
330#endif /* CONFIG_POWER4_ONLY */
331
332 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x300)
333 KVM_HANDLER_PR_SKIP(PACA_EXSLB, EXC_STD, 0x380)
334 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
335 KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
336 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
337 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
338
339 .align 7
279 /* moved from 0xe00 */ 340 /* moved from 0xe00 */
280 STD_EXCEPTION_HV(., 0xe00, h_data_storage) 341 STD_EXCEPTION_HV(., 0xe02, h_data_storage)
281 STD_EXCEPTION_HV(., 0xe20, h_instr_storage) 342 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
282 STD_EXCEPTION_HV(., 0xe40, emulation_assist) 343 STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
283 STD_EXCEPTION_HV(., 0xe60, hmi_exception) /* need to flush cache ? */ 344 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
345 STD_EXCEPTION_HV(., 0xe42, emulation_assist)
346 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
347 STD_EXCEPTION_HV(., 0xe62, hmi_exception) /* need to flush cache ? */
348 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
284 349
285 /* moved from 0xf00 */ 350 /* moved from 0xf00 */
286 STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor) 351 STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
352 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
287 STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable) 353 STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
354 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
288 STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable) 355 STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
356 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
289 357
290/* 358/*
291 * An interrupt came in while soft-disabled; clear EE in SRR1, 359 * An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -317,14 +385,6 @@ masked_Hinterrupt:
317 hrfid 385 hrfid
318 b . 386 b .
319 387
320 .align 7
321do_stab_bolted_pSeries:
322 std r11,PACA_EXSLB+EX_R11(r13)
323 std r12,PACA_EXSLB+EX_R12(r13)
324 GET_SCRATCH0(r10)
325 std r10,PACA_EXSLB+EX_R13(r13)
326 EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
327
328#ifdef CONFIG_PPC_PSERIES 388#ifdef CONFIG_PPC_PSERIES
329/* 389/*
330 * Vectors for the FWNMI option. Share common code. 390 * Vectors for the FWNMI option. Share common code.
@@ -334,14 +394,8 @@ do_stab_bolted_pSeries:
334system_reset_fwnmi: 394system_reset_fwnmi:
335 HMT_MEDIUM 395 HMT_MEDIUM
336 SET_SCRATCH0(r13) /* save r13 */ 396 SET_SCRATCH0(r13) /* save r13 */
337 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD) 397 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
338 398 NOTEST, 0x100)
339 .globl machine_check_fwnmi
340 .align 7
341machine_check_fwnmi:
342 HMT_MEDIUM
343 SET_SCRATCH0(r13) /* save r13 */
344 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)
345 399
346#endif /* CONFIG_PPC_PSERIES */ 400#endif /* CONFIG_PPC_PSERIES */
347 401
@@ -376,7 +430,11 @@ slb_miss_user_pseries:
376/* KVM's trampoline code needs to be close to the interrupt handlers */ 430/* KVM's trampoline code needs to be close to the interrupt handlers */
377 431
378#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 432#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
433#ifdef CONFIG_KVM_BOOK3S_PR
379#include "../kvm/book3s_rmhandlers.S" 434#include "../kvm/book3s_rmhandlers.S"
435#else
436#include "../kvm/book3s_hv_rmhandlers.S"
437#endif
380#endif 438#endif
381 439
382 .align 7 440 .align 7
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 5e12b741ba5f..f8e971ba94f5 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -93,6 +93,30 @@ _ENTRY(_start);
93 93
94 bl early_init 94 bl early_init
95 95
96#ifdef CONFIG_RELOCATABLE
97 /*
98 * r25 will contain RPN/ERPN for the start address of memory
99 *
100 * Add the difference between KERNELBASE and PAGE_OFFSET to the
101 * start of physical memory to get kernstart_addr.
102 */
103 lis r3,kernstart_addr@ha
104 la r3,kernstart_addr@l(r3)
105
106 lis r4,KERNELBASE@h
107 ori r4,r4,KERNELBASE@l
108 lis r5,PAGE_OFFSET@h
109 ori r5,r5,PAGE_OFFSET@l
110 subf r4,r5,r4
111
112 rlwinm r6,r25,0,28,31 /* ERPN */
113 rlwinm r7,r25,0,0,3 /* RPN - assuming 256 MB page size */
114 add r7,r7,r4
115
116 stw r6,0(r3)
117 stw r7,4(r3)
118#endif
119
96/* 120/*
97 * Decide what sort of machine this is and initialize the MMU. 121 * Decide what sort of machine this is and initialize the MMU.
98 */ 122 */
@@ -1001,9 +1025,6 @@ clear_utlb_entry:
1001 lis r3,PAGE_OFFSET@h 1025 lis r3,PAGE_OFFSET@h
1002 ori r3,r3,PAGE_OFFSET@l 1026 ori r3,r3,PAGE_OFFSET@l
1003 1027
1004 /* Kernel is at the base of RAM */
1005 li r4, 0 /* Load the kernel physical address */
1006
1007 /* Load the kernel PID = 0 */ 1028 /* Load the kernel PID = 0 */
1008 li r0,0 1029 li r0,0
1009 mtspr SPRN_PID,r0 1030 mtspr SPRN_PID,r0
@@ -1013,9 +1034,8 @@ clear_utlb_entry:
1013 clrrwi r3,r3,12 /* Mask off the effective page number */ 1034 clrrwi r3,r3,12 /* Mask off the effective page number */
1014 ori r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M 1035 ori r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M
1015 1036
1016 /* Word 1 */ 1037 /* Word 1 - use r25. RPN is the same as the original entry */
1017 clrrwi r4,r4,12 /* Mask off the real page number */ 1038
1018 /* ERPN is 0 for first 4GB page */
1019 /* Word 2 */ 1039 /* Word 2 */
1020 li r5,0 1040 li r5,0
1021 ori r5,r5,PPC47x_TLB2_S_RWX 1041 ori r5,r5,PPC47x_TLB2_S_RWX
@@ -1026,7 +1046,7 @@ clear_utlb_entry:
1026 /* We write to way 0 and bolted 0 */ 1046 /* We write to way 0 and bolted 0 */
1027 lis r0,0x8800 1047 lis r0,0x8800
1028 tlbwe r3,r0,0 1048 tlbwe r3,r0,0
1029 tlbwe r4,r0,1 1049 tlbwe r25,r0,1
1030 tlbwe r5,r0,2 1050 tlbwe r5,r0,2
1031 1051
1032/* 1052/*
@@ -1124,7 +1144,13 @@ head_start_common:
1124 lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ 1144 lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
1125 mtspr SPRN_IVPR,r4 1145 mtspr SPRN_IVPR,r4
1126 1146
1127 addis r22,r22,KERNELBASE@h 1147 /*
1148 * If the kernel was loaded at a non-zero 256 MB page, we need to
1149 * mask off the most significant 4 bits to get the relative address
1150 * from the start of physical memory
1151 */
1152 rlwinm r22,r22,0,4,31
1153 addis r22,r22,PAGE_OFFSET@h
1128 mtlr r22 1154 mtlr r22
1129 isync 1155 isync
1130 blr 1156 blr
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index ba504099844a..3564c49c683e 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -255,7 +255,7 @@ generic_secondary_common_init:
255 mtctr r23 255 mtctr r23
256 bctrl 256 bctrl
257 257
2583: LOAD_REG_ADDR(r3, boot_cpu_count) /* Decrement boot_cpu_count */ 2583: LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */
259 lwarx r4,0,r3 259 lwarx r4,0,r3
260 subi r4,r4,1 260 subi r4,r4,1
261 stwcx. r4,0,r3 261 stwcx. r4,0,r3
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index a0bf158c8b47..fc921bf62e15 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -20,33 +20,43 @@
20 addi reg,reg,val@l 20 addi reg,reg,val@l
21#endif 21#endif
22 22
23/*
24 * Macro used to get to thread save registers.
25 * Note that entries 0-3 are used for the prolog code, and the remaining
26 * entries are available for specific exception use in the event a handler
27 * requires more than 4 scratch registers.
28 */
29#define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4))
30
23#define NORMAL_EXCEPTION_PROLOG \ 31#define NORMAL_EXCEPTION_PROLOG \
24 mtspr SPRN_SPRG_WSCRATCH0,r10;/* save two registers to work with */\ 32 mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \
25 mtspr SPRN_SPRG_WSCRATCH1,r11; \ 33 mfspr r10, SPRN_SPRG_THREAD; \
26 mtspr SPRN_SPRG_WSCRATCH2,r1; \ 34 stw r11, THREAD_NORMSAVE(0)(r10); \
27 mfcr r10; /* save CR in r10 for now */\ 35 stw r13, THREAD_NORMSAVE(2)(r10); \
36 mfcr r13; /* save CR in r13 for now */\
28 mfspr r11,SPRN_SRR1; /* check whether user or kernel */\ 37 mfspr r11,SPRN_SRR1; /* check whether user or kernel */\
29 andi. r11,r11,MSR_PR; \ 38 andi. r11,r11,MSR_PR; \
39 mr r11, r1; \
30 beq 1f; \ 40 beq 1f; \
31 mfspr r1,SPRN_SPRG_THREAD; /* if from user, start at top of */\ 41 /* if from user, start at top of this thread's kernel stack */ \
32 lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\ 42 lwz r11, THREAD_INFO-THREAD(r10); \
33 ALLOC_STACK_FRAME(r1, THREAD_SIZE); \ 43 ALLOC_STACK_FRAME(r11, THREAD_SIZE); \
341: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\ 441 : subi r11, r11, INT_FRAME_SIZE; /* Allocate exception frame */ \
35 mr r11,r1; \ 45 stw r13, _CCR(r11); /* save various registers */ \
36 stw r10,_CCR(r11); /* save various registers */\
37 stw r12,GPR12(r11); \ 46 stw r12,GPR12(r11); \
38 stw r9,GPR9(r11); \ 47 stw r9,GPR9(r11); \
39 mfspr r10,SPRN_SPRG_RSCRATCH0; \ 48 mfspr r13, SPRN_SPRG_RSCRATCH0; \
40 stw r10,GPR10(r11); \ 49 stw r13, GPR10(r11); \
41 mfspr r12,SPRN_SPRG_RSCRATCH1; \ 50 lwz r12, THREAD_NORMSAVE(0)(r10); \
42 stw r12,GPR11(r11); \ 51 stw r12,GPR11(r11); \
52 lwz r13, THREAD_NORMSAVE(2)(r10); /* restore r13 */ \
43 mflr r10; \ 53 mflr r10; \
44 stw r10,_LINK(r11); \ 54 stw r10,_LINK(r11); \
45 mfspr r10,SPRN_SPRG_RSCRATCH2; \
46 mfspr r12,SPRN_SRR0; \ 55 mfspr r12,SPRN_SRR0; \
47 stw r10,GPR1(r11); \ 56 stw r1, GPR1(r11); \
48 mfspr r9,SPRN_SRR1; \ 57 mfspr r9,SPRN_SRR1; \
49 stw r10,0(r11); \ 58 stw r1, 0(r11); \
59 mr r1, r11; \
50 rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ 60 rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
51 stw r0,GPR0(r11); \ 61 stw r0,GPR0(r11); \
52 lis r10, STACK_FRAME_REGS_MARKER@ha;/* exception frame marker */ \ 62 lis r10, STACK_FRAME_REGS_MARKER@ha;/* exception frame marker */ \
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 5ecf54cfa7d4..50845924b7d9 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -346,11 +346,12 @@ interrupt_base:
346 /* Data TLB Error Interrupt */ 346 /* Data TLB Error Interrupt */
347 START_EXCEPTION(DataTLBError) 347 START_EXCEPTION(DataTLBError)
348 mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ 348 mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
349 mtspr SPRN_SPRG_WSCRATCH1, r11 349 mfspr r10, SPRN_SPRG_THREAD
350 mtspr SPRN_SPRG_WSCRATCH2, r12 350 stw r11, THREAD_NORMSAVE(0)(r10)
351 mtspr SPRN_SPRG_WSCRATCH3, r13 351 stw r12, THREAD_NORMSAVE(1)(r10)
352 mfcr r11 352 stw r13, THREAD_NORMSAVE(2)(r10)
353 mtspr SPRN_SPRG_WSCRATCH4, r11 353 mfcr r13
354 stw r13, THREAD_NORMSAVE(3)(r10)
354 mfspr r10, SPRN_DEAR /* Get faulting address */ 355 mfspr r10, SPRN_DEAR /* Get faulting address */
355 356
356 /* If we are faulting a kernel address, we have to use the 357 /* If we are faulting a kernel address, we have to use the
@@ -416,11 +417,12 @@ interrupt_base:
416 /* The bailout. Restore registers to pre-exception conditions 417 /* The bailout. Restore registers to pre-exception conditions
417 * and call the heavyweights to help us out. 418 * and call the heavyweights to help us out.
418 */ 419 */
419 mfspr r11, SPRN_SPRG_RSCRATCH4 420 mfspr r10, SPRN_SPRG_THREAD
421 lwz r11, THREAD_NORMSAVE(3)(r10)
420 mtcr r11 422 mtcr r11
421 mfspr r13, SPRN_SPRG_RSCRATCH3 423 lwz r13, THREAD_NORMSAVE(2)(r10)
422 mfspr r12, SPRN_SPRG_RSCRATCH2 424 lwz r12, THREAD_NORMSAVE(1)(r10)
423 mfspr r11, SPRN_SPRG_RSCRATCH1 425 lwz r11, THREAD_NORMSAVE(0)(r10)
424 mfspr r10, SPRN_SPRG_RSCRATCH0 426 mfspr r10, SPRN_SPRG_RSCRATCH0
425 b DataStorage 427 b DataStorage
426 428
@@ -432,11 +434,12 @@ interrupt_base:
432 */ 434 */
433 START_EXCEPTION(InstructionTLBError) 435 START_EXCEPTION(InstructionTLBError)
434 mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ 436 mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
435 mtspr SPRN_SPRG_WSCRATCH1, r11 437 mfspr r10, SPRN_SPRG_THREAD
436 mtspr SPRN_SPRG_WSCRATCH2, r12 438 stw r11, THREAD_NORMSAVE(0)(r10)
437 mtspr SPRN_SPRG_WSCRATCH3, r13 439 stw r12, THREAD_NORMSAVE(1)(r10)
438 mfcr r11 440 stw r13, THREAD_NORMSAVE(2)(r10)
439 mtspr SPRN_SPRG_WSCRATCH4, r11 441 mfcr r13
442 stw r13, THREAD_NORMSAVE(3)(r10)
440 mfspr r10, SPRN_SRR0 /* Get faulting address */ 443 mfspr r10, SPRN_SRR0 /* Get faulting address */
441 444
442 /* If we are faulting a kernel address, we have to use the 445 /* If we are faulting a kernel address, we have to use the
@@ -496,11 +499,12 @@ interrupt_base:
496 /* The bailout. Restore registers to pre-exception conditions 499 /* The bailout. Restore registers to pre-exception conditions
497 * and call the heavyweights to help us out. 500 * and call the heavyweights to help us out.
498 */ 501 */
499 mfspr r11, SPRN_SPRG_RSCRATCH4 502 mfspr r10, SPRN_SPRG_THREAD
503 lwz r11, THREAD_NORMSAVE(3)(r10)
500 mtcr r11 504 mtcr r11
501 mfspr r13, SPRN_SPRG_RSCRATCH3 505 lwz r13, THREAD_NORMSAVE(2)(r10)
502 mfspr r12, SPRN_SPRG_RSCRATCH2 506 lwz r12, THREAD_NORMSAVE(1)(r10)
503 mfspr r11, SPRN_SPRG_RSCRATCH1 507 lwz r11, THREAD_NORMSAVE(0)(r10)
504 mfspr r10, SPRN_SPRG_RSCRATCH0 508 mfspr r10, SPRN_SPRG_RSCRATCH0
505 b InstructionStorage 509 b InstructionStorage
506 510
@@ -621,11 +625,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
621 tlbwe 625 tlbwe
622 626
623 /* Done...restore registers and get out of here. */ 627 /* Done...restore registers and get out of here. */
624 mfspr r11, SPRN_SPRG_RSCRATCH4 628 mfspr r10, SPRN_SPRG_THREAD
629 lwz r11, THREAD_NORMSAVE(3)(r10)
625 mtcr r11 630 mtcr r11
626 mfspr r13, SPRN_SPRG_RSCRATCH3 631 lwz r13, THREAD_NORMSAVE(2)(r10)
627 mfspr r12, SPRN_SPRG_RSCRATCH2 632 lwz r12, THREAD_NORMSAVE(1)(r10)
628 mfspr r11, SPRN_SPRG_RSCRATCH1 633 lwz r11, THREAD_NORMSAVE(0)(r10)
629 mfspr r10, SPRN_SPRG_RSCRATCH0 634 mfspr r10, SPRN_SPRG_RSCRATCH0
630 rfi /* Force context change */ 635 rfi /* Force context change */
631 636
@@ -656,7 +661,7 @@ load_up_spe:
656 cmpi 0,r4,0 661 cmpi 0,r4,0
657 beq 1f 662 beq 1f
658 addi r4,r4,THREAD /* want THREAD of last_task_used_spe */ 663 addi r4,r4,THREAD /* want THREAD of last_task_used_spe */
659 SAVE_32EVRS(0,r10,r4) 664 SAVE_32EVRS(0,r10,r4,THREAD_EVR0)
660 evxor evr10, evr10, evr10 /* clear out evr10 */ 665 evxor evr10, evr10, evr10 /* clear out evr10 */
661 evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */ 666 evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */
662 li r5,THREAD_ACC 667 li r5,THREAD_ACC
@@ -676,7 +681,7 @@ load_up_spe:
676 stw r4,THREAD_USED_SPE(r5) 681 stw r4,THREAD_USED_SPE(r5)
677 evlddx evr4,r10,r5 682 evlddx evr4,r10,r5
678 evmra evr4,evr4 683 evmra evr4,evr4
679 REST_32EVRS(0,r10,r5) 684 REST_32EVRS(0,r10,r5,THREAD_EVR0)
680#ifndef CONFIG_SMP 685#ifndef CONFIG_SMP
681 subi r4,r5,THREAD 686 subi r4,r5,THREAD
682 stw r4,last_task_used_spe@l(r3) 687 stw r4,last_task_used_spe@l(r3)
@@ -787,13 +792,11 @@ _GLOBAL(giveup_spe)
787 addi r3,r3,THREAD /* want THREAD of task */ 792 addi r3,r3,THREAD /* want THREAD of task */
788 lwz r5,PT_REGS(r3) 793 lwz r5,PT_REGS(r3)
789 cmpi 0,r5,0 794 cmpi 0,r5,0
790 SAVE_32EVRS(0, r4, r3) 795 SAVE_32EVRS(0, r4, r3, THREAD_EVR0)
791 evxor evr6, evr6, evr6 /* clear out evr6 */ 796 evxor evr6, evr6, evr6 /* clear out evr6 */
792 evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */ 797 evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */
793 li r4,THREAD_ACC 798 li r4,THREAD_ACC
794 evstddx evr6, r4, r3 /* save off accumulator */ 799 evstddx evr6, r4, r3 /* save off accumulator */
795 mfspr r6,SPRN_SPEFSCR
796 stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */
797 beq 1f 800 beq 1f
798 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) 801 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
799 lis r3,MSR_SPE@h 802 lis r3,MSR_SPE@h
diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_e500.S
index 47a1a983ff88..3e2b95c6ae67 100644
--- a/arch/powerpc/kernel/idle_e500.S
+++ b/arch/powerpc/kernel/idle_e500.S
@@ -26,6 +26,17 @@ _GLOBAL(e500_idle)
26 ori r4,r4,_TLF_NAPPING /* so when we take an exception */ 26 ori r4,r4,_TLF_NAPPING /* so when we take an exception */
27 stw r4,TI_LOCAL_FLAGS(r3) /* it will return to our caller */ 27 stw r4,TI_LOCAL_FLAGS(r3) /* it will return to our caller */
28 28
29#ifdef CONFIG_E500MC
30 wrteei 1
311: wait
32
33 /*
34 * Guard against spurious wakeups (e.g. from a hypervisor) --
35 * any real interrupt will cause us to return to LR due to
36 * _TLF_NAPPING.
37 */
38 b 1b
39#else
29 /* Check if we can nap or doze, put HID0 mask in r3 */ 40 /* Check if we can nap or doze, put HID0 mask in r3 */
30 lis r3,0 41 lis r3,0
31BEGIN_FTR_SECTION 42BEGIN_FTR_SECTION
@@ -72,6 +83,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_L2CSR|CPU_FTR_CAN_NAP)
72 mtmsr r7 83 mtmsr r7
73 isync 84 isync
742: b 2b 852: b 2b
86#endif /* !E500MC */
75 87
76/* 88/*
77 * Return from NAP/DOZE mode, restore some CPU specific registers, 89 * Return from NAP/DOZE mode, restore some CPU specific registers,
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index f8f0bc7f1d4f..3a70845a51c7 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -73,7 +73,6 @@ _GLOBAL(power7_idle)
73 b . 73 b .
74 74
75_GLOBAL(power7_wakeup_loss) 75_GLOBAL(power7_wakeup_loss)
76 GET_PACA(r13)
77 ld r1,PACAR1(r13) 76 ld r1,PACAR1(r13)
78 REST_NVGPRS(r1) 77 REST_NVGPRS(r1)
79 REST_GPR(2, r1) 78 REST_GPR(2, r1)
@@ -87,7 +86,6 @@ _GLOBAL(power7_wakeup_loss)
87 rfid 86 rfid
88 87
89_GLOBAL(power7_wakeup_noloss) 88_GLOBAL(power7_wakeup_noloss)
90 GET_PACA(r13)
91 ld r1,PACAR1(r13) 89 ld r1,PACAR1(r13)
92 ld r4,_MSR(r1) 90 ld r4,_MSR(r1)
93 ld r5,_NIP(r1) 91 ld r5,_NIP(r1)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5b428e308666..d281fb6f12f3 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -157,12 +157,6 @@ notrace void arch_local_irq_restore(unsigned long en)
157 if (get_hard_enabled()) 157 if (get_hard_enabled())
158 return; 158 return;
159 159
160#if defined(CONFIG_BOOKE) && defined(CONFIG_SMP)
161 /* Check for pending doorbell interrupts and resend to ourself */
162 if (cpu_has_feature(CPU_FTR_DBELL))
163 smp_muxed_ipi_resend();
164#endif
165
166 /* 160 /*
167 * Need to hard-enable interrupts here. Since currently disabled, 161 * Need to hard-enable interrupts here. Since currently disabled,
168 * no need to take further asm precautions against preemption; but 162 * no need to take further asm precautions against preemption; but
@@ -457,11 +451,18 @@ static inline void do_softirq_onstack(void)
457 curtp = current_thread_info(); 451 curtp = current_thread_info();
458 irqtp = softirq_ctx[smp_processor_id()]; 452 irqtp = softirq_ctx[smp_processor_id()];
459 irqtp->task = curtp->task; 453 irqtp->task = curtp->task;
454 irqtp->flags = 0;
460 current->thread.ksp_limit = (unsigned long)irqtp + 455 current->thread.ksp_limit = (unsigned long)irqtp +
461 _ALIGN_UP(sizeof(struct thread_info), 16); 456 _ALIGN_UP(sizeof(struct thread_info), 16);
462 call_do_softirq(irqtp); 457 call_do_softirq(irqtp);
463 current->thread.ksp_limit = saved_sp_limit; 458 current->thread.ksp_limit = saved_sp_limit;
464 irqtp->task = NULL; 459 irqtp->task = NULL;
460
461 /* Set any flag that may have been set on the
462 * alternate stack
463 */
464 if (irqtp->flags)
465 set_bits(irqtp->flags, &curtp->flags);
465} 466}
466 467
467void do_softirq(void) 468void do_softirq(void)
@@ -750,7 +751,7 @@ unsigned int irq_create_mapping(struct irq_host *host,
750 if (irq_setup_virq(host, virq, hwirq)) 751 if (irq_setup_virq(host, virq, hwirq))
751 return NO_IRQ; 752 return NO_IRQ;
752 753
753 printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n", 754 pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
754 hwirq, host->of_node ? host->of_node->full_name : "null", virq); 755 hwirq, host->of_node ? host->of_node->full_name : "null", virq);
755 756
756 return virq; 757 return virq;
@@ -882,6 +883,41 @@ unsigned int irq_find_mapping(struct irq_host *host,
882} 883}
883EXPORT_SYMBOL_GPL(irq_find_mapping); 884EXPORT_SYMBOL_GPL(irq_find_mapping);
884 885
886#ifdef CONFIG_SMP
887int irq_choose_cpu(const struct cpumask *mask)
888{
889 int cpuid;
890
891 if (cpumask_equal(mask, cpu_all_mask)) {
892 static int irq_rover;
893 static DEFINE_RAW_SPINLOCK(irq_rover_lock);
894 unsigned long flags;
895
896 /* Round-robin distribution... */
897do_round_robin:
898 raw_spin_lock_irqsave(&irq_rover_lock, flags);
899
900 irq_rover = cpumask_next(irq_rover, cpu_online_mask);
901 if (irq_rover >= nr_cpu_ids)
902 irq_rover = cpumask_first(cpu_online_mask);
903
904 cpuid = irq_rover;
905
906 raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
907 } else {
908 cpuid = cpumask_first_and(mask, cpu_online_mask);
909 if (cpuid >= nr_cpu_ids)
910 goto do_round_robin;
911 }
912
913 return get_hard_smp_processor_id(cpuid);
914}
915#else
916int irq_choose_cpu(const struct cpumask *mask)
917{
918 return hard_smp_processor_id();
919}
920#endif
885 921
886unsigned int irq_radix_revmap_lookup(struct irq_host *host, 922unsigned int irq_radix_revmap_lookup(struct irq_host *host,
887 irq_hw_number_t hwirq) 923 irq_hw_number_t hwirq)
diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
new file mode 100644
index 000000000000..368d158d665d
--- /dev/null
+++ b/arch/powerpc/kernel/jump_label.c
@@ -0,0 +1,23 @@
1/*
2 * Copyright 2010 Michael Ellerman, IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/kernel.h>
11#include <linux/jump_label.h>
12#include <asm/code-patching.h>
13
14void arch_jump_label_transform(struct jump_entry *entry,
15 enum jump_label_type type)
16{
17 u32 *addr = (u32 *)(unsigned long)entry->code;
18
19 if (type == JUMP_LABEL_ENABLE)
20 patch_branch(addr, entry->target, 0);
21 else
22 patch_instruction(addr, PPC_INST_NOP);
23}
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 7ee50f0547cb..6658a1589955 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -126,7 +126,7 @@ void __init reserve_crashkernel(void)
126 /* We might have got these values via the command line or the 126 /* We might have got these values via the command line or the
127 * device tree, either way sanitise them now. */ 127 * device tree, either way sanitise them now. */
128 128
129 crash_size = crashk_res.end - crashk_res.start + 1; 129 crash_size = resource_size(&crashk_res);
130 130
131#ifndef CONFIG_RELOCATABLE 131#ifndef CONFIG_RELOCATABLE
132 if (crashk_res.start != KDUMP_KERNELBASE) 132 if (crashk_res.start != KDUMP_KERNELBASE)
@@ -222,7 +222,7 @@ static void __init export_crashk_values(struct device_node *node)
222 222
223 if (crashk_res.start != 0) { 223 if (crashk_res.start != 0) {
224 prom_add_property(node, &crashk_base_prop); 224 prom_add_property(node, &crashk_base_prop);
225 crashk_size = crashk_res.end - crashk_res.start + 1; 225 crashk_size = resource_size(&crashk_res);
226 prom_add_property(node, &crashk_size_prop); 226 prom_add_property(node, &crashk_size_prop);
227 } 227 }
228} 228}
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index e89df59cdc5a..616921ef1439 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -339,7 +339,7 @@ _GLOBAL(real_205_writeb)
339#endif /* CONFIG_PPC_PASEMI */ 339#endif /* CONFIG_PPC_PASEMI */
340 340
341 341
342#ifdef CONFIG_CPU_FREQ_PMAC64 342#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
343/* 343/*
344 * SCOM access functions for 970 (FX only for now) 344 * SCOM access functions for 970 (FX only for now)
345 * 345 *
@@ -408,7 +408,7 @@ _GLOBAL(scom970_write)
408 /* restore interrupts */ 408 /* restore interrupts */
409 mtmsrd r5,1 409 mtmsrd r5,1
410 blr 410 blr
411#endif /* CONFIG_CPU_FREQ_PMAC64 */ 411#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */
412 412
413 413
414/* 414/*
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 49cee9df225b..a1cd701b5753 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -31,20 +31,6 @@
31 31
32LIST_HEAD(module_bug_list); 32LIST_HEAD(module_bug_list);
33 33
34void *module_alloc(unsigned long size)
35{
36 if (size == 0)
37 return NULL;
38
39 return vmalloc_exec(size);
40}
41
42/* Free memory returned from module_alloc */
43void module_free(struct module *mod, void *module_region)
44{
45 vfree(module_region);
46}
47
48static const Elf_Shdr *find_section(const Elf_Ehdr *hdr, 34static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
49 const Elf_Shdr *sechdrs, 35 const Elf_Shdr *sechdrs,
50 const char *name) 36 const char *name)
@@ -93,7 +79,3 @@ int module_finalize(const Elf_Ehdr *hdr,
93 79
94 return 0; 80 return 0;
95} 81}
96
97void module_arch_cleanup(struct module *mod)
98{
99}
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index f832773fc28e..0b6d79617d7b 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -174,17 +174,6 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
174 return 0; 174 return 0;
175} 175}
176 176
177int apply_relocate(Elf32_Shdr *sechdrs,
178 const char *strtab,
179 unsigned int symindex,
180 unsigned int relsec,
181 struct module *module)
182{
183 printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n",
184 module->name);
185 return -ENOEXEC;
186}
187
188static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val) 177static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
189{ 178{
190 if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16) 179 if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16)
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 8fbb12508bf3..9f44a775a106 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -243,16 +243,6 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
243 return 0; 243 return 0;
244} 244}
245 245
246int apply_relocate(Elf64_Shdr *sechdrs,
247 const char *strtab,
248 unsigned int symindex,
249 unsigned int relsec,
250 struct module *me)
251{
252 printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n", me->name);
253 return -ENOEXEC;
254}
255
256/* r2 is the TOC pointer: it actually points 0x8000 into the TOC (this 246/* r2 is the TOC pointer: it actually points 0x8000 into the TOC (this
257 gives the value maximum span in an instruction which uses a signed 247 gives the value maximum span in an instruction which uses a signed
258 offset) */ 248 offset) */
diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c
index 2cc5e0301d0b..fe21b515ca44 100644
--- a/arch/powerpc/kernel/mpc7450-pmu.c
+++ b/arch/powerpc/kernel/mpc7450-pmu.c
@@ -388,6 +388,11 @@ static int mpc7450_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
388 [C(OP_WRITE)] = { -1, -1 }, 388 [C(OP_WRITE)] = { -1, -1 },
389 [C(OP_PREFETCH)] = { -1, -1 }, 389 [C(OP_PREFETCH)] = { -1, -1 },
390 }, 390 },
391 [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
392 [C(OP_READ)] = { -1, -1 },
393 [C(OP_WRITE)] = { -1, -1 },
394 [C(OP_PREFETCH)] = { -1, -1 },
395 },
391}; 396};
392 397
393struct power_pmu mpc7450_pmu = { 398struct power_pmu mpc7450_pmu = {
@@ -405,7 +410,7 @@ struct power_pmu mpc7450_pmu = {
405 .cache_events = &mpc7450_cache_events, 410 .cache_events = &mpc7450_cache_events,
406}; 411};
407 412
408static int init_mpc7450_pmu(void) 413static int __init init_mpc7450_pmu(void)
409{ 414{
410 if (!cur_cpu_spec->oprofile_cpu_type || 415 if (!cur_cpu_spec->oprofile_cpu_type ||
411 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/7450")) 416 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/7450"))
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index 24582181b6ec..59dbf6abaaf3 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -26,7 +26,7 @@
26#include <asm/topology.h> 26#include <asm/topology.h>
27#include <asm/pci-bridge.h> 27#include <asm/pci-bridge.h>
28#include <asm/ppc-pci.h> 28#include <asm/ppc-pci.h>
29#include <asm/atomic.h> 29#include <linux/atomic.h>
30 30
31#ifdef CONFIG_PPC_OF_PLATFORM_PCI 31#ifdef CONFIG_PPC_OF_PLATFORM_PCI
32 32
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index efeb88184182..0a5a899846bb 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -167,7 +167,7 @@ void setup_paca(struct paca_struct *new_paca)
167 * if we do a GET_PACA() before the feature fixups have been 167 * if we do a GET_PACA() before the feature fixups have been
168 * applied 168 * applied
169 */ 169 */
170 if (cpu_has_feature(CPU_FTR_HVMODE_206)) 170 if (cpu_has_feature(CPU_FTR_HVMODE))
171 mtspr(SPRN_SPRG_HPACA, local_paca); 171 mtspr(SPRN_SPRG_HPACA, local_paca);
172#endif 172#endif
173 mtspr(SPRN_SPRG_PACA, local_paca); 173 mtspr(SPRN_SPRG_PACA, local_paca);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 4b9ae679254b..32656f105250 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -107,7 +107,7 @@ static resource_size_t pcibios_io_size(const struct pci_controller *hose)
107#ifdef CONFIG_PPC64 107#ifdef CONFIG_PPC64
108 return hose->pci_io_size; 108 return hose->pci_io_size;
109#else 109#else
110 return hose->io_resource.end - hose->io_resource.start + 1; 110 return resource_size(&hose->io_resource);
111#endif 111#endif
112} 112}
113 113
@@ -1097,9 +1097,6 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
1097 if (dev->is_added) 1097 if (dev->is_added)
1098 continue; 1098 continue;
1099 1099
1100 /* Setup OF node pointer in the device */
1101 dev->dev.of_node = pci_device_to_OF_node(dev);
1102
1103 /* Fixup NUMA node as it may not be setup yet by the generic 1100 /* Fixup NUMA node as it may not be setup yet by the generic
1104 * code and is needed by the DMA init 1101 * code and is needed by the DMA init
1105 */ 1102 */
@@ -1685,6 +1682,13 @@ int early_find_capability(struct pci_controller *hose, int bus, int devfn,
1685 return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap); 1682 return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
1686} 1683}
1687 1684
1685struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
1686{
1687 struct pci_controller *hose = bus->sysdata;
1688
1689 return of_node_get(hose->dn);
1690}
1691
1688/** 1692/**
1689 * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus 1693 * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus
1690 * @hose: Pointer to the PCI host controller instance structure 1694 * @hose: Pointer to the PCI host controller instance structure
@@ -1705,7 +1709,6 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose)
1705 hose->global_number); 1709 hose->global_number);
1706 return; 1710 return;
1707 } 1711 }
1708 bus->dev.of_node = of_node_get(node);
1709 bus->secondary = hose->first_busno; 1712 bus->secondary = hose->first_busno;
1710 hose->bus = bus; 1713 hose->bus = bus;
1711 1714
@@ -1728,3 +1731,21 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose)
1728 if (mode == PCI_PROBE_NORMAL) 1731 if (mode == PCI_PROBE_NORMAL)
1729 hose->last_busno = bus->subordinate = pci_scan_child_bus(bus); 1732 hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
1730} 1733}
1734
1735static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
1736{
1737 int i, class = dev->class >> 8;
1738
1739 if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
1740 class == PCI_CLASS_BRIDGE_OTHER) &&
1741 (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
1742 (dev->bus->parent == NULL)) {
1743 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1744 dev->resource[i].start = 0;
1745 dev->resource[i].end = 0;
1746 dev->resource[i].flags = 0;
1747 }
1748 }
1749}
1750DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
1751DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index b1959bf7562b..bb154511db5e 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -51,25 +51,6 @@ struct pci_dev *isa_bridge_pcidev;
51EXPORT_SYMBOL_GPL(isa_bridge_pcidev); 51EXPORT_SYMBOL_GPL(isa_bridge_pcidev);
52 52
53static void 53static void
54fixup_hide_host_resource_fsl(struct pci_dev *dev)
55{
56 int i, class = dev->class >> 8;
57
58 if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
59 class == PCI_CLASS_BRIDGE_OTHER) &&
60 (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
61 (dev->bus->parent == NULL)) {
62 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
63 dev->resource[i].start = 0;
64 dev->resource[i].end = 0;
65 dev->resource[i].flags = 0;
66 }
67 }
68}
69DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
70DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
71
72static void
73fixup_cpc710_pci64(struct pci_dev* dev) 54fixup_cpc710_pci64(struct pci_dev* dev)
74{ 55{
75 /* Hide the PCI64 BARs from the kernel as their content doesn't 56 /* Hide the PCI64 BARs from the kernel as their content doesn't
@@ -167,150 +148,26 @@ pcibios_make_OF_bus_map(void)
167#endif 148#endif
168} 149}
169 150
170typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
171
172static struct device_node*
173scan_OF_pci_childs(struct device_node *parent, pci_OF_scan_iterator filter, void* data)
174{
175 struct device_node *node;
176 struct device_node* sub_node;
177
178 for_each_child_of_node(parent, node) {
179 const unsigned int *class_code;
180
181 if (filter(node, data)) {
182 of_node_put(node);
183 return node;
184 }
185
186 /* For PCI<->PCI bridges or CardBus bridges, we go down
187 * Note: some OFs create a parent node "multifunc-device" as
188 * a fake root for all functions of a multi-function device,
189 * we go down them as well.
190 */
191 class_code = of_get_property(node, "class-code", NULL);
192 if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
193 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
194 strcmp(node->name, "multifunc-device"))
195 continue;
196 sub_node = scan_OF_pci_childs(node, filter, data);
197 if (sub_node) {
198 of_node_put(node);
199 return sub_node;
200 }
201 }
202 return NULL;
203}
204
205static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
206 unsigned int devfn)
207{
208 struct device_node *np, *cnp;
209 const u32 *reg;
210 unsigned int psize;
211
212 for_each_child_of_node(parent, np) {
213 reg = of_get_property(np, "reg", &psize);
214 if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn)
215 return np;
216
217 /* Note: some OFs create a parent node "multifunc-device" as
218 * a fake root for all functions of a multi-function device,
219 * we go down them as well. */
220 if (!strcmp(np->name, "multifunc-device")) {
221 cnp = scan_OF_for_pci_dev(np, devfn);
222 if (cnp)
223 return cnp;
224 }
225 }
226 return NULL;
227}
228
229
230static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
231{
232 struct device_node *parent, *np;
233
234 /* Are we a root bus ? */
235 if (bus->self == NULL || bus->parent == NULL) {
236 struct pci_controller *hose = pci_bus_to_host(bus);
237 if (hose == NULL)
238 return NULL;
239 return of_node_get(hose->dn);
240 }
241
242 /* not a root bus, we need to get our parent */
243 parent = scan_OF_for_pci_bus(bus->parent);
244 if (parent == NULL)
245 return NULL;
246
247 /* now iterate for children for a match */
248 np = scan_OF_for_pci_dev(parent, bus->self->devfn);
249 of_node_put(parent);
250
251 return np;
252}
253
254/*
255 * Scans the OF tree for a device node matching a PCI device
256 */
257struct device_node *
258pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
259{
260 struct device_node *parent, *np;
261
262 pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
263 parent = scan_OF_for_pci_bus(bus);
264 if (parent == NULL)
265 return NULL;
266 pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>");
267 np = scan_OF_for_pci_dev(parent, devfn);
268 of_node_put(parent);
269 pr_debug(" result is %s\n", np ? np->full_name : "<NULL>");
270
271 /* XXX most callers don't release the returned node
272 * mostly because ppc64 doesn't increase the refcount,
273 * we need to fix that.
274 */
275 return np;
276}
277EXPORT_SYMBOL(pci_busdev_to_OF_node);
278
279struct device_node*
280pci_device_to_OF_node(struct pci_dev *dev)
281{
282 return pci_busdev_to_OF_node(dev->bus, dev->devfn);
283}
284EXPORT_SYMBOL(pci_device_to_OF_node);
285
286static int
287find_OF_pci_device_filter(struct device_node* node, void* data)
288{
289 return ((void *)node == data);
290}
291 151
292/* 152/*
293 * Returns the PCI device matching a given OF node 153 * Returns the PCI device matching a given OF node
294 */ 154 */
295int 155int pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn)
296pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
297{ 156{
298 const unsigned int *reg; 157 struct pci_dev *dev = NULL;
299 struct pci_controller* hose; 158 const __be32 *reg;
300 struct pci_dev* dev = NULL; 159 int size;
301 160
302 /* Make sure it's really a PCI device */ 161 /* Check if it might have a chance to be a PCI device */
303 hose = pci_find_hose_for_OF_device(node); 162 if (!pci_find_hose_for_OF_device(node))
304 if (!hose || !hose->dn)
305 return -ENODEV;
306 if (!scan_OF_pci_childs(hose->dn,
307 find_OF_pci_device_filter, (void *)node))
308 return -ENODEV; 163 return -ENODEV;
309 reg = of_get_property(node, "reg", NULL); 164
310 if (!reg) 165 reg = of_get_property(node, "reg", &size);
166 if (!reg || size < 5 * sizeof(u32))
311 return -ENODEV; 167 return -ENODEV;
312 *bus = (reg[0] >> 16) & 0xff; 168
313 *devfn = ((reg[0] >> 8) & 0xff); 169 *bus = (be32_to_cpup(&reg[0]) >> 16) & 0xff;
170 *devfn = (be32_to_cpup(&reg[0]) >> 8) & 0xff;
314 171
315 /* Ok, here we need some tweak. If we have already renumbered 172 /* Ok, here we need some tweak. If we have already renumbered
316 * all busses, we can't rely on the OF bus number any more. 173 * all busses, we can't rely on the OF bus number any more.
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 6baabc13306a..478f8d78716b 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -142,53 +142,6 @@ void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb)
142 traverse_pci_devices(dn, update_dn_pci_info, phb); 142 traverse_pci_devices(dn, update_dn_pci_info, phb);
143} 143}
144 144
145/*
146 * Traversal func that looks for a <busno,devfcn> value.
147 * If found, the pci_dn is returned (thus terminating the traversal).
148 */
149static void *is_devfn_node(struct device_node *dn, void *data)
150{
151 int busno = ((unsigned long)data >> 8) & 0xff;
152 int devfn = ((unsigned long)data) & 0xff;
153 struct pci_dn *pci = dn->data;
154
155 if (pci && (devfn == pci->devfn) && (busno == pci->busno))
156 return dn;
157 return NULL;
158}
159
160/*
161 * This is the "slow" path for looking up a device_node from a
162 * pci_dev. It will hunt for the device under its parent's
163 * phb and then update of_node pointer.
164 *
165 * It may also do fixups on the actual device since this happens
166 * on the first read/write.
167 *
168 * Note that it also must deal with devices that don't exist.
169 * In this case it may probe for real hardware ("just in case")
170 * and add a device_node to the device tree if necessary.
171 *
172 * Is this function necessary anymore now that dev->dev.of_node is
173 * used to store the node pointer?
174 *
175 */
176struct device_node *fetch_dev_dn(struct pci_dev *dev)
177{
178 struct pci_controller *phb = dev->sysdata;
179 struct device_node *dn;
180 unsigned long searchval = (dev->bus->number << 8) | dev->devfn;
181
182 if (WARN_ON(!phb))
183 return NULL;
184
185 dn = traverse_pci_devices(phb->dn, is_devfn_node, (void *)searchval);
186 if (dn)
187 dev->dev.of_node = dn;
188 return dn;
189}
190EXPORT_SYMBOL(fetch_dev_dn);
191
192/** 145/**
193 * pci_devs_phb_init - Initialize phbs and pci devs under them. 146 * pci_devs_phb_init - Initialize phbs and pci devs under them.
194 * 147 *
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 1e89a72fd030..fe0a5ad6f73e 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -202,9 +202,9 @@ EXPORT_SYMBOL(of_create_pci_dev);
202 * this routine in turn call of_scan_bus() recusively to scan for more child 202 * this routine in turn call of_scan_bus() recusively to scan for more child
203 * devices. 203 * devices.
204 */ 204 */
205void __devinit of_scan_pci_bridge(struct device_node *node, 205void __devinit of_scan_pci_bridge(struct pci_dev *dev)
206 struct pci_dev *dev)
207{ 206{
207 struct device_node *node = dev->dev.of_node;
208 struct pci_bus *bus; 208 struct pci_bus *bus;
209 const u32 *busrange, *ranges; 209 const u32 *busrange, *ranges;
210 int len, i, mode; 210 int len, i, mode;
@@ -238,7 +238,6 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
238 bus->primary = dev->bus->number; 238 bus->primary = dev->bus->number;
239 bus->subordinate = busrange[1]; 239 bus->subordinate = busrange[1];
240 bus->bridge_ctl = 0; 240 bus->bridge_ctl = 0;
241 bus->dev.of_node = of_node_get(node);
242 241
243 /* parse ranges property */ 242 /* parse ranges property */
244 /* PCI #address-cells == 3 and #size-cells == 2 always */ 243 /* PCI #address-cells == 3 and #size-cells == 2 always */
@@ -335,9 +334,7 @@ static void __devinit __of_scan_bus(struct device_node *node,
335 list_for_each_entry(dev, &bus->devices, bus_list) { 334 list_for_each_entry(dev, &bus->devices, bus_list) {
336 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || 335 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
337 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { 336 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
338 struct device_node *child = pci_device_to_OF_node(dev); 337 of_scan_pci_bridge(dev);
339 if (child)
340 of_scan_pci_bridge(child, dev);
341 } 338 }
342 } 339 }
343} 340}
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 822f63008ae1..10a140f82cb8 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1207,7 +1207,7 @@ struct pmu power_pmu = {
1207 * here so there is no possibility of being interrupted. 1207 * here so there is no possibility of being interrupted.
1208 */ 1208 */
1209static void record_and_restart(struct perf_event *event, unsigned long val, 1209static void record_and_restart(struct perf_event *event, unsigned long val,
1210 struct pt_regs *regs, int nmi) 1210 struct pt_regs *regs)
1211{ 1211{
1212 u64 period = event->hw.sample_period; 1212 u64 period = event->hw.sample_period;
1213 s64 prev, delta, left; 1213 s64 prev, delta, left;
@@ -1258,7 +1258,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
1258 if (event->attr.sample_type & PERF_SAMPLE_ADDR) 1258 if (event->attr.sample_type & PERF_SAMPLE_ADDR)
1259 perf_get_data_addr(regs, &data.addr); 1259 perf_get_data_addr(regs, &data.addr);
1260 1260
1261 if (perf_event_overflow(event, nmi, &data, regs)) 1261 if (perf_event_overflow(event, &data, regs))
1262 power_pmu_stop(event, 0); 1262 power_pmu_stop(event, 0);
1263 } 1263 }
1264} 1264}
@@ -1346,7 +1346,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
1346 if ((int)val < 0) { 1346 if ((int)val < 0) {
1347 /* event has overflowed */ 1347 /* event has overflowed */
1348 found = 1; 1348 found = 1;
1349 record_and_restart(event, val, regs, nmi); 1349 record_and_restart(event, val, regs);
1350 } 1350 }
1351 } 1351 }
1352 1352
@@ -1408,7 +1408,7 @@ power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu
1408 return NOTIFY_OK; 1408 return NOTIFY_OK;
1409} 1409}
1410 1410
1411int register_power_pmu(struct power_pmu *pmu) 1411int __cpuinit register_power_pmu(struct power_pmu *pmu)
1412{ 1412{
1413 if (ppmu) 1413 if (ppmu)
1414 return -EBUSY; /* something's already registered */ 1414 return -EBUSY; /* something's already registered */
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index b0dc8f7069cd..0a6d2a9d569c 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -568,7 +568,7 @@ static struct pmu fsl_emb_pmu = {
568 * here so there is no possibility of being interrupted. 568 * here so there is no possibility of being interrupted.
569 */ 569 */
570static void record_and_restart(struct perf_event *event, unsigned long val, 570static void record_and_restart(struct perf_event *event, unsigned long val,
571 struct pt_regs *regs, int nmi) 571 struct pt_regs *regs)
572{ 572{
573 u64 period = event->hw.sample_period; 573 u64 period = event->hw.sample_period;
574 s64 prev, delta, left; 574 s64 prev, delta, left;
@@ -616,7 +616,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
616 perf_sample_data_init(&data, 0); 616 perf_sample_data_init(&data, 0);
617 data.period = event->hw.last_period; 617 data.period = event->hw.last_period;
618 618
619 if (perf_event_overflow(event, nmi, &data, regs)) 619 if (perf_event_overflow(event, &data, regs))
620 fsl_emb_pmu_stop(event, 0); 620 fsl_emb_pmu_stop(event, 0);
621 } 621 }
622} 622}
@@ -644,7 +644,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
644 if (event) { 644 if (event) {
645 /* event has overflowed */ 645 /* event has overflowed */
646 found = 1; 646 found = 1;
647 record_and_restart(event, val, regs, nmi); 647 record_and_restart(event, val, regs);
648 } else { 648 } else {
649 /* 649 /*
650 * Disabled counter is negative, 650 * Disabled counter is negative,
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c
index ead8b3c2649e..b4f1dda4d089 100644
--- a/arch/powerpc/kernel/power4-pmu.c
+++ b/arch/powerpc/kernel/power4-pmu.c
@@ -587,6 +587,11 @@ static int power4_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
587 [C(OP_WRITE)] = { -1, -1 }, 587 [C(OP_WRITE)] = { -1, -1 },
588 [C(OP_PREFETCH)] = { -1, -1 }, 588 [C(OP_PREFETCH)] = { -1, -1 },
589 }, 589 },
590 [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
591 [C(OP_READ)] = { -1, -1 },
592 [C(OP_WRITE)] = { -1, -1 },
593 [C(OP_PREFETCH)] = { -1, -1 },
594 },
590}; 595};
591 596
592static struct power_pmu power4_pmu = { 597static struct power_pmu power4_pmu = {
@@ -604,7 +609,7 @@ static struct power_pmu power4_pmu = {
604 .cache_events = &power4_cache_events, 609 .cache_events = &power4_cache_events,
605}; 610};
606 611
607static int init_power4_pmu(void) 612static int __init init_power4_pmu(void)
608{ 613{
609 if (!cur_cpu_spec->oprofile_cpu_type || 614 if (!cur_cpu_spec->oprofile_cpu_type ||
610 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power4")) 615 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power4"))
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
index eca0ac595cb6..a8757baa28f3 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -653,6 +653,11 @@ static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
653 [C(OP_WRITE)] = { -1, -1 }, 653 [C(OP_WRITE)] = { -1, -1 },
654 [C(OP_PREFETCH)] = { -1, -1 }, 654 [C(OP_PREFETCH)] = { -1, -1 },
655 }, 655 },
656 [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
657 [C(OP_READ)] = { -1, -1 },
658 [C(OP_WRITE)] = { -1, -1 },
659 [C(OP_PREFETCH)] = { -1, -1 },
660 },
656}; 661};
657 662
658static struct power_pmu power5p_pmu = { 663static struct power_pmu power5p_pmu = {
@@ -672,7 +677,7 @@ static struct power_pmu power5p_pmu = {
672 .cache_events = &power5p_cache_events, 677 .cache_events = &power5p_cache_events,
673}; 678};
674 679
675static int init_power5p_pmu(void) 680static int __init init_power5p_pmu(void)
676{ 681{
677 if (!cur_cpu_spec->oprofile_cpu_type || 682 if (!cur_cpu_spec->oprofile_cpu_type ||
678 (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5+") 683 (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5+")
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c
index d5ff0f64a5e6..e7f06eb7a861 100644
--- a/arch/powerpc/kernel/power5-pmu.c
+++ b/arch/powerpc/kernel/power5-pmu.c
@@ -595,6 +595,11 @@ static int power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
595 [C(OP_WRITE)] = { -1, -1 }, 595 [C(OP_WRITE)] = { -1, -1 },
596 [C(OP_PREFETCH)] = { -1, -1 }, 596 [C(OP_PREFETCH)] = { -1, -1 },
597 }, 597 },
598 [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
599 [C(OP_READ)] = { -1, -1 },
600 [C(OP_WRITE)] = { -1, -1 },
601 [C(OP_PREFETCH)] = { -1, -1 },
602 },
598}; 603};
599 604
600static struct power_pmu power5_pmu = { 605static struct power_pmu power5_pmu = {
@@ -612,7 +617,7 @@ static struct power_pmu power5_pmu = {
612 .cache_events = &power5_cache_events, 617 .cache_events = &power5_cache_events,
613}; 618};
614 619
615static int init_power5_pmu(void) 620static int __init init_power5_pmu(void)
616{ 621{
617 if (!cur_cpu_spec->oprofile_cpu_type || 622 if (!cur_cpu_spec->oprofile_cpu_type ||
618 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5")) 623 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5"))
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
index 31603927e376..03b95e2c6d65 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -516,6 +516,11 @@ static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
516 [C(OP_WRITE)] = { -1, -1 }, 516 [C(OP_WRITE)] = { -1, -1 },
517 [C(OP_PREFETCH)] = { -1, -1 }, 517 [C(OP_PREFETCH)] = { -1, -1 },
518 }, 518 },
519 [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
520 [C(OP_READ)] = { -1, -1 },
521 [C(OP_WRITE)] = { -1, -1 },
522 [C(OP_PREFETCH)] = { -1, -1 },
523 },
519}; 524};
520 525
521static struct power_pmu power6_pmu = { 526static struct power_pmu power6_pmu = {
@@ -535,7 +540,7 @@ static struct power_pmu power6_pmu = {
535 .cache_events = &power6_cache_events, 540 .cache_events = &power6_cache_events,
536}; 541};
537 542
538static int init_power6_pmu(void) 543static int __init init_power6_pmu(void)
539{ 544{
540 if (!cur_cpu_spec->oprofile_cpu_type || 545 if (!cur_cpu_spec->oprofile_cpu_type ||
541 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6")) 546 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6"))
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
index 593740fcb799..de83d6060dda 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -342,6 +342,11 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
342 [C(OP_WRITE)] = { -1, -1 }, 342 [C(OP_WRITE)] = { -1, -1 },
343 [C(OP_PREFETCH)] = { -1, -1 }, 343 [C(OP_PREFETCH)] = { -1, -1 },
344 }, 344 },
345 [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
346 [C(OP_READ)] = { -1, -1 },
347 [C(OP_WRITE)] = { -1, -1 },
348 [C(OP_PREFETCH)] = { -1, -1 },
349 },
345}; 350};
346 351
347static struct power_pmu power7_pmu = { 352static struct power_pmu power7_pmu = {
@@ -360,7 +365,7 @@ static struct power_pmu power7_pmu = {
360 .cache_events = &power7_cache_events, 365 .cache_events = &power7_cache_events,
361}; 366};
362 367
363static int init_power7_pmu(void) 368static int __init init_power7_pmu(void)
364{ 369{
365 if (!cur_cpu_spec->oprofile_cpu_type || 370 if (!cur_cpu_spec->oprofile_cpu_type ||
366 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7")) 371 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7"))
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
index 9a6e093858fe..8c2190206964 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -467,6 +467,11 @@ static int ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
467 [C(OP_WRITE)] = { -1, -1 }, 467 [C(OP_WRITE)] = { -1, -1 },
468 [C(OP_PREFETCH)] = { -1, -1 }, 468 [C(OP_PREFETCH)] = { -1, -1 },
469 }, 469 },
470 [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
471 [C(OP_READ)] = { -1, -1 },
472 [C(OP_WRITE)] = { -1, -1 },
473 [C(OP_PREFETCH)] = { -1, -1 },
474 },
470}; 475};
471 476
472static struct power_pmu ppc970_pmu = { 477static struct power_pmu ppc970_pmu = {
@@ -484,7 +489,7 @@ static struct power_pmu ppc970_pmu = {
484 .cache_events = &ppc970_cache_events, 489 .cache_events = &ppc970_cache_events,
485}; 490};
486 491
487static int init_ppc970_pmu(void) 492static int __init init_ppc970_pmu(void)
488{ 493{
489 if (!cur_cpu_spec->oprofile_cpu_type || 494 if (!cur_cpu_spec->oprofile_cpu_type ||
490 (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970") 495 (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970")
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 7d28f540200c..f5ae872a2ef0 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -18,7 +18,7 @@
18#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
19#include <asm/uaccess.h> 19#include <asm/uaccess.h>
20#include <asm/io.h> 20#include <asm/io.h>
21#include <asm/atomic.h> 21#include <linux/atomic.h>
22#include <asm/checksum.h> 22#include <asm/checksum.h>
23#include <asm/pgtable.h> 23#include <asm/pgtable.h>
24#include <asm/tlbflush.h> 24#include <asm/tlbflush.h>
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 91e52df3d81d..8f53954e75a3 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -96,6 +96,7 @@ void flush_fp_to_thread(struct task_struct *tsk)
96 preempt_enable(); 96 preempt_enable();
97 } 97 }
98} 98}
99EXPORT_SYMBOL_GPL(flush_fp_to_thread);
99 100
100void enable_kernel_fp(void) 101void enable_kernel_fp(void)
101{ 102{
@@ -145,6 +146,7 @@ void flush_altivec_to_thread(struct task_struct *tsk)
145 preempt_enable(); 146 preempt_enable();
146 } 147 }
147} 148}
149EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
148#endif /* CONFIG_ALTIVEC */ 150#endif /* CONFIG_ALTIVEC */
149 151
150#ifdef CONFIG_VSX 152#ifdef CONFIG_VSX
@@ -186,6 +188,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
186 preempt_enable(); 188 preempt_enable();
187 } 189 }
188} 190}
191EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
189#endif /* CONFIG_VSX */ 192#endif /* CONFIG_VSX */
190 193
191#ifdef CONFIG_SPE 194#ifdef CONFIG_SPE
@@ -213,6 +216,7 @@ void flush_spe_to_thread(struct task_struct *tsk)
213#ifdef CONFIG_SMP 216#ifdef CONFIG_SMP
214 BUG_ON(tsk != current); 217 BUG_ON(tsk != current);
215#endif 218#endif
219 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
216 giveup_spe(tsk); 220 giveup_spe(tsk);
217 } 221 }
218 preempt_enable(); 222 preempt_enable();
@@ -650,6 +654,8 @@ void show_regs(struct pt_regs * regs)
650 printbits(regs->msr, msr_bits); 654 printbits(regs->msr, msr_bits);
651 printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); 655 printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
652 trap = TRAP(regs); 656 trap = TRAP(regs);
657 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
658 printk("CFAR: "REG"\n", regs->orig_gpr3);
653 if (trap == 0x300 || trap == 0x600) 659 if (trap == 0x300 || trap == 0x600)
654#ifdef CONFIG_PPC_ADV_DEBUG_REGS 660#ifdef CONFIG_PPC_ADV_DEBUG_REGS
655 printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); 661 printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
@@ -831,8 +837,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
831 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */ 837 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
832#endif 838#endif
833 839
834 set_fs(USER_DS);
835
836 /* 840 /*
837 * If we exec out of a kernel thread then thread.regs will not be 841 * If we exec out of a kernel thread then thread.regs will not be
838 * set. Do it now. 842 * set. Do it now.
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 8c3112a57cf2..174e1e96175e 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -69,6 +69,7 @@ unsigned long tce_alloc_start, tce_alloc_end;
69u64 ppc64_rma_size; 69u64 ppc64_rma_size;
70#endif 70#endif
71static phys_addr_t first_memblock_size; 71static phys_addr_t first_memblock_size;
72static int __initdata boot_cpu_count;
72 73
73static int __init early_parse_mem(char *p) 74static int __init early_parse_mem(char *p)
74{ 75{
@@ -769,6 +770,13 @@ void __init early_init_devtree(void *params)
769 */ 770 */
770 of_scan_flat_dt(early_init_dt_scan_cpus, NULL); 771 of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
771 772
773#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
774 /* We'll later wait for secondaries to check in; there are
775 * NCPUS-1 non-boot CPUs :-)
776 */
777 spinning_secondaries = boot_cpu_count - 1;
778#endif
779
772 DBG(" <- early_init_devtree()\n"); 780 DBG(" <- early_init_devtree()\n");
773} 781}
774 782
@@ -862,16 +870,14 @@ static int prom_reconfig_notifier(struct notifier_block *nb,
862 switch (action) { 870 switch (action) {
863 case PSERIES_RECONFIG_ADD: 871 case PSERIES_RECONFIG_ADD:
864 err = of_finish_dynamic_node(node); 872 err = of_finish_dynamic_node(node);
865 if (err < 0) { 873 if (err < 0)
866 printk(KERN_ERR "finish_node returned %d\n", err); 874 printk(KERN_ERR "finish_node returned %d\n", err);
867 err = NOTIFY_BAD;
868 }
869 break; 875 break;
870 default: 876 default:
871 err = NOTIFY_DONE; 877 err = 0;
872 break; 878 break;
873 } 879 }
874 return err; 880 return notifier_from_errno(err);
875} 881}
876 882
877static struct notifier_block prom_reconfig_nb = { 883static struct notifier_block prom_reconfig_nb = {
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index cb22024f2b42..05b7dd217f60 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -882,7 +882,7 @@ void user_disable_single_step(struct task_struct *task)
882} 882}
883 883
884#ifdef CONFIG_HAVE_HW_BREAKPOINT 884#ifdef CONFIG_HAVE_HW_BREAKPOINT
885void ptrace_triggered(struct perf_event *bp, int nmi, 885void ptrace_triggered(struct perf_event *bp,
886 struct perf_sample_data *data, struct pt_regs *regs) 886 struct perf_sample_data *data, struct pt_regs *regs)
887{ 887{
888 struct perf_event_attr attr; 888 struct perf_event_attr attr;
@@ -973,7 +973,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
973 &attr.bp_type); 973 &attr.bp_type);
974 974
975 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr, 975 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
976 ptrace_triggered, task); 976 ptrace_triggered, NULL, task);
977 if (IS_ERR(bp)) { 977 if (IS_ERR(bp)) {
978 thread->ptrace_bps[0] = NULL; 978 thread->ptrace_bps[0] = NULL;
979 ptrace_put_breakpoints(task); 979 ptrace_put_breakpoints(task);
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 271ff6318eda..d5ca8236315c 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -24,6 +24,7 @@
24#include <linux/cpumask.h> 24#include <linux/cpumask.h>
25#include <linux/memblock.h> 25#include <linux/memblock.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/reboot.h>
27 28
28#include <asm/prom.h> 29#include <asm/prom.h>
29#include <asm/rtas.h> 30#include <asm/rtas.h>
@@ -38,7 +39,7 @@
38#include <asm/udbg.h> 39#include <asm/udbg.h>
39#include <asm/syscalls.h> 40#include <asm/syscalls.h>
40#include <asm/smp.h> 41#include <asm/smp.h>
41#include <asm/atomic.h> 42#include <linux/atomic.h>
42#include <asm/time.h> 43#include <asm/time.h>
43#include <asm/mmu.h> 44#include <asm/mmu.h>
44#include <asm/topology.h> 45#include <asm/topology.h>
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index bf5f5ce3a7bd..e037c7494fd8 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -17,6 +17,7 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/proc_fs.h> 19#include <linux/proc_fs.h>
20#include <linux/reboot.h>
20#include <asm/delay.h> 21#include <asm/delay.h>
21#include <asm/uaccess.h> 22#include <asm/uaccess.h>
22#include <asm/rtas.h> 23#include <asm/rtas.h>
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 67f6c3b51357..481ef064c8f1 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -27,7 +27,7 @@
27#include <asm/rtas.h> 27#include <asm/rtas.h>
28#include <asm/prom.h> 28#include <asm/prom.h>
29#include <asm/nvram.h> 29#include <asm/nvram.h>
30#include <asm/atomic.h> 30#include <linux/atomic.h>
31#include <asm/machdep.h> 31#include <asm/machdep.h>
32 32
33 33
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 79fca2651b65..b1d738d12890 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -375,6 +375,9 @@ void __init check_for_initrd(void)
375 375
376int threads_per_core, threads_shift; 376int threads_per_core, threads_shift;
377cpumask_t threads_core_mask; 377cpumask_t threads_core_mask;
378EXPORT_SYMBOL_GPL(threads_per_core);
379EXPORT_SYMBOL_GPL(threads_shift);
380EXPORT_SYMBOL_GPL(threads_core_mask);
378 381
379static void __init cpu_init_thread_core_maps(int tpc) 382static void __init cpu_init_thread_core_maps(int tpc)
380{ 383{
@@ -704,29 +707,14 @@ static int powerpc_debugfs_init(void)
704arch_initcall(powerpc_debugfs_init); 707arch_initcall(powerpc_debugfs_init);
705#endif 708#endif
706 709
707static int ppc_dflt_bus_notify(struct notifier_block *nb, 710void ppc_printk_progress(char *s, unsigned short hex)
708 unsigned long action, void *data)
709{ 711{
710 struct device *dev = data; 712 pr_info("%s\n", s);
711
712 /* We are only intereted in device addition */
713 if (action != BUS_NOTIFY_ADD_DEVICE)
714 return 0;
715
716 set_dma_ops(dev, &dma_direct_ops);
717
718 return NOTIFY_DONE;
719} 713}
720 714
721static struct notifier_block ppc_dflt_plat_bus_notifier = { 715void arch_setup_pdev_archdata(struct platform_device *pdev)
722 .notifier_call = ppc_dflt_bus_notify,
723 .priority = INT_MAX,
724};
725
726static int __init setup_bus_notifier(void)
727{ 716{
728 bus_register_notifier(&platform_bus_type, &ppc_dflt_plat_bus_notifier); 717 pdev->archdata.dma_mask = DMA_BIT_MASK(32);
729 return 0; 718 pdev->dev.dma_mask = &pdev->archdata.dma_mask;
719 set_dma_ops(&pdev->dev, &dma_direct_ops);
730} 720}
731
732arch_initcall(setup_bus_notifier);
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 620d792b52e4..209135af0a40 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -48,8 +48,8 @@ extern void bootx_init(unsigned long r4, unsigned long phys);
48 48
49int boot_cpuid = -1; 49int boot_cpuid = -1;
50EXPORT_SYMBOL_GPL(boot_cpuid); 50EXPORT_SYMBOL_GPL(boot_cpuid);
51int __initdata boot_cpu_count;
52int boot_cpuid_phys; 51int boot_cpuid_phys;
52EXPORT_SYMBOL_GPL(boot_cpuid_phys);
53 53
54int smp_hw_index[NR_CPUS]; 54int smp_hw_index[NR_CPUS];
55 55
@@ -127,6 +127,8 @@ notrace void __init machine_init(unsigned long dt_ptr)
127 /* Do some early initialization based on the flat device tree */ 127 /* Do some early initialization based on the flat device tree */
128 early_init_devtree(__va(dt_ptr)); 128 early_init_devtree(__va(dt_ptr));
129 129
130 early_init_mmu();
131
130 probe_machine(); 132 probe_machine();
131 133
132 setup_kdump_trampoline(); 134 setup_kdump_trampoline();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a88bf2713d41..aebef1320ed7 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -63,6 +63,7 @@
63#include <asm/kexec.h> 63#include <asm/kexec.h>
64#include <asm/mmu_context.h> 64#include <asm/mmu_context.h>
65#include <asm/code-patching.h> 65#include <asm/code-patching.h>
66#include <asm/kvm_ppc.h>
66 67
67#include "setup.h" 68#include "setup.h"
68 69
@@ -73,7 +74,7 @@
73#endif 74#endif
74 75
75int boot_cpuid = 0; 76int boot_cpuid = 0;
76int __initdata boot_cpu_count; 77int __initdata spinning_secondaries;
77u64 ppc64_pft_size; 78u64 ppc64_pft_size;
78 79
79/* Pick defaults since we might want to patch instructions 80/* Pick defaults since we might want to patch instructions
@@ -253,11 +254,11 @@ void smp_release_cpus(void)
253 for (i = 0; i < 100000; i++) { 254 for (i = 0; i < 100000; i++) {
254 mb(); 255 mb();
255 HMT_low(); 256 HMT_low();
256 if (boot_cpu_count == 0) 257 if (spinning_secondaries == 0)
257 break; 258 break;
258 udelay(1); 259 udelay(1);
259 } 260 }
260 DBG("boot_cpu_count = %d\n", boot_cpu_count); 261 DBG("spinning_secondaries = %d\n", spinning_secondaries);
261 262
262 DBG(" <- smp_release_cpus()\n"); 263 DBG(" <- smp_release_cpus()\n");
263} 264}
@@ -580,6 +581,8 @@ void __init setup_arch(char **cmdline_p)
580 /* Initialize the MMU context management stuff */ 581 /* Initialize the MMU context management stuff */
581 mmu_context_init(); 582 mmu_context_init();
582 583
584 kvm_rma_init();
585
583 ppc64_boot_msg(0x15, "Setup Done"); 586 ppc64_boot_msg(0x15, "Setup Done");
584} 587}
585 588
diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c
index 03e45c4a9ef1..640de836e466 100644
--- a/arch/powerpc/kernel/smp-tbsync.c
+++ b/arch/powerpc/kernel/smp-tbsync.c
@@ -11,7 +11,7 @@
11#include <linux/unistd.h> 11#include <linux/unistd.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <asm/atomic.h> 14#include <linux/atomic.h>
15#include <asm/smp.h> 15#include <asm/smp.h>
16#include <asm/time.h> 16#include <asm/time.h>
17 17
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ebc6700b98d..7bf2187dfd99 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -33,7 +33,7 @@
33#include <linux/topology.h> 33#include <linux/topology.h>
34 34
35#include <asm/ptrace.h> 35#include <asm/ptrace.h>
36#include <asm/atomic.h> 36#include <linux/atomic.h>
37#include <asm/irq.h> 37#include <asm/irq.h>
38#include <asm/page.h> 38#include <asm/page.h>
39#include <asm/pgtable.h> 39#include <asm/pgtable.h>
@@ -202,14 +202,6 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
202 smp_ops->cause_ipi(cpu, info->data); 202 smp_ops->cause_ipi(cpu, info->data);
203} 203}
204 204
205void smp_muxed_ipi_resend(void)
206{
207 struct cpu_messages *info = &__get_cpu_var(ipi_message);
208
209 if (info->messages)
210 smp_ops->cause_ipi(smp_processor_id(), info->data);
211}
212
213irqreturn_t smp_ipi_demux(void) 205irqreturn_t smp_ipi_demux(void)
214{ 206{
215 struct cpu_messages *info = &__get_cpu_var(ipi_message); 207 struct cpu_messages *info = &__get_cpu_var(ipi_message);
@@ -238,15 +230,26 @@ irqreturn_t smp_ipi_demux(void)
238} 230}
239#endif /* CONFIG_PPC_SMP_MUXED_IPI */ 231#endif /* CONFIG_PPC_SMP_MUXED_IPI */
240 232
233static inline void do_message_pass(int cpu, int msg)
234{
235 if (smp_ops->message_pass)
236 smp_ops->message_pass(cpu, msg);
237#ifdef CONFIG_PPC_SMP_MUXED_IPI
238 else
239 smp_muxed_ipi_message_pass(cpu, msg);
240#endif
241}
242
241void smp_send_reschedule(int cpu) 243void smp_send_reschedule(int cpu)
242{ 244{
243 if (likely(smp_ops)) 245 if (likely(smp_ops))
244 smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE); 246 do_message_pass(cpu, PPC_MSG_RESCHEDULE);
245} 247}
248EXPORT_SYMBOL_GPL(smp_send_reschedule);
246 249
247void arch_send_call_function_single_ipi(int cpu) 250void arch_send_call_function_single_ipi(int cpu)
248{ 251{
249 smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE); 252 do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
250} 253}
251 254
252void arch_send_call_function_ipi_mask(const struct cpumask *mask) 255void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -254,7 +257,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
254 unsigned int cpu; 257 unsigned int cpu;
255 258
256 for_each_cpu(cpu, mask) 259 for_each_cpu(cpu, mask)
257 smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION); 260 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
258} 261}
259 262
260#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) 263#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
@@ -268,7 +271,7 @@ void smp_send_debugger_break(void)
268 271
269 for_each_online_cpu(cpu) 272 for_each_online_cpu(cpu)
270 if (cpu != me) 273 if (cpu != me)
271 smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK); 274 do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
272} 275}
273#endif 276#endif
274 277
@@ -303,6 +306,10 @@ struct thread_info *current_set[NR_CPUS];
303static void __devinit smp_store_cpu_info(int id) 306static void __devinit smp_store_cpu_info(int id)
304{ 307{
305 per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); 308 per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
309#ifdef CONFIG_PPC_FSL_BOOK3E
310 per_cpu(next_tlbcam_idx, id)
311 = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
312#endif
306} 313}
307 314
308void __init smp_prepare_cpus(unsigned int max_cpus) 315void __init smp_prepare_cpus(unsigned int max_cpus)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index f33acfd872ad..03b29a6759ab 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -544,7 +544,7 @@ DEFINE_PER_CPU(u8, irq_work_pending);
544 544
545#endif /* 32 vs 64 bit */ 545#endif /* 32 vs 64 bit */
546 546
547void set_irq_work_pending(void) 547void arch_irq_work_raise(void)
548{ 548{
549 preempt_disable(); 549 preempt_disable();
550 set_irq_work_pending_flag(); 550 set_irq_work_pending_flag();
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 1a0141426cda..f19d9777d3c1 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1387,10 +1387,7 @@ void SPEFloatingPointException(struct pt_regs *regs)
1387 int code = 0; 1387 int code = 0;
1388 int err; 1388 int err;
1389 1389
1390 preempt_disable(); 1390 flush_spe_to_thread(current);
1391 if (regs->msr & MSR_SPE)
1392 giveup_spe(current);
1393 preempt_enable();
1394 1391
1395 spefscr = current->thread.spefscr; 1392 spefscr = current->thread.spefscr;
1396 fpexc_mode = current->thread.fpexc_mode; 1393 fpexc_mode = current->thread.fpexc_mode;
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index 23d65abbedce..faa82c1f3f68 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -31,6 +31,9 @@ void __init udbg_early_init(void)
31#if defined(CONFIG_PPC_EARLY_DEBUG_LPAR) 31#if defined(CONFIG_PPC_EARLY_DEBUG_LPAR)
32 /* For LPAR machines that have an HVC console on vterm 0 */ 32 /* For LPAR machines that have an HVC console on vterm 0 */
33 udbg_init_debug_lpar(); 33 udbg_init_debug_lpar();
34#elif defined(CONFIG_PPC_EARLY_DEBUG_LPAR_HVSI)
35 /* For LPAR machines that have an HVSI console on vterm 0 */
36 udbg_init_debug_lpar_hvsi();
34#elif defined(CONFIG_PPC_EARLY_DEBUG_G5) 37#elif defined(CONFIG_PPC_EARLY_DEBUG_G5)
35 /* For use on Apple G5 machines */ 38 /* For use on Apple G5 machines */
36 udbg_init_pmac_realmode(); 39 udbg_init_pmac_realmode();
@@ -68,6 +71,8 @@ void __init udbg_early_init(void)
68 71
69#ifdef CONFIG_PPC_EARLY_DEBUG 72#ifdef CONFIG_PPC_EARLY_DEBUG
70 console_loglevel = 10; 73 console_loglevel = 10;
74
75 register_early_udbg_console();
71#endif 76#endif
72} 77}
73 78
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 5f3cff83e089..33aa715dab28 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -387,8 +387,10 @@ static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
387 } 387 }
388} 388}
389 389
390void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode) 390void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
391{ 391{
392 int usermode = vcpu->arch.shared->msr & MSR_PR;
393
392 vcpu->arch.shadow_pid = !usermode; 394 vcpu->arch.shadow_pid = !usermode;
393} 395}
394 396
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index b7baff78f90c..78133deb4b64 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -20,7 +20,6 @@ config KVM
20 bool 20 bool
21 select PREEMPT_NOTIFIERS 21 select PREEMPT_NOTIFIERS
22 select ANON_INODES 22 select ANON_INODES
23 select KVM_MMIO
24 23
25config KVM_BOOK3S_HANDLER 24config KVM_BOOK3S_HANDLER
26 bool 25 bool
@@ -28,16 +27,22 @@ config KVM_BOOK3S_HANDLER
28config KVM_BOOK3S_32_HANDLER 27config KVM_BOOK3S_32_HANDLER
29 bool 28 bool
30 select KVM_BOOK3S_HANDLER 29 select KVM_BOOK3S_HANDLER
30 select KVM_MMIO
31 31
32config KVM_BOOK3S_64_HANDLER 32config KVM_BOOK3S_64_HANDLER
33 bool 33 bool
34 select KVM_BOOK3S_HANDLER 34 select KVM_BOOK3S_HANDLER
35 35
36config KVM_BOOK3S_PR
37 bool
38 select KVM_MMIO
39
36config KVM_BOOK3S_32 40config KVM_BOOK3S_32
37 tristate "KVM support for PowerPC book3s_32 processors" 41 tristate "KVM support for PowerPC book3s_32 processors"
38 depends on EXPERIMENTAL && PPC_BOOK3S_32 && !SMP && !PTE_64BIT 42 depends on EXPERIMENTAL && PPC_BOOK3S_32 && !SMP && !PTE_64BIT
39 select KVM 43 select KVM
40 select KVM_BOOK3S_32_HANDLER 44 select KVM_BOOK3S_32_HANDLER
45 select KVM_BOOK3S_PR
41 ---help--- 46 ---help---
42 Support running unmodified book3s_32 guest kernels 47 Support running unmodified book3s_32 guest kernels
43 in virtual machines on book3s_32 host processors. 48 in virtual machines on book3s_32 host processors.
@@ -50,8 +55,8 @@ config KVM_BOOK3S_32
50config KVM_BOOK3S_64 55config KVM_BOOK3S_64
51 tristate "KVM support for PowerPC book3s_64 processors" 56 tristate "KVM support for PowerPC book3s_64 processors"
52 depends on EXPERIMENTAL && PPC_BOOK3S_64 57 depends on EXPERIMENTAL && PPC_BOOK3S_64
53 select KVM
54 select KVM_BOOK3S_64_HANDLER 58 select KVM_BOOK3S_64_HANDLER
59 select KVM
55 ---help--- 60 ---help---
56 Support running unmodified book3s_64 and book3s_32 guest kernels 61 Support running unmodified book3s_64 and book3s_32 guest kernels
57 in virtual machines on book3s_64 host processors. 62 in virtual machines on book3s_64 host processors.
@@ -61,10 +66,34 @@ config KVM_BOOK3S_64
61 66
62 If unsure, say N. 67 If unsure, say N.
63 68
69config KVM_BOOK3S_64_HV
70 bool "KVM support for POWER7 and PPC970 using hypervisor mode in host"
71 depends on KVM_BOOK3S_64
72 ---help---
73 Support running unmodified book3s_64 guest kernels in
74 virtual machines on POWER7 and PPC970 processors that have
75 hypervisor mode available to the host.
76
77 If you say Y here, KVM will use the hardware virtualization
78 facilities of POWER7 (and later) processors, meaning that
79 guest operating systems will run at full hardware speed
80 using supervisor and user modes. However, this also means
81 that KVM is not usable under PowerVM (pHyp), is only usable
82 on POWER7 (or later) processors and PPC970-family processors,
83 and cannot emulate a different processor from the host processor.
84
85 If unsure, say N.
86
87config KVM_BOOK3S_64_PR
88 def_bool y
89 depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV
90 select KVM_BOOK3S_PR
91
64config KVM_440 92config KVM_440
65 bool "KVM support for PowerPC 440 processors" 93 bool "KVM support for PowerPC 440 processors"
66 depends on EXPERIMENTAL && 44x 94 depends on EXPERIMENTAL && 44x
67 select KVM 95 select KVM
96 select KVM_MMIO
68 ---help--- 97 ---help---
69 Support running unmodified 440 guest kernels in virtual machines on 98 Support running unmodified 440 guest kernels in virtual machines on
70 440 host processors. 99 440 host processors.
@@ -89,6 +118,7 @@ config KVM_E500
89 bool "KVM support for PowerPC E500 processors" 118 bool "KVM support for PowerPC E500 processors"
90 depends on EXPERIMENTAL && E500 119 depends on EXPERIMENTAL && E500
91 select KVM 120 select KVM
121 select KVM_MMIO
92 ---help--- 122 ---help---
93 Support running unmodified E500 guest kernels in virtual machines on 123 Support running unmodified E500 guest kernels in virtual machines on
94 E500 host processors. 124 E500 host processors.
@@ -99,6 +129,5 @@ config KVM_E500
99 If unsure, say N. 129 If unsure, say N.
100 130
101source drivers/vhost/Kconfig 131source drivers/vhost/Kconfig
102source drivers/virtio/Kconfig
103 132
104endif # VIRTUALIZATION 133endif # VIRTUALIZATION
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 4d6863823f69..08428e2c188d 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -38,24 +38,42 @@ kvm-e500-objs := \
38 e500_emulate.o 38 e500_emulate.o
39kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs) 39kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs)
40 40
41kvm-book3s_64-objs := \ 41kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
42 $(common-objs-y) \ 42 ../../../virt/kvm/coalesced_mmio.o \
43 fpu.o \ 43 fpu.o \
44 book3s_paired_singles.o \ 44 book3s_paired_singles.o \
45 book3s.o \ 45 book3s_pr.o \
46 book3s_emulate.o \ 46 book3s_emulate.o \
47 book3s_interrupts.o \ 47 book3s_interrupts.o \
48 book3s_mmu_hpte.o \ 48 book3s_mmu_hpte.o \
49 book3s_64_mmu_host.o \ 49 book3s_64_mmu_host.o \
50 book3s_64_mmu.o \ 50 book3s_64_mmu.o \
51 book3s_32_mmu.o 51 book3s_32_mmu.o
52kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-objs) 52
53kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
54 book3s_hv.o \
55 book3s_hv_interrupts.o \
56 book3s_64_mmu_hv.o
57kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
58 book3s_hv_rm_mmu.o \
59 book3s_64_vio_hv.o \
60 book3s_hv_builtin.o
61
62kvm-book3s_64-module-objs := \
63 ../../../virt/kvm/kvm_main.o \
64 powerpc.o \
65 emulate.o \
66 book3s.o \
67 $(kvm-book3s_64-objs-y)
68
69kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs)
53 70
54kvm-book3s_32-objs := \ 71kvm-book3s_32-objs := \
55 $(common-objs-y) \ 72 $(common-objs-y) \
56 fpu.o \ 73 fpu.o \
57 book3s_paired_singles.o \ 74 book3s_paired_singles.o \
58 book3s.o \ 75 book3s.o \
76 book3s_pr.o \
59 book3s_emulate.o \ 77 book3s_emulate.o \
60 book3s_interrupts.o \ 78 book3s_interrupts.o \
61 book3s_mmu_hpte.o \ 79 book3s_mmu_hpte.o \
@@ -70,3 +88,4 @@ obj-$(CONFIG_KVM_E500) += kvm.o
70obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o 88obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
71obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o 89obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
72 90
91obj-y += $(kvm-book3s_64-builtin-objs-y)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 0f95b5cce033..f68a34d16035 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -17,7 +17,6 @@
17#include <linux/kvm_host.h> 17#include <linux/kvm_host.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include "trace.h"
21 20
22#include <asm/reg.h> 21#include <asm/reg.h>
23#include <asm/cputable.h> 22#include <asm/cputable.h>
@@ -28,25 +27,17 @@
28#include <asm/kvm_ppc.h> 27#include <asm/kvm_ppc.h>
29#include <asm/kvm_book3s.h> 28#include <asm/kvm_book3s.h>
30#include <asm/mmu_context.h> 29#include <asm/mmu_context.h>
30#include <asm/page.h>
31#include <linux/gfp.h> 31#include <linux/gfp.h>
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <linux/vmalloc.h> 33#include <linux/vmalloc.h>
34#include <linux/highmem.h> 34#include <linux/highmem.h>
35 35
36#include "trace.h"
37
36#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU 38#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
37 39
38/* #define EXIT_DEBUG */ 40/* #define EXIT_DEBUG */
39/* #define DEBUG_EXT */
40
41static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
42 ulong msr);
43
44/* Some compatibility defines */
45#ifdef CONFIG_PPC_BOOK3S_32
46#define MSR_USER32 MSR_USER
47#define MSR_USER64 MSR_USER
48#define HW_PAGE_SIZE PAGE_SIZE
49#endif
50 41
51struct kvm_stats_debugfs_item debugfs_entries[] = { 42struct kvm_stats_debugfs_item debugfs_entries[] = {
52 { "exits", VCPU_STAT(sum_exits) }, 43 { "exits", VCPU_STAT(sum_exits) },
@@ -77,100 +68,11 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
77{ 68{
78} 69}
79 70
80void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
81{
82#ifdef CONFIG_PPC_BOOK3S_64
83 memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
84 memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
85 sizeof(get_paca()->shadow_vcpu));
86 to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
87#endif
88
89#ifdef CONFIG_PPC_BOOK3S_32
90 current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
91#endif
92}
93
94void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
95{
96#ifdef CONFIG_PPC_BOOK3S_64
97 memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
98 memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
99 sizeof(get_paca()->shadow_vcpu));
100 to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
101#endif
102
103 kvmppc_giveup_ext(vcpu, MSR_FP);
104 kvmppc_giveup_ext(vcpu, MSR_VEC);
105 kvmppc_giveup_ext(vcpu, MSR_VSX);
106}
107
108static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
109{
110 ulong smsr = vcpu->arch.shared->msr;
111
112 /* Guest MSR values */
113 smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
114 /* Process MSR values */
115 smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
116 /* External providers the guest reserved */
117 smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
118 /* 64-bit Process MSR values */
119#ifdef CONFIG_PPC_BOOK3S_64
120 smsr |= MSR_ISF | MSR_HV;
121#endif
122 vcpu->arch.shadow_msr = smsr;
123}
124
125void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
126{
127 ulong old_msr = vcpu->arch.shared->msr;
128
129#ifdef EXIT_DEBUG
130 printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
131#endif
132
133 msr &= to_book3s(vcpu)->msr_mask;
134 vcpu->arch.shared->msr = msr;
135 kvmppc_recalc_shadow_msr(vcpu);
136
137 if (msr & MSR_POW) {
138 if (!vcpu->arch.pending_exceptions) {
139 kvm_vcpu_block(vcpu);
140 vcpu->stat.halt_wakeup++;
141
142 /* Unset POW bit after we woke up */
143 msr &= ~MSR_POW;
144 vcpu->arch.shared->msr = msr;
145 }
146 }
147
148 if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
149 (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
150 kvmppc_mmu_flush_segments(vcpu);
151 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
152
153 /* Preload magic page segment when in kernel mode */
154 if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
155 struct kvm_vcpu_arch *a = &vcpu->arch;
156
157 if (msr & MSR_DR)
158 kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
159 else
160 kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
161 }
162 }
163
164 /* Preload FPU if it's enabled */
165 if (vcpu->arch.shared->msr & MSR_FP)
166 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
167}
168
169void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) 71void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
170{ 72{
171 vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu); 73 vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
172 vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags; 74 vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags;
173 kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec); 75 kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
174 vcpu->arch.mmu.reset_msr(vcpu); 76 vcpu->arch.mmu.reset_msr(vcpu);
175} 77}
176 78
@@ -204,11 +106,13 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec)
204static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu, 106static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
205 unsigned int vec) 107 unsigned int vec)
206{ 108{
109 unsigned long old_pending = vcpu->arch.pending_exceptions;
110
207 clear_bit(kvmppc_book3s_vec2irqprio(vec), 111 clear_bit(kvmppc_book3s_vec2irqprio(vec),
208 &vcpu->arch.pending_exceptions); 112 &vcpu->arch.pending_exceptions);
209 113
210 if (!vcpu->arch.pending_exceptions) 114 kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
211 vcpu->arch.shared->int_pending = 0; 115 old_pending);
212} 116}
213 117
214void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) 118void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
@@ -225,8 +129,8 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
225 129
226void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags) 130void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
227{ 131{
228 to_book3s(vcpu)->prog_flags = flags; 132 /* might as well deliver this straight away */
229 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM); 133 kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
230} 134}
231 135
232void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) 136void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
@@ -266,21 +170,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
266{ 170{
267 int deliver = 1; 171 int deliver = 1;
268 int vec = 0; 172 int vec = 0;
269 ulong flags = 0ULL; 173 bool crit = kvmppc_critical_section(vcpu);
270 ulong crit_raw = vcpu->arch.shared->critical;
271 ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
272 bool crit;
273
274 /* Truncate crit indicators in 32 bit mode */
275 if (!(vcpu->arch.shared->msr & MSR_SF)) {
276 crit_raw &= 0xffffffff;
277 crit_r1 &= 0xffffffff;
278 }
279
280 /* Critical section when crit == r1 */
281 crit = (crit_raw == crit_r1);
282 /* ... and we're in supervisor mode */
283 crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
284 174
285 switch (priority) { 175 switch (priority) {
286 case BOOK3S_IRQPRIO_DECREMENTER: 176 case BOOK3S_IRQPRIO_DECREMENTER:
@@ -315,7 +205,6 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
315 break; 205 break;
316 case BOOK3S_IRQPRIO_PROGRAM: 206 case BOOK3S_IRQPRIO_PROGRAM:
317 vec = BOOK3S_INTERRUPT_PROGRAM; 207 vec = BOOK3S_INTERRUPT_PROGRAM;
318 flags = to_book3s(vcpu)->prog_flags;
319 break; 208 break;
320 case BOOK3S_IRQPRIO_VSX: 209 case BOOK3S_IRQPRIO_VSX:
321 vec = BOOK3S_INTERRUPT_VSX; 210 vec = BOOK3S_INTERRUPT_VSX;
@@ -346,7 +235,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
346#endif 235#endif
347 236
348 if (deliver) 237 if (deliver)
349 kvmppc_inject_interrupt(vcpu, vec, flags); 238 kvmppc_inject_interrupt(vcpu, vec, 0);
350 239
351 return deliver; 240 return deliver;
352} 241}
@@ -392,64 +281,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
392 } 281 }
393 282
394 /* Tell the guest about our interrupt status */ 283 /* Tell the guest about our interrupt status */
395 if (*pending) 284 kvmppc_update_int_pending(vcpu, *pending, old_pending);
396 vcpu->arch.shared->int_pending = 1;
397 else if (old_pending)
398 vcpu->arch.shared->int_pending = 0;
399}
400
401void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
402{
403 u32 host_pvr;
404
405 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
406 vcpu->arch.pvr = pvr;
407#ifdef CONFIG_PPC_BOOK3S_64
408 if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
409 kvmppc_mmu_book3s_64_init(vcpu);
410 to_book3s(vcpu)->hior = 0xfff00000;
411 to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
412 } else
413#endif
414 {
415 kvmppc_mmu_book3s_32_init(vcpu);
416 to_book3s(vcpu)->hior = 0;
417 to_book3s(vcpu)->msr_mask = 0xffffffffULL;
418 }
419
420 /* If we are in hypervisor level on 970, we can tell the CPU to
421 * treat DCBZ as 32 bytes store */
422 vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
423 if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
424 !strcmp(cur_cpu_spec->platform, "ppc970"))
425 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
426
427 /* Cell performs badly if MSR_FEx are set. So let's hope nobody
428 really needs them in a VM on Cell and force disable them. */
429 if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
430 to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
431
432#ifdef CONFIG_PPC_BOOK3S_32
433 /* 32 bit Book3S always has 32 byte dcbz */
434 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
435#endif
436
437 /* On some CPUs we can execute paired single operations natively */
438 asm ( "mfpvr %0" : "=r"(host_pvr));
439 switch (host_pvr) {
440 case 0x00080200: /* lonestar 2.0 */
441 case 0x00088202: /* lonestar 2.2 */
442 case 0x70000100: /* gekko 1.0 */
443 case 0x00080100: /* gekko 2.0 */
444 case 0x00083203: /* gekko 2.3a */
445 case 0x00083213: /* gekko 2.3b */
446 case 0x00083204: /* gekko 2.4 */
447 case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */
448 case 0x00087200: /* broadway */
449 vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
450 /* Enable HID2.PSE - in case we need it later */
451 mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
452 }
453} 285}
454 286
455pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) 287pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -471,44 +303,6 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
471 return gfn_to_pfn(vcpu->kvm, gfn); 303 return gfn_to_pfn(vcpu->kvm, gfn);
472} 304}
473 305
474/* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
475 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
476 * emulate 32 bytes dcbz length.
477 *
478 * The Book3s_64 inventors also realized this case and implemented a special bit
479 * in the HID5 register, which is a hypervisor ressource. Thus we can't use it.
480 *
481 * My approach here is to patch the dcbz instruction on executing pages.
482 */
483static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
484{
485 struct page *hpage;
486 u64 hpage_offset;
487 u32 *page;
488 int i;
489
490 hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
491 if (is_error_page(hpage)) {
492 kvm_release_page_clean(hpage);
493 return;
494 }
495
496 hpage_offset = pte->raddr & ~PAGE_MASK;
497 hpage_offset &= ~0xFFFULL;
498 hpage_offset /= 4;
499
500 get_page(hpage);
501 page = kmap_atomic(hpage, KM_USER0);
502
503 /* patch dcbz into reserved instruction, so we trap */
504 for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
505 if ((page[i] & 0xff0007ff) == INS_DCBZ)
506 page[i] &= 0xfffffff7;
507
508 kunmap_atomic(page, KM_USER0);
509 put_page(hpage);
510}
511
512static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, 306static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
513 struct kvmppc_pte *pte) 307 struct kvmppc_pte *pte)
514{ 308{
@@ -606,519 +400,6 @@ mmio:
606 return EMULATE_DO_MMIO; 400 return EMULATE_DO_MMIO;
607} 401}
608 402
609static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
610{
611 ulong mp_pa = vcpu->arch.magic_page_pa;
612
613 if (unlikely(mp_pa) &&
614 unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
615 return 1;
616 }
617
618 return kvm_is_visible_gfn(vcpu->kvm, gfn);
619}
620
621int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
622 ulong eaddr, int vec)
623{
624 bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
625 int r = RESUME_GUEST;
626 int relocated;
627 int page_found = 0;
628 struct kvmppc_pte pte;
629 bool is_mmio = false;
630 bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
631 bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
632 u64 vsid;
633
634 relocated = data ? dr : ir;
635
636 /* Resolve real address if translation turned on */
637 if (relocated) {
638 page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
639 } else {
640 pte.may_execute = true;
641 pte.may_read = true;
642 pte.may_write = true;
643 pte.raddr = eaddr & KVM_PAM;
644 pte.eaddr = eaddr;
645 pte.vpage = eaddr >> 12;
646 }
647
648 switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
649 case 0:
650 pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
651 break;
652 case MSR_DR:
653 case MSR_IR:
654 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
655
656 if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
657 pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
658 else
659 pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
660 pte.vpage |= vsid;
661
662 if (vsid == -1)
663 page_found = -EINVAL;
664 break;
665 }
666
667 if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
668 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
669 /*
670 * If we do the dcbz hack, we have to NX on every execution,
671 * so we can patch the executing code. This renders our guest
672 * NX-less.
673 */
674 pte.may_execute = !data;
675 }
676
677 if (page_found == -ENOENT) {
678 /* Page not found in guest PTE entries */
679 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
680 vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
681 vcpu->arch.shared->msr |=
682 (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
683 kvmppc_book3s_queue_irqprio(vcpu, vec);
684 } else if (page_found == -EPERM) {
685 /* Storage protection */
686 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
687 vcpu->arch.shared->dsisr =
688 to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
689 vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
690 vcpu->arch.shared->msr |=
691 (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
692 kvmppc_book3s_queue_irqprio(vcpu, vec);
693 } else if (page_found == -EINVAL) {
694 /* Page not found in guest SLB */
695 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
696 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
697 } else if (!is_mmio &&
698 kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
699 /* The guest's PTE is not mapped yet. Map on the host */
700 kvmppc_mmu_map_page(vcpu, &pte);
701 if (data)
702 vcpu->stat.sp_storage++;
703 else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
704 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
705 kvmppc_patch_dcbz(vcpu, &pte);
706 } else {
707 /* MMIO */
708 vcpu->stat.mmio_exits++;
709 vcpu->arch.paddr_accessed = pte.raddr;
710 r = kvmppc_emulate_mmio(run, vcpu);
711 if ( r == RESUME_HOST_NV )
712 r = RESUME_HOST;
713 }
714
715 return r;
716}
717
718static inline int get_fpr_index(int i)
719{
720#ifdef CONFIG_VSX
721 i *= 2;
722#endif
723 return i;
724}
725
726/* Give up external provider (FPU, Altivec, VSX) */
727void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
728{
729 struct thread_struct *t = &current->thread;
730 u64 *vcpu_fpr = vcpu->arch.fpr;
731#ifdef CONFIG_VSX
732 u64 *vcpu_vsx = vcpu->arch.vsr;
733#endif
734 u64 *thread_fpr = (u64*)t->fpr;
735 int i;
736
737 if (!(vcpu->arch.guest_owned_ext & msr))
738 return;
739
740#ifdef DEBUG_EXT
741 printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
742#endif
743
744 switch (msr) {
745 case MSR_FP:
746 giveup_fpu(current);
747 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
748 vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
749
750 vcpu->arch.fpscr = t->fpscr.val;
751 break;
752 case MSR_VEC:
753#ifdef CONFIG_ALTIVEC
754 giveup_altivec(current);
755 memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
756 vcpu->arch.vscr = t->vscr;
757#endif
758 break;
759 case MSR_VSX:
760#ifdef CONFIG_VSX
761 __giveup_vsx(current);
762 for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
763 vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
764#endif
765 break;
766 default:
767 BUG();
768 }
769
770 vcpu->arch.guest_owned_ext &= ~msr;
771 current->thread.regs->msr &= ~msr;
772 kvmppc_recalc_shadow_msr(vcpu);
773}
774
775static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
776{
777 ulong srr0 = kvmppc_get_pc(vcpu);
778 u32 last_inst = kvmppc_get_last_inst(vcpu);
779 int ret;
780
781 ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
782 if (ret == -ENOENT) {
783 ulong msr = vcpu->arch.shared->msr;
784
785 msr = kvmppc_set_field(msr, 33, 33, 1);
786 msr = kvmppc_set_field(msr, 34, 36, 0);
787 vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
788 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
789 return EMULATE_AGAIN;
790 }
791
792 return EMULATE_DONE;
793}
794
795static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
796{
797
798 /* Need to do paired single emulation? */
799 if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
800 return EMULATE_DONE;
801
802 /* Read out the instruction */
803 if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
804 /* Need to emulate */
805 return EMULATE_FAIL;
806
807 return EMULATE_AGAIN;
808}
809
810/* Handle external providers (FPU, Altivec, VSX) */
811static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
812 ulong msr)
813{
814 struct thread_struct *t = &current->thread;
815 u64 *vcpu_fpr = vcpu->arch.fpr;
816#ifdef CONFIG_VSX
817 u64 *vcpu_vsx = vcpu->arch.vsr;
818#endif
819 u64 *thread_fpr = (u64*)t->fpr;
820 int i;
821
822 /* When we have paired singles, we emulate in software */
823 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
824 return RESUME_GUEST;
825
826 if (!(vcpu->arch.shared->msr & msr)) {
827 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
828 return RESUME_GUEST;
829 }
830
831 /* We already own the ext */
832 if (vcpu->arch.guest_owned_ext & msr) {
833 return RESUME_GUEST;
834 }
835
836#ifdef DEBUG_EXT
837 printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
838#endif
839
840 current->thread.regs->msr |= msr;
841
842 switch (msr) {
843 case MSR_FP:
844 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
845 thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
846
847 t->fpscr.val = vcpu->arch.fpscr;
848 t->fpexc_mode = 0;
849 kvmppc_load_up_fpu();
850 break;
851 case MSR_VEC:
852#ifdef CONFIG_ALTIVEC
853 memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
854 t->vscr = vcpu->arch.vscr;
855 t->vrsave = -1;
856 kvmppc_load_up_altivec();
857#endif
858 break;
859 case MSR_VSX:
860#ifdef CONFIG_VSX
861 for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
862 thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
863 kvmppc_load_up_vsx();
864#endif
865 break;
866 default:
867 BUG();
868 }
869
870 vcpu->arch.guest_owned_ext |= msr;
871
872 kvmppc_recalc_shadow_msr(vcpu);
873
874 return RESUME_GUEST;
875}
876
877int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
878 unsigned int exit_nr)
879{
880 int r = RESUME_HOST;
881
882 vcpu->stat.sum_exits++;
883
884 run->exit_reason = KVM_EXIT_UNKNOWN;
885 run->ready_for_interrupt_injection = 1;
886
887 trace_kvm_book3s_exit(exit_nr, vcpu);
888 kvm_resched(vcpu);
889 switch (exit_nr) {
890 case BOOK3S_INTERRUPT_INST_STORAGE:
891 vcpu->stat.pf_instruc++;
892
893#ifdef CONFIG_PPC_BOOK3S_32
894 /* We set segments as unused segments when invalidating them. So
895 * treat the respective fault as segment fault. */
896 if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
897 == SR_INVALID) {
898 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
899 r = RESUME_GUEST;
900 break;
901 }
902#endif
903
904 /* only care about PTEG not found errors, but leave NX alone */
905 if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
906 r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
907 vcpu->stat.sp_instruc++;
908 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
909 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
910 /*
911 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
912 * so we can't use the NX bit inside the guest. Let's cross our fingers,
913 * that no guest that needs the dcbz hack does NX.
914 */
915 kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
916 r = RESUME_GUEST;
917 } else {
918 vcpu->arch.shared->msr |=
919 to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
920 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
921 r = RESUME_GUEST;
922 }
923 break;
924 case BOOK3S_INTERRUPT_DATA_STORAGE:
925 {
926 ulong dar = kvmppc_get_fault_dar(vcpu);
927 vcpu->stat.pf_storage++;
928
929#ifdef CONFIG_PPC_BOOK3S_32
930 /* We set segments as unused segments when invalidating them. So
931 * treat the respective fault as segment fault. */
932 if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
933 kvmppc_mmu_map_segment(vcpu, dar);
934 r = RESUME_GUEST;
935 break;
936 }
937#endif
938
939 /* The only case we need to handle is missing shadow PTEs */
940 if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
941 r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
942 } else {
943 vcpu->arch.shared->dar = dar;
944 vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
945 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
946 r = RESUME_GUEST;
947 }
948 break;
949 }
950 case BOOK3S_INTERRUPT_DATA_SEGMENT:
951 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
952 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
953 kvmppc_book3s_queue_irqprio(vcpu,
954 BOOK3S_INTERRUPT_DATA_SEGMENT);
955 }
956 r = RESUME_GUEST;
957 break;
958 case BOOK3S_INTERRUPT_INST_SEGMENT:
959 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
960 kvmppc_book3s_queue_irqprio(vcpu,
961 BOOK3S_INTERRUPT_INST_SEGMENT);
962 }
963 r = RESUME_GUEST;
964 break;
965 /* We're good on these - the host merely wanted to get our attention */
966 case BOOK3S_INTERRUPT_DECREMENTER:
967 vcpu->stat.dec_exits++;
968 r = RESUME_GUEST;
969 break;
970 case BOOK3S_INTERRUPT_EXTERNAL:
971 vcpu->stat.ext_intr_exits++;
972 r = RESUME_GUEST;
973 break;
974 case BOOK3S_INTERRUPT_PERFMON:
975 r = RESUME_GUEST;
976 break;
977 case BOOK3S_INTERRUPT_PROGRAM:
978 {
979 enum emulation_result er;
980 ulong flags;
981
982program_interrupt:
983 flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
984
985 if (vcpu->arch.shared->msr & MSR_PR) {
986#ifdef EXIT_DEBUG
987 printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
988#endif
989 if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
990 (INS_DCBZ & 0xfffffff7)) {
991 kvmppc_core_queue_program(vcpu, flags);
992 r = RESUME_GUEST;
993 break;
994 }
995 }
996
997 vcpu->stat.emulated_inst_exits++;
998 er = kvmppc_emulate_instruction(run, vcpu);
999 switch (er) {
1000 case EMULATE_DONE:
1001 r = RESUME_GUEST_NV;
1002 break;
1003 case EMULATE_AGAIN:
1004 r = RESUME_GUEST;
1005 break;
1006 case EMULATE_FAIL:
1007 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
1008 __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
1009 kvmppc_core_queue_program(vcpu, flags);
1010 r = RESUME_GUEST;
1011 break;
1012 case EMULATE_DO_MMIO:
1013 run->exit_reason = KVM_EXIT_MMIO;
1014 r = RESUME_HOST_NV;
1015 break;
1016 default:
1017 BUG();
1018 }
1019 break;
1020 }
1021 case BOOK3S_INTERRUPT_SYSCALL:
1022 if (vcpu->arch.osi_enabled &&
1023 (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
1024 (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
1025 /* MOL hypercalls */
1026 u64 *gprs = run->osi.gprs;
1027 int i;
1028
1029 run->exit_reason = KVM_EXIT_OSI;
1030 for (i = 0; i < 32; i++)
1031 gprs[i] = kvmppc_get_gpr(vcpu, i);
1032 vcpu->arch.osi_needed = 1;
1033 r = RESUME_HOST_NV;
1034 } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
1035 (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
1036 /* KVM PV hypercalls */
1037 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
1038 r = RESUME_GUEST;
1039 } else {
1040 /* Guest syscalls */
1041 vcpu->stat.syscall_exits++;
1042 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1043 r = RESUME_GUEST;
1044 }
1045 break;
1046 case BOOK3S_INTERRUPT_FP_UNAVAIL:
1047 case BOOK3S_INTERRUPT_ALTIVEC:
1048 case BOOK3S_INTERRUPT_VSX:
1049 {
1050 int ext_msr = 0;
1051
1052 switch (exit_nr) {
1053 case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP; break;
1054 case BOOK3S_INTERRUPT_ALTIVEC: ext_msr = MSR_VEC; break;
1055 case BOOK3S_INTERRUPT_VSX: ext_msr = MSR_VSX; break;
1056 }
1057
1058 switch (kvmppc_check_ext(vcpu, exit_nr)) {
1059 case EMULATE_DONE:
1060 /* everything ok - let's enable the ext */
1061 r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
1062 break;
1063 case EMULATE_FAIL:
1064 /* we need to emulate this instruction */
1065 goto program_interrupt;
1066 break;
1067 default:
1068 /* nothing to worry about - go again */
1069 break;
1070 }
1071 break;
1072 }
1073 case BOOK3S_INTERRUPT_ALIGNMENT:
1074 if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
1075 vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
1076 kvmppc_get_last_inst(vcpu));
1077 vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
1078 kvmppc_get_last_inst(vcpu));
1079 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1080 }
1081 r = RESUME_GUEST;
1082 break;
1083 case BOOK3S_INTERRUPT_MACHINE_CHECK:
1084 case BOOK3S_INTERRUPT_TRACE:
1085 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1086 r = RESUME_GUEST;
1087 break;
1088 default:
1089 /* Ugh - bork here! What did we get? */
1090 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
1091 exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
1092 r = RESUME_HOST;
1093 BUG();
1094 break;
1095 }
1096
1097
1098 if (!(r & RESUME_HOST)) {
1099 /* To avoid clobbering exit_reason, only check for signals if
1100 * we aren't already exiting to userspace for some other
1101 * reason. */
1102 if (signal_pending(current)) {
1103#ifdef EXIT_DEBUG
1104 printk(KERN_EMERG "KVM: Going back to host\n");
1105#endif
1106 vcpu->stat.signal_exits++;
1107 run->exit_reason = KVM_EXIT_INTR;
1108 r = -EINTR;
1109 } else {
1110 /* In case an interrupt came in that was triggered
1111 * from userspace (like DEC), we need to check what
1112 * to inject now! */
1113 kvmppc_core_deliver_interrupts(vcpu);
1114 }
1115 }
1116
1117 trace_kvm_book3s_reenter(r, vcpu);
1118
1119 return r;
1120}
1121
1122int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 403int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1123{ 404{
1124 return 0; 405 return 0;
@@ -1179,69 +460,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1179 return 0; 460 return 0;
1180} 461}
1181 462
1182int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1183 struct kvm_sregs *sregs)
1184{
1185 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
1186 int i;
1187
1188 sregs->pvr = vcpu->arch.pvr;
1189
1190 sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
1191 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
1192 for (i = 0; i < 64; i++) {
1193 sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i;
1194 sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
1195 }
1196 } else {
1197 for (i = 0; i < 16; i++)
1198 sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];
1199
1200 for (i = 0; i < 8; i++) {
1201 sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
1202 sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
1203 }
1204 }
1205
1206 return 0;
1207}
1208
1209int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1210 struct kvm_sregs *sregs)
1211{
1212 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
1213 int i;
1214
1215 kvmppc_set_pvr(vcpu, sregs->pvr);
1216
1217 vcpu3s->sdr1 = sregs->u.s.sdr1;
1218 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
1219 for (i = 0; i < 64; i++) {
1220 vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
1221 sregs->u.s.ppc64.slb[i].slbe);
1222 }
1223 } else {
1224 for (i = 0; i < 16; i++) {
1225 vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
1226 }
1227 for (i = 0; i < 8; i++) {
1228 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
1229 (u32)sregs->u.s.ppc32.ibat[i]);
1230 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
1231 (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
1232 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
1233 (u32)sregs->u.s.ppc32.dbat[i]);
1234 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
1235 (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
1236 }
1237 }
1238
1239 /* Flush the MMU after messing with the segments */
1240 kvmppc_mmu_pte_flush(vcpu, 0, 0);
1241
1242 return 0;
1243}
1244
1245int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 463int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1246{ 464{
1247 return -ENOTSUPP; 465 return -ENOTSUPP;
@@ -1296,202 +514,3 @@ out:
1296 mutex_unlock(&kvm->slots_lock); 514 mutex_unlock(&kvm->slots_lock);
1297 return r; 515 return r;
1298} 516}
1299
1300int kvmppc_core_check_processor_compat(void)
1301{
1302 return 0;
1303}
1304
1305struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
1306{
1307 struct kvmppc_vcpu_book3s *vcpu_book3s;
1308 struct kvm_vcpu *vcpu;
1309 int err = -ENOMEM;
1310 unsigned long p;
1311
1312 vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
1313 if (!vcpu_book3s)
1314 goto out;
1315
1316 vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
1317 kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
1318 if (!vcpu_book3s->shadow_vcpu)
1319 goto free_vcpu;
1320
1321 vcpu = &vcpu_book3s->vcpu;
1322 err = kvm_vcpu_init(vcpu, kvm, id);
1323 if (err)
1324 goto free_shadow_vcpu;
1325
1326 p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
1327 /* the real shared page fills the last 4k of our page */
1328 vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
1329 if (!p)
1330 goto uninit_vcpu;
1331
1332 vcpu->arch.host_retip = kvm_return_point;
1333 vcpu->arch.host_msr = mfmsr();
1334#ifdef CONFIG_PPC_BOOK3S_64
1335 /* default to book3s_64 (970fx) */
1336 vcpu->arch.pvr = 0x3C0301;
1337#else
1338 /* default to book3s_32 (750) */
1339 vcpu->arch.pvr = 0x84202;
1340#endif
1341 kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
1342 vcpu_book3s->slb_nr = 64;
1343
1344 /* remember where some real-mode handlers are */
1345 vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
1346 vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
1347 vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
1348#ifdef CONFIG_PPC_BOOK3S_64
1349 vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
1350#else
1351 vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
1352#endif
1353
1354 vcpu->arch.shadow_msr = MSR_USER64;
1355
1356 err = kvmppc_mmu_init(vcpu);
1357 if (err < 0)
1358 goto uninit_vcpu;
1359
1360 return vcpu;
1361
1362uninit_vcpu:
1363 kvm_vcpu_uninit(vcpu);
1364free_shadow_vcpu:
1365 kfree(vcpu_book3s->shadow_vcpu);
1366free_vcpu:
1367 vfree(vcpu_book3s);
1368out:
1369 return ERR_PTR(err);
1370}
1371
1372void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
1373{
1374 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
1375
1376 free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
1377 kvm_vcpu_uninit(vcpu);
1378 kfree(vcpu_book3s->shadow_vcpu);
1379 vfree(vcpu_book3s);
1380}
1381
1382extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
1383int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1384{
1385 int ret;
1386 double fpr[32][TS_FPRWIDTH];
1387 unsigned int fpscr;
1388 int fpexc_mode;
1389#ifdef CONFIG_ALTIVEC
1390 vector128 vr[32];
1391 vector128 vscr;
1392 unsigned long uninitialized_var(vrsave);
1393 int used_vr;
1394#endif
1395#ifdef CONFIG_VSX
1396 int used_vsr;
1397#endif
1398 ulong ext_msr;
1399
1400 /* No need to go into the guest when all we do is going out */
1401 if (signal_pending(current)) {
1402 kvm_run->exit_reason = KVM_EXIT_INTR;
1403 return -EINTR;
1404 }
1405
1406 /* Save FPU state in stack */
1407 if (current->thread.regs->msr & MSR_FP)
1408 giveup_fpu(current);
1409 memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
1410 fpscr = current->thread.fpscr.val;
1411 fpexc_mode = current->thread.fpexc_mode;
1412
1413#ifdef CONFIG_ALTIVEC
1414 /* Save Altivec state in stack */
1415 used_vr = current->thread.used_vr;
1416 if (used_vr) {
1417 if (current->thread.regs->msr & MSR_VEC)
1418 giveup_altivec(current);
1419 memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
1420 vscr = current->thread.vscr;
1421 vrsave = current->thread.vrsave;
1422 }
1423#endif
1424
1425#ifdef CONFIG_VSX
1426 /* Save VSX state in stack */
1427 used_vsr = current->thread.used_vsr;
1428 if (used_vsr && (current->thread.regs->msr & MSR_VSX))
1429 __giveup_vsx(current);
1430#endif
1431
1432 /* Remember the MSR with disabled extensions */
1433 ext_msr = current->thread.regs->msr;
1434
1435 /* XXX we get called with irq disabled - change that! */
1436 local_irq_enable();
1437
1438 /* Preload FPU if it's enabled */
1439 if (vcpu->arch.shared->msr & MSR_FP)
1440 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
1441
1442 ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
1443
1444 local_irq_disable();
1445
1446 current->thread.regs->msr = ext_msr;
1447
1448 /* Make sure we save the guest FPU/Altivec/VSX state */
1449 kvmppc_giveup_ext(vcpu, MSR_FP);
1450 kvmppc_giveup_ext(vcpu, MSR_VEC);
1451 kvmppc_giveup_ext(vcpu, MSR_VSX);
1452
1453 /* Restore FPU state from stack */
1454 memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
1455 current->thread.fpscr.val = fpscr;
1456 current->thread.fpexc_mode = fpexc_mode;
1457
1458#ifdef CONFIG_ALTIVEC
1459 /* Restore Altivec state from stack */
1460 if (used_vr && current->thread.used_vr) {
1461 memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
1462 current->thread.vscr = vscr;
1463 current->thread.vrsave = vrsave;
1464 }
1465 current->thread.used_vr = used_vr;
1466#endif
1467
1468#ifdef CONFIG_VSX
1469 current->thread.used_vsr = used_vsr;
1470#endif
1471
1472 return ret;
1473}
1474
1475static int kvmppc_book3s_init(void)
1476{
1477 int r;
1478
1479 r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
1480 THIS_MODULE);
1481
1482 if (r)
1483 return r;
1484
1485 r = kvmppc_mmu_hpte_sysinit();
1486
1487 return r;
1488}
1489
1490static void kvmppc_book3s_exit(void)
1491{
1492 kvmppc_mmu_hpte_sysexit();
1493 kvm_exit();
1494}
1495
1496module_init(kvmppc_book3s_init);
1497module_exit(kvmppc_book3s_exit);
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index d7889ef3211e..c6d3e194b6b4 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -41,36 +41,36 @@ static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
41} 41}
42 42
43static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe( 43static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
44 struct kvmppc_vcpu_book3s *vcpu_book3s, 44 struct kvm_vcpu *vcpu,
45 gva_t eaddr) 45 gva_t eaddr)
46{ 46{
47 int i; 47 int i;
48 u64 esid = GET_ESID(eaddr); 48 u64 esid = GET_ESID(eaddr);
49 u64 esid_1t = GET_ESID_1T(eaddr); 49 u64 esid_1t = GET_ESID_1T(eaddr);
50 50
51 for (i = 0; i < vcpu_book3s->slb_nr; i++) { 51 for (i = 0; i < vcpu->arch.slb_nr; i++) {
52 u64 cmp_esid = esid; 52 u64 cmp_esid = esid;
53 53
54 if (!vcpu_book3s->slb[i].valid) 54 if (!vcpu->arch.slb[i].valid)
55 continue; 55 continue;
56 56
57 if (vcpu_book3s->slb[i].tb) 57 if (vcpu->arch.slb[i].tb)
58 cmp_esid = esid_1t; 58 cmp_esid = esid_1t;
59 59
60 if (vcpu_book3s->slb[i].esid == cmp_esid) 60 if (vcpu->arch.slb[i].esid == cmp_esid)
61 return &vcpu_book3s->slb[i]; 61 return &vcpu->arch.slb[i];
62 } 62 }
63 63
64 dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n", 64 dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
65 eaddr, esid, esid_1t); 65 eaddr, esid, esid_1t);
66 for (i = 0; i < vcpu_book3s->slb_nr; i++) { 66 for (i = 0; i < vcpu->arch.slb_nr; i++) {
67 if (vcpu_book3s->slb[i].vsid) 67 if (vcpu->arch.slb[i].vsid)
68 dprintk(" %d: %c%c%c %llx %llx\n", i, 68 dprintk(" %d: %c%c%c %llx %llx\n", i,
69 vcpu_book3s->slb[i].valid ? 'v' : ' ', 69 vcpu->arch.slb[i].valid ? 'v' : ' ',
70 vcpu_book3s->slb[i].large ? 'l' : ' ', 70 vcpu->arch.slb[i].large ? 'l' : ' ',
71 vcpu_book3s->slb[i].tb ? 't' : ' ', 71 vcpu->arch.slb[i].tb ? 't' : ' ',
72 vcpu_book3s->slb[i].esid, 72 vcpu->arch.slb[i].esid,
73 vcpu_book3s->slb[i].vsid); 73 vcpu->arch.slb[i].vsid);
74 } 74 }
75 75
76 return NULL; 76 return NULL;
@@ -81,7 +81,7 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
81{ 81{
82 struct kvmppc_slb *slb; 82 struct kvmppc_slb *slb;
83 83
84 slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), eaddr); 84 slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
85 if (!slb) 85 if (!slb)
86 return 0; 86 return 0;
87 87
@@ -180,7 +180,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
180 return 0; 180 return 0;
181 } 181 }
182 182
183 slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr); 183 slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
184 if (!slbe) 184 if (!slbe)
185 goto no_seg_found; 185 goto no_seg_found;
186 186
@@ -320,10 +320,10 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
320 esid_1t = GET_ESID_1T(rb); 320 esid_1t = GET_ESID_1T(rb);
321 slb_nr = rb & 0xfff; 321 slb_nr = rb & 0xfff;
322 322
323 if (slb_nr > vcpu_book3s->slb_nr) 323 if (slb_nr > vcpu->arch.slb_nr)
324 return; 324 return;
325 325
326 slbe = &vcpu_book3s->slb[slb_nr]; 326 slbe = &vcpu->arch.slb[slb_nr];
327 327
328 slbe->large = (rs & SLB_VSID_L) ? 1 : 0; 328 slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
329 slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0; 329 slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0;
@@ -344,38 +344,35 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
344 344
345static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr) 345static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
346{ 346{
347 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
348 struct kvmppc_slb *slbe; 347 struct kvmppc_slb *slbe;
349 348
350 if (slb_nr > vcpu_book3s->slb_nr) 349 if (slb_nr > vcpu->arch.slb_nr)
351 return 0; 350 return 0;
352 351
353 slbe = &vcpu_book3s->slb[slb_nr]; 352 slbe = &vcpu->arch.slb[slb_nr];
354 353
355 return slbe->orige; 354 return slbe->orige;
356} 355}
357 356
358static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr) 357static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
359{ 358{
360 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
361 struct kvmppc_slb *slbe; 359 struct kvmppc_slb *slbe;
362 360
363 if (slb_nr > vcpu_book3s->slb_nr) 361 if (slb_nr > vcpu->arch.slb_nr)
364 return 0; 362 return 0;
365 363
366 slbe = &vcpu_book3s->slb[slb_nr]; 364 slbe = &vcpu->arch.slb[slb_nr];
367 365
368 return slbe->origv; 366 return slbe->origv;
369} 367}
370 368
371static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea) 369static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
372{ 370{
373 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
374 struct kvmppc_slb *slbe; 371 struct kvmppc_slb *slbe;
375 372
376 dprintk("KVM MMU: slbie(0x%llx)\n", ea); 373 dprintk("KVM MMU: slbie(0x%llx)\n", ea);
377 374
378 slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, ea); 375 slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
379 376
380 if (!slbe) 377 if (!slbe)
381 return; 378 return;
@@ -389,13 +386,12 @@ static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
389 386
390static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu) 387static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
391{ 388{
392 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
393 int i; 389 int i;
394 390
395 dprintk("KVM MMU: slbia()\n"); 391 dprintk("KVM MMU: slbia()\n");
396 392
397 for (i = 1; i < vcpu_book3s->slb_nr; i++) 393 for (i = 1; i < vcpu->arch.slb_nr; i++)
398 vcpu_book3s->slb[i].valid = false; 394 vcpu->arch.slb[i].valid = false;
399 395
400 if (vcpu->arch.shared->msr & MSR_IR) { 396 if (vcpu->arch.shared->msr & MSR_IR) {
401 kvmppc_mmu_flush_segments(vcpu); 397 kvmppc_mmu_flush_segments(vcpu);
@@ -464,7 +460,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
464 ulong mp_ea = vcpu->arch.magic_page_ea; 460 ulong mp_ea = vcpu->arch.magic_page_ea;
465 461
466 if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { 462 if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
467 slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea); 463 slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
468 if (slb) 464 if (slb)
469 gvsid = slb->vsid; 465 gvsid = slb->vsid;
470 } 466 }
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
new file mode 100644
index 000000000000..bc3a2ea94217
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -0,0 +1,180 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
16 */
17
18#include <linux/types.h>
19#include <linux/string.h>
20#include <linux/kvm.h>
21#include <linux/kvm_host.h>
22#include <linux/highmem.h>
23#include <linux/gfp.h>
24#include <linux/slab.h>
25#include <linux/hugetlb.h>
26
27#include <asm/tlbflush.h>
28#include <asm/kvm_ppc.h>
29#include <asm/kvm_book3s.h>
30#include <asm/mmu-hash64.h>
31#include <asm/hvcall.h>
32#include <asm/synch.h>
33#include <asm/ppc-opcode.h>
34#include <asm/cputable.h>
35
36/* For now use fixed-size 16MB page table */
37#define HPT_ORDER 24
38#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */
39#define HPT_HASH_MASK (HPT_NPTEG - 1)
40
41/* Pages in the VRMA are 16MB pages */
42#define VRMA_PAGE_ORDER 24
43#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
44
45/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
46#define MAX_LPID_970 63
47#define NR_LPIDS (LPID_RSVD + 1)
48unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)];
49
50long kvmppc_alloc_hpt(struct kvm *kvm)
51{
52 unsigned long hpt;
53 unsigned long lpid;
54
55 hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|__GFP_NOWARN,
56 HPT_ORDER - PAGE_SHIFT);
57 if (!hpt) {
58 pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
59 return -ENOMEM;
60 }
61 kvm->arch.hpt_virt = hpt;
62
63 do {
64 lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
65 if (lpid >= NR_LPIDS) {
66 pr_err("kvm_alloc_hpt: No LPIDs free\n");
67 free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
68 return -ENOMEM;
69 }
70 } while (test_and_set_bit(lpid, lpid_inuse));
71
72 kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
73 kvm->arch.lpid = lpid;
74
75 pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
76 return 0;
77}
78
79void kvmppc_free_hpt(struct kvm *kvm)
80{
81 clear_bit(kvm->arch.lpid, lpid_inuse);
82 free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
83}
84
85void kvmppc_map_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem)
86{
87 unsigned long i;
88 unsigned long npages = kvm->arch.ram_npages;
89 unsigned long pfn;
90 unsigned long *hpte;
91 unsigned long hash;
92 struct kvmppc_pginfo *pginfo = kvm->arch.ram_pginfo;
93
94 if (!pginfo)
95 return;
96
97 /* VRMA can't be > 1TB */
98 if (npages > 1ul << (40 - kvm->arch.ram_porder))
99 npages = 1ul << (40 - kvm->arch.ram_porder);
100 /* Can't use more than 1 HPTE per HPTEG */
101 if (npages > HPT_NPTEG)
102 npages = HPT_NPTEG;
103
104 for (i = 0; i < npages; ++i) {
105 pfn = pginfo[i].pfn;
106 if (!pfn)
107 break;
108 /* can't use hpt_hash since va > 64 bits */
109 hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
110 /*
111 * We assume that the hash table is empty and no
112 * vcpus are using it at this stage. Since we create
113 * at most one HPTE per HPTEG, we just assume entry 7
114 * is available and use it.
115 */
116 hpte = (unsigned long *) (kvm->arch.hpt_virt + (hash << 7));
117 hpte += 7 * 2;
118 /* HPTE low word - RPN, protection, etc. */
119 hpte[1] = (pfn << PAGE_SHIFT) | HPTE_R_R | HPTE_R_C |
120 HPTE_R_M | PP_RWXX;
121 wmb();
122 hpte[0] = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
123 (i << (VRMA_PAGE_ORDER - 16)) | HPTE_V_BOLTED |
124 HPTE_V_LARGE | HPTE_V_VALID;
125 }
126}
127
128int kvmppc_mmu_hv_init(void)
129{
130 unsigned long host_lpid, rsvd_lpid;
131
132 if (!cpu_has_feature(CPU_FTR_HVMODE))
133 return -EINVAL;
134
135 memset(lpid_inuse, 0, sizeof(lpid_inuse));
136
137 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
138 host_lpid = mfspr(SPRN_LPID); /* POWER7 */
139 rsvd_lpid = LPID_RSVD;
140 } else {
141 host_lpid = 0; /* PPC970 */
142 rsvd_lpid = MAX_LPID_970;
143 }
144
145 set_bit(host_lpid, lpid_inuse);
146 /* rsvd_lpid is reserved for use in partition switching */
147 set_bit(rsvd_lpid, lpid_inuse);
148
149 return 0;
150}
151
152void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
153{
154}
155
156static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
157{
158 kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
159}
160
161static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
162 struct kvmppc_pte *gpte, bool data)
163{
164 return -ENOENT;
165}
166
167void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
168{
169 struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
170
171 if (cpu_has_feature(CPU_FTR_ARCH_206))
172 vcpu->arch.slb_nr = 32; /* POWER7 */
173 else
174 vcpu->arch.slb_nr = 64;
175
176 mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
177 mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;
178
179 vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
180}
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
new file mode 100644
index 000000000000..ea0f8c537c28
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -0,0 +1,73 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
16 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
17 */
18
19#include <linux/types.h>
20#include <linux/string.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/highmem.h>
24#include <linux/gfp.h>
25#include <linux/slab.h>
26#include <linux/hugetlb.h>
27#include <linux/list.h>
28
29#include <asm/tlbflush.h>
30#include <asm/kvm_ppc.h>
31#include <asm/kvm_book3s.h>
32#include <asm/mmu-hash64.h>
33#include <asm/hvcall.h>
34#include <asm/synch.h>
35#include <asm/ppc-opcode.h>
36#include <asm/kvm_host.h>
37#include <asm/udbg.h>
38
39#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
40
41long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
42 unsigned long ioba, unsigned long tce)
43{
44 struct kvm *kvm = vcpu->kvm;
45 struct kvmppc_spapr_tce_table *stt;
46
47 /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
48 /* liobn, ioba, tce); */
49
50 list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
51 if (stt->liobn == liobn) {
52 unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
53 struct page *page;
54 u64 *tbl;
55
56 /* udbg_printf("H_PUT_TCE: liobn 0x%lx => stt=%p window_size=0x%x\n", */
57 /* liobn, stt, stt->window_size); */
58 if (ioba >= stt->window_size)
59 return H_PARAMETER;
60
61 page = stt->pages[idx / TCES_PER_PAGE];
62 tbl = (u64 *)page_address(page);
63
64 /* FIXME: Need to validate the TCE itself */
65 /* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */
66 tbl[idx % TCES_PER_PAGE] = tce;
67 return H_SUCCESS;
68 }
69 }
70
71 /* Didn't find the liobn, punt it to userspace */
72 return H_TOO_HARD;
73}
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 1dd5a1ddfd0d..88c8f26add02 100644
--- a/arch/powerpc/kvm/book3s_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
@@ -20,8 +20,11 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <asm/kvm_book3s.h> 21#include <asm/kvm_book3s.h>
22 22
23EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter); 23#ifdef CONFIG_KVM_BOOK3S_64_HV
24EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem); 24EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
25#else
26EXPORT_SYMBOL_GPL(kvmppc_handler_trampoline_enter);
27EXPORT_SYMBOL_GPL(kvmppc_handler_lowmem_trampoline);
25EXPORT_SYMBOL_GPL(kvmppc_rmcall); 28EXPORT_SYMBOL_GPL(kvmppc_rmcall);
26EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu); 29EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
27#ifdef CONFIG_ALTIVEC 30#ifdef CONFIG_ALTIVEC
@@ -30,3 +33,5 @@ EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
30#ifdef CONFIG_VSX 33#ifdef CONFIG_VSX
31EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx); 34EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx);
32#endif 35#endif
36#endif
37
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
new file mode 100644
index 000000000000..cc0d7f1b19ab
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -0,0 +1,1269 @@
1/*
2 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
3 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
4 *
5 * Authors:
6 * Paul Mackerras <paulus@au1.ibm.com>
7 * Alexander Graf <agraf@suse.de>
8 * Kevin Wolf <mail@kevin-wolf.de>
9 *
10 * Description: KVM functions specific to running on Book 3S
11 * processors in hypervisor mode (specifically POWER7 and later).
12 *
13 * This file is derived from arch/powerpc/kvm/book3s.c,
14 * by Alexander Graf <agraf@suse.de>.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License, version 2, as
18 * published by the Free Software Foundation.
19 */
20
21#include <linux/kvm_host.h>
22#include <linux/err.h>
23#include <linux/slab.h>
24#include <linux/preempt.h>
25#include <linux/sched.h>
26#include <linux/delay.h>
27#include <linux/fs.h>
28#include <linux/anon_inodes.h>
29#include <linux/cpumask.h>
30#include <linux/spinlock.h>
31#include <linux/page-flags.h>
32
33#include <asm/reg.h>
34#include <asm/cputable.h>
35#include <asm/cacheflush.h>
36#include <asm/tlbflush.h>
37#include <asm/uaccess.h>
38#include <asm/io.h>
39#include <asm/kvm_ppc.h>
40#include <asm/kvm_book3s.h>
41#include <asm/mmu_context.h>
42#include <asm/lppaca.h>
43#include <asm/processor.h>
44#include <asm/cputhreads.h>
45#include <asm/page.h>
46#include <linux/gfp.h>
47#include <linux/sched.h>
48#include <linux/vmalloc.h>
49#include <linux/highmem.h>
50
51/*
52 * For now, limit memory to 64GB and require it to be large pages.
53 * This value is chosen because it makes the ram_pginfo array be
54 * 64kB in size, which is about as large as we want to be trying
55 * to allocate with kmalloc.
56 */
57#define MAX_MEM_ORDER 36
58
59#define LARGE_PAGE_ORDER 24 /* 16MB pages */
60
61/* #define EXIT_DEBUG */
62/* #define EXIT_DEBUG_SIMPLE */
63/* #define EXIT_DEBUG_INT */
64
65void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
66{
67 local_paca->kvm_hstate.kvm_vcpu = vcpu;
68 local_paca->kvm_hstate.kvm_vcore = vcpu->arch.vcore;
69}
70
71void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
72{
73}
74
75static void kvmppc_vcpu_blocked(struct kvm_vcpu *vcpu);
76static void kvmppc_vcpu_unblocked(struct kvm_vcpu *vcpu);
77
78void kvmppc_vcpu_block(struct kvm_vcpu *vcpu)
79{
80 u64 now;
81 unsigned long dec_nsec;
82
83 now = get_tb();
84 if (now >= vcpu->arch.dec_expires && !kvmppc_core_pending_dec(vcpu))
85 kvmppc_core_queue_dec(vcpu);
86 if (vcpu->arch.pending_exceptions)
87 return;
88 if (vcpu->arch.dec_expires != ~(u64)0) {
89 dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC /
90 tb_ticks_per_sec;
91 hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
92 HRTIMER_MODE_REL);
93 }
94
95 kvmppc_vcpu_blocked(vcpu);
96
97 kvm_vcpu_block(vcpu);
98 vcpu->stat.halt_wakeup++;
99
100 if (vcpu->arch.dec_expires != ~(u64)0)
101 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
102
103 kvmppc_vcpu_unblocked(vcpu);
104}
105
106void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
107{
108 vcpu->arch.shregs.msr = msr;
109}
110
111void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
112{
113 vcpu->arch.pvr = pvr;
114}
115
116void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
117{
118 int r;
119
120 pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
121 pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
122 vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
123 for (r = 0; r < 16; ++r)
124 pr_err("r%2d = %.16lx r%d = %.16lx\n",
125 r, kvmppc_get_gpr(vcpu, r),
126 r+16, kvmppc_get_gpr(vcpu, r+16));
127 pr_err("ctr = %.16lx lr = %.16lx\n",
128 vcpu->arch.ctr, vcpu->arch.lr);
129 pr_err("srr0 = %.16llx srr1 = %.16llx\n",
130 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
131 pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
132 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
133 pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
134 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
135 pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
136 vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
137 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
138 pr_err("fault dar = %.16lx dsisr = %.8x\n",
139 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
140 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
141 for (r = 0; r < vcpu->arch.slb_max; ++r)
142 pr_err(" ESID = %.16llx VSID = %.16llx\n",
143 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
144 pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
145 vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
146 vcpu->arch.last_inst);
147}
148
149struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
150{
151 int r;
152 struct kvm_vcpu *v, *ret = NULL;
153
154 mutex_lock(&kvm->lock);
155 kvm_for_each_vcpu(r, v, kvm) {
156 if (v->vcpu_id == id) {
157 ret = v;
158 break;
159 }
160 }
161 mutex_unlock(&kvm->lock);
162 return ret;
163}
164
165static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
166{
167 vpa->shared_proc = 1;
168 vpa->yield_count = 1;
169}
170
171static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
172 unsigned long flags,
173 unsigned long vcpuid, unsigned long vpa)
174{
175 struct kvm *kvm = vcpu->kvm;
176 unsigned long pg_index, ra, len;
177 unsigned long pg_offset;
178 void *va;
179 struct kvm_vcpu *tvcpu;
180
181 tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
182 if (!tvcpu)
183 return H_PARAMETER;
184
185 flags >>= 63 - 18;
186 flags &= 7;
187 if (flags == 0 || flags == 4)
188 return H_PARAMETER;
189 if (flags < 4) {
190 if (vpa & 0x7f)
191 return H_PARAMETER;
192 /* registering new area; convert logical addr to real */
193 pg_index = vpa >> kvm->arch.ram_porder;
194 pg_offset = vpa & (kvm->arch.ram_psize - 1);
195 if (pg_index >= kvm->arch.ram_npages)
196 return H_PARAMETER;
197 if (kvm->arch.ram_pginfo[pg_index].pfn == 0)
198 return H_PARAMETER;
199 ra = kvm->arch.ram_pginfo[pg_index].pfn << PAGE_SHIFT;
200 ra |= pg_offset;
201 va = __va(ra);
202 if (flags <= 1)
203 len = *(unsigned short *)(va + 4);
204 else
205 len = *(unsigned int *)(va + 4);
206 if (pg_offset + len > kvm->arch.ram_psize)
207 return H_PARAMETER;
208 switch (flags) {
209 case 1: /* register VPA */
210 if (len < 640)
211 return H_PARAMETER;
212 tvcpu->arch.vpa = va;
213 init_vpa(vcpu, va);
214 break;
215 case 2: /* register DTL */
216 if (len < 48)
217 return H_PARAMETER;
218 if (!tvcpu->arch.vpa)
219 return H_RESOURCE;
220 len -= len % 48;
221 tvcpu->arch.dtl = va;
222 tvcpu->arch.dtl_end = va + len;
223 break;
224 case 3: /* register SLB shadow buffer */
225 if (len < 8)
226 return H_PARAMETER;
227 if (!tvcpu->arch.vpa)
228 return H_RESOURCE;
229 tvcpu->arch.slb_shadow = va;
230 len = (len - 16) / 16;
231 tvcpu->arch.slb_shadow = va;
232 break;
233 }
234 } else {
235 switch (flags) {
236 case 5: /* unregister VPA */
237 if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl)
238 return H_RESOURCE;
239 tvcpu->arch.vpa = NULL;
240 break;
241 case 6: /* unregister DTL */
242 tvcpu->arch.dtl = NULL;
243 break;
244 case 7: /* unregister SLB shadow buffer */
245 tvcpu->arch.slb_shadow = NULL;
246 break;
247 }
248 }
249 return H_SUCCESS;
250}
251
252int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
253{
254 unsigned long req = kvmppc_get_gpr(vcpu, 3);
255 unsigned long target, ret = H_SUCCESS;
256 struct kvm_vcpu *tvcpu;
257
258 switch (req) {
259 case H_CEDE:
260 vcpu->arch.shregs.msr |= MSR_EE;
261 vcpu->arch.ceded = 1;
262 smp_mb();
263 if (!vcpu->arch.prodded)
264 kvmppc_vcpu_block(vcpu);
265 else
266 vcpu->arch.prodded = 0;
267 smp_mb();
268 vcpu->arch.ceded = 0;
269 break;
270 case H_PROD:
271 target = kvmppc_get_gpr(vcpu, 4);
272 tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
273 if (!tvcpu) {
274 ret = H_PARAMETER;
275 break;
276 }
277 tvcpu->arch.prodded = 1;
278 smp_mb();
279 if (vcpu->arch.ceded) {
280 if (waitqueue_active(&vcpu->wq)) {
281 wake_up_interruptible(&vcpu->wq);
282 vcpu->stat.halt_wakeup++;
283 }
284 }
285 break;
286 case H_CONFER:
287 break;
288 case H_REGISTER_VPA:
289 ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
290 kvmppc_get_gpr(vcpu, 5),
291 kvmppc_get_gpr(vcpu, 6));
292 break;
293 default:
294 return RESUME_HOST;
295 }
296 kvmppc_set_gpr(vcpu, 3, ret);
297 vcpu->arch.hcall_needed = 0;
298 return RESUME_GUEST;
299}
300
301static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
302 struct task_struct *tsk)
303{
304 int r = RESUME_HOST;
305
306 vcpu->stat.sum_exits++;
307
308 run->exit_reason = KVM_EXIT_UNKNOWN;
309 run->ready_for_interrupt_injection = 1;
310 switch (vcpu->arch.trap) {
311 /* We're good on these - the host merely wanted to get our attention */
312 case BOOK3S_INTERRUPT_HV_DECREMENTER:
313 vcpu->stat.dec_exits++;
314 r = RESUME_GUEST;
315 break;
316 case BOOK3S_INTERRUPT_EXTERNAL:
317 vcpu->stat.ext_intr_exits++;
318 r = RESUME_GUEST;
319 break;
320 case BOOK3S_INTERRUPT_PERFMON:
321 r = RESUME_GUEST;
322 break;
323 case BOOK3S_INTERRUPT_PROGRAM:
324 {
325 ulong flags;
326 /*
327 * Normally program interrupts are delivered directly
328 * to the guest by the hardware, but we can get here
329 * as a result of a hypervisor emulation interrupt
330 * (e40) getting turned into a 700 by BML RTAS.
331 */
332 flags = vcpu->arch.shregs.msr & 0x1f0000ull;
333 kvmppc_core_queue_program(vcpu, flags);
334 r = RESUME_GUEST;
335 break;
336 }
337 case BOOK3S_INTERRUPT_SYSCALL:
338 {
339 /* hcall - punt to userspace */
340 int i;
341
342 if (vcpu->arch.shregs.msr & MSR_PR) {
343 /* sc 1 from userspace - reflect to guest syscall */
344 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
345 r = RESUME_GUEST;
346 break;
347 }
348 run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
349 for (i = 0; i < 9; ++i)
350 run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
351 run->exit_reason = KVM_EXIT_PAPR_HCALL;
352 vcpu->arch.hcall_needed = 1;
353 r = RESUME_HOST;
354 break;
355 }
356 /*
357 * We get these next two if the guest does a bad real-mode access,
358 * as we have enabled VRMA (virtualized real mode area) mode in the
359 * LPCR. We just generate an appropriate DSI/ISI to the guest.
360 */
361 case BOOK3S_INTERRUPT_H_DATA_STORAGE:
362 vcpu->arch.shregs.dsisr = vcpu->arch.fault_dsisr;
363 vcpu->arch.shregs.dar = vcpu->arch.fault_dar;
364 kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
365 r = RESUME_GUEST;
366 break;
367 case BOOK3S_INTERRUPT_H_INST_STORAGE:
368 kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE,
369 0x08000000);
370 r = RESUME_GUEST;
371 break;
372 /*
373 * This occurs if the guest executes an illegal instruction.
374 * We just generate a program interrupt to the guest, since
375 * we don't emulate any guest instructions at this stage.
376 */
377 case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
378 kvmppc_core_queue_program(vcpu, 0x80000);
379 r = RESUME_GUEST;
380 break;
381 default:
382 kvmppc_dump_regs(vcpu);
383 printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
384 vcpu->arch.trap, kvmppc_get_pc(vcpu),
385 vcpu->arch.shregs.msr);
386 r = RESUME_HOST;
387 BUG();
388 break;
389 }
390
391
392 if (!(r & RESUME_HOST)) {
393 /* To avoid clobbering exit_reason, only check for signals if
394 * we aren't already exiting to userspace for some other
395 * reason. */
396 if (signal_pending(tsk)) {
397 vcpu->stat.signal_exits++;
398 run->exit_reason = KVM_EXIT_INTR;
399 r = -EINTR;
400 } else {
401 kvmppc_core_deliver_interrupts(vcpu);
402 }
403 }
404
405 return r;
406}
407
408int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
409 struct kvm_sregs *sregs)
410{
411 int i;
412
413 sregs->pvr = vcpu->arch.pvr;
414
415 memset(sregs, 0, sizeof(struct kvm_sregs));
416 for (i = 0; i < vcpu->arch.slb_max; i++) {
417 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
418 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
419 }
420
421 return 0;
422}
423
424int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
425 struct kvm_sregs *sregs)
426{
427 int i, j;
428
429 kvmppc_set_pvr(vcpu, sregs->pvr);
430
431 j = 0;
432 for (i = 0; i < vcpu->arch.slb_nr; i++) {
433 if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
434 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
435 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
436 ++j;
437 }
438 }
439 vcpu->arch.slb_max = j;
440
441 return 0;
442}
443
444int kvmppc_core_check_processor_compat(void)
445{
446 if (cpu_has_feature(CPU_FTR_HVMODE))
447 return 0;
448 return -EIO;
449}
450
451struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
452{
453 struct kvm_vcpu *vcpu;
454 int err = -EINVAL;
455 int core;
456 struct kvmppc_vcore *vcore;
457
458 core = id / threads_per_core;
459 if (core >= KVM_MAX_VCORES)
460 goto out;
461
462 err = -ENOMEM;
463 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
464 if (!vcpu)
465 goto out;
466
467 err = kvm_vcpu_init(vcpu, kvm, id);
468 if (err)
469 goto free_vcpu;
470
471 vcpu->arch.shared = &vcpu->arch.shregs;
472 vcpu->arch.last_cpu = -1;
473 vcpu->arch.mmcr[0] = MMCR0_FC;
474 vcpu->arch.ctrl = CTRL_RUNLATCH;
475 /* default to host PVR, since we can't spoof it */
476 vcpu->arch.pvr = mfspr(SPRN_PVR);
477 kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
478
479 kvmppc_mmu_book3s_hv_init(vcpu);
480
481 /*
482 * Some vcpus may start out in stopped state. If we initialize
483 * them to busy-in-host state they will stop other vcpus in the
484 * vcore from running. Instead we initialize them to blocked
485 * state, effectively considering them to be stopped until we
486 * see the first run ioctl for them.
487 */
488 vcpu->arch.state = KVMPPC_VCPU_BLOCKED;
489
490 init_waitqueue_head(&vcpu->arch.cpu_run);
491
492 mutex_lock(&kvm->lock);
493 vcore = kvm->arch.vcores[core];
494 if (!vcore) {
495 vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
496 if (vcore) {
497 INIT_LIST_HEAD(&vcore->runnable_threads);
498 spin_lock_init(&vcore->lock);
499 }
500 kvm->arch.vcores[core] = vcore;
501 }
502 mutex_unlock(&kvm->lock);
503
504 if (!vcore)
505 goto free_vcpu;
506
507 spin_lock(&vcore->lock);
508 ++vcore->num_threads;
509 ++vcore->n_blocked;
510 spin_unlock(&vcore->lock);
511 vcpu->arch.vcore = vcore;
512
513 return vcpu;
514
515free_vcpu:
516 kfree(vcpu);
517out:
518 return ERR_PTR(err);
519}
520
521void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
522{
523 kvm_vcpu_uninit(vcpu);
524 kfree(vcpu);
525}
526
527static void kvmppc_vcpu_blocked(struct kvm_vcpu *vcpu)
528{
529 struct kvmppc_vcore *vc = vcpu->arch.vcore;
530
531 spin_lock(&vc->lock);
532 vcpu->arch.state = KVMPPC_VCPU_BLOCKED;
533 ++vc->n_blocked;
534 if (vc->n_runnable > 0 &&
535 vc->n_runnable + vc->n_blocked == vc->num_threads) {
536 vcpu = list_first_entry(&vc->runnable_threads, struct kvm_vcpu,
537 arch.run_list);
538 wake_up(&vcpu->arch.cpu_run);
539 }
540 spin_unlock(&vc->lock);
541}
542
543static void kvmppc_vcpu_unblocked(struct kvm_vcpu *vcpu)
544{
545 struct kvmppc_vcore *vc = vcpu->arch.vcore;
546
547 spin_lock(&vc->lock);
548 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
549 --vc->n_blocked;
550 spin_unlock(&vc->lock);
551}
552
553extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
554extern void xics_wake_cpu(int cpu);
555
556static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
557 struct kvm_vcpu *vcpu)
558{
559 struct kvm_vcpu *v;
560
561 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
562 return;
563 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
564 --vc->n_runnable;
565 /* decrement the physical thread id of each following vcpu */
566 v = vcpu;
567 list_for_each_entry_continue(v, &vc->runnable_threads, arch.run_list)
568 --v->arch.ptid;
569 list_del(&vcpu->arch.run_list);
570}
571
572static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
573{
574 int cpu;
575 struct paca_struct *tpaca;
576 struct kvmppc_vcore *vc = vcpu->arch.vcore;
577
578 cpu = vc->pcpu + vcpu->arch.ptid;
579 tpaca = &paca[cpu];
580 tpaca->kvm_hstate.kvm_vcpu = vcpu;
581 tpaca->kvm_hstate.kvm_vcore = vc;
582 smp_wmb();
583#ifdef CONFIG_PPC_ICP_NATIVE
584 if (vcpu->arch.ptid) {
585 tpaca->cpu_start = 0x80;
586 tpaca->kvm_hstate.in_guest = KVM_GUEST_MODE_GUEST;
587 wmb();
588 xics_wake_cpu(cpu);
589 ++vc->n_woken;
590 }
591#endif
592}
593
594static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
595{
596 int i;
597
598 HMT_low();
599 i = 0;
600 while (vc->nap_count < vc->n_woken) {
601 if (++i >= 1000000) {
602 pr_err("kvmppc_wait_for_nap timeout %d %d\n",
603 vc->nap_count, vc->n_woken);
604 break;
605 }
606 cpu_relax();
607 }
608 HMT_medium();
609}
610
611/*
612 * Check that we are on thread 0 and that any other threads in
613 * this core are off-line.
614 */
615static int on_primary_thread(void)
616{
617 int cpu = smp_processor_id();
618 int thr = cpu_thread_in_core(cpu);
619
620 if (thr)
621 return 0;
622 while (++thr < threads_per_core)
623 if (cpu_online(cpu + thr))
624 return 0;
625 return 1;
626}
627
628/*
629 * Run a set of guest threads on a physical core.
630 * Called with vc->lock held.
631 */
632static int kvmppc_run_core(struct kvmppc_vcore *vc)
633{
634 struct kvm_vcpu *vcpu, *vnext;
635 long ret;
636 u64 now;
637
638 /* don't start if any threads have a signal pending */
639 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
640 if (signal_pending(vcpu->arch.run_task))
641 return 0;
642
643 /*
644 * Make sure we are running on thread 0, and that
645 * secondary threads are offline.
646 * XXX we should also block attempts to bring any
647 * secondary threads online.
648 */
649 if (threads_per_core > 1 && !on_primary_thread()) {
650 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
651 vcpu->arch.ret = -EBUSY;
652 goto out;
653 }
654
655 vc->n_woken = 0;
656 vc->nap_count = 0;
657 vc->entry_exit_count = 0;
658 vc->vcore_running = 1;
659 vc->in_guest = 0;
660 vc->pcpu = smp_processor_id();
661 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
662 kvmppc_start_thread(vcpu);
663 vcpu = list_first_entry(&vc->runnable_threads, struct kvm_vcpu,
664 arch.run_list);
665
666 spin_unlock(&vc->lock);
667
668 preempt_disable();
669 kvm_guest_enter();
670 __kvmppc_vcore_entry(NULL, vcpu);
671
672 /* wait for secondary threads to finish writing their state to memory */
673 spin_lock(&vc->lock);
674 if (vc->nap_count < vc->n_woken)
675 kvmppc_wait_for_nap(vc);
676 /* prevent other vcpu threads from doing kvmppc_start_thread() now */
677 vc->vcore_running = 2;
678 spin_unlock(&vc->lock);
679
680 /* make sure updates to secondary vcpu structs are visible now */
681 smp_mb();
682 kvm_guest_exit();
683
684 preempt_enable();
685 kvm_resched(vcpu);
686
687 now = get_tb();
688 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
689 /* cancel pending dec exception if dec is positive */
690 if (now < vcpu->arch.dec_expires &&
691 kvmppc_core_pending_dec(vcpu))
692 kvmppc_core_dequeue_dec(vcpu);
693 if (!vcpu->arch.trap) {
694 if (signal_pending(vcpu->arch.run_task)) {
695 vcpu->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
696 vcpu->arch.ret = -EINTR;
697 }
698 continue; /* didn't get to run */
699 }
700 ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
701 vcpu->arch.run_task);
702 vcpu->arch.ret = ret;
703 vcpu->arch.trap = 0;
704 }
705
706 spin_lock(&vc->lock);
707 out:
708 vc->vcore_running = 0;
709 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
710 arch.run_list) {
711 if (vcpu->arch.ret != RESUME_GUEST) {
712 kvmppc_remove_runnable(vc, vcpu);
713 wake_up(&vcpu->arch.cpu_run);
714 }
715 }
716
717 return 1;
718}
719
720static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
721{
722 int ptid;
723 int wait_state;
724 struct kvmppc_vcore *vc;
725 DEFINE_WAIT(wait);
726
727 /* No need to go into the guest when all we do is going out */
728 if (signal_pending(current)) {
729 kvm_run->exit_reason = KVM_EXIT_INTR;
730 return -EINTR;
731 }
732
733 /* On PPC970, check that we have an RMA region */
734 if (!vcpu->kvm->arch.rma && cpu_has_feature(CPU_FTR_ARCH_201))
735 return -EPERM;
736
737 kvm_run->exit_reason = 0;
738 vcpu->arch.ret = RESUME_GUEST;
739 vcpu->arch.trap = 0;
740
741 flush_fp_to_thread(current);
742 flush_altivec_to_thread(current);
743 flush_vsx_to_thread(current);
744
745 /*
746 * Synchronize with other threads in this virtual core
747 */
748 vc = vcpu->arch.vcore;
749 spin_lock(&vc->lock);
750 /* This happens the first time this is called for a vcpu */
751 if (vcpu->arch.state == KVMPPC_VCPU_BLOCKED)
752 --vc->n_blocked;
753 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
754 ptid = vc->n_runnable;
755 vcpu->arch.run_task = current;
756 vcpu->arch.kvm_run = kvm_run;
757 vcpu->arch.ptid = ptid;
758 list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
759 ++vc->n_runnable;
760
761 wait_state = TASK_INTERRUPTIBLE;
762 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
763 if (signal_pending(current)) {
764 if (!vc->vcore_running) {
765 kvm_run->exit_reason = KVM_EXIT_INTR;
766 vcpu->arch.ret = -EINTR;
767 break;
768 }
769 /* have to wait for vcore to stop executing guest */
770 wait_state = TASK_UNINTERRUPTIBLE;
771 smp_send_reschedule(vc->pcpu);
772 }
773
774 if (!vc->vcore_running &&
775 vc->n_runnable + vc->n_blocked == vc->num_threads) {
776 /* we can run now */
777 if (kvmppc_run_core(vc))
778 continue;
779 }
780
781 if (vc->vcore_running == 1 && VCORE_EXIT_COUNT(vc) == 0)
782 kvmppc_start_thread(vcpu);
783
784 /* wait for other threads to come in, or wait for vcore */
785 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
786 spin_unlock(&vc->lock);
787 schedule();
788 finish_wait(&vcpu->arch.cpu_run, &wait);
789 spin_lock(&vc->lock);
790 }
791
792 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
793 kvmppc_remove_runnable(vc, vcpu);
794 spin_unlock(&vc->lock);
795
796 return vcpu->arch.ret;
797}
798
799int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
800{
801 int r;
802
803 do {
804 r = kvmppc_run_vcpu(run, vcpu);
805
806 if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
807 !(vcpu->arch.shregs.msr & MSR_PR)) {
808 r = kvmppc_pseries_do_hcall(vcpu);
809 kvmppc_core_deliver_interrupts(vcpu);
810 }
811 } while (r == RESUME_GUEST);
812 return r;
813}
814
815static long kvmppc_stt_npages(unsigned long window_size)
816{
817 return ALIGN((window_size >> SPAPR_TCE_SHIFT)
818 * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
819}
820
821static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
822{
823 struct kvm *kvm = stt->kvm;
824 int i;
825
826 mutex_lock(&kvm->lock);
827 list_del(&stt->list);
828 for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
829 __free_page(stt->pages[i]);
830 kfree(stt);
831 mutex_unlock(&kvm->lock);
832
833 kvm_put_kvm(kvm);
834}
835
836static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
837{
838 struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
839 struct page *page;
840
841 if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
842 return VM_FAULT_SIGBUS;
843
844 page = stt->pages[vmf->pgoff];
845 get_page(page);
846 vmf->page = page;
847 return 0;
848}
849
/* mmap handling for TCE-table fds: all faults are served from stt->pages[]. */
static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};
853
/* ->mmap for a TCE-table fd: pages are supplied lazily by the fault handler. */
static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}
859
860static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
861{
862 struct kvmppc_spapr_tce_table *stt = filp->private_data;
863
864 release_spapr_tce_table(stt);
865 return 0;
866}
867
868static struct file_operations kvm_spapr_tce_fops = {
869 .mmap = kvm_spapr_tce_mmap,
870 .release = kvm_spapr_tce_release,
871};
872
/*
 * Handle the KVM_CREATE_SPAPR_TCE ioctl: allocate a TCE table for the
 * given LIOBN and DMA window size and return an fd that userspace can
 * mmap to inspect the table.  Takes a reference on the VM which is
 * dropped when the fd is released (release_spapr_tce_table()).
 * Returns the new fd, -EBUSY if the LIOBN already exists, or -ENOMEM.
 */
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	long npages;
	int ret = -ENOMEM;
	int i;

	/* Check this LIOBN hasn't been previously allocated */
	/* NOTE(review): this scan is not under kvm->lock, while the
	 * list_add below is; two racing ioctls could both pass the
	 * check — confirm callers are serialized or lock the scan. */
	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt->liobn == args->liobn)
			return -EBUSY;
	}

	npages = kvmppc_stt_npages(args->window_size);

	/* descriptor plus trailing page-pointer array in one allocation */
	stt = kzalloc(sizeof(*stt) + npages* sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail;

	stt->liobn = args->liobn;
	stt->window_size = args->window_size;
	stt->kvm = kvm;

	for (i = 0; i < npages; i++) {
		stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!stt->pages[i])
			goto fail;
	}

	/* dropped by release_spapr_tce_table() when the fd goes away */
	kvm_get_kvm(kvm);

	mutex_lock(&kvm->lock);
	list_add(&stt->list, &kvm->arch.spapr_tce_tables);

	mutex_unlock(&kvm->lock);

	return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				stt, O_RDWR);

fail:
	/* kzalloc zeroed the array, so unallocated slots are NULL */
	if (stt) {
		for (i = 0; i < npages; i++)
			if (stt->pages[i])
				__free_page(stt->pages[i]);

		kfree(stt);
	}
	return ret;
}
924
/* Work out RMLS (real mode limit selector) field value for a given RMA size.
   Assumes POWER7 or PPC970.  Returns -1 for unsupported sizes.
   NOTE(review): an identical copy of this function lives in
   book3s_hv_builtin.c — keep the two in sync (or share one copy). */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}
950
951static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
952{
953 struct kvmppc_rma_info *ri = vma->vm_file->private_data;
954 struct page *page;
955
956 if (vmf->pgoff >= ri->npages)
957 return VM_FAULT_SIGBUS;
958
959 page = pfn_to_page(ri->base_pfn + vmf->pgoff);
960 get_page(page);
961 vmf->page = page;
962 return 0;
963}
964
/* mmap handling for RMA fds: faults map straight onto the contiguous RMA. */
static const struct vm_operations_struct kvm_rma_vm_ops = {
	.fault = kvm_rma_fault,
};
968
/* ->mmap for an RMA fd.  VM_RESERVED marks the vma so the mm core leaves
 * these pages alone (e.g. not swapped); pages come from the fault handler. */
static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &kvm_rma_vm_ops;
	return 0;
}
975
976static int kvm_rma_release(struct inode *inode, struct file *filp)
977{
978 struct kvmppc_rma_info *ri = filp->private_data;
979
980 kvm_release_rma(ri);
981 return 0;
982}
983
984static struct file_operations kvm_rma_fops = {
985 .mmap = kvm_rma_mmap,
986 .release = kvm_rma_release,
987};
988
989long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
990{
991 struct kvmppc_rma_info *ri;
992 long fd;
993
994 ri = kvm_alloc_rma();
995 if (!ri)
996 return -ENOMEM;
997
998 fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR);
999 if (fd < 0)
1000 kvm_release_rma(ri);
1001
1002 ret->rma_size = ri->npages << PAGE_SHIFT;
1003 return fd;
1004}
1005
1006static struct page *hva_to_page(unsigned long addr)
1007{
1008 struct page *page[1];
1009 int npages;
1010
1011 might_sleep();
1012
1013 npages = get_user_pages_fast(addr, 1, 1, page);
1014
1015 if (unlikely(npages != 1))
1016 return 0;
1017
1018 return page[0];
1019}
1020
/*
 * Validate and prepare a userspace memory region for use as guest RAM.
 * Only slots aligned to and composed of 16MB pages are accepted.  The
 * slot at guest physical 0 may be backed by a preallocated RMA; all
 * other 16MB pages are pinned via get_user_pages and their pfns
 * recorded in kvm->arch.ram_pginfo for use by the real-mode hcalls.
 * Returns 0 on success, -EINVAL on any validation failure.
 */
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem)
{
	unsigned long psize, porder;
	unsigned long i, npages, totalpages;
	unsigned long pg_ix;
	struct kvmppc_pginfo *pginfo;
	unsigned long hva;
	struct kvmppc_rma_info *ri = NULL;
	struct page *page;

	/* For now, only allow 16MB pages */
	porder = LARGE_PAGE_ORDER;
	psize = 1ul << porder;
	if ((mem->memory_size & (psize - 1)) ||
	    (mem->guest_phys_addr & (psize - 1))) {
		pr_err("bad memory_size=%llx @ %llx\n",
		       mem->memory_size, mem->guest_phys_addr);
		return -EINVAL;
	}

	npages = mem->memory_size >> porder;
	totalpages = (mem->guest_phys_addr + mem->memory_size) >> porder;

	/* More memory than we have space to track? */
	if (totalpages > (1ul << (MAX_MEM_ORDER - LARGE_PAGE_ORDER)))
		return -EINVAL;

	/* Do we already have an RMA registered? */
	if (mem->guest_phys_addr == 0 && kvm->arch.rma)
		return -EINVAL;

	if (totalpages > kvm->arch.ram_npages)
		kvm->arch.ram_npages = totalpages;

	/* Is this one of our preallocated RMAs? */
	if (mem->guest_phys_addr == 0) {
		struct vm_area_struct *vma;

		/* identify an RMA fd by its fops and exact vma start */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, mem->userspace_addr);
		if (vma && vma->vm_file &&
		    vma->vm_file->f_op == &kvm_rma_fops &&
		    mem->userspace_addr == vma->vm_start)
			ri = vma->vm_file->private_data;
		up_read(&current->mm->mmap_sem);
		/* PPC970 (ARCH_201) cannot run guests without a real RMO */
		if (!ri && cpu_has_feature(CPU_FTR_ARCH_201)) {
			pr_err("CPU requires an RMO\n");
			return -EINVAL;
		}
	}

	if (ri) {
		unsigned long rma_size;
		unsigned long lpcr;
		long rmls;

		rma_size = ri->npages << PAGE_SHIFT;
		if (rma_size > mem->memory_size)
			rma_size = mem->memory_size;
		rmls = lpcr_rmls(rma_size);
		if (rmls < 0) {
			pr_err("Can't use RMA of 0x%lx bytes\n", rma_size);
			return -EINVAL;
		}
		atomic_inc(&ri->use_count);
		kvm->arch.rma = ri;
		kvm->arch.n_rma_pages = rma_size >> porder;

		/* Update LPCR and RMOR */
		lpcr = kvm->arch.lpcr;
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			/* PPC970; insert RMLS value (split field) in HID4 */
			lpcr &= ~((1ul << HID4_RMLS0_SH) |
				  (3ul << HID4_RMLS2_SH));
			lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
				((rmls & 3) << HID4_RMLS2_SH);
			/* RMOR is also in HID4 */
			lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
				<< HID4_RMOR_SH;
		} else {
			/* POWER7 */
			lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
			lpcr |= rmls << LPCR_RMLS_SH;
			kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
		}
		kvm->arch.lpcr = lpcr;
		pr_info("Using RMO at %lx size %lx (LPCR = %lx)\n",
			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
	}

	/* Record the pfn of each 16MB guest page in this slot */
	pg_ix = mem->guest_phys_addr >> porder;
	pginfo = kvm->arch.ram_pginfo + pg_ix;
	for (i = 0; i < npages; ++i, ++pg_ix) {
		if (ri && pg_ix < kvm->arch.n_rma_pages) {
			/* covered by the RMA: no pinning needed */
			pginfo[i].pfn = ri->base_pfn +
				(pg_ix << (porder - PAGE_SHIFT));
			continue;
		}
		hva = mem->userspace_addr + (i << porder);
		page = hva_to_page(hva);
		if (!page) {
			pr_err("oops, no pfn for hva %lx\n", hva);
			goto err;
		}
		/* Check it's a 16MB page */
		if (!PageHead(page) ||
		    compound_order(page) != (LARGE_PAGE_ORDER - PAGE_SHIFT)) {
			pr_err("page at %lx isn't 16MB (o=%d)\n",
			       hva, compound_order(page));
			goto err;
		}
		pginfo[i].pfn = page_to_pfn(page);
	}

	return 0;

	/* NOTE(review): this error path does not put_page() the reference
	 * just taken by hva_to_page() (nor earlier ones not yet recorded
	 * in pginfo) — confirm whether kvmppc_core_destroy_vm() covers
	 * the recorded pages and whether the last one leaks. */
 err:
	return -EINVAL;
}
1141
1142void kvmppc_core_commit_memory_region(struct kvm *kvm,
1143 struct kvm_userspace_memory_region *mem)
1144{
1145 if (mem->guest_phys_addr == 0 && mem->memory_size != 0 &&
1146 !kvm->arch.rma)
1147 kvmppc_map_vrma(kvm, mem);
1148}
1149
/*
 * Per-VM initialization for book3s HV: allocate the hashed page table,
 * the guest-page pfn tracking array, and compute the VM's initial LPCR
 * (or HID4 image on PPC970, where HID4 plays the LPCR role).
 * Returns 0 on success or a negative errno.
 */
int kvmppc_core_init_vm(struct kvm *kvm)
{
	long r;
	unsigned long npages = 1ul << (MAX_MEM_ORDER - LARGE_PAGE_ORDER);
	long err = -ENOMEM;
	unsigned long lpcr;

	/* Allocate hashed page table */
	r = kvmppc_alloc_hpt(kvm);
	if (r)
		return r;

	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);

	/* one entry per possible 16MB guest page */
	kvm->arch.ram_pginfo = kzalloc(npages * sizeof(struct kvmppc_pginfo),
				       GFP_KERNEL);
	if (!kvm->arch.ram_pginfo) {
		pr_err("kvmppc_core_init_vm: couldn't alloc %lu bytes\n",
		       npages * sizeof(struct kvmppc_pginfo));
		goto out_free;
	}

	kvm->arch.ram_npages = 0;
	kvm->arch.ram_psize = 1ul << LARGE_PAGE_ORDER;
	kvm->arch.ram_porder = LARGE_PAGE_ORDER;
	kvm->arch.rma = NULL;
	kvm->arch.n_rma_pages = 0;

	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);

	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
		/* PPC970; HID4 is effectively the LPCR */
		unsigned long lpid = kvm->arch.lpid;
		kvm->arch.host_lpid = 0;
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
		/* LPID is a split field in HID4: top bits and low nibble */
		lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
		lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
			((lpid & 0xf) << HID4_LPID5_SH);
	} else {
		/* POWER7; init LPCR for virtual RMA mode */
		kvm->arch.host_lpid = mfspr(SPRN_LPID);
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
		/* keep only the host's exit-enable and LPES bits */
		lpcr &= LPCR_PECE | LPCR_LPES;
		lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
			LPCR_VPM0 | LPCR_VRMA_L;
	}
	kvm->arch.lpcr = lpcr;

	return 0;

 out_free:
	kvmppc_free_hpt(kvm);
	return err;
}
1204
/*
 * Per-VM teardown: release pinned guest pages, drop the RMA reference
 * if one was taken, and free the hashed page table.  Any remaining TCE
 * tables at this point indicate leaked fds, hence the WARN.
 */
void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	struct kvmppc_pginfo *pginfo;
	unsigned long i;

	if (kvm->arch.ram_pginfo) {
		pginfo = kvm->arch.ram_pginfo;
		kvm->arch.ram_pginfo = NULL;
		/* pages below n_rma_pages belong to the RMA, not to us */
		for (i = kvm->arch.n_rma_pages; i < kvm->arch.ram_npages; ++i)
			if (pginfo[i].pfn)
				put_page(pfn_to_page(pginfo[i].pfn));
		kfree(pginfo);
	}
	if (kvm->arch.rma) {
		kvm_release_rma(kvm->arch.rma);
		kvm->arch.rma = NULL;
	}

	kvmppc_free_hpt(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
}
1226
/* These are stubs for now */
/* Flush guest PTEs for a real-address range: intentionally a no-op in
 * this HV implementation so far. */
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
}
1231
/* We don't need to emulate any privileged instructions or dcbz */
/* HV guests run with the real MMU, so instruction emulation never applies. */
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int inst, int *advance)
{
	return EMULATE_FAIL;
}
1238
/* SPR writes are never emulated under HV; always report failure. */
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	return EMULATE_FAIL;
}
1243
/* SPR reads are never emulated under HV; always report failure. */
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
	return EMULATE_FAIL;
}
1248
1249static int kvmppc_book3s_hv_init(void)
1250{
1251 int r;
1252
1253 r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1254
1255 if (r)
1256 return r;
1257
1258 r = kvmppc_mmu_hv_init();
1259
1260 return r;
1261}
1262
/* Module teardown: unregister from the generic KVM layer. */
static void kvmppc_book3s_hv_exit(void)
{
	kvm_exit();
}
1267
1268module_init(kvmppc_book3s_hv_init);
1269module_exit(kvmppc_book3s_hv_exit);
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
new file mode 100644
index 000000000000..d43120355eec
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -0,0 +1,155 @@
1/*
2 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2, as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/kvm_host.h>
10#include <linux/preempt.h>
11#include <linux/sched.h>
12#include <linux/spinlock.h>
13#include <linux/bootmem.h>
14#include <linux/init.h>
15
16#include <asm/cputable.h>
17#include <asm/kvm_ppc.h>
18#include <asm/kvm_book3s.h>
19
20/*
21 * This maintains a list of RMAs (real mode areas) for KVM guests to use.
22 * Each RMA has to be physically contiguous and of a size that the
23 * hardware supports. PPC970 and POWER7 support 64MB, 128MB and 256MB,
24 * and other larger sizes. Since we are unlikely to be allocate that
25 * much physically contiguous memory after the system is up and running,
26 * we preallocate a set of RMAs in early boot for KVM to use.
27 */
28static unsigned long kvm_rma_size = 64 << 20; /* 64MB */
29static unsigned long kvm_rma_count;
30
31static int __init early_parse_rma_size(char *p)
32{
33 if (!p)
34 return 1;
35
36 kvm_rma_size = memparse(p, &p);
37
38 return 0;
39}
40early_param("kvm_rma_size", early_parse_rma_size);
41
42static int __init early_parse_rma_count(char *p)
43{
44 if (!p)
45 return 1;
46
47 kvm_rma_count = simple_strtoul(p, NULL, 0);
48
49 return 0;
50}
51early_param("kvm_rma_count", early_parse_rma_count);
52
53static struct kvmppc_rma_info *rma_info;
54static LIST_HEAD(free_rmas);
55static DEFINE_SPINLOCK(rma_lock);
56
/* Work out RMLS (real mode limit selector) field value for a given RMA size.
   Assumes POWER7 or PPC970.  Returns -1 for unsupported sizes.
   NOTE(review): an identical copy of this function exists in book3s_hv.c —
   keep the two in sync (or share one copy). */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}
82
/*
 * Called at boot time while the bootmem allocator is active,
 * to allocate contiguous physical memory for the real memory
 * areas for guests (PPC970/HV only).  Each RMA is put on the free
 * list and its pages get an extra reference so they can never be
 * handed back to the page allocator.
 */
void kvm_rma_init(void)
{
	unsigned long i;
	unsigned long j, npages;
	void *rma;
	struct page *pg;

	/* Only do this on PPC970 in HV mode */
	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
	    !cpu_has_feature(CPU_FTR_ARCH_201))
		return;

	if (!kvm_rma_size || !kvm_rma_count)
		return;

	/* Check that the requested size is one supported in hardware */
	if (lpcr_rmls(kvm_rma_size) < 0) {
		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
		return;
	}

	npages = kvm_rma_size >> PAGE_SHIFT;
	rma_info = alloc_bootmem(kvm_rma_count * sizeof(struct kvmppc_rma_info));
	for (i = 0; i < kvm_rma_count; ++i) {
		/* RMAs must be naturally aligned to their size */
		rma = alloc_bootmem_align(kvm_rma_size, kvm_rma_size);
		pr_info("Allocated KVM RMA at %p (%ld MB)\n", rma,
			kvm_rma_size >> 20);
		rma_info[i].base_virt = rma;
		rma_info[i].base_pfn = __pa(rma) >> PAGE_SHIFT;
		rma_info[i].npages = npages;
		list_add_tail(&rma_info[i].list, &free_rmas);
		atomic_set(&rma_info[i].use_count, 0);

		/* pin every page of the RMA forever */
		pg = pfn_to_page(rma_info[i].base_pfn);
		for (j = 0; j < npages; ++j) {
			atomic_inc(&pg->_count);
			++pg;
		}
	}
}
128
129struct kvmppc_rma_info *kvm_alloc_rma(void)
130{
131 struct kvmppc_rma_info *ri;
132
133 ri = NULL;
134 spin_lock(&rma_lock);
135 if (!list_empty(&free_rmas)) {
136 ri = list_first_entry(&free_rmas, struct kvmppc_rma_info, list);
137 list_del(&ri->list);
138 atomic_inc(&ri->use_count);
139 }
140 spin_unlock(&rma_lock);
141 return ri;
142}
143EXPORT_SYMBOL_GPL(kvm_alloc_rma);
144
145void kvm_release_rma(struct kvmppc_rma_info *ri)
146{
147 if (atomic_dec_and_test(&ri->use_count)) {
148 spin_lock(&rma_lock);
149 list_add_tail(&ri->list, &free_rmas);
150 spin_unlock(&rma_lock);
151
152 }
153}
154EXPORT_SYMBOL_GPL(kvm_release_rma);
155
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
new file mode 100644
index 000000000000..3f7b674dd4bf
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -0,0 +1,166 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
16 *
17 * Derived from book3s_interrupts.S, which is:
18 * Copyright SUSE Linux Products GmbH 2009
19 *
20 * Authors: Alexander Graf <agraf@suse.de>
21 */
22
23#include <asm/ppc_asm.h>
24#include <asm/kvm_asm.h>
25#include <asm/reg.h>
26#include <asm/page.h>
27#include <asm/asm-offsets.h>
28#include <asm/exception-64s.h>
29#include <asm/ppc-opcode.h>
30
31/*****************************************************************************
32 * *
33 * Guest entry / exit code that is in kernel module memory (vmalloc) *
34 * *
35 ****************************************************************************/
36
/* Registers:
 *  r4: vcpu pointer
 *  r13: PACA (per-CPU area), used for host state save area (HSTATE_*)
 */
_GLOBAL(__kvmppc_vcore_entry)

	/* Write correct stack frame */
	mflr	r0
	std	r0,PPC_LR_STKOFF(r1)

	/* Save host state to the stack */
	stdu	r1, -SWITCH_FRAME_SIZE(r1)

	/* Save non-volatile registers (r14 - r31) */
	SAVE_NVGPRS(r1)

	/* Save host DSCR (POWER7 only; PPC970 has no DSCR) */
BEGIN_FTR_SECTION
	mfspr	r3, SPRN_DSCR
	std	r3, HSTATE_DSCR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save host DABR */
	mfspr	r3, SPRN_DABR
	std	r3, HSTATE_DABR(r13)

	/* Hard-disable interrupts: clear MSR_EE via rotate trick */
	mfmsr   r10
	std	r10, HSTATE_HOST_MSR(r13)
	rldicl  r10,r10,48,1
	rotldi  r10,r10,16
	mtmsrd  r10,1

	/* Save host PMU registers and load guest PMU registers */
	/* R4 is live here (vcpu pointer) but not r3 or r5 */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r7, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable interrupts */
	isync
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r5, LPPACA_PMCINUSE(r3)
	cmpwi	r5, 0
	beq	31f			/* skip if not */
	mfspr	r5, SPRN_MMCR1
	mfspr	r6, SPRN_MMCRA
	std	r7, HSTATE_MMCR(r13)
	std	r5, HSTATE_MMCR + 8(r13)
	std	r6, HSTATE_MMCR + 16(r13)
	mfspr	r3, SPRN_PMC1
	mfspr	r5, SPRN_PMC2
	mfspr	r6, SPRN_PMC3
	mfspr	r7, SPRN_PMC4
	mfspr	r8, SPRN_PMC5
	mfspr	r9, SPRN_PMC6
BEGIN_FTR_SECTION
	/* PPC970 has two extra PMCs */
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, HSTATE_PMC(r13)
	stw	r5, HSTATE_PMC + 4(r13)
	stw	r6, HSTATE_PMC + 8(r13)
	stw	r7, HSTATE_PMC + 12(r13)
	stw	r8, HSTATE_PMC + 16(r13)
	stw	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	stw	r10, HSTATE_PMC + 24(r13)
	stw	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
31:

	/*
	 * Put whatever is in the decrementer into the
	 * hypervisor decrementer.
	 * (DEC is sign-extended and added to the timebase so the host
	 * expiry time can be restored on exit.)
	 */
	mfspr	r8,SPRN_DEC
	mftb	r7
	mtspr	SPRN_HDEC,r8
	extsw	r8,r8
	add	r8,r8,r7
	std	r8,HSTATE_DECEXP(r13)

	/*
	 * On PPC970, if the guest vcpu has an external interrupt pending,
	 * send ourselves an IPI so as to interrupt the guest once it
	 * enables interrupts.  (It must have interrupts disabled,
	 * otherwise we would already have delivered the interrupt.)
	 */
BEGIN_FTR_SECTION
	ld	r0, VCPU_PENDING_EXC(r4)
	li	r7, (1 << BOOK3S_IRQPRIO_EXTERNAL)
	oris	r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
	and.	r0, r0, r7
	beq	32f
	mr	r31, r4			/* r31 is non-volatile: preserve vcpu across call */
	lhz	r3, PACAPACAINDEX(r13)
	bl	smp_send_reschedule
	nop
	mr	r4, r31
32:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)

	/* Jump to partition switch code */
	bl	.kvmppc_hv_entry_trampoline
	nop

/*
 * We return here in virtual mode after the guest exits
 * with something that we can't handle in real mode.
 * Interrupts are enabled again at this point.
 */

.global kvmppc_handler_highmem
kvmppc_handler_highmem:

	/*
	 * Register usage at this point:
	 *
	 * R1       = host R1
	 * R2       = host R2
	 * R12      = exit handler id
	 * R13      = PACA
	 */

	/* Restore non-volatile host registers (r14 - r31) */
	REST_NVGPRS(r1)

	addi	r1, r1, SWITCH_FRAME_SIZE
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
new file mode 100644
index 000000000000..fcfe6b055558
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -0,0 +1,370 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
7 */
8
9#include <linux/types.h>
10#include <linux/string.h>
11#include <linux/kvm.h>
12#include <linux/kvm_host.h>
13#include <linux/hugetlb.h>
14
15#include <asm/tlbflush.h>
16#include <asm/kvm_ppc.h>
17#include <asm/kvm_book3s.h>
18#include <asm/mmu-hash64.h>
19#include <asm/hvcall.h>
20#include <asm/synch.h>
21#include <asm/ppc-opcode.h>
22
23/* For now use fixed-size 16MB page table */
24#define HPT_ORDER 24
25#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */
26#define HPT_HASH_MASK (HPT_NPTEG - 1)
27
28#define HPTE_V_HVLOCK 0x40UL
29
/*
 * Try to lock an HPTE by atomically setting HPTE_V_HVLOCK in its first
 * doubleword, but only if none of the bits in @bits are currently set.
 * Returns 1 on success (lock now held), 0 if a bit in @bits was set or
 * the stdcx. lost its reservation.
 *
 * NOTE(review): "li %1,%3" uses the register operand %3 where li expects
 * an immediate; the bare register number happens to assemble as a small
 * non-zero immediate, which is sufficient here since only old == 0 is
 * tested — but confirm this is intentional rather than a typo for "mr".
 */
static inline long lock_hpte(unsigned long *hpte, unsigned long bits)
{
	unsigned long tmp, old;

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	ori	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	li	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
		     : "cc", "memory");
	return old == 0;
}
47
/*
 * Real-mode handler for the H_ENTER hypercall: insert an HPTE into the
 * guest's hashed page table.  Runs with the MMU off, so it may only
 * touch the linear mapping.  The guest-supplied RPN is translated to a
 * real page via the ram_pginfo table.  On success the chosen PTE index
 * is returned to the guest in gpr[4].
 */
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	unsigned long porder;
	struct kvm *kvm = vcpu->kvm;
	unsigned long i, lpn, pa;
	unsigned long *hpte;

	/* only handle 4k, 64k and 16M pages for now */
	porder = 12;
	if (pteh & HPTE_V_LARGE) {
		if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		    (ptel & 0xf000) == 0x1000) {
			/* 64k page */
			porder = 16;
		} else if ((ptel & 0xff000) == 0) {
			/* 16M page */
			porder = 24;
			/* lowest AVA bit must be 0 for 16M pages */
			if (pteh & 0x80)
				return H_PARAMETER;
		} else
			return H_PARAMETER;
	}
	/* translate the guest logical page number to a real address */
	lpn = (ptel & HPTE_R_RPN) >> kvm->arch.ram_porder;
	if (lpn >= kvm->arch.ram_npages || porder > kvm->arch.ram_porder)
		return H_PARAMETER;
	pa = kvm->arch.ram_pginfo[lpn].pfn << PAGE_SHIFT;
	if (!pa)
		return H_PARAMETER;
	/* Check WIMG: only M (cacheable) or WIM (write-thru I/O) allowed */
	if ((ptel & HPTE_R_WIMG) != HPTE_R_M &&
	    (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M))
		return H_PARAMETER;
	pteh &= ~0x60UL;
	/* substitute the real page address for the guest's RPN */
	ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize);
	ptel |= pa;
	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		/* search the 8-entry PTEG for a free slot */
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; ; ++i) {
			if (i == 8)
				return H_PTEG_FULL;
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
				break;
			hpte += 2;
		}
	} else {
		/* caller asked for this exact slot */
		i = 0;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
			return H_PTEG_FULL;
	}
	/* write the second dword before making the PTE valid */
	hpte[1] = ptel;
	eieio();
	hpte[0] = pteh;		/* also clears HPTE_V_HVLOCK */
	asm volatile("ptesync" : : : "memory");
	atomic_inc(&kvm->arch.ram_pginfo[lpn].refcnt);
	vcpu->arch.gpr[4] = pte_index + i;
	return H_SUCCESS;
}
112
/*
 * Build the RB operand for a tlbie/tlbiel instruction from an HPTE's
 * V (@v) and R (@r) doublewords and its index in the HPT.  The low VA
 * bits, which are not stored in the AVA, are recovered by xor-ing the
 * hash (derived from the PTEG index) with the VSID portion of the AVA.
 */
static unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
				      unsigned long pte_index)
{
	unsigned long rb, va_low;

	rb = (v & ~0x7fUL) << 16;		/* AVA field */
	va_low = pte_index >> 3;		/* PTEG index == hash */
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;		/* secondary hash is ~primary */
	/* xor vsid from AVA */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> 12;
	else
		va_low ^= v >> 24;
	va_low &= 0x7ff;
	if (v & HPTE_V_LARGE) {
		rb |= 1;			/* L field */
		if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		    (r & 0xff000)) {
			/* non-16MB large page, must be 64k */
			/* (masks depend on page size) */
			rb |= 0x1000;		/* page encoding in LP field */
			rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
			rb |= (va_low & 0xfe);	/* AVAL field (P7 doesn't seem to care) */
		}
	} else {
		/* 4kB page */
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11b of VA */
	}
	rb |= (v >> 54) & 0x300;		/* B field */
	return rb;
}
145
146#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
147
/*
 * Try to take the global tlbie lock (only one CPU may issue tlbie at a
 * time on these machines).  Uses the PACA lock token as the owner value.
 * Returns 1 if the lock was acquired, 0 if it was already held.
 */
static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}
165
/*
 * Real-mode handler for H_REMOVE: invalidate one HPTE, subject to the
 * optional H_AVPN / H_ANDCOND match conditions, flush the translation
 * from the TLB, and return the old V and R words in gpr[4]/gpr[5].
 * With a single online vcpu the flush can use tlbiel (local) instead of
 * the globally serialized tlbie.
 */
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn,
		     unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;

	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;	/* drop the lock on mismatch */
		return H_NOT_FOUND;
	}
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
	vcpu->arch.gpr[5] = r = hpte[1];
	rb = compute_tlbie_rb(v, r, pte_index);
	hpte[0] = 0;			/* invalidates and unlocks */
	if (!(flags & H_LOCAL)) {
		while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	return H_SUCCESS;
}
206
/*
 * Real-mode handler for H_BULK_REMOVE: process up to 4 remove requests
 * packed in gpr[4..11] (two registers each: flags+index, match value).
 * Each entry's result code is written back into its first register per
 * PAPR (0x80 = success, 0x90 = not found, 0xa0 = parameter error);
 * req 3 terminates the list.  All invalidations are batched into a
 * single tlbie/tlbiel sequence at the end.
 */
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, tlbrb[4];
	long int i, found;
	long int n_inval = 0;
	unsigned long flags, req, pte_index;
	long int local = 0;
	long int ret = H_SUCCESS;

	if (atomic_read(&kvm->online_vcpus) == 1)
		local = 1;
	for (i = 0; i < 4; ++i) {
		/* unpack flags (top byte) and PTE index */
		pte_index = args[i * 2];
		flags = pte_index >> 56;
		pte_index &= ((1ul << 56) - 1);
		req = flags >> 6;
		flags &= 3;
		if (req == 3)		/* end-of-list marker */
			break;
		if (req != 1 || flags == 3 ||
		    pte_index >= (HPT_NPTEG << 3)) {
			/* parameter error */
			args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
			ret = H_PARAMETER;
			break;
		}
		hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		while (!lock_hpte(hp, HPTE_V_HVLOCK))
			cpu_relax();
		found = 0;
		if (hp[0] & HPTE_V_VALID) {
			switch (flags & 3) {
			case 0:		/* absolute */
				found = 1;
				break;
			case 1:		/* andcond */
				if (!(hp[0] & args[i * 2 + 1]))
					found = 1;
				break;
			case 2:		/* AVPN */
				if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
					found = 1;
				break;
			}
		}
		if (!found) {
			hp[0] &= ~HPTE_V_HVLOCK;	/* unlock, report not found */
			args[i * 2] = ((0x90 | flags) << 56) + pte_index;
			continue;
		}
		/* insert R and C bits from PTE */
		flags |= (hp[1] >> 5) & 0x0c;
		args[i * 2] = ((0x80 | flags) << 56) + pte_index;
		tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
		hp[0] = 0;		/* invalidates and unlocks */
	}
	if (n_inval == 0)
		return ret;

	if (!local) {
		while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile(PPC_TLBIE(%1,%0)
				     : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
		asm volatile("ptesync" : : : "memory");
	}
	return ret;
}
285
/*
 * Real-mode handler for H_PROTECT: update the protection (PP/N/key)
 * bits of an existing HPTE.  The entry is made temporarily invalid,
 * the stale translation is flushed, then the entry is rewritten with
 * the new R word and revalidated.
 */
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;

	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;	/* drop the lock on mismatch */
		return H_NOT_FOUND;
	}
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	v = hpte[0];
	/* replace protection and key bits from the flags argument */
	r = hpte[1] & ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
			HPTE_R_KEY_HI | HPTE_R_KEY_LO);
	r |= (flags << 55) & HPTE_R_PP0;
	r |= (flags << 48) & HPTE_R_KEY_HI;
	r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
	rb = compute_tlbie_rb(v, r, pte_index);
	hpte[0] = v & ~HPTE_V_VALID;	/* invalidate while we update */
	if (!(flags & H_LOCAL)) {
		while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	/* write new R word, then revalidate and unlock */
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}
333
334static unsigned long reverse_xlate(struct kvm *kvm, unsigned long realaddr)
335{
336 long int i;
337 unsigned long offset, rpn;
338
339 offset = realaddr & (kvm->arch.ram_psize - 1);
340 rpn = (realaddr - offset) >> PAGE_SHIFT;
341 for (i = 0; i < kvm->arch.ram_npages; ++i)
342 if (rpn == kvm->arch.ram_pginfo[i].pfn)
343 return (i << PAGE_SHIFT) + offset;
344 return HPTE_R_RPN; /* all 1s in the RPN field */
345}
346
/*
 * Real-mode handler for H_READ: return 1 HPTE (or 4, with H_READ_4) in
 * gpr[4..].  With H_R_XLATE the real address in the R word is translated
 * back to the guest logical address.  Note: the HPTEs are read without
 * taking the HV lock, so a concurrent update may be observed mid-flight.
 */
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, r;
	int i, n = 1;

	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;	/* 4-entry reads are 4-aligned */
		n = 4;
	}
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		r = hpte[1];
		if ((flags & H_R_XLATE) && (hpte[0] & HPTE_V_VALID))
			r = reverse_xlate(kvm, r & HPTE_R_RPN) |
				(r & ~HPTE_R_RPN);
		vcpu->arch.gpr[4 + i * 2] = hpte[0];
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
new file mode 100644
index 000000000000..6dd33581a228
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -0,0 +1,1345 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
12 *
13 * Derived from book3s_rmhandlers.S and other files, which are:
14 *
15 * Copyright SUSE Linux Products GmbH 2009
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20#include <asm/ppc_asm.h>
21#include <asm/kvm_asm.h>
22#include <asm/reg.h>
23#include <asm/page.h>
24#include <asm/asm-offsets.h>
25#include <asm/exception-64s.h>
26
27/*****************************************************************************
28 * *
29 * Real Mode handlers that need to be in the linear mapping *
30 * *
31 ****************************************************************************/
32
33 .globl kvmppc_skip_interrupt
34kvmppc_skip_interrupt:
35 mfspr r13,SPRN_SRR0
36 addi r13,r13,4
37 mtspr SPRN_SRR0,r13
38 GET_SCRATCH0(r13)
39 rfid
40 b .
41
42 .globl kvmppc_skip_Hinterrupt
43kvmppc_skip_Hinterrupt:
44 mfspr r13,SPRN_HSRR0
45 addi r13,r13,4
46 mtspr SPRN_HSRR0,r13
47 GET_SCRATCH0(r13)
48 hrfid
49 b .
50
51/*
52 * Call kvmppc_handler_trampoline_enter in real mode.
53 * Must be called with interrupts hard-disabled.
54 *
55 * Input Registers:
56 *
57 * LR = return address to continue at after eventually re-enabling MMU
58 */
59_GLOBAL(kvmppc_hv_entry_trampoline)
60 mfmsr r10
61 LOAD_REG_ADDR(r5, kvmppc_hv_entry)
62 li r0,MSR_RI
63 andc r0,r10,r0
64 li r6,MSR_IR | MSR_DR
65 andc r6,r10,r6
66 mtmsrd r0,1 /* clear RI in MSR */
67 mtsrr0 r5
68 mtsrr1 r6
69 RFI
70
71#define ULONG_SIZE 8
72#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
73
74/******************************************************************************
75 * *
76 * Entry code *
77 * *
78 *****************************************************************************/
79
80#define XICS_XIRR 4
81#define XICS_QIRR 0xc
82
83/*
84 * We come in here when wakened from nap mode on a secondary hw thread.
85 * Relocation is off and most register values are lost.
86 * r13 points to the PACA.
87 */
88 .globl kvm_start_guest
89kvm_start_guest:
90 ld r1,PACAEMERGSP(r13)
91 subi r1,r1,STACK_FRAME_OVERHEAD
92
93 /* get vcpu pointer */
94 ld r4, HSTATE_KVM_VCPU(r13)
95
96 /* We got here with an IPI; clear it */
97 ld r5, HSTATE_XICS_PHYS(r13)
98 li r0, 0xff
99 li r6, XICS_QIRR
100 li r7, XICS_XIRR
101 lwzcix r8, r5, r7 /* ack the interrupt */
102 sync
103 stbcix r0, r5, r6 /* clear it */
104 stwcix r8, r5, r7 /* EOI it */
105
106.global kvmppc_hv_entry
107kvmppc_hv_entry:
108
109 /* Required state:
110 *
111 * R4 = vcpu pointer
112 * MSR = ~IR|DR
113 * R13 = PACA
114 * R1 = host R1
115 * all other volatile GPRS = free
116 */
117 mflr r0
118 std r0, HSTATE_VMHANDLER(r13)
119
120 ld r14, VCPU_GPR(r14)(r4)
121 ld r15, VCPU_GPR(r15)(r4)
122 ld r16, VCPU_GPR(r16)(r4)
123 ld r17, VCPU_GPR(r17)(r4)
124 ld r18, VCPU_GPR(r18)(r4)
125 ld r19, VCPU_GPR(r19)(r4)
126 ld r20, VCPU_GPR(r20)(r4)
127 ld r21, VCPU_GPR(r21)(r4)
128 ld r22, VCPU_GPR(r22)(r4)
129 ld r23, VCPU_GPR(r23)(r4)
130 ld r24, VCPU_GPR(r24)(r4)
131 ld r25, VCPU_GPR(r25)(r4)
132 ld r26, VCPU_GPR(r26)(r4)
133 ld r27, VCPU_GPR(r27)(r4)
134 ld r28, VCPU_GPR(r28)(r4)
135 ld r29, VCPU_GPR(r29)(r4)
136 ld r30, VCPU_GPR(r30)(r4)
137 ld r31, VCPU_GPR(r31)(r4)
138
139 /* Load guest PMU registers */
140 /* R4 is live here (vcpu pointer) */
141 li r3, 1
142 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
143 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
144 isync
145 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
146 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
147 lwz r6, VCPU_PMC + 8(r4)
148 lwz r7, VCPU_PMC + 12(r4)
149 lwz r8, VCPU_PMC + 16(r4)
150 lwz r9, VCPU_PMC + 20(r4)
151BEGIN_FTR_SECTION
152 lwz r10, VCPU_PMC + 24(r4)
153 lwz r11, VCPU_PMC + 28(r4)
154END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
155 mtspr SPRN_PMC1, r3
156 mtspr SPRN_PMC2, r5
157 mtspr SPRN_PMC3, r6
158 mtspr SPRN_PMC4, r7
159 mtspr SPRN_PMC5, r8
160 mtspr SPRN_PMC6, r9
161BEGIN_FTR_SECTION
162 mtspr SPRN_PMC7, r10
163 mtspr SPRN_PMC8, r11
164END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
165 ld r3, VCPU_MMCR(r4)
166 ld r5, VCPU_MMCR + 8(r4)
167 ld r6, VCPU_MMCR + 16(r4)
168 mtspr SPRN_MMCR1, r5
169 mtspr SPRN_MMCRA, r6
170 mtspr SPRN_MMCR0, r3
171 isync
172
173 /* Load up FP, VMX and VSX registers */
174 bl kvmppc_load_fp
175
176BEGIN_FTR_SECTION
177 /* Switch DSCR to guest value */
178 ld r5, VCPU_DSCR(r4)
179 mtspr SPRN_DSCR, r5
180END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
181
182 /*
183 * Set the decrementer to the guest decrementer.
184 */
185 ld r8,VCPU_DEC_EXPIRES(r4)
186 mftb r7
187 subf r3,r7,r8
188 mtspr SPRN_DEC,r3
189 stw r3,VCPU_DEC(r4)
190
191 ld r5, VCPU_SPRG0(r4)
192 ld r6, VCPU_SPRG1(r4)
193 ld r7, VCPU_SPRG2(r4)
194 ld r8, VCPU_SPRG3(r4)
195 mtspr SPRN_SPRG0, r5
196 mtspr SPRN_SPRG1, r6
197 mtspr SPRN_SPRG2, r7
198 mtspr SPRN_SPRG3, r8
199
200 /* Save R1 in the PACA */
201 std r1, HSTATE_HOST_R1(r13)
202
203 /* Increment yield count if they have a VPA */
204 ld r3, VCPU_VPA(r4)
205 cmpdi r3, 0
206 beq 25f
207 lwz r5, LPPACA_YIELDCOUNT(r3)
208 addi r5, r5, 1
209 stw r5, LPPACA_YIELDCOUNT(r3)
21025:
211 /* Load up DAR and DSISR */
212 ld r5, VCPU_DAR(r4)
213 lwz r6, VCPU_DSISR(r4)
214 mtspr SPRN_DAR, r5
215 mtspr SPRN_DSISR, r6
216
217 /* Set partition DABR */
218 li r5,3
219 ld r6,VCPU_DABR(r4)
220 mtspr SPRN_DABRX,r5
221 mtspr SPRN_DABR,r6
222
223BEGIN_FTR_SECTION
224 /* Restore AMR and UAMOR, set AMOR to all 1s */
225 ld r5,VCPU_AMR(r4)
226 ld r6,VCPU_UAMOR(r4)
227 li r7,-1
228 mtspr SPRN_AMR,r5
229 mtspr SPRN_UAMOR,r6
230 mtspr SPRN_AMOR,r7
231END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
232
233 /* Clear out SLB */
234 li r6,0
235 slbmte r6,r6
236 slbia
237 ptesync
238
239BEGIN_FTR_SECTION
240 b 30f
241END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
242 /*
243 * POWER7 host -> guest partition switch code.
244 * We don't have to lock against concurrent tlbies,
245 * but we do have to coordinate across hardware threads.
246 */
247 /* Increment entry count iff exit count is zero. */
248 ld r5,HSTATE_KVM_VCORE(r13)
249 addi r9,r5,VCORE_ENTRY_EXIT
25021: lwarx r3,0,r9
251 cmpwi r3,0x100 /* any threads starting to exit? */
252 bge secondary_too_late /* if so we're too late to the party */
253 addi r3,r3,1
254 stwcx. r3,0,r9
255 bne 21b
256
257 /* Primary thread switches to guest partition. */
258 ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
259 lwz r6,VCPU_PTID(r4)
260 cmpwi r6,0
261 bne 20f
262 ld r6,KVM_SDR1(r9)
263 lwz r7,KVM_LPID(r9)
264 li r0,LPID_RSVD /* switch to reserved LPID */
265 mtspr SPRN_LPID,r0
266 ptesync
267 mtspr SPRN_SDR1,r6 /* switch to partition page table */
268 mtspr SPRN_LPID,r7
269 isync
270 li r0,1
271 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
272 b 10f
273
274 /* Secondary threads wait for primary to have done partition switch */
27520: lbz r0,VCORE_IN_GUEST(r5)
276 cmpwi r0,0
277 beq 20b
278
279 /* Set LPCR. Set the MER bit if there is a pending external irq. */
28010: ld r8,KVM_LPCR(r9)
281 ld r0,VCPU_PENDING_EXC(r4)
282 li r7,(1 << BOOK3S_IRQPRIO_EXTERNAL)
283 oris r7,r7,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
284 and. r0,r0,r7
285 beq 11f
286 ori r8,r8,LPCR_MER
28711: mtspr SPRN_LPCR,r8
288 ld r8,KVM_RMOR(r9)
289 mtspr SPRN_RMOR,r8
290 isync
291
292 /* Check if HDEC expires soon */
293 mfspr r3,SPRN_HDEC
294 cmpwi r3,10
295 li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
296 mr r9,r4
297 blt hdec_soon
298
299 /*
300 * Invalidate the TLB if we could possibly have stale TLB
301 * entries for this partition on this core due to the use
302 * of tlbiel.
303 * XXX maybe only need this on primary thread?
304 */
305 ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
306 lwz r5,VCPU_VCPUID(r4)
307 lhz r6,PACAPACAINDEX(r13)
308 rldimi r6,r5,0,62 /* XXX map as if threads 1:1 p:v */
309 lhz r8,VCPU_LAST_CPU(r4)
310 sldi r7,r6,1 /* see if this is the same vcpu */
311 add r7,r7,r9 /* as last ran on this pcpu */
312 lhz r0,KVM_LAST_VCPU(r7)
313 cmpw r6,r8 /* on the same cpu core as last time? */
314 bne 3f
315 cmpw r0,r5 /* same vcpu as this core last ran? */
316 beq 1f
3173: sth r6,VCPU_LAST_CPU(r4) /* if not, invalidate partition TLB */
318 sth r5,KVM_LAST_VCPU(r7)
319 li r6,128
320 mtctr r6
321 li r7,0x800 /* IS field = 0b10 */
322 ptesync
3232: tlbiel r7
324 addi r7,r7,0x1000
325 bdnz 2b
326 ptesync
3271:
328
329 /* Save purr/spurr */
330 mfspr r5,SPRN_PURR
331 mfspr r6,SPRN_SPURR
332 std r5,HSTATE_PURR(r13)
333 std r6,HSTATE_SPURR(r13)
334 ld r7,VCPU_PURR(r4)
335 ld r8,VCPU_SPURR(r4)
336 mtspr SPRN_PURR,r7
337 mtspr SPRN_SPURR,r8
338 b 31f
339
340 /*
341 * PPC970 host -> guest partition switch code.
342 * We have to lock against concurrent tlbies,
343 * using native_tlbie_lock to lock against host tlbies
344 * and kvm->arch.tlbie_lock to lock against guest tlbies.
345 * We also have to invalidate the TLB since its
346 * entries aren't tagged with the LPID.
347 */
34830: ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
349
350 /* first take native_tlbie_lock */
351 .section ".toc","aw"
352toc_tlbie_lock:
353 .tc native_tlbie_lock[TC],native_tlbie_lock
354 .previous
355 ld r3,toc_tlbie_lock@toc(2)
356 lwz r8,PACA_LOCK_TOKEN(r13)
35724: lwarx r0,0,r3
358 cmpwi r0,0
359 bne 24b
360 stwcx. r8,0,r3
361 bne 24b
362 isync
363
364 ld r7,KVM_LPCR(r9) /* use kvm->arch.lpcr to store HID4 */
365 li r0,0x18f
366 rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
367 or r0,r7,r0
368 ptesync
369 sync
370 mtspr SPRN_HID4,r0 /* switch to reserved LPID */
371 isync
372 li r0,0
373 stw r0,0(r3) /* drop native_tlbie_lock */
374
375 /* invalidate the whole TLB */
376 li r0,256
377 mtctr r0
378 li r6,0
37925: tlbiel r6
380 addi r6,r6,0x1000
381 bdnz 25b
382 ptesync
383
384 /* Take the guest's tlbie_lock */
385 addi r3,r9,KVM_TLBIE_LOCK
38624: lwarx r0,0,r3
387 cmpwi r0,0
388 bne 24b
389 stwcx. r8,0,r3
390 bne 24b
391 isync
392 ld r6,KVM_SDR1(r9)
393 mtspr SPRN_SDR1,r6 /* switch to partition page table */
394
395 /* Set up HID4 with the guest's LPID etc. */
396 sync
397 mtspr SPRN_HID4,r7
398 isync
399
400 /* drop the guest's tlbie_lock */
401 li r0,0
402 stw r0,0(r3)
403
404 /* Check if HDEC expires soon */
405 mfspr r3,SPRN_HDEC
406 cmpwi r3,10
407 li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
408 mr r9,r4
409 blt hdec_soon
410
411 /* Enable HDEC interrupts */
412 mfspr r0,SPRN_HID0
413 li r3,1
414 rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
415 sync
416 mtspr SPRN_HID0,r0
417 mfspr r0,SPRN_HID0
418 mfspr r0,SPRN_HID0
419 mfspr r0,SPRN_HID0
420 mfspr r0,SPRN_HID0
421 mfspr r0,SPRN_HID0
422 mfspr r0,SPRN_HID0
423
424 /* Load up guest SLB entries */
42531: lwz r5,VCPU_SLB_MAX(r4)
426 cmpwi r5,0
427 beq 9f
428 mtctr r5
429 addi r6,r4,VCPU_SLB
4301: ld r8,VCPU_SLB_E(r6)
431 ld r9,VCPU_SLB_V(r6)
432 slbmte r9,r8
433 addi r6,r6,VCPU_SLB_SIZE
434 bdnz 1b
4359:
436
437 /* Restore state of CTRL run bit; assume 1 on entry */
438 lwz r5,VCPU_CTRL(r4)
439 andi. r5,r5,1
440 bne 4f
441 mfspr r6,SPRN_CTRLF
442 clrrdi r6,r6,1
443 mtspr SPRN_CTRLT,r6
4444:
445 ld r6, VCPU_CTR(r4)
446 lwz r7, VCPU_XER(r4)
447
448 mtctr r6
449 mtxer r7
450
451 /* Move SRR0 and SRR1 into the respective regs */
452 ld r6, VCPU_SRR0(r4)
453 ld r7, VCPU_SRR1(r4)
454 mtspr SPRN_SRR0, r6
455 mtspr SPRN_SRR1, r7
456
457 ld r10, VCPU_PC(r4)
458
459 ld r11, VCPU_MSR(r4) /* r10 = vcpu->arch.msr & ~MSR_HV */
460 rldicl r11, r11, 63 - MSR_HV_LG, 1
461 rotldi r11, r11, 1 + MSR_HV_LG
462 ori r11, r11, MSR_ME
463
464fast_guest_return:
465 mtspr SPRN_HSRR0,r10
466 mtspr SPRN_HSRR1,r11
467
468 /* Activate guest mode, so faults get handled by KVM */
469 li r9, KVM_GUEST_MODE_GUEST
470 stb r9, HSTATE_IN_GUEST(r13)
471
472 /* Enter guest */
473
474 ld r5, VCPU_LR(r4)
475 lwz r6, VCPU_CR(r4)
476 mtlr r5
477 mtcr r6
478
479 ld r0, VCPU_GPR(r0)(r4)
480 ld r1, VCPU_GPR(r1)(r4)
481 ld r2, VCPU_GPR(r2)(r4)
482 ld r3, VCPU_GPR(r3)(r4)
483 ld r5, VCPU_GPR(r5)(r4)
484 ld r6, VCPU_GPR(r6)(r4)
485 ld r7, VCPU_GPR(r7)(r4)
486 ld r8, VCPU_GPR(r8)(r4)
487 ld r9, VCPU_GPR(r9)(r4)
488 ld r10, VCPU_GPR(r10)(r4)
489 ld r11, VCPU_GPR(r11)(r4)
490 ld r12, VCPU_GPR(r12)(r4)
491 ld r13, VCPU_GPR(r13)(r4)
492
493 ld r4, VCPU_GPR(r4)(r4)
494
495 hrfid
496 b .
497
498/******************************************************************************
499 * *
500 * Exit code *
501 * *
502 *****************************************************************************/
503
504/*
505 * We come here from the first-level interrupt handlers.
506 */
507 .globl kvmppc_interrupt
508kvmppc_interrupt:
509 /*
510 * Register contents:
511 * R12 = interrupt vector
512 * R13 = PACA
513 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
514 * guest R13 saved in SPRN_SCRATCH0
515 */
516 /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
517 std r9, HSTATE_HOST_R2(r13)
518 ld r9, HSTATE_KVM_VCPU(r13)
519
520 /* Save registers */
521
522 std r0, VCPU_GPR(r0)(r9)
523 std r1, VCPU_GPR(r1)(r9)
524 std r2, VCPU_GPR(r2)(r9)
525 std r3, VCPU_GPR(r3)(r9)
526 std r4, VCPU_GPR(r4)(r9)
527 std r5, VCPU_GPR(r5)(r9)
528 std r6, VCPU_GPR(r6)(r9)
529 std r7, VCPU_GPR(r7)(r9)
530 std r8, VCPU_GPR(r8)(r9)
531 ld r0, HSTATE_HOST_R2(r13)
532 std r0, VCPU_GPR(r9)(r9)
533 std r10, VCPU_GPR(r10)(r9)
534 std r11, VCPU_GPR(r11)(r9)
535 ld r3, HSTATE_SCRATCH0(r13)
536 lwz r4, HSTATE_SCRATCH1(r13)
537 std r3, VCPU_GPR(r12)(r9)
538 stw r4, VCPU_CR(r9)
539
540 /* Restore R1/R2 so we can handle faults */
541 ld r1, HSTATE_HOST_R1(r13)
542 ld r2, PACATOC(r13)
543
544 mfspr r10, SPRN_SRR0
545 mfspr r11, SPRN_SRR1
546 std r10, VCPU_SRR0(r9)
547 std r11, VCPU_SRR1(r9)
548 andi. r0, r12, 2 /* need to read HSRR0/1? */
549 beq 1f
550 mfspr r10, SPRN_HSRR0
551 mfspr r11, SPRN_HSRR1
552 clrrdi r12, r12, 2
5531: std r10, VCPU_PC(r9)
554 std r11, VCPU_MSR(r9)
555
556 GET_SCRATCH0(r3)
557 mflr r4
558 std r3, VCPU_GPR(r13)(r9)
559 std r4, VCPU_LR(r9)
560
561 /* Unset guest mode */
562 li r0, KVM_GUEST_MODE_NONE
563 stb r0, HSTATE_IN_GUEST(r13)
564
565 stw r12,VCPU_TRAP(r9)
566
567 /* See if this is a leftover HDEC interrupt */
568 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
569 bne 2f
570 mfspr r3,SPRN_HDEC
571 cmpwi r3,0
572 bge ignore_hdec
5732:
574 /* See if this is something we can handle in real mode */
575 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
576 beq hcall_try_real_mode
577hcall_real_cont:
578
579 /* Check for mediated interrupts (could be done earlier really ...) */
580BEGIN_FTR_SECTION
581 cmpwi r12,BOOK3S_INTERRUPT_EXTERNAL
582 bne+ 1f
583 ld r5,VCPU_KVM(r9)
584 ld r5,KVM_LPCR(r5)
585 andi. r0,r11,MSR_EE
586 beq 1f
587 andi. r0,r5,LPCR_MER
588 bne bounce_ext_interrupt
5891:
590END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
591
592 /* Save DEC */
593 mfspr r5,SPRN_DEC
594 mftb r6
595 extsw r5,r5
596 add r5,r5,r6
597 std r5,VCPU_DEC_EXPIRES(r9)
598
599 /* Save HEIR (HV emulation assist reg) in last_inst
600 if this is an HEI (HV emulation interrupt, e40) */
601 li r3,-1
602BEGIN_FTR_SECTION
603 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
604 bne 11f
605 mfspr r3,SPRN_HEIR
606END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
60711: stw r3,VCPU_LAST_INST(r9)
608
609 /* Save more register state */
610 mfxer r5
611 mfdar r6
612 mfdsisr r7
613 mfctr r8
614
615 stw r5, VCPU_XER(r9)
616 std r6, VCPU_DAR(r9)
617 stw r7, VCPU_DSISR(r9)
618 std r8, VCPU_CTR(r9)
619 /* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */
620BEGIN_FTR_SECTION
621 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
622 beq 6f
623END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
6247: std r6, VCPU_FAULT_DAR(r9)
625 stw r7, VCPU_FAULT_DSISR(r9)
626
627 /* Save guest CTRL register, set runlatch to 1 */
628 mfspr r6,SPRN_CTRLF
629 stw r6,VCPU_CTRL(r9)
630 andi. r0,r6,1
631 bne 4f
632 ori r6,r6,1
633 mtspr SPRN_CTRLT,r6
6344:
635 /* Read the guest SLB and save it away */
636 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
637 mtctr r0
638 li r6,0
639 addi r7,r9,VCPU_SLB
640 li r5,0
6411: slbmfee r8,r6
642 andis. r0,r8,SLB_ESID_V@h
643 beq 2f
644 add r8,r8,r6 /* put index in */
645 slbmfev r3,r6
646 std r8,VCPU_SLB_E(r7)
647 std r3,VCPU_SLB_V(r7)
648 addi r7,r7,VCPU_SLB_SIZE
649 addi r5,r5,1
6502: addi r6,r6,1
651 bdnz 1b
652 stw r5,VCPU_SLB_MAX(r9)
653
654 /*
655 * Save the guest PURR/SPURR
656 */
657BEGIN_FTR_SECTION
658 mfspr r5,SPRN_PURR
659 mfspr r6,SPRN_SPURR
660 ld r7,VCPU_PURR(r9)
661 ld r8,VCPU_SPURR(r9)
662 std r5,VCPU_PURR(r9)
663 std r6,VCPU_SPURR(r9)
664 subf r5,r7,r5
665 subf r6,r8,r6
666
667 /*
668 * Restore host PURR/SPURR and add guest times
669 * so that the time in the guest gets accounted.
670 */
671 ld r3,HSTATE_PURR(r13)
672 ld r4,HSTATE_SPURR(r13)
673 add r3,r3,r5
674 add r4,r4,r6
675 mtspr SPRN_PURR,r3
676 mtspr SPRN_SPURR,r4
677END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
678
679 /* Clear out SLB */
680 li r5,0
681 slbmte r5,r5
682 slbia
683 ptesync
684
685hdec_soon:
686BEGIN_FTR_SECTION
687 b 32f
688END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
689 /*
690 * POWER7 guest -> host partition switch code.
691 * We don't have to lock against tlbies but we do
692 * have to coordinate the hardware threads.
693 */
694 /* Increment the threads-exiting-guest count in the 0xff00
695 bits of vcore->entry_exit_count */
696 lwsync
697 ld r5,HSTATE_KVM_VCORE(r13)
698 addi r6,r5,VCORE_ENTRY_EXIT
69941: lwarx r3,0,r6
700 addi r0,r3,0x100
701 stwcx. r0,0,r6
702 bne 41b
703
704 /*
705 * At this point we have an interrupt that we have to pass
706 * up to the kernel or qemu; we can't handle it in real mode.
707 * Thus we have to do a partition switch, so we have to
708 * collect the other threads, if we are the first thread
709 * to take an interrupt. To do this, we set the HDEC to 0,
710 * which causes an HDEC interrupt in all threads within 2ns
711 * because the HDEC register is shared between all 4 threads.
712 * However, we don't need to bother if this is an HDEC
713 * interrupt, since the other threads will already be on their
714 * way here in that case.
715 */
716 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
717 beq 40f
718 cmpwi r3,0x100 /* Are we the first here? */
719 bge 40f
720 cmpwi r3,1
721 ble 40f
722 li r0,0
723 mtspr SPRN_HDEC,r0
72440:
725
726 /* Secondary threads wait for primary to do partition switch */
727 ld r4,VCPU_KVM(r9) /* pointer to struct kvm */
728 ld r5,HSTATE_KVM_VCORE(r13)
729 lwz r3,VCPU_PTID(r9)
730 cmpwi r3,0
731 beq 15f
732 HMT_LOW
73313: lbz r3,VCORE_IN_GUEST(r5)
734 cmpwi r3,0
735 bne 13b
736 HMT_MEDIUM
737 b 16f
738
739 /* Primary thread waits for all the secondaries to exit guest */
74015: lwz r3,VCORE_ENTRY_EXIT(r5)
741 srwi r0,r3,8
742 clrldi r3,r3,56
743 cmpw r3,r0
744 bne 15b
745 isync
746
747 /* Primary thread switches back to host partition */
748 ld r6,KVM_HOST_SDR1(r4)
749 lwz r7,KVM_HOST_LPID(r4)
750 li r8,LPID_RSVD /* switch to reserved LPID */
751 mtspr SPRN_LPID,r8
752 ptesync
753 mtspr SPRN_SDR1,r6 /* switch to partition page table */
754 mtspr SPRN_LPID,r7
755 isync
756 li r0,0
757 stb r0,VCORE_IN_GUEST(r5)
758 lis r8,0x7fff /* MAX_INT@h */
759 mtspr SPRN_HDEC,r8
760
76116: ld r8,KVM_HOST_LPCR(r4)
762 mtspr SPRN_LPCR,r8
763 isync
764 b 33f
765
766 /*
767 * PPC970 guest -> host partition switch code.
768 * We have to lock against concurrent tlbies, and
769 * we have to flush the whole TLB.
770 */
77132: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */
772
773 /* Take the guest's tlbie_lock */
774 lwz r8,PACA_LOCK_TOKEN(r13)
775 addi r3,r4,KVM_TLBIE_LOCK
77624: lwarx r0,0,r3
777 cmpwi r0,0
778 bne 24b
779 stwcx. r8,0,r3
780 bne 24b
781 isync
782
783 ld r7,KVM_HOST_LPCR(r4) /* use kvm->arch.host_lpcr for HID4 */
784 li r0,0x18f
785 rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
786 or r0,r7,r0
787 ptesync
788 sync
789 mtspr SPRN_HID4,r0 /* switch to reserved LPID */
790 isync
791 li r0,0
792 stw r0,0(r3) /* drop guest tlbie_lock */
793
794 /* invalidate the whole TLB */
795 li r0,256
796 mtctr r0
797 li r6,0
79825: tlbiel r6
799 addi r6,r6,0x1000
800 bdnz 25b
801 ptesync
802
803 /* take native_tlbie_lock */
804 ld r3,toc_tlbie_lock@toc(2)
80524: lwarx r0,0,r3
806 cmpwi r0,0
807 bne 24b
808 stwcx. r8,0,r3
809 bne 24b
810 isync
811
812 ld r6,KVM_HOST_SDR1(r4)
813 mtspr SPRN_SDR1,r6 /* switch to host page table */
814
815 /* Set up host HID4 value */
816 sync
817 mtspr SPRN_HID4,r7
818 isync
819 li r0,0
820 stw r0,0(r3) /* drop native_tlbie_lock */
821
822 lis r8,0x7fff /* MAX_INT@h */
823 mtspr SPRN_HDEC,r8
824
825 /* Disable HDEC interrupts */
826 mfspr r0,SPRN_HID0
827 li r3,0
828 rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
829 sync
830 mtspr SPRN_HID0,r0
831 mfspr r0,SPRN_HID0
832 mfspr r0,SPRN_HID0
833 mfspr r0,SPRN_HID0
834 mfspr r0,SPRN_HID0
835 mfspr r0,SPRN_HID0
836 mfspr r0,SPRN_HID0
837
838 /* load host SLB entries */
83933: ld r8,PACA_SLBSHADOWPTR(r13)
840
841 .rept SLB_NUM_BOLTED
842 ld r5,SLBSHADOW_SAVEAREA(r8)
843 ld r6,SLBSHADOW_SAVEAREA+8(r8)
844 andis. r7,r5,SLB_ESID_V@h
845 beq 1f
846 slbmte r6,r5
8471: addi r8,r8,16
848 .endr
849
850 /* Save and reset AMR and UAMOR before turning on the MMU */
851BEGIN_FTR_SECTION
852 mfspr r5,SPRN_AMR
853 mfspr r6,SPRN_UAMOR
854 std r5,VCPU_AMR(r9)
855 std r6,VCPU_UAMOR(r9)
856 li r6,0
857 mtspr SPRN_AMR,r6
858END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
859
860 /* Restore host DABR and DABRX */
861 ld r5,HSTATE_DABR(r13)
862 li r6,7
863 mtspr SPRN_DABR,r5
864 mtspr SPRN_DABRX,r6
865
866 /* Switch DSCR back to host value */
867BEGIN_FTR_SECTION
868 mfspr r8, SPRN_DSCR
869 ld r7, HSTATE_DSCR(r13)
870 std r8, VCPU_DSCR(r7)
871 mtspr SPRN_DSCR, r7
872END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
873
874 /* Save non-volatile GPRs */
875 std r14, VCPU_GPR(r14)(r9)
876 std r15, VCPU_GPR(r15)(r9)
877 std r16, VCPU_GPR(r16)(r9)
878 std r17, VCPU_GPR(r17)(r9)
879 std r18, VCPU_GPR(r18)(r9)
880 std r19, VCPU_GPR(r19)(r9)
881 std r20, VCPU_GPR(r20)(r9)
882 std r21, VCPU_GPR(r21)(r9)
883 std r22, VCPU_GPR(r22)(r9)
884 std r23, VCPU_GPR(r23)(r9)
885 std r24, VCPU_GPR(r24)(r9)
886 std r25, VCPU_GPR(r25)(r9)
887 std r26, VCPU_GPR(r26)(r9)
888 std r27, VCPU_GPR(r27)(r9)
889 std r28, VCPU_GPR(r28)(r9)
890 std r29, VCPU_GPR(r29)(r9)
891 std r30, VCPU_GPR(r30)(r9)
892 std r31, VCPU_GPR(r31)(r9)
893
894 /* Save SPRGs */
895 mfspr r3, SPRN_SPRG0
896 mfspr r4, SPRN_SPRG1
897 mfspr r5, SPRN_SPRG2
898 mfspr r6, SPRN_SPRG3
899 std r3, VCPU_SPRG0(r9)
900 std r4, VCPU_SPRG1(r9)
901 std r5, VCPU_SPRG2(r9)
902 std r6, VCPU_SPRG3(r9)
903
904 /* Increment yield count if they have a VPA */
905 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
906 cmpdi r8, 0
907 beq 25f
908 lwz r3, LPPACA_YIELDCOUNT(r8)
909 addi r3, r3, 1
910 stw r3, LPPACA_YIELDCOUNT(r8)
91125:
912 /* Save PMU registers if requested */
913 /* r8 and cr0.eq are live here */
914 li r3, 1
915 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
916 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
917 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
918 isync
919 beq 21f /* if no VPA, save PMU stuff anyway */
920 lbz r7, LPPACA_PMCINUSE(r8)
921 cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
922 bne 21f
923 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
924 b 22f
92521: mfspr r5, SPRN_MMCR1
926 mfspr r6, SPRN_MMCRA
927 std r4, VCPU_MMCR(r9)
928 std r5, VCPU_MMCR + 8(r9)
929 std r6, VCPU_MMCR + 16(r9)
930 mfspr r3, SPRN_PMC1
931 mfspr r4, SPRN_PMC2
932 mfspr r5, SPRN_PMC3
933 mfspr r6, SPRN_PMC4
934 mfspr r7, SPRN_PMC5
935 mfspr r8, SPRN_PMC6
936BEGIN_FTR_SECTION
937 mfspr r10, SPRN_PMC7
938 mfspr r11, SPRN_PMC8
939END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
940 stw r3, VCPU_PMC(r9)
941 stw r4, VCPU_PMC + 4(r9)
942 stw r5, VCPU_PMC + 8(r9)
943 stw r6, VCPU_PMC + 12(r9)
944 stw r7, VCPU_PMC + 16(r9)
945 stw r8, VCPU_PMC + 20(r9)
946BEGIN_FTR_SECTION
947 stw r10, VCPU_PMC + 24(r9)
948 stw r11, VCPU_PMC + 28(r9)
949END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
95022:
951 /* save FP state */
952 mr r3, r9
953 bl .kvmppc_save_fp
954
955 /* Secondary threads go off to take a nap on POWER7 */
956BEGIN_FTR_SECTION
957 lwz r0,VCPU_PTID(r3)
958 cmpwi r0,0
959 bne secondary_nap
960END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
961
962 /*
963 * Reload DEC. HDEC interrupts were disabled when
964 * we reloaded the host's LPCR value.
965 */
966 ld r3, HSTATE_DECEXP(r13)
967 mftb r4
968 subf r4, r4, r3
969 mtspr SPRN_DEC, r4
970
971 /* Reload the host's PMU registers */
972 ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
973 lbz r4, LPPACA_PMCINUSE(r3)
974 cmpwi r4, 0
975 beq 23f /* skip if not */
976 lwz r3, HSTATE_PMC(r13)
977 lwz r4, HSTATE_PMC + 4(r13)
978 lwz r5, HSTATE_PMC + 8(r13)
979 lwz r6, HSTATE_PMC + 12(r13)
980 lwz r8, HSTATE_PMC + 16(r13)
981 lwz r9, HSTATE_PMC + 20(r13)
982BEGIN_FTR_SECTION
983 lwz r10, HSTATE_PMC + 24(r13)
984 lwz r11, HSTATE_PMC + 28(r13)
985END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
986 mtspr SPRN_PMC1, r3
987 mtspr SPRN_PMC2, r4
988 mtspr SPRN_PMC3, r5
989 mtspr SPRN_PMC4, r6
990 mtspr SPRN_PMC5, r8
991 mtspr SPRN_PMC6, r9
992BEGIN_FTR_SECTION
993 mtspr SPRN_PMC7, r10
994 mtspr SPRN_PMC8, r11
995END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
996 ld r3, HSTATE_MMCR(r13)
997 ld r4, HSTATE_MMCR + 8(r13)
998 ld r5, HSTATE_MMCR + 16(r13)
999 mtspr SPRN_MMCR1, r4
1000 mtspr SPRN_MMCRA, r5
1001 mtspr SPRN_MMCR0, r3
1002 isync
100323:
1004 /*
1005 * For external and machine check interrupts, we need
1006 * to call the Linux handler to process the interrupt.
1007 * We do that by jumping to the interrupt vector address
1008 * which we have in r12. The [h]rfid at the end of the
1009 * handler will return to the book3s_hv_interrupts.S code.
1010 * For other interrupts we do the rfid to get back
1011 * to the book3s_interrupts.S code here.
1012 */
1013 ld r8, HSTATE_VMHANDLER(r13)
1014 ld r7, HSTATE_HOST_MSR(r13)
1015
1016 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
1017 beq 11f
1018 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
1019
1020 /* RFI into the highmem handler, or branch to interrupt handler */
102112: mfmsr r6
1022 mtctr r12
1023 li r0, MSR_RI
1024 andc r6, r6, r0
1025 mtmsrd r6, 1 /* Clear RI in MSR */
1026 mtsrr0 r8
1027 mtsrr1 r7
1028 beqctr
1029 RFI
1030
103111:
1032BEGIN_FTR_SECTION
1033 b 12b
1034END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1035 mtspr SPRN_HSRR0, r8
1036 mtspr SPRN_HSRR1, r7
1037 ba 0x500
1038
10396: mfspr r6,SPRN_HDAR
1040 mfspr r7,SPRN_HDSISR
1041 b 7b
1042
1043/*
1044 * Try to handle an hcall in real mode.
1045 * Returns to the guest if we handle it, or continues on up to
1046 * the kernel if we can't (i.e. if we don't have a handler for
1047 * it, or if the handler returns H_TOO_HARD).
1048 */
1049 .globl hcall_try_real_mode
1050hcall_try_real_mode:
1051 ld r3,VCPU_GPR(r3)(r9)
1052 andi. r0,r11,MSR_PR
1053 bne hcall_real_cont
1054 clrrdi r3,r3,2
1055 cmpldi r3,hcall_real_table_end - hcall_real_table
1056 bge hcall_real_cont
1057 LOAD_REG_ADDR(r4, hcall_real_table)
1058 lwzx r3,r3,r4
1059 cmpwi r3,0
1060 beq hcall_real_cont
1061 add r3,r3,r4
1062 mtctr r3
1063 mr r3,r9 /* get vcpu pointer */
1064 ld r4,VCPU_GPR(r4)(r9)
1065 bctrl
1066 cmpdi r3,H_TOO_HARD
1067 beq hcall_real_fallback
1068 ld r4,HSTATE_KVM_VCPU(r13)
1069 std r3,VCPU_GPR(r3)(r4)
1070 ld r10,VCPU_PC(r4)
1071 ld r11,VCPU_MSR(r4)
1072 b fast_guest_return
1073
1074 /* We've attempted a real mode hcall, but it's punted it back
1075 * to userspace. We need to restore some clobbered volatiles
1076 * before resuming the pass-it-to-qemu path */
1077hcall_real_fallback:
1078 li r12,BOOK3S_INTERRUPT_SYSCALL
1079 ld r9, HSTATE_KVM_VCPU(r13)
1080 ld r11, VCPU_MSR(r9)
1081
1082 b hcall_real_cont
1083
1084 .globl hcall_real_table
1085hcall_real_table:
1086 .long 0 /* 0 - unused */
1087 .long .kvmppc_h_remove - hcall_real_table
1088 .long .kvmppc_h_enter - hcall_real_table
1089 .long .kvmppc_h_read - hcall_real_table
1090 .long 0 /* 0x10 - H_CLEAR_MOD */
1091 .long 0 /* 0x14 - H_CLEAR_REF */
1092 .long .kvmppc_h_protect - hcall_real_table
1093 .long 0 /* 0x1c - H_GET_TCE */
1094 .long .kvmppc_h_put_tce - hcall_real_table
1095 .long 0 /* 0x24 - H_SET_SPRG0 */
1096 .long .kvmppc_h_set_dabr - hcall_real_table
1097 .long 0 /* 0x2c */
1098 .long 0 /* 0x30 */
1099 .long 0 /* 0x34 */
1100 .long 0 /* 0x38 */
1101 .long 0 /* 0x3c */
1102 .long 0 /* 0x40 */
1103 .long 0 /* 0x44 */
1104 .long 0 /* 0x48 */
1105 .long 0 /* 0x4c */
1106 .long 0 /* 0x50 */
1107 .long 0 /* 0x54 */
1108 .long 0 /* 0x58 */
1109 .long 0 /* 0x5c */
1110 .long 0 /* 0x60 */
1111 .long 0 /* 0x64 */
1112 .long 0 /* 0x68 */
1113 .long 0 /* 0x6c */
1114 .long 0 /* 0x70 */
1115 .long 0 /* 0x74 */
1116 .long 0 /* 0x78 */
1117 .long 0 /* 0x7c */
1118 .long 0 /* 0x80 */
1119 .long 0 /* 0x84 */
1120 .long 0 /* 0x88 */
1121 .long 0 /* 0x8c */
1122 .long 0 /* 0x90 */
1123 .long 0 /* 0x94 */
1124 .long 0 /* 0x98 */
1125 .long 0 /* 0x9c */
1126 .long 0 /* 0xa0 */
1127 .long 0 /* 0xa4 */
1128 .long 0 /* 0xa8 */
1129 .long 0 /* 0xac */
1130 .long 0 /* 0xb0 */
1131 .long 0 /* 0xb4 */
1132 .long 0 /* 0xb8 */
1133 .long 0 /* 0xbc */
1134 .long 0 /* 0xc0 */
1135 .long 0 /* 0xc4 */
1136 .long 0 /* 0xc8 */
1137 .long 0 /* 0xcc */
1138 .long 0 /* 0xd0 */
1139 .long 0 /* 0xd4 */
1140 .long 0 /* 0xd8 */
1141 .long 0 /* 0xdc */
1142 .long 0 /* 0xe0 */
1143 .long 0 /* 0xe4 */
1144 .long 0 /* 0xe8 */
1145 .long 0 /* 0xec */
1146 .long 0 /* 0xf0 */
1147 .long 0 /* 0xf4 */
1148 .long 0 /* 0xf8 */
1149 .long 0 /* 0xfc */
1150 .long 0 /* 0x100 */
1151 .long 0 /* 0x104 */
1152 .long 0 /* 0x108 */
1153 .long 0 /* 0x10c */
1154 .long 0 /* 0x110 */
1155 .long 0 /* 0x114 */
1156 .long 0 /* 0x118 */
1157 .long 0 /* 0x11c */
1158 .long 0 /* 0x120 */
1159 .long .kvmppc_h_bulk_remove - hcall_real_table
1160hcall_real_table_end:
1161
1162ignore_hdec:
1163 mr r4,r9
1164 b fast_guest_return
1165
1166bounce_ext_interrupt:
1167 mr r4,r9
1168 mtspr SPRN_SRR0,r10
1169 mtspr SPRN_SRR1,r11
1170 li r10,BOOK3S_INTERRUPT_EXTERNAL
1171 LOAD_REG_IMMEDIATE(r11,MSR_SF | MSR_ME);
1172 b fast_guest_return
1173
1174_GLOBAL(kvmppc_h_set_dabr)
1175 std r4,VCPU_DABR(r3)
1176 mtspr SPRN_DABR,r4
1177 li r3,0
1178 blr
1179
1180secondary_too_late:
1181 ld r5,HSTATE_KVM_VCORE(r13)
1182 HMT_LOW
118313: lbz r3,VCORE_IN_GUEST(r5)
1184 cmpwi r3,0
1185 bne 13b
1186 HMT_MEDIUM
1187 ld r11,PACA_SLBSHADOWPTR(r13)
1188
1189 .rept SLB_NUM_BOLTED
1190 ld r5,SLBSHADOW_SAVEAREA(r11)
1191 ld r6,SLBSHADOW_SAVEAREA+8(r11)
1192 andis. r7,r5,SLB_ESID_V@h
1193 beq 1f
1194 slbmte r6,r5
11951: addi r11,r11,16
1196 .endr
1197 b 50f
1198
1199secondary_nap:
1200 /* Clear any pending IPI */
120150: ld r5, HSTATE_XICS_PHYS(r13)
1202 li r0, 0xff
1203 li r6, XICS_QIRR
1204 stbcix r0, r5, r6
1205
1206 /* increment the nap count and then go to nap mode */
1207 ld r4, HSTATE_KVM_VCORE(r13)
1208 addi r4, r4, VCORE_NAP_COUNT
1209 lwsync /* make previous updates visible */
121051: lwarx r3, 0, r4
1211 addi r3, r3, 1
1212 stwcx. r3, 0, r4
1213 bne 51b
1214 isync
1215
1216 mfspr r4, SPRN_LPCR
1217 li r0, LPCR_PECE
1218 andc r4, r4, r0
1219 ori r4, r4, LPCR_PECE0 /* exit nap on interrupt */
1220 mtspr SPRN_LPCR, r4
1221 li r0, 0
1222 std r0, HSTATE_SCRATCH0(r13)
1223 ptesync
1224 ld r0, HSTATE_SCRATCH0(r13)
12251: cmpd r0, r0
1226 bne 1b
1227 nap
1228 b .
1229
1230/*
1231 * Save away FP, VMX and VSX registers.
1232 * r3 = vcpu pointer
1233 */
1234_GLOBAL(kvmppc_save_fp)
1235 mfmsr r9
1236 ori r8,r9,MSR_FP
1237#ifdef CONFIG_ALTIVEC
1238BEGIN_FTR_SECTION
1239 oris r8,r8,MSR_VEC@h
1240END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1241#endif
1242#ifdef CONFIG_VSX
1243BEGIN_FTR_SECTION
1244 oris r8,r8,MSR_VSX@h
1245END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1246#endif
1247 mtmsrd r8
1248 isync
1249#ifdef CONFIG_VSX
1250BEGIN_FTR_SECTION
1251 reg = 0
1252 .rept 32
1253 li r6,reg*16+VCPU_VSRS
1254 stxvd2x reg,r6,r3
1255 reg = reg + 1
1256 .endr
1257FTR_SECTION_ELSE
1258#endif
1259 reg = 0
1260 .rept 32
1261 stfd reg,reg*8+VCPU_FPRS(r3)
1262 reg = reg + 1
1263 .endr
1264#ifdef CONFIG_VSX
1265ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
1266#endif
1267 mffs fr0
1268 stfd fr0,VCPU_FPSCR(r3)
1269
1270#ifdef CONFIG_ALTIVEC
1271BEGIN_FTR_SECTION
1272 reg = 0
1273 .rept 32
1274 li r6,reg*16+VCPU_VRS
1275 stvx reg,r6,r3
1276 reg = reg + 1
1277 .endr
1278 mfvscr vr0
1279 li r6,VCPU_VSCR
1280 stvx vr0,r6,r3
1281END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1282#endif
1283 mfspr r6,SPRN_VRSAVE
1284 stw r6,VCPU_VRSAVE(r3)
1285 mtmsrd r9
1286 isync
1287 blr
1288
1289/*
1290 * Load up FP, VMX and VSX registers
1291 * r4 = vcpu pointer
1292 */
1293 .globl kvmppc_load_fp
1294kvmppc_load_fp:
1295 mfmsr r9
1296 ori r8,r9,MSR_FP
1297#ifdef CONFIG_ALTIVEC
1298BEGIN_FTR_SECTION
1299 oris r8,r8,MSR_VEC@h
1300END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1301#endif
1302#ifdef CONFIG_VSX
1303BEGIN_FTR_SECTION
1304 oris r8,r8,MSR_VSX@h
1305END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1306#endif
1307 mtmsrd r8
1308 isync
1309 lfd fr0,VCPU_FPSCR(r4)
1310 MTFSF_L(fr0)
1311#ifdef CONFIG_VSX
1312BEGIN_FTR_SECTION
1313 reg = 0
1314 .rept 32
1315 li r7,reg*16+VCPU_VSRS
1316 lxvd2x reg,r7,r4
1317 reg = reg + 1
1318 .endr
1319FTR_SECTION_ELSE
1320#endif
1321 reg = 0
1322 .rept 32
1323 lfd reg,reg*8+VCPU_FPRS(r4)
1324 reg = reg + 1
1325 .endr
1326#ifdef CONFIG_VSX
1327ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
1328#endif
1329
1330#ifdef CONFIG_ALTIVEC
1331BEGIN_FTR_SECTION
1332 li r7,VCPU_VSCR
1333 lvx vr0,r7,r4
1334 mtvscr vr0
1335 reg = 0
1336 .rept 32
1337 li r7,reg*16+VCPU_VRS
1338 lvx reg,r7,r4
1339 reg = reg + 1
1340 .endr
1341END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1342#endif
1343 lwz r7,VCPU_VRSAVE(r4)
1344 mtspr SPRN_VRSAVE,r7
1345 blr
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 2f0bc928b08a..c54b0e30cf3f 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -29,8 +29,7 @@
29#define ULONG_SIZE 8 29#define ULONG_SIZE 8
30#define FUNC(name) GLUE(.,name) 30#define FUNC(name) GLUE(.,name)
31 31
32#define GET_SHADOW_VCPU(reg) \ 32#define GET_SHADOW_VCPU_R13
33 addi reg, r13, PACA_KVM_SVCPU
34 33
35#define DISABLE_INTERRUPTS \ 34#define DISABLE_INTERRUPTS \
36 mfmsr r0; \ 35 mfmsr r0; \
@@ -43,8 +42,8 @@
43#define ULONG_SIZE 4 42#define ULONG_SIZE 4
44#define FUNC(name) name 43#define FUNC(name) name
45 44
46#define GET_SHADOW_VCPU(reg) \ 45#define GET_SHADOW_VCPU_R13 \
47 lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2) 46 lwz r13, (THREAD + THREAD_KVM_SVCPU)(r2)
48 47
49#define DISABLE_INTERRUPTS \ 48#define DISABLE_INTERRUPTS \
50 mfmsr r0; \ 49 mfmsr r0; \
@@ -85,7 +84,7 @@
85 * r3: kvm_run pointer 84 * r3: kvm_run pointer
86 * r4: vcpu pointer 85 * r4: vcpu pointer
87 */ 86 */
88_GLOBAL(__kvmppc_vcpu_entry) 87_GLOBAL(__kvmppc_vcpu_run)
89 88
90kvm_start_entry: 89kvm_start_entry:
91 /* Write correct stack frame */ 90 /* Write correct stack frame */
@@ -107,17 +106,11 @@ kvm_start_entry:
107 /* Load non-volatile guest state from the vcpu */ 106 /* Load non-volatile guest state from the vcpu */
108 VCPU_LOAD_NVGPRS(r4) 107 VCPU_LOAD_NVGPRS(r4)
109 108
110 GET_SHADOW_VCPU(r5) 109kvm_start_lightweight:
111
112 /* Save R1/R2 in the PACA */
113 PPC_STL r1, SVCPU_HOST_R1(r5)
114 PPC_STL r2, SVCPU_HOST_R2(r5)
115 110
116 /* XXX swap in/out on load? */ 111 GET_SHADOW_VCPU_R13
117 PPC_LL r3, VCPU_HIGHMEM_HANDLER(r4) 112 PPC_LL r3, VCPU_HIGHMEM_HANDLER(r4)
118 PPC_STL r3, SVCPU_VMHANDLER(r5) 113 PPC_STL r3, HSTATE_VMHANDLER(r13)
119
120kvm_start_lightweight:
121 114
122 PPC_LL r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */ 115 PPC_LL r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
123 116
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 79751d8dd131..41cb0017e757 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -21,7 +21,6 @@
21#include <linux/kvm_host.h> 21#include <linux/kvm_host.h>
22#include <linux/hash.h> 22#include <linux/hash.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include "trace.h"
25 24
26#include <asm/kvm_ppc.h> 25#include <asm/kvm_ppc.h>
27#include <asm/kvm_book3s.h> 26#include <asm/kvm_book3s.h>
@@ -29,6 +28,8 @@
29#include <asm/mmu_context.h> 28#include <asm/mmu_context.h>
30#include <asm/hw_irq.h> 29#include <asm/hw_irq.h>
31 30
31#include "trace.h"
32
32#define PTE_SIZE 12 33#define PTE_SIZE 12
33 34
34static struct kmem_cache *hpte_cache; 35static struct kmem_cache *hpte_cache;
@@ -58,30 +59,31 @@ static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
58void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte) 59void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
59{ 60{
60 u64 index; 61 u64 index;
62 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
61 63
62 trace_kvm_book3s_mmu_map(pte); 64 trace_kvm_book3s_mmu_map(pte);
63 65
64 spin_lock(&vcpu->arch.mmu_lock); 66 spin_lock(&vcpu3s->mmu_lock);
65 67
66 /* Add to ePTE list */ 68 /* Add to ePTE list */
67 index = kvmppc_mmu_hash_pte(pte->pte.eaddr); 69 index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
68 hlist_add_head_rcu(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]); 70 hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);
69 71
70 /* Add to ePTE_long list */ 72 /* Add to ePTE_long list */
71 index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr); 73 index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
72 hlist_add_head_rcu(&pte->list_pte_long, 74 hlist_add_head_rcu(&pte->list_pte_long,
73 &vcpu->arch.hpte_hash_pte_long[index]); 75 &vcpu3s->hpte_hash_pte_long[index]);
74 76
75 /* Add to vPTE list */ 77 /* Add to vPTE list */
76 index = kvmppc_mmu_hash_vpte(pte->pte.vpage); 78 index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
77 hlist_add_head_rcu(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]); 79 hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);
78 80
79 /* Add to vPTE_long list */ 81 /* Add to vPTE_long list */
80 index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage); 82 index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
81 hlist_add_head_rcu(&pte->list_vpte_long, 83 hlist_add_head_rcu(&pte->list_vpte_long,
82 &vcpu->arch.hpte_hash_vpte_long[index]); 84 &vcpu3s->hpte_hash_vpte_long[index]);
83 85
84 spin_unlock(&vcpu->arch.mmu_lock); 86 spin_unlock(&vcpu3s->mmu_lock);
85} 87}
86 88
87static void free_pte_rcu(struct rcu_head *head) 89static void free_pte_rcu(struct rcu_head *head)
@@ -92,16 +94,18 @@ static void free_pte_rcu(struct rcu_head *head)
92 94
93static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) 95static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
94{ 96{
97 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
98
95 trace_kvm_book3s_mmu_invalidate(pte); 99 trace_kvm_book3s_mmu_invalidate(pte);
96 100
97 /* Different for 32 and 64 bit */ 101 /* Different for 32 and 64 bit */
98 kvmppc_mmu_invalidate_pte(vcpu, pte); 102 kvmppc_mmu_invalidate_pte(vcpu, pte);
99 103
100 spin_lock(&vcpu->arch.mmu_lock); 104 spin_lock(&vcpu3s->mmu_lock);
101 105
102 /* pte already invalidated in between? */ 106 /* pte already invalidated in between? */
103 if (hlist_unhashed(&pte->list_pte)) { 107 if (hlist_unhashed(&pte->list_pte)) {
104 spin_unlock(&vcpu->arch.mmu_lock); 108 spin_unlock(&vcpu3s->mmu_lock);
105 return; 109 return;
106 } 110 }
107 111
@@ -115,14 +119,15 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
115 else 119 else
116 kvm_release_pfn_clean(pte->pfn); 120 kvm_release_pfn_clean(pte->pfn);
117 121
118 spin_unlock(&vcpu->arch.mmu_lock); 122 spin_unlock(&vcpu3s->mmu_lock);
119 123
120 vcpu->arch.hpte_cache_count--; 124 vcpu3s->hpte_cache_count--;
121 call_rcu(&pte->rcu_head, free_pte_rcu); 125 call_rcu(&pte->rcu_head, free_pte_rcu);
122} 126}
123 127
124static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu) 128static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
125{ 129{
130 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
126 struct hpte_cache *pte; 131 struct hpte_cache *pte;
127 struct hlist_node *node; 132 struct hlist_node *node;
128 int i; 133 int i;
@@ -130,7 +135,7 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
130 rcu_read_lock(); 135 rcu_read_lock();
131 136
132 for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { 137 for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
133 struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i]; 138 struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
134 139
135 hlist_for_each_entry_rcu(pte, node, list, list_vpte_long) 140 hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
136 invalidate_pte(vcpu, pte); 141 invalidate_pte(vcpu, pte);
@@ -141,12 +146,13 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
141 146
142static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea) 147static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
143{ 148{
149 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
144 struct hlist_head *list; 150 struct hlist_head *list;
145 struct hlist_node *node; 151 struct hlist_node *node;
146 struct hpte_cache *pte; 152 struct hpte_cache *pte;
147 153
148 /* Find the list of entries in the map */ 154 /* Find the list of entries in the map */
149 list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)]; 155 list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];
150 156
151 rcu_read_lock(); 157 rcu_read_lock();
152 158
@@ -160,12 +166,13 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
160 166
161static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea) 167static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
162{ 168{
169 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
163 struct hlist_head *list; 170 struct hlist_head *list;
164 struct hlist_node *node; 171 struct hlist_node *node;
165 struct hpte_cache *pte; 172 struct hpte_cache *pte;
166 173
167 /* Find the list of entries in the map */ 174 /* Find the list of entries in the map */
168 list = &vcpu->arch.hpte_hash_pte_long[ 175 list = &vcpu3s->hpte_hash_pte_long[
169 kvmppc_mmu_hash_pte_long(guest_ea)]; 176 kvmppc_mmu_hash_pte_long(guest_ea)];
170 177
171 rcu_read_lock(); 178 rcu_read_lock();
@@ -203,12 +210,13 @@ void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
203/* Flush with mask 0xfffffffff */ 210/* Flush with mask 0xfffffffff */
204static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp) 211static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
205{ 212{
213 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
206 struct hlist_head *list; 214 struct hlist_head *list;
207 struct hlist_node *node; 215 struct hlist_node *node;
208 struct hpte_cache *pte; 216 struct hpte_cache *pte;
209 u64 vp_mask = 0xfffffffffULL; 217 u64 vp_mask = 0xfffffffffULL;
210 218
211 list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)]; 219 list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];
212 220
213 rcu_read_lock(); 221 rcu_read_lock();
214 222
@@ -223,12 +231,13 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
223/* Flush with mask 0xffffff000 */ 231/* Flush with mask 0xffffff000 */
224static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp) 232static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
225{ 233{
234 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
226 struct hlist_head *list; 235 struct hlist_head *list;
227 struct hlist_node *node; 236 struct hlist_node *node;
228 struct hpte_cache *pte; 237 struct hpte_cache *pte;
229 u64 vp_mask = 0xffffff000ULL; 238 u64 vp_mask = 0xffffff000ULL;
230 239
231 list = &vcpu->arch.hpte_hash_vpte_long[ 240 list = &vcpu3s->hpte_hash_vpte_long[
232 kvmppc_mmu_hash_vpte_long(guest_vp)]; 241 kvmppc_mmu_hash_vpte_long(guest_vp)];
233 242
234 rcu_read_lock(); 243 rcu_read_lock();
@@ -261,6 +270,7 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
261 270
262void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) 271void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
263{ 272{
273 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
264 struct hlist_node *node; 274 struct hlist_node *node;
265 struct hpte_cache *pte; 275 struct hpte_cache *pte;
266 int i; 276 int i;
@@ -270,7 +280,7 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
270 rcu_read_lock(); 280 rcu_read_lock();
271 281
272 for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { 282 for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
273 struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i]; 283 struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
274 284
275 hlist_for_each_entry_rcu(pte, node, list, list_vpte_long) 285 hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
276 if ((pte->pte.raddr >= pa_start) && 286 if ((pte->pte.raddr >= pa_start) &&
@@ -283,12 +293,13 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
283 293
284struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu) 294struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
285{ 295{
296 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
286 struct hpte_cache *pte; 297 struct hpte_cache *pte;
287 298
288 pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL); 299 pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
289 vcpu->arch.hpte_cache_count++; 300 vcpu3s->hpte_cache_count++;
290 301
291 if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM) 302 if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
292 kvmppc_mmu_pte_flush_all(vcpu); 303 kvmppc_mmu_pte_flush_all(vcpu);
293 304
294 return pte; 305 return pte;
@@ -309,17 +320,19 @@ static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
309 320
310int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu) 321int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
311{ 322{
323 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
324
312 /* init hpte lookup hashes */ 325 /* init hpte lookup hashes */
313 kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte, 326 kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
314 ARRAY_SIZE(vcpu->arch.hpte_hash_pte)); 327 ARRAY_SIZE(vcpu3s->hpte_hash_pte));
315 kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte_long, 328 kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
316 ARRAY_SIZE(vcpu->arch.hpte_hash_pte_long)); 329 ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
317 kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte, 330 kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
318 ARRAY_SIZE(vcpu->arch.hpte_hash_vpte)); 331 ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
319 kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long, 332 kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
320 ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long)); 333 ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
321 334
322 spin_lock_init(&vcpu->arch.mmu_lock); 335 spin_lock_init(&vcpu3s->mmu_lock);
323 336
324 return 0; 337 return 0;
325} 338}
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
new file mode 100644
index 000000000000..0c0d3f274437
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -0,0 +1,1029 @@
1/*
2 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
3 *
4 * Authors:
5 * Alexander Graf <agraf@suse.de>
6 * Kevin Wolf <mail@kevin-wolf.de>
7 * Paul Mackerras <paulus@samba.org>
8 *
9 * Description:
10 * Functions relating to running KVM on Book 3S processors where
11 * we don't have access to hypervisor mode, and we run the guest
12 * in problem state (user mode).
13 *
14 * This file is derived from arch/powerpc/kvm/44x.c,
15 * by Hollis Blanchard <hollisb@us.ibm.com>.
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License, version 2, as
19 * published by the Free Software Foundation.
20 */
21
22#include <linux/kvm_host.h>
23#include <linux/err.h>
24#include <linux/slab.h>
25
26#include <asm/reg.h>
27#include <asm/cputable.h>
28#include <asm/cacheflush.h>
29#include <asm/tlbflush.h>
30#include <asm/uaccess.h>
31#include <asm/io.h>
32#include <asm/kvm_ppc.h>
33#include <asm/kvm_book3s.h>
34#include <asm/mmu_context.h>
35#include <linux/gfp.h>
36#include <linux/sched.h>
37#include <linux/vmalloc.h>
38#include <linux/highmem.h>
39
40#include "trace.h"
41
42/* #define EXIT_DEBUG */
43/* #define DEBUG_EXT */
44
45static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
46 ulong msr);
47
48/* Some compatibility defines */
49#ifdef CONFIG_PPC_BOOK3S_32
50#define MSR_USER32 MSR_USER
51#define MSR_USER64 MSR_USER
52#define HW_PAGE_SIZE PAGE_SIZE
53#endif
54
55void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
56{
57#ifdef CONFIG_PPC_BOOK3S_64
58 memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
59 memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
60 sizeof(get_paca()->shadow_vcpu));
61 to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
62#endif
63
64#ifdef CONFIG_PPC_BOOK3S_32
65 current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
66#endif
67}
68
69void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
70{
71#ifdef CONFIG_PPC_BOOK3S_64
72 memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
73 memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
74 sizeof(get_paca()->shadow_vcpu));
75 to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
76#endif
77
78 kvmppc_giveup_ext(vcpu, MSR_FP);
79 kvmppc_giveup_ext(vcpu, MSR_VEC);
80 kvmppc_giveup_ext(vcpu, MSR_VSX);
81}
82
83static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
84{
85 ulong smsr = vcpu->arch.shared->msr;
86
87 /* Guest MSR values */
88 smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
89 /* Process MSR values */
90 smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
91 /* External providers the guest reserved */
92 smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
93 /* 64-bit Process MSR values */
94#ifdef CONFIG_PPC_BOOK3S_64
95 smsr |= MSR_ISF | MSR_HV;
96#endif
97 vcpu->arch.shadow_msr = smsr;
98}
99
100void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
101{
102 ulong old_msr = vcpu->arch.shared->msr;
103
104#ifdef EXIT_DEBUG
105 printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
106#endif
107
108 msr &= to_book3s(vcpu)->msr_mask;
109 vcpu->arch.shared->msr = msr;
110 kvmppc_recalc_shadow_msr(vcpu);
111
112 if (msr & MSR_POW) {
113 if (!vcpu->arch.pending_exceptions) {
114 kvm_vcpu_block(vcpu);
115 vcpu->stat.halt_wakeup++;
116
117 /* Unset POW bit after we woke up */
118 msr &= ~MSR_POW;
119 vcpu->arch.shared->msr = msr;
120 }
121 }
122
123 if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
124 (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
125 kvmppc_mmu_flush_segments(vcpu);
126 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
127
128 /* Preload magic page segment when in kernel mode */
129 if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
130 struct kvm_vcpu_arch *a = &vcpu->arch;
131
132 if (msr & MSR_DR)
133 kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
134 else
135 kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
136 }
137 }
138
139 /* Preload FPU if it's enabled */
140 if (vcpu->arch.shared->msr & MSR_FP)
141 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
142}
143
144void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
145{
146 u32 host_pvr;
147
148 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
149 vcpu->arch.pvr = pvr;
150#ifdef CONFIG_PPC_BOOK3S_64
151 if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
152 kvmppc_mmu_book3s_64_init(vcpu);
153 to_book3s(vcpu)->hior = 0xfff00000;
154 to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
155 } else
156#endif
157 {
158 kvmppc_mmu_book3s_32_init(vcpu);
159 to_book3s(vcpu)->hior = 0;
160 to_book3s(vcpu)->msr_mask = 0xffffffffULL;
161 }
162
163 /* If we are in hypervisor level on 970, we can tell the CPU to
164 * treat DCBZ as 32 bytes store */
165 vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
166 if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
167 !strcmp(cur_cpu_spec->platform, "ppc970"))
168 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
169
170 /* Cell performs badly if MSR_FEx are set. So let's hope nobody
171 really needs them in a VM on Cell and force disable them. */
172 if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
173 to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
174
175#ifdef CONFIG_PPC_BOOK3S_32
176 /* 32 bit Book3S always has 32 byte dcbz */
177 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
178#endif
179
180 /* On some CPUs we can execute paired single operations natively */
181 asm ( "mfpvr %0" : "=r"(host_pvr));
182 switch (host_pvr) {
183 case 0x00080200: /* lonestar 2.0 */
184 case 0x00088202: /* lonestar 2.2 */
185 case 0x70000100: /* gekko 1.0 */
186 case 0x00080100: /* gekko 2.0 */
187 case 0x00083203: /* gekko 2.3a */
188 case 0x00083213: /* gekko 2.3b */
189 case 0x00083204: /* gekko 2.4 */
190 case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */
191 case 0x00087200: /* broadway */
192 vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
193 /* Enable HID2.PSE - in case we need it later */
194 mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
195 }
196}
197
198/* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
199 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
200 * emulate 32 bytes dcbz length.
201 *
202 * The Book3s_64 inventors also realized this case and implemented a special bit
203 * in the HID5 register, which is a hypervisor ressource. Thus we can't use it.
204 *
205 * My approach here is to patch the dcbz instruction on executing pages.
206 */
207static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
208{
209 struct page *hpage;
210 u64 hpage_offset;
211 u32 *page;
212 int i;
213
214 hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
215 if (is_error_page(hpage)) {
216 kvm_release_page_clean(hpage);
217 return;
218 }
219
220 hpage_offset = pte->raddr & ~PAGE_MASK;
221 hpage_offset &= ~0xFFFULL;
222 hpage_offset /= 4;
223
224 get_page(hpage);
225 page = kmap_atomic(hpage, KM_USER0);
226
227 /* patch dcbz into reserved instruction, so we trap */
228 for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
229 if ((page[i] & 0xff0007ff) == INS_DCBZ)
230 page[i] &= 0xfffffff7;
231
232 kunmap_atomic(page, KM_USER0);
233 put_page(hpage);
234}
235
236static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
237{
238 ulong mp_pa = vcpu->arch.magic_page_pa;
239
240 if (unlikely(mp_pa) &&
241 unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
242 return 1;
243 }
244
245 return kvm_is_visible_gfn(vcpu->kvm, gfn);
246}
247
248int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
249 ulong eaddr, int vec)
250{
251 bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
252 int r = RESUME_GUEST;
253 int relocated;
254 int page_found = 0;
255 struct kvmppc_pte pte;
256 bool is_mmio = false;
257 bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
258 bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
259 u64 vsid;
260
261 relocated = data ? dr : ir;
262
263 /* Resolve real address if translation turned on */
264 if (relocated) {
265 page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
266 } else {
267 pte.may_execute = true;
268 pte.may_read = true;
269 pte.may_write = true;
270 pte.raddr = eaddr & KVM_PAM;
271 pte.eaddr = eaddr;
272 pte.vpage = eaddr >> 12;
273 }
274
275 switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
276 case 0:
277 pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
278 break;
279 case MSR_DR:
280 case MSR_IR:
281 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
282
283 if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
284 pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
285 else
286 pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
287 pte.vpage |= vsid;
288
289 if (vsid == -1)
290 page_found = -EINVAL;
291 break;
292 }
293
294 if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
295 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
296 /*
297 * If we do the dcbz hack, we have to NX on every execution,
298 * so we can patch the executing code. This renders our guest
299 * NX-less.
300 */
301 pte.may_execute = !data;
302 }
303
304 if (page_found == -ENOENT) {
305 /* Page not found in guest PTE entries */
306 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
307 vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
308 vcpu->arch.shared->msr |=
309 (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
310 kvmppc_book3s_queue_irqprio(vcpu, vec);
311 } else if (page_found == -EPERM) {
312 /* Storage protection */
313 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
314 vcpu->arch.shared->dsisr =
315 to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
316 vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
317 vcpu->arch.shared->msr |=
318 (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
319 kvmppc_book3s_queue_irqprio(vcpu, vec);
320 } else if (page_found == -EINVAL) {
321 /* Page not found in guest SLB */
322 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
323 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
324 } else if (!is_mmio &&
325 kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
326 /* The guest's PTE is not mapped yet. Map on the host */
327 kvmppc_mmu_map_page(vcpu, &pte);
328 if (data)
329 vcpu->stat.sp_storage++;
330 else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
331 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
332 kvmppc_patch_dcbz(vcpu, &pte);
333 } else {
334 /* MMIO */
335 vcpu->stat.mmio_exits++;
336 vcpu->arch.paddr_accessed = pte.raddr;
337 r = kvmppc_emulate_mmio(run, vcpu);
338 if ( r == RESUME_HOST_NV )
339 r = RESUME_HOST;
340 }
341
342 return r;
343}
344
345static inline int get_fpr_index(int i)
346{
347#ifdef CONFIG_VSX
348 i *= 2;
349#endif
350 return i;
351}
352
353/* Give up external provider (FPU, Altivec, VSX) */
354void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
355{
356 struct thread_struct *t = &current->thread;
357 u64 *vcpu_fpr = vcpu->arch.fpr;
358#ifdef CONFIG_VSX
359 u64 *vcpu_vsx = vcpu->arch.vsr;
360#endif
361 u64 *thread_fpr = (u64*)t->fpr;
362 int i;
363
364 if (!(vcpu->arch.guest_owned_ext & msr))
365 return;
366
367#ifdef DEBUG_EXT
368 printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
369#endif
370
371 switch (msr) {
372 case MSR_FP:
373 giveup_fpu(current);
374 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
375 vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
376
377 vcpu->arch.fpscr = t->fpscr.val;
378 break;
379 case MSR_VEC:
380#ifdef CONFIG_ALTIVEC
381 giveup_altivec(current);
382 memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
383 vcpu->arch.vscr = t->vscr;
384#endif
385 break;
386 case MSR_VSX:
387#ifdef CONFIG_VSX
388 __giveup_vsx(current);
389 for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
390 vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
391#endif
392 break;
393 default:
394 BUG();
395 }
396
397 vcpu->arch.guest_owned_ext &= ~msr;
398 current->thread.regs->msr &= ~msr;
399 kvmppc_recalc_shadow_msr(vcpu);
400}
401
402static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
403{
404 ulong srr0 = kvmppc_get_pc(vcpu);
405 u32 last_inst = kvmppc_get_last_inst(vcpu);
406 int ret;
407
408 ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
409 if (ret == -ENOENT) {
410 ulong msr = vcpu->arch.shared->msr;
411
412 msr = kvmppc_set_field(msr, 33, 33, 1);
413 msr = kvmppc_set_field(msr, 34, 36, 0);
414 vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
415 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
416 return EMULATE_AGAIN;
417 }
418
419 return EMULATE_DONE;
420}
421
422static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
423{
424
425 /* Need to do paired single emulation? */
426 if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
427 return EMULATE_DONE;
428
429 /* Read out the instruction */
430 if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
431 /* Need to emulate */
432 return EMULATE_FAIL;
433
434 return EMULATE_AGAIN;
435}
436
437/* Handle external providers (FPU, Altivec, VSX) */
438static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
439 ulong msr)
440{
441 struct thread_struct *t = &current->thread;
442 u64 *vcpu_fpr = vcpu->arch.fpr;
443#ifdef CONFIG_VSX
444 u64 *vcpu_vsx = vcpu->arch.vsr;
445#endif
446 u64 *thread_fpr = (u64*)t->fpr;
447 int i;
448
449 /* When we have paired singles, we emulate in software */
450 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
451 return RESUME_GUEST;
452
453 if (!(vcpu->arch.shared->msr & msr)) {
454 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
455 return RESUME_GUEST;
456 }
457
458 /* We already own the ext */
459 if (vcpu->arch.guest_owned_ext & msr) {
460 return RESUME_GUEST;
461 }
462
463#ifdef DEBUG_EXT
464 printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
465#endif
466
467 current->thread.regs->msr |= msr;
468
469 switch (msr) {
470 case MSR_FP:
471 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
472 thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
473
474 t->fpscr.val = vcpu->arch.fpscr;
475 t->fpexc_mode = 0;
476 kvmppc_load_up_fpu();
477 break;
478 case MSR_VEC:
479#ifdef CONFIG_ALTIVEC
480 memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
481 t->vscr = vcpu->arch.vscr;
482 t->vrsave = -1;
483 kvmppc_load_up_altivec();
484#endif
485 break;
486 case MSR_VSX:
487#ifdef CONFIG_VSX
488 for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
489 thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
490 kvmppc_load_up_vsx();
491#endif
492 break;
493 default:
494 BUG();
495 }
496
497 vcpu->arch.guest_owned_ext |= msr;
498
499 kvmppc_recalc_shadow_msr(vcpu);
500
501 return RESUME_GUEST;
502}
503
504int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
505 unsigned int exit_nr)
506{
507 int r = RESUME_HOST;
508
509 vcpu->stat.sum_exits++;
510
511 run->exit_reason = KVM_EXIT_UNKNOWN;
512 run->ready_for_interrupt_injection = 1;
513
514 trace_kvm_book3s_exit(exit_nr, vcpu);
515 kvm_resched(vcpu);
516 switch (exit_nr) {
517 case BOOK3S_INTERRUPT_INST_STORAGE:
518 vcpu->stat.pf_instruc++;
519
520#ifdef CONFIG_PPC_BOOK3S_32
521 /* We set segments as unused segments when invalidating them. So
522 * treat the respective fault as segment fault. */
523 if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
524 == SR_INVALID) {
525 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
526 r = RESUME_GUEST;
527 break;
528 }
529#endif
530
531 /* only care about PTEG not found errors, but leave NX alone */
532 if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
533 r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
534 vcpu->stat.sp_instruc++;
535 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
536 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
537 /*
538 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
539 * so we can't use the NX bit inside the guest. Let's cross our fingers,
540 * that no guest that needs the dcbz hack does NX.
541 */
542 kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
543 r = RESUME_GUEST;
544 } else {
545 vcpu->arch.shared->msr |=
546 to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
547 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
548 r = RESUME_GUEST;
549 }
550 break;
551 case BOOK3S_INTERRUPT_DATA_STORAGE:
552 {
553 ulong dar = kvmppc_get_fault_dar(vcpu);
554 vcpu->stat.pf_storage++;
555
556#ifdef CONFIG_PPC_BOOK3S_32
557 /* We set segments as unused segments when invalidating them. So
558 * treat the respective fault as segment fault. */
559 if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
560 kvmppc_mmu_map_segment(vcpu, dar);
561 r = RESUME_GUEST;
562 break;
563 }
564#endif
565
566 /* The only case we need to handle is missing shadow PTEs */
567 if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
568 r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
569 } else {
570 vcpu->arch.shared->dar = dar;
571 vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
572 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
573 r = RESUME_GUEST;
574 }
575 break;
576 }
577 case BOOK3S_INTERRUPT_DATA_SEGMENT:
578 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
579 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
580 kvmppc_book3s_queue_irqprio(vcpu,
581 BOOK3S_INTERRUPT_DATA_SEGMENT);
582 }
583 r = RESUME_GUEST;
584 break;
585 case BOOK3S_INTERRUPT_INST_SEGMENT:
586 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
587 kvmppc_book3s_queue_irqprio(vcpu,
588 BOOK3S_INTERRUPT_INST_SEGMENT);
589 }
590 r = RESUME_GUEST;
591 break;
592 /* We're good on these - the host merely wanted to get our attention */
593 case BOOK3S_INTERRUPT_DECREMENTER:
594 vcpu->stat.dec_exits++;
595 r = RESUME_GUEST;
596 break;
597 case BOOK3S_INTERRUPT_EXTERNAL:
598 vcpu->stat.ext_intr_exits++;
599 r = RESUME_GUEST;
600 break;
601 case BOOK3S_INTERRUPT_PERFMON:
602 r = RESUME_GUEST;
603 break;
604 case BOOK3S_INTERRUPT_PROGRAM:
605 {
606 enum emulation_result er;
607 ulong flags;
608
609program_interrupt:
610 flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
611
612 if (vcpu->arch.shared->msr & MSR_PR) {
613#ifdef EXIT_DEBUG
614 printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
615#endif
616 if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
617 (INS_DCBZ & 0xfffffff7)) {
618 kvmppc_core_queue_program(vcpu, flags);
619 r = RESUME_GUEST;
620 break;
621 }
622 }
623
624 vcpu->stat.emulated_inst_exits++;
625 er = kvmppc_emulate_instruction(run, vcpu);
626 switch (er) {
627 case EMULATE_DONE:
628 r = RESUME_GUEST_NV;
629 break;
630 case EMULATE_AGAIN:
631 r = RESUME_GUEST;
632 break;
633 case EMULATE_FAIL:
634 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
635 __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
636 kvmppc_core_queue_program(vcpu, flags);
637 r = RESUME_GUEST;
638 break;
639 case EMULATE_DO_MMIO:
640 run->exit_reason = KVM_EXIT_MMIO;
641 r = RESUME_HOST_NV;
642 break;
643 default:
644 BUG();
645 }
646 break;
647 }
648 case BOOK3S_INTERRUPT_SYSCALL:
649 if (vcpu->arch.osi_enabled &&
650 (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
651 (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
652 /* MOL hypercalls */
653 u64 *gprs = run->osi.gprs;
654 int i;
655
656 run->exit_reason = KVM_EXIT_OSI;
657 for (i = 0; i < 32; i++)
658 gprs[i] = kvmppc_get_gpr(vcpu, i);
659 vcpu->arch.osi_needed = 1;
660 r = RESUME_HOST_NV;
661 } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
662 (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
663 /* KVM PV hypercalls */
664 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
665 r = RESUME_GUEST;
666 } else {
667 /* Guest syscalls */
668 vcpu->stat.syscall_exits++;
669 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
670 r = RESUME_GUEST;
671 }
672 break;
673 case BOOK3S_INTERRUPT_FP_UNAVAIL:
674 case BOOK3S_INTERRUPT_ALTIVEC:
675 case BOOK3S_INTERRUPT_VSX:
676 {
677 int ext_msr = 0;
678
679 switch (exit_nr) {
680 case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP; break;
681 case BOOK3S_INTERRUPT_ALTIVEC: ext_msr = MSR_VEC; break;
682 case BOOK3S_INTERRUPT_VSX: ext_msr = MSR_VSX; break;
683 }
684
685 switch (kvmppc_check_ext(vcpu, exit_nr)) {
686 case EMULATE_DONE:
687 /* everything ok - let's enable the ext */
688 r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
689 break;
690 case EMULATE_FAIL:
691 /* we need to emulate this instruction */
692 goto program_interrupt;
693 break;
694 default:
695 /* nothing to worry about - go again */
696 break;
697 }
698 break;
699 }
700 case BOOK3S_INTERRUPT_ALIGNMENT:
701 if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
702 vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
703 kvmppc_get_last_inst(vcpu));
704 vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
705 kvmppc_get_last_inst(vcpu));
706 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
707 }
708 r = RESUME_GUEST;
709 break;
710 case BOOK3S_INTERRUPT_MACHINE_CHECK:
711 case BOOK3S_INTERRUPT_TRACE:
712 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
713 r = RESUME_GUEST;
714 break;
715 default:
716 /* Ugh - bork here! What did we get? */
717 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
718 exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
719 r = RESUME_HOST;
720 BUG();
721 break;
722 }
723
724
725 if (!(r & RESUME_HOST)) {
726 /* To avoid clobbering exit_reason, only check for signals if
727 * we aren't already exiting to userspace for some other
728 * reason. */
729 if (signal_pending(current)) {
730#ifdef EXIT_DEBUG
731 printk(KERN_EMERG "KVM: Going back to host\n");
732#endif
733 vcpu->stat.signal_exits++;
734 run->exit_reason = KVM_EXIT_INTR;
735 r = -EINTR;
736 } else {
737 /* In case an interrupt came in that was triggered
738 * from userspace (like DEC), we need to check what
739 * to inject now! */
740 kvmppc_core_deliver_interrupts(vcpu);
741 }
742 }
743
744 trace_kvm_book3s_reenter(r, vcpu);
745
746 return r;
747}
748
749int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
750 struct kvm_sregs *sregs)
751{
752 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
753 int i;
754
755 sregs->pvr = vcpu->arch.pvr;
756
757 sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
758 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
759 for (i = 0; i < 64; i++) {
760 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
761 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
762 }
763 } else {
764 for (i = 0; i < 16; i++)
765 sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];
766
767 for (i = 0; i < 8; i++) {
768 sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
769 sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
770 }
771 }
772
773 return 0;
774}
775
776int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
777 struct kvm_sregs *sregs)
778{
779 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
780 int i;
781
782 kvmppc_set_pvr(vcpu, sregs->pvr);
783
784 vcpu3s->sdr1 = sregs->u.s.sdr1;
785 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
786 for (i = 0; i < 64; i++) {
787 vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
788 sregs->u.s.ppc64.slb[i].slbe);
789 }
790 } else {
791 for (i = 0; i < 16; i++) {
792 vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
793 }
794 for (i = 0; i < 8; i++) {
795 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
796 (u32)sregs->u.s.ppc32.ibat[i]);
797 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
798 (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
799 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
800 (u32)sregs->u.s.ppc32.dbat[i]);
801 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
802 (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
803 }
804 }
805
806 /* Flush the MMU after messing with the segments */
807 kvmppc_mmu_pte_flush(vcpu, 0, 0);
808
809 return 0;
810}
811
812int kvmppc_core_check_processor_compat(void)
813{
814 return 0;
815}
816
817struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
818{
819 struct kvmppc_vcpu_book3s *vcpu_book3s;
820 struct kvm_vcpu *vcpu;
821 int err = -ENOMEM;
822 unsigned long p;
823
824 vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
825 if (!vcpu_book3s)
826 goto out;
827
828 vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
829 kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
830 if (!vcpu_book3s->shadow_vcpu)
831 goto free_vcpu;
832
833 vcpu = &vcpu_book3s->vcpu;
834 err = kvm_vcpu_init(vcpu, kvm, id);
835 if (err)
836 goto free_shadow_vcpu;
837
838 p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
839 /* the real shared page fills the last 4k of our page */
840 vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
841 if (!p)
842 goto uninit_vcpu;
843
844 vcpu->arch.host_retip = kvm_return_point;
845 vcpu->arch.host_msr = mfmsr();
846#ifdef CONFIG_PPC_BOOK3S_64
847 /* default to book3s_64 (970fx) */
848 vcpu->arch.pvr = 0x3C0301;
849#else
850 /* default to book3s_32 (750) */
851 vcpu->arch.pvr = 0x84202;
852#endif
853 kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
854 vcpu->arch.slb_nr = 64;
855
856 /* remember where some real-mode handlers are */
857 vcpu->arch.trampoline_lowmem = __pa(kvmppc_handler_lowmem_trampoline);
858 vcpu->arch.trampoline_enter = __pa(kvmppc_handler_trampoline_enter);
859 vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
860#ifdef CONFIG_PPC_BOOK3S_64
861 vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
862#else
863 vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
864#endif
865
866 vcpu->arch.shadow_msr = MSR_USER64;
867
868 err = kvmppc_mmu_init(vcpu);
869 if (err < 0)
870 goto uninit_vcpu;
871
872 return vcpu;
873
874uninit_vcpu:
875 kvm_vcpu_uninit(vcpu);
876free_shadow_vcpu:
877 kfree(vcpu_book3s->shadow_vcpu);
878free_vcpu:
879 vfree(vcpu_book3s);
880out:
881 return ERR_PTR(err);
882}
883
884void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
885{
886 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
887
888 free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
889 kvm_vcpu_uninit(vcpu);
890 kfree(vcpu_book3s->shadow_vcpu);
891 vfree(vcpu_book3s);
892}
893
894int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
895{
896 int ret;
897 double fpr[32][TS_FPRWIDTH];
898 unsigned int fpscr;
899 int fpexc_mode;
900#ifdef CONFIG_ALTIVEC
901 vector128 vr[32];
902 vector128 vscr;
903 unsigned long uninitialized_var(vrsave);
904 int used_vr;
905#endif
906#ifdef CONFIG_VSX
907 int used_vsr;
908#endif
909 ulong ext_msr;
910
911 /* No need to go into the guest when all we do is going out */
912 if (signal_pending(current)) {
913 kvm_run->exit_reason = KVM_EXIT_INTR;
914 return -EINTR;
915 }
916
917 /* Save FPU state in stack */
918 if (current->thread.regs->msr & MSR_FP)
919 giveup_fpu(current);
920 memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
921 fpscr = current->thread.fpscr.val;
922 fpexc_mode = current->thread.fpexc_mode;
923
924#ifdef CONFIG_ALTIVEC
925 /* Save Altivec state in stack */
926 used_vr = current->thread.used_vr;
927 if (used_vr) {
928 if (current->thread.regs->msr & MSR_VEC)
929 giveup_altivec(current);
930 memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
931 vscr = current->thread.vscr;
932 vrsave = current->thread.vrsave;
933 }
934#endif
935
936#ifdef CONFIG_VSX
937 /* Save VSX state in stack */
938 used_vsr = current->thread.used_vsr;
939 if (used_vsr && (current->thread.regs->msr & MSR_VSX))
940 __giveup_vsx(current);
941#endif
942
943 /* Remember the MSR with disabled extensions */
944 ext_msr = current->thread.regs->msr;
945
946 /* Preload FPU if it's enabled */
947 if (vcpu->arch.shared->msr & MSR_FP)
948 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
949
950 kvm_guest_enter();
951
952 ret = __kvmppc_vcpu_run(kvm_run, vcpu);
953
954 kvm_guest_exit();
955
956 local_irq_disable();
957
958 current->thread.regs->msr = ext_msr;
959
960 /* Make sure we save the guest FPU/Altivec/VSX state */
961 kvmppc_giveup_ext(vcpu, MSR_FP);
962 kvmppc_giveup_ext(vcpu, MSR_VEC);
963 kvmppc_giveup_ext(vcpu, MSR_VSX);
964
965 /* Restore FPU state from stack */
966 memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
967 current->thread.fpscr.val = fpscr;
968 current->thread.fpexc_mode = fpexc_mode;
969
970#ifdef CONFIG_ALTIVEC
971 /* Restore Altivec state from stack */
972 if (used_vr && current->thread.used_vr) {
973 memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
974 current->thread.vscr = vscr;
975 current->thread.vrsave = vrsave;
976 }
977 current->thread.used_vr = used_vr;
978#endif
979
980#ifdef CONFIG_VSX
981 current->thread.used_vsr = used_vsr;
982#endif
983
984 return ret;
985}
986
987int kvmppc_core_prepare_memory_region(struct kvm *kvm,
988 struct kvm_userspace_memory_region *mem)
989{
990 return 0;
991}
992
993void kvmppc_core_commit_memory_region(struct kvm *kvm,
994 struct kvm_userspace_memory_region *mem)
995{
996}
997
998int kvmppc_core_init_vm(struct kvm *kvm)
999{
1000 return 0;
1001}
1002
1003void kvmppc_core_destroy_vm(struct kvm *kvm)
1004{
1005}
1006
1007static int kvmppc_book3s_init(void)
1008{
1009 int r;
1010
1011 r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
1012 THIS_MODULE);
1013
1014 if (r)
1015 return r;
1016
1017 r = kvmppc_mmu_hpte_sysinit();
1018
1019 return r;
1020}
1021
1022static void kvmppc_book3s_exit(void)
1023{
1024 kvmppc_mmu_hpte_sysexit();
1025 kvm_exit();
1026}
1027
1028module_init(kvmppc_book3s_init);
1029module_exit(kvmppc_book3s_exit);
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 1a1b34487e71..c1f877c4a884 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -36,41 +36,44 @@
36#if defined(CONFIG_PPC_BOOK3S_64) 36#if defined(CONFIG_PPC_BOOK3S_64)
37 37
38#define LOAD_SHADOW_VCPU(reg) GET_PACA(reg) 38#define LOAD_SHADOW_VCPU(reg) GET_PACA(reg)
39#define SHADOW_VCPU_OFF PACA_KVM_SVCPU
40#define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR) 39#define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR)
41#define FUNC(name) GLUE(.,name) 40#define FUNC(name) GLUE(.,name)
42 41
42kvmppc_skip_interrupt:
43 /*
44 * Here all GPRs are unchanged from when the interrupt happened
45 * except for r13, which is saved in SPRG_SCRATCH0.
46 */
47 mfspr r13, SPRN_SRR0
48 addi r13, r13, 4
49 mtspr SPRN_SRR0, r13
50 GET_SCRATCH0(r13)
51 rfid
52 b .
53
54kvmppc_skip_Hinterrupt:
55 /*
56 * Here all GPRs are unchanged from when the interrupt happened
57 * except for r13, which is saved in SPRG_SCRATCH0.
58 */
59 mfspr r13, SPRN_HSRR0
60 addi r13, r13, 4
61 mtspr SPRN_HSRR0, r13
62 GET_SCRATCH0(r13)
63 hrfid
64 b .
65
43#elif defined(CONFIG_PPC_BOOK3S_32) 66#elif defined(CONFIG_PPC_BOOK3S_32)
44 67
45#define LOAD_SHADOW_VCPU(reg) \
46 mfspr reg, SPRN_SPRG_THREAD; \
47 lwz reg, THREAD_KVM_SVCPU(reg); \
48 /* PPC32 can have a NULL pointer - let's check for that */ \
49 mtspr SPRN_SPRG_SCRATCH1, r12; /* Save r12 */ \
50 mfcr r12; \
51 cmpwi reg, 0; \
52 bne 1f; \
53 mfspr reg, SPRN_SPRG_SCRATCH0; \
54 mtcr r12; \
55 mfspr r12, SPRN_SPRG_SCRATCH1; \
56 b kvmppc_resume_\intno; \
571:; \
58 mtcr r12; \
59 mfspr r12, SPRN_SPRG_SCRATCH1; \
60 tophys(reg, reg)
61
62#define SHADOW_VCPU_OFF 0
63#define MSR_NOIRQ MSR_KERNEL 68#define MSR_NOIRQ MSR_KERNEL
64#define FUNC(name) name 69#define FUNC(name) name
65 70
66#endif
67
68.macro INTERRUPT_TRAMPOLINE intno 71.macro INTERRUPT_TRAMPOLINE intno
69 72
70.global kvmppc_trampoline_\intno 73.global kvmppc_trampoline_\intno
71kvmppc_trampoline_\intno: 74kvmppc_trampoline_\intno:
72 75
73 SET_SCRATCH0(r13) /* Save r13 */ 76 mtspr SPRN_SPRG_SCRATCH0, r13 /* Save r13 */
74 77
75 /* 78 /*
76 * First thing to do is to find out if we're coming 79 * First thing to do is to find out if we're coming
@@ -78,19 +81,28 @@ kvmppc_trampoline_\intno:
78 * 81 *
79 * To distinguish, we check a magic byte in the PACA/current 82 * To distinguish, we check a magic byte in the PACA/current
80 */ 83 */
81 LOAD_SHADOW_VCPU(r13) 84 mfspr r13, SPRN_SPRG_THREAD
82 PPC_STL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13) 85 lwz r13, THREAD_KVM_SVCPU(r13)
86 /* PPC32 can have a NULL pointer - let's check for that */
87 mtspr SPRN_SPRG_SCRATCH1, r12 /* Save r12 */
83 mfcr r12 88 mfcr r12
84 stw r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) 89 cmpwi r13, 0
85 lbz r12, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13) 90 bne 1f
912: mtcr r12
92 mfspr r12, SPRN_SPRG_SCRATCH1
93 mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */
94 b kvmppc_resume_\intno /* Get back original handler */
95
961: tophys(r13, r13)
97 stw r12, HSTATE_SCRATCH1(r13)
98 mfspr r12, SPRN_SPRG_SCRATCH1
99 stw r12, HSTATE_SCRATCH0(r13)
100 lbz r12, HSTATE_IN_GUEST(r13)
86 cmpwi r12, KVM_GUEST_MODE_NONE 101 cmpwi r12, KVM_GUEST_MODE_NONE
87 bne ..kvmppc_handler_hasmagic_\intno 102 bne ..kvmppc_handler_hasmagic_\intno
88 /* No KVM guest? Then jump back to the Linux handler! */ 103 /* No KVM guest? Then jump back to the Linux handler! */
89 lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) 104 lwz r12, HSTATE_SCRATCH1(r13)
90 mtcr r12 105 b 2b
91 PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
92 GET_SCRATCH0(r13) /* r13 = original r13 */
93 b kvmppc_resume_\intno /* Get back original handler */
94 106
95 /* Now we know we're handling a KVM guest */ 107 /* Now we know we're handling a KVM guest */
96..kvmppc_handler_hasmagic_\intno: 108..kvmppc_handler_hasmagic_\intno:
@@ -112,9 +124,6 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK
112INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE 124INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE
113INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE 125INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE
114INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL 126INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL
115#ifdef CONFIG_PPC_BOOK3S_64
116INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL_HV
117#endif
118INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT 127INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT
119INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM 128INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM
120INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_FP_UNAVAIL 129INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_FP_UNAVAIL
@@ -124,14 +133,6 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_TRACE
124INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PERFMON 133INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PERFMON
125INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC 134INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC
126 135
127/* Those are only available on 64 bit machines */
128
129#ifdef CONFIG_PPC_BOOK3S_64
130INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_SEGMENT
131INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_SEGMENT
132INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
133#endif
134
135/* 136/*
136 * Bring us back to the faulting code, but skip the 137 * Bring us back to the faulting code, but skip the
137 * faulting instruction. 138 * faulting instruction.
@@ -143,8 +144,8 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
143 * 144 *
144 * R12 = free 145 * R12 = free
145 * R13 = Shadow VCPU (PACA) 146 * R13 = Shadow VCPU (PACA)
146 * SVCPU.SCRATCH0 = guest R12 147 * HSTATE.SCRATCH0 = guest R12
147 * SVCPU.SCRATCH1 = guest CR 148 * HSTATE.SCRATCH1 = guest CR
148 * SPRG_SCRATCH0 = guest R13 149 * SPRG_SCRATCH0 = guest R13
149 * 150 *
150 */ 151 */
@@ -156,13 +157,14 @@ kvmppc_handler_skip_ins:
156 mtsrr0 r12 157 mtsrr0 r12
157 158
158 /* Clean up all state */ 159 /* Clean up all state */
159 lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) 160 lwz r12, HSTATE_SCRATCH1(r13)
160 mtcr r12 161 mtcr r12
161 PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13) 162 PPC_LL r12, HSTATE_SCRATCH0(r13)
162 GET_SCRATCH0(r13) 163 GET_SCRATCH0(r13)
163 164
164 /* And get back into the code */ 165 /* And get back into the code */
165 RFI 166 RFI
167#endif
166 168
167/* 169/*
168 * This trampoline brings us back to a real mode handler 170 * This trampoline brings us back to a real mode handler
@@ -251,12 +253,4 @@ define_load_up(altivec)
251define_load_up(vsx) 253define_load_up(vsx)
252#endif 254#endif
253 255
254.global kvmppc_trampoline_lowmem
255kvmppc_trampoline_lowmem:
256 PPC_LONG kvmppc_handler_lowmem_trampoline - CONFIG_KERNEL_START
257
258.global kvmppc_trampoline_enter
259kvmppc_trampoline_enter:
260 PPC_LONG kvmppc_handler_trampoline_enter - CONFIG_KERNEL_START
261
262#include "book3s_segment.S" 256#include "book3s_segment.S"
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 451264274b8c..aed32e517212 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -22,7 +22,7 @@
22#if defined(CONFIG_PPC_BOOK3S_64) 22#if defined(CONFIG_PPC_BOOK3S_64)
23 23
24#define GET_SHADOW_VCPU(reg) \ 24#define GET_SHADOW_VCPU(reg) \
25 addi reg, r13, PACA_KVM_SVCPU 25 mr reg, r13
26 26
27#elif defined(CONFIG_PPC_BOOK3S_32) 27#elif defined(CONFIG_PPC_BOOK3S_32)
28 28
@@ -71,6 +71,10 @@ kvmppc_handler_trampoline_enter:
71 /* r3 = shadow vcpu */ 71 /* r3 = shadow vcpu */
72 GET_SHADOW_VCPU(r3) 72 GET_SHADOW_VCPU(r3)
73 73
74 /* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */
75 PPC_STL r1, HSTATE_HOST_R1(r3)
76 PPC_STL r2, HSTATE_HOST_R2(r3)
77
74 /* Move SRR0 and SRR1 into the respective regs */ 78 /* Move SRR0 and SRR1 into the respective regs */
75 PPC_LL r9, SVCPU_PC(r3) 79 PPC_LL r9, SVCPU_PC(r3)
76 mtsrr0 r9 80 mtsrr0 r9
@@ -78,36 +82,36 @@ kvmppc_handler_trampoline_enter:
78 82
79 /* Activate guest mode, so faults get handled by KVM */ 83 /* Activate guest mode, so faults get handled by KVM */
80 li r11, KVM_GUEST_MODE_GUEST 84 li r11, KVM_GUEST_MODE_GUEST
81 stb r11, SVCPU_IN_GUEST(r3) 85 stb r11, HSTATE_IN_GUEST(r3)
82 86
83 /* Switch to guest segment. This is subarch specific. */ 87 /* Switch to guest segment. This is subarch specific. */
84 LOAD_GUEST_SEGMENTS 88 LOAD_GUEST_SEGMENTS
85 89
86 /* Enter guest */ 90 /* Enter guest */
87 91
88 PPC_LL r4, (SVCPU_CTR)(r3) 92 PPC_LL r4, SVCPU_CTR(r3)
89 PPC_LL r5, (SVCPU_LR)(r3) 93 PPC_LL r5, SVCPU_LR(r3)
90 lwz r6, (SVCPU_CR)(r3) 94 lwz r6, SVCPU_CR(r3)
91 lwz r7, (SVCPU_XER)(r3) 95 lwz r7, SVCPU_XER(r3)
92 96
93 mtctr r4 97 mtctr r4
94 mtlr r5 98 mtlr r5
95 mtcr r6 99 mtcr r6
96 mtxer r7 100 mtxer r7
97 101
98 PPC_LL r0, (SVCPU_R0)(r3) 102 PPC_LL r0, SVCPU_R0(r3)
99 PPC_LL r1, (SVCPU_R1)(r3) 103 PPC_LL r1, SVCPU_R1(r3)
100 PPC_LL r2, (SVCPU_R2)(r3) 104 PPC_LL r2, SVCPU_R2(r3)
101 PPC_LL r4, (SVCPU_R4)(r3) 105 PPC_LL r4, SVCPU_R4(r3)
102 PPC_LL r5, (SVCPU_R5)(r3) 106 PPC_LL r5, SVCPU_R5(r3)
103 PPC_LL r6, (SVCPU_R6)(r3) 107 PPC_LL r6, SVCPU_R6(r3)
104 PPC_LL r7, (SVCPU_R7)(r3) 108 PPC_LL r7, SVCPU_R7(r3)
105 PPC_LL r8, (SVCPU_R8)(r3) 109 PPC_LL r8, SVCPU_R8(r3)
106 PPC_LL r9, (SVCPU_R9)(r3) 110 PPC_LL r9, SVCPU_R9(r3)
107 PPC_LL r10, (SVCPU_R10)(r3) 111 PPC_LL r10, SVCPU_R10(r3)
108 PPC_LL r11, (SVCPU_R11)(r3) 112 PPC_LL r11, SVCPU_R11(r3)
109 PPC_LL r12, (SVCPU_R12)(r3) 113 PPC_LL r12, SVCPU_R12(r3)
110 PPC_LL r13, (SVCPU_R13)(r3) 114 PPC_LL r13, SVCPU_R13(r3)
111 115
112 PPC_LL r3, (SVCPU_R3)(r3) 116 PPC_LL r3, (SVCPU_R3)(r3)
113 117
@@ -125,56 +129,63 @@ kvmppc_handler_trampoline_enter_end:
125.global kvmppc_handler_trampoline_exit 129.global kvmppc_handler_trampoline_exit
126kvmppc_handler_trampoline_exit: 130kvmppc_handler_trampoline_exit:
127 131
132.global kvmppc_interrupt
133kvmppc_interrupt:
134
128 /* Register usage at this point: 135 /* Register usage at this point:
129 * 136 *
130 * SPRG_SCRATCH0 = guest R13 137 * SPRG_SCRATCH0 = guest R13
131 * R12 = exit handler id 138 * R12 = exit handler id
132 * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64] 139 * R13 = shadow vcpu (32-bit) or PACA (64-bit)
133 * SVCPU.SCRATCH0 = guest R12 140 * HSTATE.SCRATCH0 = guest R12
134 * SVCPU.SCRATCH1 = guest CR 141 * HSTATE.SCRATCH1 = guest CR
135 * 142 *
136 */ 143 */
137 144
138 /* Save registers */ 145 /* Save registers */
139 146
140 PPC_STL r0, (SHADOW_VCPU_OFF + SVCPU_R0)(r13) 147 PPC_STL r0, SVCPU_R0(r13)
141 PPC_STL r1, (SHADOW_VCPU_OFF + SVCPU_R1)(r13) 148 PPC_STL r1, SVCPU_R1(r13)
142 PPC_STL r2, (SHADOW_VCPU_OFF + SVCPU_R2)(r13) 149 PPC_STL r2, SVCPU_R2(r13)
143 PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_R3)(r13) 150 PPC_STL r3, SVCPU_R3(r13)
144 PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_R4)(r13) 151 PPC_STL r4, SVCPU_R4(r13)
145 PPC_STL r5, (SHADOW_VCPU_OFF + SVCPU_R5)(r13) 152 PPC_STL r5, SVCPU_R5(r13)
146 PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_R6)(r13) 153 PPC_STL r6, SVCPU_R6(r13)
147 PPC_STL r7, (SHADOW_VCPU_OFF + SVCPU_R7)(r13) 154 PPC_STL r7, SVCPU_R7(r13)
148 PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R8)(r13) 155 PPC_STL r8, SVCPU_R8(r13)
149 PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R9)(r13) 156 PPC_STL r9, SVCPU_R9(r13)
150 PPC_STL r10, (SHADOW_VCPU_OFF + SVCPU_R10)(r13) 157 PPC_STL r10, SVCPU_R10(r13)
151 PPC_STL r11, (SHADOW_VCPU_OFF + SVCPU_R11)(r13) 158 PPC_STL r11, SVCPU_R11(r13)
152 159
153 /* Restore R1/R2 so we can handle faults */ 160 /* Restore R1/R2 so we can handle faults */
154 PPC_LL r1, (SHADOW_VCPU_OFF + SVCPU_HOST_R1)(r13) 161 PPC_LL r1, HSTATE_HOST_R1(r13)
155 PPC_LL r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13) 162 PPC_LL r2, HSTATE_HOST_R2(r13)
156 163
157 /* Save guest PC and MSR */ 164 /* Save guest PC and MSR */
165#ifdef CONFIG_PPC64
166BEGIN_FTR_SECTION
158 andi. r0,r12,0x2 167 andi. r0,r12,0x2
159 beq 1f 168 beq 1f
160 mfspr r3,SPRN_HSRR0 169 mfspr r3,SPRN_HSRR0
161 mfspr r4,SPRN_HSRR1 170 mfspr r4,SPRN_HSRR1
162 andi. r12,r12,0x3ffd 171 andi. r12,r12,0x3ffd
163 b 2f 172 b 2f
173END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
174#endif
1641: mfsrr0 r3 1751: mfsrr0 r3
165 mfsrr1 r4 176 mfsrr1 r4
1662: 1772:
167 PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13) 178 PPC_STL r3, SVCPU_PC(r13)
168 PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13) 179 PPC_STL r4, SVCPU_SHADOW_SRR1(r13)
169 180
170 /* Get scratch'ed off registers */ 181 /* Get scratch'ed off registers */
171 GET_SCRATCH0(r9) 182 GET_SCRATCH0(r9)
172 PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13) 183 PPC_LL r8, HSTATE_SCRATCH0(r13)
173 lwz r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) 184 lwz r7, HSTATE_SCRATCH1(r13)
174 185
175 PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R13)(r13) 186 PPC_STL r9, SVCPU_R13(r13)
176 PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R12)(r13) 187 PPC_STL r8, SVCPU_R12(r13)
177 stw r7, (SHADOW_VCPU_OFF + SVCPU_CR)(r13) 188 stw r7, SVCPU_CR(r13)
178 189
179 /* Save more register state */ 190 /* Save more register state */
180 191
@@ -184,11 +195,11 @@ kvmppc_handler_trampoline_exit:
184 mfctr r8 195 mfctr r8
185 mflr r9 196 mflr r9
186 197
187 stw r5, (SHADOW_VCPU_OFF + SVCPU_XER)(r13) 198 stw r5, SVCPU_XER(r13)
188 PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_FAULT_DAR)(r13) 199 PPC_STL r6, SVCPU_FAULT_DAR(r13)
189 stw r7, (SHADOW_VCPU_OFF + SVCPU_FAULT_DSISR)(r13) 200 stw r7, SVCPU_FAULT_DSISR(r13)
190 PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_CTR)(r13) 201 PPC_STL r8, SVCPU_CTR(r13)
191 PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_LR)(r13) 202 PPC_STL r9, SVCPU_LR(r13)
192 203
193 /* 204 /*
194 * In order for us to easily get the last instruction, 205 * In order for us to easily get the last instruction,
@@ -218,7 +229,7 @@ ld_last_inst:
218 /* Set guest mode to 'jump over instruction' so if lwz faults 229 /* Set guest mode to 'jump over instruction' so if lwz faults
219 * we'll just continue at the next IP. */ 230 * we'll just continue at the next IP. */
220 li r9, KVM_GUEST_MODE_SKIP 231 li r9, KVM_GUEST_MODE_SKIP
221 stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13) 232 stb r9, HSTATE_IN_GUEST(r13)
222 233
223 /* 1) enable paging for data */ 234 /* 1) enable paging for data */
224 mfmsr r9 235 mfmsr r9
@@ -232,13 +243,13 @@ ld_last_inst:
232 sync 243 sync
233 244
234#endif 245#endif
235 stw r0, (SHADOW_VCPU_OFF + SVCPU_LAST_INST)(r13) 246 stw r0, SVCPU_LAST_INST(r13)
236 247
237no_ld_last_inst: 248no_ld_last_inst:
238 249
239 /* Unset guest mode */ 250 /* Unset guest mode */
240 li r9, KVM_GUEST_MODE_NONE 251 li r9, KVM_GUEST_MODE_NONE
241 stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13) 252 stb r9, HSTATE_IN_GUEST(r13)
242 253
243 /* Switch back to host MMU */ 254 /* Switch back to host MMU */
244 LOAD_HOST_SEGMENTS 255 LOAD_HOST_SEGMENTS
@@ -248,7 +259,7 @@ no_ld_last_inst:
248 * R1 = host R1 259 * R1 = host R1
249 * R2 = host R2 260 * R2 = host R2
250 * R12 = exit handler id 261 * R12 = exit handler id
251 * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64] 262 * R13 = shadow vcpu (32-bit) or PACA (64-bit)
252 * SVCPU.* = guest * 263 * SVCPU.* = guest *
253 * 264 *
254 */ 265 */
@@ -258,7 +269,7 @@ no_ld_last_inst:
258 ori r7, r7, MSR_IR|MSR_DR|MSR_RI|MSR_ME /* Enable paging */ 269 ori r7, r7, MSR_IR|MSR_DR|MSR_RI|MSR_ME /* Enable paging */
259 mtsrr1 r7 270 mtsrr1 r7
260 /* Load highmem handler address */ 271 /* Load highmem handler address */
261 PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_VMHANDLER)(r13) 272 PPC_LL r8, HSTATE_VMHANDLER(r13)
262 mtsrr0 r8 273 mtsrr0 r8
263 274
264 RFI 275 RFI
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 8462b3a1c1c7..ee45fa01220e 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -13,6 +13,7 @@
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 * 14 *
15 * Copyright IBM Corp. 2007 15 * Copyright IBM Corp. 2007
16 * Copyright 2010-2011 Freescale Semiconductor, Inc.
16 * 17 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com> 18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> 19 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
@@ -78,6 +79,60 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
78 } 79 }
79} 80}
80 81
82#ifdef CONFIG_SPE
83void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
84{
85 preempt_disable();
86 enable_kernel_spe();
87 kvmppc_save_guest_spe(vcpu);
88 vcpu->arch.shadow_msr &= ~MSR_SPE;
89 preempt_enable();
90}
91
92static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
93{
94 preempt_disable();
95 enable_kernel_spe();
96 kvmppc_load_guest_spe(vcpu);
97 vcpu->arch.shadow_msr |= MSR_SPE;
98 preempt_enable();
99}
100
101static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
102{
103 if (vcpu->arch.shared->msr & MSR_SPE) {
104 if (!(vcpu->arch.shadow_msr & MSR_SPE))
105 kvmppc_vcpu_enable_spe(vcpu);
106 } else if (vcpu->arch.shadow_msr & MSR_SPE) {
107 kvmppc_vcpu_disable_spe(vcpu);
108 }
109}
110#else
111static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
112{
113}
114#endif
115
116/*
117 * Helper function for "full" MSR writes. No need to call this if only
118 * EE/CE/ME/DE/RI are changing.
119 */
120void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
121{
122 u32 old_msr = vcpu->arch.shared->msr;
123
124 vcpu->arch.shared->msr = new_msr;
125
126 kvmppc_mmu_msr_notify(vcpu, old_msr);
127
128 if (vcpu->arch.shared->msr & MSR_WE) {
129 kvm_vcpu_block(vcpu);
130 kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
131 };
132
133 kvmppc_vcpu_sync_spe(vcpu);
134}
135
81static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, 136static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
82 unsigned int priority) 137 unsigned int priority)
83{ 138{
@@ -257,6 +312,19 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
257 vcpu->arch.shared->int_pending = 0; 312 vcpu->arch.shared->int_pending = 0;
258} 313}
259 314
315int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
316{
317 int ret;
318
319 local_irq_disable();
320 kvm_guest_enter();
321 ret = __kvmppc_vcpu_run(kvm_run, vcpu);
322 kvm_guest_exit();
323 local_irq_enable();
324
325 return ret;
326}
327
260/** 328/**
261 * kvmppc_handle_exit 329 * kvmppc_handle_exit
262 * 330 *
@@ -344,10 +412,16 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
344 r = RESUME_GUEST; 412 r = RESUME_GUEST;
345 break; 413 break;
346 414
347 case BOOKE_INTERRUPT_SPE_UNAVAIL: 415#ifdef CONFIG_SPE
348 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL); 416 case BOOKE_INTERRUPT_SPE_UNAVAIL: {
417 if (vcpu->arch.shared->msr & MSR_SPE)
418 kvmppc_vcpu_enable_spe(vcpu);
419 else
420 kvmppc_booke_queue_irqprio(vcpu,
421 BOOKE_IRQPRIO_SPE_UNAVAIL);
349 r = RESUME_GUEST; 422 r = RESUME_GUEST;
350 break; 423 break;
424 }
351 425
352 case BOOKE_INTERRUPT_SPE_FP_DATA: 426 case BOOKE_INTERRUPT_SPE_FP_DATA:
353 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA); 427 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
@@ -358,6 +432,28 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
358 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND); 432 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
359 r = RESUME_GUEST; 433 r = RESUME_GUEST;
360 break; 434 break;
435#else
436 case BOOKE_INTERRUPT_SPE_UNAVAIL:
437 /*
438 * Guest wants SPE, but host kernel doesn't support it. Send
439 * an "unimplemented operation" program check to the guest.
440 */
441 kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
442 r = RESUME_GUEST;
443 break;
444
445 /*
446 * These really should never happen without CONFIG_SPE,
447 * as we should never enable the real MSR[SPE] in the guest.
448 */
449 case BOOKE_INTERRUPT_SPE_FP_DATA:
450 case BOOKE_INTERRUPT_SPE_FP_ROUND:
451 printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
452 __func__, exit_nr, vcpu->arch.pc);
453 run->hw.hardware_exit_reason = exit_nr;
454 r = RESUME_HOST;
455 break;
456#endif
361 457
362 case BOOKE_INTERRUPT_DATA_STORAGE: 458 case BOOKE_INTERRUPT_DATA_STORAGE:
363 kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear, 459 kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
@@ -392,6 +488,17 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
392 gpa_t gpaddr; 488 gpa_t gpaddr;
393 gfn_t gfn; 489 gfn_t gfn;
394 490
491#ifdef CONFIG_KVM_E500
492 if (!(vcpu->arch.shared->msr & MSR_PR) &&
493 (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
494 kvmppc_map_magic(vcpu);
495 kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
496 r = RESUME_GUEST;
497
498 break;
499 }
500#endif
501
395 /* Check the guest TLB. */ 502 /* Check the guest TLB. */
396 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr); 503 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
397 if (gtlb_index < 0) { 504 if (gtlb_index < 0) {
@@ -514,6 +621,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
514 621
515 vcpu->arch.pc = 0; 622 vcpu->arch.pc = 0;
516 vcpu->arch.shared->msr = 0; 623 vcpu->arch.shared->msr = 0;
624 vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
517 kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ 625 kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
518 626
519 vcpu->arch.shadow_pid = 1; 627 vcpu->arch.shadow_pid = 1;
@@ -770,6 +878,26 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
770 return -ENOTSUPP; 878 return -ENOTSUPP;
771} 879}
772 880
881int kvmppc_core_prepare_memory_region(struct kvm *kvm,
882 struct kvm_userspace_memory_region *mem)
883{
884 return 0;
885}
886
887void kvmppc_core_commit_memory_region(struct kvm *kvm,
888 struct kvm_userspace_memory_region *mem)
889{
890}
891
892int kvmppc_core_init_vm(struct kvm *kvm)
893{
894 return 0;
895}
896
897void kvmppc_core_destroy_vm(struct kvm *kvm)
898{
899}
900
773int __init kvmppc_booke_init(void) 901int __init kvmppc_booke_init(void)
774{ 902{
775 unsigned long ivor[16]; 903 unsigned long ivor[16];
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 492bb7030358..8e1fe33d64e5 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -52,24 +52,19 @@
52 52
53extern unsigned long kvmppc_booke_handlers; 53extern unsigned long kvmppc_booke_handlers;
54 54
55/* Helper function for "full" MSR writes. No need to call this if only EE is 55void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);
56 * changing. */ 56void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr);
57static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
58{
59 if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR))
60 kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
61
62 vcpu->arch.shared->msr = new_msr;
63
64 if (vcpu->arch.shared->msr & MSR_WE) {
65 kvm_vcpu_block(vcpu);
66 kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
67 };
68}
69 57
70int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, 58int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
71 unsigned int inst, int *advance); 59 unsigned int inst, int *advance);
72int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt); 60int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
73int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs); 61int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
74 62
63/* low-level asm code to transfer guest state */
64void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
65void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu);
66
67/* high-level function, manages flags, host state */
68void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu);
69
75#endif /* __KVM_BOOKE_H__ */ 70#endif /* __KVM_BOOKE_H__ */
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index b58ccae95904..42f2fb1f66e9 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -13,6 +13,7 @@
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 * 14 *
15 * Copyright IBM Corp. 2007 15 * Copyright IBM Corp. 2007
16 * Copyright 2011 Freescale Semiconductor, Inc.
16 * 17 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com> 18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */ 19 */
@@ -24,8 +25,6 @@
24#include <asm/page.h> 25#include <asm/page.h>
25#include <asm/asm-offsets.h> 26#include <asm/asm-offsets.h>
26 27
27#define KVMPPC_MSR_MASK (MSR_CE|MSR_EE|MSR_PR|MSR_DE|MSR_ME|MSR_IS|MSR_DS)
28
29#define VCPU_GPR(n) (VCPU_GPRS + (n * 4)) 28#define VCPU_GPR(n) (VCPU_GPRS + (n * 4))
30 29
31/* The host stack layout: */ 30/* The host stack layout: */
@@ -192,6 +191,12 @@ _GLOBAL(kvmppc_resume_host)
192 lwz r3, VCPU_HOST_PID(r4) 191 lwz r3, VCPU_HOST_PID(r4)
193 mtspr SPRN_PID, r3 192 mtspr SPRN_PID, r3
194 193
194#ifdef CONFIG_FSL_BOOKE
195 /* we cheat and know that Linux doesn't use PID1 which is always 0 */
196 lis r3, 0
197 mtspr SPRN_PID1, r3
198#endif
199
195 /* Restore host IVPR before re-enabling interrupts. We cheat and know 200 /* Restore host IVPR before re-enabling interrupts. We cheat and know
196 * that Linux IVPR is always 0xc0000000. */ 201 * that Linux IVPR is always 0xc0000000. */
197 lis r3, 0xc000 202 lis r3, 0xc000
@@ -241,6 +246,14 @@ _GLOBAL(kvmppc_resume_host)
241heavyweight_exit: 246heavyweight_exit:
242 /* Not returning to guest. */ 247 /* Not returning to guest. */
243 248
249#ifdef CONFIG_SPE
250 /* save guest SPEFSCR and load host SPEFSCR */
251 mfspr r9, SPRN_SPEFSCR
252 stw r9, VCPU_SPEFSCR(r4)
253 lwz r9, VCPU_HOST_SPEFSCR(r4)
254 mtspr SPRN_SPEFSCR, r9
255#endif
256
244 /* We already saved guest volatile register state; now save the 257 /* We already saved guest volatile register state; now save the
245 * non-volatiles. */ 258 * non-volatiles. */
246 stw r15, VCPU_GPR(r15)(r4) 259 stw r15, VCPU_GPR(r15)(r4)
@@ -342,6 +355,14 @@ _GLOBAL(__kvmppc_vcpu_run)
342 lwz r30, VCPU_GPR(r30)(r4) 355 lwz r30, VCPU_GPR(r30)(r4)
343 lwz r31, VCPU_GPR(r31)(r4) 356 lwz r31, VCPU_GPR(r31)(r4)
344 357
358#ifdef CONFIG_SPE
359 /* save host SPEFSCR and load guest SPEFSCR */
360 mfspr r3, SPRN_SPEFSCR
361 stw r3, VCPU_HOST_SPEFSCR(r4)
362 lwz r3, VCPU_SPEFSCR(r4)
363 mtspr SPRN_SPEFSCR, r3
364#endif
365
345lightweight_exit: 366lightweight_exit:
346 stw r2, HOST_R2(r1) 367 stw r2, HOST_R2(r1)
347 368
@@ -350,6 +371,11 @@ lightweight_exit:
350 lwz r3, VCPU_SHADOW_PID(r4) 371 lwz r3, VCPU_SHADOW_PID(r4)
351 mtspr SPRN_PID, r3 372 mtspr SPRN_PID, r3
352 373
374#ifdef CONFIG_FSL_BOOKE
375 lwz r3, VCPU_SHADOW_PID1(r4)
376 mtspr SPRN_PID1, r3
377#endif
378
353#ifdef CONFIG_44x 379#ifdef CONFIG_44x
354 iccci 0, 0 /* XXX hack */ 380 iccci 0, 0 /* XXX hack */
355#endif 381#endif
@@ -405,20 +431,17 @@ lightweight_exit:
405 431
406 /* Finish loading guest volatiles and jump to guest. */ 432 /* Finish loading guest volatiles and jump to guest. */
407 lwz r3, VCPU_CTR(r4) 433 lwz r3, VCPU_CTR(r4)
434 lwz r5, VCPU_CR(r4)
435 lwz r6, VCPU_PC(r4)
436 lwz r7, VCPU_SHADOW_MSR(r4)
408 mtctr r3 437 mtctr r3
409 lwz r3, VCPU_CR(r4) 438 mtcr r5
410 mtcr r3 439 mtsrr0 r6
440 mtsrr1 r7
411 lwz r5, VCPU_GPR(r5)(r4) 441 lwz r5, VCPU_GPR(r5)(r4)
412 lwz r6, VCPU_GPR(r6)(r4) 442 lwz r6, VCPU_GPR(r6)(r4)
413 lwz r7, VCPU_GPR(r7)(r4) 443 lwz r7, VCPU_GPR(r7)(r4)
414 lwz r8, VCPU_GPR(r8)(r4) 444 lwz r8, VCPU_GPR(r8)(r4)
415 lwz r3, VCPU_PC(r4)
416 mtsrr0 r3
417 lwz r3, VCPU_SHARED(r4)
418 lwz r3, (VCPU_SHARED_MSR + 4)(r3)
419 oris r3, r3, KVMPPC_MSR_MASK@h
420 ori r3, r3, KVMPPC_MSR_MASK@l
421 mtsrr1 r3
422 445
423 /* Clear any debug events which occurred since we disabled MSR[DE]. 446 /* Clear any debug events which occurred since we disabled MSR[DE].
424 * XXX This gives us a 3-instruction window in which a breakpoint 447 * XXX This gives us a 3-instruction window in which a breakpoint
@@ -430,3 +453,24 @@ lightweight_exit:
430 lwz r3, VCPU_GPR(r3)(r4) 453 lwz r3, VCPU_GPR(r3)(r4)
431 lwz r4, VCPU_GPR(r4)(r4) 454 lwz r4, VCPU_GPR(r4)(r4)
432 rfi 455 rfi
456
457#ifdef CONFIG_SPE
458_GLOBAL(kvmppc_save_guest_spe)
459 cmpi 0,r3,0
460 beqlr-
461 SAVE_32EVRS(0, r4, r3, VCPU_EVR)
462 evxor evr6, evr6, evr6
463 evmwumiaa evr6, evr6, evr6
464 li r4,VCPU_ACC
465 evstddx evr6, r4, r3 /* save acc */
466 blr
467
468_GLOBAL(kvmppc_load_guest_spe)
469 cmpi 0,r3,0
470 beqlr-
471 li r4,VCPU_ACC
472 evlddx evr6,r4,r3
473 evmra evr6,evr6 /* load acc */
474 REST_32EVRS(0, r4, r3, VCPU_EVR)
475 blr
476#endif
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 318dbc61ba44..797a7447c268 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved. 2 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
3 * 3 *
4 * Author: Yu Liu, <yu.liu@freescale.com> 4 * Author: Yu Liu, <yu.liu@freescale.com>
5 * 5 *
@@ -41,6 +41,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
41void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) 41void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
42{ 42{
43 kvmppc_e500_tlb_put(vcpu); 43 kvmppc_e500_tlb_put(vcpu);
44
45#ifdef CONFIG_SPE
46 if (vcpu->arch.shadow_msr & MSR_SPE)
47 kvmppc_vcpu_disable_spe(vcpu);
48#endif
44} 49}
45 50
46int kvmppc_core_check_processor_compat(void) 51int kvmppc_core_check_processor_compat(void)
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 69cd665a0caf..d48ae396f41e 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -81,8 +81,12 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
81 kvmppc_set_pid(vcpu, spr_val); 81 kvmppc_set_pid(vcpu, spr_val);
82 break; 82 break;
83 case SPRN_PID1: 83 case SPRN_PID1:
84 if (spr_val != 0)
85 return EMULATE_FAIL;
84 vcpu_e500->pid[1] = spr_val; break; 86 vcpu_e500->pid[1] = spr_val; break;
85 case SPRN_PID2: 87 case SPRN_PID2:
88 if (spr_val != 0)
89 return EMULATE_FAIL;
86 vcpu_e500->pid[2] = spr_val; break; 90 vcpu_e500->pid[2] = spr_val; break;
87 case SPRN_MAS0: 91 case SPRN_MAS0:
88 vcpu_e500->mas0 = spr_val; break; 92 vcpu_e500->mas0 = spr_val; break;
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index b18fe353397d..13c432ea2fa8 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -28,8 +28,196 @@
28 28
29#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1) 29#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)
30 30
31struct id {
32 unsigned long val;
33 struct id **pentry;
34};
35
36#define NUM_TIDS 256
37
38/*
39 * This table provide mappings from:
40 * (guestAS,guestTID,guestPR) --> ID of physical cpu
41 * guestAS [0..1]
42 * guestTID [0..255]
43 * guestPR [0..1]
44 * ID [1..255]
45 * Each vcpu keeps one vcpu_id_table.
46 */
47struct vcpu_id_table {
48 struct id id[2][NUM_TIDS][2];
49};
50
51/*
52 * This table provide reversed mappings of vcpu_id_table:
53 * ID --> address of vcpu_id_table item.
54 * Each physical core has one pcpu_id_table.
55 */
56struct pcpu_id_table {
57 struct id *entry[NUM_TIDS];
58};
59
60static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);
61
62/* This variable keeps last used shadow ID on local core.
63 * The valid range of shadow ID is [1..255] */
64static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
65
31static unsigned int tlb1_entry_num; 66static unsigned int tlb1_entry_num;
32 67
68/*
69 * Allocate a free shadow id and setup a valid sid mapping in given entry.
70 * A mapping is only valid when vcpu_id_table and pcpu_id_table are match.
71 *
72 * The caller must have preemption disabled, and keep it that way until
73 * it has finished with the returned shadow id (either written into the
74 * TLB or arch.shadow_pid, or discarded).
75 */
76static inline int local_sid_setup_one(struct id *entry)
77{
78 unsigned long sid;
79 int ret = -1;
80
81 sid = ++(__get_cpu_var(pcpu_last_used_sid));
82 if (sid < NUM_TIDS) {
83 __get_cpu_var(pcpu_sids).entry[sid] = entry;
84 entry->val = sid;
85 entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
86 ret = sid;
87 }
88
89 /*
90 * If sid == NUM_TIDS, we've run out of sids. We return -1, and
91 * the caller will invalidate everything and start over.
92 *
93 * sid > NUM_TIDS indicates a race, which we disable preemption to
94 * avoid.
95 */
96 WARN_ON(sid > NUM_TIDS);
97
98 return ret;
99}
100
101/*
102 * Check if given entry contain a valid shadow id mapping.
103 * An ID mapping is considered valid only if
104 * both vcpu and pcpu know this mapping.
105 *
106 * The caller must have preemption disabled, and keep it that way until
107 * it has finished with the returned shadow id (either written into the
108 * TLB or arch.shadow_pid, or discarded).
109 */
110static inline int local_sid_lookup(struct id *entry)
111{
112 if (entry && entry->val != 0 &&
113 __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
114 entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
115 return entry->val;
116 return -1;
117}
118
119/* Invalidate all id mappings on local core */
120static inline void local_sid_destroy_all(void)
121{
122 preempt_disable();
123 __get_cpu_var(pcpu_last_used_sid) = 0;
124 memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
125 preempt_enable();
126}
127
128static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
129{
130 vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
131 return vcpu_e500->idt;
132}
133
134static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
135{
136 kfree(vcpu_e500->idt);
137}
138
139/* Invalidate all mappings on vcpu */
140static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
141{
142 memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));
143
144 /* Update shadow pid when mappings are changed */
145 kvmppc_e500_recalc_shadow_pid(vcpu_e500);
146}
147
148/* Invalidate one ID mapping on vcpu */
149static inline void kvmppc_e500_id_table_reset_one(
150 struct kvmppc_vcpu_e500 *vcpu_e500,
151 int as, int pid, int pr)
152{
153 struct vcpu_id_table *idt = vcpu_e500->idt;
154
155 BUG_ON(as >= 2);
156 BUG_ON(pid >= NUM_TIDS);
157 BUG_ON(pr >= 2);
158
159 idt->id[as][pid][pr].val = 0;
160 idt->id[as][pid][pr].pentry = NULL;
161
162 /* Update shadow pid when mappings are changed */
163 kvmppc_e500_recalc_shadow_pid(vcpu_e500);
164}
165
166/*
167 * Map guest (vcpu,AS,ID,PR) to physical core shadow id.
168 * This function first lookup if a valid mapping exists,
169 * if not, then creates a new one.
170 *
171 * The caller must have preemption disabled, and keep it that way until
172 * it has finished with the returned shadow id (either written into the
173 * TLB or arch.shadow_pid, or discarded).
174 */
175static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
176 unsigned int as, unsigned int gid,
177 unsigned int pr, int avoid_recursion)
178{
179 struct vcpu_id_table *idt = vcpu_e500->idt;
180 int sid;
181
182 BUG_ON(as >= 2);
183 BUG_ON(gid >= NUM_TIDS);
184 BUG_ON(pr >= 2);
185
186 sid = local_sid_lookup(&idt->id[as][gid][pr]);
187
188 while (sid <= 0) {
189 /* No mapping yet */
190 sid = local_sid_setup_one(&idt->id[as][gid][pr]);
191 if (sid <= 0) {
192 _tlbil_all();
193 local_sid_destroy_all();
194 }
195
196 /* Update shadow pid when mappings are changed */
197 if (!avoid_recursion)
198 kvmppc_e500_recalc_shadow_pid(vcpu_e500);
199 }
200
201 return sid;
202}
203
204/* Map guest pid to shadow.
205 * We use PID to keep shadow of current guest non-zero PID,
206 * and use PID1 to keep shadow of guest zero PID.
207 * So that guest tlbe with TID=0 can be accessed at any time */
208void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
209{
210 preempt_disable();
211 vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
212 get_cur_as(&vcpu_e500->vcpu),
213 get_cur_pid(&vcpu_e500->vcpu),
214 get_cur_pr(&vcpu_e500->vcpu), 1);
215 vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
216 get_cur_as(&vcpu_e500->vcpu), 0,
217 get_cur_pr(&vcpu_e500->vcpu), 1);
218 preempt_enable();
219}
220
33void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu) 221void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
34{ 222{
35 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 223 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -41,25 +229,14 @@ void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
41 229
42 for (tlbsel = 0; tlbsel < 2; tlbsel++) { 230 for (tlbsel = 0; tlbsel < 2; tlbsel++) {
43 printk("Guest TLB%d:\n", tlbsel); 231 printk("Guest TLB%d:\n", tlbsel);
44 for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) { 232 for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
45 tlbe = &vcpu_e500->guest_tlb[tlbsel][i]; 233 tlbe = &vcpu_e500->gtlb_arch[tlbsel][i];
46 if (tlbe->mas1 & MAS1_VALID) 234 if (tlbe->mas1 & MAS1_VALID)
47 printk(" G[%d][%3d] | %08X | %08X | %08X | %08X |\n", 235 printk(" G[%d][%3d] | %08X | %08X | %08X | %08X |\n",
48 tlbsel, i, tlbe->mas1, tlbe->mas2, 236 tlbsel, i, tlbe->mas1, tlbe->mas2,
49 tlbe->mas3, tlbe->mas7); 237 tlbe->mas3, tlbe->mas7);
50 } 238 }
51 } 239 }
52
53 for (tlbsel = 0; tlbsel < 2; tlbsel++) {
54 printk("Shadow TLB%d:\n", tlbsel);
55 for (i = 0; i < vcpu_e500->shadow_tlb_size[tlbsel]; i++) {
56 tlbe = &vcpu_e500->shadow_tlb[tlbsel][i];
57 if (tlbe->mas1 & MAS1_VALID)
58 printk(" S[%d][%3d] | %08X | %08X | %08X | %08X |\n",
59 tlbsel, i, tlbe->mas1, tlbe->mas2,
60 tlbe->mas3, tlbe->mas7);
61 }
62 }
63} 240}
64 241
65static inline unsigned int tlb0_get_next_victim( 242static inline unsigned int tlb0_get_next_victim(
@@ -67,16 +244,17 @@ static inline unsigned int tlb0_get_next_victim(
67{ 244{
68 unsigned int victim; 245 unsigned int victim;
69 246
70 victim = vcpu_e500->guest_tlb_nv[0]++; 247 victim = vcpu_e500->gtlb_nv[0]++;
71 if (unlikely(vcpu_e500->guest_tlb_nv[0] >= KVM_E500_TLB0_WAY_NUM)) 248 if (unlikely(vcpu_e500->gtlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
72 vcpu_e500->guest_tlb_nv[0] = 0; 249 vcpu_e500->gtlb_nv[0] = 0;
73 250
74 return victim; 251 return victim;
75} 252}
76 253
77static inline unsigned int tlb1_max_shadow_size(void) 254static inline unsigned int tlb1_max_shadow_size(void)
78{ 255{
79 return tlb1_entry_num - tlbcam_index; 256 /* reserve one entry for magic page */
257 return tlb1_entry_num - tlbcam_index - 1;
80} 258}
81 259
82static inline int tlbe_is_writable(struct tlbe *tlbe) 260static inline int tlbe_is_writable(struct tlbe *tlbe)
@@ -112,72 +290,149 @@ static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
112/* 290/*
113 * writing shadow tlb entry to host TLB 291 * writing shadow tlb entry to host TLB
114 */ 292 */
115static inline void __write_host_tlbe(struct tlbe *stlbe) 293static inline void __write_host_tlbe(struct tlbe *stlbe, uint32_t mas0)
116{ 294{
295 unsigned long flags;
296
297 local_irq_save(flags);
298 mtspr(SPRN_MAS0, mas0);
117 mtspr(SPRN_MAS1, stlbe->mas1); 299 mtspr(SPRN_MAS1, stlbe->mas1);
118 mtspr(SPRN_MAS2, stlbe->mas2); 300 mtspr(SPRN_MAS2, stlbe->mas2);
119 mtspr(SPRN_MAS3, stlbe->mas3); 301 mtspr(SPRN_MAS3, stlbe->mas3);
120 mtspr(SPRN_MAS7, stlbe->mas7); 302 mtspr(SPRN_MAS7, stlbe->mas7);
121 __asm__ __volatile__ ("tlbwe\n" : : ); 303 asm volatile("isync; tlbwe" : : : "memory");
304 local_irq_restore(flags);
122} 305}
123 306
124static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500, 307static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
125 int tlbsel, int esel) 308 int tlbsel, int esel, struct tlbe *stlbe)
126{ 309{
127 struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
128
129 local_irq_disable();
130 if (tlbsel == 0) { 310 if (tlbsel == 0) {
131 __write_host_tlbe(stlbe); 311 __write_host_tlbe(stlbe,
312 MAS0_TLBSEL(0) |
313 MAS0_ESEL(esel & (KVM_E500_TLB0_WAY_NUM - 1)));
132 } else { 314 } else {
133 unsigned register mas0; 315 __write_host_tlbe(stlbe,
134 316 MAS0_TLBSEL(1) |
135 mas0 = mfspr(SPRN_MAS0); 317 MAS0_ESEL(to_htlb1_esel(esel)));
136
137 mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(esel)));
138 __write_host_tlbe(stlbe);
139
140 mtspr(SPRN_MAS0, mas0);
141 } 318 }
142 local_irq_enable(); 319 trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
320 stlbe->mas3, stlbe->mas7);
321}
322
323void kvmppc_map_magic(struct kvm_vcpu *vcpu)
324{
325 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
326 struct tlbe magic;
327 ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
328 unsigned int stid;
329 pfn_t pfn;
330
331 pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
332 get_page(pfn_to_page(pfn));
333
334 preempt_disable();
335 stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);
336
337 magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
338 MAS1_TSIZE(BOOK3E_PAGESZ_4K);
339 magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
340 magic.mas3 = (pfn << PAGE_SHIFT) |
341 MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
342 magic.mas7 = pfn >> (32 - PAGE_SHIFT);
343
344 __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
345 preempt_enable();
143} 346}
144 347
145void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu) 348void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
146{ 349{
147 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 350 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
148 int i; 351
149 unsigned register mas0; 352 /* Shadow PID may be expired on local core */
150 353 kvmppc_e500_recalc_shadow_pid(vcpu_e500);
151 /* Load all valid TLB1 entries to reduce guest tlb miss fault */
152 local_irq_disable();
153 mas0 = mfspr(SPRN_MAS0);
154 for (i = 0; i < tlb1_max_shadow_size(); i++) {
155 struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];
156
157 if (get_tlb_v(stlbe)) {
158 mtspr(SPRN_MAS0, MAS0_TLBSEL(1)
159 | MAS0_ESEL(to_htlb1_esel(i)));
160 __write_host_tlbe(stlbe);
161 }
162 }
163 mtspr(SPRN_MAS0, mas0);
164 local_irq_enable();
165} 354}
166 355
167void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu) 356void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
168{ 357{
169 _tlbil_all(); 358}
359
360static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
361 int tlbsel, int esel)
362{
363 struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
364 struct vcpu_id_table *idt = vcpu_e500->idt;
365 unsigned int pr, tid, ts, pid;
366 u32 val, eaddr;
367 unsigned long flags;
368
369 ts = get_tlb_ts(gtlbe);
370 tid = get_tlb_tid(gtlbe);
371
372 preempt_disable();
373
374 /* One guest ID may be mapped to two shadow IDs */
375 for (pr = 0; pr < 2; pr++) {
376 /*
377 * The shadow PID can have a valid mapping on at most one
378 * host CPU. In the common case, it will be valid on this
379 * CPU, in which case (for TLB0) we do a local invalidation
380 * of the specific address.
381 *
382 * If the shadow PID is not valid on the current host CPU, or
383 * if we're invalidating a TLB1 entry, we invalidate the
384 * entire shadow PID.
385 */
386 if (tlbsel == 1 ||
387 (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) {
388 kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
389 continue;
390 }
391
392 /*
393 * The guest is invalidating a TLB0 entry which is in a PID
394 * that has a valid shadow mapping on this host CPU. We
395 * search host TLB0 to invalidate it's shadow TLB entry,
396 * similar to __tlbil_va except that we need to look in AS1.
397 */
398 val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
399 eaddr = get_tlb_eaddr(gtlbe);
400
401 local_irq_save(flags);
402
403 mtspr(SPRN_MAS6, val);
404 asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
405 val = mfspr(SPRN_MAS1);
406 if (val & MAS1_VALID) {
407 mtspr(SPRN_MAS1, val & ~MAS1_VALID);
408 asm volatile("tlbwe");
409 }
410
411 local_irq_restore(flags);
412 }
413
414 preempt_enable();
170} 415}
171 416
172/* Search the guest TLB for a matching entry. */ 417/* Search the guest TLB for a matching entry. */
173static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500, 418static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
174 gva_t eaddr, int tlbsel, unsigned int pid, int as) 419 gva_t eaddr, int tlbsel, unsigned int pid, int as)
175{ 420{
421 int size = vcpu_e500->gtlb_size[tlbsel];
422 int set_base;
176 int i; 423 int i;
177 424
178 /* XXX Replace loop with fancy data structures. */ 425 if (tlbsel == 0) {
179 for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) { 426 int mask = size / KVM_E500_TLB0_WAY_NUM - 1;
180 struct tlbe *tlbe = &vcpu_e500->guest_tlb[tlbsel][i]; 427 set_base = (eaddr >> PAGE_SHIFT) & mask;
428 set_base *= KVM_E500_TLB0_WAY_NUM;
429 size = KVM_E500_TLB0_WAY_NUM;
430 } else {
431 set_base = 0;
432 }
433
434 for (i = 0; i < size; i++) {
435 struct tlbe *tlbe = &vcpu_e500->gtlb_arch[tlbsel][set_base + i];
181 unsigned int tid; 436 unsigned int tid;
182 437
183 if (eaddr < get_tlb_eaddr(tlbe)) 438 if (eaddr < get_tlb_eaddr(tlbe))
@@ -196,66 +451,32 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
196 if (get_tlb_ts(tlbe) != as && as != -1) 451 if (get_tlb_ts(tlbe) != as && as != -1)
197 continue; 452 continue;
198 453
199 return i; 454 return set_base + i;
200 } 455 }
201 456
202 return -1; 457 return -1;
203} 458}
204 459
205static void kvmppc_e500_shadow_release(struct kvmppc_vcpu_e500 *vcpu_e500, 460static inline void kvmppc_e500_priv_setup(struct tlbe_priv *priv,
206 int tlbsel, int esel) 461 struct tlbe *gtlbe,
207{ 462 pfn_t pfn)
208 struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
209 struct page *page = vcpu_e500->shadow_pages[tlbsel][esel];
210
211 if (page) {
212 vcpu_e500->shadow_pages[tlbsel][esel] = NULL;
213
214 if (get_tlb_v(stlbe)) {
215 if (tlbe_is_writable(stlbe))
216 kvm_release_page_dirty(page);
217 else
218 kvm_release_page_clean(page);
219 }
220 }
221}
222
223static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
224 int tlbsel, int esel)
225{ 463{
226 struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel]; 464 priv->pfn = pfn;
465 priv->flags = E500_TLB_VALID;
227 466
228 kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel); 467 if (tlbe_is_writable(gtlbe))
229 stlbe->mas1 = 0; 468 priv->flags |= E500_TLB_DIRTY;
230 trace_kvm_stlb_inval(index_of(tlbsel, esel));
231} 469}
232 470
233static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, 471static inline void kvmppc_e500_priv_release(struct tlbe_priv *priv)
234 gva_t eaddr, gva_t eend, u32 tid)
235{ 472{
236 unsigned int pid = tid & 0xff; 473 if (priv->flags & E500_TLB_VALID) {
237 unsigned int i; 474 if (priv->flags & E500_TLB_DIRTY)
238 475 kvm_release_pfn_dirty(priv->pfn);
239 /* XXX Replace loop with fancy data structures. */ 476 else
240 for (i = 0; i < vcpu_e500->guest_tlb_size[1]; i++) { 477 kvm_release_pfn_clean(priv->pfn);
241 struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];
242 unsigned int tid;
243
244 if (!get_tlb_v(stlbe))
245 continue;
246
247 if (eend < get_tlb_eaddr(stlbe))
248 continue;
249 478
250 if (eaddr > get_tlb_end(stlbe)) 479 priv->flags = 0;
251 continue;
252
253 tid = get_tlb_tid(stlbe);
254 if (tid && (tid != pid))
255 continue;
256
257 kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);
258 write_host_tlbe(vcpu_e500, 1, i);
259 } 480 }
260} 481}
261 482
@@ -273,7 +494,7 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
273 tsized = (vcpu_e500->mas4 >> 7) & 0x1f; 494 tsized = (vcpu_e500->mas4 >> 7) & 0x1f;
274 495
275 vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) 496 vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
276 | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]); 497 | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
277 vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0) 498 vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
278 | MAS1_TID(vcpu_e500->pid[pidsel]) 499 | MAS1_TID(vcpu_e500->pid[pidsel])
279 | MAS1_TSIZE(tsized); 500 | MAS1_TSIZE(tsized);
@@ -286,56 +507,154 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
286 vcpu_e500->mas7 = 0; 507 vcpu_e500->mas7 = 0;
287} 508}
288 509
289static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, 510static inline void kvmppc_e500_setup_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
290 u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel) 511 struct tlbe *gtlbe, int tsize,
512 struct tlbe_priv *priv,
513 u64 gvaddr, struct tlbe *stlbe)
291{ 514{
292 struct page *new_page; 515 pfn_t pfn = priv->pfn;
293 struct tlbe *stlbe; 516 unsigned int stid;
294 hpa_t hpaddr;
295
296 stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
297
298 /* Get reference to new page. */
299 new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn);
300 if (is_error_page(new_page)) {
301 printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n",
302 (long)gfn);
303 kvm_release_page_clean(new_page);
304 return;
305 }
306 hpaddr = page_to_phys(new_page);
307
308 /* Drop reference to old page. */
309 kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
310 517
311 vcpu_e500->shadow_pages[tlbsel][esel] = new_page; 518 stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
519 get_tlb_tid(gtlbe),
520 get_cur_pr(&vcpu_e500->vcpu), 0);
312 521
313 /* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */ 522 /* Force TS=1 IPROT=0 for all guest mappings. */
314 stlbe->mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K) 523 stlbe->mas1 = MAS1_TSIZE(tsize)
315 | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID; 524 | MAS1_TID(stid) | MAS1_TS | MAS1_VALID;
316 stlbe->mas2 = (gvaddr & MAS2_EPN) 525 stlbe->mas2 = (gvaddr & MAS2_EPN)
317 | e500_shadow_mas2_attrib(gtlbe->mas2, 526 | e500_shadow_mas2_attrib(gtlbe->mas2,
318 vcpu_e500->vcpu.arch.shared->msr & MSR_PR); 527 vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
319 stlbe->mas3 = (hpaddr & MAS3_RPN) 528 stlbe->mas3 = ((pfn << PAGE_SHIFT) & MAS3_RPN)
320 | e500_shadow_mas3_attrib(gtlbe->mas3, 529 | e500_shadow_mas3_attrib(gtlbe->mas3,
321 vcpu_e500->vcpu.arch.shared->msr & MSR_PR); 530 vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
322 stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN; 531 stlbe->mas7 = (pfn >> (32 - PAGE_SHIFT)) & MAS7_RPN;
532}
323 533
324 trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2, 534
325 stlbe->mas3, stlbe->mas7); 535static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
536 u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel,
537 struct tlbe *stlbe)
538{
539 struct kvm_memory_slot *slot;
540 unsigned long pfn, hva;
541 int pfnmap = 0;
542 int tsize = BOOK3E_PAGESZ_4K;
543 struct tlbe_priv *priv;
544
545 /*
546 * Translate guest physical to true physical, acquiring
547 * a page reference if it is normal, non-reserved memory.
548 *
549 * gfn_to_memslot() must succeed because otherwise we wouldn't
550 * have gotten this far. Eventually we should just pass the slot
551 * pointer through from the first lookup.
552 */
553 slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
554 hva = gfn_to_hva_memslot(slot, gfn);
555
556 if (tlbsel == 1) {
557 struct vm_area_struct *vma;
558 down_read(&current->mm->mmap_sem);
559
560 vma = find_vma(current->mm, hva);
561 if (vma && hva >= vma->vm_start &&
562 (vma->vm_flags & VM_PFNMAP)) {
563 /*
564 * This VMA is a physically contiguous region (e.g.
565 * /dev/mem) that bypasses normal Linux page
566 * management. Find the overlap between the
567 * vma and the memslot.
568 */
569
570 unsigned long start, end;
571 unsigned long slot_start, slot_end;
572
573 pfnmap = 1;
574
575 start = vma->vm_pgoff;
576 end = start +
577 ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
578
579 pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);
580
581 slot_start = pfn - (gfn - slot->base_gfn);
582 slot_end = slot_start + slot->npages;
583
584 if (start < slot_start)
585 start = slot_start;
586 if (end > slot_end)
587 end = slot_end;
588
589 tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
590 MAS1_TSIZE_SHIFT;
591
592 /*
593 * e500 doesn't implement the lowest tsize bit,
594 * or 1K pages.
595 */
596 tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
597
598 /*
599 * Now find the largest tsize (up to what the guest
600 * requested) that will cover gfn, stay within the
601 * range, and for which gfn and pfn are mutually
602 * aligned.
603 */
604
605 for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
606 unsigned long gfn_start, gfn_end, tsize_pages;
607 tsize_pages = 1 << (tsize - 2);
608
609 gfn_start = gfn & ~(tsize_pages - 1);
610 gfn_end = gfn_start + tsize_pages;
611
612 if (gfn_start + pfn - gfn < start)
613 continue;
614 if (gfn_end + pfn - gfn > end)
615 continue;
616 if ((gfn & (tsize_pages - 1)) !=
617 (pfn & (tsize_pages - 1)))
618 continue;
619
620 gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
621 pfn &= ~(tsize_pages - 1);
622 break;
623 }
624 }
625
626 up_read(&current->mm->mmap_sem);
627 }
628
629 if (likely(!pfnmap)) {
630 pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
631 if (is_error_pfn(pfn)) {
632 printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
633 (long)gfn);
634 kvm_release_pfn_clean(pfn);
635 return;
636 }
637 }
638
639 /* Drop old priv and setup new one. */
640 priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
641 kvmppc_e500_priv_release(priv);
642 kvmppc_e500_priv_setup(priv, gtlbe, pfn);
643
644 kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, priv, gvaddr, stlbe);
326} 645}
327 646
328/* XXX only map the one-one case, for now use TLB0 */ 647/* XXX only map the one-one case, for now use TLB0 */
329static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500, 648static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
330 int tlbsel, int esel) 649 int esel, struct tlbe *stlbe)
331{ 650{
332 struct tlbe *gtlbe; 651 struct tlbe *gtlbe;
333 652
334 gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel]; 653 gtlbe = &vcpu_e500->gtlb_arch[0][esel];
335 654
336 kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe), 655 kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
337 get_tlb_raddr(gtlbe) >> PAGE_SHIFT, 656 get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
338 gtlbe, tlbsel, esel); 657 gtlbe, 0, esel, stlbe);
339 658
340 return esel; 659 return esel;
341} 660}
@@ -344,53 +663,37 @@ static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500,
344 * the shadow TLB. */ 663 * the shadow TLB. */
345/* XXX for both one-one and one-to-many , for now use TLB1 */ 664/* XXX for both one-one and one-to-many , for now use TLB1 */
346static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, 665static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
347 u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe) 666 u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, struct tlbe *stlbe)
348{ 667{
349 unsigned int victim; 668 unsigned int victim;
350 669
351 victim = vcpu_e500->guest_tlb_nv[1]++; 670 victim = vcpu_e500->gtlb_nv[1]++;
352 671
353 if (unlikely(vcpu_e500->guest_tlb_nv[1] >= tlb1_max_shadow_size())) 672 if (unlikely(vcpu_e500->gtlb_nv[1] >= tlb1_max_shadow_size()))
354 vcpu_e500->guest_tlb_nv[1] = 0; 673 vcpu_e500->gtlb_nv[1] = 0;
355 674
356 kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim); 675 kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim, stlbe);
357 676
358 return victim; 677 return victim;
359} 678}
360 679
361/* Invalidate all guest kernel mappings when enter usermode, 680void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
362 * so that when they fault back in they will get the
363 * proper permission bits. */
364void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
365{ 681{
366 if (usermode) { 682 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
367 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
368 int i;
369
370 /* XXX Replace loop with fancy data structures. */
371 for (i = 0; i < tlb1_max_shadow_size(); i++)
372 kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);
373 683
374 _tlbil_all(); 684 /* Recalc shadow pid since MSR changes */
375 } 685 kvmppc_e500_recalc_shadow_pid(vcpu_e500);
376} 686}
377 687
378static int kvmppc_e500_gtlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, 688static inline int kvmppc_e500_gtlbe_invalidate(
379 int tlbsel, int esel) 689 struct kvmppc_vcpu_e500 *vcpu_e500,
690 int tlbsel, int esel)
380{ 691{
381 struct tlbe *gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel]; 692 struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
382 693
383 if (unlikely(get_tlb_iprot(gtlbe))) 694 if (unlikely(get_tlb_iprot(gtlbe)))
384 return -1; 695 return -1;
385 696
386 if (tlbsel == 1) {
387 kvmppc_e500_tlb1_invalidate(vcpu_e500, get_tlb_eaddr(gtlbe),
388 get_tlb_end(gtlbe),
389 get_tlb_tid(gtlbe));
390 } else {
391 kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);
392 }
393
394 gtlbe->mas1 = 0; 697 gtlbe->mas1 = 0;
395 698
396 return 0; 699 return 0;
@@ -401,13 +704,14 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
401 int esel; 704 int esel;
402 705
403 if (value & MMUCSR0_TLB0FI) 706 if (value & MMUCSR0_TLB0FI)
404 for (esel = 0; esel < vcpu_e500->guest_tlb_size[0]; esel++) 707 for (esel = 0; esel < vcpu_e500->gtlb_size[0]; esel++)
405 kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel); 708 kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
406 if (value & MMUCSR0_TLB1FI) 709 if (value & MMUCSR0_TLB1FI)
407 for (esel = 0; esel < vcpu_e500->guest_tlb_size[1]; esel++) 710 for (esel = 0; esel < vcpu_e500->gtlb_size[1]; esel++)
408 kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel); 711 kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);
409 712
410 _tlbil_all(); 713 /* Invalidate all vcpu id mappings */
714 kvmppc_e500_id_table_reset_all(vcpu_e500);
411 715
412 return EMULATE_DONE; 716 return EMULATE_DONE;
413} 717}
@@ -428,7 +732,7 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
428 732
429 if (ia) { 733 if (ia) {
430 /* invalidate all entries */ 734 /* invalidate all entries */
431 for (esel = 0; esel < vcpu_e500->guest_tlb_size[tlbsel]; esel++) 735 for (esel = 0; esel < vcpu_e500->gtlb_size[tlbsel]; esel++)
432 kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); 736 kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
433 } else { 737 } else {
434 ea &= 0xfffff000; 738 ea &= 0xfffff000;
@@ -438,7 +742,8 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
438 kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); 742 kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
439 } 743 }
440 744
441 _tlbil_all(); 745 /* Invalidate all vcpu id mappings */
746 kvmppc_e500_id_table_reset_all(vcpu_e500);
442 747
443 return EMULATE_DONE; 748 return EMULATE_DONE;
444} 749}
@@ -452,9 +757,9 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
452 tlbsel = get_tlb_tlbsel(vcpu_e500); 757 tlbsel = get_tlb_tlbsel(vcpu_e500);
453 esel = get_tlb_esel(vcpu_e500, tlbsel); 758 esel = get_tlb_esel(vcpu_e500, tlbsel);
454 759
455 gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel]; 760 gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
456 vcpu_e500->mas0 &= ~MAS0_NV(~0); 761 vcpu_e500->mas0 &= ~MAS0_NV(~0);
457 vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]); 762 vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
458 vcpu_e500->mas1 = gtlbe->mas1; 763 vcpu_e500->mas1 = gtlbe->mas1;
459 vcpu_e500->mas2 = gtlbe->mas2; 764 vcpu_e500->mas2 = gtlbe->mas2;
460 vcpu_e500->mas3 = gtlbe->mas3; 765 vcpu_e500->mas3 = gtlbe->mas3;
@@ -477,14 +782,14 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
477 for (tlbsel = 0; tlbsel < 2; tlbsel++) { 782 for (tlbsel = 0; tlbsel < 2; tlbsel++) {
478 esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as); 783 esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
479 if (esel >= 0) { 784 if (esel >= 0) {
480 gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel]; 785 gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
481 break; 786 break;
482 } 787 }
483 } 788 }
484 789
485 if (gtlbe) { 790 if (gtlbe) {
486 vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel) 791 vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
487 | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]); 792 | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
488 vcpu_e500->mas1 = gtlbe->mas1; 793 vcpu_e500->mas1 = gtlbe->mas1;
489 vcpu_e500->mas2 = gtlbe->mas2; 794 vcpu_e500->mas2 = gtlbe->mas2;
490 vcpu_e500->mas3 = gtlbe->mas3; 795 vcpu_e500->mas3 = gtlbe->mas3;
@@ -497,7 +802,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
497 victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0; 802 victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
498 803
499 vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) 804 vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
500 | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]); 805 | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
501 vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0) 806 vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
502 | (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0)) 807 | (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0))
503 | (vcpu_e500->mas4 & MAS4_TSIZED(~0)); 808 | (vcpu_e500->mas4 & MAS4_TSIZED(~0));
@@ -514,23 +819,16 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
514int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) 819int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
515{ 820{
516 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 821 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
517 u64 eaddr;
518 u64 raddr;
519 u32 tid;
520 struct tlbe *gtlbe; 822 struct tlbe *gtlbe;
521 int tlbsel, esel, stlbsel, sesel; 823 int tlbsel, esel;
522 824
523 tlbsel = get_tlb_tlbsel(vcpu_e500); 825 tlbsel = get_tlb_tlbsel(vcpu_e500);
524 esel = get_tlb_esel(vcpu_e500, tlbsel); 826 esel = get_tlb_esel(vcpu_e500, tlbsel);
525 827
526 gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel]; 828 gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
527 829
528 if (get_tlb_v(gtlbe) && tlbsel == 1) { 830 if (get_tlb_v(gtlbe))
529 eaddr = get_tlb_eaddr(gtlbe); 831 kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);
530 tid = get_tlb_tid(gtlbe);
531 kvmppc_e500_tlb1_invalidate(vcpu_e500, eaddr,
532 get_tlb_end(gtlbe), tid);
533 }
534 832
535 gtlbe->mas1 = vcpu_e500->mas1; 833 gtlbe->mas1 = vcpu_e500->mas1;
536 gtlbe->mas2 = vcpu_e500->mas2; 834 gtlbe->mas2 = vcpu_e500->mas2;
@@ -542,6 +840,12 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
542 840
543 /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ 841 /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
544 if (tlbe_is_host_safe(vcpu, gtlbe)) { 842 if (tlbe_is_host_safe(vcpu, gtlbe)) {
843 struct tlbe stlbe;
844 int stlbsel, sesel;
845 u64 eaddr;
846 u64 raddr;
847
848 preempt_disable();
545 switch (tlbsel) { 849 switch (tlbsel) {
546 case 0: 850 case 0:
547 /* TLB0 */ 851 /* TLB0 */
@@ -549,7 +853,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
549 gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K); 853 gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
550 854
551 stlbsel = 0; 855 stlbsel = 0;
552 sesel = kvmppc_e500_stlbe_map(vcpu_e500, 0, esel); 856 sesel = kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
553 857
554 break; 858 break;
555 859
@@ -564,13 +868,14 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
564 * are mapped on the fly. */ 868 * are mapped on the fly. */
565 stlbsel = 1; 869 stlbsel = 1;
566 sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, 870 sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
567 raddr >> PAGE_SHIFT, gtlbe); 871 raddr >> PAGE_SHIFT, gtlbe, &stlbe);
568 break; 872 break;
569 873
570 default: 874 default:
571 BUG(); 875 BUG();
572 } 876 }
573 write_host_tlbe(vcpu_e500, stlbsel, sesel); 877 write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
878 preempt_enable();
574 } 879 }
575 880
576 kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); 881 kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
@@ -610,7 +915,7 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
610{ 915{
611 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 916 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
612 struct tlbe *gtlbe = 917 struct tlbe *gtlbe =
613 &vcpu_e500->guest_tlb[tlbsel_of(index)][esel_of(index)]; 918 &vcpu_e500->gtlb_arch[tlbsel_of(index)][esel_of(index)];
614 u64 pgmask = get_tlb_bytes(gtlbe) - 1; 919 u64 pgmask = get_tlb_bytes(gtlbe) - 1;
615 920
616 return get_tlb_raddr(gtlbe) | (eaddr & pgmask); 921 return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
@@ -618,38 +923,37 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
618 923
619void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) 924void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
620{ 925{
621 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
622 int tlbsel, i;
623
624 for (tlbsel = 0; tlbsel < 2; tlbsel++)
625 for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++)
626 kvmppc_e500_shadow_release(vcpu_e500, tlbsel, i);
627
628 /* discard all guest mapping */
629 _tlbil_all();
630} 926}
631 927
632void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, 928void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
633 unsigned int index) 929 unsigned int index)
634{ 930{
635 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 931 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
932 struct tlbe_priv *priv;
933 struct tlbe *gtlbe, stlbe;
636 int tlbsel = tlbsel_of(index); 934 int tlbsel = tlbsel_of(index);
637 int esel = esel_of(index); 935 int esel = esel_of(index);
638 int stlbsel, sesel; 936 int stlbsel, sesel;
639 937
938 gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
939
940 preempt_disable();
640 switch (tlbsel) { 941 switch (tlbsel) {
641 case 0: 942 case 0:
642 stlbsel = 0; 943 stlbsel = 0;
643 sesel = esel; 944 sesel = esel;
945 priv = &vcpu_e500->gtlb_priv[stlbsel][sesel];
946
947 kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
948 priv, eaddr, &stlbe);
644 break; 949 break;
645 950
646 case 1: { 951 case 1: {
647 gfn_t gfn = gpaddr >> PAGE_SHIFT; 952 gfn_t gfn = gpaddr >> PAGE_SHIFT;
648 struct tlbe *gtlbe
649 = &vcpu_e500->guest_tlb[tlbsel][esel];
650 953
651 stlbsel = 1; 954 stlbsel = 1;
652 sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe); 955 sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
956 gtlbe, &stlbe);
653 break; 957 break;
654 } 958 }
655 959
@@ -657,7 +961,9 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
657 BUG(); 961 BUG();
658 break; 962 break;
659 } 963 }
660 write_host_tlbe(vcpu_e500, stlbsel, sesel); 964
965 write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
966 preempt_enable();
661} 967}
662 968
663int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu, 969int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
@@ -679,8 +985,10 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
679{ 985{
680 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 986 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
681 987
682 vcpu_e500->pid[0] = vcpu->arch.shadow_pid = 988 if (vcpu->arch.pid != pid) {
683 vcpu->arch.pid = pid; 989 vcpu_e500->pid[0] = vcpu->arch.pid = pid;
990 kvmppc_e500_recalc_shadow_pid(vcpu_e500);
991 }
684} 992}
685 993
686void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500) 994void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -688,14 +996,14 @@ void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
688 struct tlbe *tlbe; 996 struct tlbe *tlbe;
689 997
690 /* Insert large initial mapping for guest. */ 998 /* Insert large initial mapping for guest. */
691 tlbe = &vcpu_e500->guest_tlb[1][0]; 999 tlbe = &vcpu_e500->gtlb_arch[1][0];
692 tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M); 1000 tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
693 tlbe->mas2 = 0; 1001 tlbe->mas2 = 0;
694 tlbe->mas3 = E500_TLB_SUPER_PERM_MASK; 1002 tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
695 tlbe->mas7 = 0; 1003 tlbe->mas7 = 0;
696 1004
697 /* 4K map for serial output. Used by kernel wrapper. */ 1005 /* 4K map for serial output. Used by kernel wrapper. */
698 tlbe = &vcpu_e500->guest_tlb[1][1]; 1006 tlbe = &vcpu_e500->gtlb_arch[1][1];
699 tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K); 1007 tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
700 tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G; 1008 tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
701 tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK; 1009 tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
@@ -706,68 +1014,64 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
706{ 1014{
707 tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF; 1015 tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;
708 1016
709 vcpu_e500->guest_tlb_size[0] = KVM_E500_TLB0_SIZE; 1017 vcpu_e500->gtlb_size[0] = KVM_E500_TLB0_SIZE;
710 vcpu_e500->guest_tlb[0] = 1018 vcpu_e500->gtlb_arch[0] =
711 kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL); 1019 kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
712 if (vcpu_e500->guest_tlb[0] == NULL) 1020 if (vcpu_e500->gtlb_arch[0] == NULL)
713 goto err_out; 1021 goto err_out;
714 1022
715 vcpu_e500->shadow_tlb_size[0] = KVM_E500_TLB0_SIZE; 1023 vcpu_e500->gtlb_size[1] = KVM_E500_TLB1_SIZE;
716 vcpu_e500->shadow_tlb[0] = 1024 vcpu_e500->gtlb_arch[1] =
717 kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
718 if (vcpu_e500->shadow_tlb[0] == NULL)
719 goto err_out_guest0;
720
721 vcpu_e500->guest_tlb_size[1] = KVM_E500_TLB1_SIZE;
722 vcpu_e500->guest_tlb[1] =
723 kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL); 1025 kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
724 if (vcpu_e500->guest_tlb[1] == NULL) 1026 if (vcpu_e500->gtlb_arch[1] == NULL)
725 goto err_out_shadow0; 1027 goto err_out_guest0;
726 1028
727 vcpu_e500->shadow_tlb_size[1] = tlb1_entry_num; 1029 vcpu_e500->gtlb_priv[0] = (struct tlbe_priv *)
728 vcpu_e500->shadow_tlb[1] = 1030 kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
729 kzalloc(sizeof(struct tlbe) * tlb1_entry_num, GFP_KERNEL); 1031 if (vcpu_e500->gtlb_priv[0] == NULL)
730 if (vcpu_e500->shadow_tlb[1] == NULL)
731 goto err_out_guest1; 1032 goto err_out_guest1;
1033 vcpu_e500->gtlb_priv[1] = (struct tlbe_priv *)
1034 kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
732 1035
733 vcpu_e500->shadow_pages[0] = (struct page **) 1036 if (vcpu_e500->gtlb_priv[1] == NULL)
734 kzalloc(sizeof(struct page *) * KVM_E500_TLB0_SIZE, GFP_KERNEL); 1037 goto err_out_priv0;
735 if (vcpu_e500->shadow_pages[0] == NULL)
736 goto err_out_shadow1;
737 1038
738 vcpu_e500->shadow_pages[1] = (struct page **) 1039 if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
739 kzalloc(sizeof(struct page *) * tlb1_entry_num, GFP_KERNEL); 1040 goto err_out_priv1;
740 if (vcpu_e500->shadow_pages[1] == NULL)
741 goto err_out_page0;
742 1041
743 /* Init TLB configuration register */ 1042 /* Init TLB configuration register */
744 vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL; 1043 vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
745 vcpu_e500->tlb0cfg |= vcpu_e500->guest_tlb_size[0]; 1044 vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_size[0];
746 vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL; 1045 vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
747 vcpu_e500->tlb1cfg |= vcpu_e500->guest_tlb_size[1]; 1046 vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_size[1];
748 1047
749 return 0; 1048 return 0;
750 1049
751err_out_page0: 1050err_out_priv1:
752 kfree(vcpu_e500->shadow_pages[0]); 1051 kfree(vcpu_e500->gtlb_priv[1]);
753err_out_shadow1: 1052err_out_priv0:
754 kfree(vcpu_e500->shadow_tlb[1]); 1053 kfree(vcpu_e500->gtlb_priv[0]);
755err_out_guest1: 1054err_out_guest1:
756 kfree(vcpu_e500->guest_tlb[1]); 1055 kfree(vcpu_e500->gtlb_arch[1]);
757err_out_shadow0:
758 kfree(vcpu_e500->shadow_tlb[0]);
759err_out_guest0: 1056err_out_guest0:
760 kfree(vcpu_e500->guest_tlb[0]); 1057 kfree(vcpu_e500->gtlb_arch[0]);
761err_out: 1058err_out:
762 return -1; 1059 return -1;
763} 1060}
764 1061
765void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) 1062void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
766{ 1063{
767 kfree(vcpu_e500->shadow_pages[1]); 1064 int stlbsel, i;
768 kfree(vcpu_e500->shadow_pages[0]); 1065
769 kfree(vcpu_e500->shadow_tlb[1]); 1066 /* release all privs */
770 kfree(vcpu_e500->guest_tlb[1]); 1067 for (stlbsel = 0; stlbsel < 2; stlbsel++)
771 kfree(vcpu_e500->shadow_tlb[0]); 1068 for (i = 0; i < vcpu_e500->gtlb_size[stlbsel]; i++) {
772 kfree(vcpu_e500->guest_tlb[0]); 1069 struct tlbe_priv *priv =
1070 &vcpu_e500->gtlb_priv[stlbsel][i];
1071 kvmppc_e500_priv_release(priv);
1072 }
1073
1074 kvmppc_e500_id_table_free(vcpu_e500);
1075 kfree(vcpu_e500->gtlb_arch[1]);
1076 kfree(vcpu_e500->gtlb_arch[0]);
773} 1077}
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h
index 458946b4775d..59b88e99a235 100644
--- a/arch/powerpc/kvm/e500_tlb.h
+++ b/arch/powerpc/kvm/e500_tlb.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved. 2 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
3 * 3 *
4 * Author: Yu Liu, yu.liu@freescale.com 4 * Author: Yu Liu, yu.liu@freescale.com
5 * 5 *
@@ -55,6 +55,7 @@ extern void kvmppc_e500_tlb_load(struct kvm_vcpu *, int);
55extern int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *); 55extern int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *);
56extern void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *); 56extern void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *);
57extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *); 57extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *);
58extern void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *);
58 59
59/* TLB helper functions */ 60/* TLB helper functions */
60static inline unsigned int get_tlb_size(const struct tlbe *tlbe) 61static inline unsigned int get_tlb_size(const struct tlbe *tlbe)
@@ -110,6 +111,16 @@ static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
110 return vcpu->arch.pid & 0xff; 111 return vcpu->arch.pid & 0xff;
111} 112}
112 113
114static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
115{
116 return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
117}
118
119static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
120{
121 return !!(vcpu->arch.shared->msr & MSR_PR);
122}
123
113static inline unsigned int get_cur_spid( 124static inline unsigned int get_cur_spid(
114 const struct kvmppc_vcpu_e500 *vcpu_e500) 125 const struct kvmppc_vcpu_e500 *vcpu_e500)
115{ 126{
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 616dd516ca1f..a107c9be0fb1 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -30,6 +30,7 @@
30#include <asm/uaccess.h> 30#include <asm/uaccess.h>
31#include <asm/kvm_ppc.h> 31#include <asm/kvm_ppc.h>
32#include <asm/tlbflush.h> 32#include <asm/tlbflush.h>
33#include <asm/cputhreads.h>
33#include "timing.h" 34#include "timing.h"
34#include "../mm/mmu_decl.h" 35#include "../mm/mmu_decl.h"
35 36
@@ -38,8 +39,12 @@
38 39
39int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) 40int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
40{ 41{
42#ifndef CONFIG_KVM_BOOK3S_64_HV
41 return !(v->arch.shared->msr & MSR_WE) || 43 return !(v->arch.shared->msr & MSR_WE) ||
42 !!(v->arch.pending_exceptions); 44 !!(v->arch.pending_exceptions);
45#else
46 return !(v->arch.ceded) || !!(v->arch.pending_exceptions);
47#endif
43} 48}
44 49
45int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) 50int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
@@ -73,7 +78,8 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
73 } 78 }
74 case HC_VENDOR_KVM | KVM_HC_FEATURES: 79 case HC_VENDOR_KVM | KVM_HC_FEATURES:
75 r = HC_EV_SUCCESS; 80 r = HC_EV_SUCCESS;
76#if defined(CONFIG_PPC_BOOK3S) /* XXX Missing magic page on BookE */ 81#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
82 /* XXX Missing magic page on 44x */
77 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); 83 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
78#endif 84#endif
79 85
@@ -147,7 +153,7 @@ void kvm_arch_check_processor_compat(void *rtn)
147 153
148int kvm_arch_init_vm(struct kvm *kvm) 154int kvm_arch_init_vm(struct kvm *kvm)
149{ 155{
150 return 0; 156 return kvmppc_core_init_vm(kvm);
151} 157}
152 158
153void kvm_arch_destroy_vm(struct kvm *kvm) 159void kvm_arch_destroy_vm(struct kvm *kvm)
@@ -163,6 +169,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
163 kvm->vcpus[i] = NULL; 169 kvm->vcpus[i] = NULL;
164 170
165 atomic_set(&kvm->online_vcpus, 0); 171 atomic_set(&kvm->online_vcpus, 0);
172
173 kvmppc_core_destroy_vm(kvm);
174
166 mutex_unlock(&kvm->lock); 175 mutex_unlock(&kvm->lock);
167} 176}
168 177
@@ -180,10 +189,13 @@ int kvm_dev_ioctl_check_extension(long ext)
180#else 189#else
181 case KVM_CAP_PPC_SEGSTATE: 190 case KVM_CAP_PPC_SEGSTATE:
182#endif 191#endif
183 case KVM_CAP_PPC_PAIRED_SINGLES:
184 case KVM_CAP_PPC_UNSET_IRQ: 192 case KVM_CAP_PPC_UNSET_IRQ:
185 case KVM_CAP_PPC_IRQ_LEVEL: 193 case KVM_CAP_PPC_IRQ_LEVEL:
186 case KVM_CAP_ENABLE_CAP: 194 case KVM_CAP_ENABLE_CAP:
195 r = 1;
196 break;
197#ifndef CONFIG_KVM_BOOK3S_64_HV
198 case KVM_CAP_PPC_PAIRED_SINGLES:
187 case KVM_CAP_PPC_OSI: 199 case KVM_CAP_PPC_OSI:
188 case KVM_CAP_PPC_GET_PVINFO: 200 case KVM_CAP_PPC_GET_PVINFO:
189 r = 1; 201 r = 1;
@@ -191,6 +203,21 @@ int kvm_dev_ioctl_check_extension(long ext)
191 case KVM_CAP_COALESCED_MMIO: 203 case KVM_CAP_COALESCED_MMIO:
192 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 204 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
193 break; 205 break;
206#endif
207#ifdef CONFIG_KVM_BOOK3S_64_HV
208 case KVM_CAP_SPAPR_TCE:
209 r = 1;
210 break;
211 case KVM_CAP_PPC_SMT:
212 r = threads_per_core;
213 break;
214 case KVM_CAP_PPC_RMA:
215 r = 1;
216 /* PPC970 requires an RMA */
217 if (cpu_has_feature(CPU_FTR_ARCH_201))
218 r = 2;
219 break;
220#endif
194 default: 221 default:
195 r = 0; 222 r = 0;
196 break; 223 break;
@@ -211,7 +238,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
211 struct kvm_userspace_memory_region *mem, 238 struct kvm_userspace_memory_region *mem,
212 int user_alloc) 239 int user_alloc)
213{ 240{
214 return 0; 241 return kvmppc_core_prepare_memory_region(kvm, mem);
215} 242}
216 243
217void kvm_arch_commit_memory_region(struct kvm *kvm, 244void kvm_arch_commit_memory_region(struct kvm *kvm,
@@ -219,7 +246,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
219 struct kvm_memory_slot old, 246 struct kvm_memory_slot old,
220 int user_alloc) 247 int user_alloc)
221{ 248{
222 return; 249 kvmppc_core_commit_memory_region(kvm, mem);
223} 250}
224 251
225 252
@@ -287,6 +314,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
287 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); 314 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
288 tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu); 315 tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
289 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; 316 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
317 vcpu->arch.dec_expires = ~(u64)0;
290 318
291#ifdef CONFIG_KVM_EXIT_TIMING 319#ifdef CONFIG_KVM_EXIT_TIMING
292 mutex_init(&vcpu->arch.exit_timing_lock); 320 mutex_init(&vcpu->arch.exit_timing_lock);
@@ -313,6 +341,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
313 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); 341 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
314#endif 342#endif
315 kvmppc_core_vcpu_load(vcpu, cpu); 343 kvmppc_core_vcpu_load(vcpu, cpu);
344 vcpu->cpu = smp_processor_id();
316} 345}
317 346
318void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 347void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -321,6 +350,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
321#ifdef CONFIG_BOOKE 350#ifdef CONFIG_BOOKE
322 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); 351 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
323#endif 352#endif
353 vcpu->cpu = -1;
324} 354}
325 355
326int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 356int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
@@ -492,15 +522,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
492 for (i = 0; i < 32; i++) 522 for (i = 0; i < 32; i++)
493 kvmppc_set_gpr(vcpu, i, gprs[i]); 523 kvmppc_set_gpr(vcpu, i, gprs[i]);
494 vcpu->arch.osi_needed = 0; 524 vcpu->arch.osi_needed = 0;
525 } else if (vcpu->arch.hcall_needed) {
526 int i;
527
528 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
529 for (i = 0; i < 9; ++i)
530 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
531 vcpu->arch.hcall_needed = 0;
495 } 532 }
496 533
497 kvmppc_core_deliver_interrupts(vcpu); 534 kvmppc_core_deliver_interrupts(vcpu);
498 535
499 local_irq_disable(); 536 r = kvmppc_vcpu_run(run, vcpu);
500 kvm_guest_enter();
501 r = __kvmppc_vcpu_run(run, vcpu);
502 kvm_guest_exit();
503 local_irq_enable();
504 537
505 if (vcpu->sigset_active) 538 if (vcpu->sigset_active)
506 sigprocmask(SIG_SETMASK, &sigsaved, NULL); 539 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
@@ -518,6 +551,8 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
518 if (waitqueue_active(&vcpu->wq)) { 551 if (waitqueue_active(&vcpu->wq)) {
519 wake_up_interruptible(&vcpu->wq); 552 wake_up_interruptible(&vcpu->wq);
520 vcpu->stat.halt_wakeup++; 553 vcpu->stat.halt_wakeup++;
554 } else if (vcpu->cpu != -1) {
555 smp_send_reschedule(vcpu->cpu);
521 } 556 }
522 557
523 return 0; 558 return 0;
@@ -633,6 +668,29 @@ long kvm_arch_vm_ioctl(struct file *filp,
633 668
634 break; 669 break;
635 } 670 }
671#ifdef CONFIG_KVM_BOOK3S_64_HV
672 case KVM_CREATE_SPAPR_TCE: {
673 struct kvm_create_spapr_tce create_tce;
674 struct kvm *kvm = filp->private_data;
675
676 r = -EFAULT;
677 if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
678 goto out;
679 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
680 goto out;
681 }
682
683 case KVM_ALLOCATE_RMA: {
684 struct kvm *kvm = filp->private_data;
685 struct kvm_allocate_rma rma;
686
687 r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
688 if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
689 r = -EFAULT;
690 break;
691 }
692#endif /* CONFIG_KVM_BOOK3S_64_HV */
693
636 default: 694 default:
637 r = -ENOTTY; 695 r = -ENOTTY;
638 } 696 }
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index 319177df9587..07b6110a4bb7 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -56,15 +56,6 @@ static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
56{ 56{
57 u64 old; 57 u64 old;
58 58
59 do_div(duration, tb_ticks_per_usec);
60 if (unlikely(duration > 0xFFFFFFFF)) {
61 printk(KERN_ERR"%s - duration too big -> overflow"
62 " duration %lld type %d exit #%d\n",
63 __func__, duration, type,
64 vcpu->arch.timing_count_type[type]);
65 return;
66 }
67
68 mutex_lock(&vcpu->arch.exit_timing_lock); 59 mutex_lock(&vcpu->arch.exit_timing_lock);
69 60
70 vcpu->arch.timing_count_type[type]++; 61 vcpu->arch.timing_count_type[type]++;
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 3aca1b042b8c..b135d3d397db 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -103,7 +103,7 @@ TRACE_EVENT(kvm_gtlb_write,
103 * Book3S trace points * 103 * Book3S trace points *
104 *************************************************************************/ 104 *************************************************************************/
105 105
106#ifdef CONFIG_PPC_BOOK3S 106#ifdef CONFIG_KVM_BOOK3S_PR
107 107
108TRACE_EVENT(kvm_book3s_exit, 108TRACE_EVENT(kvm_book3s_exit,
109 TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu), 109 TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
@@ -252,7 +252,7 @@ TRACE_EVENT(kvm_book3s_mmu_flush,
252 ), 252 ),
253 253
254 TP_fast_assign( 254 TP_fast_assign(
255 __entry->count = vcpu->arch.hpte_cache_count; 255 __entry->count = to_book3s(vcpu)->hpte_cache_count;
256 __entry->p1 = p1; 256 __entry->p1 = p1;
257 __entry->p2 = p2; 257 __entry->p2 = p2;
258 __entry->type = type; 258 __entry->type = type;
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index 024acab588fd..f60e006d90c3 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -186,10 +186,11 @@ void __init MMU_init_hw(void)
186unsigned long __init mmu_mapin_ram(unsigned long top) 186unsigned long __init mmu_mapin_ram(unsigned long top)
187{ 187{
188 unsigned long addr; 188 unsigned long addr;
189 unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);
189 190
190 /* Pin in enough TLBs to cover any lowmem not covered by the 191 /* Pin in enough TLBs to cover any lowmem not covered by the
191 * initial 256M mapping established in head_44x.S */ 192 * initial 256M mapping established in head_44x.S */
192 for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr; 193 for (addr = memstart + PPC_PIN_SIZE; addr < lowmem_end_addr;
193 addr += PPC_PIN_SIZE) { 194 addr += PPC_PIN_SIZE) {
194 if (mmu_has_feature(MMU_FTR_TYPE_47x)) 195 if (mmu_has_feature(MMU_FTR_TYPE_47x))
195 ppc47x_pin_tlb(addr + PAGE_OFFSET, addr); 196 ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
@@ -218,19 +219,25 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
218void setup_initial_memory_limit(phys_addr_t first_memblock_base, 219void setup_initial_memory_limit(phys_addr_t first_memblock_base,
219 phys_addr_t first_memblock_size) 220 phys_addr_t first_memblock_size)
220{ 221{
222 u64 size;
223
224#ifndef CONFIG_RELOCATABLE
221 /* We don't currently support the first MEMBLOCK not mapping 0 225 /* We don't currently support the first MEMBLOCK not mapping 0
222 * physical on those processors 226 * physical on those processors
223 */ 227 */
224 BUG_ON(first_memblock_base != 0); 228 BUG_ON(first_memblock_base != 0);
229#endif
225 230
226 /* 44x has a 256M TLB entry pinned at boot */ 231 /* 44x has a 256M TLB entry pinned at boot */
227 memblock_set_current_limit(min_t(u64, first_memblock_size, PPC_PIN_SIZE)); 232 size = (min_t(u64, first_memblock_size, PPC_PIN_SIZE));
233 memblock_set_current_limit(first_memblock_base + size);
228} 234}
229 235
230#ifdef CONFIG_SMP 236#ifdef CONFIG_SMP
231void __cpuinit mmu_init_secondary(int cpu) 237void __cpuinit mmu_init_secondary(int cpu)
232{ 238{
233 unsigned long addr; 239 unsigned long addr;
240 unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);
234 241
235 /* Pin in enough TLBs to cover any lowmem not covered by the 242 /* Pin in enough TLBs to cover any lowmem not covered by the
236 * initial 256M mapping established in head_44x.S 243 * initial 256M mapping established in head_44x.S
@@ -241,7 +248,7 @@ void __cpuinit mmu_init_secondary(int cpu)
241 * stack. current (r2) isn't initialized, smp_processor_id() 248 * stack. current (r2) isn't initialized, smp_processor_id()
242 * will not work, current thread info isn't accessible, ... 249 * will not work, current thread info isn't accessible, ...
243 */ 250 */
244 for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr; 251 for (addr = memstart + PPC_PIN_SIZE; addr < lowmem_end_addr;
245 addr += PPC_PIN_SIZE) { 252 addr += PPC_PIN_SIZE) {
246 if (mmu_has_feature(MMU_FTR_TYPE_47x)) 253 if (mmu_has_feature(MMU_FTR_TYPE_47x))
247 ppc47x_pin_tlb(addr + PAGE_OFFSET, addr); 254 ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index ad35f66c69e8..5efe8c96d37f 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -174,7 +174,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
174 die("Weird page fault", regs, SIGSEGV); 174 die("Weird page fault", regs, SIGSEGV);
175 } 175 }
176 176
177 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 177 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
178 178
179 /* When running in the kernel we expect faults to occur only to 179 /* When running in the kernel we expect faults to occur only to
180 * addresses in user space. All other faults represent errors in the 180 * addresses in user space. All other faults represent errors in the
@@ -320,7 +320,7 @@ good_area:
320 } 320 }
321 if (ret & VM_FAULT_MAJOR) { 321 if (ret & VM_FAULT_MAJOR) {
322 current->maj_flt++; 322 current->maj_flt++;
323 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 323 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
324 regs, address); 324 regs, address);
325#ifdef CONFIG_PPC_SMLPAR 325#ifdef CONFIG_PPC_SMLPAR
326 if (firmware_has_feature(FW_FEATURE_CMO)) { 326 if (firmware_has_feature(FW_FEATURE_CMO)) {
@@ -331,7 +331,7 @@ good_area:
331#endif 331#endif
332 } else { 332 } else {
333 current->min_flt++; 333 current->min_flt++;
334 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 334 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
335 regs, address); 335 regs, address);
336 } 336 }
337 up_read(&mm->mmap_sem); 337 up_read(&mm->mmap_sem);
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index dfd764896db0..90039bc64119 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -37,7 +37,7 @@
37 37
38#define HPTE_LOCK_BIT 3 38#define HPTE_LOCK_BIT 3
39 39
40static DEFINE_RAW_SPINLOCK(native_tlbie_lock); 40DEFINE_RAW_SPINLOCK(native_tlbie_lock);
41 41
42static inline void __tlbie(unsigned long va, int psize, int ssize) 42static inline void __tlbie(unsigned long va, int psize, int ssize)
43{ 43{
@@ -51,7 +51,7 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
51 va &= ~0xffful; 51 va &= ~0xffful;
52 va |= ssize << 8; 52 va |= ssize << 8;
53 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2) 53 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
54 : : "r" (va), "r"(0), "i" (CPU_FTR_HVMODE_206) 54 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
55 : "memory"); 55 : "memory");
56 break; 56 break;
57 default: 57 default:
@@ -61,7 +61,7 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
61 va |= ssize << 8; 61 va |= ssize << 8;
62 va |= 1; /* L */ 62 va |= 1; /* L */
63 asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2) 63 asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
64 : : "r" (va), "r"(0), "i" (CPU_FTR_HVMODE_206) 64 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
65 : "memory"); 65 : "memory");
66 break; 66 break;
67 } 67 }
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 5de0f254dbb5..c77fef56dad6 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -191,38 +191,6 @@ void __init *early_get_page(void)
191 return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE)); 191 return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
192} 192}
193 193
194/* Free up now-unused memory */
195static void free_sec(unsigned long start, unsigned long end, const char *name)
196{
197 unsigned long cnt = 0;
198
199 while (start < end) {
200 ClearPageReserved(virt_to_page(start));
201 init_page_count(virt_to_page(start));
202 free_page(start);
203 cnt++;
204 start += PAGE_SIZE;
205 }
206 if (cnt) {
207 printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name);
208 totalram_pages += cnt;
209 }
210}
211
212void free_initmem(void)
213{
214#define FREESEC(TYPE) \
215 free_sec((unsigned long)(&__ ## TYPE ## _begin), \
216 (unsigned long)(&__ ## TYPE ## _end), \
217 #TYPE);
218
219 printk ("Freeing unused kernel memory:");
220 FREESEC(init);
221 printk("\n");
222 ppc_md.progress = NULL;
223#undef FREESEC
224}
225
226#ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */ 194#ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */
227void setup_initial_memory_limit(phys_addr_t first_memblock_base, 195void setup_initial_memory_limit(phys_addr_t first_memblock_base,
228 phys_addr_t first_memblock_size) 196 phys_addr_t first_memblock_size)
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index f6dbb4c20e64..e94b57fb79a0 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -83,22 +83,6 @@ EXPORT_SYMBOL_GPL(memstart_addr);
83phys_addr_t kernstart_addr; 83phys_addr_t kernstart_addr;
84EXPORT_SYMBOL_GPL(kernstart_addr); 84EXPORT_SYMBOL_GPL(kernstart_addr);
85 85
86void free_initmem(void)
87{
88 unsigned long addr;
89
90 addr = (unsigned long)__init_begin;
91 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
92 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
93 ClearPageReserved(virt_to_page(addr));
94 init_page_count(virt_to_page(addr));
95 free_page(addr);
96 totalram_pages++;
97 }
98 printk ("Freeing unused kernel memory: %luk freed\n",
99 ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
100}
101
102static void pgd_ctor(void *addr) 86static void pgd_ctor(void *addr)
103{ 87{
104 memset(addr, 0, PGD_TABLE_SIZE); 88 memset(addr, 0, PGD_TABLE_SIZE);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 29d4dde65c45..c781bbcf7338 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -249,7 +249,7 @@ static int __init mark_nonram_nosave(void)
249 */ 249 */
250void __init paging_init(void) 250void __init paging_init(void)
251{ 251{
252 unsigned long total_ram = memblock_phys_mem_size(); 252 unsigned long long total_ram = memblock_phys_mem_size();
253 phys_addr_t top_of_ram = memblock_end_of_DRAM(); 253 phys_addr_t top_of_ram = memblock_end_of_DRAM();
254 unsigned long max_zone_pfns[MAX_NR_ZONES]; 254 unsigned long max_zone_pfns[MAX_NR_ZONES];
255 255
@@ -269,7 +269,7 @@ void __init paging_init(void)
269 kmap_prot = PAGE_KERNEL; 269 kmap_prot = PAGE_KERNEL;
270#endif /* CONFIG_HIGHMEM */ 270#endif /* CONFIG_HIGHMEM */
271 271
272 printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%lx\n", 272 printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
273 (unsigned long long)top_of_ram, total_ram); 273 (unsigned long long)top_of_ram, total_ram);
274 printk(KERN_DEBUG "Memory hole size: %ldMB\n", 274 printk(KERN_DEBUG "Memory hole size: %ldMB\n",
275 (long int)((top_of_ram - total_ram) >> 20)); 275 (long int)((top_of_ram - total_ram) >> 20));
@@ -337,8 +337,9 @@ void __init mem_init(void)
337 337
338 highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT; 338 highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
339 for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) { 339 for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
340 phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
340 struct page *page = pfn_to_page(pfn); 341 struct page *page = pfn_to_page(pfn);
341 if (memblock_is_reserved(pfn << PAGE_SHIFT)) 342 if (memblock_is_reserved(paddr))
342 continue; 343 continue;
343 ClearPageReserved(page); 344 ClearPageReserved(page);
344 init_page_count(page); 345 init_page_count(page);
@@ -352,6 +353,15 @@ void __init mem_init(void)
352 } 353 }
353#endif /* CONFIG_HIGHMEM */ 354#endif /* CONFIG_HIGHMEM */
354 355
356#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
357 /*
358 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
359 * functions.... do it here for the non-smp case.
360 */
361 per_cpu(next_tlbcam_idx, smp_processor_id()) =
362 (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
363#endif
364
355 printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, " 365 printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
356 "%luk reserved, %luk data, %luk bss, %luk init)\n", 366 "%luk reserved, %luk data, %luk bss, %luk init)\n",
357 nr_free_pages() << (PAGE_SHIFT-10), 367 nr_free_pages() << (PAGE_SHIFT-10),
@@ -382,6 +392,25 @@ void __init mem_init(void)
382 mem_init_done = 1; 392 mem_init_done = 1;
383} 393}
384 394
395void free_initmem(void)
396{
397 unsigned long addr;
398
399 ppc_md.progress = ppc_printk_progress;
400
401 addr = (unsigned long)__init_begin;
402 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
403 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
404 ClearPageReserved(virt_to_page(addr));
405 init_page_count(virt_to_page(addr));
406 free_page(addr);
407 totalram_pages++;
408 }
409 pr_info("Freeing unused kernel memory: %luk freed\n",
410 ((unsigned long)__init_end -
411 (unsigned long)__init_begin) >> 10);
412}
413
385#ifdef CONFIG_BLK_DEV_INITRD 414#ifdef CONFIG_BLK_DEV_INITRD
386void __init free_initrd_mem(unsigned long start, unsigned long end) 415void __init free_initrd_mem(unsigned long start, unsigned long end)
387{ 416{
diff --git a/arch/powerpc/mm/tlb_hash32.c b/arch/powerpc/mm/tlb_hash32.c
index 27b863c14941..9a445f64accd 100644
--- a/arch/powerpc/mm/tlb_hash32.c
+++ b/arch/powerpc/mm/tlb_hash32.c
@@ -177,3 +177,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
177 flush_range(vma->vm_mm, start, end); 177 flush_range(vma->vm_mm, start, end);
178} 178}
179EXPORT_SYMBOL(flush_tlb_range); 179EXPORT_SYMBOL(flush_tlb_range);
180
181void __init early_init_mmu(void)
182{
183}
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index af0892209417..4ebb34bc01d6 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -30,6 +30,212 @@
30#define VPTE_PGD_SHIFT (VPTE_PUD_SHIFT + PUD_INDEX_SIZE) 30#define VPTE_PGD_SHIFT (VPTE_PUD_SHIFT + PUD_INDEX_SIZE)
31#define VPTE_INDEX_SIZE (VPTE_PGD_SHIFT + PGD_INDEX_SIZE) 31#define VPTE_INDEX_SIZE (VPTE_PGD_SHIFT + PGD_INDEX_SIZE)
32 32
33/**********************************************************************
34 * *
35 * TLB miss handling for Book3E with a bolted linear mapping *
36 * No virtual page table, no nested TLB misses *
37 * *
38 **********************************************************************/
39
40.macro tlb_prolog_bolted addr
41 mtspr SPRN_SPRG_TLB_SCRATCH,r13
42 mfspr r13,SPRN_SPRG_PACA
43 std r10,PACA_EXTLB+EX_TLB_R10(r13)
44 mfcr r10
45 std r11,PACA_EXTLB+EX_TLB_R11(r13)
46 std r16,PACA_EXTLB+EX_TLB_R16(r13)
47 mfspr r16,\addr /* get faulting address */
48 std r14,PACA_EXTLB+EX_TLB_R14(r13)
49 ld r14,PACAPGD(r13)
50 std r15,PACA_EXTLB+EX_TLB_R15(r13)
51 std r10,PACA_EXTLB+EX_TLB_CR(r13)
52 TLB_MISS_PROLOG_STATS_BOLTED
53.endm
54
55.macro tlb_epilog_bolted
56 ld r14,PACA_EXTLB+EX_TLB_CR(r13)
57 ld r10,PACA_EXTLB+EX_TLB_R10(r13)
58 ld r11,PACA_EXTLB+EX_TLB_R11(r13)
59 mtcr r14
60 ld r14,PACA_EXTLB+EX_TLB_R14(r13)
61 ld r15,PACA_EXTLB+EX_TLB_R15(r13)
62 TLB_MISS_RESTORE_STATS_BOLTED
63 ld r16,PACA_EXTLB+EX_TLB_R16(r13)
64 mfspr r13,SPRN_SPRG_TLB_SCRATCH
65.endm
66
67/* Data TLB miss */
68 START_EXCEPTION(data_tlb_miss_bolted)
69 tlb_prolog_bolted SPRN_DEAR
70
71 /* We need _PAGE_PRESENT and _PAGE_ACCESSED set */
72
73 /* We do the user/kernel test for the PID here along with the RW test
74 */
75 /* We pre-test some combination of permissions to avoid double
76 * faults:
77 *
78 * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE
79 * ESR_ST is 0x00800000
80 * _PAGE_BAP_SW is 0x00000010
81 * So the shift is >> 19. This tests for supervisor writeability.
82 * If the page happens to be supervisor writeable and not user
83 * writeable, we will take a new fault later, but that should be
84 * a rare enough case.
85 *
86 * We also move ESR_ST in _PAGE_DIRTY position
87 * _PAGE_DIRTY is 0x00001000 so the shift is >> 11
88 *
89 * MAS1 is preset for all we need except for TID that needs to
90 * be cleared for kernel translations
91 */
92
93 mfspr r11,SPRN_ESR
94
95 srdi r15,r16,60 /* get region */
96 rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
97 bne- dtlb_miss_fault_bolted
98
99 rlwinm r10,r11,32-19,27,27
100 rlwimi r10,r11,32-16,19,19
101 cmpwi r15,0
102 ori r10,r10,_PAGE_PRESENT
103 oris r11,r10,_PAGE_ACCESSED@h
104
105 TLB_MISS_STATS_SAVE_INFO_BOLTED
106 bne tlb_miss_kernel_bolted
107
108tlb_miss_common_bolted:
109/*
110 * This is the guts of the TLB miss handler for bolted-linear.
111 * We are entered with:
112 *
113 * r16 = faulting address
114 * r15 = crap (free to use)
115 * r14 = page table base
116 * r13 = PACA
117 * r11 = PTE permission mask
118 * r10 = crap (free to use)
119 */
120 rldicl r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
121 cmpldi cr0,r14,0
122 clrrdi r15,r15,3
123 beq tlb_miss_fault_bolted
124
125BEGIN_MMU_FTR_SECTION
126 /* Set the TLB reservation and search for existing entry. Then load
127 * the entry.
128 */
129 PPC_TLBSRX_DOT(0,r16)
130 ldx r14,r14,r15
131 beq normal_tlb_miss_done
132MMU_FTR_SECTION_ELSE
133 ldx r14,r14,r15
134ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
135
136#ifndef CONFIG_PPC_64K_PAGES
137 rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
138 clrrdi r15,r15,3
139
140 cmpldi cr0,r14,0
141 beq tlb_miss_fault_bolted
142
143 ldx r14,r14,r15
144#endif /* CONFIG_PPC_64K_PAGES */
145
146 rldicl r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
147 clrrdi r15,r15,3
148
149 cmpldi cr0,r14,0
150 beq tlb_miss_fault_bolted
151
152 ldx r14,r14,r15
153
154 rldicl r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3
155 clrrdi r15,r15,3
156
157 cmpldi cr0,r14,0
158 beq tlb_miss_fault_bolted
159
160 ldx r14,r14,r15
161
162 /* Check if required permissions are met */
163 andc. r15,r11,r14
164 rldicr r15,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
165 bne- tlb_miss_fault_bolted
166
167 /* Now we build the MAS:
168 *
169 * MAS 0 : Fully setup with defaults in MAS4 and TLBnCFG
170 * MAS 1 : Almost fully setup
171 * - PID already updated by caller if necessary
172 * - TSIZE need change if !base page size, not
173 * yet implemented for now
174 * MAS 2 : Defaults not useful, need to be redone
175 * MAS 3+7 : Needs to be done
176 */
177 clrrdi r11,r16,12 /* Clear low crap in EA */
178 clrldi r15,r15,12 /* Clear crap at the top */
179 rlwimi r11,r14,32-19,27,31 /* Insert WIMGE */
180 rlwimi r15,r14,32-8,22,25 /* Move in U bits */
181 mtspr SPRN_MAS2,r11
182 andi. r11,r14,_PAGE_DIRTY
183 rlwimi r15,r14,32-2,26,31 /* Move in BAP bits */
184
185 /* Mask out SW and UW if !DIRTY (XXX optimize this !) */
186 bne 1f
187 li r11,MAS3_SW|MAS3_UW
188 andc r15,r15,r11
1891:
190 mtspr SPRN_MAS7_MAS3,r15
191 tlbwe
192
193 TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
194 tlb_epilog_bolted
195 rfi
196
197itlb_miss_kernel_bolted:
198 li r11,_PAGE_PRESENT|_PAGE_BAP_SX /* Base perm */
199 oris r11,r11,_PAGE_ACCESSED@h
200tlb_miss_kernel_bolted:
201 mfspr r10,SPRN_MAS1
202 ld r14,PACA_KERNELPGD(r13)
203 cmpldi cr0,r15,8 /* Check for vmalloc region */
204 rlwinm r10,r10,0,16,1 /* Clear TID */
205 mtspr SPRN_MAS1,r10
206 beq+ tlb_miss_common_bolted
207
208tlb_miss_fault_bolted:
209 /* We need to check if it was an instruction miss */
210 andi. r10,r11,_PAGE_EXEC|_PAGE_BAP_SX
211 bne itlb_miss_fault_bolted
212dtlb_miss_fault_bolted:
213 TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
214 tlb_epilog_bolted
215 b exc_data_storage_book3e
216itlb_miss_fault_bolted:
217 TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
218 tlb_epilog_bolted
219 b exc_instruction_storage_book3e
220
221/* Instruction TLB miss */
222 START_EXCEPTION(instruction_tlb_miss_bolted)
223 tlb_prolog_bolted SPRN_SRR0
224
225 rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
226 srdi r15,r16,60 /* get region */
227 TLB_MISS_STATS_SAVE_INFO_BOLTED
228 bne- itlb_miss_fault_bolted
229
230 li r11,_PAGE_PRESENT|_PAGE_EXEC /* Base perm */
231
232 /* We do the user/kernel test for the PID here along with the RW test
233 */
234
235 cmpldi cr0,r15,0 /* Check for user region */
236 oris r11,r11,_PAGE_ACCESSED@h
237 beq tlb_miss_common_bolted
238 b itlb_miss_kernel_bolted
33 239
34/********************************************************************** 240/**********************************************************************
35 * * 241 * *
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 0bdad3aecc67..d32ec643c231 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -35,6 +35,7 @@
35#include <linux/preempt.h> 35#include <linux/preempt.h>
36#include <linux/spinlock.h> 36#include <linux/spinlock.h>
37#include <linux/memblock.h> 37#include <linux/memblock.h>
38#include <linux/of_fdt.h>
38 39
39#include <asm/tlbflush.h> 40#include <asm/tlbflush.h>
40#include <asm/tlb.h> 41#include <asm/tlb.h>
@@ -102,6 +103,12 @@ unsigned long linear_map_top; /* Top of linear mapping */
102 103
103#endif /* CONFIG_PPC64 */ 104#endif /* CONFIG_PPC64 */
104 105
106#ifdef CONFIG_PPC_FSL_BOOK3E
107/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
108DEFINE_PER_CPU(int, next_tlbcam_idx);
109EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
110#endif
111
105/* 112/*
106 * Base TLB flushing operations: 113 * Base TLB flushing operations:
107 * 114 *
@@ -266,6 +273,17 @@ EXPORT_SYMBOL(flush_tlb_page);
266 273
267#endif /* CONFIG_SMP */ 274#endif /* CONFIG_SMP */
268 275
276#ifdef CONFIG_PPC_47x
277void __init early_init_mmu_47x(void)
278{
279#ifdef CONFIG_SMP
280 unsigned long root = of_get_flat_dt_root();
281 if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
282 mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
283#endif /* CONFIG_SMP */
284}
285#endif /* CONFIG_PPC_47x */
286
269/* 287/*
270 * Flush kernel TLB entries in the given range 288 * Flush kernel TLB entries in the given range
271 */ 289 */
@@ -443,14 +461,27 @@ static void setup_page_sizes(void)
443 } 461 }
444} 462}
445 463
446static void setup_mmu_htw(void) 464static void __patch_exception(int exc, unsigned long addr)
447{ 465{
448 extern unsigned int interrupt_base_book3e; 466 extern unsigned int interrupt_base_book3e;
449 extern unsigned int exc_data_tlb_miss_htw_book3e; 467 unsigned int *ibase = &interrupt_base_book3e;
450 extern unsigned int exc_instruction_tlb_miss_htw_book3e; 468
469 /* Our exceptions vectors start with a NOP and -then- a branch
470 * to deal with single stepping from userspace which stops on
471 * the second instruction. Thus we need to patch the second
472 * instruction of the exception, not the first one
473 */
451 474
452 unsigned int *ibase = &interrupt_base_book3e; 475 patch_branch(ibase + (exc / 4) + 1, addr, 0);
476}
477
478#define patch_exception(exc, name) do { \
479 extern unsigned int name; \
480 __patch_exception((exc), (unsigned long)&name); \
481} while (0)
453 482
483static void setup_mmu_htw(void)
484{
454 /* Check if HW tablewalk is present, and if yes, enable it by: 485 /* Check if HW tablewalk is present, and if yes, enable it by:
455 * 486 *
456 * - patching the TLB miss handlers to branch to the 487 * - patching the TLB miss handlers to branch to the
@@ -462,19 +493,12 @@ static void setup_mmu_htw(void)
462 493
463 if ((tlb0cfg & TLBnCFG_IND) && 494 if ((tlb0cfg & TLBnCFG_IND) &&
464 (tlb0cfg & TLBnCFG_PT)) { 495 (tlb0cfg & TLBnCFG_PT)) {
465 /* Our exceptions vectors start with a NOP and -then- a branch 496 patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
466 * to deal with single stepping from userspace which stops on 497 patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
467 * the second instruction. Thus we need to patch the second
468 * instruction of the exception, not the first one
469 */
470 patch_branch(ibase + (0x1c0 / 4) + 1,
471 (unsigned long)&exc_data_tlb_miss_htw_book3e, 0);
472 patch_branch(ibase + (0x1e0 / 4) + 1,
473 (unsigned long)&exc_instruction_tlb_miss_htw_book3e, 0);
474 book3e_htw_enabled = 1; 498 book3e_htw_enabled = 1;
475 } 499 }
476 pr_info("MMU: Book3E Page Tables %s\n", 500 pr_info("MMU: Book3E HW tablewalk %s\n",
477 book3e_htw_enabled ? "Enabled" : "Disabled"); 501 book3e_htw_enabled ? "enabled" : "not supported");
478} 502}
479 503
480/* 504/*
@@ -549,6 +573,9 @@ static void __early_init_mmu(int boot_cpu)
549 /* limit memory so we dont have linear faults */ 573 /* limit memory so we dont have linear faults */
550 memblock_enforce_memory_limit(linear_map_top); 574 memblock_enforce_memory_limit(linear_map_top);
551 memblock_analyze(); 575 memblock_analyze();
576
577 patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
578 patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e);
552 } 579 }
553#endif 580#endif
554 581
@@ -584,4 +611,11 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
584 /* Finally limit subsequent allocations */ 611 /* Finally limit subsequent allocations */
585 memblock_set_current_limit(first_memblock_base + ppc64_rma_size); 612 memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
586} 613}
614#else /* ! CONFIG_PPC64 */
615void __init early_init_mmu(void)
616{
617#ifdef CONFIG_PPC_47x
618 early_init_mmu_47x();
619#endif
620}
587#endif /* CONFIG_PPC64 */ 621#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/net/Makefile b/arch/powerpc/net/Makefile
new file mode 100644
index 000000000000..266b3950c3ac
--- /dev/null
+++ b/arch/powerpc/net/Makefile
@@ -0,0 +1,4 @@
1#
2# Arch-specific network modules
3#
4obj-$(CONFIG_BPF_JIT) += bpf_jit_64.o bpf_jit_comp.o
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
new file mode 100644
index 000000000000..af1ab5e9a691
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit.h
@@ -0,0 +1,227 @@
1/* bpf_jit.h: BPF JIT compiler for PPC64
2 *
3 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; version 2
8 * of the License.
9 */
10#ifndef _BPF_JIT_H
11#define _BPF_JIT_H
12
13#define BPF_PPC_STACK_LOCALS 32
14#define BPF_PPC_STACK_BASIC (48+64)
15#define BPF_PPC_STACK_SAVE (18*8)
16#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
17 BPF_PPC_STACK_SAVE)
18#define BPF_PPC_SLOWPATH_FRAME (48+64)
19
20/*
21 * Generated code register usage:
22 *
23 * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with:
24 *
25 * skb r3 (Entry parameter)
26 * A register r4
27 * X register r5
28 * addr param r6
29 * r7-r10 scratch
30 * skb->data r14
31 * skb headlen r15 (skb->len - skb->data_len)
32 * m[0] r16
33 * m[...] ...
34 * m[15] r31
35 */
36#define r_skb 3
37#define r_ret 3
38#define r_A 4
39#define r_X 5
40#define r_addr 6
41#define r_scratch1 7
42#define r_D 14
43#define r_HL 15
44#define r_M 16
45
46#ifndef __ASSEMBLY__
47
48/*
49 * Assembly helpers from arch/powerpc/net/bpf_jit.S:
50 */
51extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
52
53#define FUNCTION_DESCR_SIZE 24
54
55/*
56 * 16-bit immediate helper macros: HA() is for use with sign-extending instrs
57 * (e.g. LD, ADDI). If the bottom 16 bits is "-ve", add another bit into the
58 * top half to negate the effect (i.e. 0xffff + 1 = 0x(1)0000).
59 */
60#define IMM_H(i) ((uintptr_t)(i)>>16)
61#define IMM_HA(i) (((uintptr_t)(i)>>16) + \
62 (((uintptr_t)(i) & 0x8000) >> 15))
63#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
64
65#define PLANT_INSTR(d, idx, instr) \
66 do { if (d) { (d)[idx] = instr; } idx++; } while (0)
67#define EMIT(instr) PLANT_INSTR(image, ctx->idx, instr)
68
69#define PPC_NOP() EMIT(PPC_INST_NOP)
70#define PPC_BLR() EMIT(PPC_INST_BLR)
71#define PPC_BLRL() EMIT(PPC_INST_BLRL)
72#define PPC_MTLR(r) EMIT(PPC_INST_MTLR | __PPC_RT(r))
73#define PPC_ADDI(d, a, i) EMIT(PPC_INST_ADDI | __PPC_RT(d) | \
74 __PPC_RA(a) | IMM_L(i))
75#define PPC_MR(d, a) PPC_OR(d, a, a)
76#define PPC_LI(r, i) PPC_ADDI(r, 0, i)
77#define PPC_ADDIS(d, a, i) EMIT(PPC_INST_ADDIS | \
78 __PPC_RS(d) | __PPC_RA(a) | IMM_L(i))
79#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i)
80#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | __PPC_RS(r) | \
81 __PPC_RA(base) | ((i) & 0xfffc))
82
83#define PPC_LD(r, base, i) EMIT(PPC_INST_LD | __PPC_RT(r) | \
84 __PPC_RA(base) | IMM_L(i))
85#define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | __PPC_RT(r) | \
86 __PPC_RA(base) | IMM_L(i))
87#define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | __PPC_RT(r) | \
88 __PPC_RA(base) | IMM_L(i))
89/* Convenience helpers for the above with 'far' offsets: */
90#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \
91 else { PPC_ADDIS(r, base, IMM_HA(i)); \
92 PPC_LD(r, r, IMM_L(i)); } } while(0)
93
94#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LWZ(r, base, i); \
95 else { PPC_ADDIS(r, base, IMM_HA(i)); \
96 PPC_LWZ(r, r, IMM_L(i)); } } while(0)
97
98#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i); \
99 else { PPC_ADDIS(r, base, IMM_HA(i)); \
100 PPC_LHZ(r, r, IMM_L(i)); } } while(0)
101
102#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | __PPC_RA(a) | IMM_L(i))
103#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | __PPC_RA(a) | IMM_L(i))
104#define PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | __PPC_RA(a) | IMM_L(i))
105#define PPC_CMPLW(a, b) EMIT(PPC_INST_CMPLW | __PPC_RA(a) | __PPC_RB(b))
106
107#define PPC_SUB(d, a, b) EMIT(PPC_INST_SUB | __PPC_RT(d) | \
108 __PPC_RB(a) | __PPC_RA(b))
109#define PPC_ADD(d, a, b) EMIT(PPC_INST_ADD | __PPC_RT(d) | \
110 __PPC_RA(a) | __PPC_RB(b))
111#define PPC_MUL(d, a, b) EMIT(PPC_INST_MULLW | __PPC_RT(d) | \
112 __PPC_RA(a) | __PPC_RB(b))
113#define PPC_MULHWU(d, a, b) EMIT(PPC_INST_MULHWU | __PPC_RT(d) | \
114 __PPC_RA(a) | __PPC_RB(b))
115#define PPC_MULI(d, a, i) EMIT(PPC_INST_MULLI | __PPC_RT(d) | \
116 __PPC_RA(a) | IMM_L(i))
117#define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | __PPC_RT(d) | \
118 __PPC_RA(a) | __PPC_RB(b))
119#define PPC_AND(d, a, b) EMIT(PPC_INST_AND | __PPC_RA(d) | \
120 __PPC_RS(a) | __PPC_RB(b))
121#define PPC_ANDI(d, a, i) EMIT(PPC_INST_ANDI | __PPC_RA(d) | \
122 __PPC_RS(a) | IMM_L(i))
123#define PPC_AND_DOT(d, a, b) EMIT(PPC_INST_ANDDOT | __PPC_RA(d) | \
124 __PPC_RS(a) | __PPC_RB(b))
125#define PPC_OR(d, a, b) EMIT(PPC_INST_OR | __PPC_RA(d) | \
126 __PPC_RS(a) | __PPC_RB(b))
127#define PPC_ORI(d, a, i) EMIT(PPC_INST_ORI | __PPC_RA(d) | \
128 __PPC_RS(a) | IMM_L(i))
129#define PPC_ORIS(d, a, i) EMIT(PPC_INST_ORIS | __PPC_RA(d) | \
130 __PPC_RS(a) | IMM_L(i))
131#define PPC_SLW(d, a, s) EMIT(PPC_INST_SLW | __PPC_RA(d) | \
132 __PPC_RS(a) | __PPC_RB(s))
133#define PPC_SRW(d, a, s) EMIT(PPC_INST_SRW | __PPC_RA(d) | \
134 __PPC_RS(a) | __PPC_RB(s))
135/* slwi = rlwinm Rx, Ry, n, 0, 31-n */
136#define PPC_SLWI(d, a, i) EMIT(PPC_INST_RLWINM | __PPC_RA(d) | \
137 __PPC_RS(a) | __PPC_SH(i) | \
138 __PPC_MB(0) | __PPC_ME(31-(i)))
139/* srwi = rlwinm Rx, Ry, 32-n, n, 31 */
140#define PPC_SRWI(d, a, i) EMIT(PPC_INST_RLWINM | __PPC_RA(d) | \
141 __PPC_RS(a) | __PPC_SH(32-(i)) | \
142 __PPC_MB(i) | __PPC_ME(31))
143/* sldi = rldicr Rx, Ry, n, 63-n */
144#define PPC_SLDI(d, a, i) EMIT(PPC_INST_RLDICR | __PPC_RA(d) | \
145 __PPC_RS(a) | __PPC_SH(i) | \
146 __PPC_MB(63-(i)) | (((i) & 0x20) >> 4))
147#define PPC_NEG(d, a) EMIT(PPC_INST_NEG | __PPC_RT(d) | __PPC_RA(a))
148
149/* Long jump; (unconditional 'branch') */
150#define PPC_JMP(dest) EMIT(PPC_INST_BRANCH | \
151 (((dest) - (ctx->idx * 4)) & 0x03fffffc))
152/* "cond" here covers BO:BI fields. */
153#define PPC_BCC_SHORT(cond, dest) EMIT(PPC_INST_BRANCH_COND | \
154 (((cond) & 0x3ff) << 16) | \
155 (((dest) - (ctx->idx * 4)) & \
156 0xfffc))
157#define PPC_LI32(d, i) do { PPC_LI(d, IMM_L(i)); \
158 if ((u32)(uintptr_t)(i) >= 32768) { \
159 PPC_ADDIS(d, d, IMM_HA(i)); \
160 } } while(0)
161#define PPC_LI64(d, i) do { \
162 if (!((uintptr_t)(i) & 0xffffffff00000000ULL)) \
163 PPC_LI32(d, i); \
164 else { \
165 PPC_LIS(d, ((uintptr_t)(i) >> 48)); \
166 if ((uintptr_t)(i) & 0x0000ffff00000000ULL) \
167 PPC_ORI(d, d, \
168 ((uintptr_t)(i) >> 32) & 0xffff); \
169 PPC_SLDI(d, d, 32); \
170 if ((uintptr_t)(i) & 0x00000000ffff0000ULL) \
171 PPC_ORIS(d, d, \
172 ((uintptr_t)(i) >> 16) & 0xffff); \
173 if ((uintptr_t)(i) & 0x000000000000ffffULL) \
174 PPC_ORI(d, d, (uintptr_t)(i) & 0xffff); \
175 } } while (0);
176
177static inline bool is_nearbranch(int offset)
178{
179 return (offset < 32768) && (offset >= -32768);
180}
181
182/*
183 * The fly in the ointment of code size changing from pass to pass is
184 * avoided by padding the short branch case with a NOP. If code size differs
185 * with different branch reaches we will have the issue of code moving from
186 * one pass to the next and will need a few passes to converge on a stable
187 * state.
188 */
189#define PPC_BCC(cond, dest) do { \
190 if (is_nearbranch((dest) - (ctx->idx * 4))) { \
191 PPC_BCC_SHORT(cond, dest); \
192 PPC_NOP(); \
193 } else { \
194 /* Flip the 'T or F' bit to invert comparison */ \
195 PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, (ctx->idx+2)*4); \
196 PPC_JMP(dest); \
197 } } while(0)
198
199/* To create a branch condition, select a bit of cr0... */
200#define CR0_LT 0
201#define CR0_GT 1
202#define CR0_EQ 2
203/* ...and modify BO[3] */
204#define COND_CMP_TRUE 0x100
205#define COND_CMP_FALSE 0x000
206/* Together, they make all required comparisons: */
207#define COND_GT (CR0_GT | COND_CMP_TRUE)
208#define COND_GE (CR0_LT | COND_CMP_FALSE)
209#define COND_EQ (CR0_EQ | COND_CMP_TRUE)
210#define COND_NE (CR0_EQ | COND_CMP_FALSE)
211#define COND_LT (CR0_LT | COND_CMP_TRUE)
212
213#define SEEN_DATAREF 0x10000 /* might call external helpers */
214#define SEEN_XREG 0x20000 /* X reg is used */
215#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
216 * storage */
217#define SEEN_MEM_MSK 0x0ffff
218
219struct codegen_context {
220 unsigned int seen;
221 unsigned int idx;
222 int pc_ret0; /* bpf index of first RET #0 instruction (if any) */
223};
224
225#endif
226
227#endif
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
new file mode 100644
index 000000000000..ff4506e85cce
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -0,0 +1,138 @@
1/* bpf_jit.S: Packet/header access helper functions
2 * for PPC64 BPF compiler.
3 *
4 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; version 2
9 * of the License.
10 */
11
12#include <asm/ppc_asm.h>
13#include "bpf_jit.h"
14
15/*
16 * All of these routines are called directly from generated code,
17 * whose register usage is:
18 *
19 * r3 skb
20 * r4,r5 A,X
21 * r6 *** address parameter to helper ***
22 * r7-r10 scratch
23 * r14 skb->data
24 * r15 skb headlen
25 * r16-31 M[]
26 */
27
28/*
29 * To consider: These helpers are so small it could be better to just
30 * generate them inline. Inline code can do the simple headlen check
31 * then branch directly to slow_path_XXX if required. (In fact, could
32 * load a spare GPR with the address of slow_path_generic and pass size
33 * as an argument, making the call site a mtlr, li and bllr.)
34 *
35 * Technically, the "is addr < 0" check is unnecessary & slowing down
36 * the ABS path, as it's statically checked on generation.
37 */
/*
 * Fast-path loaders: bounds-check against the linear header (r_HL),
 * falling back to the skb_copy_bits()-based slow path when the access
 * extends past headlen. All return with cr0.LT set on error.
 */
	.globl	sk_load_word
sk_load_word:
	cmpdi	r_addr, 0
	blt	bpf_error
	/* Are we accessing past headlen? */
	subi	r_scratch1, r_HL, 4
	cmpd	r_scratch1, r_addr
	blt	bpf_slow_path_word
	/* Nope, just hitting the header.  cr0 here is eq or gt! */
	lwzx	r_A, r_D, r_addr
	/* When big endian we don't need to byteswap. */
	blr	/* Return success, cr0 != LT */

	.globl	sk_load_half
sk_load_half:
	cmpdi	r_addr, 0
	blt	bpf_error
	subi	r_scratch1, r_HL, 2
	cmpd	r_scratch1, r_addr
	blt	bpf_slow_path_half
	lhzx	r_A, r_D, r_addr
	blr

	.globl	sk_load_byte
sk_load_byte:
	cmpdi	r_addr, 0
	blt	bpf_error
	cmpd	r_HL, r_addr
	ble	bpf_slow_path_byte
	lbzx	r_A, r_D, r_addr
	blr

/*
 * BPF_S_LDX_B_MSH: ldxb  4*([offset]&0xf)
 * r_addr is the offset value, already known positive
 * Result: X = (packet byte & 0xf) << 2 (the rlwinm shifts left by 2
 * and masks to the low nibble's contribution).
 */
	.globl	sk_load_byte_msh
sk_load_byte_msh:
	cmpd	r_HL, r_addr
	ble	bpf_slow_path_byte_msh
	lbzx	r_X, r_D, r_addr
	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
	blr

bpf_error:
	/* Entered with cr0 = lt */
	li	r3, 0
	/* Generated code will 'blt epilogue', returning 0. */
	blr

/* Call out to skb_copy_bits:
 * We'll need to back up our volatile regs first; we have
 * local variable space at r1+(BPF_PPC_STACK_BASIC).
 * Allocate a new stack frame here to remain ABI-compliant in
 * stashing LR.
 * The copied bytes land in the local-variable area; the per-size
 * wrappers below reload them into r_A / r_X.
 */
#define bpf_slow_path_common(SIZE)				\
	mflr	r0;						\
	std	r0, 16(r1);					\
	/* R3 goes in parameter space of caller's frame */	\
	std	r_skb, (BPF_PPC_STACKFRAME+48)(r1);		\
	std	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);		\
	std	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);		\
	addi	r5, r1, BPF_PPC_STACK_BASIC+(2*8);		\
	stdu	r1, -BPF_PPC_SLOWPATH_FRAME(r1);		\
	/* R3 = r_skb, as passed */				\
	mr	r4, r_addr;					\
	li	r6, SIZE;					\
	bl	skb_copy_bits;					\
	/* R3 = 0 on success */					\
	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
	ld	r0, 16(r1);					\
	ld	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);		\
	ld	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);		\
	mtlr	r0;						\
	cmpdi	r3, 0;						\
	blt	bpf_error;	/* cr0 = LT */			\
	ld	r_skb, (BPF_PPC_STACKFRAME+48)(r1);		\
	/* Great success! */

bpf_slow_path_word:
	bpf_slow_path_common(4)
	/* Data value is on stack, and cr0 != LT */
	lwz	r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
	blr

bpf_slow_path_half:
	bpf_slow_path_common(2)
	lhz	r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
	blr

bpf_slow_path_byte:
	bpf_slow_path_common(1)
	lbz	r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
	blr

bpf_slow_path_byte_msh:
	bpf_slow_path_common(1)
	lbz	r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
	blr
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
new file mode 100644
index 000000000000..73619d3aeb6c
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -0,0 +1,694 @@
1/* bpf_jit_comp.c: BPF JIT compiler for PPC64
2 *
3 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
4 *
5 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2
10 * of the License.
11 */
12#include <linux/moduleloader.h>
13#include <asm/cacheflush.h>
14#include <linux/netdevice.h>
15#include <linux/filter.h>
16#include "bpf_jit.h"
17
18#ifndef __BIG_ENDIAN
19/* There are endianness assumptions herein. */
20#error "Little-endian PPC not supported in BPF compiler"
21#endif
22
23int bpf_jit_enable __read_mostly;
24
25
/*
 * Make freshly-written JIT code visible to instruction fetch: order the
 * code stores before publication, then invalidate the icache over the
 * [start, end) range.
 */
static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}
31
/*
 * Emit the function prologue. With image == NULL this is a sizing pass:
 * the PPC_*/EMIT macros only advance ctx->idx. The stack frame, LR save
 * and non-volatile register saves are emitted only for features recorded
 * in ctx->seen, so 'seen' must already describe the filter (pass >= 1).
 */
static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		/* Make stackframe */
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(0));
			PPC_STD(0, 1, 16);

			/* Back up non-volatile regs. */
			PPC_STD(r_D, 1, -(8*(32-r_D)));
			PPC_STD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_STD(i, 1, -(8*(32-i)));
			}
		}
		EMIT(PPC_INST_STDU | __PPC_RS(1) | __PPC_RA(1) |
		     (-BPF_PPC_STACKFRAME & 0xfffc));
	}

	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 *  r_HL = skb->len - skb->data_len
		 *  r_D	 = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}

	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}

	switch (filter[0].code) {
	case BPF_S_RET_K:
	case BPF_S_LD_W_LEN:
	case BPF_S_ANC_PROTOCOL:
	case BPF_S_ANC_IFINDEX:
	case BPF_S_ANC_MARK:
	case BPF_S_ANC_RXHASH:
	case BPF_S_ANC_CPU:
	case BPF_S_ANC_QUEUE:
	case BPF_S_LD_W_ABS:
	case BPF_S_LD_H_ABS:
	case BPF_S_LD_B_ABS:
		/* first instruction sets A register (or is RET 'constant') */
		break;
	default:
		/* make sure we dont leak kernel information to user */
		PPC_LI(r_A, 0);
	}
}
104
/*
 * Emit the function epilogue: tear down whatever the prologue set up
 * (mirroring its ctx->seen conditions exactly) and return. The RET
 * cases in the body have already placed the return value in r3.
 */
static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			PPC_LD(0, 1, 16);
			PPC_MTLR(0);
			PPC_LD(r_D, 1, -(8*(32-r_D)));
			PPC_LD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_LD(i, 1, -(8*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */

	PPC_BLR();
}
129
/* Assemble the body code between the prologue & epilogue.
 *
 * Translates each BPF bytecode instruction in fp->insns into PPC64
 * instructions, recording feature usage in ctx->seen and filling addrs[]
 * (BPF index -> byte offset in body code) as it goes. May be run as a
 * sizing pass with image == NULL. Returns 0 on success, -ENOTSUPP for
 * filters we cannot (or choose not to) compile, in which case the caller
 * falls back to the interpreter.
 */
static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (filter[i].code) {
			/*** ALU ops ***/
		case BPF_S_ALU_ADD_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_ADD_K: /* A += K; */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_S_ALU_SUB_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_SUB_K: /* A -= K */
			if (!K)
				break;
			/* Subtract by adding the negated immediate. */
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_S_ALU_MUL_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MUL(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_MUL_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MUL(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_S_ALU_DIV_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
			/* Guard against division by zero: return 0. */
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/*
				 * Exit, returning 0; first pass hits here
				 * (longer worst-case code size).
				 */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			PPC_DIVWU(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
			PPC_LI32(r_scratch1, K);
			/* Top 32 bits of 64bit result -> A */
			PPC_MULHWU(r_A, r_A, r_scratch1);
			break;
		case BPF_S_ALU_AND_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_AND_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_S_ALU_OR_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_OR_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_S_ALU_LSH_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_LSH_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_S_ALU_RSH_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_RSH_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_S_ALU_NEG:
			PPC_NEG(r_A, r_A);
			break;
		case BPF_S_RET_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we've stuff to clean up.  Otherwise,
			 * if there's nothing to tidy, just return.  If we /are/
			 * the last instruction, we're about to fall through to
			 * the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2.	Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine the
				 * code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_S_RET_A:
			PPC_MR(r_ret, r_A);
			if (i != flen - 1) {
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_S_MISC_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_S_MISC_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;

			/*** Constant loads/M[] access ***/
		case BPF_S_LD_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_S_LDX_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_S_LD_MEM: /* A = mem[K] */
			/* M[] slots live in registers r_M..r_M+15. */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_LDX_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_LD_W_LEN: /*	A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_S_LDX_W_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

			/*** Ancillary info loads ***/

			/* None of the BPF_S_ANC* codes appear to be passed by
			 * sk_chk_filter().  The interpreter and the x86 BPF
			 * compiler implement them so we do too -- they may be
			 * planted in future.
			 */
		case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  protocol));
			/* ntohs is a NOP with BE loads. */
			break;
		case BPF_S_ANC_IFINDEX:
			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			/* NULL skb->dev -> return 0, as the x86 JIT does. */
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			PPC_LWZ_OFFS(r_A, r_scratch1,
				     offsetof(struct net_device, ifindex));
			break;
		case BPF_S_ANC_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_S_ANC_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  rxhash));
			break;
		case BPF_S_ANC_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_S_ANC_CPU:
#ifdef CONFIG_SMP
			/*
			 * PACA ptr is r13:
			 * raw_smp_processor_id() = local_paca->paca_index
			 */
			BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
						  paca_index) != 2);
			PPC_LHZ_OFFS(r_A, 13,
				     offsetof(struct paca_struct, paca_index));
#else
			PPC_LI(r_A, 0);
#endif
			break;

			/*** Absolute loads from packet header/data ***/
		case BPF_S_LD_W_ABS:
			func = sk_load_word;
			goto common_load;
		case BPF_S_LD_H_ABS:
			func = sk_load_half;
			goto common_load;
		case BPF_S_LD_B_ABS:
			func = sk_load_byte;
		common_load:
			/*
			 * Load from [K].  Reference with the (negative)
			 * SKF_NET_OFF/SKF_LL_OFF offsets is unsupported.
			 */
			ctx->seen |= SEEN_DATAREF;
			if ((int)K < 0)
				return -ENOTSUPP;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

			/*** Indirect loads from packet header/data ***/
		case BPF_S_LD_W_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_S_LD_H_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_S_LD_B_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K].  Negative offsets are tested for
			 * in the helper functions, and result in a 'ret 0'.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;

		case BPF_S_LDX_B_MSH:
			/*
			 * x86 version drops packet (RET 0) when K<0, whereas
			 * interpreter does allow K<0 (__load_pointer, special
			 * ancillary data).  common_load returns ENOTSUPP if K<0,
			 * so we fall back to interpreter & filter works.
			 */
			func = sk_load_byte_msh;
			goto common_load;
			break;

			/*** Jump and branches ***/
		case BPF_S_JMP_JA:
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;

		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_S_JMP_JSET_K:
		case BPF_S_JMP_JSET_X:
			true_cond = COND_NE;
			/* Fall through */
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

			switch (filter[i].code) {
			case BPF_S_JMP_JGT_X:
			case BPF_S_JMP_JGE_X:
			case BPF_S_JMP_JEQ_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_S_JMP_JSET_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_S_JMP_JEQ_K:
			case BPF_S_JMP_JGT_K:
			case BPF_S_JMP_JGE_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_S_JMP_JSET_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}

	}
	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}
549
/*
 * Top-level JIT entry point: translate fp->insns to native PPC64 code and,
 * on success, install the generated function in fp->bpf_func. On any
 * failure (allocation, unsupported opcode) the filter is simply left for
 * the interpreter -- this function never reports errors to its caller.
 */
void bpf_jit_compile(struct sk_filter *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image = NULL;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

	/* flen+1 entries: addrs[flen] is the offset of the epilogue. */
	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes.	Since
	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
	 * finish with 8 bytes/instruction.  Not feasible, so long jumps are
	 * used, distinct from short branches.
	 *
	 * Current:
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in).	Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used.  Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * Ideal:
	 *
	 * If we optimise this, near branches will be shorter.	On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code.  On subsequent passes, branches will be
	 * generated short or long and code size will reduce.  With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real.  Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long.  The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */

	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen.  This will
	 * update ctgtx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
	/*
	 * The buffer must be at least work_struct-sized: bpf_jit_free()
	 * reuses it as a work_struct to defer module_free().
	 */
	image = module_alloc(max_t(unsigned int, alloclen,
				   sizeof(struct work_struct)));
	if (!image)
		goto out;

	code_base = image + (FUNCTION_DESCR_SIZE/4);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		pr_info("flen=%d proglen=%u pass=%d image=%p\n",
			flen, proglen, pass, image);

	/* NOTE(review): image is always non-NULL here (checked above), so
	 * this guard looks redundant -- confirm before simplifying. */
	if (image) {
		if (bpf_jit_enable > 1)
			print_hex_dump(KERN_ERR, "JIT code: ",
				       DUMP_PREFIX_ADDRESS,
				       16, 1, code_base,
				       proglen, false);

		bpf_flush_icache(code_base, code_base + (proglen/4));
		/* Function descriptor nastiness: Address + TOC */
		((u64 *)image)[0] = (u64)code_base;
		((u64 *)image)[1] = local_paca->kernel_toc;
		fp->bpf_func = (void *)image;
	}
out:
	kfree(addrs);
	return;
}
677
/* Deferred-free callback: 'arg' is the JIT image buffer itself, reused
 * as a work_struct (bpf_jit_compile() allocated it at least that big). */
static void jit_free_defer(struct work_struct *arg)
{
	module_free(NULL, arg);
}

/* run from softirq, we must use a work_struct to call
 * module_free() from process context
 */
void bpf_jit_free(struct sk_filter *fp)
{
	/* fp->bpf_func == sk_run_filter means the filter was never JITed. */
	if (fp->bpf_func != sk_run_filter) {
		/* Overlay a work_struct on the (now unused) image buffer. */
		struct work_struct *work = (struct work_struct *)fp->bpf_func;

		INIT_WORK(work, jit_free_defer);
		schedule_work(work);
	}
}
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index 47ea1be1481b..90f4496017e4 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -55,14 +55,6 @@ config PPC_MPC5200_BUGFIX
55 55
56 It is safe to say 'Y' here 56 It is safe to say 'Y' here
57 57
58config PPC_MPC5200_GPIO
59 bool "MPC5200 GPIO support"
60 depends on PPC_MPC52xx
61 select ARCH_REQUIRE_GPIOLIB
62 select GENERIC_GPIO
63 help
64 Enable gpiolib support for mpc5200 based boards
65
66config PPC_MPC5200_LPBFIFO 58config PPC_MPC5200_LPBFIFO
67 tristate "MPC5200 LocalPlus bus FIFO driver" 59 tristate "MPC5200 LocalPlus bus FIFO driver"
68 depends on PPC_MPC52xx 60 depends on PPC_MPC52xx
diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile
index 2bc8cd0c5cfc..4e62486791e9 100644
--- a/arch/powerpc/platforms/52xx/Makefile
+++ b/arch/powerpc/platforms/52xx/Makefile
@@ -14,5 +14,4 @@ ifeq ($(CONFIG_PPC_LITE5200),y)
14 obj-$(CONFIG_PM) += lite5200_sleep.o lite5200_pm.o 14 obj-$(CONFIG_PM) += lite5200_sleep.o lite5200_pm.o
15endif 15endif
16 16
17obj-$(CONFIG_PPC_MPC5200_GPIO) += mpc52xx_gpio.o
18obj-$(CONFIG_PPC_MPC5200_LPBFIFO) += mpc52xx_lpbfifo.o 17obj-$(CONFIG_PPC_MPC5200_LPBFIFO) += mpc52xx_lpbfifo.o
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
deleted file mode 100644
index 1757d1db4b51..000000000000
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
+++ /dev/null
@@ -1,380 +0,0 @@
1/*
2 * MPC52xx gpio driver
3 *
4 * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/of.h>
21#include <linux/kernel.h>
22#include <linux/slab.h>
23#include <linux/of_gpio.h>
24#include <linux/io.h>
25#include <linux/of_platform.h>
26
27#include <asm/gpio.h>
28#include <asm/mpc52xx.h>
29#include <sysdev/fsl_soc.h>
30
31static DEFINE_SPINLOCK(gpio_lock);
32
33struct mpc52xx_gpiochip {
34 struct of_mm_gpio_chip mmchip;
35 unsigned int shadow_dvo;
36 unsigned int shadow_gpioe;
37 unsigned int shadow_ddr;
38};
39
40/*
41 * GPIO LIB API implementation for wakeup GPIOs.
42 *
43 * There's a maximum of 8 wakeup GPIOs. Which of these are available
44 * for use depends on your board setup.
45 *
46 * 0 -> GPIO_WKUP_7
47 * 1 -> GPIO_WKUP_6
48 * 2 -> PSC6_1
49 * 3 -> PSC6_0
50 * 4 -> ETH_17
51 * 5 -> PSC3_9
52 * 6 -> PSC2_4
53 * 7 -> PSC1_4
54 *
55 */
/* Read the input value of wakeup GPIO 'gpio' (bit 7-gpio of wkup_ival). */
static int mpc52xx_wkup_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
	unsigned int ret;

	ret = (in_8(&regs->wkup_ival) >> (7 - gpio)) & 1;

	pr_debug("%s: gpio: %d ret: %d\n", __func__, gpio, ret);

	return ret;
}

/*
 * Update the shadowed data-value-out register and write it back.
 * Called with gpio_lock held (see both callers below); the shadow copy
 * exists because the hardware register is write-only from our viewpoint.
 */
static inline void
__mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct mpc52xx_gpiochip *chip = container_of(mm_gc,
			struct mpc52xx_gpiochip, mmchip);
	struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;

	if (val)
		chip->shadow_dvo |= 1 << (7 - gpio);
	else
		chip->shadow_dvo &= ~(1 << (7 - gpio));

	out_8(&regs->wkup_dvo, chip->shadow_dvo);
}

/* gpiolib .set hook: set output value under gpio_lock. */
static void
mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
	unsigned long flags;

	spin_lock_irqsave(&gpio_lock, flags);

	__mpc52xx_wkup_gpio_set(gc, gpio, val);

	spin_unlock_irqrestore(&gpio_lock, flags);

	pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
}

/* gpiolib .direction_input hook: clear DDR bit, then enable the pin. */
static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct mpc52xx_gpiochip *chip = container_of(mm_gc,
			struct mpc52xx_gpiochip, mmchip);
	struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
	unsigned long flags;

	spin_lock_irqsave(&gpio_lock, flags);

	/* set the direction */
	chip->shadow_ddr &= ~(1 << (7 - gpio));
	out_8(&regs->wkup_ddr, chip->shadow_ddr);

	/* and enable the pin */
	chip->shadow_gpioe |= 1 << (7 - gpio);
	out_8(&regs->wkup_gpioe, chip->shadow_gpioe);

	spin_unlock_irqrestore(&gpio_lock, flags);

	return 0;
}

/*
 * gpiolib .direction_output hook: write the initial value before
 * switching the pin to output, so it never glitches to a stale level.
 */
static int
mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
	struct mpc52xx_gpiochip *chip = container_of(mm_gc,
			struct mpc52xx_gpiochip, mmchip);
	unsigned long flags;

	spin_lock_irqsave(&gpio_lock, flags);

	__mpc52xx_wkup_gpio_set(gc, gpio, val);

	/* Then set direction */
	chip->shadow_ddr |= 1 << (7 - gpio);
	out_8(&regs->wkup_ddr, chip->shadow_ddr);

	/* Finally enable the pin */
	chip->shadow_gpioe |= 1 << (7 - gpio);
	out_8(&regs->wkup_gpioe, chip->shadow_gpioe);

	spin_unlock_irqrestore(&gpio_lock, flags);

	pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);

	return 0;
}
149
/*
 * Probe: register an 8-pin wakeup GPIO chip and seed the shadow copies
 * of the gpioe/ddr/dvo registers from their current hardware state.
 */
static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
{
	struct mpc52xx_gpiochip *chip;
	struct mpc52xx_gpio_wkup __iomem *regs;
	struct gpio_chip *gc;
	int ret;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	gc = &chip->mmchip.gc;

	gc->ngpio            = 8;
	gc->direction_input  = mpc52xx_wkup_gpio_dir_in;
	gc->direction_output = mpc52xx_wkup_gpio_dir_out;
	gc->get              = mpc52xx_wkup_gpio_get;
	gc->set              = mpc52xx_wkup_gpio_set;

	/* NOTE(review): on failure 'chip' is leaked -- no kfree(chip)
	 * before this early return. */
	ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip);
	if (ret)
		return ret;

	regs = chip->mmchip.regs;
	chip->shadow_gpioe = in_8(&regs->wkup_gpioe);
	chip->shadow_ddr = in_8(&regs->wkup_ddr);
	chip->shadow_dvo = in_8(&regs->wkup_dvo);

	return 0;
}

/* Removal is not supported: always refuse with -EBUSY. */
static int mpc52xx_gpiochip_remove(struct platform_device *ofdev)
{
	return -EBUSY;
}
185
186static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = {
187 {
188 .compatible = "fsl,mpc5200-gpio-wkup",
189 },
190 {}
191};
192
193static struct platform_driver mpc52xx_wkup_gpiochip_driver = {
194 .driver = {
195 .name = "gpio_wkup",
196 .owner = THIS_MODULE,
197 .of_match_table = mpc52xx_wkup_gpiochip_match,
198 },
199 .probe = mpc52xx_wkup_gpiochip_probe,
200 .remove = mpc52xx_gpiochip_remove,
201};
202
203/*
204 * GPIO LIB API implementation for simple GPIOs
205 *
206 * There's a maximum of 32 simple GPIOs. Which of these are available
207 * for use depends on your board setup.
208 * The numbering reflects the bit numbering in the port registers:
209 *
210 * 0..1 > reserved
211 * 2..3 > IRDA
212 * 4..7 > ETHR
213 * 8..11 > reserved
214 * 12..15 > USB
215 * 16..17 > reserved
216 * 18..23 > PSC3
217 * 24..27 > PSC2
218 * 28..31 > PSC1
219 */
/* Read the input value of simple GPIO 'gpio' (bit 31-gpio of simple_ival). */
static int mpc52xx_simple_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
	unsigned int ret;

	ret = (in_be32(&regs->simple_ival) >> (31 - gpio)) & 1;

	return ret;
}

/*
 * Update the shadowed data-value-out register and write it back.
 * Called with gpio_lock held (see both callers below).
 */
static inline void
__mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct mpc52xx_gpiochip *chip = container_of(mm_gc,
			struct mpc52xx_gpiochip, mmchip);
	struct mpc52xx_gpio __iomem *regs = mm_gc->regs;

	if (val)
		chip->shadow_dvo |= 1 << (31 - gpio);
	else
		chip->shadow_dvo &= ~(1 << (31 - gpio));
	out_be32(&regs->simple_dvo, chip->shadow_dvo);
}

/* gpiolib .set hook: set output value under gpio_lock. */
static void
mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
	unsigned long flags;

	spin_lock_irqsave(&gpio_lock, flags);

	__mpc52xx_simple_gpio_set(gc, gpio, val);

	spin_unlock_irqrestore(&gpio_lock, flags);

	pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
}

/* gpiolib .direction_input hook: clear DDR bit, then enable the pin. */
static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct mpc52xx_gpiochip *chip = container_of(mm_gc,
			struct mpc52xx_gpiochip, mmchip);
	struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
	unsigned long flags;

	spin_lock_irqsave(&gpio_lock, flags);

	/* set the direction */
	chip->shadow_ddr &= ~(1 << (31 - gpio));
	out_be32(&regs->simple_ddr, chip->shadow_ddr);

	/* and enable the pin */
	chip->shadow_gpioe |= 1 << (31 - gpio);
	out_be32(&regs->simple_gpioe, chip->shadow_gpioe);

	spin_unlock_irqrestore(&gpio_lock, flags);

	return 0;
}

/*
 * gpiolib .direction_output hook: write the initial value before
 * switching the pin to output, so it never glitches to a stale level.
 */
static int
mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct mpc52xx_gpiochip *chip = container_of(mm_gc,
			struct mpc52xx_gpiochip, mmchip);
	struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
	unsigned long flags;

	spin_lock_irqsave(&gpio_lock, flags);

	/* First set initial value */
	__mpc52xx_simple_gpio_set(gc, gpio, val);

	/* Then set direction */
	chip->shadow_ddr |= 1 << (31 - gpio);
	out_be32(&regs->simple_ddr, chip->shadow_ddr);

	/* Finally enable the pin */
	chip->shadow_gpioe |= 1 << (31 - gpio);
	out_be32(&regs->simple_gpioe, chip->shadow_gpioe);

	spin_unlock_irqrestore(&gpio_lock, flags);

	pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);

	return 0;
}
311
312static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev)
313{
314 struct mpc52xx_gpiochip *chip;
315 struct gpio_chip *gc;
316 struct mpc52xx_gpio __iomem *regs;
317 int ret;
318
319 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
320 if (!chip)
321 return -ENOMEM;
322
323 gc = &chip->mmchip.gc;
324
325 gc->ngpio = 32;
326 gc->direction_input = mpc52xx_simple_gpio_dir_in;
327 gc->direction_output = mpc52xx_simple_gpio_dir_out;
328 gc->get = mpc52xx_simple_gpio_get;
329 gc->set = mpc52xx_simple_gpio_set;
330
331 ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip);
332 if (ret)
333 return ret;
334
335 regs = chip->mmchip.regs;
336 chip->shadow_gpioe = in_be32(&regs->simple_gpioe);
337 chip->shadow_ddr = in_be32(&regs->simple_ddr);
338 chip->shadow_dvo = in_be32(&regs->simple_dvo);
339
340 return 0;
341}
342
343static const struct of_device_id mpc52xx_simple_gpiochip_match[] = {
344 {
345 .compatible = "fsl,mpc5200-gpio",
346 },
347 {}
348};
349
350static struct platform_driver mpc52xx_simple_gpiochip_driver = {
351 .driver = {
352 .name = "gpio",
353 .owner = THIS_MODULE,
354 .of_match_table = mpc52xx_simple_gpiochip_match,
355 },
356 .probe = mpc52xx_simple_gpiochip_probe,
357 .remove = mpc52xx_gpiochip_remove,
358};
359
360static int __init mpc52xx_gpio_init(void)
361{
362 if (platform_driver_register(&mpc52xx_wkup_gpiochip_driver))
363 printk(KERN_ERR "Unable to register wakeup GPIO driver\n");
364
365 if (platform_driver_register(&mpc52xx_simple_gpiochip_driver))
366 printk(KERN_ERR "Unable to register simple GPIO driver\n");
367
368 return 0;
369}
370
371
372/* Make sure we get initialised before anyone else tries to use us */
373subsys_initcall(mpc52xx_gpio_init);
374
375/* No exit call at the moment as we cannot unregister of gpio chips */
376
377MODULE_DESCRIPTION("Freescale MPC52xx gpio driver");
378MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
379MODULE_LICENSE("GPL v2");
380
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
index 82051bddcc40..bfb11e01133e 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
@@ -264,7 +264,7 @@ mpc52xx_pci_setup(struct pci_controller *hose,
264 (unsigned long long)res->flags); 264 (unsigned long long)res->flags);
265 out_be32(&pci_regs->iw0btar, 265 out_be32(&pci_regs->iw0btar,
266 MPC52xx_PCI_IWBTAR_TRANSLATION(res->start, res->start, 266 MPC52xx_PCI_IWBTAR_TRANSLATION(res->start, res->start,
267 res->end - res->start + 1)); 267 resource_size(res)));
268 iwcr0 = MPC52xx_PCI_IWCR_ENABLE | MPC52xx_PCI_IWCR_MEM; 268 iwcr0 = MPC52xx_PCI_IWCR_ENABLE | MPC52xx_PCI_IWCR_MEM;
269 if (res->flags & IORESOURCE_PREFETCH) 269 if (res->flags & IORESOURCE_PREFETCH)
270 iwcr0 |= MPC52xx_PCI_IWCR_READ_MULTI; 270 iwcr0 |= MPC52xx_PCI_IWCR_READ_MULTI;
@@ -278,7 +278,7 @@ mpc52xx_pci_setup(struct pci_controller *hose,
278 res->start, res->end, res->flags); 278 res->start, res->end, res->flags);
279 out_be32(&pci_regs->iw1btar, 279 out_be32(&pci_regs->iw1btar,
280 MPC52xx_PCI_IWBTAR_TRANSLATION(res->start, res->start, 280 MPC52xx_PCI_IWBTAR_TRANSLATION(res->start, res->start,
281 res->end - res->start + 1)); 281 resource_size(res)));
282 iwcr1 = MPC52xx_PCI_IWCR_ENABLE | MPC52xx_PCI_IWCR_MEM; 282 iwcr1 = MPC52xx_PCI_IWCR_ENABLE | MPC52xx_PCI_IWCR_MEM;
283 if (res->flags & IORESOURCE_PREFETCH) 283 if (res->flags & IORESOURCE_PREFETCH)
284 iwcr1 |= MPC52xx_PCI_IWCR_READ_MULTI; 284 iwcr1 |= MPC52xx_PCI_IWCR_READ_MULTI;
@@ -300,7 +300,7 @@ mpc52xx_pci_setup(struct pci_controller *hose,
300 out_be32(&pci_regs->iw2btar, 300 out_be32(&pci_regs->iw2btar,
301 MPC52xx_PCI_IWBTAR_TRANSLATION(hose->io_base_phys, 301 MPC52xx_PCI_IWBTAR_TRANSLATION(hose->io_base_phys,
302 res->start, 302 res->start,
303 res->end - res->start + 1)); 303 resource_size(res)));
304 iwcr2 = MPC52xx_PCI_IWCR_ENABLE | MPC52xx_PCI_IWCR_IO; 304 iwcr2 = MPC52xx_PCI_IWCR_ENABLE | MPC52xx_PCI_IWCR_IO;
305 305
306 /* Set all the IWCR fields at once; they're in the same reg */ 306 /* Set all the IWCR fields at once; they're in the same reg */
@@ -402,7 +402,7 @@ mpc52xx_add_bridge(struct device_node *node)
402 402
403 hose->ops = &mpc52xx_pci_ops; 403 hose->ops = &mpc52xx_pci_ops;
404 404
405 pci_regs = ioremap(rsrc.start, rsrc.end - rsrc.start + 1); 405 pci_regs = ioremap(rsrc.start, resource_size(&rsrc));
406 if (!pci_regs) 406 if (!pci_regs)
407 return -ENOMEM; 407 return -ENOMEM;
408 408
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c
index a2b9b9ef1240..c55129f5760a 100644
--- a/arch/powerpc/platforms/83xx/km83xx.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -28,7 +28,7 @@
28#include <linux/of_device.h> 28#include <linux/of_device.h>
29 29
30#include <asm/system.h> 30#include <asm/system.h>
31#include <asm/atomic.h> 31#include <linux/atomic.h>
32#include <asm/time.h> 32#include <asm/time.h>
33#include <asm/io.h> 33#include <asm/io.h>
34#include <asm/machdep.h> 34#include <asm/machdep.h>
@@ -101,7 +101,7 @@ static void __init mpc83xx_km_setup_arch(void)
101 __func__); 101 __func__);
102 return; 102 return;
103 } 103 }
104 base = ioremap(res.start, res.end - res.start + 1); 104 base = ioremap(res.start, resource_size(&res));
105 105
106 /* 106 /*
107 * IMMR + 0x14A8[4:5] = 11 (clk delay for UCC 2) 107 * IMMR + 0x14A8[4:5] = 11 (clk delay for UCC 2)
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index ec0b401bc9cf..32a52896822f 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -27,7 +27,7 @@
27#include <linux/of_device.h> 27#include <linux/of_device.h>
28 28
29#include <asm/system.h> 29#include <asm/system.h>
30#include <asm/atomic.h> 30#include <linux/atomic.h>
31#include <asm/time.h> 31#include <asm/time.h>
32#include <asm/io.h> 32#include <asm/io.h>
33#include <asm/machdep.h> 33#include <asm/machdep.h>
@@ -68,7 +68,7 @@ static void __init mpc832x_sys_setup_arch(void)
68 struct resource res; 68 struct resource res;
69 69
70 of_address_to_resource(np, 0, &res); 70 of_address_to_resource(np, 0, &res);
71 bcsr_regs = ioremap(res.start, res.end - res.start +1); 71 bcsr_regs = ioremap(res.start, resource_size(&res));
72 of_node_put(np); 72 of_node_put(np);
73 } 73 }
74 74
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index 81e44fa1c644..6b45969567d4 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -26,7 +26,7 @@
26#include <linux/of_platform.h> 26#include <linux/of_platform.h>
27 27
28#include <asm/system.h> 28#include <asm/system.h>
29#include <asm/atomic.h> 29#include <linux/atomic.h>
30#include <asm/time.h> 30#include <asm/time.h>
31#include <asm/io.h> 31#include <asm/io.h>
32#include <asm/machdep.h> 32#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c
index d0a634b056ca..041c5177e737 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c
@@ -26,7 +26,7 @@
26#include <linux/of_platform.h> 26#include <linux/of_platform.h>
27 27
28#include <asm/system.h> 28#include <asm/system.h>
29#include <asm/atomic.h> 29#include <linux/atomic.h>
30#include <asm/time.h> 30#include <asm/time.h>
31#include <asm/io.h> 31#include <asm/io.h>
32#include <asm/machdep.h> 32#include <asm/machdep.h>
@@ -53,7 +53,7 @@ static int mpc834xemds_usb_cfg(void)
53 struct resource res; 53 struct resource res;
54 54
55 of_address_to_resource(np, 0, &res); 55 of_address_to_resource(np, 0, &res);
56 bcsr_regs = ioremap(res.start, res.end - res.start + 1); 56 bcsr_regs = ioremap(res.start, resource_size(&res));
57 of_node_put(np); 57 of_node_put(np);
58 } 58 }
59 if (!bcsr_regs) 59 if (!bcsr_regs)
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index 09e9d6fb7411..934cc8c46bbc 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -34,7 +34,7 @@
34#include <linux/of_device.h> 34#include <linux/of_device.h>
35 35
36#include <asm/system.h> 36#include <asm/system.h>
37#include <asm/atomic.h> 37#include <linux/atomic.h>
38#include <asm/time.h> 38#include <asm/time.h>
39#include <asm/io.h> 39#include <asm/io.h>
40#include <asm/machdep.h> 40#include <asm/machdep.h>
@@ -76,7 +76,7 @@ static void __init mpc836x_mds_setup_arch(void)
76 struct resource res; 76 struct resource res;
77 77
78 of_address_to_resource(np, 0, &res); 78 of_address_to_resource(np, 0, &res);
79 bcsr_regs = ioremap(res.start, res.end - res.start +1); 79 bcsr_regs = ioremap(res.start, resource_size(&res));
80 of_node_put(np); 80 of_node_put(np);
81 } 81 }
82 82
diff --git a/arch/powerpc/platforms/83xx/sbc834x.c b/arch/powerpc/platforms/83xx/sbc834x.c
index 49023dbe1576..af41d8c810a8 100644
--- a/arch/powerpc/platforms/83xx/sbc834x.c
+++ b/arch/powerpc/platforms/83xx/sbc834x.c
@@ -28,7 +28,7 @@
28#include <linux/of_platform.h> 28#include <linux/of_platform.h>
29 29
30#include <asm/system.h> 30#include <asm/system.h>
31#include <asm/atomic.h> 31#include <linux/atomic.h>
32#include <asm/time.h> 32#include <asm/time.h>
33#include <asm/io.h> 33#include <asm/io.h>
34#include <asm/machdep.h> 34#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c
index 2c64164722d0..1ad748bb39b4 100644
--- a/arch/powerpc/platforms/83xx/usb.c
+++ b/arch/powerpc/platforms/83xx/usb.c
@@ -171,7 +171,7 @@ int mpc831x_usb_cfg(void)
171 of_node_put(np); 171 of_node_put(np);
172 return ret; 172 return ret;
173 } 173 }
174 usb_regs = ioremap(res.start, res.end - res.start + 1); 174 usb_regs = ioremap(res.start, resource_size(&res));
175 175
176 /* Using on-chip PHY */ 176 /* Using on-chip PHY */
177 if (prop && (!strcmp(prop, "utmi_wide") || 177 if (prop && (!strcmp(prop, "utmi_wide") ||
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index b6976e1726e4..498534cd5265 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -67,6 +67,16 @@ config MPC85xx_RDB
67 help 67 help
68 This option enables support for the MPC85xx RDB (P2020 RDB) board 68 This option enables support for the MPC85xx RDB (P2020 RDB) board
69 69
70config P1010_RDB
71 bool "Freescale P1010RDB"
72 select DEFAULT_UIMAGE
73 help
74 This option enables support for the MPC85xx RDB (P1010 RDB) board
75
76 P1010RDB contains P1010Si, which provides CPU performance up to 800
77 MHz and 1600 DMIPS, additional functionality and faster interfaces
78 (DDR3/3L, SATA II, and PCI Express).
79
70config P1022_DS 80config P1022_DS
71 bool "Freescale P1022 DS" 81 bool "Freescale P1022 DS"
72 select DEFAULT_UIMAGE 82 select DEFAULT_UIMAGE
@@ -75,6 +85,12 @@ config P1022_DS
75 help 85 help
76 This option enables support for the Freescale P1022DS reference board. 86 This option enables support for the Freescale P1022DS reference board.
77 87
88config P1023_RDS
89 bool "Freescale P1023 RDS"
90 select DEFAULT_UIMAGE
91 help
92 This option enables support for the P1023 RDS board
93
78config SOCRATES 94config SOCRATES
79 bool "Socrates" 95 bool "Socrates"
80 select DEFAULT_UIMAGE 96 select DEFAULT_UIMAGE
@@ -155,6 +171,18 @@ config SBC8560
155 help 171 help
156 This option enables support for the Wind River SBC8560 board 172 This option enables support for the Wind River SBC8560 board
157 173
174config P2040_RDB
175 bool "Freescale P2040 RDB"
176 select DEFAULT_UIMAGE
177 select PPC_E500MC
178 select PHYS_64BIT
179 select SWIOTLB
180 select MPC8xxx_GPIO
181 select HAS_RAPIDIO
182 select PPC_EPAPR_HV_PIC
183 help
184 This option enables support for the P2040 RDB board
185
158config P3041_DS 186config P3041_DS
159 bool "Freescale P3041 DS" 187 bool "Freescale P3041 DS"
160 select DEFAULT_UIMAGE 188 select DEFAULT_UIMAGE
@@ -163,6 +191,7 @@ config P3041_DS
163 select SWIOTLB 191 select SWIOTLB
164 select MPC8xxx_GPIO 192 select MPC8xxx_GPIO
165 select HAS_RAPIDIO 193 select HAS_RAPIDIO
194 select PPC_EPAPR_HV_PIC
166 help 195 help
167 This option enables support for the P3041 DS board 196 This option enables support for the P3041 DS board
168 197
@@ -174,6 +203,7 @@ config P4080_DS
174 select SWIOTLB 203 select SWIOTLB
175 select MPC8xxx_GPIO 204 select MPC8xxx_GPIO
176 select HAS_RAPIDIO 205 select HAS_RAPIDIO
206 select PPC_EPAPR_HV_PIC
177 help 207 help
178 This option enables support for the P4080 DS board 208 This option enables support for the P4080 DS board
179 209
@@ -188,6 +218,7 @@ config P5020_DS
188 select SWIOTLB 218 select SWIOTLB
189 select MPC8xxx_GPIO 219 select MPC8xxx_GPIO
190 select HAS_RAPIDIO 220 select HAS_RAPIDIO
221 select PPC_EPAPR_HV_PIC
191 help 222 help
192 This option enables support for the P5020 DS board 223 This option enables support for the P5020 DS board
193 224
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index dd70db77d63e..a971b32c5c0a 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -10,7 +10,10 @@ obj-$(CONFIG_MPC8536_DS) += mpc8536_ds.o
10obj-$(CONFIG_MPC85xx_DS) += mpc85xx_ds.o 10obj-$(CONFIG_MPC85xx_DS) += mpc85xx_ds.o
11obj-$(CONFIG_MPC85xx_MDS) += mpc85xx_mds.o 11obj-$(CONFIG_MPC85xx_MDS) += mpc85xx_mds.o
12obj-$(CONFIG_MPC85xx_RDB) += mpc85xx_rdb.o 12obj-$(CONFIG_MPC85xx_RDB) += mpc85xx_rdb.o
13obj-$(CONFIG_P1010_RDB) += p1010rdb.o
13obj-$(CONFIG_P1022_DS) += p1022_ds.o 14obj-$(CONFIG_P1022_DS) += p1022_ds.o
15obj-$(CONFIG_P1023_RDS) += p1023_rds.o
16obj-$(CONFIG_P2040_RDB) += p2040_rdb.o corenet_ds.o
14obj-$(CONFIG_P3041_DS) += p3041_ds.o corenet_ds.o 17obj-$(CONFIG_P3041_DS) += p3041_ds.o corenet_ds.o
15obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o 18obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o
16obj-$(CONFIG_P5020_DS) += p5020_ds.o corenet_ds.o 19obj-$(CONFIG_P5020_DS) += p5020_ds.o corenet_ds.o
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c
index 2ab338c9ac37..802ad110b757 100644
--- a/arch/powerpc/platforms/85xx/corenet_ds.c
+++ b/arch/powerpc/platforms/85xx/corenet_ds.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Maintained by Kumar Gala (see MAINTAINERS for contact information) 4 * Maintained by Kumar Gala (see MAINTAINERS for contact information)
5 * 5 *
6 * Copyright 2009 Freescale Semiconductor Inc. 6 * Copyright 2009-2011 Freescale Semiconductor Inc.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the 9 * under the terms of the GNU General Public License as published by the
@@ -22,6 +22,7 @@
22#include <asm/time.h> 22#include <asm/time.h>
23#include <asm/machdep.h> 23#include <asm/machdep.h>
24#include <asm/pci-bridge.h> 24#include <asm/pci-bridge.h>
25#include <asm/ppc-pci.h>
25#include <mm/mmu_decl.h> 26#include <mm/mmu_decl.h>
26#include <asm/prom.h> 27#include <asm/prom.h>
27#include <asm/udbg.h> 28#include <asm/udbg.h>
@@ -61,10 +62,6 @@ void __init corenet_ds_pic_init(void)
61 mpic_init(mpic); 62 mpic_init(mpic);
62} 63}
63 64
64#ifdef CONFIG_PCI
65static int primary_phb_addr;
66#endif
67
68/* 65/*
69 * Setup the architecture 66 * Setup the architecture
70 */ 67 */
@@ -85,18 +82,19 @@ void __init corenet_ds_setup_arch(void)
85#endif 82#endif
86 83
87#ifdef CONFIG_PCI 84#ifdef CONFIG_PCI
88 for_each_compatible_node(np, "pci", "fsl,p4080-pcie") { 85 for_each_node_by_type(np, "pci") {
89 struct resource rsrc; 86 if (of_device_is_compatible(np, "fsl,p4080-pcie") ||
90 of_address_to_resource(np, 0, &rsrc); 87 of_device_is_compatible(np, "fsl,qoriq-pcie-v2.2")) {
91 if ((rsrc.start & 0xfffff) == primary_phb_addr)
92 fsl_add_bridge(np, 1);
93 else
94 fsl_add_bridge(np, 0); 88 fsl_add_bridge(np, 0);
95 89 hose = pci_find_hose_for_OF_device(np);
96 hose = pci_find_hose_for_OF_device(np); 90 max = min(max, hose->dma_window_base_cur +
97 max = min(max, hose->dma_window_base_cur + 91 hose->dma_window_size);
98 hose->dma_window_size); 92 }
99 } 93 }
94
95#ifdef CONFIG_PPC64
96 pci_devs_phb_init();
97#endif
100#endif 98#endif
101 99
102#ifdef CONFIG_SWIOTLB 100#ifdef CONFIG_SWIOTLB
@@ -116,6 +114,19 @@ static const struct of_device_id of_device_ids[] __devinitconst = {
116 { 114 {
117 .compatible = "fsl,rapidio-delta", 115 .compatible = "fsl,rapidio-delta",
118 }, 116 },
117 {
118 .compatible = "fsl,p4080-pcie",
119 },
120 {
121 .compatible = "fsl,qoriq-pcie-v2.2",
122 },
123 /* The following two are for the Freescale hypervisor */
124 {
125 .name = "hypervisor",
126 },
127 {
128 .name = "handles",
129 },
119 {} 130 {}
120}; 131};
121 132
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 6299a2a51ae8..2bf99786d249 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -31,7 +31,7 @@
31#include <asm/system.h> 31#include <asm/system.h>
32#include <asm/pgtable.h> 32#include <asm/pgtable.h>
33#include <asm/page.h> 33#include <asm/page.h>
34#include <asm/atomic.h> 34#include <linux/atomic.h>
35#include <asm/time.h> 35#include <asm/time.h>
36#include <asm/io.h> 36#include <asm/io.h>
37#include <asm/machdep.h> 37#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index c7b97f70312e..1b9a8cf1873a 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -83,7 +83,8 @@ void __init mpc85xx_ds_pic_init(void)
83 if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) { 83 if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) {
84 mpic = mpic_alloc(np, r.start, 84 mpic = mpic_alloc(np, r.start,
85 MPIC_PRIMARY | 85 MPIC_PRIMARY |
86 MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS, 86 MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
87 MPIC_SINGLE_DEST_CPU,
87 0, 256, " OpenPIC "); 88 0, 256, " OpenPIC ");
88 } else { 89 } else {
89 mpic = mpic_alloc(np, r.start, 90 mpic = mpic_alloc(np, r.start,
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 747d1ee661fd..973b3f4a4b49 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -36,7 +36,7 @@
36#include <linux/memblock.h> 36#include <linux/memblock.h>
37 37
38#include <asm/system.h> 38#include <asm/system.h>
39#include <asm/atomic.h> 39#include <linux/atomic.h>
40#include <asm/time.h> 40#include <asm/time.h>
41#include <asm/io.h> 41#include <asm/io.h>
42#include <asm/machdep.h> 42#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index 088f30b0c088..f5ff9110c97e 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -58,10 +58,11 @@ void __init mpc85xx_rdb_pic_init(void)
58 return; 58 return;
59 } 59 }
60 60
61 if (of_flat_dt_is_compatible(root, "fsl,85XXRDB-CAMP")) { 61 if (of_flat_dt_is_compatible(root, "fsl,MPC85XXRDB-CAMP")) {
62 mpic = mpic_alloc(np, r.start, 62 mpic = mpic_alloc(np, r.start,
63 MPIC_PRIMARY | 63 MPIC_PRIMARY |
64 MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS, 64 MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
65 MPIC_SINGLE_DEST_CPU,
65 0, 256, " OpenPIC "); 66 0, 256, " OpenPIC ");
66 } else { 67 } else {
67 mpic = mpic_alloc(np, r.start, 68 mpic = mpic_alloc(np, r.start,
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
new file mode 100644
index 000000000000..d7387fa7f534
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -0,0 +1,122 @@
1/*
2 * P1010RDB Board Setup
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/stddef.h>
13#include <linux/kernel.h>
14#include <linux/pci.h>
15#include <linux/delay.h>
16#include <linux/interrupt.h>
17#include <linux/of_platform.h>
18
19#include <asm/system.h>
20#include <asm/time.h>
21#include <asm/machdep.h>
22#include <asm/pci-bridge.h>
23#include <mm/mmu_decl.h>
24#include <asm/prom.h>
25#include <asm/udbg.h>
26#include <asm/mpic.h>
27
28#include <sysdev/fsl_soc.h>
29#include <sysdev/fsl_pci.h>
30
31void __init p1010_rdb_pic_init(void)
32{
33 struct mpic *mpic;
34 struct resource r;
35 struct device_node *np;
36
37 np = of_find_node_by_type(NULL, "open-pic");
38 if (np == NULL) {
39 printk(KERN_ERR "Could not find open-pic node\n");
40 return;
41 }
42
43 if (of_address_to_resource(np, 0, &r)) {
44 printk(KERN_ERR "Failed to map mpic register space\n");
45 of_node_put(np);
46 return;
47 }
48
49 mpic = mpic_alloc(np, r.start, MPIC_PRIMARY | MPIC_WANTS_RESET |
50 MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
51 0, 256, " OpenPIC ");
52
53 BUG_ON(mpic == NULL);
54 of_node_put(np);
55
56 mpic_init(mpic);
57
58}
59
60
61/*
62 * Setup the architecture
63 */
64static void __init p1010_rdb_setup_arch(void)
65{
66#ifdef CONFIG_PCI
67 struct device_node *np;
68#endif
69
70 if (ppc_md.progress)
71 ppc_md.progress("p1010_rdb_setup_arch()", 0);
72
73#ifdef CONFIG_PCI
74 for_each_node_by_type(np, "pci") {
75 if (of_device_is_compatible(np, "fsl,p1010-pcie"))
76 fsl_add_bridge(np, 0);
77 }
78
79#endif
80
81 printk(KERN_INFO "P1010 RDB board from Freescale Semiconductor\n");
82}
83
84static struct of_device_id __initdata p1010rdb_ids[] = {
85 { .type = "soc", },
86 { .compatible = "soc", },
87 { .compatible = "simple-bus", },
88 {},
89};
90
91static int __init p1010rdb_publish_devices(void)
92{
93 return of_platform_bus_probe(NULL, p1010rdb_ids, NULL);
94}
95machine_device_initcall(p1010_rdb, p1010rdb_publish_devices);
96machine_arch_initcall(p1010_rdb, swiotlb_setup_bus_notifier);
97
98/*
99 * Called very early, device-tree isn't unflattened
100 */
101static int __init p1010_rdb_probe(void)
102{
103 unsigned long root = of_get_flat_dt_root();
104
105 if (of_flat_dt_is_compatible(root, "fsl,P1010RDB"))
106 return 1;
107 return 0;
108}
109
110define_machine(p1010_rdb) {
111 .name = "P1010 RDB",
112 .probe = p1010_rdb_probe,
113 .setup_arch = p1010_rdb_setup_arch,
114 .init_IRQ = p1010_rdb_pic_init,
115#ifdef CONFIG_PCI
116 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
117#endif
118 .get_irq = mpic_get_irq,
119 .restart = fsl_rstcr_restart,
120 .calibrate_decr = generic_calibrate_decr,
121 .progress = udbg_progress,
122};
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 7eb5c40c069f..266b3aadfe5e 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -129,6 +129,7 @@ static void p1022ds_set_gamma_table(int monitor_port, char *gamma_table_base)
129static void p1022ds_set_monitor_port(int monitor_port) 129static void p1022ds_set_monitor_port(int monitor_port)
130{ 130{
131 struct device_node *pixis_node; 131 struct device_node *pixis_node;
132 void __iomem *pixis;
132 u8 __iomem *brdcfg1; 133 u8 __iomem *brdcfg1;
133 134
134 pixis_node = of_find_compatible_node(NULL, NULL, "fsl,p1022ds-pixis"); 135 pixis_node = of_find_compatible_node(NULL, NULL, "fsl,p1022ds-pixis");
@@ -137,12 +138,12 @@ static void p1022ds_set_monitor_port(int monitor_port)
137 return; 138 return;
138 } 139 }
139 140
140 brdcfg1 = of_iomap(pixis_node, 0); 141 pixis = of_iomap(pixis_node, 0);
141 if (!brdcfg1) { 142 if (!pixis) {
142 pr_err("p1022ds: could not map ngPIXIS registers\n"); 143 pr_err("p1022ds: could not map ngPIXIS registers\n");
143 return; 144 return;
144 } 145 }
145 brdcfg1 += 9; /* BRDCFG1 is at offset 9 in the ngPIXIS */ 146 brdcfg1 = pixis + 9; /* BRDCFG1 is at offset 9 in the ngPIXIS */
146 147
147 switch (monitor_port) { 148 switch (monitor_port) {
148 case 0: /* DVI */ 149 case 0: /* DVI */
@@ -158,6 +159,8 @@ static void p1022ds_set_monitor_port(int monitor_port)
158 default: 159 default:
159 pr_err("p1022ds: unsupported monitor port %i\n", monitor_port); 160 pr_err("p1022ds: unsupported monitor port %i\n", monitor_port);
160 } 161 }
162
163 iounmap(pixis);
161} 164}
162 165
163/** 166/**
@@ -192,8 +195,13 @@ void p1022ds_set_pixel_clock(unsigned int pixclock)
192 do_div(temp, pixclock); 195 do_div(temp, pixclock);
193 freq = temp; 196 freq = temp;
194 197
195 /* pixclk is the ratio of the platform clock to the pixel clock */ 198 /*
199 * 'pxclk' is the ratio of the platform clock to the pixel clock.
200 * This number is programmed into the CLKDVDR register, and the valid
201 * range of values is 2-255.
202 */
196 pxclk = DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq); 203 pxclk = DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq);
204 pxclk = clamp_t(u32, pxclk, 2, 255);
197 205
198 /* Disable the pixel clock, and set it to non-inverted and no delay */ 206 /* Disable the pixel clock, and set it to non-inverted and no delay */
199 clrbits32(&guts->clkdvdr, 207 clrbits32(&guts->clkdvdr,
@@ -201,6 +209,8 @@ void p1022ds_set_pixel_clock(unsigned int pixclock)
201 209
202 /* Enable the clock and set the pxclk */ 210 /* Enable the clock and set the pxclk */
203 setbits32(&guts->clkdvdr, CLKDVDR_PXCKEN | (pxclk << 16)); 211 setbits32(&guts->clkdvdr, CLKDVDR_PXCKEN | (pxclk << 16));
212
213 iounmap(guts);
204} 214}
205 215
206/** 216/**
diff --git a/arch/powerpc/platforms/85xx/p1023_rds.c b/arch/powerpc/platforms/85xx/p1023_rds.c
new file mode 100644
index 000000000000..835e0b335bfa
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/p1023_rds.c
@@ -0,0 +1,162 @@
1/*
2 * Copyright 2010-2011 Freescale Semiconductor, Inc.
3 *
4 * Author: Roy Zang <tie-fei.zang@freescale.com>
5 *
6 * Description:
7 * P1023 RDS Board Setup
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/errno.h>
18#include <linux/pci.h>
19#include <linux/delay.h>
20#include <linux/module.h>
21#include <linux/fsl_devices.h>
22#include <linux/of_platform.h>
23#include <linux/of_device.h>
24
25#include <asm/system.h>
26#include <asm/time.h>
27#include <asm/machdep.h>
28#include <asm/pci-bridge.h>
29#include <mm/mmu_decl.h>
30#include <asm/prom.h>
31#include <asm/udbg.h>
32#include <asm/mpic.h>
33
34#include <sysdev/fsl_soc.h>
35#include <sysdev/fsl_pci.h>
36
37/* ************************************************************************
38 *
39 * Setup the architecture
40 *
41 */
42#ifdef CONFIG_SMP
43void __init mpc85xx_smp_init(void);
44#endif
45
46static void __init mpc85xx_rds_setup_arch(void)
47{
48 struct device_node *np;
49
50 if (ppc_md.progress)
51 ppc_md.progress("p1023_rds_setup_arch()", 0);
52
53 /* Map BCSR area */
54 np = of_find_node_by_name(NULL, "bcsr");
55 if (np != NULL) {
56 static u8 __iomem *bcsr_regs;
57
58 bcsr_regs = of_iomap(np, 0);
59 of_node_put(np);
60
61 if (!bcsr_regs) {
62 printk(KERN_ERR
63 "BCSR: Failed to map bcsr register space\n");
64 return;
65 } else {
66#define BCSR15_I2C_BUS0_SEG_CLR 0x07
67#define BCSR15_I2C_BUS0_SEG2 0x02
68/*
69 * Note: Accessing exclusively i2c devices.
70 *
71 * The i2c controller selects initially ID EEPROM in the u-boot;
72 * but if menu configuration selects RTC support in the kernel,
73 * the i2c controller switches to select RTC chip in the kernel.
74 */
75#ifdef CONFIG_RTC_CLASS
76 /* Enable RTC chip on the segment #2 of i2c */
77 clrbits8(&bcsr_regs[15], BCSR15_I2C_BUS0_SEG_CLR);
78 setbits8(&bcsr_regs[15], BCSR15_I2C_BUS0_SEG2);
79#endif
80
81 iounmap(bcsr_regs);
82 }
83 }
84
85#ifdef CONFIG_PCI
86 for_each_compatible_node(np, "pci", "fsl,p1023-pcie")
87 fsl_add_bridge(np, 0);
88#endif
89
90#ifdef CONFIG_SMP
91 mpc85xx_smp_init();
92#endif
93}
94
95static struct of_device_id p1023_ids[] = {
96 { .type = "soc", },
97 { .compatible = "soc", },
98 { .compatible = "simple-bus", },
99 {},
100};
101
102
103static int __init p1023_publish_devices(void)
104{
105 of_platform_bus_probe(NULL, p1023_ids, NULL);
106
107 return 0;
108}
109
110machine_device_initcall(p1023_rds, p1023_publish_devices);
111
112static void __init mpc85xx_rds_pic_init(void)
113{
114 struct mpic *mpic;
115 struct resource r;
116 struct device_node *np = NULL;
117
118 np = of_find_node_by_type(NULL, "open-pic");
119 if (!np) {
120 printk(KERN_ERR "Could not find open-pic node\n");
121 return;
122 }
123
124 if (of_address_to_resource(np, 0, &r)) {
125 printk(KERN_ERR "Failed to map mpic register space\n");
126 of_node_put(np);
127 return;
128 }
129
130 mpic = mpic_alloc(np, r.start,
131 MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
132 MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
133 0, 256, " OpenPIC ");
134
135 BUG_ON(mpic == NULL);
136 of_node_put(np);
137
138 mpic_init(mpic);
139}
140
141static int __init p1023_rds_probe(void)
142{
143 unsigned long root = of_get_flat_dt_root();
144
145 return of_flat_dt_is_compatible(root, "fsl,P1023RDS");
146
147}
148
149define_machine(p1023_rds) {
150 .name = "P1023 RDS",
151 .probe = p1023_rds_probe,
152 .setup_arch = mpc85xx_rds_setup_arch,
153 .init_IRQ = mpc85xx_rds_pic_init,
154 .get_irq = mpic_get_irq,
155 .restart = fsl_rstcr_restart,
156 .calibrate_decr = generic_calibrate_decr,
157 .progress = udbg_progress,
158#ifdef CONFIG_PCI
159 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
160#endif
161};
162
diff --git a/arch/powerpc/platforms/85xx/p2040_rdb.c b/arch/powerpc/platforms/85xx/p2040_rdb.c
new file mode 100644
index 000000000000..32b56ac73dfb
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/p2040_rdb.c
@@ -0,0 +1,88 @@
1/*
2 * P2040 RDB Setup
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/pci.h>
14#include <linux/kdev_t.h>
15#include <linux/delay.h>
16#include <linux/interrupt.h>
17#include <linux/phy.h>
18
19#include <asm/system.h>
20#include <asm/time.h>
21#include <asm/machdep.h>
22#include <asm/pci-bridge.h>
23#include <mm/mmu_decl.h>
24#include <asm/prom.h>
25#include <asm/udbg.h>
26#include <asm/mpic.h>
27
28#include <linux/of_platform.h>
29#include <sysdev/fsl_soc.h>
30#include <sysdev/fsl_pci.h>
31#include <asm/ehv_pic.h>
32
33#include "corenet_ds.h"
34
35/*
36 * Called very early, device-tree isn't unflattened
37 */
38static int __init p2040_rdb_probe(void)
39{
40 unsigned long root = of_get_flat_dt_root();
41#ifdef CONFIG_SMP
42 extern struct smp_ops_t smp_85xx_ops;
43#endif
44
45 if (of_flat_dt_is_compatible(root, "fsl,P2040RDB"))
46 return 1;
47
48 /* Check if we're running under the Freescale hypervisor */
49 if (of_flat_dt_is_compatible(root, "fsl,P2040RDB-hv")) {
50 ppc_md.init_IRQ = ehv_pic_init;
51 ppc_md.get_irq = ehv_pic_get_irq;
52 ppc_md.restart = fsl_hv_restart;
53 ppc_md.power_off = fsl_hv_halt;
54 ppc_md.halt = fsl_hv_halt;
55#ifdef CONFIG_SMP
56 /*
57 * Disable the timebase sync operations because we can't write
58 * to the timebase registers under the hypervisor.
59 */
60 smp_85xx_ops.give_timebase = NULL;
61 smp_85xx_ops.take_timebase = NULL;
62#endif
63 return 1;
64 }
65
66 return 0;
67}
68
69define_machine(p2040_rdb) {
70 .name = "P2040 RDB",
71 .probe = p2040_rdb_probe,
72 .setup_arch = corenet_ds_setup_arch,
73 .init_IRQ = corenet_ds_pic_init,
74#ifdef CONFIG_PCI
75 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
76#endif
77 .get_irq = mpic_get_coreint_irq,
78 .restart = fsl_rstcr_restart,
79 .calibrate_decr = generic_calibrate_decr,
80 .progress = udbg_progress,
81 .power_save = e500_idle,
82};
83
84machine_device_initcall(p2040_rdb, corenet_ds_publish_devices);
85
86#ifdef CONFIG_SWIOTLB
87machine_arch_initcall(p2040_rdb, swiotlb_setup_bus_notifier);
88#endif
diff --git a/arch/powerpc/platforms/85xx/p3041_ds.c b/arch/powerpc/platforms/85xx/p3041_ds.c
index 0ed52e18298c..96d99a374dcf 100644
--- a/arch/powerpc/platforms/85xx/p3041_ds.c
+++ b/arch/powerpc/platforms/85xx/p3041_ds.c
@@ -30,6 +30,7 @@
30#include <linux/of_platform.h> 30#include <linux/of_platform.h>
31#include <sysdev/fsl_soc.h> 31#include <sysdev/fsl_soc.h>
32#include <sysdev/fsl_pci.h> 32#include <sysdev/fsl_pci.h>
33#include <asm/ehv_pic.h>
33 34
34#include "corenet_ds.h" 35#include "corenet_ds.h"
35 36
@@ -39,8 +40,32 @@
39static int __init p3041_ds_probe(void) 40static int __init p3041_ds_probe(void)
40{ 41{
41 unsigned long root = of_get_flat_dt_root(); 42 unsigned long root = of_get_flat_dt_root();
43#ifdef CONFIG_SMP
44 extern struct smp_ops_t smp_85xx_ops;
45#endif
46
47 if (of_flat_dt_is_compatible(root, "fsl,P3041DS"))
48 return 1;
49
50 /* Check if we're running under the Freescale hypervisor */
51 if (of_flat_dt_is_compatible(root, "fsl,P3041DS-hv")) {
52 ppc_md.init_IRQ = ehv_pic_init;
53 ppc_md.get_irq = ehv_pic_get_irq;
54 ppc_md.restart = fsl_hv_restart;
55 ppc_md.power_off = fsl_hv_halt;
56 ppc_md.halt = fsl_hv_halt;
57#ifdef CONFIG_SMP
58 /*
59 * Disable the timebase sync operations because we can't write
60 * to the timebase registers under the hypervisor.
61 */
62 smp_85xx_ops.give_timebase = NULL;
63 smp_85xx_ops.take_timebase = NULL;
64#endif
65 return 1;
66 }
42 67
43 return of_flat_dt_is_compatible(root, "fsl,P3041DS"); 68 return 0;
44} 69}
45 70
46define_machine(p3041_ds) { 71define_machine(p3041_ds) {
@@ -55,6 +80,7 @@ define_machine(p3041_ds) {
55 .restart = fsl_rstcr_restart, 80 .restart = fsl_rstcr_restart,
56 .calibrate_decr = generic_calibrate_decr, 81 .calibrate_decr = generic_calibrate_decr,
57 .progress = udbg_progress, 82 .progress = udbg_progress,
83 .power_save = e500_idle,
58}; 84};
59 85
60machine_device_initcall(p3041_ds, corenet_ds_publish_devices); 86machine_device_initcall(p3041_ds, corenet_ds_publish_devices);
diff --git a/arch/powerpc/platforms/85xx/p4080_ds.c b/arch/powerpc/platforms/85xx/p4080_ds.c
index 84170460497b..d1b21d7663e3 100644
--- a/arch/powerpc/platforms/85xx/p4080_ds.c
+++ b/arch/powerpc/platforms/85xx/p4080_ds.c
@@ -29,31 +29,42 @@
29#include <linux/of_platform.h> 29#include <linux/of_platform.h>
30#include <sysdev/fsl_soc.h> 30#include <sysdev/fsl_soc.h>
31#include <sysdev/fsl_pci.h> 31#include <sysdev/fsl_pci.h>
32#include <asm/ehv_pic.h>
32 33
33#include "corenet_ds.h" 34#include "corenet_ds.h"
34 35
35#ifdef CONFIG_PCI
36static int primary_phb_addr;
37#endif
38
39/* 36/*
40 * Called very early, device-tree isn't unflattened 37 * Called very early, device-tree isn't unflattened
41 */ 38 */
42static int __init p4080_ds_probe(void) 39static int __init p4080_ds_probe(void)
43{ 40{
44 unsigned long root = of_get_flat_dt_root(); 41 unsigned long root = of_get_flat_dt_root();
42#ifdef CONFIG_SMP
43 extern struct smp_ops_t smp_85xx_ops;
44#endif
45 45
46 if (of_flat_dt_is_compatible(root, "fsl,P4080DS")) { 46 if (of_flat_dt_is_compatible(root, "fsl,P4080DS"))
47#ifdef CONFIG_PCI 47 return 1;
48 /* treat PCIe1 as primary, 48
49 * shouldn't matter as we have no ISA on the board 49 /* Check if we're running under the Freescale hypervisor */
50 */ 50 if (of_flat_dt_is_compatible(root, "fsl,P4080DS-hv")) {
51 primary_phb_addr = 0x0000; 51 ppc_md.init_IRQ = ehv_pic_init;
52 ppc_md.get_irq = ehv_pic_get_irq;
53 ppc_md.restart = fsl_hv_restart;
54 ppc_md.power_off = fsl_hv_halt;
55 ppc_md.halt = fsl_hv_halt;
56#ifdef CONFIG_SMP
57 /*
58 * Disable the timebase sync operations because we can't write
59 * to the timebase registers under the hypervisor.
60 */
61 smp_85xx_ops.give_timebase = NULL;
62 smp_85xx_ops.take_timebase = NULL;
52#endif 63#endif
53 return 1; 64 return 1;
54 } else {
55 return 0;
56 } 65 }
66
67 return 0;
57} 68}
58 69
59define_machine(p4080_ds) { 70define_machine(p4080_ds) {
@@ -68,7 +79,10 @@ define_machine(p4080_ds) {
68 .restart = fsl_rstcr_restart, 79 .restart = fsl_rstcr_restart,
69 .calibrate_decr = generic_calibrate_decr, 80 .calibrate_decr = generic_calibrate_decr,
70 .progress = udbg_progress, 81 .progress = udbg_progress,
82 .power_save = e500_idle,
71}; 83};
72 84
73machine_device_initcall(p4080_ds, corenet_ds_publish_devices); 85machine_device_initcall(p4080_ds, corenet_ds_publish_devices);
86#ifdef CONFIG_SWIOTLB
74machine_arch_initcall(p4080_ds, swiotlb_setup_bus_notifier); 87machine_arch_initcall(p4080_ds, swiotlb_setup_bus_notifier);
88#endif
diff --git a/arch/powerpc/platforms/85xx/p5020_ds.c b/arch/powerpc/platforms/85xx/p5020_ds.c
index 7467b712ee00..e8cba5004fd8 100644
--- a/arch/powerpc/platforms/85xx/p5020_ds.c
+++ b/arch/powerpc/platforms/85xx/p5020_ds.c
@@ -30,6 +30,7 @@
30#include <linux/of_platform.h> 30#include <linux/of_platform.h>
31#include <sysdev/fsl_soc.h> 31#include <sysdev/fsl_soc.h>
32#include <sysdev/fsl_pci.h> 32#include <sysdev/fsl_pci.h>
33#include <asm/ehv_pic.h>
33 34
34#include "corenet_ds.h" 35#include "corenet_ds.h"
35 36
@@ -39,8 +40,32 @@
39static int __init p5020_ds_probe(void) 40static int __init p5020_ds_probe(void)
40{ 41{
41 unsigned long root = of_get_flat_dt_root(); 42 unsigned long root = of_get_flat_dt_root();
43#ifdef CONFIG_SMP
44 extern struct smp_ops_t smp_85xx_ops;
45#endif
46
47 if (of_flat_dt_is_compatible(root, "fsl,P5020DS"))
48 return 1;
49
50 /* Check if we're running under the Freescale hypervisor */
51 if (of_flat_dt_is_compatible(root, "fsl,P5020DS-hv")) {
52 ppc_md.init_IRQ = ehv_pic_init;
53 ppc_md.get_irq = ehv_pic_get_irq;
54 ppc_md.restart = fsl_hv_restart;
55 ppc_md.power_off = fsl_hv_halt;
56 ppc_md.halt = fsl_hv_halt;
57#ifdef CONFIG_SMP
58 /*
59 * Disable the timebase sync operations because we can't write
60 * to the timebase registers under the hypervisor.
61 */
62 smp_85xx_ops.give_timebase = NULL;
63 smp_85xx_ops.take_timebase = NULL;
64#endif
65 return 1;
66 }
42 67
43 return of_flat_dt_is_compatible(root, "fsl,P5020DS"); 68 return 0;
44} 69}
45 70
46define_machine(p5020_ds) { 71define_machine(p5020_ds) {
@@ -60,6 +85,11 @@ define_machine(p5020_ds) {
60 .restart = fsl_rstcr_restart, 85 .restart = fsl_rstcr_restart,
61 .calibrate_decr = generic_calibrate_decr, 86 .calibrate_decr = generic_calibrate_decr,
62 .progress = udbg_progress, 87 .progress = udbg_progress,
88#ifdef CONFIG_PPC64
89 .power_save = book3e_idle,
90#else
91 .power_save = e500_idle,
92#endif
63}; 93};
64 94
65machine_device_initcall(p5020_ds, corenet_ds_publish_devices); 95machine_device_initcall(p5020_ds, corenet_ds_publish_devices);
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index ecdd8c09e4ed..d07dcb7f4ee9 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -34,7 +34,7 @@
34#include <asm/system.h> 34#include <asm/system.h>
35#include <asm/pgtable.h> 35#include <asm/pgtable.h>
36#include <asm/page.h> 36#include <asm/page.h>
37#include <asm/atomic.h> 37#include <linux/atomic.h>
38#include <asm/time.h> 38#include <asm/time.h>
39#include <asm/io.h> 39#include <asm/io.h>
40#include <asm/machdep.h> 40#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c
index d2dfd465fbf6..09ced7221750 100644
--- a/arch/powerpc/platforms/85xx/sbc8560.c
+++ b/arch/powerpc/platforms/85xx/sbc8560.c
@@ -285,7 +285,7 @@ static int __init sbc8560_bdrstcr_init(void)
285 285
286 printk(KERN_INFO "sbc8560: Found BRSTCR at i/o 0x%x\n", res.start); 286 printk(KERN_INFO "sbc8560: Found BRSTCR at i/o 0x%x\n", res.start);
287 287
288 brstcr = ioremap(res.start, res.end - res.start); 288 brstcr = ioremap(res.start, resource_size(&res));
289 if(!brstcr) 289 if(!brstcr)
290 printk(KERN_WARNING "sbc8560: ioremap of brstcr failed.\n"); 290 printk(KERN_WARNING "sbc8560: ioremap of brstcr failed.\n");
291 291
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index d6a93a10c0f5..5b9b901f6443 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -2,7 +2,7 @@
2 * Author: Andy Fleming <afleming@freescale.com> 2 * Author: Andy Fleming <afleming@freescale.com>
3 * Kumar Gala <galak@kernel.crashing.org> 3 * Kumar Gala <galak@kernel.crashing.org>
4 * 4 *
5 * Copyright 2006-2008 Freescale Semiconductor Inc. 5 * Copyright 2006-2008, 2011 Freescale Semiconductor Inc.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the 8 * under the terms of the GNU General Public License as published by the
@@ -111,14 +111,6 @@ smp_85xx_kick_cpu(int nr)
111 return 0; 111 return 0;
112} 112}
113 113
114static void __init
115smp_85xx_setup_cpu(int cpu_nr)
116{
117 mpic_setup_this_cpu();
118 if (cpu_has_feature(CPU_FTR_DBELL))
119 doorbell_setup_this_cpu();
120}
121
122struct smp_ops_t smp_85xx_ops = { 114struct smp_ops_t smp_85xx_ops = {
123 .kick_cpu = smp_85xx_kick_cpu, 115 .kick_cpu = smp_85xx_kick_cpu,
124#ifdef CONFIG_KEXEC 116#ifdef CONFIG_KEXEC
@@ -224,24 +216,36 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
224} 216}
225#endif /* CONFIG_KEXEC */ 217#endif /* CONFIG_KEXEC */
226 218
219static void __init
220smp_85xx_setup_cpu(int cpu_nr)
221{
222 if (smp_85xx_ops.probe == smp_mpic_probe)
223 mpic_setup_this_cpu();
224
225 if (cpu_has_feature(CPU_FTR_DBELL))
226 doorbell_setup_this_cpu();
227}
228
227void __init mpc85xx_smp_init(void) 229void __init mpc85xx_smp_init(void)
228{ 230{
229 struct device_node *np; 231 struct device_node *np;
230 232
233 smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
234
231 np = of_find_node_by_type(NULL, "open-pic"); 235 np = of_find_node_by_type(NULL, "open-pic");
232 if (np) { 236 if (np) {
233 smp_85xx_ops.probe = smp_mpic_probe; 237 smp_85xx_ops.probe = smp_mpic_probe;
234 smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
235 smp_85xx_ops.message_pass = smp_mpic_message_pass; 238 smp_85xx_ops.message_pass = smp_mpic_message_pass;
236 } 239 }
237 240
238 if (cpu_has_feature(CPU_FTR_DBELL)) { 241 if (cpu_has_feature(CPU_FTR_DBELL)) {
239 smp_85xx_ops.message_pass = smp_muxed_ipi_message_pass; 242 /*
243 * If left NULL, .message_pass defaults to
244 * smp_muxed_ipi_message_pass
245 */
240 smp_85xx_ops.cause_ipi = doorbell_cause_ipi; 246 smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
241 } 247 }
242 248
243 BUG_ON(!smp_85xx_ops.message_pass);
244
245 smp_ops = &smp_85xx_ops; 249 smp_ops = &smp_85xx_ops;
246 250
247#ifdef CONFIG_KEXEC 251#ifdef CONFIG_KEXEC
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index 0125604d096e..a9dc5e795123 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -123,7 +123,7 @@ static void xes_mpc85xx_fixups(void)
123 continue; 123 continue;
124 } 124 }
125 125
126 l2_base = ioremap(r[0].start, r[0].end - r[0].start + 1); 126 l2_base = ioremap(r[0].start, resource_size(&r[0]));
127 127
128 xes_mpc85xx_configure_l2(l2_base); 128 xes_mpc85xx_configure_l2(l2_base);
129 } 129 }
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index a896511690c2..74e018ef724b 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -39,12 +39,19 @@
39#include <sysdev/fsl_pci.h> 39#include <sysdev/fsl_pci.h>
40#include <sysdev/fsl_soc.h> 40#include <sysdev/fsl_soc.h>
41#include <sysdev/simple_gpio.h> 41#include <sysdev/simple_gpio.h>
42#include <asm/fsl_guts.h>
42 43
43#include "mpc86xx.h" 44#include "mpc86xx.h"
44 45
45static struct device_node *pixis_node; 46static struct device_node *pixis_node;
46static unsigned char *pixis_bdcfg0, *pixis_arch; 47static unsigned char *pixis_bdcfg0, *pixis_arch;
47 48
49/* DIU Pixel Clock bits of the CLKDVDR Global Utilities register */
50#define CLKDVDR_PXCKEN 0x80000000
51#define CLKDVDR_PXCKINV 0x10000000
52#define CLKDVDR_PXCKDLY 0x06000000
53#define CLKDVDR_PXCLK_MASK 0x001F0000
54
48#ifdef CONFIG_SUSPEND 55#ifdef CONFIG_SUSPEND
49static irqreturn_t mpc8610_sw9_irq(int irq, void *data) 56static irqreturn_t mpc8610_sw9_irq(int irq, void *data)
50{ 57{
@@ -205,72 +212,54 @@ void mpc8610hpcd_set_monitor_port(int monitor_port)
205 bdcfg[monitor_port]); 212 bdcfg[monitor_port]);
206} 213}
207 214
215/**
216 * mpc8610hpcd_set_pixel_clock: program the DIU's clock
217 *
218 * @pixclock: the wavelength, in picoseconds, of the clock
219 */
208void mpc8610hpcd_set_pixel_clock(unsigned int pixclock) 220void mpc8610hpcd_set_pixel_clock(unsigned int pixclock)
209{ 221{
210 u32 __iomem *clkdvdr; 222 struct device_node *guts_np = NULL;
211 u32 temp; 223 struct ccsr_guts_86xx __iomem *guts;
212 /* variables for pixel clock calcs */ 224 unsigned long freq;
213 ulong bestval, bestfreq, speed_ccb, minpixclock, maxpixclock; 225 u64 temp;
214 ulong pixval; 226 u32 pxclk;
215 long err; 227
216 int i; 228 /* Map the global utilities registers. */
217 229 guts_np = of_find_compatible_node(NULL, NULL, "fsl,mpc8610-guts");
218 clkdvdr = ioremap(get_immrbase() + 0xe0800, sizeof(u32)); 230 if (!guts_np) {
219 if (!clkdvdr) { 231 pr_err("mpc8610hpcd: missing global utilties device node\n");
220 printk(KERN_ERR "Err: can't map clock divider register!\n");
221 return; 232 return;
222 } 233 }
223 234
224 /* Pixel Clock configuration */ 235 guts = of_iomap(guts_np, 0);
225 speed_ccb = fsl_get_sys_freq(); 236 of_node_put(guts_np);
226 237 if (!guts) {
227 /* Calculate the pixel clock with the smallest error */ 238 pr_err("mpc8610hpcd: could not map global utilties device\n");
228 /* calculate the following in steps to avoid overflow */ 239 return;
229 pr_debug("DIU pixclock in ps - %d\n", pixclock);
230 temp = 1000000000/pixclock;
231 temp *= 1000;
232 pixclock = temp;
233 pr_debug("DIU pixclock freq - %u\n", pixclock);
234
235 temp = pixclock * 5 / 100;
236 pr_debug("deviation = %d\n", temp);
237 minpixclock = pixclock - temp;
238 maxpixclock = pixclock + temp;
239 pr_debug("DIU minpixclock - %lu\n", minpixclock);
240 pr_debug("DIU maxpixclock - %lu\n", maxpixclock);
241 pixval = speed_ccb/pixclock;
242 pr_debug("DIU pixval = %lu\n", pixval);
243
244 err = 100000000;
245 bestval = pixval;
246 pr_debug("DIU bestval = %lu\n", bestval);
247
248 bestfreq = 0;
249 for (i = -1; i <= 1; i++) {
250 temp = speed_ccb / ((pixval+i) + 1);
251 pr_debug("DIU test pixval i= %d, pixval=%lu, temp freq. = %u\n",
252 i, pixval, temp);
253 if ((temp < minpixclock) || (temp > maxpixclock))
254 pr_debug("DIU exceeds monitor range (%lu to %lu)\n",
255 minpixclock, maxpixclock);
256 else if (abs(temp - pixclock) < err) {
257 pr_debug("Entered the else if block %d\n", i);
258 err = abs(temp - pixclock);
259 bestval = pixval+i;
260 bestfreq = temp;
261 }
262 } 240 }
263 241
264 pr_debug("DIU chose = %lx\n", bestval); 242 /* Convert pixclock from a wavelength to a frequency */
265 pr_debug("DIU error = %ld\n NomPixClk ", err); 243 temp = 1000000000000ULL;
266 pr_debug("DIU: Best Freq = %lx\n", bestfreq); 244 do_div(temp, pixclock);
267 /* Modify PXCLK in GUTS CLKDVDR */ 245 freq = temp;
268 pr_debug("DIU: Current value of CLKDVDR = 0x%08x\n", (*clkdvdr)); 246
269 temp = (*clkdvdr) & 0x2000FFFF; 247 /*
270 *clkdvdr = temp; /* turn off clock */ 248 * 'pxclk' is the ratio of the platform clock to the pixel clock.
271 *clkdvdr = temp | 0x80000000 | (((bestval) & 0x1F) << 16); 249 * On the MPC8610, the value programmed into CLKDVDR is the ratio
272 pr_debug("DIU: Modified value of CLKDVDR = 0x%08x\n", (*clkdvdr)); 250 * minus one. The valid range of values is 2-31.
273 iounmap(clkdvdr); 251 */
252 pxclk = DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq) - 1;
253 pxclk = clamp_t(u32, pxclk, 2, 31);
254
255 /* Disable the pixel clock, and set it to non-inverted and no delay */
256 clrbits32(&guts->clkdvdr,
257 CLKDVDR_PXCKEN | CLKDVDR_PXCKDLY | CLKDVDR_PXCLK_MASK);
258
259 /* Enable the clock and set the pxclk */
260 setbits32(&guts->clkdvdr, CLKDVDR_PXCKEN | (pxclk << 16));
261
262 iounmap(guts);
274} 263}
275 264
276ssize_t mpc8610hpcd_show_monitor_port(int monitor_port, char *buf) 265ssize_t mpc8610hpcd_show_monitor_port(int monitor_port, char *buf)
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index f970ca2b180c..d0af7fb2f344 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -78,6 +78,10 @@ config MPIC
78 bool 78 bool
79 default n 79 default n
80 80
81config PPC_EPAPR_HV_PIC
82 bool
83 default n
84
81config MPIC_WEIRD 85config MPIC_WEIRD
82 bool 86 bool
83 default n 87 default n
@@ -266,7 +270,7 @@ config TAU_AVERAGE
266 270
267config QUICC_ENGINE 271config QUICC_ENGINE
268 bool "Freescale QUICC Engine (QE) Support" 272 bool "Freescale QUICC Engine (QE) Support"
269 depends on FSL_SOC 273 depends on FSL_SOC && PPC32
270 select PPC_LIB_RHEAP 274 select PPC_LIB_RHEAP
271 select CRC32 275 select CRC32
272 help 276 help
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 2165b65876f9..e06e39589a09 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -1,7 +1,6 @@
1config PPC64 1config PPC64
2 bool "64-bit kernel" 2 bool "64-bit kernel"
3 default n 3 default n
4 select PPC_HAVE_PMU_SUPPORT
5 help 4 help
6 This option selects whether a 32-bit or a 64-bit kernel 5 This option selects whether a 32-bit or a 64-bit kernel
7 will be built. 6 will be built.
@@ -69,6 +68,7 @@ choice
69config PPC_BOOK3S_64 68config PPC_BOOK3S_64
70 bool "Server processors" 69 bool "Server processors"
71 select PPC_FPU 70 select PPC_FPU
71 select PPC_HAVE_PMU_SUPPORT
72 72
73config PPC_BOOK3E_64 73config PPC_BOOK3E_64
74 bool "Embedded processors" 74 bool "Embedded processors"
diff --git a/arch/powerpc/platforms/amigaone/Kconfig b/arch/powerpc/platforms/amigaone/Kconfig
index 022476717718..128de25cc284 100644
--- a/arch/powerpc/platforms/amigaone/Kconfig
+++ b/arch/powerpc/platforms/amigaone/Kconfig
@@ -8,7 +8,7 @@ config AMIGAONE
8 select NOT_COHERENT_CACHE 8 select NOT_COHERENT_CACHE
9 select CHECK_CACHE_COHERENCY 9 select CHECK_CACHE_COHERENCY
10 select DEFAULT_UIMAGE 10 select DEFAULT_UIMAGE
11 select PCSPKR_PLATFORM 11 select HAVE_PCSPKR_PLATFORM
12 help 12 help
13 Select AmigaOne for the following machines: 13 Select AmigaOne for the following machines:
14 - AmigaOne SE/Teron CX (G3 only) 14 - AmigaOne SE/Teron CX (G3 only)
diff --git a/arch/powerpc/platforms/cell/celleb_scc_epci.c b/arch/powerpc/platforms/cell/celleb_scc_epci.c
index 05b0db3ef638..844c0facb4f7 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_epci.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_epci.c
@@ -393,19 +393,19 @@ static int __init celleb_setup_epci(struct device_node *node,
393 393
394 if (of_address_to_resource(node, 0, &r)) 394 if (of_address_to_resource(node, 0, &r))
395 goto error; 395 goto error;
396 hose->cfg_addr = ioremap(r.start, (r.end - r.start + 1)); 396 hose->cfg_addr = ioremap(r.start, resource_size(&r));
397 if (!hose->cfg_addr) 397 if (!hose->cfg_addr)
398 goto error; 398 goto error;
399 pr_debug("EPCI: cfg_addr map 0x%016llx->0x%016lx + 0x%016llx\n", 399 pr_debug("EPCI: cfg_addr map 0x%016llx->0x%016lx + 0x%016llx\n",
400 r.start, (unsigned long)hose->cfg_addr, (r.end - r.start + 1)); 400 r.start, (unsigned long)hose->cfg_addr, resource_size(&r));
401 401
402 if (of_address_to_resource(node, 2, &r)) 402 if (of_address_to_resource(node, 2, &r))
403 goto error; 403 goto error;
404 hose->cfg_data = ioremap(r.start, (r.end - r.start + 1)); 404 hose->cfg_data = ioremap(r.start, resource_size(&r));
405 if (!hose->cfg_data) 405 if (!hose->cfg_data)
406 goto error; 406 goto error;
407 pr_debug("EPCI: cfg_data map 0x%016llx->0x%016lx + 0x%016llx\n", 407 pr_debug("EPCI: cfg_data map 0x%016llx->0x%016lx + 0x%016llx\n",
408 r.start, (unsigned long)hose->cfg_data, (r.end - r.start + 1)); 408 r.start, (unsigned long)hose->cfg_data, resource_size(&r));
409 409
410 hose->ops = &celleb_epci_ops; 410 hose->ops = &celleb_epci_ops;
411 celleb_epci_init(hose); 411 celleb_epci_init(hose);
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
index a881bbee8de0..ae790ac4a589 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
@@ -494,7 +494,7 @@ static __init int celleb_setup_pciex(struct device_node *node,
494 pr_err("PCIEXC:Failed to get config resource.\n"); 494 pr_err("PCIEXC:Failed to get config resource.\n");
495 return 1; 495 return 1;
496 } 496 }
497 phb->cfg_addr = ioremap(r.start, r.end - r.start + 1); 497 phb->cfg_addr = ioremap(r.start, resource_size(&r));
498 if (!phb->cfg_addr) { 498 if (!phb->cfg_addr) {
499 pr_err("PCIEXC:Failed to remap SMMIO region.\n"); 499 pr_err("PCIEXC:Failed to remap SMMIO region.\n");
500 return 1; 500 return 1;
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
index d809836bcf5f..7f92096fe968 100644
--- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c
+++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -24,7 +24,7 @@
24#include <linux/sched.h> 24#include <linux/sched.h>
25#include <linux/timer.h> 25#include <linux/timer.h>
26#include <linux/workqueue.h> 26#include <linux/workqueue.h>
27#include <asm/atomic.h> 27#include <linux/atomic.h>
28#include <asm/machdep.h> 28#include <asm/machdep.h>
29#include <asm/spu.h> 29#include <asm/spu.h>
30 30
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index dbb641ea90dd..f2e1dfe4bf31 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -28,7 +28,7 @@
28#include <linux/cpu.h> 28#include <linux/cpu.h>
29 29
30#include <asm/ptrace.h> 30#include <asm/ptrace.h>
31#include <asm/atomic.h> 31#include <linux/atomic.h>
32#include <asm/irq.h> 32#include <asm/irq.h>
33#include <asm/page.h> 33#include <asm/page.h>
34#include <asm/pgtable.h> 34#include <asm/pgtable.h>
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index f465d474ad9b..4e5c91489c02 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -222,7 +222,7 @@ static int spu_map_resource(struct spu *spu, int nr,
222 return ret; 222 return ret;
223 if (phys) 223 if (phys)
224 *phys = resource.start; 224 *phys = resource.start;
225 len = resource.end - resource.start + 1; 225 len = resource_size(&resource);
226 *virt = ioremap(resource.start, len); 226 *virt = ioremap(resource.start, len);
227 if (!*virt) 227 if (!*virt)
228 return -EINVAL; 228 return -EINVAL;
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 0c87bcd2452a..bf4d41d8fa14 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -24,7 +24,7 @@
24#include <linux/mm.h> 24#include <linux/mm.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <asm/atomic.h> 27#include <linux/atomic.h>
28#include <asm/spu.h> 28#include <asm/spu.h>
29#include <asm/spu_csa.h> 29#include <asm/spu_csa.h>
30#include "spufs.h" 30#include "spufs.h"
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 3c7c3f82d842..fb59c46e9e9e 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1850,9 +1850,16 @@ out:
1850 return ret; 1850 return ret;
1851} 1851}
1852 1852
1853static int spufs_mfc_fsync(struct file *file, int datasync) 1853static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1854{ 1854{
1855 return spufs_mfc_flush(file, NULL); 1855 struct inode *inode = file->f_path.dentry->d_inode;
1856 int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
1857 if (!err) {
1858 mutex_lock(&inode->i_mutex);
1859 err = spufs_mfc_flush(file, NULL);
1860 mutex_unlock(&inode->i_mutex);
1861 }
1862 return err;
1856} 1863}
1857 1864
1858static int spufs_mfc_fasync(int fd, struct file *file, int on) 1865static int spufs_mfc_fasync(int fd, struct file *file, int on)
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 856e9c398068..e481f6b9a789 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -611,15 +611,14 @@ out:
611 611
612static struct file_system_type spufs_type; 612static struct file_system_type spufs_type;
613 613
614long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode, 614long spufs_create(struct path *path, struct dentry *dentry,
615 struct file *filp) 615 unsigned int flags, mode_t mode, struct file *filp)
616{ 616{
617 struct dentry *dentry;
618 int ret; 617 int ret;
619 618
620 ret = -EINVAL; 619 ret = -EINVAL;
621 /* check if we are on spufs */ 620 /* check if we are on spufs */
622 if (nd->path.dentry->d_sb->s_type != &spufs_type) 621 if (path->dentry->d_sb->s_type != &spufs_type)
623 goto out; 622 goto out;
624 623
625 /* don't accept undefined flags */ 624 /* don't accept undefined flags */
@@ -627,33 +626,27 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
627 goto out; 626 goto out;
628 627
629 /* only threads can be underneath a gang */ 628 /* only threads can be underneath a gang */
630 if (nd->path.dentry != nd->path.dentry->d_sb->s_root) { 629 if (path->dentry != path->dentry->d_sb->s_root) {
631 if ((flags & SPU_CREATE_GANG) || 630 if ((flags & SPU_CREATE_GANG) ||
632 !SPUFS_I(nd->path.dentry->d_inode)->i_gang) 631 !SPUFS_I(path->dentry->d_inode)->i_gang)
633 goto out; 632 goto out;
634 } 633 }
635 634
636 dentry = lookup_create(nd, 1);
637 ret = PTR_ERR(dentry);
638 if (IS_ERR(dentry))
639 goto out_dir;
640
641 mode &= ~current_umask(); 635 mode &= ~current_umask();
642 636
643 if (flags & SPU_CREATE_GANG) 637 if (flags & SPU_CREATE_GANG)
644 ret = spufs_create_gang(nd->path.dentry->d_inode, 638 ret = spufs_create_gang(path->dentry->d_inode,
645 dentry, nd->path.mnt, mode); 639 dentry, path->mnt, mode);
646 else 640 else
647 ret = spufs_create_context(nd->path.dentry->d_inode, 641 ret = spufs_create_context(path->dentry->d_inode,
648 dentry, nd->path.mnt, flags, mode, 642 dentry, path->mnt, flags, mode,
649 filp); 643 filp);
650 if (ret >= 0) 644 if (ret >= 0)
651 fsnotify_mkdir(nd->path.dentry->d_inode, dentry); 645 fsnotify_mkdir(path->dentry->d_inode, dentry);
652 return ret; 646 return ret;
653 647
654out_dir:
655 mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
656out: 648out:
649 mutex_unlock(&path->dentry->d_inode->i_mutex);
657 return ret; 650 return ret;
658} 651}
659 652
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index c448bac65518..099245f230b2 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -248,7 +248,7 @@ extern const struct spufs_tree_descr spufs_dir_debug_contents[];
248/* system call implementation */ 248/* system call implementation */
249extern struct spufs_calls spufs_calls; 249extern struct spufs_calls spufs_calls;
250long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status); 250long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status);
251long spufs_create(struct nameidata *nd, unsigned int flags, 251long spufs_create(struct path *nd, struct dentry *dentry, unsigned int flags,
252 mode_t mode, struct file *filp); 252 mode_t mode, struct file *filp);
253/* ELF coredump callbacks for writing SPU ELF notes */ 253/* ELF coredump callbacks for writing SPU ELF notes */
254extern int spufs_coredump_extra_notes_size(void); 254extern int spufs_coredump_extra_notes_size(void);
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index a3d2ce54ea2e..609e016e92d0 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -62,21 +62,17 @@ out:
62static long do_spu_create(const char __user *pathname, unsigned int flags, 62static long do_spu_create(const char __user *pathname, unsigned int flags,
63 mode_t mode, struct file *neighbor) 63 mode_t mode, struct file *neighbor)
64{ 64{
65 char *tmp; 65 struct path path;
66 struct dentry *dentry;
66 int ret; 67 int ret;
67 68
68 tmp = getname(pathname); 69 dentry = user_path_create(AT_FDCWD, pathname, &path, 1);
69 ret = PTR_ERR(tmp); 70 ret = PTR_ERR(dentry);
70 if (!IS_ERR(tmp)) { 71 if (!IS_ERR(dentry)) {
71 struct nameidata nd; 72 ret = spufs_create(&path, dentry, flags, mode, neighbor);
72 73 mutex_unlock(&path.dentry->d_inode->i_mutex);
73 ret = kern_path_parent(tmp, &nd); 74 dput(dentry);
74 if (!ret) { 75 path_put(&path);
75 nd.flags |= LOOKUP_OPEN | LOOKUP_CREATE;
76 ret = spufs_create(&nd, flags, mode, neighbor);
77 path_put(&nd.path);
78 }
79 putname(tmp);
80 } 76 }
81 77
82 return ret; 78 return ret;
diff --git a/arch/powerpc/platforms/chrp/Kconfig b/arch/powerpc/platforms/chrp/Kconfig
index bc0b0efdc5fe..d3cdab582c5d 100644
--- a/arch/powerpc/platforms/chrp/Kconfig
+++ b/arch/powerpc/platforms/chrp/Kconfig
@@ -1,6 +1,7 @@
1config PPC_CHRP 1config PPC_CHRP
2 bool "Common Hardware Reference Platform (CHRP) based machines" 2 bool "Common Hardware Reference Platform (CHRP) based machines"
3 depends on 6xx 3 depends on 6xx
4 select HAVE_PCSPKR_PLATFORM
4 select MPIC 5 select MPIC
5 select PPC_I8259 6 select PPC_I8259
6 select PPC_INDIRECT_PCI 7 select PPC_INDIRECT_PCI
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index f3376fa9d284..83285c5a2049 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -142,7 +142,7 @@ hydra_init(void)
142 return 0; 142 return 0;
143 } 143 }
144 of_node_put(np); 144 of_node_put(np);
145 Hydra = ioremap(r.start, r.end-r.start); 145 Hydra = ioremap(r.start, resource_size(&r));
146 printk("Hydra Mac I/O at %llx\n", (unsigned long long)r.start); 146 printk("Hydra Mac I/O at %llx\n", (unsigned long long)r.start);
147 printk("Hydra Feature_Control was %x", 147 printk("Hydra Feature_Control was %x",
148 in_le32(&Hydra->Feature_Control)); 148 in_le32(&Hydra->Feature_Control));
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index a800122e4dda..feab30bbae23 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -18,7 +18,7 @@
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19 19
20#include <asm/ptrace.h> 20#include <asm/ptrace.h>
21#include <asm/atomic.h> 21#include <linux/atomic.h>
22#include <asm/irq.h> 22#include <asm/irq.h>
23#include <asm/page.h> 23#include <asm/page.h>
24#include <asm/pgtable.h> 24#include <asm/pgtable.h>
diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S
index 29c02f36b32f..f519ee17ff7d 100644
--- a/arch/powerpc/platforms/iseries/exception.S
+++ b/arch/powerpc/platforms/iseries/exception.S
@@ -167,7 +167,7 @@ BEGIN_FTR_SECTION
167 std r12,PACA_EXGEN+EX_R13(r13) 167 std r12,PACA_EXGEN+EX_R13(r13)
168 EXCEPTION_PROLOG_ISERIES_1 168 EXCEPTION_PROLOG_ISERIES_1
169FTR_SECTION_ELSE 169FTR_SECTION_ELSE
170 EXCEPTION_PROLOG_1(PACA_EXGEN) 170 EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0)
171 EXCEPTION_PROLOG_ISERIES_1 171 EXCEPTION_PROLOG_ISERIES_1
172ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB) 172ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
173 b data_access_common 173 b data_access_common
diff --git a/arch/powerpc/platforms/iseries/exception.h b/arch/powerpc/platforms/iseries/exception.h
index bae3fba5ad8e..50271b550a99 100644
--- a/arch/powerpc/platforms/iseries/exception.h
+++ b/arch/powerpc/platforms/iseries/exception.h
@@ -39,7 +39,7 @@
39label##_iSeries: \ 39label##_iSeries: \
40 HMT_MEDIUM; \ 40 HMT_MEDIUM; \
41 mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \ 41 mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
42 EXCEPTION_PROLOG_1(area); \ 42 EXCEPTION_PROLOG_1(area, NOTEST, 0); \
43 EXCEPTION_PROLOG_ISERIES_1; \ 43 EXCEPTION_PROLOG_ISERIES_1; \
44 b label##_common 44 b label##_common
45 45
@@ -48,7 +48,7 @@ label##_iSeries: \
48label##_iSeries: \ 48label##_iSeries: \
49 HMT_MEDIUM; \ 49 HMT_MEDIUM; \
50 mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \ 50 mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
51 EXCEPTION_PROLOG_1(PACA_EXGEN); \ 51 EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0); \
52 lbz r10,PACASOFTIRQEN(r13); \ 52 lbz r10,PACASOFTIRQEN(r13); \
53 cmpwi 0,r10,0; \ 53 cmpwi 0,r10,0; \
54 beq- label##_iSeries_masked; \ 54 beq- label##_iSeries_masked; \
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
index e3265adde5d3..8bda9be06fa0 100644
--- a/arch/powerpc/platforms/iseries/smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -29,7 +29,7 @@
29#include <linux/cpu.h> 29#include <linux/cpu.h>
30 30
31#include <asm/ptrace.h> 31#include <asm/ptrace.h>
32#include <asm/atomic.h> 32#include <linux/atomic.h>
33#include <asm/irq.h> 33#include <asm/irq.h>
34#include <asm/page.h> 34#include <asm/page.h>
35#include <asm/pgtable.h> 35#include <asm/pgtable.h>
@@ -75,7 +75,7 @@ static void __devinit smp_iSeries_setup_cpu(int nr)
75} 75}
76 76
77static struct smp_ops_t iSeries_smp_ops = { 77static struct smp_ops_t iSeries_smp_ops = {
78 .message_pass = smp_muxed_ipi_message_pass, 78 .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
79 .cause_ipi = smp_iSeries_cause_ipi, 79 .cause_ipi = smp_iSeries_cause_ipi,
80 .probe = smp_iSeries_probe, 80 .probe = smp_iSeries_probe,
81 .kick_cpu = smp_iSeries_kick_cpu, 81 .kick_cpu = smp_iSeries_kick_cpu,
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index fe34c3d9bb74..5b3388b9f911 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -338,35 +338,16 @@ define_machine(maple) {
338#ifdef CONFIG_EDAC 338#ifdef CONFIG_EDAC
339/* 339/*
340 * Register a platform device for CPC925 memory controller on 340 * Register a platform device for CPC925 memory controller on
341 * Motorola ATCA-6101 blade. 341 * all boards with U3H (CPC925) bridge.
342 */ 342 */
343#define MAPLE_CPC925_MODEL "Motorola,ATCA-6101"
344static int __init maple_cpc925_edac_setup(void) 343static int __init maple_cpc925_edac_setup(void)
345{ 344{
346 struct platform_device *pdev; 345 struct platform_device *pdev;
347 struct device_node *np = NULL; 346 struct device_node *np = NULL;
348 struct resource r; 347 struct resource r;
349 const unsigned char *model;
350 int ret; 348 int ret;
351 349 volatile void __iomem *mem;
352 np = of_find_node_by_path("/"); 350 u32 rev;
353 if (!np) {
354 printk(KERN_ERR "%s: Unable to get root node\n", __func__);
355 return -ENODEV;
356 }
357
358 model = (const unsigned char *)of_get_property(np, "model", NULL);
359 if (!model) {
360 printk(KERN_ERR "%s: Unabel to get model info\n", __func__);
361 of_node_put(np);
362 return -ENODEV;
363 }
364
365 ret = strcmp(model, MAPLE_CPC925_MODEL);
366 of_node_put(np);
367
368 if (ret != 0)
369 return 0;
370 351
371 np = of_find_node_by_type(NULL, "memory-controller"); 352 np = of_find_node_by_type(NULL, "memory-controller");
372 if (!np) { 353 if (!np) {
@@ -384,6 +365,22 @@ static int __init maple_cpc925_edac_setup(void)
384 return -ENODEV; 365 return -ENODEV;
385 } 366 }
386 367
368 mem = ioremap(r.start, resource_size(&r));
369 if (!mem) {
370 printk(KERN_ERR "%s: Unable to map memory-controller memory\n",
371 __func__);
372 return -ENOMEM;
373 }
374
375 rev = __raw_readl(mem);
376 iounmap(mem);
377
378 if (rev < 0x34 || rev > 0x3f) { /* U3H */
379 printk(KERN_ERR "%s: Non-CPC925(U3H) bridge revision: %02x\n",
380 __func__, rev);
381 return 0;
382 }
383
387 pdev = platform_device_register_simple("cpc925_edac", 0, &r, 1); 384 pdev = platform_device_register_simple("cpc925_edac", 0, &r, 1);
388 if (IS_ERR(pdev)) 385 if (IS_ERR(pdev))
389 return PTR_ERR(pdev); 386 return PTR_ERR(pdev);
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
index 321a9b3a2d00..756123bf06ac 100644
--- a/arch/powerpc/platforms/pasemi/dma_lib.c
+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
@@ -576,7 +576,7 @@ int pasemi_dma_init(void)
576 res.start = 0xfd800000; 576 res.start = 0xfd800000;
577 res.end = res.start + 0x1000; 577 res.end = res.start + 0x1000;
578 } 578 }
579 dma_status = __ioremap(res.start, res.end-res.start, 0); 579 dma_status = __ioremap(res.start, resource_size(&res), 0);
580 pci_dev_put(iob_pdev); 580 pci_dev_put(iob_pdev);
581 581
582 for (i = 0; i < MAX_TXCH; i++) 582 for (i = 0; i < MAX_TXCH; i++)
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
index d679964ae2ab..c2f3e861f5ea 100644
--- a/arch/powerpc/platforms/powermac/backlight.c
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -12,7 +12,7 @@
12#include <linux/backlight.h> 12#include <linux/backlight.h>
13#include <linux/adb.h> 13#include <linux/adb.h>
14#include <linux/pmu.h> 14#include <linux/pmu.h>
15#include <asm/atomic.h> 15#include <linux/atomic.h>
16#include <asm/prom.h> 16#include <asm/prom.h>
17#include <asm/backlight.h> 17#include <asm/backlight.h>
18 18
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index b1cdcf94aa8e..695443bfdb08 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -580,10 +580,10 @@ int __init pmac_nvram_init(void)
580 /* Try to obtain an address */ 580 /* Try to obtain an address */
581 if (of_address_to_resource(dp, 0, &r1) == 0) { 581 if (of_address_to_resource(dp, 0, &r1) == 0) {
582 nvram_naddrs = 1; 582 nvram_naddrs = 1;
583 s1 = (r1.end - r1.start) + 1; 583 s1 = resource_size(&r1);
584 if (of_address_to_resource(dp, 1, &r2) == 0) { 584 if (of_address_to_resource(dp, 1, &r2) == 0) {
585 nvram_naddrs = 2; 585 nvram_naddrs = 2;
586 s2 = (r2.end - r2.start) + 1; 586 s2 = resource_size(&r2);
587 } 587 }
588 } 588 }
589 589
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index c291afd6b616..5cc83851ad06 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -17,6 +17,7 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/bootmem.h> 18#include <linux/bootmem.h>
19#include <linux/irq.h> 19#include <linux/irq.h>
20#include <linux/of_pci.h>
20 21
21#include <asm/sections.h> 22#include <asm/sections.h>
22#include <asm/io.h> 23#include <asm/io.h>
@@ -235,7 +236,7 @@ static int chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
235 236
236 if (offset >= 0x100) 237 if (offset >= 0x100)
237 return PCIBIOS_BAD_REGISTER_NUMBER; 238 return PCIBIOS_BAD_REGISTER_NUMBER;
238 np = pci_busdev_to_OF_node(bus, devfn); 239 np = of_pci_find_child_device(bus->dev.of_node, devfn);
239 if (np == NULL) 240 if (np == NULL)
240 return PCIBIOS_DEVICE_NOT_FOUND; 241 return PCIBIOS_DEVICE_NOT_FOUND;
241 242
@@ -838,8 +839,7 @@ static void __init setup_u3_ht(struct pci_controller* hose)
838 * into cfg_addr 839 * into cfg_addr
839 */ 840 */
840 hose->cfg_data = ioremap(cfg_res.start, 0x02000000); 841 hose->cfg_data = ioremap(cfg_res.start, 0x02000000);
841 hose->cfg_addr = ioremap(self_res.start, 842 hose->cfg_addr = ioremap(self_res.start, resource_size(&self_res));
842 self_res.end - self_res.start + 1);
843 843
844 /* 844 /*
845 * /ht node doesn't expose a "ranges" property, we read the register 845 * /ht node doesn't expose a "ranges" property, we read the register
@@ -1323,8 +1323,7 @@ static void fixup_u4_pcie(struct pci_dev* dev)
1323 */ 1323 */
1324 if (r->start >= 0xf0000000 && r->start < 0xf3000000) 1324 if (r->start >= 0xf0000000 && r->start < 0xf3000000)
1325 continue; 1325 continue;
1326 if (!region || (r->end - r->start) > 1326 if (!region || resource_size(r) > resource_size(region))
1327 (region->end - region->start))
1328 region = r; 1327 region = r;
1329 } 1328 }
1330 /* Nothing found, bail */ 1329 /* Nothing found, bail */
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index aa45281bd296..a028f08309d6 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -355,9 +355,6 @@ static int initializing = 1;
355static int pmac_late_init(void) 355static int pmac_late_init(void)
356{ 356{
357 initializing = 0; 357 initializing = 0;
358 /* this is udbg (which is __init) and we can later use it during
359 * cpu hotplug (in smp_core99_kick_cpu) */
360 ppc_md.progress = NULL;
361 return 0; 358 return 0;
362} 359}
363machine_late_initcall(powermac, pmac_late_init); 360machine_late_initcall(powermac, pmac_late_init);
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index db092d7c4c5b..9a521dc8e485 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -35,7 +35,7 @@
35#include <linux/compiler.h> 35#include <linux/compiler.h>
36 36
37#include <asm/ptrace.h> 37#include <asm/ptrace.h>
38#include <asm/atomic.h> 38#include <linux/atomic.h>
39#include <asm/code-patching.h> 39#include <asm/code-patching.h>
40#include <asm/irq.h> 40#include <asm/irq.h>
41#include <asm/page.h> 41#include <asm/page.h>
@@ -447,7 +447,7 @@ void __init smp_psurge_give_timebase(void)
447 447
448/* PowerSurge-style Macs */ 448/* PowerSurge-style Macs */
449struct smp_ops_t psurge_smp_ops = { 449struct smp_ops_t psurge_smp_ops = {
450 .message_pass = smp_muxed_ipi_message_pass, 450 .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
451 .cause_ipi = smp_psurge_cause_ipi, 451 .cause_ipi = smp_psurge_cause_ipi,
452 .probe = smp_psurge_probe, 452 .probe = smp_psurge_probe,
453 .kick_cpu = smp_psurge_kick_cpu, 453 .kick_cpu = smp_psurge_kick_cpu,
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index 48211ca134c3..11c9fce43b5b 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -274,7 +274,7 @@ int __init via_calibrate_decr(void)
274 return 0; 274 return 0;
275 } 275 }
276 of_node_put(vias); 276 of_node_put(vias);
277 via = ioremap(rsrc.start, rsrc.end - rsrc.start + 1); 277 via = ioremap(rsrc.start, resource_size(&rsrc));
278 if (via == NULL) { 278 if (via == NULL) {
279 printk(KERN_ERR "Failed to map VIA for timer calibration !\n"); 279 printk(KERN_ERR "Failed to map VIA for timer calibration !\n");
280 return 0; 280 return 0;
diff --git a/arch/powerpc/platforms/prep/Kconfig b/arch/powerpc/platforms/prep/Kconfig
index bf8330ef2e76..f0536c7cda9f 100644
--- a/arch/powerpc/platforms/prep/Kconfig
+++ b/arch/powerpc/platforms/prep/Kconfig
@@ -1,6 +1,7 @@
1config PPC_PREP 1config PPC_PREP
2 bool "PowerPC Reference Platform (PReP) based machines" 2 bool "PowerPC Reference Platform (PReP) based machines"
3 depends on 6xx && BROKEN 3 depends on 6xx && BROKEN
4 select HAVE_PCSPKR_PLATFORM
4 select MPIC 5 select MPIC
5 select PPC_I8259 6 select PPC_I8259
6 select PPC_INDIRECT_PCI 7 select PPC_INDIRECT_PCI
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 71af4c5d6c05..05cf4769b88c 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -1,6 +1,7 @@
1config PPC_PSERIES 1config PPC_PSERIES
2 depends on PPC64 && PPC_BOOK3S 2 depends on PPC64 && PPC_BOOK3S
3 bool "IBM pSeries & new (POWER5-based) iSeries" 3 bool "IBM pSeries & new (POWER5-based) iSeries"
4 select HAVE_PCSPKR_PLATFORM
4 select MPIC 5 select MPIC
5 select PCI_MSI 6 select PCI_MSI
6 select PPC_XICS 7 select PPC_XICS
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 57ceb92b2288..e9be25bc571b 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -262,12 +262,11 @@ int dlpar_attach_node(struct device_node *dn)
262 if (!dn->parent) 262 if (!dn->parent)
263 return -ENOMEM; 263 return -ENOMEM;
264 264
265 rc = blocking_notifier_call_chain(&pSeries_reconfig_chain, 265 rc = pSeries_reconfig_notify(PSERIES_RECONFIG_ADD, dn);
266 PSERIES_RECONFIG_ADD, dn); 266 if (rc) {
267 if (rc == NOTIFY_BAD) {
268 printk(KERN_ERR "Failed to add device node %s\n", 267 printk(KERN_ERR "Failed to add device node %s\n",
269 dn->full_name); 268 dn->full_name);
270 return -ENOMEM; /* For now, safe to assume kmalloc failure */ 269 return rc;
271 } 270 }
272 271
273 of_attach_node(dn); 272 of_attach_node(dn);
@@ -297,8 +296,7 @@ int dlpar_detach_node(struct device_node *dn)
297 remove_proc_entry(dn->pde->name, parent->pde); 296 remove_proc_entry(dn->pde->name, parent->pde);
298#endif 297#endif
299 298
300 blocking_notifier_call_chain(&pSeries_reconfig_chain, 299 pSeries_reconfig_notify(PSERIES_RECONFIG_REMOVE, dn);
301 PSERIES_RECONFIG_REMOVE, dn);
302 of_detach_node(dn); 300 of_detach_node(dn);
303 of_node_put(dn); /* Must decrement the refcount */ 301 of_node_put(dn); /* Must decrement the refcount */
304 302
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 46b55cf563e3..ada6e07532ec 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -31,7 +31,7 @@
31#include <linux/spinlock.h> 31#include <linux/spinlock.h>
32#include <linux/of.h> 32#include <linux/of.h>
33 33
34#include <asm/atomic.h> 34#include <linux/atomic.h>
35#include <asm/eeh.h> 35#include <asm/eeh.h>
36#include <asm/eeh_event.h> 36#include <asm/eeh_event.h>
37#include <asm/io.h> 37#include <asm/io.h>
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index 8ed0d2d0e1b5..fc5ae767989e 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -25,7 +25,7 @@
25#include <linux/rbtree.h> 25#include <linux/rbtree.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <asm/atomic.h> 28#include <linux/atomic.h>
29#include <asm/pci-bridge.h> 29#include <asm/pci-bridge.h>
30#include <asm/ppc-pci.h> 30#include <asm/ppc-pci.h>
31 31
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 46f13a3c5d09..bc0288501f17 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -330,21 +330,17 @@ static void pseries_remove_processor(struct device_node *np)
330static int pseries_smp_notifier(struct notifier_block *nb, 330static int pseries_smp_notifier(struct notifier_block *nb,
331 unsigned long action, void *node) 331 unsigned long action, void *node)
332{ 332{
333 int err = NOTIFY_OK; 333 int err = 0;
334 334
335 switch (action) { 335 switch (action) {
336 case PSERIES_RECONFIG_ADD: 336 case PSERIES_RECONFIG_ADD:
337 if (pseries_add_processor(node)) 337 err = pseries_add_processor(node);
338 err = NOTIFY_BAD;
339 break; 338 break;
340 case PSERIES_RECONFIG_REMOVE: 339 case PSERIES_RECONFIG_REMOVE:
341 pseries_remove_processor(node); 340 pseries_remove_processor(node);
342 break; 341 break;
343 default:
344 err = NOTIFY_DONE;
345 break;
346 } 342 }
347 return err; 343 return notifier_from_errno(err);
348} 344}
349 345
350static struct notifier_block pseries_smp_nb = { 346static struct notifier_block pseries_smp_nb = {
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 33867ec4a234..11d8e0544ac0 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -12,6 +12,8 @@
12#include <linux/of.h> 12#include <linux/of.h>
13#include <linux/memblock.h> 13#include <linux/memblock.h>
14#include <linux/vmalloc.h> 14#include <linux/vmalloc.h>
15#include <linux/memory.h>
16
15#include <asm/firmware.h> 17#include <asm/firmware.h>
16#include <asm/machdep.h> 18#include <asm/machdep.h>
17#include <asm/pSeries_reconfig.h> 19#include <asm/pSeries_reconfig.h>
@@ -20,24 +22,25 @@
20static unsigned long get_memblock_size(void) 22static unsigned long get_memblock_size(void)
21{ 23{
22 struct device_node *np; 24 struct device_node *np;
23 unsigned int memblock_size = 0; 25 unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
26 struct resource r;
24 27
25 np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); 28 np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
26 if (np) { 29 if (np) {
27 const unsigned long *size; 30 const __be64 *size;
28 31
29 size = of_get_property(np, "ibm,lmb-size", NULL); 32 size = of_get_property(np, "ibm,lmb-size", NULL);
30 memblock_size = size ? *size : 0; 33 if (size)
31 34 memblock_size = be64_to_cpup(size);
32 of_node_put(np); 35 of_node_put(np);
33 } else { 36 } else if (machine_is(pseries)) {
37 /* This fallback really only applies to pseries */
34 unsigned int memzero_size = 0; 38 unsigned int memzero_size = 0;
35 const unsigned int *regs;
36 39
37 np = of_find_node_by_path("/memory@0"); 40 np = of_find_node_by_path("/memory@0");
38 if (np) { 41 if (np) {
39 regs = of_get_property(np, "reg", NULL); 42 if (!of_address_to_resource(np, 0, &r))
40 memzero_size = regs ? regs[3] : 0; 43 memzero_size = resource_size(&r);
41 of_node_put(np); 44 of_node_put(np);
42 } 45 }
43 46
@@ -50,16 +53,21 @@ static unsigned long get_memblock_size(void)
50 sprintf(buf, "/memory@%x", memzero_size); 53 sprintf(buf, "/memory@%x", memzero_size);
51 np = of_find_node_by_path(buf); 54 np = of_find_node_by_path(buf);
52 if (np) { 55 if (np) {
53 regs = of_get_property(np, "reg", NULL); 56 if (!of_address_to_resource(np, 0, &r))
54 memblock_size = regs ? regs[3] : 0; 57 memblock_size = resource_size(&r);
55 of_node_put(np); 58 of_node_put(np);
56 } 59 }
57 } 60 }
58 } 61 }
59
60 return memblock_size; 62 return memblock_size;
61} 63}
62 64
65/* WARNING: This is going to override the generic definition whenever
66 * pseries is built-in regardless of what platform is active at boot
67 * time. This is fine for now as this is the only "option" and it
68 * should work everywhere. If not, we'll have to turn this into a
69 * ppc_md. callback
70 */
63unsigned long memory_block_size_bytes(void) 71unsigned long memory_block_size_bytes(void)
64{ 72{
65 return get_memblock_size(); 73 return get_memblock_size();
@@ -197,27 +205,21 @@ static int pseries_drconf_memory(unsigned long *base, unsigned int action)
197static int pseries_memory_notifier(struct notifier_block *nb, 205static int pseries_memory_notifier(struct notifier_block *nb,
198 unsigned long action, void *node) 206 unsigned long action, void *node)
199{ 207{
200 int err = NOTIFY_OK; 208 int err = 0;
201 209
202 switch (action) { 210 switch (action) {
203 case PSERIES_RECONFIG_ADD: 211 case PSERIES_RECONFIG_ADD:
204 if (pseries_add_memory(node)) 212 err = pseries_add_memory(node);
205 err = NOTIFY_BAD;
206 break; 213 break;
207 case PSERIES_RECONFIG_REMOVE: 214 case PSERIES_RECONFIG_REMOVE:
208 if (pseries_remove_memory(node)) 215 err = pseries_remove_memory(node);
209 err = NOTIFY_BAD;
210 break; 216 break;
211 case PSERIES_DRCONF_MEM_ADD: 217 case PSERIES_DRCONF_MEM_ADD:
212 case PSERIES_DRCONF_MEM_REMOVE: 218 case PSERIES_DRCONF_MEM_REMOVE:
213 if (pseries_drconf_memory(node, action)) 219 err = pseries_drconf_memory(node, action);
214 err = NOTIFY_BAD;
215 break;
216 default:
217 err = NOTIFY_DONE;
218 break; 220 break;
219 } 221 }
220 return err; 222 return notifier_from_errno(err);
221} 223}
222 224
223static struct notifier_block pseries_mem_nb = { 225static struct notifier_block pseries_mem_nb = {
diff --git a/arch/powerpc/platforms/pseries/hvconsole.c b/arch/powerpc/platforms/pseries/hvconsole.c
index 3f6a89b09816..041e87ca1893 100644
--- a/arch/powerpc/platforms/pseries/hvconsole.c
+++ b/arch/powerpc/platforms/pseries/hvconsole.c
@@ -73,7 +73,7 @@ int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
73 if (ret == H_SUCCESS) 73 if (ret == H_SUCCESS)
74 return count; 74 return count;
75 if (ret == H_BUSY) 75 if (ret == H_BUSY)
76 return 0; 76 return -EAGAIN;
77 return -EIO; 77 return -EIO;
78} 78}
79 79
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 39e6e0a7b2fa..f7205d344efd 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -52,197 +52,6 @@ EXPORT_SYMBOL(plpar_hcall_norets);
52 52
53extern void pSeries_find_serial_port(void); 53extern void pSeries_find_serial_port(void);
54 54
55
56static int vtermno; /* virtual terminal# for udbg */
57
58#define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))
59static void udbg_hvsi_putc(char c)
60{
61 /* packet's seqno isn't used anyways */
62 uint8_t packet[] __ALIGNED__ = { 0xff, 5, 0, 0, c };
63 int rc;
64
65 if (c == '\n')
66 udbg_hvsi_putc('\r');
67
68 do {
69 rc = plpar_put_term_char(vtermno, sizeof(packet), packet);
70 } while (rc == H_BUSY);
71}
72
73static long hvsi_udbg_buf_len;
74static uint8_t hvsi_udbg_buf[256];
75
76static int udbg_hvsi_getc_poll(void)
77{
78 unsigned char ch;
79 int rc, i;
80
81 if (hvsi_udbg_buf_len == 0) {
82 rc = plpar_get_term_char(vtermno, &hvsi_udbg_buf_len, hvsi_udbg_buf);
83 if (rc != H_SUCCESS || hvsi_udbg_buf[0] != 0xff) {
84 /* bad read or non-data packet */
85 hvsi_udbg_buf_len = 0;
86 } else {
87 /* remove the packet header */
88 for (i = 4; i < hvsi_udbg_buf_len; i++)
89 hvsi_udbg_buf[i-4] = hvsi_udbg_buf[i];
90 hvsi_udbg_buf_len -= 4;
91 }
92 }
93
94 if (hvsi_udbg_buf_len <= 0 || hvsi_udbg_buf_len > 256) {
95 /* no data ready */
96 hvsi_udbg_buf_len = 0;
97 return -1;
98 }
99
100 ch = hvsi_udbg_buf[0];
101 /* shift remaining data down */
102 for (i = 1; i < hvsi_udbg_buf_len; i++) {
103 hvsi_udbg_buf[i-1] = hvsi_udbg_buf[i];
104 }
105 hvsi_udbg_buf_len--;
106
107 return ch;
108}
109
110static int udbg_hvsi_getc(void)
111{
112 int ch;
113 for (;;) {
114 ch = udbg_hvsi_getc_poll();
115 if (ch == -1) {
116 /* This shouldn't be needed...but... */
117 volatile unsigned long delay;
118 for (delay=0; delay < 2000000; delay++)
119 ;
120 } else {
121 return ch;
122 }
123 }
124}
125
126static void udbg_putcLP(char c)
127{
128 char buf[16];
129 unsigned long rc;
130
131 if (c == '\n')
132 udbg_putcLP('\r');
133
134 buf[0] = c;
135 do {
136 rc = plpar_put_term_char(vtermno, 1, buf);
137 } while(rc == H_BUSY);
138}
139
140/* Buffered chars getc */
141static long inbuflen;
142static long inbuf[2]; /* must be 2 longs */
143
144static int udbg_getc_pollLP(void)
145{
146 /* The interface is tricky because it may return up to 16 chars.
147 * We save them statically for future calls to udbg_getc().
148 */
149 char ch, *buf = (char *)inbuf;
150 int i;
151 long rc;
152 if (inbuflen == 0) {
153 /* get some more chars. */
154 inbuflen = 0;
155 rc = plpar_get_term_char(vtermno, &inbuflen, buf);
156 if (rc != H_SUCCESS)
157 inbuflen = 0; /* otherwise inbuflen is garbage */
158 }
159 if (inbuflen <= 0 || inbuflen > 16) {
160 /* Catch error case as well as other oddities (corruption) */
161 inbuflen = 0;
162 return -1;
163 }
164 ch = buf[0];
165 for (i = 1; i < inbuflen; i++) /* shuffle them down. */
166 buf[i-1] = buf[i];
167 inbuflen--;
168 return ch;
169}
170
171static int udbg_getcLP(void)
172{
173 int ch;
174 for (;;) {
175 ch = udbg_getc_pollLP();
176 if (ch == -1) {
177 /* This shouldn't be needed...but... */
178 volatile unsigned long delay;
179 for (delay=0; delay < 2000000; delay++)
180 ;
181 } else {
182 return ch;
183 }
184 }
185}
186
187/* call this from early_init() for a working debug console on
188 * vterm capable LPAR machines
189 */
190void __init udbg_init_debug_lpar(void)
191{
192 vtermno = 0;
193 udbg_putc = udbg_putcLP;
194 udbg_getc = udbg_getcLP;
195 udbg_getc_poll = udbg_getc_pollLP;
196
197 register_early_udbg_console();
198}
199
200/* returns 0 if couldn't find or use /chosen/stdout as console */
201void __init find_udbg_vterm(void)
202{
203 struct device_node *stdout_node;
204 const u32 *termno;
205 const char *name;
206
207 /* find the boot console from /chosen/stdout */
208 if (!of_chosen)
209 return;
210 name = of_get_property(of_chosen, "linux,stdout-path", NULL);
211 if (name == NULL)
212 return;
213 stdout_node = of_find_node_by_path(name);
214 if (!stdout_node)
215 return;
216 name = of_get_property(stdout_node, "name", NULL);
217 if (!name) {
218 printk(KERN_WARNING "stdout node missing 'name' property!\n");
219 goto out;
220 }
221
222 /* Check if it's a virtual terminal */
223 if (strncmp(name, "vty", 3) != 0)
224 goto out;
225 termno = of_get_property(stdout_node, "reg", NULL);
226 if (termno == NULL)
227 goto out;
228 vtermno = termno[0];
229
230 if (of_device_is_compatible(stdout_node, "hvterm1")) {
231 udbg_putc = udbg_putcLP;
232 udbg_getc = udbg_getcLP;
233 udbg_getc_poll = udbg_getc_pollLP;
234 add_preferred_console("hvc", termno[0] & 0xff, NULL);
235 } else if (of_device_is_compatible(stdout_node, "hvterm-protocol")) {
236 vtermno = termno[0];
237 udbg_putc = udbg_hvsi_putc;
238 udbg_getc = udbg_hvsi_getc;
239 udbg_getc_poll = udbg_hvsi_getc_poll;
240 add_preferred_console("hvsi", termno[0] & 0xff, NULL);
241 }
242out:
243 of_node_put(stdout_node);
244}
245
246void vpa_init(int cpu) 55void vpa_init(int cpu)
247{ 56{
248 int hwcpu = get_hard_smp_processor_id(cpu); 57 int hwcpu = get_hard_smp_processor_id(cpu);
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index e9f6d2859c3c..24c7162f11d9 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -47,7 +47,8 @@ extern void pSeries_final_fixup(void);
47/* Poweron flag used for enabling auto ups restart */ 47/* Poweron flag used for enabling auto ups restart */
48extern unsigned long rtas_poweron_auto; 48extern unsigned long rtas_poweron_auto;
49 49
50extern void find_udbg_vterm(void); 50/* Provided by HVC VIO */
51extern void hvc_vio_init_early(void);
51 52
52/* Dynamic logical Partitioning/Mobility */ 53/* Dynamic logical Partitioning/Mobility */
53extern void dlpar_free_cc_nodes(struct device_node *); 54extern void dlpar_free_cc_nodes(struct device_node *);
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 1de2cbb92303..168651acdd83 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -97,7 +97,7 @@ static struct device_node *derive_parent(const char *path)
97 return parent; 97 return parent;
98} 98}
99 99
100BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain); 100static BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain);
101 101
102int pSeries_reconfig_notifier_register(struct notifier_block *nb) 102int pSeries_reconfig_notifier_register(struct notifier_block *nb)
103{ 103{
@@ -109,6 +109,14 @@ void pSeries_reconfig_notifier_unregister(struct notifier_block *nb)
109 blocking_notifier_chain_unregister(&pSeries_reconfig_chain, nb); 109 blocking_notifier_chain_unregister(&pSeries_reconfig_chain, nb);
110} 110}
111 111
112int pSeries_reconfig_notify(unsigned long action, void *p)
113{
114 int err = blocking_notifier_call_chain(&pSeries_reconfig_chain,
115 action, p);
116
117 return notifier_to_errno(err);
118}
119
112static int pSeries_reconfig_add_node(const char *path, struct property *proplist) 120static int pSeries_reconfig_add_node(const char *path, struct property *proplist)
113{ 121{
114 struct device_node *np; 122 struct device_node *np;
@@ -132,11 +140,9 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
132 goto out_err; 140 goto out_err;
133 } 141 }
134 142
135 err = blocking_notifier_call_chain(&pSeries_reconfig_chain, 143 err = pSeries_reconfig_notify(PSERIES_RECONFIG_ADD, np);
136 PSERIES_RECONFIG_ADD, np); 144 if (err) {
137 if (err == NOTIFY_BAD) {
138 printk(KERN_ERR "Failed to add device node %s\n", path); 145 printk(KERN_ERR "Failed to add device node %s\n", path);
139 err = -ENOMEM; /* For now, safe to assume kmalloc failure */
140 goto out_err; 146 goto out_err;
141 } 147 }
142 148
@@ -173,8 +179,7 @@ static int pSeries_reconfig_remove_node(struct device_node *np)
173 179
174 remove_node_proc_entries(np); 180 remove_node_proc_entries(np);
175 181
176 blocking_notifier_call_chain(&pSeries_reconfig_chain, 182 pSeries_reconfig_notify(PSERIES_RECONFIG_REMOVE, np);
177 PSERIES_RECONFIG_REMOVE, np);
178 of_detach_node(np); 183 of_detach_node(np);
179 184
180 of_node_put(parent); 185 of_node_put(parent);
@@ -472,11 +477,10 @@ static int do_update_property(char *buf, size_t bufsize)
472 else 477 else
473 action = PSERIES_DRCONF_MEM_REMOVE; 478 action = PSERIES_DRCONF_MEM_REMOVE;
474 479
475 rc = blocking_notifier_call_chain(&pSeries_reconfig_chain, 480 rc = pSeries_reconfig_notify(action, value);
476 action, value); 481 if (rc) {
477 if (rc == NOTIFY_BAD) { 482 prom_update_property(np, oldprop, newprop);
478 rc = prom_update_property(np, oldprop, newprop); 483 return rc;
479 return -ENOMEM;
480 } 484 }
481 } 485 }
482 486
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 593acceeff96..d00e52926b71 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -512,9 +512,10 @@ static void __init pSeries_init_early(void)
512{ 512{
513 pr_debug(" -> pSeries_init_early()\n"); 513 pr_debug(" -> pSeries_init_early()\n");
514 514
515#ifdef CONFIG_HVC_CONSOLE
515 if (firmware_has_feature(FW_FEATURE_LPAR)) 516 if (firmware_has_feature(FW_FEATURE_LPAR))
516 find_udbg_vterm(); 517 hvc_vio_init_early();
517 518#endif
518 if (firmware_has_feature(FW_FEATURE_DABR)) 519 if (firmware_has_feature(FW_FEATURE_DABR))
519 ppc_md.set_dabr = pseries_set_dabr; 520 ppc_md.set_dabr = pseries_set_dabr;
520 else if (firmware_has_feature(FW_FEATURE_XDABR)) 521 else if (firmware_has_feature(FW_FEATURE_XDABR))
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index fbffd7e47ab8..4e44c4dcd11c 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -27,7 +27,7 @@
27#include <linux/cpu.h> 27#include <linux/cpu.h>
28 28
29#include <asm/ptrace.h> 29#include <asm/ptrace.h>
30#include <asm/atomic.h> 30#include <linux/atomic.h>
31#include <asm/irq.h> 31#include <asm/irq.h>
32#include <asm/page.h> 32#include <asm/page.h>
33#include <asm/pgtable.h> 33#include <asm/pgtable.h>
@@ -44,7 +44,6 @@
44#include <asm/mpic.h> 44#include <asm/mpic.h>
45#include <asm/vdso_datapage.h> 45#include <asm/vdso_datapage.h>
46#include <asm/cputhreads.h> 46#include <asm/cputhreads.h>
47#include <asm/mpic.h>
48#include <asm/xics.h> 47#include <asm/xics.h>
49 48
50#include "plpar_wrappers.h" 49#include "plpar_wrappers.h"
@@ -207,7 +206,7 @@ static struct smp_ops_t pSeries_mpic_smp_ops = {
207}; 206};
208 207
209static struct smp_ops_t pSeries_xics_smp_ops = { 208static struct smp_ops_t pSeries_xics_smp_ops = {
210 .message_pass = smp_muxed_ipi_message_pass, 209 .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
211 .cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */ 210 .cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */
212 .probe = xics_smp_probe, 211 .probe = xics_smp_probe,
213 .kick_cpu = smp_pSeries_kick_cpu, 212 .kick_cpu = smp_pSeries_kick_cpu,
diff --git a/arch/powerpc/platforms/wsp/smp.c b/arch/powerpc/platforms/wsp/smp.c
index 9d20fa9d3710..71bd105f3863 100644
--- a/arch/powerpc/platforms/wsp/smp.c
+++ b/arch/powerpc/platforms/wsp/smp.c
@@ -75,7 +75,7 @@ static int __init smp_a2_probe(void)
75} 75}
76 76
77static struct smp_ops_t a2_smp_ops = { 77static struct smp_ops_t a2_smp_ops = {
78 .message_pass = smp_muxed_ipi_message_pass, 78 .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
79 .cause_ipi = doorbell_cause_ipi, 79 .cause_ipi = doorbell_cause_ipi,
80 .probe = smp_a2_probe, 80 .probe = smp_a2_probe,
81 .kick_cpu = smp_a2_kick_cpu, 81 .kick_cpu = smp_a2_kick_cpu,
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 0efa990e3344..cf736ca0cf05 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -4,6 +4,7 @@ ccflags-$(CONFIG_PPC64) := -mno-minimal-toc
4 4
5mpic-msi-obj-$(CONFIG_PCI_MSI) += mpic_msi.o mpic_u3msi.o mpic_pasemi_msi.o 5mpic-msi-obj-$(CONFIG_PCI_MSI) += mpic_msi.o mpic_u3msi.o mpic_pasemi_msi.o
6obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y) 6obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y)
7obj-$(CONFIG_PPC_EPAPR_HV_PIC) += ehv_pic.o
7fsl-msi-obj-$(CONFIG_PCI_MSI) += fsl_msi.o 8fsl-msi-obj-$(CONFIG_PCI_MSI) += fsl_msi.o
8obj-$(CONFIG_PPC_MSI_BITMAP) += msi_bitmap.o 9obj-$(CONFIG_PPC_MSI_BITMAP) += msi_bitmap.o
9 10
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index bd0d54060b94..265f0f09395a 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -203,7 +203,7 @@ static int axon_ram_probe(struct platform_device *device)
203 goto failed; 203 goto failed;
204 } 204 }
205 205
206 bank->size = resource.end - resource.start + 1; 206 bank->size = resource_size(&resource);
207 207
208 if (bank->size == 0) { 208 if (bank->size == 0) {
209 dev_err(&device->dev, "No DDR2 memory found for %s%d\n", 209 dev_err(&device->dev, "No DDR2 memory found for %s%d\n",
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 350787c83e22..5d7d59a43c4c 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -148,7 +148,7 @@ unsigned int cpm_pic_init(void)
148 if (ret) 148 if (ret)
149 goto end; 149 goto end;
150 150
151 cpic_reg = ioremap(res.start, res.end - res.start + 1); 151 cpic_reg = ioremap(res.start, resource_size(&res));
152 if (cpic_reg == NULL) 152 if (cpic_reg == NULL)
153 goto end; 153 goto end;
154 154
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 2b69aa0315b3..d55d0ad0deab 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -115,7 +115,7 @@ int cpm_muram_init(void)
115 max = r.end; 115 max = r.end;
116 116
117 rh_attach_region(&cpm_muram_info, r.start - muram_pbase, 117 rh_attach_region(&cpm_muram_info, r.start - muram_pbase,
118 r.end - r.start + 1); 118 resource_size(&r));
119 } 119 }
120 120
121 muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1); 121 muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 8e9e06a7ca59..4f2680f431b5 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -239,7 +239,7 @@ static int __init dart_init(struct device_node *dart_node)
239 DARTMAP_RPNMASK); 239 DARTMAP_RPNMASK);
240 240
241 /* Map in DART registers */ 241 /* Map in DART registers */
242 dart = ioremap(r.start, r.end - r.start + 1); 242 dart = ioremap(r.start, resource_size(&r));
243 if (dart == NULL) 243 if (dart == NULL)
244 panic("DART: Cannot map registers!"); 244 panic("DART: Cannot map registers!");
245 245
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
new file mode 100644
index 000000000000..af1a5df46b3e
--- /dev/null
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -0,0 +1,302 @@
1/*
2 * Driver for ePAPR Embedded Hypervisor PIC
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 *
6 * Author: Ashish Kalra <ashish.kalra@freescale.com>
7 *
8 * This file is licensed under the terms of the GNU General Public License
9 * version 2. This program is licensed "as is" without any warranty of any
10 * kind, whether express or implied.
11 */
12
13#include <linux/types.h>
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/irq.h>
17#include <linux/smp.h>
18#include <linux/interrupt.h>
19#include <linux/slab.h>
20#include <linux/spinlock.h>
21#include <linux/of.h>
22
23#include <asm/io.h>
24#include <asm/irq.h>
25#include <asm/smp.h>
26#include <asm/machdep.h>
27#include <asm/ehv_pic.h>
28#include <asm/fsl_hcalls.h>
29
30#include "../../../kernel/irq/settings.h"
31
32static struct ehv_pic *global_ehv_pic;
33static DEFINE_SPINLOCK(ehv_pic_lock);
34
35static u32 hwirq_intspec[NR_EHV_PIC_INTS];
36static u32 __iomem *mpic_percpu_base_vaddr;
37
38#define IRQ_TYPE_MPIC_DIRECT 4
39#define MPIC_EOI 0x00B0
40
41/*
42 * Linux descriptor level callbacks
43 */
44
45void ehv_pic_unmask_irq(struct irq_data *d)
46{
47 unsigned int src = virq_to_hw(d->irq);
48
49 ev_int_set_mask(src, 0);
50}
51
52void ehv_pic_mask_irq(struct irq_data *d)
53{
54 unsigned int src = virq_to_hw(d->irq);
55
56 ev_int_set_mask(src, 1);
57}
58
59void ehv_pic_end_irq(struct irq_data *d)
60{
61 unsigned int src = virq_to_hw(d->irq);
62
63 ev_int_eoi(src);
64}
65
66void ehv_pic_direct_end_irq(struct irq_data *d)
67{
68 out_be32(mpic_percpu_base_vaddr + MPIC_EOI / 4, 0);
69}
70
71int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest,
72 bool force)
73{
74 unsigned int src = virq_to_hw(d->irq);
75 unsigned int config, prio, cpu_dest;
76 int cpuid = irq_choose_cpu(dest);
77 unsigned long flags;
78
79 spin_lock_irqsave(&ehv_pic_lock, flags);
80 ev_int_get_config(src, &config, &prio, &cpu_dest);
81 ev_int_set_config(src, config, prio, cpuid);
82 spin_unlock_irqrestore(&ehv_pic_lock, flags);
83
84 return 0;
85}
86
87static unsigned int ehv_pic_type_to_vecpri(unsigned int type)
88{
89 /* Now convert sense value */
90
91 switch (type & IRQ_TYPE_SENSE_MASK) {
92 case IRQ_TYPE_EDGE_RISING:
93 return EHV_PIC_INFO(VECPRI_SENSE_EDGE) |
94 EHV_PIC_INFO(VECPRI_POLARITY_POSITIVE);
95
96 case IRQ_TYPE_EDGE_FALLING:
97 case IRQ_TYPE_EDGE_BOTH:
98 return EHV_PIC_INFO(VECPRI_SENSE_EDGE) |
99 EHV_PIC_INFO(VECPRI_POLARITY_NEGATIVE);
100
101 case IRQ_TYPE_LEVEL_HIGH:
102 return EHV_PIC_INFO(VECPRI_SENSE_LEVEL) |
103 EHV_PIC_INFO(VECPRI_POLARITY_POSITIVE);
104
105 case IRQ_TYPE_LEVEL_LOW:
106 default:
107 return EHV_PIC_INFO(VECPRI_SENSE_LEVEL) |
108 EHV_PIC_INFO(VECPRI_POLARITY_NEGATIVE);
109 }
110}
111
112int ehv_pic_set_irq_type(struct irq_data *d, unsigned int flow_type)
113{
114 unsigned int src = virq_to_hw(d->irq);
115 struct irq_desc *desc = irq_to_desc(d->irq);
116 unsigned int vecpri, vold, vnew, prio, cpu_dest;
117 unsigned long flags;
118
119 if (flow_type == IRQ_TYPE_NONE)
120 flow_type = IRQ_TYPE_LEVEL_LOW;
121
122 irq_settings_clr_level(desc);
123 irq_settings_set_trigger_mask(desc, flow_type);
124 if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
125 irq_settings_set_level(desc);
126
127 vecpri = ehv_pic_type_to_vecpri(flow_type);
128
129 spin_lock_irqsave(&ehv_pic_lock, flags);
130 ev_int_get_config(src, &vold, &prio, &cpu_dest);
131 vnew = vold & ~(EHV_PIC_INFO(VECPRI_POLARITY_MASK) |
132 EHV_PIC_INFO(VECPRI_SENSE_MASK));
133 vnew |= vecpri;
134
135 /*
136 * TODO : Add specific interface call for platform to set
137 * individual interrupt priorities.
138 * platform currently using static/default priority for all ints
139 */
140
141 prio = 8;
142
143 ev_int_set_config(src, vecpri, prio, cpu_dest);
144
145 spin_unlock_irqrestore(&ehv_pic_lock, flags);
146 return 0;
147}
148
149static struct irq_chip ehv_pic_irq_chip = {
150 .irq_mask = ehv_pic_mask_irq,
151 .irq_unmask = ehv_pic_unmask_irq,
152 .irq_eoi = ehv_pic_end_irq,
153 .irq_set_type = ehv_pic_set_irq_type,
154};
155
156static struct irq_chip ehv_pic_direct_eoi_irq_chip = {
157 .irq_mask = ehv_pic_mask_irq,
158 .irq_unmask = ehv_pic_unmask_irq,
159 .irq_eoi = ehv_pic_direct_end_irq,
160 .irq_set_type = ehv_pic_set_irq_type,
161};
162
163/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
164unsigned int ehv_pic_get_irq(void)
165{
166 int irq;
167
168 BUG_ON(global_ehv_pic == NULL);
169
170 if (global_ehv_pic->coreint_flag)
171 irq = mfspr(SPRN_EPR); /* if core int mode */
172 else
173 ev_int_iack(0, &irq); /* legacy mode */
174
175 if (irq == 0xFFFF) /* 0xFFFF --> no irq is pending */
176 return NO_IRQ;
177
178 /*
179 * this will also setup revmap[] in the slow path for the first
180 * time, next calls will always use fast path by indexing revmap
181 */
182 return irq_linear_revmap(global_ehv_pic->irqhost, irq);
183}
184
185static int ehv_pic_host_match(struct irq_host *h, struct device_node *node)
186{
187 /* Exact match, unless ehv_pic node is NULL */
188 return h->of_node == NULL || h->of_node == node;
189}
190
191static int ehv_pic_host_map(struct irq_host *h, unsigned int virq,
192 irq_hw_number_t hw)
193{
194 struct ehv_pic *ehv_pic = h->host_data;
195 struct irq_chip *chip;
196
197 /* Default chip */
198 chip = &ehv_pic->hc_irq;
199
200 if (mpic_percpu_base_vaddr)
201 if (hwirq_intspec[hw] & IRQ_TYPE_MPIC_DIRECT)
202 chip = &ehv_pic_direct_eoi_irq_chip;
203
204 irq_set_chip_data(virq, chip);
205 /*
206 * using handle_fasteoi_irq as our irq handler, this will
207 * only call the eoi callback and suitable for the MPIC
208 * controller which set ISR/IPR automatically and clear the
209 * highest priority active interrupt in ISR/IPR when we do
210 * a specific eoi
211 */
212 irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);
213
214 /* Set default irq type */
215 irq_set_irq_type(virq, IRQ_TYPE_NONE);
216
217 return 0;
218}
219
220static int ehv_pic_host_xlate(struct irq_host *h, struct device_node *ct,
221 const u32 *intspec, unsigned int intsize,
222 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
223
224{
225 /*
226 * interrupt sense values coming from the guest device tree
227 * interrupt specifiers can have four possible sense and
228 * level encoding information and they need to
229 * be translated between firmware type & linux type.
230 */
231
232 static unsigned char map_of_senses_to_linux_irqtype[4] = {
233 IRQ_TYPE_EDGE_FALLING,
234 IRQ_TYPE_EDGE_RISING,
235 IRQ_TYPE_LEVEL_LOW,
236 IRQ_TYPE_LEVEL_HIGH,
237 };
238
239 *out_hwirq = intspec[0];
240 if (intsize > 1) {
241 hwirq_intspec[intspec[0]] = intspec[1];
242 *out_flags = map_of_senses_to_linux_irqtype[intspec[1] &
243 ~IRQ_TYPE_MPIC_DIRECT];
244 } else {
245 *out_flags = IRQ_TYPE_NONE;
246 }
247
248 return 0;
249}
250
251static struct irq_host_ops ehv_pic_host_ops = {
252 .match = ehv_pic_host_match,
253 .map = ehv_pic_host_map,
254 .xlate = ehv_pic_host_xlate,
255};
256
257void __init ehv_pic_init(void)
258{
259 struct device_node *np, *np2;
260 struct ehv_pic *ehv_pic;
261 int coreint_flag = 1;
262
263 np = of_find_compatible_node(NULL, NULL, "epapr,hv-pic");
264 if (!np) {
265 pr_err("ehv_pic_init: could not find epapr,hv-pic node\n");
266 return;
267 }
268
269 if (!of_find_property(np, "has-external-proxy", NULL))
270 coreint_flag = 0;
271
272 ehv_pic = kzalloc(sizeof(struct ehv_pic), GFP_KERNEL);
273 if (!ehv_pic) {
274 of_node_put(np);
275 return;
276 }
277
278 ehv_pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
279 NR_EHV_PIC_INTS, &ehv_pic_host_ops, 0);
280
281 if (!ehv_pic->irqhost) {
282 of_node_put(np);
283 return;
284 }
285
286 np2 = of_find_compatible_node(NULL, NULL, "fsl,hv-mpic-per-cpu");
287 if (np2) {
288 mpic_percpu_base_vaddr = of_iomap(np2, 0);
289 if (!mpic_percpu_base_vaddr)
290 pr_err("ehv_pic_init: of_iomap failed\n");
291
292 of_node_put(np2);
293 }
294
295 ehv_pic->irqhost->host_data = ehv_pic;
296 ehv_pic->hc_irq = ehv_pic_irq_chip;
297 ehv_pic->hc_irq.irq_set_affinity = ehv_pic_set_affinity;
298 ehv_pic->coreint_flag = coreint_flag;
299
300 global_ehv_pic = ehv_pic;
301 irq_set_default_host(global_ehv_pic->irqhost);
302}
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 92e78333c47c..419a77239bd7 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -349,7 +349,7 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
349 goto error_out; 349 goto error_out;
350 } 350 }
351 351
352 msi->msi_regs = ioremap(res.start, res.end - res.start + 1); 352 msi->msi_regs = ioremap(res.start, resource_size(&res));
353 if (!msi->msi_regs) { 353 if (!msi->msi_regs) {
354 dev_err(&dev->dev, "ioremap problem failed\n"); 354 dev_err(&dev->dev, "ioremap problem failed\n");
355 goto error_out; 355 goto error_out;
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 31f99ec7382e..4ce547e00473 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -38,10 +38,17 @@ static int fsl_pcie_bus_fixup, is_mpc83xx_pci;
38 38
39static void __init quirk_fsl_pcie_header(struct pci_dev *dev) 39static void __init quirk_fsl_pcie_header(struct pci_dev *dev)
40{ 40{
41 u8 progif;
42
41 /* if we aren't a PCIe don't bother */ 43 /* if we aren't a PCIe don't bother */
42 if (!pci_find_capability(dev, PCI_CAP_ID_EXP)) 44 if (!pci_find_capability(dev, PCI_CAP_ID_EXP))
43 return; 45 return;
44 46
47 /* if we aren't in host mode don't bother */
48 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
49 if (progif & 0x1)
50 return;
51
45 dev->class = PCI_CLASS_BRIDGE_PCI << 8; 52 dev->class = PCI_CLASS_BRIDGE_PCI << 8;
46 fsl_pcie_bus_fixup = 1; 53 fsl_pcie_bus_fixup = 1;
47 return; 54 return;
@@ -64,7 +71,7 @@ static int __init setup_one_atmu(struct ccsr_pci __iomem *pci,
64{ 71{
65 resource_size_t pci_addr = res->start - offset; 72 resource_size_t pci_addr = res->start - offset;
66 resource_size_t phys_addr = res->start; 73 resource_size_t phys_addr = res->start;
67 resource_size_t size = res->end - res->start + 1; 74 resource_size_t size = resource_size(res);
68 u32 flags = 0x80044000; /* enable & mem R/W */ 75 u32 flags = 0x80044000; /* enable & mem R/W */
69 unsigned int i; 76 unsigned int i;
70 77
@@ -108,7 +115,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
108 char *name = hose->dn->full_name; 115 char *name = hose->dn->full_name;
109 116
110 pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n", 117 pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
111 (u64)rsrc->start, (u64)rsrc->end - (u64)rsrc->start + 1); 118 (u64)rsrc->start, (u64)resource_size(rsrc));
112 119
113 if (of_device_is_compatible(hose->dn, "fsl,qoriq-pcie-v2.2")) { 120 if (of_device_is_compatible(hose->dn, "fsl,qoriq-pcie-v2.2")) {
114 win_idx = 2; 121 win_idx = 2;
@@ -116,7 +123,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
116 end_idx = 3; 123 end_idx = 3;
117 } 124 }
118 125
119 pci = ioremap(rsrc->start, rsrc->end - rsrc->start + 1); 126 pci = ioremap(rsrc->start, resource_size(rsrc));
120 if (!pci) { 127 if (!pci) {
121 dev_err(hose->parent, "Unable to map ATMU registers\n"); 128 dev_err(hose->parent, "Unable to map ATMU registers\n");
122 return; 129 return;
@@ -153,9 +160,9 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
153 } else { 160 } else {
154 pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, " 161 pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
155 "phy base 0x%016llx.\n", 162 "phy base 0x%016llx.\n",
156 (u64)hose->io_resource.start, 163 (u64)hose->io_resource.start,
157 (u64)hose->io_resource.end - (u64)hose->io_resource.start + 1, 164 (u64)resource_size(&hose->io_resource),
158 (u64)hose->io_base_phys); 165 (u64)hose->io_base_phys);
159 out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12)); 166 out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12));
160 out_be32(&pci->pow[j].potear, 0); 167 out_be32(&pci->pow[j].potear, 0);
161 out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12)); 168 out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
@@ -323,6 +330,7 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
323 struct pci_controller *hose; 330 struct pci_controller *hose;
324 struct resource rsrc; 331 struct resource rsrc;
325 const int *bus_range; 332 const int *bus_range;
333 u8 progif;
326 334
327 if (!of_device_is_available(dev)) { 335 if (!of_device_is_available(dev)) {
328 pr_warning("%s: disabled\n", dev->full_name); 336 pr_warning("%s: disabled\n", dev->full_name);
@@ -353,6 +361,18 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
353 361
354 setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4, 362 setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
355 PPC_INDIRECT_TYPE_BIG_ENDIAN); 363 PPC_INDIRECT_TYPE_BIG_ENDIAN);
364
365 early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif);
366 if ((progif & 1) == 1) {
367 /* unmap cfg_data & cfg_addr separately if not on same page */
368 if (((unsigned long)hose->cfg_data & PAGE_MASK) !=
369 ((unsigned long)hose->cfg_addr & PAGE_MASK))
370 iounmap(hose->cfg_data);
371 iounmap(hose->cfg_addr);
372 pcibios_free_controller(hose);
373 return 0;
374 }
375
356 setup_pci_cmd(hose); 376 setup_pci_cmd(hose);
357 377
358 /* check PCI express link status */ 378 /* check PCI express link status */
@@ -380,70 +400,11 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
380 400
381 return 0; 401 return 0;
382} 402}
383
384DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8548E, quirk_fsl_pcie_header);
385DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8548, quirk_fsl_pcie_header);
386DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8543E, quirk_fsl_pcie_header);
387DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8543, quirk_fsl_pcie_header);
388DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8547E, quirk_fsl_pcie_header);
389DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8545E, quirk_fsl_pcie_header);
390DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8545, quirk_fsl_pcie_header);
391DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8569E, quirk_fsl_pcie_header);
392DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8569, quirk_fsl_pcie_header);
393DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8568E, quirk_fsl_pcie_header);
394DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8568, quirk_fsl_pcie_header);
395DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8567E, quirk_fsl_pcie_header);
396DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8567, quirk_fsl_pcie_header);
397DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8533E, quirk_fsl_pcie_header);
398DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8533, quirk_fsl_pcie_header);
399DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8544E, quirk_fsl_pcie_header);
400DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8544, quirk_fsl_pcie_header);
401DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8572E, quirk_fsl_pcie_header);
402DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8572, quirk_fsl_pcie_header);
403DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8536E, quirk_fsl_pcie_header);
404DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8536, quirk_fsl_pcie_header);
405DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641, quirk_fsl_pcie_header);
406DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641D, quirk_fsl_pcie_header);
407DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8610, quirk_fsl_pcie_header);
408DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011E, quirk_fsl_pcie_header);
409DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011, quirk_fsl_pcie_header);
410DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013E, quirk_fsl_pcie_header);
411DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header);
412DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header);
413DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header);
414DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021E, quirk_fsl_pcie_header);
415DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021, quirk_fsl_pcie_header);
416DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header);
417DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header);
418DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header);
419DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010, quirk_fsl_pcie_header);
420DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020E, quirk_fsl_pcie_header);
421DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020, quirk_fsl_pcie_header);
422DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2040E, quirk_fsl_pcie_header);
423DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2040, quirk_fsl_pcie_header);
424DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P3041E, quirk_fsl_pcie_header);
425DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P3041, quirk_fsl_pcie_header);
426DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040E, quirk_fsl_pcie_header);
427DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040, quirk_fsl_pcie_header);
428DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080E, quirk_fsl_pcie_header);
429DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080, quirk_fsl_pcie_header);
430DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5010E, quirk_fsl_pcie_header);
431DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5010, quirk_fsl_pcie_header);
432DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5020E, quirk_fsl_pcie_header);
433DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5020, quirk_fsl_pcie_header);
434#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */ 403#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */
435 404
436#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x) 405DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_pcie_header);
437DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8308, quirk_fsl_pcie_header);
438DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8314E, quirk_fsl_pcie_header);
439DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8314, quirk_fsl_pcie_header);
440DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8315E, quirk_fsl_pcie_header);
441DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8315, quirk_fsl_pcie_header);
442DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8377E, quirk_fsl_pcie_header);
443DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8377, quirk_fsl_pcie_header);
444DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8378E, quirk_fsl_pcie_header);
445DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8378, quirk_fsl_pcie_header);
446 406
407#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
447struct mpc83xx_pcie_priv { 408struct mpc83xx_pcie_priv {
448 void __iomem *cfg_type0; 409 void __iomem *cfg_type0;
449 void __iomem *cfg_type1; 410 void __iomem *cfg_type1;
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index b3fd081d56f5..2de8551df40f 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -1524,7 +1524,7 @@ int fsl_rio_setup(struct platform_device *dev)
1524 port->priv = priv; 1524 port->priv = priv;
1525 port->phys_efptr = 0x100; 1525 port->phys_efptr = 0x100;
1526 1526
1527 priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); 1527 priv->regs_win = ioremap(regs.start, resource_size(&regs));
1528 rio_regs_win = priv->regs_win; 1528 rio_regs_win = priv->regs_win;
1529 1529
1530 /* Probe the master port phy type */ 1530 /* Probe the master port phy type */
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 19e5015e039b..2d66275e489f 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -32,7 +32,7 @@
32#include <linux/fs_uart_pd.h> 32#include <linux/fs_uart_pd.h>
33 33
34#include <asm/system.h> 34#include <asm/system.h>
35#include <asm/atomic.h> 35#include <linux/atomic.h>
36#include <asm/io.h> 36#include <asm/io.h>
37#include <asm/irq.h> 37#include <asm/irq.h>
38#include <asm/time.h> 38#include <asm/time.h>
@@ -41,6 +41,7 @@
41#include <sysdev/fsl_soc.h> 41#include <sysdev/fsl_soc.h>
42#include <mm/mmu_decl.h> 42#include <mm/mmu_decl.h>
43#include <asm/cpm2.h> 43#include <asm/cpm2.h>
44#include <asm/fsl_hcalls.h> /* For the Freescale hypervisor */
44 45
45extern void init_fcc_ioports(struct fs_platform_info*); 46extern void init_fcc_ioports(struct fs_platform_info*);
46extern void init_fec_ioports(struct fs_platform_info*); 47extern void init_fec_ioports(struct fs_platform_info*);
@@ -252,3 +253,29 @@ void fsl_rstcr_restart(char *cmd)
252struct platform_diu_data_ops diu_ops; 253struct platform_diu_data_ops diu_ops;
253EXPORT_SYMBOL(diu_ops); 254EXPORT_SYMBOL(diu_ops);
254#endif 255#endif
256
257/*
258 * Restart the current partition
259 *
260 * This function should be assigned to the ppc_md.restart function pointer,
261 * to initiate a partition restart when we're running under the Freescale
262 * hypervisor.
263 */
264void fsl_hv_restart(char *cmd)
265{
266 pr_info("hv restart\n");
267 fh_partition_restart(-1);
268}
269
270/*
271 * Halt the current partition
272 *
273 * This function should be assigned to the ppc_md.power_off and ppc_md.halt
274 * function pointers, to shut down the partition when we're running under
275 * the Freescale hypervisor.
276 */
277void fsl_hv_halt(void)
278{
279 pr_info("hv exit\n");
280 fh_partition_stop(-1);
281}
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index 53609489a62b..2ece02beb8ff 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -36,5 +36,8 @@ struct platform_diu_data_ops {
36extern struct platform_diu_data_ops diu_ops; 36extern struct platform_diu_data_ops diu_ops;
37#endif 37#endif
38 38
39void fsl_hv_restart(char *cmd);
40void fsl_hv_halt(void);
41
39#endif 42#endif
40#endif 43#endif
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 7367d17364cb..95da897f05a7 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -736,7 +736,7 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
736 return NULL; 736 return NULL;
737 } 737 }
738 738
739 ipic->regs = ioremap(res.start, res.end - res.start + 1); 739 ipic->regs = ioremap(res.start, resource_size(&res));
740 740
741 ipic->irqhost->host_data = ipic; 741 ipic->irqhost->host_data = ipic;
742 742
diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c
index ddc877a3a23a..69f5814ae6d4 100644
--- a/arch/powerpc/sysdev/mmio_nvram.c
+++ b/arch/powerpc/sysdev/mmio_nvram.c
@@ -129,7 +129,7 @@ int __init mmio_nvram_init(void)
129 goto out; 129 goto out;
130 } 130 }
131 nvram_addr = r.start; 131 nvram_addr = r.start;
132 mmio_nvram_len = r.end - r.start + 1; 132 mmio_nvram_len = resource_size(&r);
133 if ( (!mmio_nvram_len) || (!nvram_addr) ) { 133 if ( (!mmio_nvram_len) || (!nvram_addr) ) {
134 printk(KERN_WARNING "nvram: address or length is 0\n"); 134 printk(KERN_WARNING "nvram: address or length is 0\n");
135 ret = -EIO; 135 ret = -EIO;
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index 20924f2246f0..22e48e2d71f1 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -166,7 +166,7 @@ int mpc8xx_pic_init(void)
166 if (ret) 166 if (ret)
167 goto out; 167 goto out;
168 168
169 siu_reg = ioremap(res.start, res.end - res.start + 1); 169 siu_reg = ioremap(res.start, resource_size(&res));
170 if (siu_reg == NULL) { 170 if (siu_reg == NULL) {
171 ret = -EINVAL; 171 ret = -EINVAL;
172 goto out; 172 goto out;
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 58d7a534f877..d5d3ff3d757e 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -598,42 +598,6 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
598 598
599#endif /* CONFIG_MPIC_U3_HT_IRQS */ 599#endif /* CONFIG_MPIC_U3_HT_IRQS */
600 600
601#ifdef CONFIG_SMP
602static int irq_choose_cpu(const struct cpumask *mask)
603{
604 int cpuid;
605
606 if (cpumask_equal(mask, cpu_all_mask)) {
607 static int irq_rover = 0;
608 static DEFINE_RAW_SPINLOCK(irq_rover_lock);
609 unsigned long flags;
610
611 /* Round-robin distribution... */
612 do_round_robin:
613 raw_spin_lock_irqsave(&irq_rover_lock, flags);
614
615 irq_rover = cpumask_next(irq_rover, cpu_online_mask);
616 if (irq_rover >= nr_cpu_ids)
617 irq_rover = cpumask_first(cpu_online_mask);
618
619 cpuid = irq_rover;
620
621 raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
622 } else {
623 cpuid = cpumask_first_and(mask, cpu_online_mask);
624 if (cpuid >= nr_cpu_ids)
625 goto do_round_robin;
626 }
627
628 return get_hard_smp_processor_id(cpuid);
629}
630#else
631static int irq_choose_cpu(const struct cpumask *mask)
632{
633 return hard_smp_processor_id();
634}
635#endif
636
637/* Find an mpic associated with a given linux interrupt */ 601/* Find an mpic associated with a given linux interrupt */
638static struct mpic *mpic_find(unsigned int irq) 602static struct mpic *mpic_find(unsigned int irq)
639{ 603{
@@ -849,7 +813,7 @@ static void mpic_unmask_tm(struct irq_data *d)
849 struct mpic *mpic = mpic_from_irq_data(d); 813 struct mpic *mpic = mpic_from_irq_data(d);
850 unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0]; 814 unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0];
851 815
852 DBG("%s: enable_tm: %d (tm %d)\n", mpic->name, irq, src); 816 DBG("%s: enable_tm: %d (tm %d)\n", mpic->name, d->irq, src);
853 mpic_tm_write(src, mpic_tm_read(src) & ~MPIC_VECPRI_MASK); 817 mpic_tm_write(src, mpic_tm_read(src) & ~MPIC_VECPRI_MASK);
854 mpic_tm_read(src); 818 mpic_tm_read(src);
855} 819}
diff --git a/arch/powerpc/sysdev/mv64x60_udbg.c b/arch/powerpc/sysdev/mv64x60_udbg.c
index 2792dc8b038c..50a81387e9b1 100644
--- a/arch/powerpc/sysdev/mv64x60_udbg.c
+++ b/arch/powerpc/sysdev/mv64x60_udbg.c
@@ -125,11 +125,11 @@ static void mv64x60_udbg_init(void)
125 125
126 of_node_put(np); 126 of_node_put(np);
127 127
128 mpsc_base = ioremap(r[0].start, r[0].end - r[0].start + 1); 128 mpsc_base = ioremap(r[0].start, resource_size(&r[0]));
129 if (!mpsc_base) 129 if (!mpsc_base)
130 return; 130 return;
131 131
132 mpsc_intr_cause = ioremap(r[1].start, r[1].end - r[1].start + 1); 132 mpsc_intr_cause = ioremap(r[1].start, resource_size(&r[1]));
133 if (!mpsc_intr_cause) { 133 if (!mpsc_intr_cause) {
134 iounmap(mpsc_base); 134 iounmap(mpsc_base);
135 return; 135 return;
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index 9c067fa3b878..a59ba96d2c21 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -265,7 +265,7 @@ static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
265 if (ppc4xx_setup_one_pci_PMM(hose, reg, 265 if (ppc4xx_setup_one_pci_PMM(hose, reg,
266 res->start, 266 res->start,
267 res->start - hose->pci_mem_offset, 267 res->start - hose->pci_mem_offset,
268 res->end + 1 - res->start, 268 resource_size(res),
269 res->flags, 269 res->flags,
270 j) == 0) { 270 j) == 0) {
271 j++; 271 j++;
@@ -290,7 +290,7 @@ static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose,
290 void __iomem *reg, 290 void __iomem *reg,
291 const struct resource *res) 291 const struct resource *res)
292{ 292{
293 resource_size_t size = res->end - res->start + 1; 293 resource_size_t size = resource_size(res);
294 u32 sa; 294 u32 sa;
295 295
296 /* Calculate window size */ 296 /* Calculate window size */
@@ -349,7 +349,7 @@ static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
349 bus_range = of_get_property(np, "bus-range", NULL); 349 bus_range = of_get_property(np, "bus-range", NULL);
350 350
351 /* Map registers */ 351 /* Map registers */
352 reg = ioremap(rsrc_reg.start, rsrc_reg.end + 1 - rsrc_reg.start); 352 reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
353 if (reg == NULL) { 353 if (reg == NULL) {
354 printk(KERN_ERR "%s: Can't map registers !", np->full_name); 354 printk(KERN_ERR "%s: Can't map registers !", np->full_name);
355 goto fail; 355 goto fail;
@@ -465,7 +465,7 @@ static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
465 if (ppc4xx_setup_one_pcix_POM(hose, reg, 465 if (ppc4xx_setup_one_pcix_POM(hose, reg,
466 res->start, 466 res->start,
467 res->start - hose->pci_mem_offset, 467 res->start - hose->pci_mem_offset,
468 res->end + 1 - res->start, 468 resource_size(res),
469 res->flags, 469 res->flags,
470 j) == 0) { 470 j) == 0) {
471 j++; 471 j++;
@@ -492,7 +492,7 @@ static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose,
492 int big_pim, 492 int big_pim,
493 int enable_msi_hole) 493 int enable_msi_hole)
494{ 494{
495 resource_size_t size = res->end - res->start + 1; 495 resource_size_t size = resource_size(res);
496 u32 sa; 496 u32 sa;
497 497
498 /* RAM is always at 0 */ 498 /* RAM is always at 0 */
@@ -555,7 +555,7 @@ static void __init ppc4xx_probe_pcix_bridge(struct device_node *np)
555 bus_range = of_get_property(np, "bus-range", NULL); 555 bus_range = of_get_property(np, "bus-range", NULL);
556 556
557 /* Map registers */ 557 /* Map registers */
558 reg = ioremap(rsrc_reg.start, rsrc_reg.end + 1 - rsrc_reg.start); 558 reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
559 if (reg == NULL) { 559 if (reg == NULL) {
560 printk(KERN_ERR "%s: Can't map registers !", np->full_name); 560 printk(KERN_ERR "%s: Can't map registers !", np->full_name);
561 goto fail; 561 goto fail;
@@ -650,12 +650,74 @@ struct ppc4xx_pciex_hwops
650 int (*core_init)(struct device_node *np); 650 int (*core_init)(struct device_node *np);
651 int (*port_init_hw)(struct ppc4xx_pciex_port *port); 651 int (*port_init_hw)(struct ppc4xx_pciex_port *port);
652 int (*setup_utl)(struct ppc4xx_pciex_port *port); 652 int (*setup_utl)(struct ppc4xx_pciex_port *port);
653 void (*check_link)(struct ppc4xx_pciex_port *port);
653}; 654};
654 655
655static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops; 656static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops;
656 657
657#ifdef CONFIG_44x 658#ifdef CONFIG_44x
658 659
660static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port,
661 unsigned int sdr_offset,
662 unsigned int mask,
663 unsigned int value,
664 int timeout_ms)
665{
666 u32 val;
667
668 while(timeout_ms--) {
669 val = mfdcri(SDR0, port->sdr_base + sdr_offset);
670 if ((val & mask) == value) {
671 pr_debug("PCIE%d: Wait on SDR %x success with tm %d (%08x)\n",
672 port->index, sdr_offset, timeout_ms, val);
673 return 0;
674 }
675 msleep(1);
676 }
677 return -1;
678}
679
680static int __init ppc4xx_pciex_port_reset_sdr(struct ppc4xx_pciex_port *port)
681{
682 /* Wait for reset to complete */
683 if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1 << 20, 0, 10)) {
684 printk(KERN_WARNING "PCIE%d: PGRST failed\n",
685 port->index);
686 return -1;
687 }
688 return 0;
689}
690
691static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port)
692{
693 printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
694
695 /* Check for card presence detect if supported, if not, just wait for
696 * link unconditionally.
697 *
698 * note that we don't fail if there is no link, we just filter out
699 * config space accesses. That way, it will be easier to implement
700 * hotplug later on.
701 */
702 if (!port->has_ibpre ||
703 !ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
704 1 << 28, 1 << 28, 100)) {
705 printk(KERN_INFO
706 "PCIE%d: Device detected, waiting for link...\n",
707 port->index);
708 if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
709 0x1000, 0x1000, 2000))
710 printk(KERN_WARNING
711 "PCIE%d: Link up failed\n", port->index);
712 else {
713 printk(KERN_INFO
714 "PCIE%d: link is up !\n", port->index);
715 port->link = 1;
716 }
717 } else
718 printk(KERN_INFO "PCIE%d: No device detected.\n", port->index);
719}
720
659/* Check various reset bits of the 440SPe PCIe core */ 721/* Check various reset bits of the 440SPe PCIe core */
660static int __init ppc440spe_pciex_check_reset(struct device_node *np) 722static int __init ppc440spe_pciex_check_reset(struct device_node *np)
661{ 723{
@@ -806,7 +868,7 @@ static int ppc440spe_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
806 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, 868 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
807 (1 << 24) | (1 << 16), 1 << 12); 869 (1 << 24) | (1 << 16), 1 << 12);
808 870
809 return 0; 871 return ppc4xx_pciex_port_reset_sdr(port);
810} 872}
811 873
812static int ppc440speA_pciex_init_port_hw(struct ppc4xx_pciex_port *port) 874static int ppc440speA_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
@@ -856,6 +918,7 @@ static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata =
856 .core_init = ppc440spe_pciex_core_init, 918 .core_init = ppc440spe_pciex_core_init,
857 .port_init_hw = ppc440speA_pciex_init_port_hw, 919 .port_init_hw = ppc440speA_pciex_init_port_hw,
858 .setup_utl = ppc440speA_pciex_init_utl, 920 .setup_utl = ppc440speA_pciex_init_utl,
921 .check_link = ppc4xx_pciex_check_link_sdr,
859}; 922};
860 923
861static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata = 924static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata =
@@ -863,6 +926,7 @@ static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata =
863 .core_init = ppc440spe_pciex_core_init, 926 .core_init = ppc440spe_pciex_core_init,
864 .port_init_hw = ppc440speB_pciex_init_port_hw, 927 .port_init_hw = ppc440speB_pciex_init_port_hw,
865 .setup_utl = ppc440speB_pciex_init_utl, 928 .setup_utl = ppc440speB_pciex_init_utl,
929 .check_link = ppc4xx_pciex_check_link_sdr,
866}; 930};
867 931
868static int __init ppc460ex_pciex_core_init(struct device_node *np) 932static int __init ppc460ex_pciex_core_init(struct device_node *np)
@@ -944,7 +1008,7 @@ static int ppc460ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
944 1008
945 port->has_ibpre = 1; 1009 port->has_ibpre = 1;
946 1010
947 return 0; 1011 return ppc4xx_pciex_port_reset_sdr(port);
948} 1012}
949 1013
950static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port) 1014static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
@@ -972,6 +1036,7 @@ static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
972 .core_init = ppc460ex_pciex_core_init, 1036 .core_init = ppc460ex_pciex_core_init,
973 .port_init_hw = ppc460ex_pciex_init_port_hw, 1037 .port_init_hw = ppc460ex_pciex_init_port_hw,
974 .setup_utl = ppc460ex_pciex_init_utl, 1038 .setup_utl = ppc460ex_pciex_init_utl,
1039 .check_link = ppc4xx_pciex_check_link_sdr,
975}; 1040};
976 1041
977static int __init ppc460sx_pciex_core_init(struct device_node *np) 1042static int __init ppc460sx_pciex_core_init(struct device_node *np)
@@ -1075,7 +1140,7 @@ static int ppc460sx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1075 1140
1076 port->has_ibpre = 1; 1141 port->has_ibpre = 1;
1077 1142
1078 return 0; 1143 return ppc4xx_pciex_port_reset_sdr(port);
1079} 1144}
1080 1145
1081static int ppc460sx_pciex_init_utl(struct ppc4xx_pciex_port *port) 1146static int ppc460sx_pciex_init_utl(struct ppc4xx_pciex_port *port)
@@ -1089,6 +1154,7 @@ static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
1089 .core_init = ppc460sx_pciex_core_init, 1154 .core_init = ppc460sx_pciex_core_init,
1090 .port_init_hw = ppc460sx_pciex_init_port_hw, 1155 .port_init_hw = ppc460sx_pciex_init_port_hw,
1091 .setup_utl = ppc460sx_pciex_init_utl, 1156 .setup_utl = ppc460sx_pciex_init_utl,
1157 .check_link = ppc4xx_pciex_check_link_sdr,
1092}; 1158};
1093 1159
1094#endif /* CONFIG_44x */ 1160#endif /* CONFIG_44x */
@@ -1154,7 +1220,7 @@ static int ppc405ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1154 1220
1155 port->has_ibpre = 1; 1221 port->has_ibpre = 1;
1156 1222
1157 return 0; 1223 return ppc4xx_pciex_port_reset_sdr(port);
1158} 1224}
1159 1225
1160static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port) 1226static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
@@ -1183,11 +1249,11 @@ static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata =
1183 .core_init = ppc405ex_pciex_core_init, 1249 .core_init = ppc405ex_pciex_core_init,
1184 .port_init_hw = ppc405ex_pciex_init_port_hw, 1250 .port_init_hw = ppc405ex_pciex_init_port_hw,
1185 .setup_utl = ppc405ex_pciex_init_utl, 1251 .setup_utl = ppc405ex_pciex_init_utl,
1252 .check_link = ppc4xx_pciex_check_link_sdr,
1186}; 1253};
1187 1254
1188#endif /* CONFIG_40x */ 1255#endif /* CONFIG_40x */
1189 1256
1190
1191/* Check that the core has been initied and if not, do it */ 1257/* Check that the core has been initied and if not, do it */
1192static int __init ppc4xx_pciex_check_core_init(struct device_node *np) 1258static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
1193{ 1259{
@@ -1261,26 +1327,6 @@ static void __init ppc4xx_pciex_port_init_mapping(struct ppc4xx_pciex_port *port
1261 dcr_write(port->dcrs, DCRO_PEGPL_MSGMSK, 0); 1327 dcr_write(port->dcrs, DCRO_PEGPL_MSGMSK, 0);
1262} 1328}
1263 1329
1264static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port,
1265 unsigned int sdr_offset,
1266 unsigned int mask,
1267 unsigned int value,
1268 int timeout_ms)
1269{
1270 u32 val;
1271
1272 while(timeout_ms--) {
1273 val = mfdcri(SDR0, port->sdr_base + sdr_offset);
1274 if ((val & mask) == value) {
1275 pr_debug("PCIE%d: Wait on SDR %x success with tm %d (%08x)\n",
1276 port->index, sdr_offset, timeout_ms, val);
1277 return 0;
1278 }
1279 msleep(1);
1280 }
1281 return -1;
1282}
1283
1284static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port) 1330static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
1285{ 1331{
1286 int rc = 0; 1332 int rc = 0;
@@ -1291,40 +1337,8 @@ static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
1291 if (rc != 0) 1337 if (rc != 0)
1292 return rc; 1338 return rc;
1293 1339
1294 printk(KERN_INFO "PCIE%d: Checking link...\n", 1340 if (ppc4xx_pciex_hwops->check_link)
1295 port->index); 1341 ppc4xx_pciex_hwops->check_link(port);
1296
1297 /* Wait for reset to complete */
1298 if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1 << 20, 0, 10)) {
1299 printk(KERN_WARNING "PCIE%d: PGRST failed\n",
1300 port->index);
1301 return -1;
1302 }
1303
1304 /* Check for card presence detect if supported, if not, just wait for
1305 * link unconditionally.
1306 *
1307 * note that we don't fail if there is no link, we just filter out
1308 * config space accesses. That way, it will be easier to implement
1309 * hotplug later on.
1310 */
1311 if (!port->has_ibpre ||
1312 !ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
1313 1 << 28, 1 << 28, 100)) {
1314 printk(KERN_INFO
1315 "PCIE%d: Device detected, waiting for link...\n",
1316 port->index);
1317 if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
1318 0x1000, 0x1000, 2000))
1319 printk(KERN_WARNING
1320 "PCIE%d: Link up failed\n", port->index);
1321 else {
1322 printk(KERN_INFO
1323 "PCIE%d: link is up !\n", port->index);
1324 port->link = 1;
1325 }
1326 } else
1327 printk(KERN_INFO "PCIE%d: No device detected.\n", port->index);
1328 1342
1329 /* 1343 /*
1330 * Initialize mapping: disable all regions and configure 1344 * Initialize mapping: disable all regions and configure
@@ -1347,14 +1361,17 @@ static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
1347 /* 1361 /*
1348 * Check for VC0 active and assert RDY. 1362 * Check for VC0 active and assert RDY.
1349 */ 1363 */
1350 if (port->link && 1364 if (port->sdr_base) {
1351 ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1365 if (port->link &&
1352 1 << 16, 1 << 16, 5000)) { 1366 ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS,
1353 printk(KERN_INFO "PCIE%d: VC0 not active\n", port->index); 1367 1 << 16, 1 << 16, 5000)) {
1354 port->link = 0; 1368 printk(KERN_INFO "PCIE%d: VC0 not active\n", port->index);
1369 port->link = 0;
1370 }
1371
1372 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, 0, 1 << 20);
1355 } 1373 }
1356 1374
1357 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, 0, 1 << 20);
1358 msleep(100); 1375 msleep(100);
1359 1376
1360 return 0; 1377 return 0;
@@ -1604,7 +1621,7 @@ static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
1604 if (ppc4xx_setup_one_pciex_POM(port, hose, mbase, 1621 if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1605 res->start, 1622 res->start,
1606 res->start - hose->pci_mem_offset, 1623 res->start - hose->pci_mem_offset,
1607 res->end + 1 - res->start, 1624 resource_size(res),
1608 res->flags, 1625 res->flags,
1609 j) == 0) { 1626 j) == 0) {
1610 j++; 1627 j++;
@@ -1639,7 +1656,7 @@ static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
1639 void __iomem *mbase, 1656 void __iomem *mbase,
1640 struct resource *res) 1657 struct resource *res)
1641{ 1658{
1642 resource_size_t size = res->end - res->start + 1; 1659 resource_size_t size = resource_size(res);
1643 u64 sa; 1660 u64 sa;
1644 1661
1645 if (port->endpoint) { 1662 if (port->endpoint) {
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index b2acda07220d..18e75ca19fe6 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -347,7 +347,7 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags,
347 return; 347 return;
348 } 348 }
349 349
350 qe_ic->regs = ioremap(res.start, res.end - res.start + 1); 350 qe_ic->regs = ioremap(res.start, resource_size(&res));
351 351
352 qe_ic->irqhost->host_data = qe_ic; 352 qe_ic->irqhost->host_data = qe_ic;
353 qe_ic->hc_irq = qe_ic_irq_chip; 353 qe_ic->hc_irq = qe_ic_irq_chip;
diff --git a/arch/powerpc/sysdev/qe_lib/qe_io.c b/arch/powerpc/sysdev/qe_lib/qe_io.c
index 77e4934b88c5..fd1a6c3b1721 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_io.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_io.c
@@ -41,7 +41,7 @@ int par_io_init(struct device_node *np)
41 ret = of_address_to_resource(np, 0, &res); 41 ret = of_address_to_resource(np, 0, &res);
42 if (ret) 42 if (ret)
43 return ret; 43 return ret;
44 par_io = ioremap(res.start, res.end - res.start + 1); 44 par_io = ioremap(res.start, resource_size(&res));
45 45
46 num_ports = of_get_property(np, "num-ports", NULL); 46 num_ports = of_get_property(np, "num-ports", NULL);
47 if (num_ports) 47 if (num_ports)
diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c
index ee056807b52c..9f51f97abb5d 100644
--- a/arch/powerpc/sysdev/tsi108_dev.c
+++ b/arch/powerpc/sysdev/tsi108_dev.c
@@ -23,7 +23,7 @@
23#include <asm/tsi108.h> 23#include <asm/tsi108.h>
24 24
25#include <asm/system.h> 25#include <asm/system.h>
26#include <asm/atomic.h> 26#include <linux/atomic.h>
27#include <asm/io.h> 27#include <asm/io.h>
28#include <asm/irq.h> 28#include <asm/irq.h>
29#include <asm/prom.h> 29#include <asm/prom.h>
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 1f15ad436140..50e32afe392e 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -17,6 +17,7 @@
17#include <linux/cpu.h> 17#include <linux/cpu.h>
18#include <linux/of.h> 18#include <linux/of.h>
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <linux/module.h>
20 21
21#include <asm/prom.h> 22#include <asm/prom.h>
22#include <asm/io.h> 23#include <asm/io.h>
@@ -24,6 +25,7 @@
24#include <asm/irq.h> 25#include <asm/irq.h>
25#include <asm/errno.h> 26#include <asm/errno.h>
26#include <asm/xics.h> 27#include <asm/xics.h>
28#include <asm/kvm_ppc.h>
27 29
28struct icp_ipl { 30struct icp_ipl {
29 union { 31 union {
@@ -139,6 +141,12 @@ static void icp_native_cause_ipi(int cpu, unsigned long data)
139 icp_native_set_qirr(cpu, IPI_PRIORITY); 141 icp_native_set_qirr(cpu, IPI_PRIORITY);
140} 142}
141 143
144void xics_wake_cpu(int cpu)
145{
146 icp_native_set_qirr(cpu, IPI_PRIORITY);
147}
148EXPORT_SYMBOL_GPL(xics_wake_cpu);
149
142static irqreturn_t icp_native_ipi_action(int irq, void *dev_id) 150static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
143{ 151{
144 int cpu = smp_processor_id(); 152 int cpu = smp_processor_id();
@@ -185,6 +193,7 @@ static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
185 } 193 }
186 194
187 icp_native_regs[cpu] = ioremap(addr, size); 195 icp_native_regs[cpu] = ioremap(addr, size);
196 kvmppc_set_xics_phys(cpu, addr);
188 if (!icp_native_regs[cpu]) { 197 if (!icp_native_regs[cpu]) {
189 pr_warning("icp_native: Failed ioremap for CPU %d, " 198 pr_warning("icp_native: Failed ioremap for CPU %d, "
190 "interrupt server #0x%x, addr %#lx\n", 199 "interrupt server #0x%x, addr %#lx\n",
@@ -247,7 +256,7 @@ static int __init icp_native_init_one_node(struct device_node *np,
247 return -1; 256 return -1;
248 } 257 }
249 258
250 if (icp_native_map_one_cpu(*indx, r.start, r.end - r.start)) 259 if (icp_native_map_one_cpu(*indx, r.start, resource_size(&r)))
251 return -1; 260 return -1;
252 261
253 (*indx)++; 262 (*indx)++;