author     Linus Torvalds <torvalds@linux-foundation.org>   2014-01-28 00:11:26 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-01-28 00:11:26 -0500
commit     1b17366d695c8ab03f98d0155357e97a427e1dce (patch)
tree       d223c79cc33ca1d890d264a202a1dd9c29655039
parent     d12de1ef5eba3adb88f8e9dd81b6a60349466378 (diff)
parent     7179ba52889bef7e5e23f72908270e1ab2b7fc6f (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Ben Herrenschmidt:
 "So here's my next branch for powerpc. A bit late as I was on vacation
  last week. It's mostly the same stuff that was in next already, I just
  added two patches today which are the wiring up of lockref for powerpc,
  which for some reason fell through the cracks last time and is trivial.

  The highlights are, in addition to a bunch of bug fixes:

   - Reworked Machine Check handling on kernels running without a
     hypervisor (or acting as a hypervisor). Provides hooks to handle
     some errors in real mode such as TLB errors, handle SLB errors,
     etc...

   - Support for retrieving memory error information from the service
     processor on IBM servers running without a hypervisor and routing
     them to the memory poison infrastructure.

   - _PAGE_NUMA support on server processors

   - 32-bit BookE relocatable kernel support

   - FSL e6500 hardware tablewalk support

   - A bunch of new/revived board support

   - FSL e6500 deeper idle states and altivec powerdown support

  You'll notice a generic mm change here, it has been acked by the
  relevant authorities and is a pre-req for our _PAGE_NUMA support"

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (121 commits)
  powerpc: Implement arch_spin_is_locked() using arch_spin_value_unlocked()
  powerpc: Add support for the optimised lockref implementation
  powerpc/powernv: Call OPAL sync before kexec'ing
  powerpc/eeh: Escalate error on non-existing PE
  powerpc/eeh: Handle multiple EEH errors
  powerpc: Fix transactional FP/VMX/VSX unavailable handlers
  powerpc: Don't corrupt transactional state when using FP/VMX in kernel
  powerpc: Reclaim two unused thread_info flag bits
  powerpc: Fix races with irq_work
  Move precessing of MCE queued event out from syscall exit path.
  pseries/cpuidle: Remove redundant call to ppc64_runlatch_off() in cpu idle routines
  powerpc: Make add_system_ram_resources() __init
  powerpc: add SATA_MV to ppc64_defconfig
  powerpc/powernv: Increase candidate fw image size
  powerpc: Add debug checks to catch invalid cpu-to-node mappings
  powerpc: Fix the setup of CPU-to-Node mappings during CPU online
  powerpc/iommu: Don't detach device without IOMMU group
  powerpc/eeh: Hotplug improvement
  powerpc/eeh: Call opal_pci_reinit() on powernv for restoring config space
  powerpc/eeh: Add restore_config operation
  ...
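For context on the lockref wiring mentioned above: CONFIG_ARCH_USE_CMPXCHG_LOCKREF
(selected for PPC64 in the arch/powerpc/Kconfig hunk further down) only asks the
architecture to provide arch_spin_value_unlocked(), which lib/lockref.c uses to test
a spinlock value that was read as part of a cmpxchg over the combined lock/count
word, without ever taking the lock. The sketch below is illustrative only and assumes
the usual powerpc arch_spinlock_t layout; it is not the verbatim merged patch (the
real change is in arch/powerpc/include/asm/spinlock.h, see the diffstat below).

    /* Illustrative sketch only -- not the verbatim merged patch. */
    typedef struct {
            volatile unsigned int slock;    /* 0 means unlocked */
    } arch_spinlock_t;

    /*
     * Test a lock *value* rather than a lock *pointer*: lib/lockref.c
     * reads a snapshot of the whole { lock, count } word in a cmpxchg
     * loop and needs to check the lock part of that snapshot without
     * acquiring it.
     */
    static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
    {
            return lock.slock == 0;
    }

    /* arch_spin_is_locked() can then be expressed in terms of the above. */
    static inline int arch_spin_is_locked(arch_spinlock_t *lock)
    {
            return !arch_spin_value_unlocked(*lock);
    }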
-rw-r--r--  Documentation/devicetree/bindings/video/ssd1289fb.txt | 13
-rw-r--r--  arch/powerpc/Kconfig | 15
-rw-r--r--  arch/powerpc/boot/.gitignore | 1
-rw-r--r--  arch/powerpc/boot/Makefile | 7
-rw-r--r--  arch/powerpc/boot/dts/fsl/elo3-dma-2.dtsi | 82
-rw-r--r--  arch/powerpc/boot/dts/fsl/p1020si-post.dtsi | 3
-rw-r--r--  arch/powerpc/boot/dts/fsl/p1021si-post.dtsi | 3
-rw-r--r--  arch/powerpc/boot/dts/fsl/p1022si-post.dtsi | 3
-rw-r--r--  arch/powerpc/boot/dts/fsl/p1023si-post.dtsi | 3
-rw-r--r--  arch/powerpc/boot/dts/kilauea.dts | 2
-rw-r--r--  arch/powerpc/boot/dts/mvme5100.dts | 185
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb-pa.dts | 23
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb-pa.dtsi | 85
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb-pa_36b.dts (renamed from arch/powerpc/boot/dts/p1010rdb_36b.dts) | 47
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb-pb.dts | 35
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb-pb_36b.dts | 58
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb.dts | 66
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb.dtsi | 43
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb_32b.dtsi | 79
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb_36b.dtsi | 79
-rw-r--r--  arch/powerpc/boot/dts/p1022ds.dtsi | 3
-rw-r--r--  arch/powerpc/boot/dts/p1025twr.dts | 95
-rw-r--r--  arch/powerpc/boot/dts/p1025twr.dtsi | 280
-rw-r--r--  arch/powerpc/boot/dts/virtex440-ml507.dts | 2
-rw-r--r--  arch/powerpc/boot/mvme5100.c | 27
-rwxr-xr-x  arch/powerpc/boot/wrapper | 4
-rw-r--r--  arch/powerpc/configs/85xx/p1023_defconfig | 188
-rw-r--r--  arch/powerpc/configs/adder875_defconfig | 1
-rw-r--r--  arch/powerpc/configs/ep88xc_defconfig | 1
-rw-r--r--  arch/powerpc/configs/mpc85xx_defconfig | 3
-rw-r--r--  arch/powerpc/configs/mpc85xx_smp_defconfig | 3
-rw-r--r--  arch/powerpc/configs/mpc866_ads_defconfig | 1
-rw-r--r--  arch/powerpc/configs/mpc885_ads_defconfig | 1
-rw-r--r--  arch/powerpc/configs/mvme5100_defconfig | 144
-rw-r--r--  arch/powerpc/configs/ppc64_defconfig | 1
-rw-r--r--  arch/powerpc/configs/tqm8xx_defconfig | 1
-rw-r--r--  arch/powerpc/include/asm/bitops.h | 5
-rw-r--r--  arch/powerpc/include/asm/cache.h | 14
-rw-r--r--  arch/powerpc/include/asm/cmpxchg.h | 1
-rw-r--r--  arch/powerpc/include/asm/code-patching.h | 7
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 12
-rw-r--r--  arch/powerpc/include/asm/eeh.h | 14
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 21
-rw-r--r--  arch/powerpc/include/asm/fsl_lbc.h | 2
-rw-r--r--  arch/powerpc/include/asm/hardirq.h | 3
-rw-r--r--  arch/powerpc/include/asm/io.h | 16
-rw-r--r--  arch/powerpc/include/asm/iommu.h | 54
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h | 1
-rw-r--r--  arch/powerpc/include/asm/lppaca.h | 2
-rw-r--r--  arch/powerpc/include/asm/mce.h | 197
-rw-r--r--  arch/powerpc/include/asm/mmu-book3e.h | 13
-rw-r--r--  arch/powerpc/include/asm/mmu.h | 21
-rw-r--r--  arch/powerpc/include/asm/opal.h | 108
-rw-r--r--  arch/powerpc/include/asm/paca.h | 16
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 66
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 14
-rw-r--r--  arch/powerpc/include/asm/processor.h | 8
-rw-r--r--  arch/powerpc/include/asm/ps3.h | 1
-rw-r--r--  arch/powerpc/include/asm/pte-hash64.h | 8
-rw-r--r--  arch/powerpc/include/asm/reg.h | 2
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h | 10
-rw-r--r--  arch/powerpc/include/asm/spinlock.h | 12
-rw-r--r--  arch/powerpc/include/asm/thread_info.h | 9
-rw-r--r--  arch/powerpc/include/asm/tm.h | 1
-rw-r--r--  arch/powerpc/include/asm/topology.h | 10
-rw-r--r--  arch/powerpc/include/asm/vio.h | 1
-rw-r--r--  arch/powerpc/kernel/Makefile | 1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 13
-rw-r--r--  arch/powerpc/kernel/cacheinfo.c | 1
-rw-r--r--  arch/powerpc/kernel/cpu_setup_fsl_booke.S | 54
-rw-r--r--  arch/powerpc/kernel/cpu_setup_power.S | 38
-rw-r--r--  arch/powerpc/kernel/cputable.c | 16
-rw-r--r--  arch/powerpc/kernel/crash.c | 1
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c | 4
-rw-r--r--  arch/powerpc/kernel/eeh.c | 17
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c | 166
-rw-r--r--  arch/powerpc/kernel/eeh_pe.c | 4
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 12
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S | 27
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 242
-rw-r--r--  arch/powerpc/kernel/fpu.S | 16
-rw-r--r--  arch/powerpc/kernel/fsl_booke_entry_mapping.S | 2
-rw-r--r--  arch/powerpc/kernel/head_64.S | 1
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S | 266
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c | 1
-rw-r--r--  arch/powerpc/kernel/idle_power7.S | 1
-rw-r--r--  arch/powerpc/kernel/iomap.c | 1
-rw-r--r--  arch/powerpc/kernel/iommu.c | 145
-rw-r--r--  arch/powerpc/kernel/irq.c | 12
-rw-r--r--  arch/powerpc/kernel/kgdb.c | 1
-rw-r--r--  arch/powerpc/kernel/mce.c | 352
-rw-r--r--  arch/powerpc/kernel/mce_power.c | 284
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 4
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 6
-rw-r--r--  arch/powerpc/kernel/paca.c | 37
-rw-r--r--  arch/powerpc/kernel/process.c | 177
-rw-r--r--  arch/powerpc/kernel/prom.c | 41
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 47
-rw-r--r--  arch/powerpc/kernel/signal.c | 3
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 21
-rw-r--r--  arch/powerpc/kernel/signal_64.c | 14
-rw-r--r--  arch/powerpc/kernel/smp-tbsync.c | 1
-rw-r--r--  arch/powerpc/kernel/smp.c | 9
-rw-r--r--  arch/powerpc/kernel/swsusp_booke.S | 32
-rw-r--r--  arch/powerpc/kernel/syscalls.c | 1
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 388
-rw-r--r--  arch/powerpc/kernel/time.c | 14
-rw-r--r--  arch/powerpc/kernel/traps.c | 72
-rw-r--r--  arch/powerpc/kernel/vdso32/vdso32_wrapper.S | 1
-rw-r--r--  arch/powerpc/kernel/vdso64/vdso64_wrapper.S | 1
-rw-r--r--  arch/powerpc/kernel/vector.S | 10
-rw-r--r--  arch/powerpc/kernel/vio.c | 31
-rw-r--r--  arch/powerpc/kvm/book3s_hv_ras.c | 50
-rw-r--r--  arch/powerpc/kvm/bookehv_interrupts.S | 2
-rw-r--r--  arch/powerpc/lib/code-patching.c | 15
-rw-r--r--  arch/powerpc/lib/crtsavres.S | 186
-rw-r--r--  arch/powerpc/math-emu/math_efp.c | 316
-rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c | 80
-rw-r--r--  arch/powerpc/mm/hash_low_64.S | 15
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 7
-rw-r--r--  arch/powerpc/mm/hugepage-hash64.c | 6
-rw-r--r--  arch/powerpc/mm/hugetlbpage-book3e.c | 54
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c | 4
-rw-r--r--  arch/powerpc/mm/mem.c | 8
-rw-r--r--  arch/powerpc/mm/mmu_decl.h | 2
-rw-r--r--  arch/powerpc/mm/numa.c | 96
-rw-r--r--  arch/powerpc/mm/pgtable.c | 3
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 1
-rw-r--r--  arch/powerpc/mm/pgtable_64.c | 15
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c | 1
-rw-r--r--  arch/powerpc/mm/tlb_low_64e.S | 174
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c | 112
-rw-r--r--  arch/powerpc/mm/tlb_nohash_low.S | 4
-rw-r--r--  arch/powerpc/oprofile/op_model_7450.c | 1
-rw-r--r--  arch/powerpc/oprofile/op_model_cell.c | 1
-rw-r--r--  arch/powerpc/oprofile/op_model_fsl_emb.c | 1
-rw-r--r--  arch/powerpc/oprofile/op_model_pa6t.c | 1
-rw-r--r--  arch/powerpc/oprofile/op_model_power4.c | 1
-rw-r--r--  arch/powerpc/oprofile/op_model_rs64.c | 1
-rw-r--r--  arch/powerpc/platforms/83xx/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c | 1
-rw-r--r--  arch/powerpc/platforms/83xx/suspend.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/Kconfig | 6
-rw-r--r--  arch/powerpc/platforms/85xx/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/85xx/common.c | 38
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx.h | 6
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_mds.c | 29
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_rdb.c | 25
-rw-r--r--  arch/powerpc/platforms/85xx/sgy_cts1000.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 17
-rw-r--r--  arch/powerpc/platforms/85xx/twr_p102x.c | 147
-rw-r--r--  arch/powerpc/platforms/8xx/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 1
-rw-r--r--  arch/powerpc/platforms/cell/beat_htab.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c | 14
-rw-r--r--  arch/powerpc/platforms/chrp/smp.c | 1
-rw-r--r--  arch/powerpc/platforms/embedded6xx/Kconfig | 13
-rw-r--r--  arch/powerpc/platforms/embedded6xx/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/embedded6xx/hlwd-pic.c | 1
-rw-r--r--  arch/powerpc/platforms/embedded6xx/mvme5100.c | 221
-rw-r--r--  arch/powerpc/platforms/pasemi/dma_lib.c | 1
-rw-r--r--  arch/powerpc/platforms/pasemi/iommu.c | 5
-rw-r--r--  arch/powerpc/platforms/powermac/pfunc_core.c | 1
-rw-r--r--  arch/powerpc/platforms/powernv/Kconfig | 5
-rw-r--r--  arch/powerpc/platforms/powernv/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-ioda.c | 223
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-powernv.c | 24
-rw-r--r--  arch/powerpc/platforms/powernv/opal-flash.c | 35
-rw-r--r--  arch/powerpc/platforms/powernv/opal-memory-errors.c | 146
-rw-r--r--  arch/powerpc/platforms/powernv/opal-rtc.c | 6
-rw-r--r--  arch/powerpc/platforms/powernv/opal-wrappers.S | 3
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c | 267
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 11
-rw-r--r--  arch/powerpc/platforms/powernv/pci-p5ioc2.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c | 230
-rw-r--r--  arch/powerpc/platforms/powernv/pci.h | 3
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c | 6
-rw-r--r--  arch/powerpc/platforms/ps3/spu.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/pseries/cmm.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 167
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 5
-rw-r--r--  arch/powerpc/platforms/pseries/processor_idle.c | 3
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 4
-rw-r--r--  arch/powerpc/platforms/wsp/wsp_pci.c | 11
-rw-r--r--  arch/powerpc/sysdev/Kconfig | 2
-rw-r--r--  arch/powerpc/sysdev/cpm2_pic.c | 1
-rw-r--r--  arch/powerpc/sysdev/fsl_ifc.c | 1
-rw-r--r--  arch/powerpc/sysdev/fsl_lbc.c | 31
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c | 5
-rw-r--r--  arch/powerpc/sysdev/ge/ge_pic.h | 1
-rw-r--r--  arch/powerpc/sysdev/i8259.c | 1
-rw-r--r--  arch/powerpc/sysdev/indirect_pci.c | 6
-rw-r--r--  arch/powerpc/sysdev/mpc8xx_pic.c | 1
-rw-r--r--  arch/powerpc/sysdev/mpic_timer.c | 10
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_io.c | 1
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc.c | 1
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc_fast.c | 1
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc_slow.c | 1
-rw-r--r--  arch/powerpc/sysdev/udbg_memcons.c | 1
-rw-r--r--  arch/powerpc/sysdev/xics/icp-hv.c | 1
-rw-r--r--  arch/powerpc/xmon/xmon.c | 4
-rw-r--r--  drivers/macintosh/windfarm_lm75_sensor.c | 2
-rw-r--r--  drivers/macintosh/windfarm_max6690_sensor.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 12
-rw-r--r--  drivers/tty/Kconfig | 2
-rw-r--r--  drivers/vfio/vfio_iommu_spapr_tce.c | 28
-rw-r--r--  include/linux/mm.h | 2
-rw-r--r--  include/linux/of_fdt.h | 1
-rw-r--r--  include/math-emu/op-common.h | 9
-rw-r--r--  mm/mempolicy.c | 5
-rw-r--r--  scripts/mod/modpost.c | 8
214 files changed, 6171 insertions(+), 1649 deletions(-)
diff --git a/Documentation/devicetree/bindings/video/ssd1289fb.txt b/Documentation/devicetree/bindings/video/ssd1289fb.txt
new file mode 100644
index 000000000000..4fcd5e68cb6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/ssd1289fb.txt
@@ -0,0 +1,13 @@
1* Solomon SSD1289 Framebuffer Driver
2
3Required properties:
4 - compatible: Should be "solomon,ssd1289fb". The only supported bus for
5 now is lbc.
6 - reg: Should contain address of the controller on the LBC bus. The detail
7 was described in Documentation/devicetree/bindings/powerpc/fsl/lbc.txt
8
9Examples:
10display@2,0 {
11 compatible = "solomon,ssd1289fb";
12 reg = <0x2 0x0000 0x0004>;
13};
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 1695b6ab503d..25493a0b174c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -140,6 +140,7 @@ config PPC
140 select OLD_SIGACTION if PPC32 140 select OLD_SIGACTION if PPC32
141 select HAVE_DEBUG_STACKOVERFLOW 141 select HAVE_DEBUG_STACKOVERFLOW
142 select HAVE_IRQ_EXIT_ON_IRQ_STACK 142 select HAVE_IRQ_EXIT_ON_IRQ_STACK
143 select ARCH_USE_CMPXCHG_LOCKREF if PPC64
143 144
144config GENERIC_CSUM 145config GENERIC_CSUM
145 def_bool CPU_LITTLE_ENDIAN 146 def_bool CPU_LITTLE_ENDIAN
@@ -214,9 +215,6 @@ config DEFAULT_UIMAGE
214 Used to allow a board to specify it wants a uImage built by default 215 Used to allow a board to specify it wants a uImage built by default
215 default n 216 default n
216 217
217config REDBOOT
218 bool
219
220config ARCH_HIBERNATION_POSSIBLE 218config ARCH_HIBERNATION_POSSIBLE
221 bool 219 bool
222 default y 220 default y
@@ -384,6 +382,12 @@ config ARCH_HAS_WALK_MEMORY
384config ARCH_ENABLE_MEMORY_HOTREMOVE 382config ARCH_ENABLE_MEMORY_HOTREMOVE
385 def_bool y 383 def_bool y
386 384
385config PPC64_SUPPORTS_MEMORY_FAILURE
386 bool "Add support for memory hwpoison"
387 depends on PPC_BOOK3S_64
388 default "y" if PPC_POWERNV
389 select ARCH_SUPPORTS_MEMORY_FAILURE
390
387config KEXEC 391config KEXEC
388 bool "kexec system call" 392 bool "kexec system call"
389 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) 393 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
@@ -404,8 +408,7 @@ config KEXEC
404config CRASH_DUMP 408config CRASH_DUMP
405 bool "Build a kdump crash kernel" 409 bool "Build a kdump crash kernel"
406 depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP) 410 depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
407 select RELOCATABLE if PPC64 || 44x 411 select RELOCATABLE if PPC64 || 44x || FSL_BOOKE
408 select DYNAMIC_MEMSTART if FSL_BOOKE
409 help 412 help
410 Build a kernel suitable for use as a kdump capture kernel. 413 Build a kernel suitable for use as a kdump capture kernel.
411 The same kernel binary can be used as production kernel and dump 414 The same kernel binary can be used as production kernel and dump
@@ -886,7 +889,7 @@ config DYNAMIC_MEMSTART
886 889
887config RELOCATABLE 890config RELOCATABLE
888 bool "Build a relocatable kernel" 891 bool "Build a relocatable kernel"
889 depends on ADVANCED_OPTIONS && FLATMEM && 44x 892 depends on ADVANCED_OPTIONS && FLATMEM && (44x || FSL_BOOKE)
890 select NONSTATIC_KERNEL 893 select NONSTATIC_KERNEL
891 help 894 help
892 This builds a kernel image that is capable of running at the 895 This builds a kernel image that is capable of running at the
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index 554734ff302e..d61c03525777 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -16,6 +16,7 @@ mktree
16uImage 16uImage
17cuImage.* 17cuImage.*
18dtbImage.* 18dtbImage.*
19*.dtb
19treeImage.* 20treeImage.*
20zImage 21zImage
21zImage.initrd 22zImage.initrd
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index ca7f08cc4afd..90e9d9548660 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -71,9 +71,9 @@ src-wlib-y := string.S crt0.S crtsavres.S stdio.c main.c \
71 uartlite.c mpc52xx-psc.c 71 uartlite.c mpc52xx-psc.c
72src-wlib-$(CONFIG_40x) += 4xx.c planetcore.c 72src-wlib-$(CONFIG_40x) += 4xx.c planetcore.c
73src-wlib-$(CONFIG_44x) += 4xx.c ebony.c bamboo.c 73src-wlib-$(CONFIG_44x) += 4xx.c ebony.c bamboo.c
74src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c 74src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c fsl-soc.c
75src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c 75src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
76src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c 76src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c fsl-soc.c
77 77
78src-plat-y := of.c epapr.c 78src-plat-y := of.c epapr.c
79src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \ 79src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
@@ -95,7 +95,7 @@ src-plat-$(CONFIG_FSL_SOC_BOOKE) += cuboot-85xx.c cuboot-85xx-cpm2.c
95src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \ 95src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \
96 cuboot-c2k.c gamecube-head.S \ 96 cuboot-c2k.c gamecube-head.S \
97 gamecube.c wii-head.S wii.c holly.c \ 97 gamecube.c wii-head.S wii.c holly.c \
98 prpmc2800.c 98 prpmc2800.c fixed-head.S mvme5100.c
99src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c 99src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c
100src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c 100src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c
101src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c 101src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
@@ -286,6 +286,7 @@ image-$(CONFIG_MPC7448HPC2) += cuImage.mpc7448hpc2
286image-$(CONFIG_PPC_C2K) += cuImage.c2k 286image-$(CONFIG_PPC_C2K) += cuImage.c2k
287image-$(CONFIG_GAMECUBE) += dtbImage.gamecube 287image-$(CONFIG_GAMECUBE) += dtbImage.gamecube
288image-$(CONFIG_WII) += dtbImage.wii 288image-$(CONFIG_WII) += dtbImage.wii
289image-$(CONFIG_MVME5100) += dtbImage.mvme5100
289 290
290# Board port in arch/powerpc/platform/amigaone/Kconfig 291# Board port in arch/powerpc/platform/amigaone/Kconfig
291image-$(CONFIG_AMIGAONE) += cuImage.amigaone 292image-$(CONFIG_AMIGAONE) += cuImage.amigaone
diff --git a/arch/powerpc/boot/dts/fsl/elo3-dma-2.dtsi b/arch/powerpc/boot/dts/fsl/elo3-dma-2.dtsi
new file mode 100644
index 000000000000..d3cc8d0f7c25
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/elo3-dma-2.dtsi
@@ -0,0 +1,82 @@
1/*
2 * QorIQ Elo3 DMA device tree stub [ controller @ offset 0x102300 ]
3 *
4 * Copyright 2013 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35dma2: dma@102300 {
36 #address-cells = <1>;
37 #size-cells = <1>;
38 compatible = "fsl,elo3-dma";
39 reg = <0x102300 0x4>,
40 <0x102600 0x4>;
41 ranges = <0x0 0x102100 0x500>;
42 dma-channel@0 {
43 compatible = "fsl,eloplus-dma-channel";
44 reg = <0x0 0x80>;
45 interrupts = <464 2 0 0>;
46 };
47 dma-channel@80 {
48 compatible = "fsl,eloplus-dma-channel";
49 reg = <0x80 0x80>;
50 interrupts = <465 2 0 0>;
51 };
52 dma-channel@100 {
53 compatible = "fsl,eloplus-dma-channel";
54 reg = <0x100 0x80>;
55 interrupts = <466 2 0 0>;
56 };
57 dma-channel@180 {
58 compatible = "fsl,eloplus-dma-channel";
59 reg = <0x180 0x80>;
60 interrupts = <467 2 0 0>;
61 };
62 dma-channel@300 {
63 compatible = "fsl,eloplus-dma-channel";
64 reg = <0x300 0x80>;
65 interrupts = <468 2 0 0>;
66 };
67 dma-channel@380 {
68 compatible = "fsl,eloplus-dma-channel";
69 reg = <0x380 0x80>;
70 interrupts = <469 2 0 0>;
71 };
72 dma-channel@400 {
73 compatible = "fsl,eloplus-dma-channel";
74 reg = <0x400 0x80>;
75 interrupts = <470 2 0 0>;
76 };
77 dma-channel@480 {
78 compatible = "fsl,eloplus-dma-channel";
79 reg = <0x480 0x80>;
80 interrupts = <471 2 0 0>;
81 };
82};
diff --git a/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
index 68cc5e7f6477..642dc3a83d0e 100644
--- a/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
@@ -36,7 +36,8 @@
36 #address-cells = <2>; 36 #address-cells = <2>;
37 #size-cells = <1>; 37 #size-cells = <1>;
38 compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus"; 38 compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus";
39 interrupts = <19 2 0 0>; 39 interrupts = <19 2 0 0>,
40 <16 2 0 0>;
40}; 41};
41 42
42/* controller at 0x9000 */ 43/* controller at 0x9000 */
diff --git a/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
index adb82fd9057f..407cb5fd0f5b 100644
--- a/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
@@ -36,7 +36,8 @@
36 #address-cells = <2>; 36 #address-cells = <2>;
37 #size-cells = <1>; 37 #size-cells = <1>;
38 compatible = "fsl,p1021-elbc", "fsl,elbc", "simple-bus"; 38 compatible = "fsl,p1021-elbc", "fsl,elbc", "simple-bus";
39 interrupts = <19 2 0 0>; 39 interrupts = <19 2 0 0>,
40 <16 2 0 0>;
40}; 41};
41 42
42/* controller at 0x9000 */ 43/* controller at 0x9000 */
diff --git a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
index e179803a81ef..ebf202234549 100644
--- a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
@@ -40,7 +40,8 @@
40 * pin muxing when the DIU is enabled. 40 * pin muxing when the DIU is enabled.
41 */ 41 */
42 compatible = "fsl,p1022-elbc", "fsl,elbc"; 42 compatible = "fsl,p1022-elbc", "fsl,elbc";
43 interrupts = <19 2 0 0>; 43 interrupts = <19 2 0 0>,
44 <16 2 0 0>;
44}; 45};
45 46
46/* controller at 0x9000 */ 47/* controller at 0x9000 */
diff --git a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
index f1105bffa915..81437fdf1db4 100644
--- a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
@@ -36,7 +36,8 @@
36 #address-cells = <2>; 36 #address-cells = <2>;
37 #size-cells = <1>; 37 #size-cells = <1>;
38 compatible = "fsl,p1023-elbc", "fsl,elbc", "simple-bus"; 38 compatible = "fsl,p1023-elbc", "fsl,elbc", "simple-bus";
39 interrupts = <19 2 0 0>; 39 interrupts = <19 2 0 0>,
40 <16 2 0 0>;
40}; 41};
41 42
42/* controller at 0xa000 */ 43/* controller at 0xa000 */
diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts
index 1613d6e4049e..5ba7f01e2a29 100644
--- a/arch/powerpc/boot/dts/kilauea.dts
+++ b/arch/powerpc/boot/dts/kilauea.dts
@@ -406,7 +406,7 @@
406 406
407 MSI: ppc4xx-msi@C10000000 { 407 MSI: ppc4xx-msi@C10000000 {
408 compatible = "amcc,ppc4xx-msi", "ppc4xx-msi"; 408 compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
409 reg = < 0x0 0xEF620000 0x100>; 409 reg = <0xEF620000 0x100>;
410 sdr-base = <0x4B0>; 410 sdr-base = <0x4B0>;
411 msi-data = <0x00000000>; 411 msi-data = <0x00000000>;
412 msi-mask = <0x44440000>; 412 msi-mask = <0x44440000>;
diff --git a/arch/powerpc/boot/dts/mvme5100.dts b/arch/powerpc/boot/dts/mvme5100.dts
new file mode 100644
index 000000000000..1ecb341a232a
--- /dev/null
+++ b/arch/powerpc/boot/dts/mvme5100.dts
@@ -0,0 +1,185 @@
1/*
2 * Device Tree Source for Motorola/Emerson MVME5100.
3 *
4 * Copyright 2013 CSC Australia Pty. Ltd.
5 *
6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without
8 * any warranty of any kind, whether express or implied.
9 */
10
11/dts-v1/;
12
13/ {
14 model = "MVME5100";
15 compatible = "MVME5100";
16 #address-cells = <1>;
17 #size-cells = <1>;
18
19 aliases {
20 serial0 = &serial0;
21 pci0 = &pci0;
22 };
23
24 cpus {
25 #address-cells = <1>;
26 #size-cells = <0>;
27
28 PowerPC,7410 {
29 device_type = "cpu";
30 reg = <0x0>;
31 /* Following required by dtc but not used */
32 d-cache-line-size = <32>;
33 i-cache-line-size = <32>;
34 i-cache-size = <32768>;
35 d-cache-size = <32768>;
36 timebase-frequency = <25000000>;
37 clock-frequency = <500000000>;
38 bus-frequency = <100000000>;
39 };
40 };
41
42 memory {
43 device_type = "memory";
44 reg = <0x0 0x20000000>;
45 };
46
47 hawk@fef80000 {
48 #address-cells = <1>;
49 #size-cells = <1>;
50 compatible = "hawk-bridge", "simple-bus";
51 ranges = <0x0 0xfef80000 0x10000>;
52 reg = <0xfef80000 0x10000>;
53
54 serial0: serial@8000 {
55 device_type = "serial";
56 compatible = "ns16550";
57 reg = <0x8000 0x80>;
58 reg-shift = <4>;
59 clock-frequency = <1843200>;
60 current-speed = <9600>;
61 interrupts = <1 1>; // IRQ1 Level Active Low.
62 interrupt-parent = <&mpic>;
63 };
64
65 serial1: serial@8200 {
66 device_type = "serial";
67 compatible = "ns16550";
68 reg = <0x8200 0x80>;
69 reg-shift = <4>;
70 clock-frequency = <1843200>;
71 current-speed = <9600>;
72 interrupts = <1 1>; // IRQ1 Level Active Low.
73 interrupt-parent = <&mpic>;
74 };
75
76 mpic: interrupt-controller@f3f80000 {
77 #interrupt-cells = <2>;
78 #address-cells = <0>;
79 device_type = "open-pic";
80 compatible = "chrp,open-pic";
81 interrupt-controller;
82 reg = <0xf3f80000 0x40000>;
83 };
84 };
85
86 pci0: pci@feff0000 {
87 #address-cells = <3>;
88 #size-cells = <2>;
89 #interrupt-cells = <1>;
90 device_type = "pci";
91 compatible = "hawk-pci";
92 reg = <0xfec00000 0x400000>;
93 8259-interrupt-acknowledge = <0xfeff0030>;
94 ranges = <0x1000000 0x0 0x0 0xfe000000 0x0 0x800000
95 0x2000000 0x0 0x80000000 0x80000000 0x0 0x74000000>;
96 bus-range = <0 255>;
97 clock-frequency = <33333333>;
98 interrupt-parent = <&mpic>;
99 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
100 interrupt-map = <
101
102 /*
103 * This definition (IDSEL 11) duplicates the
104 * interrupts definition in the i8259
105 * interrupt controller below.
106 *
107 * Do not change the interrupt sense/polarity from
108 * 0x2 to anything else, doing so will cause endless
109 * "spurious" i8259 interrupts to be fielded.
110 */
111 // IDSEL 11 - iPMC712 PCI/ISA Bridge
112 0x5800 0x0 0x0 0x1 &mpic 0x0 0x2
113 0x5800 0x0 0x0 0x2 &mpic 0x0 0x2
114 0x5800 0x0 0x0 0x3 &mpic 0x0 0x2
115 0x5800 0x0 0x0 0x4 &mpic 0x0 0x2
116
117 /* IDSEL 12 - Not Used */
118
119 /* IDSEL 13 - Universe VME Bridge */
120 0x6800 0x0 0x0 0x1 &mpic 0x5 0x1
121 0x6800 0x0 0x0 0x2 &mpic 0x6 0x1
122 0x6800 0x0 0x0 0x3 &mpic 0x7 0x1
123 0x6800 0x0 0x0 0x4 &mpic 0x8 0x1
124
125 /* IDSEL 14 - ENET 1 */
126 0x7000 0x0 0x0 0x1 &mpic 0x2 0x1
127
128 /* IDSEL 15 - Not Used */
129
130 /* IDSEL 16 - PMC Slot 1 */
131 0x8000 0x0 0x0 0x1 &mpic 0x9 0x1
132 0x8000 0x0 0x0 0x2 &mpic 0xa 0x1
133 0x8000 0x0 0x0 0x3 &mpic 0xb 0x1
134 0x8000 0x0 0x0 0x4 &mpic 0xc 0x1
135
136 /* IDSEL 17 - PMC Slot 2 */
137 0x8800 0x0 0x0 0x1 &mpic 0xc 0x1
138 0x8800 0x0 0x0 0x2 &mpic 0x9 0x1
139 0x8800 0x0 0x0 0x3 &mpic 0xa 0x1
140 0x8800 0x0 0x0 0x4 &mpic 0xb 0x1
141
142 /* IDSEL 18 - Not Used */
143
144 /* IDSEL 19 - ENET 2 */
145 0x9800 0x0 0x0 0x1 &mpic 0xd 0x1
146
147 /* IDSEL 20 - PMCSPAN (PCI-X) */
148 0xa000 0x0 0x0 0x1 &mpic 0x9 0x1
149 0xa000 0x0 0x0 0x2 &mpic 0xa 0x1
150 0xa000 0x0 0x0 0x3 &mpic 0xb 0x1
151 0xa000 0x0 0x0 0x4 &mpic 0xc 0x1
152
153 >;
154
155 isa {
156 #address-cells = <2>;
157 #size-cells = <1>;
158 #interrupt-cells = <2>;
159 device_type = "isa";
160 compatible = "isa";
161 ranges = <0x00000001 0 0x01000000 0 0x00000000 0x00001000>;
162 interrupt-parent = <&i8259>;
163
164 i8259: interrupt-controller@20 {
165 #interrupt-cells = <2>;
166 #address-cells = <0>;
167 interrupts = <0 2>;
168 device_type = "interrupt-controller";
169 compatible = "chrp,iic";
170 interrupt-controller;
171 reg = <1 0x00000020 0x00000002
172 1 0x000000a0 0x00000002
173 1 0x000004d0 0x00000002>;
174 interrupt-parent = <&mpic>;
175 };
176
177 };
178
179 };
180
181 chosen {
182 linux,stdout-path = &serial0;
183 };
184
185};
diff --git a/arch/powerpc/boot/dts/p1010rdb-pa.dts b/arch/powerpc/boot/dts/p1010rdb-pa.dts
new file mode 100644
index 000000000000..767d4c032857
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb-pa.dts
@@ -0,0 +1,23 @@
1/*
2 * P1010 RDB Device Tree Source
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12/include/ "fsl/p1010si-pre.dtsi"
13
14/ {
15 model = "fsl,P1010RDB";
16 compatible = "fsl,P1010RDB";
17
18 /include/ "p1010rdb_32b.dtsi"
19};
20
21/include/ "p1010rdb.dtsi"
22/include/ "p1010rdb-pa.dtsi"
23/include/ "fsl/p1010si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1010rdb-pa.dtsi b/arch/powerpc/boot/dts/p1010rdb-pa.dtsi
new file mode 100644
index 000000000000..434fb2d58575
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb-pa.dtsi
@@ -0,0 +1,85 @@
1/*
2 * P1010 RDB Device Tree Source stub (no addresses or top-level ranges)
3 *
4 * Copyright 2013 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35&ifc_nand {
36 partition@0 {
37 /* This location must not be altered */
38 /* 1MB for u-boot Bootloader Image */
39 reg = <0x0 0x00100000>;
40 label = "NAND U-Boot Image";
41 read-only;
42 };
43
44 partition@100000 {
45 /* 1MB for DTB Image */
46 reg = <0x00100000 0x00100000>;
47 label = "NAND DTB Image";
48 };
49
50 partition@200000 {
51 /* 4MB for Linux Kernel Image */
52 reg = <0x00200000 0x00400000>;
53 label = "NAND Linux Kernel Image";
54 };
55
56 partition@600000 {
57 /* 4MB for Compressed Root file System Image */
58 reg = <0x00600000 0x00400000>;
59 label = "NAND Compressed RFS Image";
60 };
61
62 partition@a00000 {
63 /* 15MB for JFFS2 based Root file System */
64 reg = <0x00a00000 0x00f00000>;
65 label = "NAND JFFS2 Root File System";
66 };
67
68 partition@1900000 {
69 /* 7MB for User Area */
70 reg = <0x01900000 0x00700000>;
71 label = "NAND User area";
72 };
73};
74
75&phy0 {
76 interrupts = <1 1 0 0>;
77};
78
79&phy1 {
80 interrupts = <2 1 0 0>;
81};
82
83&phy2 {
84 interrupts = <4 1 0 0>;
85};
diff --git a/arch/powerpc/boot/dts/p1010rdb_36b.dts b/arch/powerpc/boot/dts/p1010rdb-pa_36b.dts
index 64776f4a4651..3033371bc007 100644
--- a/arch/powerpc/boot/dts/p1010rdb_36b.dts
+++ b/arch/powerpc/boot/dts/p1010rdb-pa_36b.dts
@@ -38,52 +38,9 @@
38 model = "fsl,P1010RDB"; 38 model = "fsl,P1010RDB";
39 compatible = "fsl,P1010RDB"; 39 compatible = "fsl,P1010RDB";
40 40
41 memory { 41 /include/ "p1010rdb_36b.dtsi"
42 device_type = "memory";
43 };
44
45 board_ifc: ifc: ifc@fffe1e000 {
46 /* NOR, NAND Flashes and CPLD on board */
47 ranges = <0x0 0x0 0xf 0xee000000 0x02000000
48 0x1 0x0 0xf 0xff800000 0x00010000
49 0x3 0x0 0xf 0xffb00000 0x00000020>;
50 reg = <0xf 0xffe1e000 0 0x2000>;
51 };
52
53 board_soc: soc: soc@fffe00000 {
54 ranges = <0x0 0xf 0xffe00000 0x100000>;
55 };
56
57 pci0: pcie@fffe09000 {
58 reg = <0xf 0xffe09000 0 0x1000>;
59 ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
60 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
61 pcie@0 {
62 ranges = <0x2000000 0x0 0xc0000000
63 0x2000000 0x0 0xc0000000
64 0x0 0x20000000
65
66 0x1000000 0x0 0x0
67 0x1000000 0x0 0x0
68 0x0 0x100000>;
69 };
70 };
71
72 pci1: pcie@fffe0a000 {
73 reg = <0xf 0xffe0a000 0 0x1000>;
74 ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
75 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
76 pcie@0 {
77 ranges = <0x2000000 0x0 0xc0000000
78 0x2000000 0x0 0xc0000000
79 0x0 0x20000000
80
81 0x1000000 0x0 0x0
82 0x1000000 0x0 0x0
83 0x0 0x100000>;
84 };
85 };
86}; 42};
87 43
88/include/ "p1010rdb.dtsi" 44/include/ "p1010rdb.dtsi"
45/include/ "p1010rdb-pa.dtsi"
89/include/ "fsl/p1010si-post.dtsi" 46/include/ "fsl/p1010si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1010rdb-pb.dts b/arch/powerpc/boot/dts/p1010rdb-pb.dts
new file mode 100644
index 000000000000..6eeb7d3185be
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb-pb.dts
@@ -0,0 +1,35 @@
1/*
2 * P1010 RDB Device Tree Source
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12/include/ "fsl/p1010si-pre.dtsi"
13
14/ {
15 model = "fsl,P1010RDB-PB";
16 compatible = "fsl,P1010RDB-PB";
17
18 /include/ "p1010rdb_32b.dtsi"
19};
20
21/include/ "p1010rdb.dtsi"
22
23&phy0 {
24 interrupts = <0 1 0 0>;
25};
26
27&phy1 {
28 interrupts = <2 1 0 0>;
29};
30
31&phy2 {
32 interrupts = <1 1 0 0>;
33};
34
35/include/ "fsl/p1010si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1010rdb-pb_36b.dts b/arch/powerpc/boot/dts/p1010rdb-pb_36b.dts
new file mode 100644
index 000000000000..7ab3c907b326
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb-pb_36b.dts
@@ -0,0 +1,58 @@
1/*
2 * P1010 RDB Device Tree Source (36-bit address map)
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/include/ "fsl/p1010si-pre.dtsi"
36
37/ {
38 model = "fsl,P1010RDB-PB";
39 compatible = "fsl,P1010RDB-PB";
40
41 /include/ "p1010rdb_36b.dtsi"
42};
43
44/include/ "p1010rdb.dtsi"
45
46&phy0 {
47 interrupts = <0 1 0 0>;
48};
49
50&phy1 {
51 interrupts = <2 1 0 0>;
52};
53
54&phy2 {
55 interrupts = <1 1 0 0>;
56};
57
58/include/ "fsl/p1010si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1010rdb.dts b/arch/powerpc/boot/dts/p1010rdb.dts
deleted file mode 100644
index b868d22984e9..000000000000
--- a/arch/powerpc/boot/dts/p1010rdb.dts
+++ /dev/null
@@ -1,66 +0,0 @@
1/*
2 * P1010 RDB Device Tree Source
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12/include/ "fsl/p1010si-pre.dtsi"
13
14/ {
15 model = "fsl,P1010RDB";
16 compatible = "fsl,P1010RDB";
17
18 memory {
19 device_type = "memory";
20 };
21
22 board_ifc: ifc: ifc@ffe1e000 {
23 /* NOR, NAND Flashes and CPLD on board */
24 ranges = <0x0 0x0 0x0 0xee000000 0x02000000
25 0x1 0x0 0x0 0xff800000 0x00010000
26 0x3 0x0 0x0 0xffb00000 0x00000020>;
27 reg = <0x0 0xffe1e000 0 0x2000>;
28 };
29
30 board_soc: soc: soc@ffe00000 {
31 ranges = <0x0 0x0 0xffe00000 0x100000>;
32 };
33
34 pci0: pcie@ffe09000 {
35 reg = <0 0xffe09000 0 0x1000>;
36 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
37 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
38 pcie@0 {
39 ranges = <0x2000000 0x0 0xa0000000
40 0x2000000 0x0 0xa0000000
41 0x0 0x20000000
42
43 0x1000000 0x0 0x0
44 0x1000000 0x0 0x0
45 0x0 0x100000>;
46 };
47 };
48
49 pci1: pcie@ffe0a000 {
50 reg = <0 0xffe0a000 0 0x1000>;
51 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
52 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
53 pcie@0 {
54 ranges = <0x2000000 0x0 0x80000000
55 0x2000000 0x0 0x80000000
56 0x0 0x20000000
57
58 0x1000000 0x0 0x0
59 0x1000000 0x0 0x0
60 0x0 0x100000>;
61 };
62 };
63};
64
65/include/ "p1010rdb.dtsi"
66/include/ "fsl/p1010si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1010rdb.dtsi b/arch/powerpc/boot/dts/p1010rdb.dtsi
index ec7c27a64671..ea534efa790d 100644
--- a/arch/powerpc/boot/dts/p1010rdb.dtsi
+++ b/arch/powerpc/boot/dts/p1010rdb.dtsi
@@ -69,49 +69,11 @@
69 }; 69 };
70 }; 70 };
71 71
72 nand@1,0 { 72 ifc_nand: nand@1,0 {
73 #address-cells = <1>; 73 #address-cells = <1>;
74 #size-cells = <1>; 74 #size-cells = <1>;
75 compatible = "fsl,ifc-nand"; 75 compatible = "fsl,ifc-nand";
76 reg = <0x1 0x0 0x10000>; 76 reg = <0x1 0x0 0x10000>;
77
78 partition@0 {
79 /* This location must not be altered */
80 /* 1MB for u-boot Bootloader Image */
81 reg = <0x0 0x00100000>;
82 label = "NAND U-Boot Image";
83 read-only;
84 };
85
86 partition@100000 {
87 /* 1MB for DTB Image */
88 reg = <0x00100000 0x00100000>;
89 label = "NAND DTB Image";
90 };
91
92 partition@200000 {
93 /* 4MB for Linux Kernel Image */
94 reg = <0x00200000 0x00400000>;
95 label = "NAND Linux Kernel Image";
96 };
97
98 partition@600000 {
99 /* 4MB for Compressed Root file System Image */
100 reg = <0x00600000 0x00400000>;
101 label = "NAND Compressed RFS Image";
102 };
103
104 partition@a00000 {
105 /* 15MB for JFFS2 based Root file System */
106 reg = <0x00a00000 0x00f00000>;
107 label = "NAND JFFS2 Root File System";
108 };
109
110 partition@1900000 {
111 /* 7MB for User Area */
112 reg = <0x01900000 0x00700000>;
113 label = "NAND User area";
114 };
115 }; 77 };
116 78
117 cpld@3,0 { 79 cpld@3,0 {
@@ -193,17 +155,14 @@
193 155
194 mdio@24000 { 156 mdio@24000 {
195 phy0: ethernet-phy@0 { 157 phy0: ethernet-phy@0 {
196 interrupts = <3 1 0 0>;
197 reg = <0x1>; 158 reg = <0x1>;
198 }; 159 };
199 160
200 phy1: ethernet-phy@1 { 161 phy1: ethernet-phy@1 {
201 interrupts = <2 1 0 0>;
202 reg = <0x0>; 162 reg = <0x0>;
203 }; 163 };
204 164
205 phy2: ethernet-phy@2 { 165 phy2: ethernet-phy@2 {
206 interrupts = <2 1 0 0>;
207 reg = <0x2>; 166 reg = <0x2>;
208 }; 167 };
209 168
diff --git a/arch/powerpc/boot/dts/p1010rdb_32b.dtsi b/arch/powerpc/boot/dts/p1010rdb_32b.dtsi
new file mode 100644
index 000000000000..fdc19aab2f70
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb_32b.dtsi
@@ -0,0 +1,79 @@
1/*
2 * P1010 RDB Device Tree Source stub (no addresses or top-level ranges)
3 *
4 * Copyright 2013 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35memory {
36 device_type = "memory";
37};
38
39board_ifc: ifc: ifc@ffe1e000 {
40 /* NOR, NAND Flashes and CPLD on board */
41 ranges = <0x0 0x0 0x0 0xee000000 0x02000000
42 0x1 0x0 0x0 0xff800000 0x00010000
43 0x3 0x0 0x0 0xffb00000 0x00000020>;
44 reg = <0x0 0xffe1e000 0 0x2000>;
45};
46
47board_soc: soc: soc@ffe00000 {
48 ranges = <0x0 0x0 0xffe00000 0x100000>;
49};
50
51pci0: pcie@ffe09000 {
52 reg = <0 0xffe09000 0 0x1000>;
53 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
54 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
55 pcie@0 {
56 ranges = <0x2000000 0x0 0xa0000000
57 0x2000000 0x0 0xa0000000
58 0x0 0x20000000
59
60 0x1000000 0x0 0x0
61 0x1000000 0x0 0x0
62 0x0 0x100000>;
63 };
64};
65
66pci1: pcie@ffe0a000 {
67 reg = <0 0xffe0a000 0 0x1000>;
68 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
69 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
70 pcie@0 {
71 ranges = <0x2000000 0x0 0x80000000
72 0x2000000 0x0 0x80000000
73 0x0 0x20000000
74
75 0x1000000 0x0 0x0
76 0x1000000 0x0 0x0
77 0x0 0x100000>;
78 };
79};
diff --git a/arch/powerpc/boot/dts/p1010rdb_36b.dtsi b/arch/powerpc/boot/dts/p1010rdb_36b.dtsi
new file mode 100644
index 000000000000..de2fceed4f79
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb_36b.dtsi
@@ -0,0 +1,79 @@
1/*
2 * P1010 RDB Device Tree Source stub (no addresses or top-level ranges)
3 *
4 * Copyright 2013 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35memory {
36 device_type = "memory";
37};
38
39board_ifc: ifc: ifc@fffe1e000 {
40 /* NOR, NAND Flashes and CPLD on board */
41 ranges = <0x0 0x0 0xf 0xee000000 0x02000000
42 0x1 0x0 0xf 0xff800000 0x00010000
43 0x3 0x0 0xf 0xffb00000 0x00000020>;
44 reg = <0xf 0xffe1e000 0 0x2000>;
45};
46
47board_soc: soc: soc@fffe00000 {
48 ranges = <0x0 0xf 0xffe00000 0x100000>;
49};
50
51pci0: pcie@fffe09000 {
52 reg = <0xf 0xffe09000 0 0x1000>;
53 ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
54 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
55 pcie@0 {
56 ranges = <0x2000000 0x0 0xc0000000
57 0x2000000 0x0 0xc0000000
58 0x0 0x20000000
59
60 0x1000000 0x0 0x0
61 0x1000000 0x0 0x0
62 0x0 0x100000>;
63 };
64};
65
66pci1: pcie@fffe0a000 {
67 reg = <0xf 0xffe0a000 0 0x1000>;
68 ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
69 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
70 pcie@0 {
71 ranges = <0x2000000 0x0 0xc0000000
72 0x2000000 0x0 0xc0000000
73 0x0 0x20000000
74
75 0x1000000 0x0 0x0
76 0x1000000 0x0 0x0
77 0x0 0x100000>;
78 };
79};
diff --git a/arch/powerpc/boot/dts/p1022ds.dtsi b/arch/powerpc/boot/dts/p1022ds.dtsi
index 873da350d01b..957e0dc1dc0f 100644
--- a/arch/powerpc/boot/dts/p1022ds.dtsi
+++ b/arch/powerpc/boot/dts/p1022ds.dtsi
@@ -146,8 +146,9 @@
146 */ 146 */
147 }; 147 };
148 rtc@68 { 148 rtc@68 {
149 compatible = "dallas,ds1339"; 149 compatible = "dallas,ds3232";
150 reg = <0x68>; 150 reg = <0x68>;
151 interrupts = <0x1 0x1 0 0>;
151 }; 152 };
152 adt7461@4c { 153 adt7461@4c {
153 compatible = "adi,adt7461"; 154 compatible = "adi,adt7461";
diff --git a/arch/powerpc/boot/dts/p1025twr.dts b/arch/powerpc/boot/dts/p1025twr.dts
new file mode 100644
index 000000000000..9036a4987905
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1025twr.dts
@@ -0,0 +1,95 @@
1/*
2 * P1025 TWR Device Tree Source (32-bit address map)
3 *
4 * Copyright 2013 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/include/ "fsl/p1021si-pre.dtsi"
36/ {
37 model = "fsl,P1025";
38 compatible = "fsl,TWR-P1025";
39
40 memory {
41 device_type = "memory";
42 };
43
44 lbc: localbus@ffe05000 {
45 reg = <0 0xffe05000 0 0x1000>;
46
47 /* NOR Flash and SSD1289 */
48 ranges = <0x0 0x0 0x0 0xec000000 0x04000000
49 0x2 0x0 0x0 0xe0000000 0x00020000>;
50 };
51
52 soc: soc@ffe00000 {
53 ranges = <0x0 0x0 0xffe00000 0x100000>;
54 };
55
56 pci0: pcie@ffe09000 {
57 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
58 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
59 reg = <0 0xffe09000 0 0x1000>;
60 pcie@0 {
61 ranges = <0x2000000 0x0 0xa0000000
62 0x2000000 0x0 0xa0000000
63 0x0 0x20000000
64
65 0x1000000 0x0 0x0
66 0x1000000 0x0 0x0
67 0x0 0x100000>;
68 };
69 };
70
71 pci1: pcie@ffe0a000 {
72 reg = <0 0xffe0a000 0 0x1000>;
73 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
74 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
75 pcie@0 {
76 ranges = <0x2000000 0x0 0x80000000
77 0x2000000 0x0 0x80000000
78 0x0 0x20000000
79
80 0x1000000 0x0 0x0
81 0x1000000 0x0 0x0
82 0x0 0x100000>;
83 };
84 };
85
86 qe: qe@ffe80000 {
87 ranges = <0x0 0x0 0xffe80000 0x40000>;
88 reg = <0 0xffe80000 0 0x480>;
89 brg-frequency = <0>;
90 bus-frequency = <0>;
91 };
92};
93
94/include/ "p1025twr.dtsi"
95/include/ "fsl/p1021si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1025twr.dtsi b/arch/powerpc/boot/dts/p1025twr.dtsi
new file mode 100644
index 000000000000..8453501c256e
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1025twr.dtsi
@@ -0,0 +1,280 @@
1/*
2 * P1025 TWR Device Tree Source stub (no addresses or top-level ranges)
3 *
4 * Copyright 2013 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/{
36 aliases {
37 ethernet3 = &enet3;
38 ethernet4 = &enet4;
39 };
40};
41
42&lbc {
43 nor@0,0 {
44 #address-cells = <1>;
45 #size-cells = <1>;
46 compatible = "cfi-flash";
47 reg = <0x0 0x0 0x4000000>;
48 bank-width = <2>;
49 device-width = <1>;
50
51 partition@0 {
52 /* This location must not be altered */
53 /* 256KB for Vitesse 7385 Switch firmware */
54 reg = <0x0 0x00040000>;
55 label = "NOR Vitesse-7385 Firmware";
56 read-only;
57 };
58
59 partition@40000 {
60 /* 256KB for DTB Image */
61 reg = <0x00040000 0x00040000>;
62 label = "NOR DTB Image";
63 };
64
65 partition@80000 {
66 /* 5.5 MB for Linux Kernel Image */
67 reg = <0x00080000 0x00580000>;
68 label = "NOR Linux Kernel Image";
69 };
70
71 partition@400000 {
72 /* 56.75MB for Root file System */
73 reg = <0x00600000 0x038c0000>;
74 label = "NOR Root File System";
75 };
76
77 partition@ec0000 {
78 /* This location must not be altered */
79 /* 256KB for QE ucode firmware*/
80 reg = <0x03ec0000 0x00040000>;
81 label = "NOR QE microcode firmware";
82 read-only;
83 };
84
85 partition@f00000 {
86 /* This location must not be altered */
87 /* 512KB for u-boot Bootloader Image */
88 /* 512KB for u-boot Environment Variables */
89 reg = <0x03f00000 0x00100000>;
90 label = "NOR U-Boot Image";
91 read-only;
92 };
93 };
94
95 /* CS2 for Display */
96 display@2,0 {
97 compatible = "solomon,ssd1289fb";
98 reg = <0x2 0x0000 0x0004>;
99 };
100
101};
102
103&soc {
104 usb@22000 {
105 phy_type = "ulpi";
106 };
107
108 mdio@24000 {
109 phy0: ethernet-phy@2 {
110 interrupt-parent = <&mpic>;
111 interrupts = <1 1 0 0>;
112 reg = <0x2>;
113 };
114
115 phy1: ethernet-phy@1 {
116 interrupt-parent = <&mpic>;
117 interrupts = <2 1 0 0>;
118 reg = <0x1>;
119 };
120
121 tbi0: tbi-phy@11 {
122 reg = <0x11>;
123 device_type = "tbi-phy";
124 };
125 };
126
127 mdio@25000 {
128 tbi1: tbi-phy@11 {
129 reg = <0x11>;
130 device_type = "tbi-phy";
131 };
132 };
133
134 mdio@26000 {
135 tbi2: tbi-phy@11 {
136 reg = <0x11>;
137 device_type = "tbi-phy";
138 };
139 };
140
141 enet0: ethernet@b0000 {
142 phy-handle = <&phy0>;
143 phy-connection-type = "rgmii-id";
144
145 };
146
147 enet1: ethernet@b1000 {
148 status = "disabled";
149 };
150
151 enet2: ethernet@b2000 {
152 phy-handle = <&phy1>;
153 phy-connection-type = "rgmii-id";
154 };
155
156 par_io@e0100 {
157 #address-cells = <1>;
158 #size-cells = <1>;
159 reg = <0xe0100 0x60>;
160 ranges = <0x0 0xe0100 0x60>;
161 device_type = "par_io";
162 num-ports = <3>;
163 pio1: ucc_pin@01 {
164 pio-map = <
165 /* port pin dir open_drain assignment has_irq */
166 0x1 0x13 0x1 0x0 0x1 0x0 /* QE_MUX_MDC */
167 0x1 0x14 0x3 0x0 0x1 0x0 /* QE_MUX_MDIO */
168 0x0 0x17 0x2 0x0 0x2 0x0 /* CLK12 */
169 0x0 0x18 0x2 0x0 0x1 0x0 /* CLK9 */
170 0x0 0x7 0x1 0x0 0x2 0x0 /* ENET1_TXD0_SER1_TXD0 */
171 0x0 0x9 0x1 0x0 0x2 0x0 /* ENET1_TXD1_SER1_TXD1 */
172 0x0 0xb 0x1 0x0 0x2 0x0 /* ENET1_TXD2_SER1_TXD2 */
173 0x0 0xc 0x1 0x0 0x2 0x0 /* ENET1_TXD3_SER1_TXD3 */
174 0x0 0x6 0x2 0x0 0x2 0x0 /* ENET1_RXD0_SER1_RXD0 */
175 0x0 0xa 0x2 0x0 0x2 0x0 /* ENET1_RXD1_SER1_RXD1 */
176 0x0 0xe 0x2 0x0 0x2 0x0 /* ENET1_RXD2_SER1_RXD2 */
177 0x0 0xf 0x2 0x0 0x2 0x0 /* ENET1_RXD3_SER1_RXD3 */
178 0x0 0x5 0x1 0x0 0x2 0x0 /* ENET1_TX_EN_SER1_RTS_B */
179 0x0 0xd 0x1 0x0 0x2 0x0 /* ENET1_TX_ER */
180 0x0 0x4 0x2 0x0 0x2 0x0 /* ENET1_RX_DV_SER1_CTS_B */
181 0x0 0x8 0x2 0x0 0x2 0x0 /* ENET1_RX_ER_SER1_CD_B */
182 0x0 0x11 0x2 0x0 0x2 0x0 /* ENET1_CRS */
183 0x0 0x10 0x2 0x0 0x2 0x0>; /* ENET1_COL */
184 };
185
186 pio2: ucc_pin@02 {
187 pio-map = <
188 /* port pin dir open_drain assignment has_irq */
189 0x1 0x13 0x1 0x0 0x1 0x0 /* QE_MUX_MDC */
190 0x1 0x14 0x3 0x0 0x1 0x0 /* QE_MUX_MDIO */
191 0x1 0xb 0x2 0x0 0x1 0x0 /* CLK13 */
192 0x1 0x7 0x1 0x0 0x2 0x0 /* ENET5_TXD0_SER5_TXD0 */
193 0x1 0xa 0x1 0x0 0x2 0x0 /* ENET5_TXD1_SER5_TXD1 */
194 0x1 0x6 0x2 0x0 0x2 0x0 /* ENET5_RXD0_SER5_RXD0 */
195 0x1 0x9 0x2 0x0 0x2 0x0 /* ENET5_RXD1_SER5_RXD1 */
196 0x1 0x5 0x1 0x0 0x2 0x0 /* ENET5_TX_EN_SER5_RTS_B */
197 0x1 0x4 0x2 0x0 0x2 0x0 /* ENET5_RX_DV_SER5_CTS_B */
198 0x1 0x8 0x2 0x0 0x2 0x0>; /* ENET5_RX_ER_SER5_CD_B */
199 };
200
201 pio3: ucc_pin@03 {
202 pio-map = <
203 /* port pin dir open_drain assignment has_irq */
204 0x0 0x16 0x2 0x0 0x2 0x0 /* SER7_CD_B*/
205 0x0 0x12 0x2 0x0 0x2 0x0 /* SER7_CTS_B*/
206 0x0 0x13 0x1 0x0 0x2 0x0 /* SER7_RTS_B*/
207 0x0 0x14 0x2 0x0 0x2 0x0 /* SER7_RXD0*/
208 0x0 0x15 0x1 0x0 0x2 0x0>; /* SER7_TXD0*/
209 };
210
211 pio4: ucc_pin@04 {
212 pio-map = <
213 /* port pin dir open_drain assignment has_irq */
214 0x1 0x0 0x2 0x0 0x2 0x0 /* SER3_CD_B*/
215 0x0 0x1c 0x2 0x0 0x2 0x0 /* SER3_CTS_B*/
216 0x0 0x1d 0x1 0x0 0x2 0x0 /* SER3_RTS_B*/
217 0x0 0x1e 0x2 0x0 0x2 0x0 /* SER3_RXD0*/
218 0x0 0x1f 0x1 0x0 0x2 0x0>; /* SER3_TXD0*/
219 };
220 };
221};
222
223&qe {
224 enet3: ucc@2000 {
225 device_type = "network";
226 compatible = "ucc_geth";
227 rx-clock-name = "clk12";
228 tx-clock-name = "clk9";
229 pio-handle = <&pio1>;
230 phy-handle = <&qe_phy0>;
231 phy-connection-type = "mii";
232 };
233
234 mdio@2120 {
235 qe_phy0: ethernet-phy@18 {
236 interrupt-parent = <&mpic>;
237 interrupts = <4 1 0 0>;
238 reg = <0x18>;
239 device_type = "ethernet-phy";
240 };
241 qe_phy1: ethernet-phy@19 {
242 interrupt-parent = <&mpic>;
243 interrupts = <5 1 0 0>;
244 reg = <0x19>;
245 device_type = "ethernet-phy";
246 };
247 tbi-phy@11 {
248 reg = <0x11>;
249 device_type = "tbi-phy";
250 };
251 };
252
253 enet4: ucc@2400 {
254 device_type = "network";
255 compatible = "ucc_geth";
256 rx-clock-name = "none";
257 tx-clock-name = "clk13";
258 pio-handle = <&pio2>;
259 phy-handle = <&qe_phy1>;
260 phy-connection-type = "rmii";
261 };
262
263 serial2: ucc@2600 {
264 device_type = "serial";
265 compatible = "ucc_uart";
266 port-number = <0>;
267 rx-clock-name = "brg6";
268 tx-clock-name = "brg6";
269 pio-handle = <&pio3>;
270 };
271
272 serial3: ucc@2200 {
273 device_type = "serial";
274 compatible = "ucc_uart";
275 port-number = <1>;
276 rx-clock-name = "brg2";
277 tx-clock-name = "brg2";
278 pio-handle = <&pio4>;
279 };
280};
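
The NOR partition comments above give the intended sizes; a quick standalone check (the offsets and sizes are copied verbatim from the reg properties, nothing here is new) confirms the six partitions tile the 64 MB flash exactly:

/* Quick standalone check of the NOR partition map above. */
#include <stdio.h>

struct part { unsigned long offset, size; const char *label; };

static const struct part parts[] = {
	{ 0x00000000, 0x00040000, "NOR Vitesse-7385 Firmware" },
	{ 0x00040000, 0x00040000, "NOR DTB Image" },
	{ 0x00080000, 0x00580000, "NOR Linux Kernel Image" },
	{ 0x00600000, 0x038c0000, "NOR Root File System" },
	{ 0x03ec0000, 0x00040000, "NOR QE microcode firmware" },
	{ 0x03f00000, 0x00100000, "NOR U-Boot Image" },
};

int main(void)
{
	unsigned long next = 0;
	unsigned int i;

	for (i = 0; i < sizeof(parts) / sizeof(parts[0]); i++) {
		if (parts[i].offset != next)
			printf("gap/overlap before %s\n", parts[i].label);
		printf("%-28s %7.2f MB\n", parts[i].label,
		       parts[i].size / (1024.0 * 1024.0));
		next = parts[i].offset + parts[i].size;
	}
	/* 0x4000000 is the full reg size of the cfi-flash node above */
	printf("total: 0x%lx bytes (%s the 0x4000000-byte flash)\n", next,
	       next == 0x4000000 ? "exactly fills" : "does not match");
	return 0;
}
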
diff --git a/arch/powerpc/boot/dts/virtex440-ml507.dts b/arch/powerpc/boot/dts/virtex440-ml507.dts
index fc7073bc547e..391a4e299783 100644
--- a/arch/powerpc/boot/dts/virtex440-ml507.dts
+++ b/arch/powerpc/boot/dts/virtex440-ml507.dts
@@ -257,6 +257,8 @@
257 #size-cells = <1>; 257 #size-cells = <1>;
258 compatible = "xlnx,compound"; 258 compatible = "xlnx,compound";
259 ethernet@81c00000 { 259 ethernet@81c00000 {
260 #address-cells = <1>;
261 #size-cells = <0>;
260 compatible = "xlnx,xps-ll-temac-1.01.b"; 262 compatible = "xlnx,xps-ll-temac-1.01.b";
261 device_type = "network"; 263 device_type = "network";
262 interrupt-parent = <&xps_intc_0>; 264 interrupt-parent = <&xps_intc_0>;
diff --git a/arch/powerpc/boot/mvme5100.c b/arch/powerpc/boot/mvme5100.c
new file mode 100644
index 000000000000..cb865f83c60b
--- /dev/null
+++ b/arch/powerpc/boot/mvme5100.c
@@ -0,0 +1,27 @@
1/*
2 * Motorola/Emerson MVME5100 with PPCBug firmware.
3 *
4 * Author: Stephen Chivers <schivers@csc.com>
5 *
6 * Copyright 2013 CSC Australia Pty. Ltd.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 */
13#include "types.h"
14#include "ops.h"
15#include "io.h"
16
17BSS_STACK(4096);
18
19void platform_init(unsigned long r3, unsigned long r4, unsigned long r5)
20{
21 u32 heapsize;
22
23 heapsize = 0x8000000 - (u32)_end; /* 128M */
24 simple_alloc_init(_end, heapsize, 32, 64);
25 fdt_init(_dtb_start);
26 serial_console_init();
27}
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 2e1af74a64be..d27a25518b01 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -265,6 +265,10 @@ epapr)
265 link_address='0x20000000' 265 link_address='0x20000000'
266 pie=-pie 266 pie=-pie
267 ;; 267 ;;
268mvme5100)
269 platformo="$object/fixed-head.o $object/mvme5100.o"
270 binary=y
271 ;;
268esac 272esac
269 273
270vmz="$tmpdir/`basename \"$kernel\"`.$ext" 274vmz="$tmpdir/`basename \"$kernel\"`.$ext"
diff --git a/arch/powerpc/configs/85xx/p1023_defconfig b/arch/powerpc/configs/85xx/p1023_defconfig
deleted file mode 100644
index b06d37da44f4..000000000000
--- a/arch/powerpc/configs/85xx/p1023_defconfig
+++ /dev/null
@@ -1,188 +0,0 @@
1CONFIG_PPC_85xx=y
2CONFIG_SMP=y
3CONFIG_NR_CPUS=2
4CONFIG_SYSVIPC=y
5CONFIG_POSIX_MQUEUE=y
6CONFIG_BSD_PROCESS_ACCT=y
7CONFIG_AUDIT=y
8CONFIG_NO_HZ=y
9CONFIG_HIGH_RES_TIMERS=y
10CONFIG_RCU_FANOUT=32
11CONFIG_IKCONFIG=y
12CONFIG_IKCONFIG_PROC=y
13CONFIG_LOG_BUF_SHIFT=14
14CONFIG_BLK_DEV_INITRD=y
15CONFIG_KALLSYMS_ALL=y
16CONFIG_EMBEDDED=y
17CONFIG_MODULES=y
18CONFIG_MODULE_UNLOAD=y
19CONFIG_MODULE_FORCE_UNLOAD=y
20CONFIG_MODVERSIONS=y
21# CONFIG_BLK_DEV_BSG is not set
22CONFIG_PARTITION_ADVANCED=y
23CONFIG_MAC_PARTITION=y
24CONFIG_PHYSICAL_START=0x00000000
25CONFIG_P1023_RDB=y
26CONFIG_P1023_RDS=y
27CONFIG_QUICC_ENGINE=y
28CONFIG_QE_GPIO=y
29CONFIG_CPM2=y
30CONFIG_HIGHMEM=y
31# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
32CONFIG_BINFMT_MISC=m
33CONFIG_MATH_EMULATION=y
34CONFIG_SWIOTLB=y
35CONFIG_PCI=y
36CONFIG_PCIEPORTBUS=y
37# CONFIG_PCIEAER is not set
38# CONFIG_PCIEASPM is not set
39CONFIG_PCI_MSI=y
40CONFIG_NET=y
41CONFIG_PACKET=y
42CONFIG_UNIX=y
43CONFIG_XFRM_USER=y
44CONFIG_NET_KEY=y
45CONFIG_INET=y
46CONFIG_IP_MULTICAST=y
47CONFIG_IP_ADVANCED_ROUTER=y
48CONFIG_IP_MULTIPLE_TABLES=y
49CONFIG_IP_ROUTE_MULTIPATH=y
50CONFIG_IP_ROUTE_VERBOSE=y
51CONFIG_IP_PNP=y
52CONFIG_IP_PNP_DHCP=y
53CONFIG_IP_PNP_BOOTP=y
54CONFIG_IP_PNP_RARP=y
55CONFIG_NET_IPIP=y
56CONFIG_IP_MROUTE=y
57CONFIG_IP_PIMSM_V1=y
58CONFIG_IP_PIMSM_V2=y
59CONFIG_ARPD=y
60CONFIG_INET_ESP=y
61# CONFIG_INET_XFRM_MODE_BEET is not set
62# CONFIG_INET_LRO is not set
63CONFIG_IPV6=y
64CONFIG_IP_SCTP=m
65CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
66CONFIG_DEVTMPFS=y
67CONFIG_DEVTMPFS_MOUNT=y
68CONFIG_MTD=y
69CONFIG_MTD_CMDLINE_PARTS=y
70CONFIG_MTD_CHAR=y
71CONFIG_MTD_BLOCK=y
72CONFIG_MTD_CFI=y
73CONFIG_MTD_CFI_AMDSTD=y
74CONFIG_MTD_PHYSMAP_OF=y
75CONFIG_MTD_NAND=y
76CONFIG_MTD_NAND_FSL_ELBC=y
77CONFIG_PROC_DEVICETREE=y
78CONFIG_BLK_DEV_LOOP=y
79CONFIG_BLK_DEV_RAM=y
80CONFIG_BLK_DEV_RAM_SIZE=131072
81CONFIG_EEPROM_AT24=y
82CONFIG_EEPROM_LEGACY=y
83CONFIG_BLK_DEV_SD=y
84CONFIG_CHR_DEV_ST=y
85CONFIG_BLK_DEV_SR=y
86CONFIG_CHR_DEV_SG=y
87CONFIG_SCSI_MULTI_LUN=y
88CONFIG_SCSI_LOGGING=y
89CONFIG_ATA=y
90CONFIG_SATA_FSL=y
91CONFIG_SATA_SIL24=y
92CONFIG_NETDEVICES=y
93CONFIG_DUMMY=y
94CONFIG_FS_ENET=y
95CONFIG_FSL_PQ_MDIO=y
96CONFIG_E1000E=y
97CONFIG_PHYLIB=y
98CONFIG_AT803X_PHY=y
99CONFIG_MARVELL_PHY=y
100CONFIG_DAVICOM_PHY=y
101CONFIG_CICADA_PHY=y
102CONFIG_VITESSE_PHY=y
103CONFIG_FIXED_PHY=y
104CONFIG_INPUT_FF_MEMLESS=m
105# CONFIG_INPUT_MOUSEDEV is not set
106# CONFIG_INPUT_KEYBOARD is not set
107# CONFIG_INPUT_MOUSE is not set
108CONFIG_SERIO_LIBPS2=y
109CONFIG_SERIAL_8250=y
110CONFIG_SERIAL_8250_CONSOLE=y
111CONFIG_SERIAL_8250_NR_UARTS=2
112CONFIG_SERIAL_8250_RUNTIME_UARTS=2
113CONFIG_SERIAL_8250_EXTENDED=y
114CONFIG_SERIAL_8250_MANY_PORTS=y
115CONFIG_SERIAL_8250_SHARE_IRQ=y
116CONFIG_SERIAL_8250_DETECT_IRQ=y
117CONFIG_SERIAL_8250_RSA=y
118CONFIG_HW_RANDOM=y
119CONFIG_NVRAM=y
120CONFIG_I2C=y
121CONFIG_I2C_CHARDEV=y
122CONFIG_I2C_CPM=m
123CONFIG_I2C_MPC=y
124CONFIG_GPIO_MPC8XXX=y
125# CONFIG_HWMON is not set
126CONFIG_VIDEO_OUTPUT_CONTROL=y
127CONFIG_SOUND=y
128CONFIG_SND=y
129CONFIG_SND_MIXER_OSS=y
130CONFIG_SND_PCM_OSS=y
131# CONFIG_SND_SUPPORT_OLD_API is not set
132CONFIG_USB=y
133CONFIG_USB_DEVICEFS=y
134CONFIG_USB_MON=y
135CONFIG_USB_EHCI_HCD=y
136CONFIG_USB_EHCI_FSL=y
137CONFIG_USB_STORAGE=y
138CONFIG_EDAC=y
139CONFIG_EDAC_MM_EDAC=y
140CONFIG_RTC_CLASS=y
141CONFIG_RTC_DRV_DS1307=y
142CONFIG_RTC_DRV_CMOS=y
143CONFIG_DMADEVICES=y
144CONFIG_FSL_DMA=y
145# CONFIG_NET_DMA is not set
146CONFIG_STAGING=y
147CONFIG_EXT2_FS=y
148CONFIG_EXT3_FS=y
149# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
150CONFIG_ISO9660_FS=m
151CONFIG_JOLIET=y
152CONFIG_ZISOFS=y
153CONFIG_UDF_FS=m
154CONFIG_MSDOS_FS=m
155CONFIG_VFAT_FS=y
156CONFIG_NTFS_FS=y
157CONFIG_PROC_KCORE=y
158CONFIG_TMPFS=y
159CONFIG_ADFS_FS=m
160CONFIG_AFFS_FS=m
161CONFIG_HFS_FS=m
162CONFIG_HFSPLUS_FS=m
163CONFIG_BEFS_FS=m
164CONFIG_BFS_FS=m
165CONFIG_EFS_FS=m
166CONFIG_CRAMFS=y
167CONFIG_VXFS_FS=m
168CONFIG_HPFS_FS=m
169CONFIG_QNX4FS_FS=m
170CONFIG_SYSV_FS=m
171CONFIG_UFS_FS=m
172CONFIG_NFS_FS=y
173CONFIG_NFS_V4=y
174CONFIG_ROOT_NFS=y
175CONFIG_NFSD=y
176CONFIG_CRC_T10DIF=y
177CONFIG_FRAME_WARN=8092
178CONFIG_DEBUG_FS=y
179CONFIG_DETECT_HUNG_TASK=y
180# CONFIG_DEBUG_BUGVERBOSE is not set
181CONFIG_DEBUG_INFO=y
182CONFIG_STRICT_DEVMEM=y
183CONFIG_CRYPTO_PCBC=m
184CONFIG_CRYPTO_SHA256=y
185CONFIG_CRYPTO_SHA512=y
186CONFIG_CRYPTO_AES=y
187# CONFIG_CRYPTO_ANSI_CPRNG is not set
188CONFIG_CRYPTO_DEV_FSL_CAAM=y
diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig
index 69128740c14d..15b1ff5d96e7 100644
--- a/arch/powerpc/configs/adder875_defconfig
+++ b/arch/powerpc/configs/adder875_defconfig
@@ -70,3 +70,4 @@ CONFIG_DEBUG_KERNEL=y
70CONFIG_DETECT_HUNG_TASK=y 70CONFIG_DETECT_HUNG_TASK=y
71CONFIG_DEBUG_INFO=y 71CONFIG_DEBUG_INFO=y
72# CONFIG_RCU_CPU_STALL_DETECTOR is not set 72# CONFIG_RCU_CPU_STALL_DETECTOR is not set
73CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig
index 219fd470ed22..b8a79d7ee89f 100644
--- a/arch/powerpc/configs/ep88xc_defconfig
+++ b/arch/powerpc/configs/ep88xc_defconfig
@@ -72,3 +72,4 @@ CONFIG_DEBUG_KERNEL=y
72CONFIG_DETECT_HUNG_TASK=y 72CONFIG_DETECT_HUNG_TASK=y
73CONFIG_DEBUG_INFO=y 73CONFIG_DEBUG_INFO=y
74# CONFIG_RCU_CPU_STALL_DETECTOR is not set 74# CONFIG_RCU_CPU_STALL_DETECTOR is not set
75CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index d2e0fab5ee5b..83d3550fdb54 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -31,6 +31,7 @@ CONFIG_C293_PCIE=y
31CONFIG_P1010_RDB=y 31CONFIG_P1010_RDB=y
32CONFIG_P1022_DS=y 32CONFIG_P1022_DS=y
33CONFIG_P1022_RDK=y 33CONFIG_P1022_RDK=y
34CONFIG_P1023_RDB=y
34CONFIG_P1023_RDS=y 35CONFIG_P1023_RDS=y
35CONFIG_SOCRATES=y 36CONFIG_SOCRATES=y
36CONFIG_KSI8560=y 37CONFIG_KSI8560=y
@@ -113,6 +114,7 @@ CONFIG_BLK_DEV_LOOP=y
113CONFIG_BLK_DEV_NBD=y 114CONFIG_BLK_DEV_NBD=y
114CONFIG_BLK_DEV_RAM=y 115CONFIG_BLK_DEV_RAM=y
115CONFIG_BLK_DEV_RAM_SIZE=131072 116CONFIG_BLK_DEV_RAM_SIZE=131072
117CONFIG_EEPROM_AT24=y
116CONFIG_EEPROM_LEGACY=y 118CONFIG_EEPROM_LEGACY=y
117CONFIG_BLK_DEV_SD=y 119CONFIG_BLK_DEV_SD=y
118CONFIG_CHR_DEV_ST=y 120CONFIG_CHR_DEV_ST=y
@@ -211,6 +213,7 @@ CONFIG_EDAC=y
211CONFIG_EDAC_MM_EDAC=y 213CONFIG_EDAC_MM_EDAC=y
212CONFIG_RTC_CLASS=y 214CONFIG_RTC_CLASS=y
213CONFIG_RTC_DRV_CMOS=y 215CONFIG_RTC_DRV_CMOS=y
216CONFIG_RTC_DRV_DS1307=y
214CONFIG_DMADEVICES=y 217CONFIG_DMADEVICES=y
215CONFIG_FSL_DMA=y 218CONFIG_FSL_DMA=y
216# CONFIG_NET_DMA is not set 219# CONFIG_NET_DMA is not set
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index 4cb7b59e98bd..4b686294feb4 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -34,6 +34,7 @@ CONFIG_C293_PCIE=y
34CONFIG_P1010_RDB=y 34CONFIG_P1010_RDB=y
35CONFIG_P1022_DS=y 35CONFIG_P1022_DS=y
36CONFIG_P1022_RDK=y 36CONFIG_P1022_RDK=y
37CONFIG_P1023_RDB=y
37CONFIG_P1023_RDS=y 38CONFIG_P1023_RDS=y
38CONFIG_SOCRATES=y 39CONFIG_SOCRATES=y
39CONFIG_KSI8560=y 40CONFIG_KSI8560=y
@@ -116,6 +117,7 @@ CONFIG_BLK_DEV_LOOP=y
116CONFIG_BLK_DEV_NBD=y 117CONFIG_BLK_DEV_NBD=y
117CONFIG_BLK_DEV_RAM=y 118CONFIG_BLK_DEV_RAM=y
118CONFIG_BLK_DEV_RAM_SIZE=131072 119CONFIG_BLK_DEV_RAM_SIZE=131072
120CONFIG_EEPROM_AT24=y
119CONFIG_EEPROM_LEGACY=y 121CONFIG_EEPROM_LEGACY=y
120CONFIG_BLK_DEV_SD=y 122CONFIG_BLK_DEV_SD=y
121CONFIG_CHR_DEV_ST=y 123CONFIG_CHR_DEV_ST=y
@@ -212,6 +214,7 @@ CONFIG_EDAC=y
212CONFIG_EDAC_MM_EDAC=y 214CONFIG_EDAC_MM_EDAC=y
213CONFIG_RTC_CLASS=y 215CONFIG_RTC_CLASS=y
214CONFIG_RTC_DRV_CMOS=y 216CONFIG_RTC_DRV_CMOS=y
217CONFIG_RTC_DRV_DS1307=y
215CONFIG_DMADEVICES=y 218CONFIG_DMADEVICES=y
216CONFIG_FSL_DMA=y 219CONFIG_FSL_DMA=y
217# CONFIG_NET_DMA is not set 220# CONFIG_NET_DMA is not set
diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig
index 5c258823e694..d954e80c286a 100644
--- a/arch/powerpc/configs/mpc866_ads_defconfig
+++ b/arch/powerpc/configs/mpc866_ads_defconfig
@@ -55,3 +55,4 @@ CONFIG_PARTITION_ADVANCED=y
55CONFIG_CRC_CCITT=y 55CONFIG_CRC_CCITT=y
56# CONFIG_RCU_CPU_STALL_DETECTOR is not set 56# CONFIG_RCU_CPU_STALL_DETECTOR is not set
57# CONFIG_CRYPTO_ANSI_CPRNG is not set 57# CONFIG_CRYPTO_ANSI_CPRNG is not set
58CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index 9e146cdf63de..3f47d00a10c0 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -78,3 +78,4 @@ CONFIG_DEBUG_KERNEL=y
78CONFIG_DETECT_HUNG_TASK=y 78CONFIG_DETECT_HUNG_TASK=y
79CONFIG_DEBUG_INFO=y 79CONFIG_DEBUG_INFO=y
80# CONFIG_RCU_CPU_STALL_DETECTOR is not set 80# CONFIG_RCU_CPU_STALL_DETECTOR is not set
81CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/configs/mvme5100_defconfig b/arch/powerpc/configs/mvme5100_defconfig
new file mode 100644
index 000000000000..93c7752e2dbb
--- /dev/null
+++ b/arch/powerpc/configs/mvme5100_defconfig
@@ -0,0 +1,144 @@
1CONFIG_SYSVIPC=y
2CONFIG_POSIX_MQUEUE=y
3CONFIG_NO_HZ=y
4CONFIG_HIGH_RES_TIMERS=y
5CONFIG_IKCONFIG=y
6CONFIG_IKCONFIG_PROC=y
7CONFIG_LOG_BUF_SHIFT=14
8# CONFIG_UTS_NS is not set
9# CONFIG_IPC_NS is not set
10# CONFIG_PID_NS is not set
11# CONFIG_NET_NS is not set
12CONFIG_CC_OPTIMIZE_FOR_SIZE=y
13# CONFIG_COMPAT_BRK is not set
14CONFIG_MODULES=y
15CONFIG_MODULE_UNLOAD=y
16# CONFIG_BLK_DEV_BSG is not set
17# CONFIG_PPC_CHRP is not set
18# CONFIG_PPC_PMAC is not set
19CONFIG_EMBEDDED6xx=y
20CONFIG_MVME5100=y
21CONFIG_KVM_GUEST=y
22CONFIG_HZ_100=y
23# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
24# CONFIG_COMPACTION is not set
25CONFIG_CMDLINE_BOOL=y
26CONFIG_CMDLINE="console=ttyS0,9600 ip=dhcp root=/dev/nfs"
27CONFIG_NET=y
28CONFIG_PACKET=y
29CONFIG_UNIX=y
30CONFIG_INET=y
31CONFIG_IP_MULTICAST=y
32CONFIG_IP_PNP=y
33CONFIG_IP_PNP_DHCP=y
34CONFIG_IP_PNP_BOOTP=y
35# CONFIG_INET_LRO is not set
36# CONFIG_IPV6 is not set
37CONFIG_NETFILTER=y
38CONFIG_NF_CONNTRACK=m
39CONFIG_NF_CT_PROTO_SCTP=m
40CONFIG_NF_CONNTRACK_AMANDA=m
41CONFIG_NF_CONNTRACK_FTP=m
42CONFIG_NF_CONNTRACK_H323=m
43CONFIG_NF_CONNTRACK_IRC=m
44CONFIG_NF_CONNTRACK_NETBIOS_NS=m
45CONFIG_NF_CONNTRACK_PPTP=m
46CONFIG_NF_CONNTRACK_SIP=m
47CONFIG_NF_CONNTRACK_TFTP=m
48CONFIG_NETFILTER_XT_MATCH_MAC=m
49CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
50CONFIG_NETFILTER_XT_MATCH_STATE=m
51CONFIG_NF_CONNTRACK_IPV4=m
52CONFIG_IP_NF_IPTABLES=m
53CONFIG_IP_NF_FILTER=m
54CONFIG_IP_NF_TARGET_REJECT=m
55CONFIG_IP_NF_MANGLE=m
56CONFIG_IP_NF_TARGET_ECN=m
57CONFIG_IP_NF_TARGET_TTL=m
58CONFIG_IP_NF_RAW=m
59CONFIG_IP_NF_ARPTABLES=m
60CONFIG_IP_NF_ARPFILTER=m
61CONFIG_IP_NF_ARP_MANGLE=m
62CONFIG_LAPB=m
63CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
64CONFIG_PROC_DEVICETREE=y
65CONFIG_BLK_DEV_LOOP=y
66CONFIG_BLK_DEV_RAM=y
67CONFIG_BLK_DEV_RAM_COUNT=2
68CONFIG_BLK_DEV_RAM_SIZE=8192
69CONFIG_EEPROM_LEGACY=m
70CONFIG_NETDEVICES=y
71CONFIG_TUN=m
72# CONFIG_NET_VENDOR_3COM is not set
73CONFIG_E100=y
74# CONFIG_WLAN is not set
75# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
76# CONFIG_INPUT_KEYBOARD is not set
77# CONFIG_INPUT_MOUSE is not set
78# CONFIG_SERIO is not set
79CONFIG_SERIAL_8250=y
80CONFIG_SERIAL_8250_CONSOLE=y
81CONFIG_SERIAL_8250_NR_UARTS=10
82CONFIG_SERIAL_8250_EXTENDED=y
83CONFIG_SERIAL_8250_MANY_PORTS=y
84CONFIG_SERIAL_8250_SHARE_IRQ=y
85CONFIG_SERIAL_OF_PLATFORM=y
86CONFIG_HW_RANDOM=y
87CONFIG_I2C=y
88CONFIG_I2C_CHARDEV=y
89CONFIG_I2C_MPC=y
90# CONFIG_HWMON is not set
91CONFIG_VIDEO_OUTPUT_CONTROL=m
92# CONFIG_VGA_CONSOLE is not set
93# CONFIG_HID is not set
94# CONFIG_USB_SUPPORT is not set
95# CONFIG_IOMMU_SUPPORT is not set
96CONFIG_VME_BUS=m
97CONFIG_VME_CA91CX42=m
98CONFIG_EXT2_FS=m
99CONFIG_EXT3_FS=m
100# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
101CONFIG_XFS_FS=m
102CONFIG_ISO9660_FS=m
103CONFIG_JOLIET=y
104CONFIG_ZISOFS=y
105CONFIG_UDF_FS=m
106CONFIG_MSDOS_FS=m
107CONFIG_VFAT_FS=m
108CONFIG_PROC_KCORE=y
109CONFIG_TMPFS=y
110CONFIG_NFS_FS=y
111CONFIG_NFS_V3_ACL=y
112CONFIG_NFS_V4=y
113CONFIG_ROOT_NFS=y
114CONFIG_NFSD=m
115CONFIG_NFSD_V3=y
116CONFIG_CIFS=m
117CONFIG_NLS=y
118CONFIG_NLS_CODEPAGE_437=m
119CONFIG_NLS_CODEPAGE_932=m
120CONFIG_NLS_ISO8859_1=m
121CONFIG_NLS_UTF8=m
122CONFIG_CRC_CCITT=m
123CONFIG_CRC_T10DIF=y
124CONFIG_XZ_DEC=y
125CONFIG_XZ_DEC_X86=y
126CONFIG_XZ_DEC_IA64=y
127CONFIG_XZ_DEC_ARM=y
128CONFIG_XZ_DEC_ARMTHUMB=y
129CONFIG_XZ_DEC_SPARC=y
130CONFIG_MAGIC_SYSRQ=y
131CONFIG_DEBUG_KERNEL=y
132CONFIG_DETECT_HUNG_TASK=y
133CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=20
134CONFIG_CRYPTO_CBC=y
135CONFIG_CRYPTO_PCBC=m
136CONFIG_CRYPTO_MD5=y
137CONFIG_CRYPTO_MICHAEL_MIC=m
138CONFIG_CRYPTO_SHA1=m
139CONFIG_CRYPTO_BLOWFISH=m
140CONFIG_CRYPTO_DES=y
141CONFIG_CRYPTO_SERPENT=m
142CONFIG_CRYPTO_TWOFISH=m
143CONFIG_CRYPTO_DEFLATE=m
144# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 581a3bcae728..e015896b7e5c 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -186,6 +186,7 @@ CONFIG_SCSI_DH_RDAC=m
186CONFIG_SCSI_DH_ALUA=m 186CONFIG_SCSI_DH_ALUA=m
187CONFIG_ATA=y 187CONFIG_ATA=y
188CONFIG_SATA_SIL24=y 188CONFIG_SATA_SIL24=y
189CONFIG_SATA_MV=y
189CONFIG_SATA_SVW=y 190CONFIG_SATA_SVW=y
190CONFIG_MD=y 191CONFIG_MD=y
191CONFIG_BLK_DEV_MD=y 192CONFIG_BLK_DEV_MD=y
diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig
index 8616fde0896f..4b6f8bf104e0 100644
--- a/arch/powerpc/configs/tqm8xx_defconfig
+++ b/arch/powerpc/configs/tqm8xx_defconfig
@@ -84,3 +84,4 @@ CONFIG_DEBUG_KERNEL=y
84CONFIG_DETECT_HUNG_TASK=y 84CONFIG_DETECT_HUNG_TASK=y
85CONFIG_DEBUG_INFO=y 85CONFIG_DEBUG_INFO=y
86# CONFIG_RCU_CPU_STALL_DETECTOR is not set 86# CONFIG_RCU_CPU_STALL_DETECTOR is not set
87CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 910194e9a1e2..a5e9a7d494d8 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -46,6 +46,11 @@
46#include <asm/asm-compat.h> 46#include <asm/asm-compat.h>
47#include <asm/synch.h> 47#include <asm/synch.h>
48 48
49/* PPC bit number conversion */
50#define PPC_BITLSHIFT(be) (BITS_PER_LONG - 1 - (be))
51#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))
52#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
53
49/* 54/*
50 * clear_bit doesn't imply a memory barrier 55 * clear_bit doesn't imply a memory barrier
51 */ 56 */
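
The PPC_BITLSHIFT()/PPC_BIT()/PPC_BITMASK() helpers added above translate IBM-style bit numbers (bit 0 is the most significant bit) into ordinary masks. A minimal userspace sketch, assuming a 64-bit build, of what they expand to:

/* Minimal userspace sketch of the IBM bit-numbering helpers added above,
 * assuming a 64-bit build (BITS_PER_LONG == 64). The IBM convention counts
 * bit 0 as the most significant bit of the word. */
#include <stdio.h>

#define BITS_PER_LONG 64
#define PPC_BITLSHIFT(be)   (BITS_PER_LONG - 1 - (be))
#define PPC_BIT(bit)        (1UL << PPC_BITLSHIFT(bit))
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

int main(void)
{
	printf("PPC_BIT(0)          = 0x%016lx\n", PPC_BIT(0));   /* MSB */
	printf("PPC_BIT(63)         = 0x%016lx\n", PPC_BIT(63));  /* LSB */
	/* Contiguous mask covering IBM bits 43 through 45 (three bits) */
	printf("PPC_BITMASK(43, 45) = 0x%016lx\n", PPC_BITMASK(43, 45));
	return 0;
}
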
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 9e495c9a6a88..ed0afc1e44a4 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -41,8 +41,20 @@ struct ppc64_caches {
41extern struct ppc64_caches ppc64_caches; 41extern struct ppc64_caches ppc64_caches;
42#endif /* __powerpc64__ && ! __ASSEMBLY__ */ 42#endif /* __powerpc64__ && ! __ASSEMBLY__ */
43 43
44#if !defined(__ASSEMBLY__) 44#if defined(__ASSEMBLY__)
45/*
46 * For a snooping icache, we still need a dummy icbi to purge all the
47 * prefetched instructions from the ifetch buffers. We also need a sync
48 * before the icbi to order the actual stores to memory that might
49 * have modified instructions with the icbi.
50 */
51#define PURGE_PREFETCHED_INS \
52 sync; \
53 icbi 0,r3; \
54 sync; \
55 isync
45 56
57#else
46#define __read_mostly __attribute__((__section__(".data..read_mostly"))) 58#define __read_mostly __attribute__((__section__(".data..read_mostly")))
47 59
48#ifdef CONFIG_6xx 60#ifdef CONFIG_6xx
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index e245aab7f191..d463c68fe7f0 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -300,6 +300,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
300 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ 300 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
301 cmpxchg_local((ptr), (o), (n)); \ 301 cmpxchg_local((ptr), (o), (n)); \
302 }) 302 })
303#define cmpxchg64_relaxed cmpxchg64_local
303#else 304#else
304#include <asm-generic/cmpxchg-local.h> 305#include <asm-generic/cmpxchg-local.h>
305#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) 306#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index a6f8c7a5cbb7..97e02f985df8 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -34,6 +34,13 @@ int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
34unsigned long branch_target(const unsigned int *instr); 34unsigned long branch_target(const unsigned int *instr);
35unsigned int translate_branch(const unsigned int *dest, 35unsigned int translate_branch(const unsigned int *dest,
36 const unsigned int *src); 36 const unsigned int *src);
37#ifdef CONFIG_PPC_BOOK3E_64
38void __patch_exception(int exc, unsigned long addr);
39#define patch_exception(exc, name) do { \
40 extern unsigned int name; \
41 __patch_exception((exc), (unsigned long)&name); \
42} while (0)
43#endif
37 44
38static inline unsigned long ppc_function_entry(void *func) 45static inline unsigned long ppc_function_entry(void *func)
39{ 46{
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 0d4939ba48e7..617cc767c076 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -90,6 +90,18 @@ struct cpu_spec {
90 * if the error is fatal, 1 if it was fully recovered and 0 to 90 * if the error is fatal, 1 if it was fully recovered and 0 to
91 * pass up (not CPU originated) */ 91 * pass up (not CPU originated) */
92 int (*machine_check)(struct pt_regs *regs); 92 int (*machine_check)(struct pt_regs *regs);
93
94 /*
95 * Processor specific early machine check handler which is
96 * called in real mode to handle SLB and TLB errors.
97 */
98 long (*machine_check_early)(struct pt_regs *regs);
99
100 /*
101 * Processor specific routine to flush tlbs.
102 */
103 void (*flush_tlb)(unsigned long inval_selector);
104
93}; 105};
94 106
95extern struct cpu_spec *cur_cpu_spec; 107extern struct cpu_spec *cur_cpu_spec;
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index d3e5e9bc8f94..9e39ceb1d19f 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -90,7 +90,8 @@ struct eeh_pe {
90#define EEH_DEV_IRQ_DISABLED (1 << 3) /* Interrupt disabled */ 90#define EEH_DEV_IRQ_DISABLED (1 << 3) /* Interrupt disabled */
91#define EEH_DEV_DISCONNECTED (1 << 4) /* Removing from PE */ 91#define EEH_DEV_DISCONNECTED (1 << 4) /* Removing from PE */
92 92
93#define EEH_DEV_SYSFS (1 << 8) /* Sysfs created */ 93#define EEH_DEV_NO_HANDLER (1 << 8) /* No error handler */
94#define EEH_DEV_SYSFS (1 << 9) /* Sysfs created */
94 95
95struct eeh_dev { 96struct eeh_dev {
96 int mode; /* EEH mode */ 97 int mode; /* EEH mode */
@@ -117,6 +118,16 @@ static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
117 return edev ? edev->pdev : NULL; 118 return edev ? edev->pdev : NULL;
118} 119}
119 120
121/* Return values from eeh_ops::next_error */
122enum {
123 EEH_NEXT_ERR_NONE = 0,
124 EEH_NEXT_ERR_INF,
125 EEH_NEXT_ERR_FROZEN_PE,
126 EEH_NEXT_ERR_FENCED_PHB,
127 EEH_NEXT_ERR_DEAD_PHB,
128 EEH_NEXT_ERR_DEAD_IOC
129};
130
120/* 131/*
121 * The struct is used to trace the registered EEH operation 132 * The struct is used to trace the registered EEH operation
122 * callback functions. Actually, those operation callback 133 * callback functions. Actually, those operation callback
@@ -157,6 +168,7 @@ struct eeh_ops {
157 int (*read_config)(struct device_node *dn, int where, int size, u32 *val); 168 int (*read_config)(struct device_node *dn, int where, int size, u32 *val);
158 int (*write_config)(struct device_node *dn, int where, int size, u32 val); 169 int (*write_config)(struct device_node *dn, int where, int size, u32 val);
159 int (*next_error)(struct eeh_pe **pe); 170 int (*next_error)(struct eeh_pe **pe);
171 int (*restore_config)(struct device_node *dn);
160}; 172};
161 173
162extern struct eeh_ops *eeh_ops; 174extern struct eeh_ops *eeh_ops;
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 243ce69ad685..66830618cc19 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -301,9 +301,12 @@ do_kvm_##n: \
301 beq 4f; /* if from kernel mode */ \ 301 beq 4f; /* if from kernel mode */ \
302 ACCOUNT_CPU_USER_ENTRY(r9, r10); \ 302 ACCOUNT_CPU_USER_ENTRY(r9, r10); \
303 SAVE_PPR(area, r9, r10); \ 303 SAVE_PPR(area, r9, r10); \
3044: std r2,GPR2(r1); /* save r2 in stackframe */ \ 3044: EXCEPTION_PROLOG_COMMON_2(area) \
305 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ 305 EXCEPTION_PROLOG_COMMON_3(n) \
306 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ 306 ACCOUNT_STOLEN_TIME
307
308/* Save original regs values from save area to stack frame. */
309#define EXCEPTION_PROLOG_COMMON_2(area) \
307 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \ 310 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
308 ld r10,area+EX_R10(r13); \ 311 ld r10,area+EX_R10(r13); \
309 std r9,GPR9(r1); \ 312 std r9,GPR9(r1); \
@@ -318,11 +321,16 @@ do_kvm_##n: \
318 ld r10,area+EX_CFAR(r13); \ 321 ld r10,area+EX_CFAR(r13); \
319 std r10,ORIG_GPR3(r1); \ 322 std r10,ORIG_GPR3(r1); \
320 END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \ 323 END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
324 GET_CTR(r10, area); \
325 std r10,_CTR(r1);
326
327#define EXCEPTION_PROLOG_COMMON_3(n) \
328 std r2,GPR2(r1); /* save r2 in stackframe */ \
329 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
330 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
321 mflr r9; /* Get LR, later save to stack */ \ 331 mflr r9; /* Get LR, later save to stack */ \
322 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ 332 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
323 std r9,_LINK(r1); \ 333 std r9,_LINK(r1); \
324 GET_CTR(r10, area); \
325 std r10,_CTR(r1); \
326 lbz r10,PACASOFTIRQEN(r13); \ 334 lbz r10,PACASOFTIRQEN(r13); \
327 mfspr r11,SPRN_XER; /* save XER in stackframe */ \ 335 mfspr r11,SPRN_XER; /* save XER in stackframe */ \
328 std r10,SOFTE(r1); \ 336 std r10,SOFTE(r1); \
@@ -332,8 +340,7 @@ do_kvm_##n: \
332 li r10,0; \ 340 li r10,0; \
333 ld r11,exception_marker@toc(r2); \ 341 ld r11,exception_marker@toc(r2); \
334 std r10,RESULT(r1); /* clear regs->result */ \ 342 std r10,RESULT(r1); /* clear regs->result */ \
335 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \ 343 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
336 ACCOUNT_STOLEN_TIME
337 344
338/* 345/*
339 * Exception vectors. 346 * Exception vectors.
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h
index 420b45368fcf..067fb0dca549 100644
--- a/arch/powerpc/include/asm/fsl_lbc.h
+++ b/arch/powerpc/include/asm/fsl_lbc.h
@@ -285,7 +285,7 @@ struct fsl_lbc_ctrl {
285 /* device info */ 285 /* device info */
286 struct device *dev; 286 struct device *dev;
287 struct fsl_lbc_regs __iomem *regs; 287 struct fsl_lbc_regs __iomem *regs;
288 int irq; 288 int irq[2];
289 wait_queue_head_t irq_wait; 289 wait_queue_head_t irq_wait;
290 spinlock_t lock; 290 spinlock_t lock;
291 void *nand; 291 void *nand;
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 3bdcfce2c42a..418fb654370d 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -6,7 +6,8 @@
6 6
7typedef struct { 7typedef struct {
8 unsigned int __softirq_pending; 8 unsigned int __softirq_pending;
9 unsigned int timer_irqs; 9 unsigned int timer_irqs_event;
10 unsigned int timer_irqs_others;
10 unsigned int pmu_irqs; 11 unsigned int pmu_irqs;
11 unsigned int mce_exceptions; 12 unsigned int mce_exceptions;
12 unsigned int spurious_irqs; 13 unsigned int spurious_irqs;
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 575fbf81fad0..97d3869991ca 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -191,8 +191,24 @@ DEF_MMIO_OUT_D(out_le32, 32, stw);
191 191
192#endif /* __BIG_ENDIAN */ 192#endif /* __BIG_ENDIAN */
193 193
194/*
195 * Cache inhibited accessors for use in real mode; you don't want to use these
196 * unless you know what you're doing.
197 *
198 * NB. These use the cpu byte ordering.
199 */
200DEF_MMIO_OUT_X(out_rm8, 8, stbcix);
201DEF_MMIO_OUT_X(out_rm16, 16, sthcix);
202DEF_MMIO_OUT_X(out_rm32, 32, stwcix);
203DEF_MMIO_IN_X(in_rm8, 8, lbzcix);
204DEF_MMIO_IN_X(in_rm16, 16, lhzcix);
205DEF_MMIO_IN_X(in_rm32, 32, lwzcix);
206
194#ifdef __powerpc64__ 207#ifdef __powerpc64__
195 208
209DEF_MMIO_OUT_X(out_rm64, 64, stdcix);
210DEF_MMIO_IN_X(in_rm64, 64, ldcix);
211
196#ifdef __BIG_ENDIAN__ 212#ifdef __BIG_ENDIAN__
197DEF_MMIO_OUT_D(out_be64, 64, std); 213DEF_MMIO_OUT_D(out_be64, 64, std);
198DEF_MMIO_IN_D(in_be64, 64, ld); 214DEF_MMIO_IN_D(in_be64, 64, ld);
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index c34656a8925e..f7a8036579b5 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -30,22 +30,19 @@
30#include <asm/machdep.h> 30#include <asm/machdep.h>
31#include <asm/types.h> 31#include <asm/types.h>
32 32
33#define IOMMU_PAGE_SHIFT 12 33#define IOMMU_PAGE_SHIFT_4K 12
34#define IOMMU_PAGE_SIZE (ASM_CONST(1) << IOMMU_PAGE_SHIFT) 34#define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
35#define IOMMU_PAGE_MASK (~((1 << IOMMU_PAGE_SHIFT) - 1)) 35#define IOMMU_PAGE_MASK_4K (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
36#define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE) 36#define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)
37
38#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
39#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
40#define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr))
37 41
38/* Boot time flags */ 42/* Boot time flags */
39extern int iommu_is_off; 43extern int iommu_is_off;
40extern int iommu_force_on; 44extern int iommu_force_on;
41 45
42/* Pure 2^n version of get_order */
43static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
44{
45 return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1;
46}
47
48
49/* 46/*
50 * IOMAP_MAX_ORDER defines the largest contiguous block 47 * IOMAP_MAX_ORDER defines the largest contiguous block
51 * of dma space we can get. IOMAP_MAX_ORDER = 13 48 * of dma space we can get. IOMAP_MAX_ORDER = 13
@@ -76,11 +73,20 @@ struct iommu_table {
76 struct iommu_pool large_pool; 73 struct iommu_pool large_pool;
77 struct iommu_pool pools[IOMMU_NR_POOLS]; 74 struct iommu_pool pools[IOMMU_NR_POOLS];
78 unsigned long *it_map; /* A simple allocation bitmap for now */ 75 unsigned long *it_map; /* A simple allocation bitmap for now */
76 unsigned long it_page_shift;/* table iommu page size */
79#ifdef CONFIG_IOMMU_API 77#ifdef CONFIG_IOMMU_API
80 struct iommu_group *it_group; 78 struct iommu_group *it_group;
81#endif 79#endif
82}; 80};
83 81
82/* Pure 2^n version of get_order */
83static inline __attribute_const__
84int get_iommu_order(unsigned long size, struct iommu_table *tbl)
85{
86 return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
87}
88
89
84struct scatterlist; 90struct scatterlist;
85 91
86static inline void set_iommu_table_base(struct device *dev, void *base) 92static inline void set_iommu_table_base(struct device *dev, void *base)
@@ -101,8 +107,34 @@ extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
101 */ 107 */
102extern struct iommu_table *iommu_init_table(struct iommu_table * tbl, 108extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
103 int nid); 109 int nid);
110#ifdef CONFIG_IOMMU_API
104extern void iommu_register_group(struct iommu_table *tbl, 111extern void iommu_register_group(struct iommu_table *tbl,
105 int pci_domain_number, unsigned long pe_num); 112 int pci_domain_number, unsigned long pe_num);
113extern int iommu_add_device(struct device *dev);
114extern void iommu_del_device(struct device *dev);
115#else
116static inline void iommu_register_group(struct iommu_table *tbl,
117 int pci_domain_number,
118 unsigned long pe_num)
119{
120}
121
122static inline int iommu_add_device(struct device *dev)
123{
124 return 0;
125}
126
127static inline void iommu_del_device(struct device *dev)
128{
129}
130#endif /* !CONFIG_IOMMU_API */
131
132static inline void set_iommu_table_base_and_group(struct device *dev,
133 void *base)
134{
135 set_iommu_table_base(dev, base);
136 iommu_add_device(dev);
137}
106 138
107extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl, 139extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
108 struct scatterlist *sglist, int nelems, 140 struct scatterlist *sglist, int nelems,
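
The iommu_table changes above replace the fixed 4K IOMMU page size with a per-table it_page_shift, and get_iommu_order() now takes the table. A minimal userspace sketch of that arithmetic, with ilog2_ul() standing in for the kernel's __ilog2() and only the field needed for the math mirrored here:

/* Minimal userspace sketch of the per-table IOMMU page-size arithmetic. */
#include <stdio.h>

struct iommu_table { unsigned long it_page_shift; };

static int ilog2_ul(unsigned long x)
{
	int r = -1;

	while (x) {		/* ilog2_ul(0) == -1, which the +1 below relies on */
		x >>= 1;
		r++;
	}
	return r;
}

static unsigned long iommu_page_size(struct iommu_table *tbl)
{
	return 1UL << tbl->it_page_shift;
}

/* Pure 2^n order: smallest order such that 2^order IOMMU pages cover size */
static int get_iommu_order(unsigned long size, struct iommu_table *tbl)
{
	return ilog2_ul((size - 1) >> tbl->it_page_shift) + 1;
}

int main(void)
{
	struct iommu_table tbl_4k  = { 12 };	/* 4K IOMMU pages */
	struct iommu_table tbl_64k = { 16 };	/* 64K IOMMU pages */
	unsigned long size = 20000;		/* arbitrary mapping size for the demo */

	printf("4K  table: page %lu, order %d\n",
	       iommu_page_size(&tbl_4k), get_iommu_order(size, &tbl_4k));
	printf("64K table: page %lu, order %d\n",
	       iommu_page_size(&tbl_64k), get_iommu_order(size, &tbl_64k));
	return 0;
}
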
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 1bd92fd43cfb..1503d8c7c41b 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -74,6 +74,7 @@
74#define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39 74#define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39
75#define BOOKE_INTERRUPT_HV_SYSCALL 40 75#define BOOKE_INTERRUPT_HV_SYSCALL 40
76#define BOOKE_INTERRUPT_HV_PRIV 41 76#define BOOKE_INTERRUPT_HV_PRIV 41
77#define BOOKE_INTERRUPT_LRAT_ERROR 42
77 78
78/* book3s */ 79/* book3s */
79 80
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 844c28de7ec0..d0a2a2f99564 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -132,8 +132,6 @@ struct slb_shadow {
132 } save_area[SLB_NUM_BOLTED]; 132 } save_area[SLB_NUM_BOLTED];
133} ____cacheline_aligned; 133} ____cacheline_aligned;
134 134
135extern struct slb_shadow slb_shadow[];
136
137/* 135/*
138 * Layout of entries in the hypervisor's dispatch trace log buffer. 136 * Layout of entries in the hypervisor's dispatch trace log buffer.
139 */ 137 */
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
new file mode 100644
index 000000000000..8e99edf6d966
--- /dev/null
+++ b/arch/powerpc/include/asm/mce.h
@@ -0,0 +1,197 @@
1/*
2 * Machine check exception header file.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright 2013 IBM Corporation
19 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
20 */
21
22#ifndef __ASM_PPC64_MCE_H__
23#define __ASM_PPC64_MCE_H__
24
25#include <linux/bitops.h>
26
27/*
28 * Machine Check bits on power7 and power8
29 */
30#define P7_SRR1_MC_LOADSTORE(srr1) ((srr1) & PPC_BIT(42)) /* P8 too */
31
32/* SRR1 bits for machine check (On Power7 and Power8) */
33#define P7_SRR1_MC_IFETCH(srr1) ((srr1) & PPC_BITMASK(43, 45)) /* P8 too */
34
35#define P7_SRR1_MC_IFETCH_UE (0x1 << PPC_BITLSHIFT(45)) /* P8 too */
36#define P7_SRR1_MC_IFETCH_SLB_PARITY (0x2 << PPC_BITLSHIFT(45)) /* P8 too */
37#define P7_SRR1_MC_IFETCH_SLB_MULTIHIT (0x3 << PPC_BITLSHIFT(45)) /* P8 too */
38#define P7_SRR1_MC_IFETCH_SLB_BOTH (0x4 << PPC_BITLSHIFT(45))
39#define P7_SRR1_MC_IFETCH_TLB_MULTIHIT (0x5 << PPC_BITLSHIFT(45)) /* P8 too */
40#define P7_SRR1_MC_IFETCH_UE_TLB_RELOAD (0x6 << PPC_BITLSHIFT(45)) /* P8 too */
41#define P7_SRR1_MC_IFETCH_UE_IFU_INTERNAL (0x7 << PPC_BITLSHIFT(45))
42
43/* SRR1 bits for machine check (On Power8) */
44#define P8_SRR1_MC_IFETCH_ERAT_MULTIHIT (0x4 << PPC_BITLSHIFT(45))
45
46/* DSISR bits for machine check (On Power7 and Power8) */
47#define P7_DSISR_MC_UE (PPC_BIT(48)) /* P8 too */
48#define P7_DSISR_MC_UE_TABLEWALK (PPC_BIT(49)) /* P8 too */
49#define P7_DSISR_MC_ERAT_MULTIHIT (PPC_BIT(52)) /* P8 too */
50#define P7_DSISR_MC_TLB_MULTIHIT_MFTLB (PPC_BIT(53)) /* P8 too */
51#define P7_DSISR_MC_SLB_PARITY_MFSLB (PPC_BIT(55)) /* P8 too */
52#define P7_DSISR_MC_SLB_MULTIHIT (PPC_BIT(56)) /* P8 too */
53#define P7_DSISR_MC_SLB_MULTIHIT_PARITY (PPC_BIT(57)) /* P8 too */
54
55/*
56 * DSISR bits for machine check (Power8) in addition to above.
57 * Secondary DERAT Multihit
58 */
59#define P8_DSISR_MC_ERAT_MULTIHIT_SEC (PPC_BIT(54))
60
61/* SLB error bits */
62#define P7_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_ERAT_MULTIHIT | \
63 P7_DSISR_MC_SLB_PARITY_MFSLB | \
64 P7_DSISR_MC_SLB_MULTIHIT | \
65 P7_DSISR_MC_SLB_MULTIHIT_PARITY)
66
67#define P8_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_SLB_ERRORS | \
68 P8_DSISR_MC_ERAT_MULTIHIT_SEC)
69enum MCE_Version {
70 MCE_V1 = 1,
71};
72
73enum MCE_Severity {
74 MCE_SEV_NO_ERROR = 0,
75 MCE_SEV_WARNING = 1,
76 MCE_SEV_ERROR_SYNC = 2,
77 MCE_SEV_FATAL = 3,
78};
79
80enum MCE_Disposition {
81 MCE_DISPOSITION_RECOVERED = 0,
82 MCE_DISPOSITION_NOT_RECOVERED = 1,
83};
84
85enum MCE_Initiator {
86 MCE_INITIATOR_UNKNOWN = 0,
87 MCE_INITIATOR_CPU = 1,
88};
89
90enum MCE_ErrorType {
91 MCE_ERROR_TYPE_UNKNOWN = 0,
92 MCE_ERROR_TYPE_UE = 1,
93 MCE_ERROR_TYPE_SLB = 2,
94 MCE_ERROR_TYPE_ERAT = 3,
95 MCE_ERROR_TYPE_TLB = 4,
96};
97
98enum MCE_UeErrorType {
99 MCE_UE_ERROR_INDETERMINATE = 0,
100 MCE_UE_ERROR_IFETCH = 1,
101 MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
102 MCE_UE_ERROR_LOAD_STORE = 3,
103 MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4,
104};
105
106enum MCE_SlbErrorType {
107 MCE_SLB_ERROR_INDETERMINATE = 0,
108 MCE_SLB_ERROR_PARITY = 1,
109 MCE_SLB_ERROR_MULTIHIT = 2,
110};
111
112enum MCE_EratErrorType {
113 MCE_ERAT_ERROR_INDETERMINATE = 0,
114 MCE_ERAT_ERROR_PARITY = 1,
115 MCE_ERAT_ERROR_MULTIHIT = 2,
116};
117
118enum MCE_TlbErrorType {
119 MCE_TLB_ERROR_INDETERMINATE = 0,
120 MCE_TLB_ERROR_PARITY = 1,
121 MCE_TLB_ERROR_MULTIHIT = 2,
122};
123
124struct machine_check_event {
125 enum MCE_Version version:8; /* 0x00 */
126 uint8_t in_use; /* 0x01 */
127 enum MCE_Severity severity:8; /* 0x02 */
128 enum MCE_Initiator initiator:8; /* 0x03 */
129 enum MCE_ErrorType error_type:8; /* 0x04 */
130 enum MCE_Disposition disposition:8; /* 0x05 */
131 uint8_t reserved_1[2]; /* 0x06 */
132 uint64_t gpr3; /* 0x08 */
133 uint64_t srr0; /* 0x10 */
134 uint64_t srr1; /* 0x18 */
135 union { /* 0x20 */
136 struct {
137 enum MCE_UeErrorType ue_error_type:8;
138 uint8_t effective_address_provided;
139 uint8_t physical_address_provided;
140 uint8_t reserved_1[5];
141 uint64_t effective_address;
142 uint64_t physical_address;
143 uint8_t reserved_2[8];
144 } ue_error;
145
146 struct {
147 enum MCE_SlbErrorType slb_error_type:8;
148 uint8_t effective_address_provided;
149 uint8_t reserved_1[6];
150 uint64_t effective_address;
151 uint8_t reserved_2[16];
152 } slb_error;
153
154 struct {
155 enum MCE_EratErrorType erat_error_type:8;
156 uint8_t effective_address_provided;
157 uint8_t reserved_1[6];
158 uint64_t effective_address;
159 uint8_t reserved_2[16];
160 } erat_error;
161
162 struct {
163 enum MCE_TlbErrorType tlb_error_type:8;
164 uint8_t effective_address_provided;
165 uint8_t reserved_1[6];
166 uint64_t effective_address;
167 uint8_t reserved_2[16];
168 } tlb_error;
169 } u;
170};
171
172struct mce_error_info {
173 enum MCE_ErrorType error_type:8;
174 union {
175 enum MCE_UeErrorType ue_error_type:8;
176 enum MCE_SlbErrorType slb_error_type:8;
177 enum MCE_EratErrorType erat_error_type:8;
178 enum MCE_TlbErrorType tlb_error_type:8;
179 } u;
180 uint8_t reserved[2];
181};
182
183#define MAX_MC_EVT 100
184
185/* Release flags for get_mce_event() */
186#define MCE_EVENT_RELEASE true
187#define MCE_EVENT_DONTRELEASE false
188
189extern void save_mce_event(struct pt_regs *regs, long handled,
190 struct mce_error_info *mce_err, uint64_t addr);
191extern int get_mce_event(struct machine_check_event *mce, bool release);
192extern void release_mce_event(void);
193extern void machine_check_queue_event(void);
194extern void machine_check_print_event_info(struct machine_check_event *evt);
195extern uint64_t get_mce_fault_addr(struct machine_check_event *evt);
196
197#endif /* __ASM_PPC64_MCE_H__ */
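
The SRR1 field macros near the top of mce.h are what the early machine-check handling classifies instruction-fetch errors with. A standalone sketch that re-declares the relevant macros (assuming a 64-bit build) so it compiles outside the kernel; the sample SRR1 value is fabricated purely for illustration:

/* Standalone sketch (not kernel code) of decoding the SRR1 ifetch field
 * using the definitions from mce.h above, re-declared here for the demo. */
#include <stdio.h>

#define BITS_PER_LONG 64
#define PPC_BITLSHIFT(be)   (BITS_PER_LONG - 1 - (be))
#define PPC_BIT(bit)        (1UL << PPC_BITLSHIFT(bit))
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

#define P7_SRR1_MC_IFETCH(srr1)        ((srr1) & PPC_BITMASK(43, 45))
#define P7_SRR1_MC_IFETCH_SLB_PARITY   (0x2UL << PPC_BITLSHIFT(45))
#define P7_SRR1_MC_IFETCH_SLB_MULTIHIT (0x3UL << PPC_BITLSHIFT(45))
#define P7_SRR1_MC_IFETCH_TLB_MULTIHIT (0x5UL << PPC_BITLSHIFT(45))

static const char *classify_ifetch(unsigned long srr1)
{
	switch (P7_SRR1_MC_IFETCH(srr1)) {
	case P7_SRR1_MC_IFETCH_SLB_PARITY:   return "SLB parity";
	case P7_SRR1_MC_IFETCH_SLB_MULTIHIT: return "SLB multihit";
	case P7_SRR1_MC_IFETCH_TLB_MULTIHIT: return "TLB multihit";
	default:                             return "other/unknown";
	}
}

int main(void)
{
	unsigned long sample = P7_SRR1_MC_IFETCH_SLB_MULTIHIT; /* fabricated sample */

	printf("SRR1 0x%016lx -> %s\n", sample, classify_ifetch(sample));
	return 0;
}
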
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 936db360790a..89b785d16846 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -286,8 +286,21 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
286extern int mmu_linear_psize; 286extern int mmu_linear_psize;
287extern int mmu_vmemmap_psize; 287extern int mmu_vmemmap_psize;
288 288
289struct tlb_core_data {
290 /* For software way selection, as on Freescale TLB1 */
291 u8 esel_next, esel_max, esel_first;
292
293 /* Per-core spinlock for e6500 TLB handlers (no tlbsrx.) */
294 u8 lock;
295};
296
289#ifdef CONFIG_PPC64 297#ifdef CONFIG_PPC64
290extern unsigned long linear_map_top; 298extern unsigned long linear_map_top;
299extern int book3e_htw_mode;
300
301#define PPC_HTW_NONE 0
302#define PPC_HTW_IBM 1
303#define PPC_HTW_E6500 2
291 304
292/* 305/*
293 * 64-bit booke platforms don't load the tlb in the tlb miss handler code. 306 * 64-bit booke platforms don't load the tlb in the tlb miss handler code.
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 691fd8aca939..f8d1d6dcf7db 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -180,16 +180,17 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
180#define MMU_PAGE_64K_AP 3 /* "Admixed pages" (hash64 only) */ 180#define MMU_PAGE_64K_AP 3 /* "Admixed pages" (hash64 only) */
181#define MMU_PAGE_256K 4 181#define MMU_PAGE_256K 4
182#define MMU_PAGE_1M 5 182#define MMU_PAGE_1M 5
183#define MMU_PAGE_4M 6 183#define MMU_PAGE_2M 6
184#define MMU_PAGE_8M 7 184#define MMU_PAGE_4M 7
185#define MMU_PAGE_16M 8 185#define MMU_PAGE_8M 8
186#define MMU_PAGE_64M 9 186#define MMU_PAGE_16M 9
187#define MMU_PAGE_256M 10 187#define MMU_PAGE_64M 10
188#define MMU_PAGE_1G 11 188#define MMU_PAGE_256M 11
189#define MMU_PAGE_16G 12 189#define MMU_PAGE_1G 12
190#define MMU_PAGE_64G 13 190#define MMU_PAGE_16G 13
191 191#define MMU_PAGE_64G 14
192#define MMU_PAGE_COUNT 14 192
193#define MMU_PAGE_COUNT 15
193 194
194#if defined(CONFIG_PPC_STD_MMU_64) 195#if defined(CONFIG_PPC_STD_MMU_64)
195/* 64-bit classic hash table MMU */ 196/* 64-bit classic hash table MMU */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 7bdcf340016c..40157e2ca691 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -33,6 +33,28 @@ struct opal_takeover_args {
33 u64 rd_loc; /* r11 */ 33 u64 rd_loc; /* r11 */
34}; 34};
35 35
36/*
37 * SG entry
38 *
39 * WARNING: The current implementation requires each entry
40 * to represent a block that is 4k aligned *and* each block
41 * size except the last one in the list to be as well.
42 */
43struct opal_sg_entry {
44 void *data;
45 long length;
46};
47
48/* sg list */
49struct opal_sg_list {
50 unsigned long num_entries;
51 struct opal_sg_list *next;
52 struct opal_sg_entry entry[];
53};
54
55/* We calculate number of sg entries based on PAGE_SIZE */
56#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
57
36extern long opal_query_takeover(u64 *hal_size, u64 *hal_align); 58extern long opal_query_takeover(u64 *hal_size, u64 *hal_align);
37 59
38extern long opal_do_takeover(struct opal_takeover_args *args); 60extern long opal_do_takeover(struct opal_takeover_args *args);
@@ -132,6 +154,9 @@ extern int opal_enter_rtas(struct rtas_args *args,
132#define OPAL_FLASH_VALIDATE 76 154#define OPAL_FLASH_VALIDATE 76
133#define OPAL_FLASH_MANAGE 77 155#define OPAL_FLASH_MANAGE 77
134#define OPAL_FLASH_UPDATE 78 156#define OPAL_FLASH_UPDATE 78
157#define OPAL_GET_MSG 85
158#define OPAL_CHECK_ASYNC_COMPLETION 86
159#define OPAL_SYNC_HOST_REBOOT 87
135 160
136#ifndef __ASSEMBLY__ 161#ifndef __ASSEMBLY__
137 162
@@ -211,7 +236,16 @@ enum OpalPendingState {
211 OPAL_EVENT_ERROR_LOG = 0x40, 236 OPAL_EVENT_ERROR_LOG = 0x40,
212 OPAL_EVENT_EPOW = 0x80, 237 OPAL_EVENT_EPOW = 0x80,
213 OPAL_EVENT_LED_STATUS = 0x100, 238 OPAL_EVENT_LED_STATUS = 0x100,
214 OPAL_EVENT_PCI_ERROR = 0x200 239 OPAL_EVENT_PCI_ERROR = 0x200,
240 OPAL_EVENT_MSG_PENDING = 0x800,
241};
242
243enum OpalMessageType {
244 OPAL_MSG_ASYNC_COMP = 0,
245 OPAL_MSG_MEM_ERR,
246 OPAL_MSG_EPOW,
247 OPAL_MSG_SHUTDOWN,
248 OPAL_MSG_TYPE_MAX,
215}; 249};
216 250
217/* Machine check related definitions */ 251/* Machine check related definitions */
@@ -311,12 +345,16 @@ enum OpalMveEnableAction {
311 OPAL_ENABLE_MVE = 1 345 OPAL_ENABLE_MVE = 1
312}; 346};
313 347
314enum OpalPciResetAndReinitScope { 348enum OpalPciResetScope {
315 OPAL_PHB_COMPLETE = 1, OPAL_PCI_LINK = 2, OPAL_PHB_ERROR = 3, 349 OPAL_PHB_COMPLETE = 1, OPAL_PCI_LINK = 2, OPAL_PHB_ERROR = 3,
316 OPAL_PCI_HOT_RESET = 4, OPAL_PCI_FUNDAMENTAL_RESET = 5, 350 OPAL_PCI_HOT_RESET = 4, OPAL_PCI_FUNDAMENTAL_RESET = 5,
317 OPAL_PCI_IODA_TABLE_RESET = 6, 351 OPAL_PCI_IODA_TABLE_RESET = 6,
318}; 352};
319 353
354enum OpalPciReinitScope {
355 OPAL_REINIT_PCI_DEV = 1000
356};
357
320enum OpalPciResetState { 358enum OpalPciResetState {
321 OPAL_DEASSERT_RESET = 0, 359 OPAL_DEASSERT_RESET = 0,
322 OPAL_ASSERT_RESET = 1 360 OPAL_ASSERT_RESET = 1
@@ -356,6 +394,12 @@ enum OpalLPCAddressType {
356 OPAL_LPC_FW = 2, 394 OPAL_LPC_FW = 2,
357}; 395};
358 396
397struct opal_msg {
398 uint32_t msg_type;
399 uint32_t reserved;
400 uint64_t params[8];
401};
402
359struct opal_machine_check_event { 403struct opal_machine_check_event {
360 enum OpalMCE_Version version:8; /* 0x00 */ 404 enum OpalMCE_Version version:8; /* 0x00 */
361 uint8_t in_use; /* 0x01 */ 405 uint8_t in_use; /* 0x01 */
@@ -404,6 +448,58 @@ struct opal_machine_check_event {
404 } u; 448 } u;
405}; 449};
406 450
451/* FSP memory errors handling */
452enum OpalMemErr_Version {
453 OpalMemErr_V1 = 1,
454};
455
456enum OpalMemErrType {
457 OPAL_MEM_ERR_TYPE_RESILIENCE = 0,
458 OPAL_MEM_ERR_TYPE_DYN_DALLOC,
459 OPAL_MEM_ERR_TYPE_SCRUB,
460};
461
462/* Memory Resilience error type */
463enum OpalMemErr_ResilErrType {
464 OPAL_MEM_RESILIENCE_CE = 0,
465 OPAL_MEM_RESILIENCE_UE,
466 OPAL_MEM_RESILIENCE_UE_SCRUB,
467};
468
469/* Dynamic Memory Deallocation type */
470enum OpalMemErr_DynErrType {
471 OPAL_MEM_DYNAMIC_DEALLOC = 0,
472};
473
474/* OpalMemoryErrorData->flags */
475#define OPAL_MEM_CORRECTED_ERROR 0x0001
476#define OPAL_MEM_THRESHOLD_EXCEEDED 0x0002
477#define OPAL_MEM_ACK_REQUIRED 0x8000
478
479struct OpalMemoryErrorData {
480 enum OpalMemErr_Version version:8; /* 0x00 */
481 enum OpalMemErrType type:8; /* 0x01 */
482 uint16_t flags; /* 0x02 */
483 uint8_t reserved_1[4]; /* 0x04 */
484
485 union {
486 /* Memory Resilience corrected/uncorrected error info */
487 struct {
488 enum OpalMemErr_ResilErrType resil_err_type:8;
489 uint8_t reserved_1[7];
490 uint64_t physical_address_start;
491 uint64_t physical_address_end;
492 } resilience;
493 /* Dynamic memory deallocation error info */
494 struct {
495 enum OpalMemErr_DynErrType dyn_err_type:8;
496 uint8_t reserved_1[7];
497 uint64_t physical_address_start;
498 uint64_t physical_address_end;
499 } dyn_dealloc;
500 } u;
501};
502
407enum { 503enum {
408 OPAL_P7IOC_DIAG_TYPE_NONE = 0, 504 OPAL_P7IOC_DIAG_TYPE_NONE = 0,
409 OPAL_P7IOC_DIAG_TYPE_RGC = 1, 505 OPAL_P7IOC_DIAG_TYPE_RGC = 1,
@@ -710,7 +806,7 @@ int64_t opal_pci_get_phb_diag_data(uint64_t phb_id, void *diag_buffer,
710int64_t opal_pci_get_phb_diag_data2(uint64_t phb_id, void *diag_buffer, 806int64_t opal_pci_get_phb_diag_data2(uint64_t phb_id, void *diag_buffer,
711 uint64_t diag_buffer_len); 807 uint64_t diag_buffer_len);
712int64_t opal_pci_fence_phb(uint64_t phb_id); 808int64_t opal_pci_fence_phb(uint64_t phb_id);
713int64_t opal_pci_reinit(uint64_t phb_id, uint8_t reinit_scope); 809int64_t opal_pci_reinit(uint64_t phb_id, uint64_t reinit_scope, uint64_t data);
714int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action); 810int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
715int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action); 811int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
716int64_t opal_get_epow_status(__be64 *status); 812int64_t opal_get_epow_status(__be64 *status);
@@ -731,6 +827,10 @@ int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
731int64_t opal_manage_flash(uint8_t op); 827int64_t opal_manage_flash(uint8_t op);
732int64_t opal_update_flash(uint64_t blk_list); 828int64_t opal_update_flash(uint64_t blk_list);
733 829
830int64_t opal_get_msg(uint64_t buffer, size_t size);
831int64_t opal_check_completion(uint64_t buffer, size_t size, uint64_t token);
832int64_t opal_sync_host_reboot(void);
833
734/* Internal functions */ 834/* Internal functions */
735extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data); 835extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
736 836
@@ -744,6 +844,8 @@ extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
744 int depth, void *data); 844 int depth, void *data);
745 845
746extern int opal_notifier_register(struct notifier_block *nb); 846extern int opal_notifier_register(struct notifier_block *nb);
847extern int opal_message_notifier_register(enum OpalMessageType msg_type,
848 struct notifier_block *nb);
747extern void opal_notifier_enable(void); 849extern void opal_notifier_enable(void);
748extern void opal_notifier_disable(void); 850extern void opal_notifier_disable(void);
749extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val); 851extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
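
The opal_sg_list added to opal.h above packs as many opal_sg_entry records as fit in one page after the 16-byte header (num_entries plus next), which is where SG_ENTRIES_PER_NODE comes from. A standalone sketch of the arithmetic, assuming a 64-bit build and the common 64K ppc64 page size:

/* Standalone sketch mirroring the opal.h layout above so the
 * SG_ENTRIES_PER_NODE arithmetic can be checked outside the kernel.
 * PAGE_SIZE is an assumption for the demo (64K is typical on ppc64). */
#include <stdio.h>

struct opal_sg_entry { void *data; long length; };
struct opal_sg_list {
	unsigned long num_entries;
	struct opal_sg_list *next;
	struct opal_sg_entry entry[];
};

#define PAGE_SIZE 65536UL	/* assumed 64K pages */
#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))

int main(void)
{
	/* 16 bytes of header leaves the rest of the page for entries */
	printf("sizeof(opal_sg_entry)       = %zu\n", sizeof(struct opal_sg_entry));
	printf("entries per %lu-byte node = %lu\n", PAGE_SIZE,
	       (unsigned long)SG_ENTRIES_PER_NODE);
	return 0;
}
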
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index b6ea9e068c13..9c5dbc3833fb 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -16,7 +16,6 @@
16 16
17#ifdef CONFIG_PPC64 17#ifdef CONFIG_PPC64
18 18
19#include <linux/init.h>
20#include <asm/types.h> 19#include <asm/types.h>
21#include <asm/lppaca.h> 20#include <asm/lppaca.h>
22#include <asm/mmu.h> 21#include <asm/mmu.h>
@@ -113,6 +112,10 @@ struct paca_struct {
113 /* Keep pgd in the same cacheline as the start of extlb */ 112 /* Keep pgd in the same cacheline as the start of extlb */
114 pgd_t *pgd __attribute__((aligned(0x80))); /* Current PGD */ 113 pgd_t *pgd __attribute__((aligned(0x80))); /* Current PGD */
115 pgd_t *kernel_pgd; /* Kernel PGD */ 114 pgd_t *kernel_pgd; /* Kernel PGD */
115
116 /* Shared by all threads of a core -- points to tcd of first thread */
117 struct tlb_core_data *tcd_ptr;
118
116 /* We can have up to 3 levels of reentrancy in the TLB miss handler */ 119 /* We can have up to 3 levels of reentrancy in the TLB miss handler */
117 u64 extlb[3][EX_TLB_SIZE / sizeof(u64)]; 120 u64 extlb[3][EX_TLB_SIZE / sizeof(u64)];
118 u64 exmc[8]; /* used for machine checks */ 121 u64 exmc[8]; /* used for machine checks */
@@ -123,6 +126,8 @@ struct paca_struct {
123 void *mc_kstack; 126 void *mc_kstack;
124 void *crit_kstack; 127 void *crit_kstack;
125 void *dbg_kstack; 128 void *dbg_kstack;
129
130 struct tlb_core_data tcd;
126#endif /* CONFIG_PPC_BOOK3E */ 131#endif /* CONFIG_PPC_BOOK3E */
127 132
128 mm_context_t context; 133 mm_context_t context;
@@ -152,6 +157,15 @@ struct paca_struct {
152 */ 157 */
153 struct opal_machine_check_event *opal_mc_evt; 158 struct opal_machine_check_event *opal_mc_evt;
154#endif 159#endif
160#ifdef CONFIG_PPC_BOOK3S_64
161 /* Exclusive emergency stack pointer for machine check exception. */
162 void *mc_emergency_sp;
163 /*
164 * Flag to check whether we are in machine check early handler
165 * and already using emergency stack.
166 */
167 u16 in_mce;
168#endif
155 169
156 /* Stuff for accurate time accounting */ 170 /* Stuff for accurate time accounting */
157 u64 user_time; /* accumulated usermode TB ticks */ 171 u64 user_time; /* accumulated usermode TB ticks */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 7d6eacf249cf..b999ca318985 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -3,6 +3,7 @@
3#ifdef __KERNEL__ 3#ifdef __KERNEL__
4 4
5#ifndef __ASSEMBLY__ 5#ifndef __ASSEMBLY__
6#include <linux/mmdebug.h>
6#include <asm/processor.h> /* For TASK_SIZE */ 7#include <asm/processor.h> /* For TASK_SIZE */
7#include <asm/mmu.h> 8#include <asm/mmu.h>
8#include <asm/page.h> 9#include <asm/page.h>
@@ -33,10 +34,73 @@ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
33static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } 34static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
34static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } 35static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
35static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } 36static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
36static inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; }
37static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } 37static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
38static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } 38static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
39 39
40#ifdef CONFIG_NUMA_BALANCING
41
42static inline int pte_present(pte_t pte)
43{
44 return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA);
45}
46
47#define pte_numa pte_numa
48static inline int pte_numa(pte_t pte)
49{
50 return (pte_val(pte) &
51 (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
52}
53
54#define pte_mknonnuma pte_mknonnuma
55static inline pte_t pte_mknonnuma(pte_t pte)
56{
57 pte_val(pte) &= ~_PAGE_NUMA;
58 pte_val(pte) |= _PAGE_PRESENT | _PAGE_ACCESSED;
59 return pte;
60}
61
62#define pte_mknuma pte_mknuma
63static inline pte_t pte_mknuma(pte_t pte)
64{
65 /*
 66 * We should not set _PAGE_NUMA on non-present ptes. Also clear the
 67 * present bit so that hash_page will return 1 and we collect this
 68 * as a numa fault.
69 */
70 if (pte_present(pte)) {
71 pte_val(pte) |= _PAGE_NUMA;
72 pte_val(pte) &= ~_PAGE_PRESENT;
73 } else
74 VM_BUG_ON(1);
75 return pte;
76}
77
78#define pmd_numa pmd_numa
79static inline int pmd_numa(pmd_t pmd)
80{
81 return pte_numa(pmd_pte(pmd));
82}
83
84#define pmd_mknonnuma pmd_mknonnuma
85static inline pmd_t pmd_mknonnuma(pmd_t pmd)
86{
87 return pte_pmd(pte_mknonnuma(pmd_pte(pmd)));
88}
89
90#define pmd_mknuma pmd_mknuma
91static inline pmd_t pmd_mknuma(pmd_t pmd)
92{
93 return pte_pmd(pte_mknuma(pmd_pte(pmd)));
94}
95
96# else
97
98static inline int pte_present(pte_t pte)
99{
100 return pte_val(pte) & _PAGE_PRESENT;
101}
102#endif /* CONFIG_NUMA_BALANCING */
103
40/* Conversion functions: convert a page and protection to a page entry, 104/* Conversion functions: convert a page and protection to a page entry,
41 * and a page entry and page directory to the page they refer to. 105 * and a page entry and page directory to the page they refer to.
42 * 106 *
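
The _PAGE_NUMA helpers above trade _PAGE_PRESENT for _PAGE_NUMA so that the next access faults and can be counted as a NUMA hinting fault. As a rough standalone illustration (the bit values and pte type below are made up, not the kernel's definitions), the same transition in plain C:

/* Illustration only: hypothetical bit values, not the kernel's. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_PRESENT  0x1u
#define PAGE_ACCESSED 0x2u
#define PAGE_NUMA     0x4u

typedef uint64_t pte_t;

static int pte_numa(pte_t pte)
{
    return (pte & (PAGE_NUMA | PAGE_PRESENT)) == PAGE_NUMA;
}

static pte_t pte_mknuma(pte_t pte)
{
    /* Only a present pte may be turned into a NUMA hinting pte. */
    assert(pte & PAGE_PRESENT);
    pte |= PAGE_NUMA;
    pte &= ~PAGE_PRESENT;
    return pte;
}

static pte_t pte_mknonnuma(pte_t pte)
{
    pte &= ~PAGE_NUMA;
    pte |= PAGE_PRESENT | PAGE_ACCESSED;
    return pte;
}

int main(void)
{
    pte_t pte = PAGE_PRESENT;

    pte = pte_mknuma(pte);      /* the next access now faults and is counted */
    printf("numa fault candidate: %d\n", pte_numa(pte));
    pte = pte_mknonnuma(pte);   /* the fault handler restores the mapping */
    printf("numa fault candidate: %d\n", pte_numa(pte));
    return 0;
}
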
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index f595b98079ee..6586a40a46ce 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -4,7 +4,6 @@
4#ifndef _ASM_POWERPC_PPC_ASM_H 4#ifndef _ASM_POWERPC_PPC_ASM_H
5#define _ASM_POWERPC_PPC_ASM_H 5#define _ASM_POWERPC_PPC_ASM_H
6 6
7#include <linux/init.h>
8#include <linux/stringify.h> 7#include <linux/stringify.h>
9#include <asm/asm-compat.h> 8#include <asm/asm-compat.h>
10#include <asm/processor.h> 9#include <asm/processor.h>
@@ -295,6 +294,11 @@ n:
295 * you want to access various offsets within it). On ppc32 this is 294 * you want to access various offsets within it). On ppc32 this is
296 * identical to LOAD_REG_IMMEDIATE. 295 * identical to LOAD_REG_IMMEDIATE.
297 * 296 *
297 * LOAD_REG_ADDR_PIC(rn, name)
 298 * Loads the address of label 'name' into register 'rn'. Use this when
299 * the kernel doesn't run at the linked or relocated address. Please
300 * note that this macro will clobber the lr register.
301 *
298 * LOAD_REG_ADDRBASE(rn, name) 302 * LOAD_REG_ADDRBASE(rn, name)
299 * ADDROFF(name) 303 * ADDROFF(name)
300 * LOAD_REG_ADDRBASE loads part of the address of label 'name' into 304 * LOAD_REG_ADDRBASE loads part of the address of label 'name' into
@@ -305,6 +309,14 @@ n:
305 * LOAD_REG_ADDRBASE(rX, name) 309 * LOAD_REG_ADDRBASE(rX, name)
306 * ld rY,ADDROFF(name)(rX) 310 * ld rY,ADDROFF(name)(rX)
307 */ 311 */
312
313/* Be careful, this will clobber the lr register. */
314#define LOAD_REG_ADDR_PIC(reg, name) \
315 bl 0f; \
3160: mflr reg; \
317 addis reg,reg,(name - 0b)@ha; \
318 addi reg,reg,(name - 0b)@l;
319
308#ifdef __powerpc64__ 320#ifdef __powerpc64__
309#define LOAD_REG_IMMEDIATE(reg,expr) \ 321#define LOAD_REG_IMMEDIATE(reg,expr) \
310 lis reg,(expr)@highest; \ 322 lis reg,(expr)@highest; \
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index fc14a38c7ccf..8ca20ac28dc2 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -256,6 +256,8 @@ struct thread_struct {
256 unsigned long evr[32]; /* upper 32-bits of SPE regs */ 256 unsigned long evr[32]; /* upper 32-bits of SPE regs */
257 u64 acc; /* Accumulator */ 257 u64 acc; /* Accumulator */
258 unsigned long spefscr; /* SPE & eFP status */ 258 unsigned long spefscr; /* SPE & eFP status */
259 unsigned long spefscr_last; /* SPEFSCR value on last prctl
260 call or trap return */
259 int used_spe; /* set if process has used spe */ 261 int used_spe; /* set if process has used spe */
260#endif /* CONFIG_SPE */ 262#endif /* CONFIG_SPE */
261#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 263#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -317,7 +319,9 @@ struct thread_struct {
317 (_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack) 319 (_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)
318 320
319#ifdef CONFIG_SPE 321#ifdef CONFIG_SPE
320#define SPEFSCR_INIT .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, 322#define SPEFSCR_INIT \
323 .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, \
324 .spefscr_last = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
321#else 325#else
322#define SPEFSCR_INIT 326#define SPEFSCR_INIT
323#endif 327#endif
@@ -373,6 +377,8 @@ extern int set_endian(struct task_struct *tsk, unsigned int val);
373extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr); 377extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
374extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val); 378extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
375 379
380extern void fp_enable(void);
381extern void vec_enable(void);
376extern void load_fp_state(struct thread_fp_state *fp); 382extern void load_fp_state(struct thread_fp_state *fp);
377extern void store_fp_state(struct thread_fp_state *fp); 383extern void store_fp_state(struct thread_fp_state *fp);
378extern void load_vr_state(struct thread_vr_state *vr); 384extern void load_vr_state(struct thread_vr_state *vr);
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index 678a7c1d9cb8..a1bc7e758422 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -21,7 +21,6 @@
21#if !defined(_ASM_POWERPC_PS3_H) 21#if !defined(_ASM_POWERPC_PS3_H)
22#define _ASM_POWERPC_PS3_H 22#define _ASM_POWERPC_PS3_H
23 23
24#include <linux/init.h>
25#include <linux/types.h> 24#include <linux/types.h>
26#include <linux/device.h> 25#include <linux/device.h>
27#include <asm/cell-pmu.h> 26#include <asm/cell-pmu.h>
diff --git a/arch/powerpc/include/asm/pte-hash64.h b/arch/powerpc/include/asm/pte-hash64.h
index 0419eeb53274..2505d8eab15c 100644
--- a/arch/powerpc/include/asm/pte-hash64.h
+++ b/arch/powerpc/include/asm/pte-hash64.h
@@ -19,7 +19,7 @@
19#define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */ 19#define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */
20#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */ 20#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */
21#define _PAGE_GUARDED 0x0008 21#define _PAGE_GUARDED 0x0008
22#define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */ 22/* We can derive Memory coherence from _PAGE_NO_CACHE */
23#define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */ 23#define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */
24#define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */ 24#define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */
25#define _PAGE_DIRTY 0x0080 /* C: page changed */ 25#define _PAGE_DIRTY 0x0080 /* C: page changed */
@@ -27,6 +27,12 @@
27#define _PAGE_RW 0x0200 /* software: user write access allowed */ 27#define _PAGE_RW 0x0200 /* software: user write access allowed */
28#define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */ 28#define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */
29 29
30/*
31 * Used for tracking numa faults
32 */
33#define _PAGE_NUMA 0x00000010 /* Gather numa placement stats */
34
35
30/* No separate kernel read-only */ 36/* No separate kernel read-only */
31#define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */ 37#define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */
32#define _PAGE_KERNEL_RO _PAGE_KERNEL_RW 38#define _PAGE_KERNEL_RO _PAGE_KERNEL_RW
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index fa8388ed94c5..62b114e079cf 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1075,6 +1075,8 @@
1075#define PVR_8560 0x80200000 1075#define PVR_8560 0x80200000
1076#define PVR_VER_E500V1 0x8020 1076#define PVR_VER_E500V1 0x8020
1077#define PVR_VER_E500V2 0x8021 1077#define PVR_VER_E500V2 0x8021
1078#define PVR_VER_E6500 0x8040
1079
1078/* 1080/*
1079 * For the 8xx processors, all of them report the same PVR family for 1081 * For the 8xx processors, all of them report the same PVR family for
1080 * the PowerPC core. The various versions of these processors must be 1082 * the PowerPC core. The various versions of these processors must be
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 2e31aacd8acc..163c3b05a76e 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -101,6 +101,7 @@
101#define SPRN_IVOR39 0x1B1 /* Interrupt Vector Offset Register 39 */ 101#define SPRN_IVOR39 0x1B1 /* Interrupt Vector Offset Register 39 */
102#define SPRN_IVOR40 0x1B2 /* Interrupt Vector Offset Register 40 */ 102#define SPRN_IVOR40 0x1B2 /* Interrupt Vector Offset Register 40 */
103#define SPRN_IVOR41 0x1B3 /* Interrupt Vector Offset Register 41 */ 103#define SPRN_IVOR41 0x1B3 /* Interrupt Vector Offset Register 41 */
104#define SPRN_IVOR42 0x1B4 /* Interrupt Vector Offset Register 42 */
104#define SPRN_GIVOR2 0x1B8 /* Guest IVOR2 */ 105#define SPRN_GIVOR2 0x1B8 /* Guest IVOR2 */
105#define SPRN_GIVOR3 0x1B9 /* Guest IVOR3 */ 106#define SPRN_GIVOR3 0x1B9 /* Guest IVOR3 */
106#define SPRN_GIVOR4 0x1BA /* Guest IVOR4 */ 107#define SPRN_GIVOR4 0x1BA /* Guest IVOR4 */
@@ -170,6 +171,7 @@
170#define SPRN_L2CSR1 0x3FA /* L2 Data Cache Control and Status Register 1 */ 171#define SPRN_L2CSR1 0x3FA /* L2 Data Cache Control and Status Register 1 */
171#define SPRN_DCCR 0x3FA /* Data Cache Cacheability Register */ 172#define SPRN_DCCR 0x3FA /* Data Cache Cacheability Register */
172#define SPRN_ICCR 0x3FB /* Instruction Cache Cacheability Register */ 173#define SPRN_ICCR 0x3FB /* Instruction Cache Cacheability Register */
174#define SPRN_PWRMGTCR0 0x3FB /* Power management control register 0 */
173#define SPRN_SVR 0x3FF /* System Version Register */ 175#define SPRN_SVR 0x3FF /* System Version Register */
174 176
175/* 177/*
@@ -216,6 +218,14 @@
216#define CCR1_DPC 0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */ 218#define CCR1_DPC 0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */
217#define CCR1_TCS 0x00000080 /* Timer Clock Select */ 219#define CCR1_TCS 0x00000080 /* Timer Clock Select */
218 220
221/* Bit definitions for PWRMGTCR0. */
222#define PWRMGTCR0_PW20_WAIT (1 << 14) /* PW20 state enable bit */
223#define PWRMGTCR0_PW20_ENT_SHIFT 8
224#define PWRMGTCR0_PW20_ENT 0x3F00
225#define PWRMGTCR0_AV_IDLE_PD_EN (1 << 22) /* Altivec idle enable */
226#define PWRMGTCR0_AV_IDLE_CNT_SHIFT 16
227#define PWRMGTCR0_AV_IDLE_CNT 0x3F0000
228
219/* Bit definitions for the MCSR. */ 229/* Bit definitions for the MCSR. */
220#define MCSR_MCS 0x80000000 /* Machine Check Summary */ 230#define MCSR_MCS 0x80000000 /* Machine Check Summary */
221#define MCSR_IB 0x40000000 /* Instruction PLB Error */ 231#define MCSR_IB 0x40000000 /* Instruction PLB Error */
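
setup_pw20_idle() further down uses rlwimi to insert an idle count into the PW20 entry field of PWRMGTCR0. A hedged C sketch of the equivalent read-modify-write, reusing the mask/shift macros added above (the count of 50 is only an example value):

/* Sketch only: builds a PWRMGTCR0 value in C using the bits defined above. */
#include <stdint.h>
#include <stdio.h>

#define PWRMGTCR0_PW20_WAIT       (1u << 14)
#define PWRMGTCR0_PW20_ENT_SHIFT  8
#define PWRMGTCR0_PW20_ENT        0x3F00u

static uint32_t set_pw20_idle(uint32_t pwrmgtcr0, uint32_t idle_count)
{
    pwrmgtcr0 |= PWRMGTCR0_PW20_WAIT;        /* enable the PW20 state */
    pwrmgtcr0 &= ~PWRMGTCR0_PW20_ENT;        /* clear the count field */
    pwrmgtcr0 |= (idle_count << PWRMGTCR0_PW20_ENT_SHIFT) & PWRMGTCR0_PW20_ENT;
    return pwrmgtcr0;
}

int main(void)
{
    printf("PWRMGTCR0 = 0x%08x\n", set_pw20_idle(0, 50));  /* example count */
    return 0;
}
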
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index f6e78d63fb6a..35aa339410bd 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -30,8 +30,6 @@
30 30
31#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */ 31#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */
32 32
33#define arch_spin_is_locked(x) ((x)->slock != 0)
34
35#ifdef CONFIG_PPC64 33#ifdef CONFIG_PPC64
36/* use 0x800000yy when locked, where yy == CPU number */ 34/* use 0x800000yy when locked, where yy == CPU number */
37#ifdef __BIG_ENDIAN__ 35#ifdef __BIG_ENDIAN__
@@ -56,6 +54,16 @@
56#define SYNC_IO 54#define SYNC_IO
57#endif 55#endif
58 56
57static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
58{
59 return lock.slock == 0;
60}
61
62static inline int arch_spin_is_locked(arch_spinlock_t *lock)
63{
64 return !arch_spin_value_unlocked(*lock);
65}
66
59/* 67/*
60 * This returns the old value in the lock, so we succeeded 68 * This returns the old value in the lock, so we succeeded
61 * in getting the lock if the return value is 0. 69 * in getting the lock if the return value is 0.
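
The spinlock change expresses arch_spin_is_locked() in terms of arch_spin_value_unlocked(), the by-value test that the optimised lockref code needs. A minimal C sketch of that relationship, using a toy lock type rather than the real arch_spinlock_t:

/* Sketch only: a toy lock type standing in for arch_spinlock_t. */
#include <stdio.h>

typedef struct { unsigned int slock; } toy_spinlock_t;

/* lockref tests a *copy* of the lock word, hence the by-value helper. */
static int toy_spin_value_unlocked(toy_spinlock_t lock)
{
    return lock.slock == 0;
}

static int toy_spin_is_locked(toy_spinlock_t *lock)
{
    return !toy_spin_value_unlocked(*lock);
}

int main(void)
{
    toy_spinlock_t l = { .slock = 0 };
    printf("locked? %d\n", toy_spin_is_locked(&l));
    l.slock = 0x80000001u;   /* 0x800000yy when held, yy == CPU number */
    printf("locked? %d\n", toy_spin_is_locked(&l));
    return 0;
}
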
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 9854c564ac52..b034ecdb7c74 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -91,8 +91,7 @@ static inline struct thread_info *current_thread_info(void)
91#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling 91#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
92 TIF_NEED_RESCHED */ 92 TIF_NEED_RESCHED */
93#define TIF_32BIT 4 /* 32 bit binary */ 93#define TIF_32BIT 4 /* 32 bit binary */
94#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */ 94#define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
95#define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
96#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 95#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
97#define TIF_SINGLESTEP 8 /* singlestepping active */ 96#define TIF_SINGLESTEP 8 /* singlestepping active */
98#define TIF_NOHZ 9 /* in adaptive nohz mode */ 97#define TIF_NOHZ 9 /* in adaptive nohz mode */
@@ -115,8 +114,7 @@ static inline struct thread_info *current_thread_info(void)
115#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 114#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
116#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 115#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
117#define _TIF_32BIT (1<<TIF_32BIT) 116#define _TIF_32BIT (1<<TIF_32BIT)
118#define _TIF_PERFMON_WORK (1<<TIF_PERFMON_WORK) 117#define _TIF_RESTORE_TM (1<<TIF_RESTORE_TM)
119#define _TIF_PERFMON_CTXSW (1<<TIF_PERFMON_CTXSW)
120#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 118#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
121#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) 119#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
122#define _TIF_SECCOMP (1<<TIF_SECCOMP) 120#define _TIF_SECCOMP (1<<TIF_SECCOMP)
@@ -132,7 +130,8 @@ static inline struct thread_info *current_thread_info(void)
132 _TIF_NOHZ) 130 _TIF_NOHZ)
133 131
134#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ 132#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
135 _TIF_NOTIFY_RESUME | _TIF_UPROBE) 133 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
134 _TIF_RESTORE_TM)
136#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) 135#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
137 136
138/* Bits in local_flags */ 137/* Bits in local_flags */
diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h
index 9dfbc34bdbf5..0c9f8b74dd97 100644
--- a/arch/powerpc/include/asm/tm.h
+++ b/arch/powerpc/include/asm/tm.h
@@ -15,6 +15,7 @@ extern void do_load_up_transact_altivec(struct thread_struct *thread);
15extern void tm_enable(void); 15extern void tm_enable(void);
16extern void tm_reclaim(struct thread_struct *thread, 16extern void tm_reclaim(struct thread_struct *thread,
17 unsigned long orig_msr, uint8_t cause); 17 unsigned long orig_msr, uint8_t cause);
18extern void tm_reclaim_current(uint8_t cause);
18extern void tm_recheckpoint(struct thread_struct *thread, 19extern void tm_recheckpoint(struct thread_struct *thread,
19 unsigned long orig_msr); 20 unsigned long orig_msr);
20extern void tm_abort(uint8_t cause); 21extern void tm_abort(uint8_t cause);
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 89e3ef2496ac..d0b5fca6b077 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -22,7 +22,15 @@ struct device_node;
22 22
23static inline int cpu_to_node(int cpu) 23static inline int cpu_to_node(int cpu)
24{ 24{
25 return numa_cpu_lookup_table[cpu]; 25 int nid;
26
27 nid = numa_cpu_lookup_table[cpu];
28
29 /*
30 * During early boot, the numa-cpu lookup table might not have been
 31 * set up for all CPUs yet. In such cases, default to node 0.
32 */
33 return (nid < 0) ? 0 : nid;
26} 34}
27 35
28#define parent_node(node) (node) 36#define parent_node(node) (node)
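
The reworked cpu_to_node() simply guards against lookup entries that have not been filled in yet. A tiny C sketch, assuming (as the kernel does) that unassigned entries are negative:

/* Sketch only: assumes unassigned lookup entries hold -1. */
#include <stdio.h>

#define NR_CPUS 4
static int numa_cpu_lookup_table[NR_CPUS] = { 0, 1, -1, -1 };

static int cpu_to_node(int cpu)
{
    int nid = numa_cpu_lookup_table[cpu];

    /* Early in boot some CPUs have no node yet; fall back to node 0. */
    return (nid < 0) ? 0 : nid;
}

int main(void)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        printf("cpu %d -> node %d\n", cpu, cpu_to_node(cpu));
    return 0;
}
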
diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h
index 68d0cc998b1b..4f9b7ca0710f 100644
--- a/arch/powerpc/include/asm/vio.h
+++ b/arch/powerpc/include/asm/vio.h
@@ -15,7 +15,6 @@
15#define _ASM_POWERPC_VIO_H 15#define _ASM_POWERPC_VIO_H
16#ifdef __KERNEL__ 16#ifdef __KERNEL__
17 17
18#include <linux/init.h>
19#include <linux/errno.h> 18#include <linux/errno.h>
20#include <linux/device.h> 19#include <linux/device.h>
21#include <linux/dma-mapping.h> 20#include <linux/dma-mapping.h>
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 445cb6e39d5b..904d713366ff 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
39obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o 39obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
40obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o 40obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
41obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o 41obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
42obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
42obj64-$(CONFIG_RELOCATABLE) += reloc_64.o 43obj64-$(CONFIG_RELOCATABLE) += reloc_64.o
43obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o 44obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
44obj-$(CONFIG_PPC_A2) += cpu_setup_a2.o 45obj-$(CONFIG_PPC_A2) += cpu_setup_a2.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index d3de01066f7d..8d1d94d9c649 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -203,6 +203,15 @@ int main(void)
203 DEFINE(PACA_MC_STACK, offsetof(struct paca_struct, mc_kstack)); 203 DEFINE(PACA_MC_STACK, offsetof(struct paca_struct, mc_kstack));
204 DEFINE(PACA_CRIT_STACK, offsetof(struct paca_struct, crit_kstack)); 204 DEFINE(PACA_CRIT_STACK, offsetof(struct paca_struct, crit_kstack));
205 DEFINE(PACA_DBG_STACK, offsetof(struct paca_struct, dbg_kstack)); 205 DEFINE(PACA_DBG_STACK, offsetof(struct paca_struct, dbg_kstack));
206 DEFINE(PACA_TCD_PTR, offsetof(struct paca_struct, tcd_ptr));
207
208 DEFINE(TCD_ESEL_NEXT,
209 offsetof(struct tlb_core_data, esel_next));
210 DEFINE(TCD_ESEL_MAX,
211 offsetof(struct tlb_core_data, esel_max));
212 DEFINE(TCD_ESEL_FIRST,
213 offsetof(struct tlb_core_data, esel_first));
214 DEFINE(TCD_LOCK, offsetof(struct tlb_core_data, lock));
206#endif /* CONFIG_PPC_BOOK3E */ 215#endif /* CONFIG_PPC_BOOK3E */
207 216
208#ifdef CONFIG_PPC_STD_MMU_64 217#ifdef CONFIG_PPC_STD_MMU_64
@@ -232,6 +241,10 @@ int main(void)
232 DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx)); 241 DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
233#endif /* CONFIG_PPC_STD_MMU_64 */ 242#endif /* CONFIG_PPC_STD_MMU_64 */
234 DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); 243 DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
244#ifdef CONFIG_PPC_BOOK3S_64
245 DEFINE(PACAMCEMERGSP, offsetof(struct paca_struct, mc_emergency_sp));
246 DEFINE(PACA_IN_MCE, offsetof(struct paca_struct, in_mce));
247#endif
235 DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); 248 DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
236 DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state)); 249 DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
237 DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime)); 250 DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime));
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index 654932727873..abfa011344d9 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -12,7 +12,6 @@
12 12
13#include <linux/cpu.h> 13#include <linux/cpu.h>
14#include <linux/cpumask.h> 14#include <linux/cpumask.h>
15#include <linux/init.h>
16#include <linux/kernel.h> 15#include <linux/kernel.h>
17#include <linux/kobject.h> 16#include <linux/kobject.h>
18#include <linux/list.h> 17#include <linux/list.h>
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index bfb18c7290b7..cc2d8962e090 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -53,11 +53,57 @@ _GLOBAL(__e500_dcache_setup)
53 isync 53 isync
54 blr 54 blr
55 55
56/*
57 * FIXME - we haven't yet done testing to determine a reasonable default
58 * value for PW20_WAIT_IDLE_BIT.
59 */
 60#define PW20_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHz */
61_GLOBAL(setup_pw20_idle)
62 mfspr r3, SPRN_PWRMGTCR0
63
 64 /* Set PW20_WAIT bit, enable pw20 state */
65 ori r3, r3, PWRMGTCR0_PW20_WAIT
66 li r11, PW20_WAIT_IDLE_BIT
67
68 /* Set Automatic PW20 Core Idle Count */
69 rlwimi r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT
70
71 mtspr SPRN_PWRMGTCR0, r3
72
73 blr
74
75/*
76 * FIXME - we haven't yet done testing to determine a reasonable default
77 * value for AV_WAIT_IDLE_BIT.
78 */
 79#define AV_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHz */
80_GLOBAL(setup_altivec_idle)
81 mfspr r3, SPRN_PWRMGTCR0
82
83 /* Enable Altivec Idle */
84 oris r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h
85 li r11, AV_WAIT_IDLE_BIT
86
87 /* Set Automatic AltiVec Idle Count */
88 rlwimi r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT
89
90 mtspr SPRN_PWRMGTCR0, r3
91
92 blr
93
56_GLOBAL(__setup_cpu_e6500) 94_GLOBAL(__setup_cpu_e6500)
57 mflr r6 95 mflr r6
58#ifdef CONFIG_PPC64 96#ifdef CONFIG_PPC64
59 bl .setup_altivec_ivors 97 bl .setup_altivec_ivors
98 /* Touch IVOR42 only if the CPU supports E.HV category */
99 mfspr r10,SPRN_MMUCFG
100 rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
101 beq 1f
102 bl .setup_lrat_ivor
1031:
60#endif 104#endif
105 bl setup_pw20_idle
106 bl setup_altivec_idle
61 bl __setup_cpu_e5500 107 bl __setup_cpu_e5500
62 mtlr r6 108 mtlr r6
63 blr 109 blr
@@ -119,6 +165,14 @@ _GLOBAL(__setup_cpu_e5500)
119_GLOBAL(__restore_cpu_e6500) 165_GLOBAL(__restore_cpu_e6500)
120 mflr r5 166 mflr r5
121 bl .setup_altivec_ivors 167 bl .setup_altivec_ivors
168 /* Touch IVOR42 only if the CPU supports E.HV category */
169 mfspr r10,SPRN_MMUCFG
170 rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
171 beq 1f
172 bl .setup_lrat_ivor
1731:
174 bl .setup_pw20_idle
175 bl .setup_altivec_idle
122 bl __restore_cpu_e5500 176 bl __restore_cpu_e5500
123 mtlr r5 177 mtlr r5
124 blr 178 blr
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 18b5b9cf8e37..37d1bb002aa9 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -29,7 +29,7 @@ _GLOBAL(__setup_cpu_power7)
29 mtspr SPRN_LPID,r0 29 mtspr SPRN_LPID,r0
30 mfspr r3,SPRN_LPCR 30 mfspr r3,SPRN_LPCR
31 bl __init_LPCR 31 bl __init_LPCR
32 bl __init_TLB 32 bl __init_tlb_power7
33 mtlr r11 33 mtlr r11
34 blr 34 blr
35 35
@@ -42,7 +42,7 @@ _GLOBAL(__restore_cpu_power7)
42 mtspr SPRN_LPID,r0 42 mtspr SPRN_LPID,r0
43 mfspr r3,SPRN_LPCR 43 mfspr r3,SPRN_LPCR
44 bl __init_LPCR 44 bl __init_LPCR
45 bl __init_TLB 45 bl __init_tlb_power7
46 mtlr r11 46 mtlr r11
47 blr 47 blr
48 48
@@ -59,7 +59,7 @@ _GLOBAL(__setup_cpu_power8)
59 oris r3, r3, LPCR_AIL_3@h 59 oris r3, r3, LPCR_AIL_3@h
60 bl __init_LPCR 60 bl __init_LPCR
61 bl __init_HFSCR 61 bl __init_HFSCR
62 bl __init_TLB 62 bl __init_tlb_power8
63 bl __init_PMU_HV 63 bl __init_PMU_HV
64 mtlr r11 64 mtlr r11
65 blr 65 blr
@@ -78,7 +78,7 @@ _GLOBAL(__restore_cpu_power8)
78 oris r3, r3, LPCR_AIL_3@h 78 oris r3, r3, LPCR_AIL_3@h
79 bl __init_LPCR 79 bl __init_LPCR
80 bl __init_HFSCR 80 bl __init_HFSCR
81 bl __init_TLB 81 bl __init_tlb_power8
82 bl __init_PMU_HV 82 bl __init_PMU_HV
83 mtlr r11 83 mtlr r11
84 blr 84 blr
@@ -134,15 +134,31 @@ __init_HFSCR:
134 mtspr SPRN_HFSCR,r3 134 mtspr SPRN_HFSCR,r3
135 blr 135 blr
136 136
137__init_TLB: 137/*
138 /* 138 * Clear the TLB using the specified IS form of tlbiel instruction
139 * Clear the TLB using the "IS 3" form of tlbiel instruction 139 * (invalidate by congruence class). P7 has 128 CCs, P8 has 512.
140 * (invalidate by congruence class). P7 has 128 CCs, P8 has 512 140 *
141 * so we just always do 512 141 * r3 = IS field
142 */ 142 */
143__init_tlb_power7:
144 li r3,0xc00 /* IS field = 0b11 */
145_GLOBAL(__flush_tlb_power7)
146 li r6,128
147 mtctr r6
148 mr r7,r3 /* IS field */
149 ptesync
1502: tlbiel r7
151 addi r7,r7,0x1000
152 bdnz 2b
153 ptesync
1541: blr
155
156__init_tlb_power8:
157 li r3,0xc00 /* IS field = 0b11 */
158_GLOBAL(__flush_tlb_power8)
143 li r6,512 159 li r6,512
144 mtctr r6 160 mtctr r6
145 li r7,0xc00 /* IS field = 0b11 */ 161 mr r7,r3 /* IS field */
146 ptesync 162 ptesync
1472: tlbiel r7 1632: tlbiel r7
148 addi r7,r7,0x1000 164 addi r7,r7,0x1000
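
The __init_tlb_power7/8 rework parameterises the tlbiel loop: the caller supplies the IS field in r3, POWER7 walks 128 congruence classes and POWER8 walks 512, with RB advancing by 0x1000 per iteration. A C-shaped sketch of that loop, with a stub standing in for the tlbiel instruction:

/* Sketch only: the tlbiel loop shape in C; the stub below is not a real flush. */
#include <stdio.h>

static void tlbiel(unsigned long rb) { (void)rb; /* would invalidate one set */ }

/* P7 walks 128 congruence classes, P8 walks 512; RB advances by 0x1000 each step. */
static void flush_tlb(unsigned long is_field, unsigned int sets)
{
    unsigned long rb = is_field;

    for (unsigned int i = 0; i < sets; i++, rb += 0x1000)
        tlbiel(rb);
    printf("flushed %u sets starting at 0x%lx\n", sets, is_field);
}

int main(void)
{
    flush_tlb(0xc00, 128);   /* POWER7-style */
    flush_tlb(0xc00, 512);   /* POWER8-style */
    return 0;
}
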
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 597d954e5860..6c8dd5da4de5 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -71,6 +71,10 @@ extern void __restore_cpu_power7(void);
71extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec); 71extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec);
72extern void __restore_cpu_power8(void); 72extern void __restore_cpu_power8(void);
73extern void __restore_cpu_a2(void); 73extern void __restore_cpu_a2(void);
74extern void __flush_tlb_power7(unsigned long inval_selector);
75extern void __flush_tlb_power8(unsigned long inval_selector);
76extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
77extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
74#endif /* CONFIG_PPC64 */ 78#endif /* CONFIG_PPC64 */
75#if defined(CONFIG_E500) 79#if defined(CONFIG_E500)
76extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); 80extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
@@ -440,6 +444,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
440 .oprofile_cpu_type = "ppc64/ibm-compat-v1", 444 .oprofile_cpu_type = "ppc64/ibm-compat-v1",
441 .cpu_setup = __setup_cpu_power7, 445 .cpu_setup = __setup_cpu_power7,
442 .cpu_restore = __restore_cpu_power7, 446 .cpu_restore = __restore_cpu_power7,
447 .flush_tlb = __flush_tlb_power7,
448 .machine_check_early = __machine_check_early_realmode_p7,
443 .platform = "power7", 449 .platform = "power7",
444 }, 450 },
445 { /* 2.07-compliant processor, i.e. Power8 "architected" mode */ 451 { /* 2.07-compliant processor, i.e. Power8 "architected" mode */
@@ -456,6 +462,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
456 .oprofile_cpu_type = "ppc64/ibm-compat-v1", 462 .oprofile_cpu_type = "ppc64/ibm-compat-v1",
457 .cpu_setup = __setup_cpu_power8, 463 .cpu_setup = __setup_cpu_power8,
458 .cpu_restore = __restore_cpu_power8, 464 .cpu_restore = __restore_cpu_power8,
465 .flush_tlb = __flush_tlb_power8,
466 .machine_check_early = __machine_check_early_realmode_p8,
459 .platform = "power8", 467 .platform = "power8",
460 }, 468 },
461 { /* Power7 */ 469 { /* Power7 */
@@ -474,6 +482,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
474 .oprofile_type = PPC_OPROFILE_POWER4, 482 .oprofile_type = PPC_OPROFILE_POWER4,
475 .cpu_setup = __setup_cpu_power7, 483 .cpu_setup = __setup_cpu_power7,
476 .cpu_restore = __restore_cpu_power7, 484 .cpu_restore = __restore_cpu_power7,
485 .flush_tlb = __flush_tlb_power7,
486 .machine_check_early = __machine_check_early_realmode_p7,
477 .platform = "power7", 487 .platform = "power7",
478 }, 488 },
479 { /* Power7+ */ 489 { /* Power7+ */
@@ -492,6 +502,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
492 .oprofile_type = PPC_OPROFILE_POWER4, 502 .oprofile_type = PPC_OPROFILE_POWER4,
493 .cpu_setup = __setup_cpu_power7, 503 .cpu_setup = __setup_cpu_power7,
494 .cpu_restore = __restore_cpu_power7, 504 .cpu_restore = __restore_cpu_power7,
505 .flush_tlb = __flush_tlb_power7,
506 .machine_check_early = __machine_check_early_realmode_p7,
495 .platform = "power7+", 507 .platform = "power7+",
496 }, 508 },
497 { /* Power8E */ 509 { /* Power8E */
@@ -510,6 +522,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
510 .oprofile_type = PPC_OPROFILE_INVALID, 522 .oprofile_type = PPC_OPROFILE_INVALID,
511 .cpu_setup = __setup_cpu_power8, 523 .cpu_setup = __setup_cpu_power8,
512 .cpu_restore = __restore_cpu_power8, 524 .cpu_restore = __restore_cpu_power8,
525 .flush_tlb = __flush_tlb_power8,
526 .machine_check_early = __machine_check_early_realmode_p8,
513 .platform = "power8", 527 .platform = "power8",
514 }, 528 },
515 { /* Power8 */ 529 { /* Power8 */
@@ -528,6 +542,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
528 .oprofile_type = PPC_OPROFILE_INVALID, 542 .oprofile_type = PPC_OPROFILE_INVALID,
529 .cpu_setup = __setup_cpu_power8, 543 .cpu_setup = __setup_cpu_power8,
530 .cpu_restore = __restore_cpu_power8, 544 .cpu_restore = __restore_cpu_power8,
545 .flush_tlb = __flush_tlb_power8,
546 .machine_check_early = __machine_check_early_realmode_p8,
531 .platform = "power8", 547 .platform = "power8",
532 }, 548 },
533 { /* Cell Broadband Engine */ 549 { /* Cell Broadband Engine */
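
Each cpu_spec entry now carries per-family flush_tlb and machine_check_early callbacks. A small C sketch of dispatching through such optional callbacks; the struct and function names here are stand-ins, not the kernel's:

/* Sketch only: optional per-CPU-family callbacks dispatched through a spec table. */
#include <stdio.h>
#include <stddef.h>

struct toy_cpu_spec {
    const char *platform;
    void (*flush_tlb)(unsigned long inval_selector);
    long (*machine_check_early)(void *regs);
};

static void flush_tlb_p7(unsigned long sel) { printf("p7 flush, IS=%lu\n", sel); }
static long mce_early_p7(void *regs) { (void)regs; return 1; }

static const struct toy_cpu_spec cur_cpu_spec = {
    .platform = "power7",
    .flush_tlb = flush_tlb_p7,
    .machine_check_early = mce_early_p7,
};

int main(void)
{
    /* Callers must tolerate families that provide no callback. */
    if (cur_cpu_spec.flush_tlb)
        cur_cpu_spec.flush_tlb(3);
    if (cur_cpu_spec.machine_check_early)
        printf("early handler returned %ld\n",
               cur_cpu_spec.machine_check_early(NULL));
    return 0;
}
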
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index fdcd8f551aff..18d7c80ddeb9 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -17,7 +17,6 @@
17#include <linux/export.h> 17#include <linux/export.h>
18#include <linux/crash_dump.h> 18#include <linux/crash_dump.h>
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/init.h>
21#include <linux/irq.h> 20#include <linux/irq.h>
22#include <linux/types.h> 21#include <linux/types.h>
23 22
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index e4897523de41..54d0116256f7 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -83,10 +83,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
83 return 0; 83 return 0;
84 } 84 }
85 85
86 if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT)) { 86 if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
87 dev_info(dev, "Warning: IOMMU offset too big for device mask\n"); 87 dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
88 dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n", 88 dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
89 mask, tbl->it_offset << IOMMU_PAGE_SHIFT); 89 mask, tbl->it_offset << tbl->it_page_shift);
90 return 0; 90 return 0;
91 } else 91 } else
92 return 1; 92 return 1;
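
The dma-iommu fix checks the device's DMA mask against the table offset using the table's own it_page_shift instead of the global IOMMU page shift. A standalone sketch of that comparison with made-up numbers:

/* Sketch only: made-up values, just to show the offset-vs-mask comparison. */
#include <stdint.h>
#include <stdio.h>

struct toy_iommu_table {
    unsigned long it_offset;      /* offset of the table, in IOMMU pages */
    unsigned int  it_page_shift;  /* log2 of this table's IOMMU page size */
};

static int toy_dma_supported(const struct toy_iommu_table *tbl, uint64_t mask)
{
    /* The device must be able to address the start of the table. */
    if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
        printf("mask 0x%llx cannot reach table offset 0x%lx\n",
               (unsigned long long)mask,
               tbl->it_offset << tbl->it_page_shift);
        return 0;
    }
    return 1;
}

int main(void)
{
    struct toy_iommu_table tbl = { .it_offset = 0x1000, .it_page_shift = 16 };

    printf("%d\n", toy_dma_supported(&tbl, 0xffffffffULL)); /* 32-bit mask: ok */
    printf("%d\n", toy_dma_supported(&tbl, 0xffffffULL));   /* 24-bit mask: too small */
    return 0;
}
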
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 4bd687d5e7aa..148db72a8c43 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -84,7 +84,7 @@
84#define EEH_MAX_FAILS 2100000 84#define EEH_MAX_FAILS 2100000
85 85
86/* Time to wait for a PCI slot to report status, in milliseconds */ 86/* Time to wait for a PCI slot to report status, in milliseconds */
87#define PCI_BUS_RESET_WAIT_MSEC (60*1000) 87#define PCI_BUS_RESET_WAIT_MSEC (5*60*1000)
88 88
89/* Platform dependent EEH operations */ 89/* Platform dependent EEH operations */
90struct eeh_ops *eeh_ops = NULL; 90struct eeh_ops *eeh_ops = NULL;
@@ -921,6 +921,13 @@ void eeh_add_device_late(struct pci_dev *dev)
921 eeh_sysfs_remove_device(edev->pdev); 921 eeh_sysfs_remove_device(edev->pdev);
922 edev->mode &= ~EEH_DEV_SYSFS; 922 edev->mode &= ~EEH_DEV_SYSFS;
923 923
924 /*
 925 * The PCI device should already have been removed,
 926 * though that didn't happen correctly. So we needn't call
 927 * into the error handler afterwards.
928 */
929 edev->mode |= EEH_DEV_NO_HANDLER;
930
924 edev->pdev = NULL; 931 edev->pdev = NULL;
925 dev->dev.archdata.edev = NULL; 932 dev->dev.archdata.edev = NULL;
926 } 933 }
@@ -1023,6 +1030,14 @@ void eeh_remove_device(struct pci_dev *dev)
1023 else 1030 else
1024 edev->mode |= EEH_DEV_DISCONNECTED; 1031 edev->mode |= EEH_DEV_DISCONNECTED;
1025 1032
1033 /*
 1034 * We're removing the device from the PCI subsystem, which means
 1035 * the PCI device driver can't support EEH, or can't support
 1036 * it well. So we rely entirely on hotplug to do recovery
 1037 * for this specific PCI device.
1038 */
1039 edev->mode |= EEH_DEV_NO_HANDLER;
1040
1026 eeh_addr_cache_rmv_dev(dev); 1041 eeh_addr_cache_rmv_dev(dev);
1027 eeh_sysfs_remove_device(dev); 1042 eeh_sysfs_remove_device(dev);
1028 edev->mode &= ~EEH_DEV_SYSFS; 1043 edev->mode &= ~EEH_DEV_SYSFS;
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index c17f90d0f73c..7bb30dca4e19 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -217,7 +217,8 @@ static void *eeh_report_mmio_enabled(void *data, void *userdata)
217 if (!driver) return NULL; 217 if (!driver) return NULL;
218 218
219 if (!driver->err_handler || 219 if (!driver->err_handler ||
220 !driver->err_handler->mmio_enabled) { 220 !driver->err_handler->mmio_enabled ||
221 (edev->mode & EEH_DEV_NO_HANDLER)) {
221 eeh_pcid_put(dev); 222 eeh_pcid_put(dev);
222 return NULL; 223 return NULL;
223 } 224 }
@@ -258,7 +259,8 @@ static void *eeh_report_reset(void *data, void *userdata)
258 eeh_enable_irq(dev); 259 eeh_enable_irq(dev);
259 260
260 if (!driver->err_handler || 261 if (!driver->err_handler ||
261 !driver->err_handler->slot_reset) { 262 !driver->err_handler->slot_reset ||
263 (edev->mode & EEH_DEV_NO_HANDLER)) {
262 eeh_pcid_put(dev); 264 eeh_pcid_put(dev);
263 return NULL; 265 return NULL;
264 } 266 }
@@ -297,7 +299,9 @@ static void *eeh_report_resume(void *data, void *userdata)
297 eeh_enable_irq(dev); 299 eeh_enable_irq(dev);
298 300
299 if (!driver->err_handler || 301 if (!driver->err_handler ||
300 !driver->err_handler->resume) { 302 !driver->err_handler->resume ||
303 (edev->mode & EEH_DEV_NO_HANDLER)) {
304 edev->mode &= ~EEH_DEV_NO_HANDLER;
301 eeh_pcid_put(dev); 305 eeh_pcid_put(dev);
302 return NULL; 306 return NULL;
303 } 307 }
@@ -476,7 +480,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
476/* The longest amount of time to wait for a pci device 480/* The longest amount of time to wait for a pci device
477 * to come back on line, in seconds. 481 * to come back on line, in seconds.
478 */ 482 */
479#define MAX_WAIT_FOR_RECOVERY 150 483#define MAX_WAIT_FOR_RECOVERY 300
480 484
481static void eeh_handle_normal_event(struct eeh_pe *pe) 485static void eeh_handle_normal_event(struct eeh_pe *pe)
482{ 486{
@@ -637,86 +641,92 @@ static void eeh_handle_special_event(void)
637{ 641{
638 struct eeh_pe *pe, *phb_pe; 642 struct eeh_pe *pe, *phb_pe;
639 struct pci_bus *bus; 643 struct pci_bus *bus;
640 struct pci_controller *hose, *tmp; 644 struct pci_controller *hose;
641 unsigned long flags; 645 unsigned long flags;
642 int rc = 0; 646 int rc;
643 647
644 /*
645 * The return value from next_error() has been classified as follows.
646 * It might be good to enumerate them. However, next_error() is only
647 * supported by PowerNV platform for now. So it would be fine to use
648 * integer directly:
649 *
650 * 4 - Dead IOC 3 - Dead PHB
651 * 2 - Fenced PHB 1 - Frozen PE
652 * 0 - No error found
653 *
654 */
655 rc = eeh_ops->next_error(&pe);
656 if (rc <= 0)
657 return;
658 648
659 switch (rc) { 649 do {
660 case 4: 650 rc = eeh_ops->next_error(&pe);
661 /* Mark all PHBs in dead state */ 651
662 eeh_serialize_lock(&flags); 652 switch (rc) {
663 list_for_each_entry_safe(hose, tmp, 653 case EEH_NEXT_ERR_DEAD_IOC:
664 &hose_list, list_node) { 654 /* Mark all PHBs in dead state */
665 phb_pe = eeh_phb_pe_get(hose); 655 eeh_serialize_lock(&flags);
666 if (!phb_pe) continue; 656
667 657 /* Purge all events */
668 eeh_pe_state_mark(phb_pe, 658 eeh_remove_event(NULL);
669 EEH_PE_ISOLATED | EEH_PE_PHB_DEAD); 659
660 list_for_each_entry(hose, &hose_list, list_node) {
661 phb_pe = eeh_phb_pe_get(hose);
662 if (!phb_pe) continue;
663
664 eeh_pe_state_mark(phb_pe,
665 EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
666 }
667
668 eeh_serialize_unlock(flags);
669
670 break;
671 case EEH_NEXT_ERR_FROZEN_PE:
672 case EEH_NEXT_ERR_FENCED_PHB:
673 case EEH_NEXT_ERR_DEAD_PHB:
674 /* Mark the PE in fenced state */
675 eeh_serialize_lock(&flags);
676
677 /* Purge all events of the PHB */
678 eeh_remove_event(pe);
679
680 if (rc == EEH_NEXT_ERR_DEAD_PHB)
681 eeh_pe_state_mark(pe,
682 EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
683 else
684 eeh_pe_state_mark(pe,
685 EEH_PE_ISOLATED | EEH_PE_RECOVERING);
686
687 eeh_serialize_unlock(flags);
688
689 break;
690 case EEH_NEXT_ERR_NONE:
691 return;
692 default:
693 pr_warn("%s: Invalid value %d from next_error()\n",
694 __func__, rc);
695 return;
670 } 696 }
671 eeh_serialize_unlock(flags);
672
673 /* Purge all events */
674 eeh_remove_event(NULL);
675 break;
676 case 3:
677 case 2:
678 case 1:
679 /* Mark the PE in fenced state */
680 eeh_serialize_lock(&flags);
681 if (rc == 3)
682 eeh_pe_state_mark(pe,
683 EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
684 else
685 eeh_pe_state_mark(pe,
686 EEH_PE_ISOLATED | EEH_PE_RECOVERING);
687 eeh_serialize_unlock(flags);
688
689 /* Purge all events of the PHB */
690 eeh_remove_event(pe);
691 break;
692 default:
693 pr_err("%s: Invalid value %d from next_error()\n",
694 __func__, rc);
695 return;
696 }
697 697
698 /* 698 /*
699 * For fenced PHB and frozen PE, it's handled as normal 699 * For fenced PHB and frozen PE, it's handled as normal
700 * event. We have to remove the affected PHBs for dead 700 * event. We have to remove the affected PHBs for dead
701 * PHB and IOC 701 * PHB and IOC
702 */ 702 */
703 if (rc == 2 || rc == 1) 703 if (rc == EEH_NEXT_ERR_FROZEN_PE ||
704 eeh_handle_normal_event(pe); 704 rc == EEH_NEXT_ERR_FENCED_PHB) {
705 else { 705 eeh_handle_normal_event(pe);
706 pci_lock_rescan_remove(); 706 } else {
707 list_for_each_entry_safe(hose, tmp, 707 pci_lock_rescan_remove();
708 &hose_list, list_node) { 708 list_for_each_entry(hose, &hose_list, list_node) {
709 phb_pe = eeh_phb_pe_get(hose); 709 phb_pe = eeh_phb_pe_get(hose);
710 if (!phb_pe || !(phb_pe->state & EEH_PE_PHB_DEAD)) 710 if (!phb_pe ||
711 continue; 711 !(phb_pe->state & EEH_PE_PHB_DEAD))
712 712 continue;
713 bus = eeh_pe_bus_get(phb_pe); 713
714 /* Notify all devices that they're about to go down. */ 714 /* Notify all devices to be down */
715 eeh_pe_dev_traverse(pe, eeh_report_failure, NULL); 715 bus = eeh_pe_bus_get(phb_pe);
716 pcibios_remove_pci_devices(bus); 716 eeh_pe_dev_traverse(pe,
717 eeh_report_failure, NULL);
718 pcibios_remove_pci_devices(bus);
719 }
720 pci_unlock_rescan_remove();
717 } 721 }
718 pci_unlock_rescan_remove(); 722
719 } 723 /*
724 * If we have detected dead IOC, we needn't proceed
725 * any more since all PHBs would have been removed
726 */
727 if (rc == EEH_NEXT_ERR_DEAD_IOC)
728 break;
729 } while (rc != EEH_NEXT_ERR_NONE);
720} 730}
721 731
722/** 732/**
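
eeh_handle_special_event() now loops until next_error() reports no more errors, switching on named EEH_NEXT_ERR_* values instead of magic integers, and bails out once a dead IOC has taken all PHBs down. A compact sketch of that loop shape (the enum values and messages below are stand-ins):

/* Sketch only: a stand-in loop showing the "drain all pending errors" shape. */
#include <stdio.h>

enum next_err {
    ERR_NONE = 0, ERR_FROZEN_PE, ERR_FENCED_PHB, ERR_DEAD_PHB, ERR_DEAD_IOC,
};

/* Pretend error source: returns a canned sequence, then ERR_NONE forever. */
static enum next_err next_error(void)
{
    static const enum next_err seq[] = { ERR_FROZEN_PE, ERR_FENCED_PHB, ERR_NONE };
    static unsigned int i;
    return seq[i < 2 ? i++ : 2];
}

int main(void)
{
    enum next_err rc;

    do {
        rc = next_error();
        switch (rc) {
        case ERR_DEAD_IOC:
            printf("mark all PHBs dead\n");
            break;
        case ERR_FROZEN_PE:
        case ERR_FENCED_PHB:
        case ERR_DEAD_PHB:
            printf("mark PE/PHB isolated and recover\n");
            break;
        case ERR_NONE:
            break;
        default:
            printf("unexpected value %d\n", rc);
            return 1;
        }
        if (rc == ERR_DEAD_IOC)
            break;          /* all PHBs gone, nothing more to process */
    } while (rc != ERR_NONE);

    return 0;
}
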
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index f9450537e335..f0c353fa655a 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -25,7 +25,6 @@
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/export.h> 26#include <linux/export.h>
27#include <linux/gfp.h> 27#include <linux/gfp.h>
28#include <linux/init.h>
29#include <linux/kernel.h> 28#include <linux/kernel.h>
30#include <linux/pci.h> 29#include <linux/pci.h>
31#include <linux/string.h> 30#include <linux/string.h>
@@ -737,6 +736,9 @@ static void *eeh_restore_one_device_bars(void *data, void *flag)
737 else 736 else
738 eeh_restore_device_bars(edev, dn); 737 eeh_restore_device_bars(edev, dn);
739 738
739 if (eeh_ops->restore_config)
740 eeh_ops->restore_config(dn);
741
740 return NULL; 742 return NULL;
741} 743}
742 744
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index bbfb0294b354..662c6dd98072 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -664,8 +664,16 @@ _GLOBAL(ret_from_except_lite)
664 bl .restore_interrupts 664 bl .restore_interrupts
665 SCHEDULE_USER 665 SCHEDULE_USER
666 b .ret_from_except_lite 666 b .ret_from_except_lite
667 6672:
6682: bl .save_nvgprs 668#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
669 andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
670 bne 3f /* only restore TM if nothing else to do */
671 addi r3,r1,STACK_FRAME_OVERHEAD
672 bl .restore_tm_state
673 b restore
6743:
675#endif
676 bl .save_nvgprs
669 bl .restore_interrupts 677 bl .restore_interrupts
670 addi r3,r1,STACK_FRAME_OVERHEAD 678 addi r3,r1,STACK_FRAME_OVERHEAD
671 bl .do_notify_resume 679 bl .do_notify_resume
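
The entry_64.S change restores transactional state on return to userspace only when TIF_RESTORE_TM is the sole pending work item; signals and rescheduling still win otherwise. The same decision in C form, with illustrative flag values rather than the real TIF_* bits:

/* Sketch only: the "restore TM state only if nothing else to do" check. */
#include <stdio.h>

#define TIF_SIGPENDING    (1u << 0)   /* illustrative bit positions */
#define TIF_NEED_RESCHED  (1u << 1)
#define TIF_RESTORE_TM    (1u << 2)
#define USER_WORK_MASK    (TIF_SIGPENDING | TIF_NEED_RESCHED | TIF_RESTORE_TM)

static const char *return_path(unsigned int flags)
{
    if (!(flags & USER_WORK_MASK))
        return "plain return";
    if (!(flags & (USER_WORK_MASK & ~TIF_RESTORE_TM)))
        return "restore TM state, then return";   /* only TM work pending */
    return "do_notify_resume (signals/resched first)";
}

int main(void)
{
    printf("%s\n", return_path(0));
    printf("%s\n", return_path(TIF_RESTORE_TM));
    printf("%s\n", return_path(TIF_RESTORE_TM | TIF_SIGPENDING));
    return 0;
}
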
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index e7751561fd1d..063b65dd4f27 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -308,6 +308,7 @@ interrupt_base_book3e: /* fake trap */
308 EXCEPTION_STUB(0x2e0, guest_doorbell_crit) 308 EXCEPTION_STUB(0x2e0, guest_doorbell_crit)
309 EXCEPTION_STUB(0x300, hypercall) 309 EXCEPTION_STUB(0x300, hypercall)
310 EXCEPTION_STUB(0x320, ehpriv) 310 EXCEPTION_STUB(0x320, ehpriv)
311 EXCEPTION_STUB(0x340, lrat_error)
311 312
312 .globl interrupt_end_book3e 313 .globl interrupt_end_book3e
313interrupt_end_book3e: 314interrupt_end_book3e:
@@ -677,6 +678,17 @@ kernel_dbg_exc:
677 bl .unknown_exception 678 bl .unknown_exception
678 b .ret_from_except 679 b .ret_from_except
679 680
681/* LRAT Error interrupt */
682 START_EXCEPTION(lrat_error);
683 NORMAL_EXCEPTION_PROLOG(0x340, BOOKE_INTERRUPT_LRAT_ERROR,
684 PROLOG_ADDITION_NONE)
685 EXCEPTION_COMMON(0x340, PACA_EXGEN, INTS_KEEP)
686 addi r3,r1,STACK_FRAME_OVERHEAD
687 bl .save_nvgprs
688 INTS_RESTORE_HARD
689 bl .unknown_exception
690 b .ret_from_except
691
680/* 692/*
681 * An interrupt came in while soft-disabled; We mark paca->irq_happened 693 * An interrupt came in while soft-disabled; We mark paca->irq_happened
682 * accordingly and if the interrupt is level sensitive, we hard disable 694 * accordingly and if the interrupt is level sensitive, we hard disable
@@ -859,6 +871,7 @@ BAD_STACK_TRAMPOLINE(0x2e0)
859BAD_STACK_TRAMPOLINE(0x300) 871BAD_STACK_TRAMPOLINE(0x300)
860BAD_STACK_TRAMPOLINE(0x310) 872BAD_STACK_TRAMPOLINE(0x310)
861BAD_STACK_TRAMPOLINE(0x320) 873BAD_STACK_TRAMPOLINE(0x320)
874BAD_STACK_TRAMPOLINE(0x340)
862BAD_STACK_TRAMPOLINE(0x400) 875BAD_STACK_TRAMPOLINE(0x400)
863BAD_STACK_TRAMPOLINE(0x500) 876BAD_STACK_TRAMPOLINE(0x500)
864BAD_STACK_TRAMPOLINE(0x600) 877BAD_STACK_TRAMPOLINE(0x600)
@@ -1055,12 +1068,9 @@ skpinv: addi r6,r6,1 /* Increment */
1055 mtspr SPRN_MAS0,r3 1068 mtspr SPRN_MAS0,r3
1056 tlbre 1069 tlbre
1057 mfspr r6,SPRN_MAS1 1070 mfspr r6,SPRN_MAS1
1058 rlwinm r6,r6,0,2,0 /* clear IPROT */ 1071 rlwinm r6,r6,0,2,31 /* clear IPROT and VALID */
1059 mtspr SPRN_MAS1,r6 1072 mtspr SPRN_MAS1,r6
1060 tlbwe 1073 tlbwe
1061
1062 /* Invalidate TLB1 */
1063 PPC_TLBILX_ALL(0,R0)
1064 sync 1074 sync
1065 isync 1075 isync
1066 1076
@@ -1114,12 +1124,9 @@ skpinv: addi r6,r6,1 /* Increment */
1114 mtspr SPRN_MAS0,r4 1124 mtspr SPRN_MAS0,r4
1115 tlbre 1125 tlbre
1116 mfspr r5,SPRN_MAS1 1126 mfspr r5,SPRN_MAS1
1117 rlwinm r5,r5,0,2,0 /* clear IPROT */ 1127 rlwinm r5,r5,0,2,31 /* clear IPROT and VALID */
1118 mtspr SPRN_MAS1,r5 1128 mtspr SPRN_MAS1,r5
1119 tlbwe 1129 tlbwe
1120
1121 /* Invalidate TLB1 */
1122 PPC_TLBILX_ALL(0,R0)
1123 sync 1130 sync
1124 isync 1131 isync
1125 1132
@@ -1414,3 +1421,7 @@ _GLOBAL(setup_ehv_ivors)
1414 SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */ 1421 SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
1415 SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */ 1422 SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
1416 blr 1423 blr
1424
1425_GLOBAL(setup_lrat_ivor)
1426 SET_IVOR(42, 0x340) /* LRAT Error */
1427 blr
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 9f905e40922e..38d507306a11 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -155,8 +155,30 @@ machine_check_pSeries_1:
155 */ 155 */
156 HMT_MEDIUM_PPR_DISCARD 156 HMT_MEDIUM_PPR_DISCARD
157 SET_SCRATCH0(r13) /* save r13 */ 157 SET_SCRATCH0(r13) /* save r13 */
158#ifdef CONFIG_PPC_P7_NAP
159BEGIN_FTR_SECTION
160 /* Running native on arch 2.06 or later, check if we are
161 * waking up from nap. We only handle no state loss and
162 * supervisor state loss. We do -not- handle hypervisor
163 * state loss at this time.
164 */
165 mfspr r13,SPRN_SRR1
166 rlwinm. r13,r13,47-31,30,31
167 beq 9f
168
169 /* waking up from powersave (nap) state */
170 cmpwi cr1,r13,2
 171 /* Total loss of HV state is fatal, let's just stay stuck here */
172 bgt cr1,.
1739:
174END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
175#endif /* CONFIG_PPC_P7_NAP */
158 EXCEPTION_PROLOG_0(PACA_EXMC) 176 EXCEPTION_PROLOG_0(PACA_EXMC)
177BEGIN_FTR_SECTION
178 b machine_check_pSeries_early
179FTR_SECTION_ELSE
159 b machine_check_pSeries_0 180 b machine_check_pSeries_0
181ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
160 182
161 . = 0x300 183 . = 0x300
162 .globl data_access_pSeries 184 .globl data_access_pSeries
@@ -405,6 +427,64 @@ denorm_exception_hv:
405 427
406 .align 7 428 .align 7
407 /* moved from 0x200 */ 429 /* moved from 0x200 */
430machine_check_pSeries_early:
431BEGIN_FTR_SECTION
432 EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
433 /*
434 * Register contents:
435 * R13 = PACA
436 * R9 = CR
437 * Original R9 to R13 is saved on PACA_EXMC
438 *
439 * Switch to mc_emergency stack and handle re-entrancy (though we
440 * currently don't test for overflow). Save MCE registers srr1,
441 * srr0, dar and dsisr and then set ME=1
442 *
443 * We use paca->in_mce to check whether this is the first entry or
444 * nested machine check. We increment paca->in_mce to track nested
445 * machine checks.
446 *
447 * If this is the first entry then set stack pointer to
448 * paca->mc_emergency_sp, otherwise r1 is already pointing to
449 * stack frame on mc_emergency stack.
450 *
451 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
452 * checkstop if we get another machine check exception before we do
453 * rfid with MSR_ME=1.
454 */
455 mr r11,r1 /* Save r1 */
456 lhz r10,PACA_IN_MCE(r13)
457 cmpwi r10,0 /* Are we in nested machine check */
458 bne 0f /* Yes, we are. */
459 /* First machine check entry */
460 ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */
4610: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
462 addi r10,r10,1 /* increment paca->in_mce */
463 sth r10,PACA_IN_MCE(r13)
464 std r11,GPR1(r1) /* Save r1 on the stack. */
465 std r11,0(r1) /* make stack chain pointer */
466 mfspr r11,SPRN_SRR0 /* Save SRR0 */
467 std r11,_NIP(r1)
468 mfspr r11,SPRN_SRR1 /* Save SRR1 */
469 std r11,_MSR(r1)
470 mfspr r11,SPRN_DAR /* Save DAR */
471 std r11,_DAR(r1)
472 mfspr r11,SPRN_DSISR /* Save DSISR */
473 std r11,_DSISR(r1)
474 std r9,_CCR(r1) /* Save CR in stackframe */
475 /* Save r9 through r13 from EXMC save area to stack frame. */
476 EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
477 mfmsr r11 /* get MSR value */
478 ori r11,r11,MSR_ME /* turn on ME bit */
479 ori r11,r11,MSR_RI /* turn on RI bit */
480 ld r12,PACAKBASE(r13) /* get high part of &label */
481 LOAD_HANDLER(r12, machine_check_handle_early)
482 mtspr SPRN_SRR0,r12
483 mtspr SPRN_SRR1,r11
484 rfid
485 b . /* prevent speculative execution */
486END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
487
408machine_check_pSeries: 488machine_check_pSeries:
409 .globl machine_check_fwnmi 489 .globl machine_check_fwnmi
410machine_check_fwnmi: 490machine_check_fwnmi:
@@ -688,30 +768,6 @@ kvmppc_skip_Hinterrupt:
688 768
689 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) 769 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
690 770
691 /*
692 * Machine check is different because we use a different
693 * save area: PACA_EXMC instead of PACA_EXGEN.
694 */
695 .align 7
696 .globl machine_check_common
697machine_check_common:
698
699 mfspr r10,SPRN_DAR
700 std r10,PACA_EXGEN+EX_DAR(r13)
701 mfspr r10,SPRN_DSISR
702 stw r10,PACA_EXGEN+EX_DSISR(r13)
703 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
704 FINISH_NAP
705 DISABLE_INTS
706 ld r3,PACA_EXGEN+EX_DAR(r13)
707 lwz r4,PACA_EXGEN+EX_DSISR(r13)
708 std r3,_DAR(r1)
709 std r4,_DSISR(r1)
710 bl .save_nvgprs
711 addi r3,r1,STACK_FRAME_OVERHEAD
712 bl .machine_check_exception
713 b .ret_from_except
714
715 STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ) 771 STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
716 STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt) 772 STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
717 STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt) 773 STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
@@ -1080,6 +1136,30 @@ unrecov_user_slb:
1080#endif /* __DISABLED__ */ 1136#endif /* __DISABLED__ */
1081 1137
1082 1138
1139 /*
1140 * Machine check is different because we use a different
1141 * save area: PACA_EXMC instead of PACA_EXGEN.
1142 */
1143 .align 7
1144 .globl machine_check_common
1145machine_check_common:
1146
1147 mfspr r10,SPRN_DAR
1148 std r10,PACA_EXGEN+EX_DAR(r13)
1149 mfspr r10,SPRN_DSISR
1150 stw r10,PACA_EXGEN+EX_DSISR(r13)
1151 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
1152 FINISH_NAP
1153 DISABLE_INTS
1154 ld r3,PACA_EXGEN+EX_DAR(r13)
1155 lwz r4,PACA_EXGEN+EX_DSISR(r13)
1156 std r3,_DAR(r1)
1157 std r4,_DSISR(r1)
1158 bl .save_nvgprs
1159 addi r3,r1,STACK_FRAME_OVERHEAD
1160 bl .machine_check_exception
1161 b .ret_from_except
1162
1083 .align 7 1163 .align 7
1084 .globl alignment_common 1164 .globl alignment_common
1085alignment_common: 1165alignment_common:
@@ -1263,6 +1343,120 @@ _GLOBAL(opal_mc_secondary_handler)
1263#endif /* CONFIG_PPC_POWERNV */ 1343#endif /* CONFIG_PPC_POWERNV */
1264 1344
1265 1345
1346#define MACHINE_CHECK_HANDLER_WINDUP \
1347 /* Clear MSR_RI before setting SRR0 and SRR1. */\
1348 li r0,MSR_RI; \
1349 mfmsr r9; /* get MSR value */ \
1350 andc r9,r9,r0; \
1351 mtmsrd r9,1; /* Clear MSR_RI */ \
1352 /* Move original SRR0 and SRR1 into the respective regs */ \
1353 ld r9,_MSR(r1); \
1354 mtspr SPRN_SRR1,r9; \
1355 ld r3,_NIP(r1); \
1356 mtspr SPRN_SRR0,r3; \
1357 ld r9,_CTR(r1); \
1358 mtctr r9; \
1359 ld r9,_XER(r1); \
1360 mtxer r9; \
1361 ld r9,_LINK(r1); \
1362 mtlr r9; \
1363 REST_GPR(0, r1); \
1364 REST_8GPRS(2, r1); \
1365 REST_GPR(10, r1); \
1366 ld r11,_CCR(r1); \
1367 mtcr r11; \
1368 /* Decrement paca->in_mce. */ \
1369 lhz r12,PACA_IN_MCE(r13); \
1370 subi r12,r12,1; \
1371 sth r12,PACA_IN_MCE(r13); \
1372 REST_GPR(11, r1); \
1373 REST_2GPRS(12, r1); \
1374 /* restore original r1. */ \
1375 ld r1,GPR1(r1)
1376
1377 /*
1378 * Handle machine check early in real mode. We come here with
1379 * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
1380 */
1381 .align 7
1382 .globl machine_check_handle_early
1383machine_check_handle_early:
1384 std r0,GPR0(r1) /* Save r0 */
1385 EXCEPTION_PROLOG_COMMON_3(0x200)
1386 bl .save_nvgprs
1387 addi r3,r1,STACK_FRAME_OVERHEAD
1388 bl .machine_check_early
1389 ld r12,_MSR(r1)
1390#ifdef CONFIG_PPC_P7_NAP
1391 /*
1392 * Check if thread was in power saving mode. We come here when any
1393 * of the following is true:
1394 * a. thread wasn't in power saving mode
1395 * b. thread was in power saving mode with no state loss or
1396 * supervisor state loss
1397 *
1398 * Go back to nap again if (b) is true.
1399 */
1400 rlwinm. r11,r12,47-31,30,31 /* Was it in power saving mode? */
 1401 beq 4f /* No, it wasn't */
1402 /* Thread was in power saving mode. Go back to nap again. */
1403 cmpwi r11,2
1404 bne 3f
1405 /* Supervisor state loss */
1406 li r0,1
1407 stb r0,PACA_NAPSTATELOST(r13)
14083: bl .machine_check_queue_event
1409 MACHINE_CHECK_HANDLER_WINDUP
1410 GET_PACA(r13)
1411 ld r1,PACAR1(r13)
1412 b .power7_enter_nap_mode
14134:
1414#endif
1415 /*
1416 * Check if we are coming from hypervisor userspace. If yes then we
1417 * continue in host kernel in V mode to deliver the MC event.
1418 */
1419 rldicl. r11,r12,4,63 /* See if MC hit while in HV mode. */
1420 beq 5f
1421 andi. r11,r12,MSR_PR /* See if coming from user. */
1422 bne 9f /* continue in V mode if we are. */
1423
14245:
1425#ifdef CONFIG_KVM_BOOK3S_64_HV
1426 /*
1427 * We are coming from kernel context. Check if we are coming from
 1428 * guest. If yes, then we can continue. We will fall through
 1429 * do_kvm_200->kvmppc_interrupt to deliver the MC event to the guest.
1430 */
1431 lbz r11,HSTATE_IN_GUEST(r13)
1432 cmpwi r11,0 /* Check if coming from guest */
1433 bne 9f /* continue if we are. */
1434#endif
1435 /*
1436 * At this point we are not sure about what context we come from.
1437 * Queue up the MCE event and return from the interrupt.
1438 * But before that, check if this is an un-recoverable exception.
1439 * If yes, then stay on emergency stack and panic.
1440 */
1441 andi. r11,r12,MSR_RI
1442 bne 2f
14431: addi r3,r1,STACK_FRAME_OVERHEAD
1444 bl .unrecoverable_exception
1445 b 1b
14462:
1447 /*
1448 * Return from MC interrupt.
1449 * Queue up the MCE event so that we can log it later, while
1450 * returning from kernel or opal call.
1451 */
1452 bl .machine_check_queue_event
1453 MACHINE_CHECK_HANDLER_WINDUP
1454 rfid
14559:
1456 /* Deliver the machine check to host kernel in V mode. */
1457 MACHINE_CHECK_HANDLER_WINDUP
1458 b machine_check_pSeries
1459
1266/* 1460/*
1267 * r13 points to the PACA, r9 contains the saved CR, 1461 * r13 points to the PACA, r9 contains the saved CR,
1268 * r12 contain the saved SRR1, SRR0 is still ready for return 1462 * r12 contain the saved SRR1, SRR0 is still ready for return
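
The early machine-check path tracks nesting with paca->in_mce and switches to the dedicated mc_emergency_sp only on the first entry; nested machine checks keep using the frame already set up. A C-flavoured sketch of that stack selection (types, names and sizes below are stand-ins):

/* Sketch only: stand-in types showing the first-entry vs. nested-MCE stack choice. */
#include <stdint.h>
#include <stdio.h>

#define INT_FRAME_SIZE 0x200u   /* illustrative frame size */

struct toy_paca {
    uint16_t in_mce;            /* nesting depth of machine check handling */
    uintptr_t mc_emergency_sp;  /* top of the dedicated MC emergency stack */
};

/* Returns the stack pointer the handler should use for this entry. */
static uintptr_t mce_pick_stack(struct toy_paca *paca, uintptr_t current_sp)
{
    uintptr_t sp = current_sp;

    if (paca->in_mce == 0)      /* first entry: switch to the emergency stack */
        sp = paca->mc_emergency_sp;
    sp -= INT_FRAME_SIZE;       /* allocate a stack frame */
    paca->in_mce++;             /* remember we are one level deeper */
    return sp;
}

int main(void)
{
    struct toy_paca paca = { .in_mce = 0, .mc_emergency_sp = 0x10000 };
    uintptr_t sp;

    sp = mce_pick_stack(&paca, 0xdead);   /* first MCE: uses emergency stack */
    printf("sp = 0x%lx, in_mce = %u\n", (unsigned long)sp, (unsigned)paca.in_mce);
    sp = mce_pick_stack(&paca, sp);       /* nested MCE: stays on same stack */
    printf("sp = 0x%lx, in_mce = %u\n", (unsigned long)sp, (unsigned)paca.in_mce);
    return 0;
}
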
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index f7f5b8bed68f..9ad236e5d2c9 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -81,6 +81,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
81#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 81#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
82 82
83/* 83/*
84 * Enable use of the FPU, and VSX if possible, for the caller.
85 */
86_GLOBAL(fp_enable)
87 mfmsr r3
88 ori r3,r3,MSR_FP
89#ifdef CONFIG_VSX
90BEGIN_FTR_SECTION
91 oris r3,r3,MSR_VSX@h
92END_FTR_SECTION_IFSET(CPU_FTR_VSX)
93#endif
94 SYNC
95 MTMSRD(r3)
96 isync /* (not necessary for arch 2.02 and later) */
97 blr
98
99/*
84 * Load state from memory into FP registers including FPSCR. 100 * Load state from memory into FP registers including FPSCR.
85 * Assumes the caller has enabled FP in the MSR. 101 * Assumes the caller has enabled FP in the MSR.
86 */ 102 */
diff --git a/arch/powerpc/kernel/fsl_booke_entry_mapping.S b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
index a92c79be2728..f22e7e44fbf3 100644
--- a/arch/powerpc/kernel/fsl_booke_entry_mapping.S
+++ b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
@@ -176,6 +176,8 @@ skpinv: addi r6,r6,1 /* Increment */
176/* 7. Jump to KERNELBASE mapping */ 176/* 7. Jump to KERNELBASE mapping */
177 lis r6,(KERNELBASE & ~0xfff)@h 177 lis r6,(KERNELBASE & ~0xfff)@h
178 ori r6,r6,(KERNELBASE & ~0xfff)@l 178 ori r6,r6,(KERNELBASE & ~0xfff)@l
179 rlwinm r7,r25,0,0x03ffffff
180 add r6,r7,r6
179 181
180#elif defined(ENTRY_MAPPING_KEXEC_SETUP) 182#elif defined(ENTRY_MAPPING_KEXEC_SETUP)
181/* 183/*
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 4f0946de2d5c..b7363bd42452 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/threads.h> 25#include <linux/threads.h>
26#include <linux/init.h>
26#include <asm/reg.h> 27#include <asm/reg.h>
27#include <asm/page.h> 28#include <asm/page.h>
28#include <asm/mmu.h> 29#include <asm/mmu.h>
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index f45726a1d963..b497188a94a1 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -65,29 +65,78 @@ _ENTRY(_start);
65 nop 65 nop
66 66
67 /* Translate device tree address to physical, save in r30/r31 */ 67 /* Translate device tree address to physical, save in r30/r31 */
68 mfmsr r16 68 bl get_phys_addr
69 mfspr r17,SPRN_PID 69 mr r30,r3
70 rlwinm r17,r17,16,0x3fff0000 /* turn PID into MAS6[SPID] */ 70 mr r31,r4
71 rlwimi r17,r16,28,0x00000001 /* turn MSR[DS] into MAS6[SAS] */
72 mtspr SPRN_MAS6,r17
73
74 tlbsx 0,r3 /* must succeed */
75
76 mfspr r16,SPRN_MAS1
77 mfspr r20,SPRN_MAS3
78 rlwinm r17,r16,25,0x1f /* r17 = log2(page size) */
79 li r18,1024
80 slw r18,r18,r17 /* r18 = page size */
81 addi r18,r18,-1
82 and r19,r3,r18 /* r19 = page offset */
83 andc r31,r20,r18 /* r31 = page base */
84 or r31,r31,r19 /* r31 = devtree phys addr */
85 mfspr r30,SPRN_MAS7
86 71
87 li r25,0 /* phys kernel start (low) */ 72 li r25,0 /* phys kernel start (low) */
88 li r24,0 /* CPU number */ 73 li r24,0 /* CPU number */
89 li r23,0 /* phys kernel start (high) */ 74 li r23,0 /* phys kernel start (high) */
90 75
76#ifdef CONFIG_RELOCATABLE
77 LOAD_REG_ADDR_PIC(r3, _stext) /* Get our current runtime base */
78
79 /* Translate _stext address to physical, save in r23/r25 */
80 bl get_phys_addr
81 mr r23,r3
82 mr r25,r4
83
84 bl 0f
850: mflr r8
86 addis r3,r8,(is_second_reloc - 0b)@ha
87 lwz r19,(is_second_reloc - 0b)@l(r3)
88
89 /* Check if this is the second relocation. */
90 cmpwi r19,1
91 bne 1f
92
93 /*
 94	 * For the second relocation we already have the real memstart_addr
 95	 * from the device tree, so we map PAGE_OFFSET to memstart_addr and
 96	 * the virtual address of the kernel start is:
 97	 * PAGE_OFFSET + (kernstart_addr - memstart_addr)
 98	 * The offset between kernstart_addr and memstart_addr should never
 99	 * exceed 1G, so we can just use their lower 32 bits for the
 100	 * calculation.
101 */
102 lis r3,PAGE_OFFSET@h
103
104 addis r4,r8,(kernstart_addr - 0b)@ha
105 addi r4,r4,(kernstart_addr - 0b)@l
106 lwz r5,4(r4)
107
108 addis r6,r8,(memstart_addr - 0b)@ha
109 addi r6,r6,(memstart_addr - 0b)@l
110 lwz r7,4(r6)
111
112 subf r5,r7,r5
113 add r3,r3,r5
114 b 2f
115
1161:
117 /*
 118	 * We have the runtime (virtual) address of our base.
 119	 * We calculate our offset within a 64M page.
120 * We could map the 64M page we belong to at PAGE_OFFSET and
121 * get going from there.
122 */
123 lis r4,KERNELBASE@h
124 ori r4,r4,KERNELBASE@l
125 rlwinm r6,r25,0,0x3ffffff /* r6 = PHYS_START % 64M */
126 rlwinm r5,r4,0,0x3ffffff /* r5 = KERNELBASE % 64M */
127 subf r3,r5,r6 /* r3 = r6 - r5 */
128 add r3,r4,r3 /* Required Virtual Address */
129
1302: bl relocate
131
132 /*
133 * For the second relocation, we already set the right tlb entries
134 * for the kernel space, so skip the code in fsl_booke_entry_mapping.S
135 */
136 cmpwi r19,1
137 beq set_ivor
138#endif
139
91/* We try to not make any assumptions about how the boot loader 140/* We try to not make any assumptions about how the boot loader
92 * setup or used the TLBs. We invalidate all mappings from the 141 * setup or used the TLBs. We invalidate all mappings from the
93 * boot loader and load a single entry in TLB1[0] to map the 142 * boot loader and load a single entry in TLB1[0] to map the
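The relocatable-boot hunk above picks the virtual address to hand to relocate(): on the first pass it keeps the kernel inside the 64M page it was loaded into, on the second pass it uses the kernstart_addr/memstart_addr pair read from the device tree. A stand-alone C model of both calculations; the constants below are illustrative, not from a real config:

#include <stdio.h>
#include <stdint.h>

#define KERNELBASE  0xc0000000u
#define PAGE_OFFSET 0xc0000000u
#define SZ_64M      0x04000000u

/* First relocation: shift KERNELBASE by our offset within a 64M page. */
static uint32_t reloc_virt_first(uint32_t phys_start)
{
	return KERNELBASE + (phys_start & (SZ_64M - 1))
			  - (KERNELBASE & (SZ_64M - 1));
}

/* Second relocation: memstart_addr is known, use the real offset
 * (assumed to fit in the low 32 bits, as the comment above says). */
static uint32_t reloc_virt_second(uint64_t kernstart_addr, uint64_t memstart_addr)
{
	return PAGE_OFFSET + (uint32_t)(kernstart_addr - memstart_addr);
}

int main(void)
{
	printf("%#x\n", (unsigned)reloc_virt_first(0x01000000));		/* 0xc1000000 */
	printf("%#x\n", (unsigned)reloc_virt_second(0x21000000, 0x20000000));	/* 0xc1000000 */
	return 0;
}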
@@ -113,6 +162,7 @@ _ENTRY(__early_start)
113#include "fsl_booke_entry_mapping.S" 162#include "fsl_booke_entry_mapping.S"
114#undef ENTRY_MAPPING_BOOT_SETUP 163#undef ENTRY_MAPPING_BOOT_SETUP
115 164
165set_ivor:
116 /* Establish the interrupt vector offsets */ 166 /* Establish the interrupt vector offsets */
117 SET_IVOR(0, CriticalInput); 167 SET_IVOR(0, CriticalInput);
118 SET_IVOR(1, MachineCheck); 168 SET_IVOR(1, MachineCheck);
@@ -166,8 +216,7 @@ _ENTRY(__early_start)
166 /* Check to see if we're the second processor, and jump 216 /* Check to see if we're the second processor, and jump
167 * to the secondary_start code if so 217 * to the secondary_start code if so
168 */ 218 */
169 lis r24, boot_cpuid@h 219 LOAD_REG_ADDR_PIC(r24, boot_cpuid)
170 ori r24, r24, boot_cpuid@l
171 lwz r24, 0(r24) 220 lwz r24, 0(r24)
172 cmpwi r24, -1 221 cmpwi r24, -1
173 mfspr r24,SPRN_PIR 222 mfspr r24,SPRN_PIR
@@ -197,6 +246,18 @@ _ENTRY(__early_start)
197 246
198 bl early_init 247 bl early_init
199 248
249#ifdef CONFIG_RELOCATABLE
250 mr r3,r30
251 mr r4,r31
252#ifdef CONFIG_PHYS_64BIT
253 mr r5,r23
254 mr r6,r25
255#else
256 mr r5,r25
257#endif
258 bl relocate_init
259#endif
260
200#ifdef CONFIG_DYNAMIC_MEMSTART 261#ifdef CONFIG_DYNAMIC_MEMSTART
201 lis r3,kernstart_addr@ha 262 lis r3,kernstart_addr@ha
202 la r3,kernstart_addr@l(r3) 263 la r3,kernstart_addr@l(r3)
@@ -856,6 +917,33 @@ KernelSPE:
856#endif /* CONFIG_SPE */ 917#endif /* CONFIG_SPE */
857 918
858/* 919/*
 920	 * Translate the effective address in r3 to a physical address. The
 921	 * physical address is returned in r3 (upper 32 bits) and r4 (lower 32 bits).
922 */
923get_phys_addr:
924 mfmsr r8
925 mfspr r9,SPRN_PID
926 rlwinm r9,r9,16,0x3fff0000 /* turn PID into MAS6[SPID] */
927 rlwimi r9,r8,28,0x00000001 /* turn MSR[DS] into MAS6[SAS] */
928 mtspr SPRN_MAS6,r9
929
930 tlbsx 0,r3 /* must succeed */
931
932 mfspr r8,SPRN_MAS1
933 mfspr r12,SPRN_MAS3
934 rlwinm r9,r8,25,0x1f /* r9 = log2(page size) */
935 li r10,1024
936 slw r10,r10,r9 /* r10 = page size */
937 addi r10,r10,-1
938 and r11,r3,r10 /* r11 = page offset */
939 andc r4,r12,r10 /* r4 = page base */
 940	 or	r4,r4,r11		/* r4 = phys addr (low 32 bits) */
941#ifdef CONFIG_PHYS_64BIT
942 mfspr r3,SPRN_MAS7
943#endif
944 blr
945
946/*
859 * Global functions 947 * Global functions
860 */ 948 */
861 949
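get_phys_addr above searches the TLB for the effective address, derives the page size from MAS1[TSIZE] and splices the page offset onto the page base from MAS3/MAS7. A user-space sketch of that arithmetic, with the register values passed in as plain integers:

#include <stdio.h>
#include <stdint.h>

/* Model of get_phys_addr: combine the page base from MAS3/MAS7 with the
 * offset of the effective address within the page. 'tsize' is the
 * MAS1[TSIZE] field; as in the code above, the page size is 1KB << tsize. */
static uint64_t phys_from_tlb(uint32_t eaddr, uint32_t mas3_base,
			      uint32_t mas7_hi, unsigned int tsize)
{
	uint32_t page_mask = (1024u << tsize) - 1;
	uint32_t lo = (mas3_base & ~page_mask) | (eaddr & page_mask);

	return ((uint64_t)mas7_hi << 32) | lo;
}

int main(void)
{
	/* 64M page (tsize 16) at physical 0x20000000 backing 0xc0123456 */
	printf("%#llx\n", (unsigned long long)
	       phys_from_tlb(0xc0123456u, 0x20000000u, 0, 16));	/* 0x20123456 */
	return 0;
}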
@@ -1057,24 +1145,36 @@ _GLOBAL(__flush_disable_L1)
1057/* When we get here, r24 needs to hold the CPU # */ 1145/* When we get here, r24 needs to hold the CPU # */
1058 .globl __secondary_start 1146 .globl __secondary_start
1059__secondary_start: 1147__secondary_start:
1060 lis r3,__secondary_hold_acknowledge@h 1148 LOAD_REG_ADDR_PIC(r3, tlbcam_index)
1061 ori r3,r3,__secondary_hold_acknowledge@l 1149 lwz r3,0(r3)
1062 stw r24,0(r3)
1063
1064 li r3,0
1065 mr r4,r24 /* Why? */
1066 bl call_setup_cpu
1067
1068 lis r3,tlbcam_index@ha
1069 lwz r3,tlbcam_index@l(r3)
1070 mtctr r3 1150 mtctr r3
1071 li r26,0 /* r26 safe? */ 1151 li r26,0 /* r26 safe? */
1072 1152
1153 bl switch_to_as1
1154 mr r27,r3 /* tlb entry */
1073 /* Load each CAM entry */ 1155 /* Load each CAM entry */
10741: mr r3,r26 11561: mr r3,r26
1075 bl loadcam_entry 1157 bl loadcam_entry
1076 addi r26,r26,1 1158 addi r26,r26,1
1077 bdnz 1b 1159 bdnz 1b
1160 mr r3,r27 /* tlb entry */
1161 LOAD_REG_ADDR_PIC(r4, memstart_addr)
1162 lwz r4,0(r4)
1163 mr r5,r25 /* phys kernel start */
1164 rlwinm r5,r5,0,~0x3ffffff /* aligned 64M */
1165 subf r4,r5,r4 /* memstart_addr - phys kernel start */
1166 li r5,0 /* no device tree */
1167 li r6,0 /* not boot cpu */
1168 bl restore_to_as0
1169
1170
1171 lis r3,__secondary_hold_acknowledge@h
1172 ori r3,r3,__secondary_hold_acknowledge@l
1173 stw r24,0(r3)
1174
1175 li r3,0
1176 mr r4,r24 /* Why? */
1177 bl call_setup_cpu
1078 1178
1079 /* get current_thread_info and current */ 1179 /* get current_thread_info and current */
1080 lis r1,secondary_ti@ha 1180 lis r1,secondary_ti@ha
@@ -1111,6 +1211,112 @@ __secondary_hold_acknowledge:
1111#endif 1211#endif
1112 1212
1113/* 1213/*
 1214	 * Create a TLB entry with the same effective and physical address as
 1215	 * the TLB entry used by the currently running code, but with TS set
 1216	 * to 1, then switch to address space 1. It returns with r3 set to
 1217	 * the ESEL of the newly created TLB entry.
1218 */
1219_GLOBAL(switch_to_as1)
1220 mflr r5
1221
 1222	 /* Find an unused entry */
1223 mfspr r3,SPRN_TLB1CFG
1224 andi. r3,r3,0xfff
1225 mfspr r4,SPRN_PID
1226 rlwinm r4,r4,16,0x3fff0000 /* turn PID into MAS6[SPID] */
1227 mtspr SPRN_MAS6,r4
12281: lis r4,0x1000 /* Set MAS0(TLBSEL) = 1 */
1229 addi r3,r3,-1
1230 rlwimi r4,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
1231 mtspr SPRN_MAS0,r4
1232 tlbre
1233 mfspr r4,SPRN_MAS1
1234 andis. r4,r4,MAS1_VALID@h
1235 bne 1b
1236
1237 /* Get the tlb entry used by the current running code */
1238 bl 0f
12390: mflr r4
1240 tlbsx 0,r4
1241
1242 mfspr r4,SPRN_MAS1
1243 ori r4,r4,MAS1_TS /* Set the TS = 1 */
1244 mtspr SPRN_MAS1,r4
1245
1246 mfspr r4,SPRN_MAS0
1247 rlwinm r4,r4,0,~MAS0_ESEL_MASK
1248 rlwimi r4,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
1249 mtspr SPRN_MAS0,r4
1250 tlbwe
1251 isync
1252 sync
1253
1254 mfmsr r4
1255 ori r4,r4,MSR_IS | MSR_DS
1256 mtspr SPRN_SRR0,r5
1257 mtspr SPRN_SRR1,r4
1258 sync
1259 rfi
1260
1261/*
 1262	 * Restore to address space 0 and invalidate the TLB entry created
 1263	 * by switch_to_as1.
 1264	 * r3 - the TLB entry to invalidate
1265 * r4 - __pa(PAGE_OFFSET in AS1) - __pa(PAGE_OFFSET in AS0)
1266 * r5 - device tree virtual address. If r4 is 0, r5 is ignored.
1267 * r6 - boot cpu
1268*/
1269_GLOBAL(restore_to_as0)
1270 mflr r0
1271
1272 bl 0f
12730: mflr r9
1274 addi r9,r9,1f - 0b
1275
1276 /*
1277 * We may map the PAGE_OFFSET in AS0 to a different physical address,
 1278	 * so we need to calculate the right jump and device tree address based
 1279	 * on the offset passed in r4.
1280 */
1281 add r9,r9,r4
1282 add r5,r5,r4
1283 add r0,r0,r4
1284
12852: mfmsr r7
1286 li r8,(MSR_IS | MSR_DS)
1287 andc r7,r7,r8
1288
1289 mtspr SPRN_SRR0,r9
1290 mtspr SPRN_SRR1,r7
1291 sync
1292 rfi
1293
1294 /* Invalidate the temporary tlb entry for AS1 */
12951: lis r9,0x1000 /* Set MAS0(TLBSEL) = 1 */
1296 rlwimi r9,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
1297 mtspr SPRN_MAS0,r9
1298 tlbre
1299 mfspr r9,SPRN_MAS1
1300 rlwinm r9,r9,0,2,31 /* Clear MAS1 Valid and IPPROT */
1301 mtspr SPRN_MAS1,r9
1302 tlbwe
1303 isync
1304
1305 cmpwi r4,0
1306 cmpwi cr1,r6,0
1307 cror eq,4*cr1+eq,eq
1308 bne 3f /* offset != 0 && is_boot_cpu */
1309 mtlr r0
1310 blr
1311
1312 /*
 1313	 * PAGE_OFFSET will map to a different physical address, so jump
 1314	 * to _start to relocate once more.
1315 */
13163: mr r3,r5
1317 bl _start
1318
1319/*
1114 * We put a few things here that have to be page-aligned. This stuff 1320 * We put a few things here that have to be page-aligned. This stuff
1115 * goes at the beginning of the data segment, which is page-aligned. 1321 * goes at the beginning of the data segment, which is page-aligned.
1116 */ 1322 */
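restore_to_as0 above returns to its caller unless the AS0 mapping moved (r4 != 0) and this is the boot CPU, in which case it branches to _start for one more relocation pass; that is what the cmpwi/cror/bne sequence encodes. A one-function C model of the condition, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

/* Model of the branch at the end of restore_to_as0(): relocate again
 * only if the mapping offset is non-zero and we are the boot CPU. */
static bool must_relocate_again(long offset, bool is_boot_cpu)
{
	return offset != 0 && is_boot_cpu;
}

int main(void)
{
	printf("%d %d\n",
	       must_relocate_again(0x1000000, true),	/* 1: jump to _start */
	       must_relocate_again(0x1000000, false));	/* 0: plain return */
	return 0;
}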
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index f0b47d1a6b0e..b0a1792279bb 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -28,7 +28,6 @@
28#include <linux/percpu.h> 28#include <linux/percpu.h>
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/sched.h> 30#include <linux/sched.h>
31#include <linux/init.h>
32#include <linux/smp.h> 31#include <linux/smp.h>
33 32
34#include <asm/hw_breakpoint.h> 33#include <asm/hw_breakpoint.h>
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 847e40e62fce..3fdef0f0c67f 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -84,6 +84,7 @@ _GLOBAL(power7_nap)
84 std r9,_MSR(r1) 84 std r9,_MSR(r1)
85 std r1,PACAR1(r13) 85 std r1,PACAR1(r13)
86 86
87_GLOBAL(power7_enter_nap_mode)
87#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 88#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
88 /* Tell KVM we're napping */ 89 /* Tell KVM we're napping */
89 li r4,KVM_HWTHREAD_IN_NAP 90 li r4,KVM_HWTHREAD_IN_NAP
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index 97a3715ac8bd..b82227e7e21b 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -3,7 +3,6 @@
3 * 3 *
4 * (C) Copyright 2004 Linus Torvalds 4 * (C) Copyright 2004 Linus Torvalds
5 */ 5 */
6#include <linux/init.h>
7#include <linux/pci.h> 6#include <linux/pci.h>
8#include <linux/mm.h> 7#include <linux/mm.h>
9#include <linux/export.h> 8#include <linux/export.h>
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 572bb5b95f35..d773dd440a45 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -251,14 +251,13 @@ again:
251 251
252 if (dev) 252 if (dev)
253 boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 253 boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
254 1 << IOMMU_PAGE_SHIFT); 254 1 << tbl->it_page_shift);
255 else 255 else
256 boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT); 256 boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
257 /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */ 257 /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
258 258
259 n = iommu_area_alloc(tbl->it_map, limit, start, npages, 259 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
260 tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT, 260 boundary_size >> tbl->it_page_shift, align_mask);
261 align_mask);
262 if (n == -1) { 261 if (n == -1) {
263 if (likely(pass == 0)) { 262 if (likely(pass == 0)) {
264 /* First try the pool from the start */ 263 /* First try the pool from the start */
@@ -320,12 +319,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
320 return DMA_ERROR_CODE; 319 return DMA_ERROR_CODE;
321 320
322 entry += tbl->it_offset; /* Offset into real TCE table */ 321 entry += tbl->it_offset; /* Offset into real TCE table */
323 ret = entry << IOMMU_PAGE_SHIFT; /* Set the return dma address */ 322 ret = entry << tbl->it_page_shift; /* Set the return dma address */
324 323
325 /* Put the TCEs in the HW table */ 324 /* Put the TCEs in the HW table */
326 build_fail = ppc_md.tce_build(tbl, entry, npages, 325 build_fail = ppc_md.tce_build(tbl, entry, npages,
327 (unsigned long)page & IOMMU_PAGE_MASK, 326 (unsigned long)page &
328 direction, attrs); 327 IOMMU_PAGE_MASK(tbl), direction, attrs);
329 328
330 /* ppc_md.tce_build() only returns non-zero for transient errors. 329 /* ppc_md.tce_build() only returns non-zero for transient errors.
331 * Clean up the table bitmap in this case and return 330 * Clean up the table bitmap in this case and return
@@ -352,7 +351,7 @@ static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
352{ 351{
353 unsigned long entry, free_entry; 352 unsigned long entry, free_entry;
354 353
355 entry = dma_addr >> IOMMU_PAGE_SHIFT; 354 entry = dma_addr >> tbl->it_page_shift;
356 free_entry = entry - tbl->it_offset; 355 free_entry = entry - tbl->it_offset;
357 356
358 if (((free_entry + npages) > tbl->it_size) || 357 if (((free_entry + npages) > tbl->it_size) ||
@@ -401,7 +400,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
401 unsigned long flags; 400 unsigned long flags;
402 struct iommu_pool *pool; 401 struct iommu_pool *pool;
403 402
404 entry = dma_addr >> IOMMU_PAGE_SHIFT; 403 entry = dma_addr >> tbl->it_page_shift;
405 free_entry = entry - tbl->it_offset; 404 free_entry = entry - tbl->it_offset;
406 405
407 pool = get_pool(tbl, free_entry); 406 pool = get_pool(tbl, free_entry);
@@ -468,13 +467,13 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
468 } 467 }
469 /* Allocate iommu entries for that segment */ 468 /* Allocate iommu entries for that segment */
470 vaddr = (unsigned long) sg_virt(s); 469 vaddr = (unsigned long) sg_virt(s);
471 npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE); 470 npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
472 align = 0; 471 align = 0;
473 if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE && 472 if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
474 (vaddr & ~PAGE_MASK) == 0) 473 (vaddr & ~PAGE_MASK) == 0)
475 align = PAGE_SHIFT - IOMMU_PAGE_SHIFT; 474 align = PAGE_SHIFT - tbl->it_page_shift;
476 entry = iommu_range_alloc(dev, tbl, npages, &handle, 475 entry = iommu_range_alloc(dev, tbl, npages, &handle,
477 mask >> IOMMU_PAGE_SHIFT, align); 476 mask >> tbl->it_page_shift, align);
478 477
479 DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen); 478 DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
480 479
@@ -489,16 +488,16 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
489 488
490 /* Convert entry to a dma_addr_t */ 489 /* Convert entry to a dma_addr_t */
491 entry += tbl->it_offset; 490 entry += tbl->it_offset;
492 dma_addr = entry << IOMMU_PAGE_SHIFT; 491 dma_addr = entry << tbl->it_page_shift;
493 dma_addr |= (s->offset & ~IOMMU_PAGE_MASK); 492 dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
494 493
495 DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n", 494 DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
496 npages, entry, dma_addr); 495 npages, entry, dma_addr);
497 496
498 /* Insert into HW table */ 497 /* Insert into HW table */
499 build_fail = ppc_md.tce_build(tbl, entry, npages, 498 build_fail = ppc_md.tce_build(tbl, entry, npages,
500 vaddr & IOMMU_PAGE_MASK, 499 vaddr & IOMMU_PAGE_MASK(tbl),
501 direction, attrs); 500 direction, attrs);
502 if(unlikely(build_fail)) 501 if(unlikely(build_fail))
503 goto failure; 502 goto failure;
504 503
@@ -559,9 +558,9 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
559 if (s->dma_length != 0) { 558 if (s->dma_length != 0) {
560 unsigned long vaddr, npages; 559 unsigned long vaddr, npages;
561 560
562 vaddr = s->dma_address & IOMMU_PAGE_MASK; 561 vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
563 npages = iommu_num_pages(s->dma_address, s->dma_length, 562 npages = iommu_num_pages(s->dma_address, s->dma_length,
564 IOMMU_PAGE_SIZE); 563 IOMMU_PAGE_SIZE(tbl));
565 __iommu_free(tbl, vaddr, npages); 564 __iommu_free(tbl, vaddr, npages);
566 s->dma_address = DMA_ERROR_CODE; 565 s->dma_address = DMA_ERROR_CODE;
567 s->dma_length = 0; 566 s->dma_length = 0;
@@ -592,7 +591,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
592 if (sg->dma_length == 0) 591 if (sg->dma_length == 0)
593 break; 592 break;
594 npages = iommu_num_pages(dma_handle, sg->dma_length, 593 npages = iommu_num_pages(dma_handle, sg->dma_length,
595 IOMMU_PAGE_SIZE); 594 IOMMU_PAGE_SIZE(tbl));
596 __iommu_free(tbl, dma_handle, npages); 595 __iommu_free(tbl, dma_handle, npages);
597 sg = sg_next(sg); 596 sg = sg_next(sg);
598 } 597 }
@@ -676,7 +675,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
676 set_bit(0, tbl->it_map); 675 set_bit(0, tbl->it_map);
677 676
678 /* We only split the IOMMU table if we have 1GB or more of space */ 677 /* We only split the IOMMU table if we have 1GB or more of space */
679 if ((tbl->it_size << IOMMU_PAGE_SHIFT) >= (1UL * 1024 * 1024 * 1024)) 678 if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
680 tbl->nr_pools = IOMMU_NR_POOLS; 679 tbl->nr_pools = IOMMU_NR_POOLS;
681 else 680 else
682 tbl->nr_pools = 1; 681 tbl->nr_pools = 1;
@@ -768,16 +767,16 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
768 767
769 vaddr = page_address(page) + offset; 768 vaddr = page_address(page) + offset;
770 uaddr = (unsigned long)vaddr; 769 uaddr = (unsigned long)vaddr;
771 npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE); 770 npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
772 771
773 if (tbl) { 772 if (tbl) {
774 align = 0; 773 align = 0;
775 if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE && 774 if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
776 ((unsigned long)vaddr & ~PAGE_MASK) == 0) 775 ((unsigned long)vaddr & ~PAGE_MASK) == 0)
777 align = PAGE_SHIFT - IOMMU_PAGE_SHIFT; 776 align = PAGE_SHIFT - tbl->it_page_shift;
778 777
779 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction, 778 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
780 mask >> IOMMU_PAGE_SHIFT, align, 779 mask >> tbl->it_page_shift, align,
781 attrs); 780 attrs);
782 if (dma_handle == DMA_ERROR_CODE) { 781 if (dma_handle == DMA_ERROR_CODE) {
783 if (printk_ratelimit()) { 782 if (printk_ratelimit()) {
@@ -786,7 +785,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
786 npages); 785 npages);
787 } 786 }
788 } else 787 } else
789 dma_handle |= (uaddr & ~IOMMU_PAGE_MASK); 788 dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
790 } 789 }
791 790
792 return dma_handle; 791 return dma_handle;
@@ -801,7 +800,8 @@ void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
801 BUG_ON(direction == DMA_NONE); 800 BUG_ON(direction == DMA_NONE);
802 801
803 if (tbl) { 802 if (tbl) {
804 npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE); 803 npages = iommu_num_pages(dma_handle, size,
804 IOMMU_PAGE_SIZE(tbl));
805 iommu_free(tbl, dma_handle, npages); 805 iommu_free(tbl, dma_handle, npages);
806 } 806 }
807} 807}
@@ -845,10 +845,10 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
845 memset(ret, 0, size); 845 memset(ret, 0, size);
846 846
847 /* Set up tces to cover the allocated range */ 847 /* Set up tces to cover the allocated range */
848 nio_pages = size >> IOMMU_PAGE_SHIFT; 848 nio_pages = size >> tbl->it_page_shift;
849 io_order = get_iommu_order(size); 849 io_order = get_iommu_order(size, tbl);
850 mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, 850 mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
851 mask >> IOMMU_PAGE_SHIFT, io_order, NULL); 851 mask >> tbl->it_page_shift, io_order, NULL);
852 if (mapping == DMA_ERROR_CODE) { 852 if (mapping == DMA_ERROR_CODE) {
853 free_pages((unsigned long)ret, order); 853 free_pages((unsigned long)ret, order);
854 return NULL; 854 return NULL;
@@ -864,7 +864,7 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
864 unsigned int nio_pages; 864 unsigned int nio_pages;
865 865
866 size = PAGE_ALIGN(size); 866 size = PAGE_ALIGN(size);
867 nio_pages = size >> IOMMU_PAGE_SHIFT; 867 nio_pages = size >> tbl->it_page_shift;
868 iommu_free(tbl, dma_handle, nio_pages); 868 iommu_free(tbl, dma_handle, nio_pages);
869 size = PAGE_ALIGN(size); 869 size = PAGE_ALIGN(size);
870 free_pages((unsigned long)vaddr, get_order(size)); 870 free_pages((unsigned long)vaddr, get_order(size));
@@ -935,10 +935,10 @@ int iommu_tce_clear_param_check(struct iommu_table *tbl,
935 if (tce_value) 935 if (tce_value)
936 return -EINVAL; 936 return -EINVAL;
937 937
938 if (ioba & ~IOMMU_PAGE_MASK) 938 if (ioba & ~IOMMU_PAGE_MASK(tbl))
939 return -EINVAL; 939 return -EINVAL;
940 940
941 ioba >>= IOMMU_PAGE_SHIFT; 941 ioba >>= tbl->it_page_shift;
942 if (ioba < tbl->it_offset) 942 if (ioba < tbl->it_offset)
943 return -EINVAL; 943 return -EINVAL;
944 944
@@ -955,13 +955,13 @@ int iommu_tce_put_param_check(struct iommu_table *tbl,
955 if (!(tce & (TCE_PCI_WRITE | TCE_PCI_READ))) 955 if (!(tce & (TCE_PCI_WRITE | TCE_PCI_READ)))
956 return -EINVAL; 956 return -EINVAL;
957 957
958 if (tce & ~(IOMMU_PAGE_MASK | TCE_PCI_WRITE | TCE_PCI_READ)) 958 if (tce & ~(IOMMU_PAGE_MASK(tbl) | TCE_PCI_WRITE | TCE_PCI_READ))
959 return -EINVAL; 959 return -EINVAL;
960 960
961 if (ioba & ~IOMMU_PAGE_MASK) 961 if (ioba & ~IOMMU_PAGE_MASK(tbl))
962 return -EINVAL; 962 return -EINVAL;
963 963
964 ioba >>= IOMMU_PAGE_SHIFT; 964 ioba >>= tbl->it_page_shift;
965 if (ioba < tbl->it_offset) 965 if (ioba < tbl->it_offset)
966 return -EINVAL; 966 return -EINVAL;
967 967
@@ -1037,7 +1037,7 @@ int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
1037 1037
1038 /* if (unlikely(ret)) 1038 /* if (unlikely(ret))
1039 pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n", 1039 pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
1040 __func__, hwaddr, entry << IOMMU_PAGE_SHIFT, 1040 __func__, hwaddr, entry << IOMMU_PAGE_SHIFT(tbl),
1041 hwaddr, ret); */ 1041 hwaddr, ret); */
1042 1042
1043 return ret; 1043 return ret;
@@ -1049,14 +1049,14 @@ int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
1049{ 1049{
1050 int ret; 1050 int ret;
1051 struct page *page = NULL; 1051 struct page *page = NULL;
1052 unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK & ~PAGE_MASK; 1052 unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
1053 enum dma_data_direction direction = iommu_tce_direction(tce); 1053 enum dma_data_direction direction = iommu_tce_direction(tce);
1054 1054
1055 ret = get_user_pages_fast(tce & PAGE_MASK, 1, 1055 ret = get_user_pages_fast(tce & PAGE_MASK, 1,
1056 direction != DMA_TO_DEVICE, &page); 1056 direction != DMA_TO_DEVICE, &page);
1057 if (unlikely(ret != 1)) { 1057 if (unlikely(ret != 1)) {
1058 /* pr_err("iommu_tce: get_user_pages_fast failed tce=%lx ioba=%lx ret=%d\n", 1058 /* pr_err("iommu_tce: get_user_pages_fast failed tce=%lx ioba=%lx ret=%d\n",
1059 tce, entry << IOMMU_PAGE_SHIFT, ret); */ 1059 tce, entry << IOMMU_PAGE_SHIFT(tbl), ret); */
1060 return -EFAULT; 1060 return -EFAULT;
1061 } 1061 }
1062 hwaddr = (unsigned long) page_address(page) + offset; 1062 hwaddr = (unsigned long) page_address(page) + offset;
@@ -1067,7 +1067,7 @@ int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
1067 1067
1068 if (ret < 0) 1068 if (ret < 0)
1069 pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n", 1069 pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n",
1070 __func__, entry << IOMMU_PAGE_SHIFT, tce, ret); 1070 __func__, entry << tbl->it_page_shift, tce, ret);
1071 1071
1072 return ret; 1072 return ret;
1073} 1073}
@@ -1105,7 +1105,7 @@ void iommu_release_ownership(struct iommu_table *tbl)
1105} 1105}
1106EXPORT_SYMBOL_GPL(iommu_release_ownership); 1106EXPORT_SYMBOL_GPL(iommu_release_ownership);
1107 1107
1108static int iommu_add_device(struct device *dev) 1108int iommu_add_device(struct device *dev)
1109{ 1109{
1110 struct iommu_table *tbl; 1110 struct iommu_table *tbl;
1111 int ret = 0; 1111 int ret = 0;
@@ -1127,6 +1127,12 @@ static int iommu_add_device(struct device *dev)
1127 pr_debug("iommu_tce: adding %s to iommu group %d\n", 1127 pr_debug("iommu_tce: adding %s to iommu group %d\n",
1128 dev_name(dev), iommu_group_id(tbl->it_group)); 1128 dev_name(dev), iommu_group_id(tbl->it_group));
1129 1129
1130 if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
1131 pr_err("iommu_tce: unsupported iommu page size.");
1132 pr_err("%s has not been added\n", dev_name(dev));
1133 return -EINVAL;
1134 }
1135
1130 ret = iommu_group_add_device(tbl->it_group, dev); 1136 ret = iommu_group_add_device(tbl->it_group, dev);
1131 if (ret < 0) 1137 if (ret < 0)
1132 pr_err("iommu_tce: %s has not been added, ret=%d\n", 1138 pr_err("iommu_tce: %s has not been added, ret=%d\n",
@@ -1134,52 +1140,23 @@ static int iommu_add_device(struct device *dev)
1134 1140
1135 return ret; 1141 return ret;
1136} 1142}
1143EXPORT_SYMBOL_GPL(iommu_add_device);
1137 1144
1138static void iommu_del_device(struct device *dev) 1145void iommu_del_device(struct device *dev)
1139{
1140 iommu_group_remove_device(dev);
1141}
1142
1143static int iommu_bus_notifier(struct notifier_block *nb,
1144 unsigned long action, void *data)
1145{ 1146{
1146 struct device *dev = data; 1147 /*
1147 1148 * Some devices might not have IOMMU table and group
1148 switch (action) { 1149 * and we needn't detach them from the associated
1149 case BUS_NOTIFY_ADD_DEVICE: 1150 * IOMMU groups
1150 return iommu_add_device(dev); 1151 */
1151 case BUS_NOTIFY_DEL_DEVICE: 1152 if (!dev->iommu_group) {
1152 iommu_del_device(dev); 1153 pr_debug("iommu_tce: skipping device %s with no tbl\n",
1153 return 0; 1154 dev_name(dev));
1154 default: 1155 return;
1155 return 0;
1156 } 1156 }
1157}
1158 1157
1159static struct notifier_block tce_iommu_bus_nb = { 1158 iommu_group_remove_device(dev);
1160 .notifier_call = iommu_bus_notifier,
1161};
1162
1163static int __init tce_iommu_init(void)
1164{
1165 struct pci_dev *pdev = NULL;
1166
1167 BUILD_BUG_ON(PAGE_SIZE < IOMMU_PAGE_SIZE);
1168
1169 for_each_pci_dev(pdev)
1170 iommu_add_device(&pdev->dev);
1171
1172 bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
1173 return 0;
1174}
1175
1176subsys_initcall_sync(tce_iommu_init);
1177
1178#else
1179
1180void iommu_register_group(struct iommu_table *tbl,
1181 int pci_domain_number, unsigned long pe_num)
1182{
1183} 1159}
1160EXPORT_SYMBOL_GPL(iommu_del_device);
1184 1161
1185#endif /* CONFIG_IOMMU_API */ 1162#endif /* CONFIG_IOMMU_API */
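The iommu.c changes above replace the global IOMMU_PAGE_SHIFT/SIZE/MASK constants with values derived from each table's it_page_shift, so tables backed by different TCE page sizes can coexist. A minimal sketch of how the size, the mask and a DMA address fall out of that shift; the struct below is a stand-in, not the kernel's iommu_table:

#include <stdio.h>
#include <stdint.h>

struct iommu_table_model { unsigned int it_page_shift; uint64_t it_offset; };

#define IOMMU_PAGE_SIZE(t) (1ull << (t)->it_page_shift)
#define IOMMU_PAGE_MASK(t) (~(IOMMU_PAGE_SIZE(t) - 1))

/* Model of iommu_alloc(): entry number -> DMA address, keeping the
 * sub-page offset of the original buffer. */
static uint64_t dma_addr_for(const struct iommu_table_model *tbl,
			     uint64_t entry, uint64_t vaddr)
{
	uint64_t ret = (entry + tbl->it_offset) << tbl->it_page_shift;

	return ret | (vaddr & ~IOMMU_PAGE_MASK(tbl));
}

int main(void)
{
	struct iommu_table_model tbl = { .it_page_shift = 16, .it_offset = 0 };	/* 64K TCEs */

	printf("%#llx\n",
	       (unsigned long long)dma_addr_for(&tbl, 3, 0x1234));	/* 0x31234 */
	return 0;
}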
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index ba0165615215..9729b23bfb0a 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -354,8 +354,13 @@ int arch_show_interrupts(struct seq_file *p, int prec)
354 354
355 seq_printf(p, "%*s: ", prec, "LOC"); 355 seq_printf(p, "%*s: ", prec, "LOC");
356 for_each_online_cpu(j) 356 for_each_online_cpu(j)
357 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs); 357 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
358 seq_printf(p, " Local timer interrupts\n"); 358 seq_printf(p, " Local timer interrupts for timer event device\n");
359
360 seq_printf(p, "%*s: ", prec, "LOC");
361 for_each_online_cpu(j)
362 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
363 seq_printf(p, " Local timer interrupts for others\n");
359 364
360 seq_printf(p, "%*s: ", prec, "SPU"); 365 seq_printf(p, "%*s: ", prec, "SPU");
361 for_each_online_cpu(j) 366 for_each_online_cpu(j)
@@ -389,11 +394,12 @@ int arch_show_interrupts(struct seq_file *p, int prec)
389 */ 394 */
390u64 arch_irq_stat_cpu(unsigned int cpu) 395u64 arch_irq_stat_cpu(unsigned int cpu)
391{ 396{
392 u64 sum = per_cpu(irq_stat, cpu).timer_irqs; 397 u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;
393 398
394 sum += per_cpu(irq_stat, cpu).pmu_irqs; 399 sum += per_cpu(irq_stat, cpu).pmu_irqs;
395 sum += per_cpu(irq_stat, cpu).mce_exceptions; 400 sum += per_cpu(irq_stat, cpu).mce_exceptions;
396 sum += per_cpu(irq_stat, cpu).spurious_irqs; 401 sum += per_cpu(irq_stat, cpu).spurious_irqs;
402 sum += per_cpu(irq_stat, cpu).timer_irqs_others;
397#ifdef CONFIG_PPC_DOORBELL 403#ifdef CONFIG_PPC_DOORBELL
398 sum += per_cpu(irq_stat, cpu).doorbell_irqs; 404 sum += per_cpu(irq_stat, cpu).doorbell_irqs;
399#endif 405#endif
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 83e89d310734..8504657379f1 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -15,7 +15,6 @@
15 */ 15 */
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <linux/kgdb.h> 18#include <linux/kgdb.h>
20#include <linux/smp.h> 19#include <linux/smp.h>
21#include <linux/signal.h> 20#include <linux/signal.h>
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
new file mode 100644
index 000000000000..cadef7e64e42
--- /dev/null
+++ b/arch/powerpc/kernel/mce.c
@@ -0,0 +1,352 @@
1/*
2 * Machine check exception handling.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright 2013 IBM Corporation
19 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
20 */
21
22#undef DEBUG
23#define pr_fmt(fmt) "mce: " fmt
24
25#include <linux/types.h>
26#include <linux/ptrace.h>
27#include <linux/percpu.h>
28#include <linux/export.h>
29#include <linux/irq_work.h>
30#include <asm/mce.h>
31
32static DEFINE_PER_CPU(int, mce_nest_count);
33static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);
34
35/* Queue for delayed MCE events. */
36static DEFINE_PER_CPU(int, mce_queue_count);
37static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);
38
39static void machine_check_process_queued_event(struct irq_work *work);
40struct irq_work mce_event_process_work = {
41 .func = machine_check_process_queued_event,
42};
43
44static void mce_set_error_info(struct machine_check_event *mce,
45 struct mce_error_info *mce_err)
46{
47 mce->error_type = mce_err->error_type;
48 switch (mce_err->error_type) {
49 case MCE_ERROR_TYPE_UE:
50 mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
51 break;
52 case MCE_ERROR_TYPE_SLB:
53 mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
54 break;
55 case MCE_ERROR_TYPE_ERAT:
56 mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
57 break;
58 case MCE_ERROR_TYPE_TLB:
59 mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
60 break;
61 case MCE_ERROR_TYPE_UNKNOWN:
62 default:
63 break;
64 }
65}
66
67/*
 68 * Decode and save high-level MCE information into a per-CPU buffer,
 69 * which is an array of machine_check_event structures.
70 */
71void save_mce_event(struct pt_regs *regs, long handled,
72 struct mce_error_info *mce_err,
73 uint64_t addr)
74{
75 uint64_t srr1;
76 int index = __get_cpu_var(mce_nest_count)++;
77 struct machine_check_event *mce = &__get_cpu_var(mce_event[index]);
78
79 /*
 80	 * Return if we don't have enough space to log the MCE event.
81 * mce_nest_count may go beyond MAX_MC_EVT but that's ok,
82 * the check below will stop buffer overrun.
83 */
84 if (index >= MAX_MC_EVT)
85 return;
86
87 /* Populate generic machine check info */
88 mce->version = MCE_V1;
89 mce->srr0 = regs->nip;
90 mce->srr1 = regs->msr;
91 mce->gpr3 = regs->gpr[3];
92 mce->in_use = 1;
93
94 mce->initiator = MCE_INITIATOR_CPU;
95 if (handled)
96 mce->disposition = MCE_DISPOSITION_RECOVERED;
97 else
98 mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
99 mce->severity = MCE_SEV_ERROR_SYNC;
100
101 srr1 = regs->msr;
102
103 /*
104 * Populate the mce error_type and type-specific error_type.
105 */
106 mce_set_error_info(mce, mce_err);
107
108 if (!addr)
109 return;
110
111 if (mce->error_type == MCE_ERROR_TYPE_TLB) {
112 mce->u.tlb_error.effective_address_provided = true;
113 mce->u.tlb_error.effective_address = addr;
114 } else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
115 mce->u.slb_error.effective_address_provided = true;
116 mce->u.slb_error.effective_address = addr;
117 } else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
118 mce->u.erat_error.effective_address_provided = true;
119 mce->u.erat_error.effective_address = addr;
120 } else if (mce->error_type == MCE_ERROR_TYPE_UE) {
121 mce->u.ue_error.effective_address_provided = true;
122 mce->u.ue_error.effective_address = addr;
123 }
124 return;
125}
126
127/*
128 * get_mce_event:
129 * mce Pointer to machine_check_event structure to be filled.
130 * release Flag to indicate whether to free the event slot or not.
131 * 0 <= do not release the mce event. Caller will invoke
132 * release_mce_event() once event has been consumed.
133 * 1 <= release the slot.
134 *
135 * return 1 = success
136 * 0 = failure
137 *
 138 * get_mce_event() will be called by the platform-specific machine
 139 * check handler and by KVM.
 140 * When we call get_mce_event(), we are still in interrupt context and
 141 * preemption will not be scheduled until the ret_from_except() routine
 142 * is called.
143 */
144int get_mce_event(struct machine_check_event *mce, bool release)
145{
146 int index = __get_cpu_var(mce_nest_count) - 1;
147 struct machine_check_event *mc_evt;
148 int ret = 0;
149
150 /* Sanity check */
151 if (index < 0)
152 return ret;
153
154 /* Check if we have MCE info to process. */
155 if (index < MAX_MC_EVT) {
156 mc_evt = &__get_cpu_var(mce_event[index]);
157 /* Copy the event structure and release the original */
158 if (mce)
159 *mce = *mc_evt;
160 if (release)
161 mc_evt->in_use = 0;
162 ret = 1;
163 }
164 /* Decrement the count to free the slot. */
165 if (release)
166 __get_cpu_var(mce_nest_count)--;
167
168 return ret;
169}
170
171void release_mce_event(void)
172{
173 get_mce_event(NULL, true);
174}
175
176/*
177 * Queue up the MCE event which then can be handled later.
178 */
179void machine_check_queue_event(void)
180{
181 int index;
182 struct machine_check_event evt;
183
184 if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
185 return;
186
187 index = __get_cpu_var(mce_queue_count)++;
188 /* If queue is full, just return for now. */
189 if (index >= MAX_MC_EVT) {
190 __get_cpu_var(mce_queue_count)--;
191 return;
192 }
193 __get_cpu_var(mce_event_queue[index]) = evt;
194
195 /* Queue irq work to process this event later. */
196 irq_work_queue(&mce_event_process_work);
197}
198
199/*
 200 * Process pending MCE events from the MCE event queue. This function will be
201 * called during syscall exit.
202 */
203static void machine_check_process_queued_event(struct irq_work *work)
204{
205 int index;
206
207 /*
208 * For now just print it to console.
209 * TODO: log this error event to FSP or nvram.
210 */
211 while (__get_cpu_var(mce_queue_count) > 0) {
212 index = __get_cpu_var(mce_queue_count) - 1;
213 machine_check_print_event_info(
214 &__get_cpu_var(mce_event_queue[index]));
215 __get_cpu_var(mce_queue_count)--;
216 }
217}
218
219void machine_check_print_event_info(struct machine_check_event *evt)
220{
221 const char *level, *sevstr, *subtype;
222 static const char *mc_ue_types[] = {
223 "Indeterminate",
224 "Instruction fetch",
225 "Page table walk ifetch",
226 "Load/Store",
227 "Page table walk Load/Store",
228 };
229 static const char *mc_slb_types[] = {
230 "Indeterminate",
231 "Parity",
232 "Multihit",
233 };
234 static const char *mc_erat_types[] = {
235 "Indeterminate",
236 "Parity",
237 "Multihit",
238 };
239 static const char *mc_tlb_types[] = {
240 "Indeterminate",
241 "Parity",
242 "Multihit",
243 };
244
245 /* Print things out */
246 if (evt->version != MCE_V1) {
247 pr_err("Machine Check Exception, Unknown event version %d !\n",
248 evt->version);
249 return;
250 }
251 switch (evt->severity) {
252 case MCE_SEV_NO_ERROR:
253 level = KERN_INFO;
254 sevstr = "Harmless";
255 break;
256 case MCE_SEV_WARNING:
257 level = KERN_WARNING;
258 sevstr = "";
259 break;
260 case MCE_SEV_ERROR_SYNC:
261 level = KERN_ERR;
262 sevstr = "Severe";
263 break;
264 case MCE_SEV_FATAL:
265 default:
266 level = KERN_ERR;
267 sevstr = "Fatal";
268 break;
269 }
270
271 printk("%s%s Machine check interrupt [%s]\n", level, sevstr,
272 evt->disposition == MCE_DISPOSITION_RECOVERED ?
 273		evt->disposition == MCE_DISPOSITION_RECOVERED ?
		"Recovered" : "Not recovered");
274 printk("%s Initiator: %s\n", level,
275 evt->initiator == MCE_INITIATOR_CPU ? "CPU" : "Unknown");
276 switch (evt->error_type) {
277 case MCE_ERROR_TYPE_UE:
278 subtype = evt->u.ue_error.ue_error_type <
279 ARRAY_SIZE(mc_ue_types) ?
280 mc_ue_types[evt->u.ue_error.ue_error_type]
281 : "Unknown";
282 printk("%s Error type: UE [%s]\n", level, subtype);
283 if (evt->u.ue_error.effective_address_provided)
284 printk("%s Effective address: %016llx\n",
285 level, evt->u.ue_error.effective_address);
286 if (evt->u.ue_error.physical_address_provided)
 287			printk("%s Physical address: %016llx\n",
288 level, evt->u.ue_error.physical_address);
289 break;
290 case MCE_ERROR_TYPE_SLB:
291 subtype = evt->u.slb_error.slb_error_type <
292 ARRAY_SIZE(mc_slb_types) ?
293 mc_slb_types[evt->u.slb_error.slb_error_type]
294 : "Unknown";
295 printk("%s Error type: SLB [%s]\n", level, subtype);
296 if (evt->u.slb_error.effective_address_provided)
297 printk("%s Effective address: %016llx\n",
298 level, evt->u.slb_error.effective_address);
299 break;
300 case MCE_ERROR_TYPE_ERAT:
301 subtype = evt->u.erat_error.erat_error_type <
302 ARRAY_SIZE(mc_erat_types) ?
303 mc_erat_types[evt->u.erat_error.erat_error_type]
304 : "Unknown";
305 printk("%s Error type: ERAT [%s]\n", level, subtype);
306 if (evt->u.erat_error.effective_address_provided)
307 printk("%s Effective address: %016llx\n",
308 level, evt->u.erat_error.effective_address);
309 break;
310 case MCE_ERROR_TYPE_TLB:
311 subtype = evt->u.tlb_error.tlb_error_type <
312 ARRAY_SIZE(mc_tlb_types) ?
313 mc_tlb_types[evt->u.tlb_error.tlb_error_type]
314 : "Unknown";
315 printk("%s Error type: TLB [%s]\n", level, subtype);
316 if (evt->u.tlb_error.effective_address_provided)
317 printk("%s Effective address: %016llx\n",
318 level, evt->u.tlb_error.effective_address);
319 break;
320 default:
321 case MCE_ERROR_TYPE_UNKNOWN:
322 printk("%s Error type: Unknown\n", level);
323 break;
324 }
325}
326
327uint64_t get_mce_fault_addr(struct machine_check_event *evt)
328{
329 switch (evt->error_type) {
330 case MCE_ERROR_TYPE_UE:
331 if (evt->u.ue_error.effective_address_provided)
332 return evt->u.ue_error.effective_address;
333 break;
334 case MCE_ERROR_TYPE_SLB:
335 if (evt->u.slb_error.effective_address_provided)
336 return evt->u.slb_error.effective_address;
337 break;
338 case MCE_ERROR_TYPE_ERAT:
339 if (evt->u.erat_error.effective_address_provided)
340 return evt->u.erat_error.effective_address;
341 break;
342 case MCE_ERROR_TYPE_TLB:
343 if (evt->u.tlb_error.effective_address_provided)
344 return evt->u.tlb_error.effective_address;
345 break;
346 default:
347 case MCE_ERROR_TYPE_UNKNOWN:
348 break;
349 }
350 return 0;
351}
352EXPORT_SYMBOL(get_mce_fault_addr);
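mce.c above keeps a small per-CPU array plus a nest count so that a machine check arriving while an earlier one is still being handled gets its own slot, and get_mce_event() with release pops the most recent entry. A stripped-down, single-CPU C model of that save/get/release discipline (the struct layout and MAX_MC_EVT here are placeholders):

#include <stdbool.h>
#include <stdio.h>

#define MAX_MC_EVT 100

struct mc_evt_model { int in_use; unsigned long srr0; };

static int mce_nest_count;
static struct mc_evt_model mce_event[MAX_MC_EVT];

static void save_event(unsigned long srr0)
{
	int index = mce_nest_count++;

	if (index >= MAX_MC_EVT)	/* the count still records the overflow */
		return;
	mce_event[index].in_use = 1;
	mce_event[index].srr0 = srr0;
}

static bool get_event(struct mc_evt_model *out, bool release)
{
	int index = mce_nest_count - 1;
	bool ret = false;

	if (index < 0)
		return false;
	if (index < MAX_MC_EVT) {
		if (out)
			*out = mce_event[index];
		if (release)
			mce_event[index].in_use = 0;
		ret = true;
	}
	if (release)			/* pop the most recent slot */
		mce_nest_count--;
	return ret;
}

int main(void)
{
	struct mc_evt_model e;

	save_event(0x1000);
	save_event(0x2000);		/* nested machine check */
	while (get_event(&e, true))
		printf("srr0=%#lx\n", e.srr0);	/* 0x2000 then 0x1000 */
	return 0;
}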
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
new file mode 100644
index 000000000000..27c93f41166f
--- /dev/null
+++ b/arch/powerpc/kernel/mce_power.c
@@ -0,0 +1,284 @@
1/*
2 * Machine check exception handling CPU-side for power7 and power8
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright 2013 IBM Corporation
19 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
20 */
21
22#undef DEBUG
23#define pr_fmt(fmt) "mce_power: " fmt
24
25#include <linux/types.h>
26#include <linux/ptrace.h>
27#include <asm/mmu.h>
28#include <asm/mce.h>
29
30/* flush SLBs and reload */
31static void flush_and_reload_slb(void)
32{
33 struct slb_shadow *slb;
34 unsigned long i, n;
35
36 /* Invalidate all SLBs */
37 asm volatile("slbmte %0,%0; slbia" : : "r" (0));
38
39#ifdef CONFIG_KVM_BOOK3S_HANDLER
40 /*
41 * If machine check is hit when in guest or in transition, we will
42 * only flush the SLBs and continue.
43 */
44 if (get_paca()->kvm_hstate.in_guest)
45 return;
46#endif
47
48 /* For host kernel, reload the SLBs from shadow SLB buffer. */
49 slb = get_slb_shadow();
50 if (!slb)
51 return;
52
53 n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE);
54
55 /* Load up the SLB entries from shadow SLB */
56 for (i = 0; i < n; i++) {
57 unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
58 unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);
59
60 rb = (rb & ~0xFFFul) | i;
61 asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
62 }
63}
64
65static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
66{
67 long handled = 1;
68
69 /*
70 * flush and reload SLBs for SLB errors and flush TLBs for TLB errors.
71 * reset the error bits whenever we handle them so that at the end
72 * we can check whether we handled all of them or not.
73 * */
74 if (dsisr & slb_error_bits) {
75 flush_and_reload_slb();
76 /* reset error bits */
77 dsisr &= ~(slb_error_bits);
78 }
79 if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) {
80 if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
81 cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE);
82 /* reset error bits */
83 dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB;
84 }
85 /* Any other errors we don't understand? */
86 if (dsisr & 0xffffffffUL)
87 handled = 0;
88
89 return handled;
90}
91
92static long mce_handle_derror_p7(uint64_t dsisr)
93{
94 return mce_handle_derror(dsisr, P7_DSISR_MC_SLB_ERRORS);
95}
96
97static long mce_handle_common_ierror(uint64_t srr1)
98{
99 long handled = 0;
100
101 switch (P7_SRR1_MC_IFETCH(srr1)) {
102 case 0:
103 break;
104 case P7_SRR1_MC_IFETCH_SLB_PARITY:
105 case P7_SRR1_MC_IFETCH_SLB_MULTIHIT:
106 /* flush and reload SLBs for SLB errors. */
107 flush_and_reload_slb();
108 handled = 1;
109 break;
110 case P7_SRR1_MC_IFETCH_TLB_MULTIHIT:
111 if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
112 cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE);
113 handled = 1;
114 }
115 break;
116 default:
117 break;
118 }
119
120 return handled;
121}
122
123static long mce_handle_ierror_p7(uint64_t srr1)
124{
125 long handled = 0;
126
127 handled = mce_handle_common_ierror(srr1);
128
129 if (P7_SRR1_MC_IFETCH(srr1) == P7_SRR1_MC_IFETCH_SLB_BOTH) {
130 flush_and_reload_slb();
131 handled = 1;
132 }
133 return handled;
134}
135
136static void mce_get_common_ierror(struct mce_error_info *mce_err, uint64_t srr1)
137{
138 switch (P7_SRR1_MC_IFETCH(srr1)) {
139 case P7_SRR1_MC_IFETCH_SLB_PARITY:
140 mce_err->error_type = MCE_ERROR_TYPE_SLB;
141 mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
142 break;
143 case P7_SRR1_MC_IFETCH_SLB_MULTIHIT:
144 mce_err->error_type = MCE_ERROR_TYPE_SLB;
145 mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
146 break;
147 case P7_SRR1_MC_IFETCH_TLB_MULTIHIT:
148 mce_err->error_type = MCE_ERROR_TYPE_TLB;
149 mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
150 break;
151 case P7_SRR1_MC_IFETCH_UE:
152 case P7_SRR1_MC_IFETCH_UE_IFU_INTERNAL:
153 mce_err->error_type = MCE_ERROR_TYPE_UE;
154 mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH;
155 break;
156 case P7_SRR1_MC_IFETCH_UE_TLB_RELOAD:
157 mce_err->error_type = MCE_ERROR_TYPE_UE;
158 mce_err->u.ue_error_type =
159 MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
160 break;
161 }
162}
163
164static void mce_get_ierror_p7(struct mce_error_info *mce_err, uint64_t srr1)
165{
166 mce_get_common_ierror(mce_err, srr1);
167 if (P7_SRR1_MC_IFETCH(srr1) == P7_SRR1_MC_IFETCH_SLB_BOTH) {
168 mce_err->error_type = MCE_ERROR_TYPE_SLB;
169 mce_err->u.slb_error_type = MCE_SLB_ERROR_INDETERMINATE;
170 }
171}
172
173static void mce_get_derror_p7(struct mce_error_info *mce_err, uint64_t dsisr)
174{
175 if (dsisr & P7_DSISR_MC_UE) {
176 mce_err->error_type = MCE_ERROR_TYPE_UE;
177 mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE;
178 } else if (dsisr & P7_DSISR_MC_UE_TABLEWALK) {
179 mce_err->error_type = MCE_ERROR_TYPE_UE;
180 mce_err->u.ue_error_type =
181 MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
182 } else if (dsisr & P7_DSISR_MC_ERAT_MULTIHIT) {
183 mce_err->error_type = MCE_ERROR_TYPE_ERAT;
184 mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
185 } else if (dsisr & P7_DSISR_MC_SLB_MULTIHIT) {
186 mce_err->error_type = MCE_ERROR_TYPE_SLB;
187 mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
188 } else if (dsisr & P7_DSISR_MC_SLB_PARITY_MFSLB) {
189 mce_err->error_type = MCE_ERROR_TYPE_SLB;
190 mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
191 } else if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) {
192 mce_err->error_type = MCE_ERROR_TYPE_TLB;
193 mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
194 } else if (dsisr & P7_DSISR_MC_SLB_MULTIHIT_PARITY) {
195 mce_err->error_type = MCE_ERROR_TYPE_SLB;
196 mce_err->u.slb_error_type = MCE_SLB_ERROR_INDETERMINATE;
197 }
198}
199
200long __machine_check_early_realmode_p7(struct pt_regs *regs)
201{
202 uint64_t srr1, addr;
203 long handled = 1;
204 struct mce_error_info mce_error_info = { 0 };
205
206 srr1 = regs->msr;
207
208 /*
 209	 * Handle memory errors depending on whether this was a load/store or
 210	 * ifetch exception. Also, populate the mce error_type and
 211	 * type-specific error_type from either SRR1 or DSISR, depending on
 212	 * whether this was a load/store or ifetch exception.
213 */
214 if (P7_SRR1_MC_LOADSTORE(srr1)) {
215 handled = mce_handle_derror_p7(regs->dsisr);
216 mce_get_derror_p7(&mce_error_info, regs->dsisr);
217 addr = regs->dar;
218 } else {
219 handled = mce_handle_ierror_p7(srr1);
220 mce_get_ierror_p7(&mce_error_info, srr1);
221 addr = regs->nip;
222 }
223
224 save_mce_event(regs, handled, &mce_error_info, addr);
225 return handled;
226}
227
228static void mce_get_ierror_p8(struct mce_error_info *mce_err, uint64_t srr1)
229{
230 mce_get_common_ierror(mce_err, srr1);
231 if (P7_SRR1_MC_IFETCH(srr1) == P8_SRR1_MC_IFETCH_ERAT_MULTIHIT) {
232 mce_err->error_type = MCE_ERROR_TYPE_ERAT;
233 mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
234 }
235}
236
237static void mce_get_derror_p8(struct mce_error_info *mce_err, uint64_t dsisr)
238{
239 mce_get_derror_p7(mce_err, dsisr);
240 if (dsisr & P8_DSISR_MC_ERAT_MULTIHIT_SEC) {
241 mce_err->error_type = MCE_ERROR_TYPE_ERAT;
242 mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
243 }
244}
245
246static long mce_handle_ierror_p8(uint64_t srr1)
247{
248 long handled = 0;
249
250 handled = mce_handle_common_ierror(srr1);
251
252 if (P7_SRR1_MC_IFETCH(srr1) == P8_SRR1_MC_IFETCH_ERAT_MULTIHIT) {
253 flush_and_reload_slb();
254 handled = 1;
255 }
256 return handled;
257}
258
259static long mce_handle_derror_p8(uint64_t dsisr)
260{
261 return mce_handle_derror(dsisr, P8_DSISR_MC_SLB_ERRORS);
262}
263
264long __machine_check_early_realmode_p8(struct pt_regs *regs)
265{
266 uint64_t srr1, addr;
267 long handled = 1;
268 struct mce_error_info mce_error_info = { 0 };
269
270 srr1 = regs->msr;
271
272 if (P7_SRR1_MC_LOADSTORE(srr1)) {
273 handled = mce_handle_derror_p8(regs->dsisr);
274 mce_get_derror_p8(&mce_error_info, regs->dsisr);
275 addr = regs->dar;
276 } else {
277 handled = mce_handle_ierror_p8(srr1);
278 mce_get_ierror_p8(&mce_error_info, srr1);
279 addr = regs->nip;
280 }
281
282 save_mce_event(regs, handled, &mce_error_info, addr);
283 return handled;
284}
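mce_handle_derror() above clears each DSISR error bit as it deals with it and only reports the event as handled if no unrecognized bits remain. A compact model of that pattern; the bit values below are made up, not the real P7/P8 DSISR definitions:

#include <stdio.h>
#include <stdint.h>

/* Placeholder bit values, standing in for P7_DSISR_MC_* definitions. */
#define DSISR_SLB_ERRORS   0x00000300u
#define DSISR_TLB_MULTIHIT 0x00000010u

static void flush_and_reload_slb(void) { /* would rewrite the SLB here */ }
static void flush_tlb(void)            { /* would tlbiel here */ }

static long handle_derror(uint32_t dsisr)
{
	if (dsisr & DSISR_SLB_ERRORS) {
		flush_and_reload_slb();
		dsisr &= ~DSISR_SLB_ERRORS;	/* bit dealt with, clear it */
	}
	if (dsisr & DSISR_TLB_MULTIHIT) {
		flush_tlb();
		dsisr &= ~DSISR_TLB_MULTIHIT;
	}
	return (dsisr & 0xffffffffu) ? 0 : 1;	/* anything left is unhandled */
}

int main(void)
{
	printf("%ld %ld\n", handle_derror(0x00000100), handle_derror(0x00008000));
	return 0;
}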
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index e47d268727a4..879f09620f83 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -344,7 +344,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
344 */ 344 */
345_KPROBE(flush_icache_range) 345_KPROBE(flush_icache_range)
346BEGIN_FTR_SECTION 346BEGIN_FTR_SECTION
347 isync 347 PURGE_PREFETCHED_INS
348 blr /* for 601, do nothing */ 348 blr /* for 601, do nothing */
349END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) 349END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
350 li r5,L1_CACHE_BYTES-1 350 li r5,L1_CACHE_BYTES-1
@@ -448,6 +448,7 @@ _GLOBAL(invalidate_dcache_range)
448 */ 448 */
449_GLOBAL(__flush_dcache_icache) 449_GLOBAL(__flush_dcache_icache)
450BEGIN_FTR_SECTION 450BEGIN_FTR_SECTION
451 PURGE_PREFETCHED_INS
451 blr 452 blr
452END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) 453END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
453 rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */ 454 rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */
@@ -489,6 +490,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
489 */ 490 */
490_GLOBAL(__flush_dcache_icache_phys) 491_GLOBAL(__flush_dcache_icache_phys)
491BEGIN_FTR_SECTION 492BEGIN_FTR_SECTION
493 PURGE_PREFETCHED_INS
492 blr /* for 601, do nothing */ 494 blr /* for 601, do nothing */
493END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) 495END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
494 mfmsr r10 496 mfmsr r10
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 64bf8db12b15..3d0249599d52 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -67,6 +67,7 @@ PPC64_CACHES:
67 67
68_KPROBE(flush_icache_range) 68_KPROBE(flush_icache_range)
69BEGIN_FTR_SECTION 69BEGIN_FTR_SECTION
70 PURGE_PREFETCHED_INS
70 blr 71 blr
71END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) 72END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
72/* 73/*
@@ -211,6 +212,11 @@ _GLOBAL(__flush_dcache_icache)
211 * Different systems have different cache line sizes 212 * Different systems have different cache line sizes
212 */ 213 */
213 214
215BEGIN_FTR_SECTION
216 PURGE_PREFETCHED_INS
217 blr
218END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
219
214/* Flush the dcache */ 220/* Flush the dcache */
215 ld r7,PPC64_CACHES@toc(r2) 221 ld r7,PPC64_CACHES@toc(r2)
216 clrrdi r3,r3,PAGE_SHIFT /* Page align */ 222 clrrdi r3,r3,PAGE_SHIFT /* Page align */
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 0620eaaaad45..bf0aada02fe4 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -99,12 +99,28 @@ static inline void free_lppacas(void) { }
99 * 3 persistent SLBs are registered here. The buffer will be zero 99 * 3 persistent SLBs are registered here. The buffer will be zero
 100 * initially, hence will all be invalid until we actually write them. 100 * initially, hence will all be invalid until we actually write them.
101 */ 101 */
102struct slb_shadow slb_shadow[] __cacheline_aligned = { 102static struct slb_shadow *slb_shadow;
103 [0 ... (NR_CPUS-1)] = { 103
104 .persistent = cpu_to_be32(SLB_NUM_BOLTED), 104static void __init allocate_slb_shadows(int nr_cpus, int limit)
105 .buffer_length = cpu_to_be32(sizeof(struct slb_shadow)), 105{
106 }, 106 int size = PAGE_ALIGN(sizeof(struct slb_shadow) * nr_cpus);
107}; 107 slb_shadow = __va(memblock_alloc_base(size, PAGE_SIZE, limit));
108 memset(slb_shadow, 0, size);
109}
110
111static struct slb_shadow * __init init_slb_shadow(int cpu)
112{
113 struct slb_shadow *s = &slb_shadow[cpu];
114
115 s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
116 s->buffer_length = cpu_to_be32(sizeof(*s));
117
118 return s;
119}
120
121#else /* CONFIG_PPC_STD_MMU_64 */
122
123static void __init allocate_slb_shadows(int nr_cpus, int limit) { }
108 124
109#endif /* CONFIG_PPC_STD_MMU_64 */ 125#endif /* CONFIG_PPC_STD_MMU_64 */
110 126
@@ -142,8 +158,13 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
142 new_paca->__current = &init_task; 158 new_paca->__current = &init_task;
143 new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL; 159 new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
144#ifdef CONFIG_PPC_STD_MMU_64 160#ifdef CONFIG_PPC_STD_MMU_64
145 new_paca->slb_shadow_ptr = &slb_shadow[cpu]; 161 new_paca->slb_shadow_ptr = init_slb_shadow(cpu);
146#endif /* CONFIG_PPC_STD_MMU_64 */ 162#endif /* CONFIG_PPC_STD_MMU_64 */
163
164#ifdef CONFIG_PPC_BOOK3E
165 /* For now -- if we have threads this will be adjusted later */
166 new_paca->tcd_ptr = &new_paca->tcd;
167#endif
147} 168}
148 169
149/* Put the paca pointer into r13 and SPRG_PACA */ 170/* Put the paca pointer into r13 and SPRG_PACA */
@@ -190,6 +211,8 @@ void __init allocate_pacas(void)
190 211
191 allocate_lppacas(nr_cpu_ids, limit); 212 allocate_lppacas(nr_cpu_ids, limit);
192 213
214 allocate_slb_shadows(nr_cpu_ids, limit);
215
193 /* Can't use for_each_*_cpu, as they aren't functional yet */ 216 /* Can't use for_each_*_cpu, as they aren't functional yet */
194 for (cpu = 0; cpu < nr_cpu_ids; cpu++) 217 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
195 initialise_paca(&paca[cpu], cpu); 218 initialise_paca(&paca[cpu], cpu);
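The paca.c hunk above drops the static slb_shadow[NR_CPUS] array in favour of one page-aligned memblock allocation sized at boot. A tiny sketch of the size computation, with an assumed struct size and CPU counts purely for illustration:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096ul
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Stand-in for struct slb_shadow; the real size differs. */
struct slb_shadow_model { unsigned char pad[0x200]; };

int main(void)
{
	size_t per_cpu = sizeof(struct slb_shadow_model);
	size_t static_nr_cpus = 2048;	/* a worst-case NR_CPUS build */
	size_t runtime_cpus   = 16;	/* nr_cpu_ids on a small box */

	printf("static: %zu bytes, dynamic: %zu bytes\n",
	       PAGE_ALIGN(per_cpu * static_nr_cpus),
	       PAGE_ALIGN(per_cpu * runtime_cpus));
	return 0;
}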
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 4a96556fd2d4..64b7a6e61dd1 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -25,7 +25,6 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/user.h> 26#include <linux/user.h>
27#include <linux/elf.h> 27#include <linux/elf.h>
28#include <linux/init.h>
29#include <linux/prctl.h> 28#include <linux/prctl.h>
30#include <linux/init_task.h> 29#include <linux/init_task.h>
31#include <linux/export.h> 30#include <linux/export.h>
@@ -74,6 +73,48 @@ struct task_struct *last_task_used_vsx = NULL;
74struct task_struct *last_task_used_spe = NULL; 73struct task_struct *last_task_used_spe = NULL;
75#endif 74#endif
76 75
76#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
77void giveup_fpu_maybe_transactional(struct task_struct *tsk)
78{
79 /*
80 * If we are saving the current thread's registers, and the
81 * thread is in a transactional state, set the TIF_RESTORE_TM
82 * bit so that we know to restore the registers before
83 * returning to userspace.
84 */
85 if (tsk == current && tsk->thread.regs &&
86 MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
87 !test_thread_flag(TIF_RESTORE_TM)) {
88 tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
89 set_thread_flag(TIF_RESTORE_TM);
90 }
91
92 giveup_fpu(tsk);
93}
94
95void giveup_altivec_maybe_transactional(struct task_struct *tsk)
96{
97 /*
98 * If we are saving the current thread's registers, and the
99 * thread is in a transactional state, set the TIF_RESTORE_TM
100 * bit so that we know to restore the registers before
101 * returning to userspace.
102 */
103 if (tsk == current && tsk->thread.regs &&
104 MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
105 !test_thread_flag(TIF_RESTORE_TM)) {
106 tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
107 set_thread_flag(TIF_RESTORE_TM);
108 }
109
110 giveup_altivec(tsk);
111}
112
113#else
114#define giveup_fpu_maybe_transactional(tsk) giveup_fpu(tsk)
115#define giveup_altivec_maybe_transactional(tsk) giveup_altivec(tsk)
116#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
117
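
A minimal user-space model of the giveup_*_maybe_transactional() pattern above, assuming a made-up thread structure and MSR bit: if the current thread is inside a transaction and not already flagged, remember the original MSR and mark it for restore before flushing the live register state.

/* Standalone model (not kernel code); all names are illustrative. */
#include <stdio.h>
#include <stdbool.h>

#define MSR_TM_ACTIVE_BIT 0x1UL                /* pretend TS field, an assumption */

struct model_thread {
        unsigned long msr;
        unsigned long tm_orig_msr;
        bool restore_tm;                       /* stands in for TIF_RESTORE_TM */
};

static void giveup_fpu_model(struct model_thread *t)
{
        printf("flushing FP state for thread with msr=%#lx\n", t->msr);
}

static void giveup_fpu_maybe_transactional_model(struct model_thread *t)
{
        if ((t->msr & MSR_TM_ACTIVE_BIT) && !t->restore_tm) {
                t->tm_orig_msr = t->msr;       /* remember the pre-flush MSR */
                t->restore_tm = true;          /* reload state before returning */
        }
        giveup_fpu_model(t);
}

int main(void)
{
        struct model_thread t = { .msr = MSR_TM_ACTIVE_BIT | 0x2000 };

        giveup_fpu_maybe_transactional_model(&t);
        printf("restore_tm=%d tm_orig_msr=%#lx\n", t.restore_tm, t.tm_orig_msr);
        return 0;
}
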
77#ifdef CONFIG_PPC_FPU 118#ifdef CONFIG_PPC_FPU
78/* 119/*
79 * Make sure the floating-point register state in the 120 * Make sure the floating-point register state in the
@@ -102,13 +143,13 @@ void flush_fp_to_thread(struct task_struct *tsk)
102 */ 143 */
103 BUG_ON(tsk != current); 144 BUG_ON(tsk != current);
104#endif 145#endif
105 giveup_fpu(tsk); 146 giveup_fpu_maybe_transactional(tsk);
106 } 147 }
107 preempt_enable(); 148 preempt_enable();
108 } 149 }
109} 150}
110EXPORT_SYMBOL_GPL(flush_fp_to_thread); 151EXPORT_SYMBOL_GPL(flush_fp_to_thread);
111#endif 152#endif /* CONFIG_PPC_FPU */
112 153
113void enable_kernel_fp(void) 154void enable_kernel_fp(void)
114{ 155{
@@ -116,11 +157,11 @@ void enable_kernel_fp(void)
116 157
117#ifdef CONFIG_SMP 158#ifdef CONFIG_SMP
118 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) 159 if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
119 giveup_fpu(current); 160 giveup_fpu_maybe_transactional(current);
120 else 161 else
121 giveup_fpu(NULL); /* just enables FP for kernel */ 162 giveup_fpu(NULL); /* just enables FP for kernel */
122#else 163#else
123 giveup_fpu(last_task_used_math); 164 giveup_fpu_maybe_transactional(last_task_used_math);
124#endif /* CONFIG_SMP */ 165#endif /* CONFIG_SMP */
125} 166}
126EXPORT_SYMBOL(enable_kernel_fp); 167EXPORT_SYMBOL(enable_kernel_fp);
@@ -132,11 +173,11 @@ void enable_kernel_altivec(void)
132 173
133#ifdef CONFIG_SMP 174#ifdef CONFIG_SMP
134 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) 175 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
135 giveup_altivec(current); 176 giveup_altivec_maybe_transactional(current);
136 else 177 else
137 giveup_altivec_notask(); 178 giveup_altivec_notask();
138#else 179#else
139 giveup_altivec(last_task_used_altivec); 180 giveup_altivec_maybe_transactional(last_task_used_altivec);
140#endif /* CONFIG_SMP */ 181#endif /* CONFIG_SMP */
141} 182}
142EXPORT_SYMBOL(enable_kernel_altivec); 183EXPORT_SYMBOL(enable_kernel_altivec);
@@ -153,7 +194,7 @@ void flush_altivec_to_thread(struct task_struct *tsk)
153#ifdef CONFIG_SMP 194#ifdef CONFIG_SMP
154 BUG_ON(tsk != current); 195 BUG_ON(tsk != current);
155#endif 196#endif
156 giveup_altivec(tsk); 197 giveup_altivec_maybe_transactional(tsk);
157 } 198 }
158 preempt_enable(); 199 preempt_enable();
159 } 200 }
@@ -182,8 +223,8 @@ EXPORT_SYMBOL(enable_kernel_vsx);
182 223
183void giveup_vsx(struct task_struct *tsk) 224void giveup_vsx(struct task_struct *tsk)
184{ 225{
185 giveup_fpu(tsk); 226 giveup_fpu_maybe_transactional(tsk);
186 giveup_altivec(tsk); 227 giveup_altivec_maybe_transactional(tsk);
187 __giveup_vsx(tsk); 228 __giveup_vsx(tsk);
188} 229}
189 230
@@ -479,7 +520,48 @@ static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
479 return false; 520 return false;
480 return true; 521 return true;
481} 522}
523
482#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 524#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
525static void tm_reclaim_thread(struct thread_struct *thr,
526 struct thread_info *ti, uint8_t cause)
527{
528 unsigned long msr_diff = 0;
529
530 /*
 531 * If FP/VSX registers have already been saved to the
532 * thread_struct, move them to the transact_fp array.
533 * We clear the TIF_RESTORE_TM bit since after the reclaim
534 * the thread will no longer be transactional.
535 */
536 if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
537 msr_diff = thr->tm_orig_msr & ~thr->regs->msr;
538 if (msr_diff & MSR_FP)
539 memcpy(&thr->transact_fp, &thr->fp_state,
540 sizeof(struct thread_fp_state));
541 if (msr_diff & MSR_VEC)
542 memcpy(&thr->transact_vr, &thr->vr_state,
543 sizeof(struct thread_vr_state));
544 clear_ti_thread_flag(ti, TIF_RESTORE_TM);
545 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
546 }
547
548 tm_reclaim(thr, thr->regs->msr, cause);
549
550 /* Having done the reclaim, we now have the checkpointed
551 * FP/VSX values in the registers. These might be valid
552 * even if we have previously called enable_kernel_fp() or
553 * flush_fp_to_thread(), so update thr->regs->msr to
554 * indicate their current validity.
555 */
556 thr->regs->msr |= msr_diff;
557}
558
559void tm_reclaim_current(uint8_t cause)
560{
561 tm_enable();
562 tm_reclaim_thread(&current->thread, current_thread_info(), cause);
563}
564
483static inline void tm_reclaim_task(struct task_struct *tsk) 565static inline void tm_reclaim_task(struct task_struct *tsk)
484{ 566{
485 /* We have to work out if we're switching from/to a task that's in the 567 /* We have to work out if we're switching from/to a task that's in the
@@ -502,9 +584,11 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
502 584
503 /* Stash the original thread MSR, as giveup_fpu et al will 585 /* Stash the original thread MSR, as giveup_fpu et al will
504 * modify it. We hold onto it to see whether the task used 586 * modify it. We hold onto it to see whether the task used
505 * FP & vector regs. 587 * FP & vector regs. If the TIF_RESTORE_TM flag is set,
588 * tm_orig_msr is already set.
506 */ 589 */
507 thr->tm_orig_msr = thr->regs->msr; 590 if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
591 thr->tm_orig_msr = thr->regs->msr;
508 592
509 TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, " 593 TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
510 "ccr=%lx, msr=%lx, trap=%lx)\n", 594 "ccr=%lx, msr=%lx, trap=%lx)\n",
@@ -512,7 +596,7 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
512 thr->regs->ccr, thr->regs->msr, 596 thr->regs->ccr, thr->regs->msr,
513 thr->regs->trap); 597 thr->regs->trap);
514 598
515 tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED); 599 tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);
516 600
517 TM_DEBUG("--- tm_reclaim on pid %d complete\n", 601 TM_DEBUG("--- tm_reclaim on pid %d complete\n",
518 tsk->pid); 602 tsk->pid);
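
A standalone sketch of the msr_diff bookkeeping in tm_reclaim_thread(): facilities recorded in tm_orig_msr that are no longer live in regs->msr are the ones whose saved copies must be moved into the transactional arrays and re-validated after the reclaim. The MSR bit values are illustrative only.

/* Standalone sketch (not kernel code). */
#include <stdio.h>

#define MSR_FP  0x2000UL        /* illustrative bit values */
#define MSR_VEC 0x0200UL
#define MSR_VSX 0x0080UL

int main(void)
{
        unsigned long tm_orig_msr = MSR_FP | MSR_VEC;  /* used FP and VMX */
        unsigned long live_msr    = MSR_VEC;           /* only VMX still live */
        unsigned long msr_diff    = tm_orig_msr & ~live_msr;

        if (msr_diff & MSR_FP)
                printf("copy fp_state -> transact_fp\n");
        if (msr_diff & MSR_VEC)
                printf("copy vr_state -> transact_vr\n");

        msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
        printf("msr bits to re-validate after reclaim: %#lx\n", msr_diff);
        return 0;
}
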
@@ -588,6 +672,43 @@ static inline void __switch_to_tm(struct task_struct *prev)
588 tm_reclaim_task(prev); 672 tm_reclaim_task(prev);
589 } 673 }
590} 674}
675
676/*
677 * This is called if we are on the way out to userspace and the
678 * TIF_RESTORE_TM flag is set. It checks if we need to reload
679 * FP and/or vector state and does so if necessary.
680 * If userspace is inside a transaction (whether active or
681 * suspended) and FP/VMX/VSX instructions have ever been enabled
682 * inside that transaction, then we have to keep them enabled
 683 * and keep the FP/VMX/VSX state loaded for as long as the transaction
 684 * continues. The reason is that if we didn't, and subsequently
 685 * got an FP/VMX/VSX unavailable interrupt inside a transaction,
686 * we don't know whether it's the same transaction, and thus we
687 * don't know which of the checkpointed state and the transactional
688 * state to use.
689 */
690void restore_tm_state(struct pt_regs *regs)
691{
692 unsigned long msr_diff;
693
694 clear_thread_flag(TIF_RESTORE_TM);
695 if (!MSR_TM_ACTIVE(regs->msr))
696 return;
697
698 msr_diff = current->thread.tm_orig_msr & ~regs->msr;
699 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
700 if (msr_diff & MSR_FP) {
701 fp_enable();
702 load_fp_state(&current->thread.fp_state);
703 regs->msr |= current->thread.fpexc_mode;
704 }
705 if (msr_diff & MSR_VEC) {
706 vec_enable();
707 load_vr_state(&current->thread.vr_state);
708 }
709 regs->msr |= msr_diff;
710}
711
591#else 712#else
592#define tm_recheckpoint_new_task(new) 713#define tm_recheckpoint_new_task(new)
593#define __switch_to_tm(prev) 714#define __switch_to_tm(prev)
@@ -1175,6 +1296,19 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1175 if (val & PR_FP_EXC_SW_ENABLE) { 1296 if (val & PR_FP_EXC_SW_ENABLE) {
1176#ifdef CONFIG_SPE 1297#ifdef CONFIG_SPE
1177 if (cpu_has_feature(CPU_FTR_SPE)) { 1298 if (cpu_has_feature(CPU_FTR_SPE)) {
1299 /*
1300 * When the sticky exception bits are set
1301 * directly by userspace, it must call prctl
1302 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1303 * in the existing prctl settings) or
1304 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1305 * the bits being set). <fenv.h> functions
1306 * saving and restoring the whole
1307 * floating-point environment need to do so
1308 * anyway to restore the prctl settings from
1309 * the saved environment.
1310 */
1311 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1178 tsk->thread.fpexc_mode = val & 1312 tsk->thread.fpexc_mode = val &
1179 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT); 1313 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1180 return 0; 1314 return 0;
@@ -1206,9 +1340,22 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
1206 1340
1207 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) 1341 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
1208#ifdef CONFIG_SPE 1342#ifdef CONFIG_SPE
1209 if (cpu_has_feature(CPU_FTR_SPE)) 1343 if (cpu_has_feature(CPU_FTR_SPE)) {
1344 /*
1345 * When the sticky exception bits are set
1346 * directly by userspace, it must call prctl
1347 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1348 * in the existing prctl settings) or
1349 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1350 * the bits being set). <fenv.h> functions
1351 * saving and restoring the whole
1352 * floating-point environment need to do so
1353 * anyway to restore the prctl settings from
1354 * the saved environment.
1355 */
1356 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1210 val = tsk->thread.fpexc_mode; 1357 val = tsk->thread.fpexc_mode;
1211 else 1358 } else
1212 return -EINVAL; 1359 return -EINVAL;
1213#else 1360#else
1214 return -EINVAL; 1361 return -EINVAL;
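
The software FP exception mode guarded here is driven from user space via prctl(). A hedged sketch of such a caller is below; it assumes an SPE-capable powerpc kernel (elsewhere PR_SET_FPEXC with PR_FP_EXC_SW_ENABLE simply fails with EINVAL, as the code above shows). Constants come from <linux/prctl.h> via <sys/prctl.h>.

/* User-space sketch; minimal error handling, exception mask is an assumption. */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        unsigned int mode = PR_FP_EXC_SW_ENABLE |
                            PR_FP_EXC_DIV | PR_FP_EXC_OVF |
                            PR_FP_EXC_UND | PR_FP_EXC_RES | PR_FP_EXC_INV;

        if (prctl(PR_SET_FPEXC, mode) != 0) {
                perror("PR_SET_FPEXC");        /* EINVAL unless CPU_FTR_SPE */
                return 1;
        }

        if (prctl(PR_GET_FPEXC, &mode) == 0)
                printf("current fpexc mode: %#x\n", mode);
        return 0;
}
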
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index fa0ad8aafbcc..f58c0d3aaeb4 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -523,6 +523,20 @@ static int __init early_init_dt_scan_memory_ppc(unsigned long node,
523 return early_init_dt_scan_memory(node, uname, depth, data); 523 return early_init_dt_scan_memory(node, uname, depth, data);
524} 524}
525 525
526/*
527 * For a relocatable kernel, we need to get the memstart_addr first,
528 * then use it to calculate the virtual kernel start address. This has
529 * to happen at a very early stage (before machine_init). In this case,
 530 * we just want to get the memstart_addr and do not want to disturb the
 531 * memblock at this stage, so introduce a variable that lets us skip the
 532 * memblock_add() calls.
533 */
534#ifdef CONFIG_RELOCATABLE
535static int add_mem_to_memblock = 1;
536#else
537#define add_mem_to_memblock 1
538#endif
539
526void __init early_init_dt_add_memory_arch(u64 base, u64 size) 540void __init early_init_dt_add_memory_arch(u64 base, u64 size)
527{ 541{
528#ifdef CONFIG_PPC64 542#ifdef CONFIG_PPC64
@@ -543,7 +557,8 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
543 } 557 }
544 558
545 /* Add the chunk to the MEMBLOCK list */ 559 /* Add the chunk to the MEMBLOCK list */
546 memblock_add(base, size); 560 if (add_mem_to_memblock)
561 memblock_add(base, size);
547} 562}
548 563
549static void __init early_reserve_mem_dt(void) 564static void __init early_reserve_mem_dt(void)
@@ -740,6 +755,30 @@ void __init early_init_devtree(void *params)
740 DBG(" <- early_init_devtree()\n"); 755 DBG(" <- early_init_devtree()\n");
741} 756}
742 757
758#ifdef CONFIG_RELOCATABLE
759/*
 760 * This function runs before early_init_devtree, so we have to initialize
761 * initial_boot_params.
762 */
763void __init early_get_first_memblock_info(void *params, phys_addr_t *size)
764{
765 /* Setup flat device-tree pointer */
766 initial_boot_params = params;
767
768 /*
769 * Scan the memory nodes and set add_mem_to_memblock to 0 to avoid
 770 * disturbing the memblock.
771 */
772 add_mem_to_memblock = 0;
773 of_scan_flat_dt(early_init_dt_scan_root, NULL);
774 of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
775 add_mem_to_memblock = 1;
776
777 if (size)
778 *size = first_memblock_size;
779}
780#endif
781
743/******* 782/*******
744 * 783 *
745 * New implementation of the OF "find" APIs, return a refcounted 784 * New implementation of the OF "find" APIs, return a refcounted
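
A standalone model (not kernel code) of the add_mem_to_memblock guard introduced above: during the early scan that only wants memstart_addr, the usual side effect is suppressed by a flag and re-enabled afterwards.

/* Standalone sketch; names are illustrative stand-ins. */
#include <stdio.h>

static int add_mem_to_registry = 1;            /* stands in for add_mem_to_memblock */

static void add_memory_arch(unsigned long long base, unsigned long long size)
{
        if (!add_mem_to_registry) {
                printf("scan-only: saw %#llx..%#llx, not registering\n",
                       base, base + size);
                return;
        }
        printf("registering %#llx..%#llx\n", base, base + size);
}

int main(void)
{
        add_mem_to_registry = 0;               /* preliminary scan, no side effects */
        add_memory_arch(0x0, 0x40000000);
        add_mem_to_registry = 1;               /* normal boot path */
        add_memory_arch(0x0, 0x40000000);
        return 0;
}
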
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 856dd4e99bfe..f5f11a7d30e5 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -97,6 +97,36 @@ int dcache_bsize;
97int icache_bsize; 97int icache_bsize;
98int ucache_bsize; 98int ucache_bsize;
99 99
100#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
101static void setup_tlb_core_data(void)
102{
103 int cpu;
104
105 for_each_possible_cpu(cpu) {
106 int first = cpu_first_thread_sibling(cpu);
107
108 paca[cpu].tcd_ptr = &paca[first].tcd;
109
110 /*
111 * If we have threads, we need either tlbsrx.
112 * or e6500 tablewalk mode, or else TLB handlers
113 * will be racy and could produce duplicate entries.
114 */
115 if (smt_enabled_at_boot >= 2 &&
116 !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
117 book3e_htw_mode != PPC_HTW_E6500) {
118 /* Should we panic instead? */
119 WARN_ONCE("%s: unsupported MMU configuration -- expect problems\n",
120 __func__);
121 }
122 }
123}
124#else
125static void setup_tlb_core_data(void)
126{
127}
128#endif
129
100#ifdef CONFIG_SMP 130#ifdef CONFIG_SMP
101 131
102static char *smt_enabled_cmdline; 132static char *smt_enabled_cmdline;
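
A standalone sketch of what setup_tlb_core_data() arranges: every hardware thread points its TLB core data at the first thread of its core. The "first sibling = cpu rounded down to the core boundary" rule and the two-thread (e6500-style) core below are assumptions for illustration.

/* Standalone sketch (not kernel code). */
#include <stdio.h>

#define THREADS_PER_CORE 2             /* assumption: e6500-style SMT2 cores */

static int first_thread_sibling(int cpu)
{
        return cpu - (cpu % THREADS_PER_CORE);
}

int main(void)
{
        for (int cpu = 0; cpu < 8; cpu++)
                printf("cpu %d shares TLB core data with cpu %d\n",
                       cpu, first_thread_sibling(cpu));
        return 0;
}
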
@@ -445,6 +475,7 @@ void __init setup_system(void)
445 475
446 smp_setup_cpu_maps(); 476 smp_setup_cpu_maps();
447 check_smt_enabled(); 477 check_smt_enabled();
478 setup_tlb_core_data();
448 479
449#ifdef CONFIG_SMP 480#ifdef CONFIG_SMP
450 /* Release secondary cpus out of their spinloops at 0x60 now that 481 /* Release secondary cpus out of their spinloops at 0x60 now that
@@ -520,9 +551,6 @@ static void __init irqstack_early_init(void)
520#ifdef CONFIG_PPC_BOOK3E 551#ifdef CONFIG_PPC_BOOK3E
521static void __init exc_lvl_early_init(void) 552static void __init exc_lvl_early_init(void)
522{ 553{
523 extern unsigned int interrupt_base_book3e;
524 extern unsigned int exc_debug_debug_book3e;
525
526 unsigned int i; 554 unsigned int i;
527 555
528 for_each_possible_cpu(i) { 556 for_each_possible_cpu(i) {
@@ -535,8 +563,7 @@ static void __init exc_lvl_early_init(void)
535 } 563 }
536 564
537 if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) 565 if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
538 patch_branch(&interrupt_base_book3e + (0x040 / 4) + 1, 566 patch_exception(0x040, exc_debug_debug_book3e);
539 (unsigned long)&exc_debug_debug_book3e, 0);
540} 567}
541#else 568#else
542#define exc_lvl_early_init() 569#define exc_lvl_early_init()
@@ -544,7 +571,8 @@ static void __init exc_lvl_early_init(void)
544 571
545/* 572/*
546 * Stack space used when we detect a bad kernel stack pointer, and 573 * Stack space used when we detect a bad kernel stack pointer, and
547 * early in SMP boots before relocation is enabled. 574 * early in SMP boots before relocation is enabled. Exclusive emergency
575 * stack for machine checks.
548 */ 576 */
549static void __init emergency_stack_init(void) 577static void __init emergency_stack_init(void)
550{ 578{
@@ -567,6 +595,13 @@ static void __init emergency_stack_init(void)
567 sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit); 595 sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
568 sp += THREAD_SIZE; 596 sp += THREAD_SIZE;
569 paca[i].emergency_sp = __va(sp); 597 paca[i].emergency_sp = __va(sp);
598
599#ifdef CONFIG_PPC_BOOK3S_64
600 /* emergency stack for machine check exception handling. */
601 sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
602 sp += THREAD_SIZE;
603 paca[i].mc_emergency_sp = __va(sp);
604#endif
570 } 605 }
571} 606}
572 607
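
A small standalone illustration of why the code above does sp += THREAD_SIZE before storing the pointer: the stack grows down, so the saved emergency (and machine-check emergency) stack pointer is the top of the freshly allocated region. THREAD_SIZE here is an assumed value.

/* Standalone sketch (not kernel code). */
#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE (16 * 1024)        /* assumption for the sketch */

int main(void)
{
        unsigned char *base = malloc(THREAD_SIZE);
        unsigned char *emergency_sp = base + THREAD_SIZE;   /* stack top */

        printf("region base %p, initial stack pointer (top) %p\n",
               (void *)base, (void *)emergency_sp);
        free(base);
        return 0;
}
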
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 457e97aa2945..8fc4177ed65a 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -203,8 +203,7 @@ unsigned long get_tm_stackpointer(struct pt_regs *regs)
203 203
204#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 204#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
205 if (MSR_TM_ACTIVE(regs->msr)) { 205 if (MSR_TM_ACTIVE(regs->msr)) {
206 tm_enable(); 206 tm_reclaim_current(TM_CAUSE_SIGNAL);
207 tm_reclaim(&current->thread, regs->msr, TM_CAUSE_SIGNAL);
208 if (MSR_TM_TRANSACTIONAL(regs->msr)) 207 if (MSR_TM_TRANSACTIONAL(regs->msr))
209 return current->thread.ckpt_regs.gpr[1]; 208 return current->thread.ckpt_regs.gpr[1];
210 } 209 }
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 68027bfa5f8e..6ce69e6f1fcb 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -519,6 +519,13 @@ static int save_tm_user_regs(struct pt_regs *regs,
519{ 519{
520 unsigned long msr = regs->msr; 520 unsigned long msr = regs->msr;
521 521
522 /* Remove TM bits from thread's MSR. The MSR in the sigcontext
523 * just indicates to userland that we were doing a transaction, but we
524 * don't want to return in transactional state. This also ensures
525 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
526 */
527 regs->msr &= ~MSR_TS_MASK;
528
522 /* Make sure floating point registers are stored in regs */ 529 /* Make sure floating point registers are stored in regs */
523 flush_fp_to_thread(current); 530 flush_fp_to_thread(current);
524 531
@@ -1056,13 +1063,6 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
1056 /* enter the signal handler in native-endian mode */ 1063 /* enter the signal handler in native-endian mode */
1057 regs->msr &= ~MSR_LE; 1064 regs->msr &= ~MSR_LE;
1058 regs->msr |= (MSR_KERNEL & MSR_LE); 1065 regs->msr |= (MSR_KERNEL & MSR_LE);
1059#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1060 /* Remove TM bits from thread's MSR. The MSR in the sigcontext
1061 * just indicates to userland that we were doing a transaction, but we
1062 * don't want to return in transactional state:
1063 */
1064 regs->msr &= ~MSR_TS_MASK;
1065#endif
1066 return 1; 1066 return 1;
1067 1067
1068badframe: 1068badframe:
@@ -1484,13 +1484,6 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
1484 regs->nip = (unsigned long) ka->sa.sa_handler; 1484 regs->nip = (unsigned long) ka->sa.sa_handler;
1485 /* enter the signal handler in big-endian mode */ 1485 /* enter the signal handler in big-endian mode */
1486 regs->msr &= ~MSR_LE; 1486 regs->msr &= ~MSR_LE;
1487#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1488 /* Remove TM bits from thread's MSR. The MSR in the sigcontext
1489 * just indicates to userland that we were doing a transaction, but we
1490 * don't want to return in transactional state:
1491 */
1492 regs->msr &= ~MSR_TS_MASK;
1493#endif
1494 return 1; 1487 return 1;
1495 1488
1496badframe: 1489badframe:
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 42991045349f..e35bf773df7a 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -192,6 +192,13 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
192 192
193 BUG_ON(!MSR_TM_ACTIVE(regs->msr)); 193 BUG_ON(!MSR_TM_ACTIVE(regs->msr));
194 194
195 /* Remove TM bits from thread's MSR. The MSR in the sigcontext
196 * just indicates to userland that we were doing a transaction, but we
197 * don't want to return in transactional state. This also ensures
198 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
199 */
200 regs->msr &= ~MSR_TS_MASK;
201
195 flush_fp_to_thread(current); 202 flush_fp_to_thread(current);
196 203
197#ifdef CONFIG_ALTIVEC 204#ifdef CONFIG_ALTIVEC
@@ -749,13 +756,6 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
749 756
750 /* Make sure signal handler doesn't get spurious FP exceptions */ 757 /* Make sure signal handler doesn't get spurious FP exceptions */
751 current->thread.fp_state.fpscr = 0; 758 current->thread.fp_state.fpscr = 0;
752#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
753 /* Remove TM bits from thread's MSR. The MSR in the sigcontext
754 * just indicates to userland that we were doing a transaction, but we
755 * don't want to return in transactional state:
756 */
757 regs->msr &= ~MSR_TS_MASK;
758#endif
759 759
760 /* Set up to return from userspace. */ 760 /* Set up to return from userspace. */
761 if (vdso64_rt_sigtramp && current->mm->context.vdso_base) { 761 if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c
index e68fd1ae727a..7a37ecd3afa3 100644
--- a/arch/powerpc/kernel/smp-tbsync.c
+++ b/arch/powerpc/kernel/smp-tbsync.c
@@ -9,7 +9,6 @@
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/smp.h> 10#include <linux/smp.h>
11#include <linux/unistd.h> 11#include <linux/unistd.h>
12#include <linux/init.h>
13#include <linux/slab.h> 12#include <linux/slab.h>
14#include <linux/atomic.h> 13#include <linux/atomic.h>
15#include <asm/smp.h> 14#include <asm/smp.h>
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index c1cf4a1522d9..ac2621af3154 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -369,13 +369,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
369 cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid)); 369 cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
370 cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid)); 370 cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
371 371
372 if (smp_ops) 372 if (smp_ops && smp_ops->probe)
373 if (smp_ops->probe) 373 smp_ops->probe();
374 max_cpus = smp_ops->probe();
375 else
376 max_cpus = NR_CPUS;
377 else
378 max_cpus = 1;
379} 374}
380 375
381void smp_prepare_boot_cpu(void) 376void smp_prepare_boot_cpu(void)
diff --git a/arch/powerpc/kernel/swsusp_booke.S b/arch/powerpc/kernel/swsusp_booke.S
index 0f204053e5b5..553c1405ee05 100644
--- a/arch/powerpc/kernel/swsusp_booke.S
+++ b/arch/powerpc/kernel/swsusp_booke.S
@@ -74,21 +74,21 @@ _GLOBAL(swsusp_arch_suspend)
74 bne 1b 74 bne 1b
75 75
76 /* Save SPRGs */ 76 /* Save SPRGs */
77 mfsprg r4,0 77 mfspr r4,SPRN_SPRG0
78 stw r4,SL_SPRG0(r11) 78 stw r4,SL_SPRG0(r11)
79 mfsprg r4,1 79 mfspr r4,SPRN_SPRG1
80 stw r4,SL_SPRG1(r11) 80 stw r4,SL_SPRG1(r11)
81 mfsprg r4,2 81 mfspr r4,SPRN_SPRG2
82 stw r4,SL_SPRG2(r11) 82 stw r4,SL_SPRG2(r11)
83 mfsprg r4,3 83 mfspr r4,SPRN_SPRG3
84 stw r4,SL_SPRG3(r11) 84 stw r4,SL_SPRG3(r11)
85 mfsprg r4,4 85 mfspr r4,SPRN_SPRG4
86 stw r4,SL_SPRG4(r11) 86 stw r4,SL_SPRG4(r11)
87 mfsprg r4,5 87 mfspr r4,SPRN_SPRG5
88 stw r4,SL_SPRG5(r11) 88 stw r4,SL_SPRG5(r11)
89 mfsprg r4,6 89 mfspr r4,SPRN_SPRG6
90 stw r4,SL_SPRG6(r11) 90 stw r4,SL_SPRG6(r11)
91 mfsprg r4,7 91 mfspr r4,SPRN_SPRG7
92 stw r4,SL_SPRG7(r11) 92 stw r4,SL_SPRG7(r11)
93 93
94 /* Call the low level suspend stuff (we should probably have made 94 /* Call the low level suspend stuff (we should probably have made
@@ -150,21 +150,21 @@ _GLOBAL(swsusp_arch_resume)
150 bl _tlbil_all 150 bl _tlbil_all
151 151
152 lwz r4,SL_SPRG0(r11) 152 lwz r4,SL_SPRG0(r11)
153 mtsprg 0,r4 153 mtspr SPRN_SPRG0,r4
154 lwz r4,SL_SPRG1(r11) 154 lwz r4,SL_SPRG1(r11)
155 mtsprg 1,r4 155 mtspr SPRN_SPRG1,r4
156 lwz r4,SL_SPRG2(r11) 156 lwz r4,SL_SPRG2(r11)
157 mtsprg 2,r4 157 mtspr SPRN_SPRG2,r4
158 lwz r4,SL_SPRG3(r11) 158 lwz r4,SL_SPRG3(r11)
159 mtsprg 3,r4 159 mtspr SPRN_SPRG3,r4
160 lwz r4,SL_SPRG4(r11) 160 lwz r4,SL_SPRG4(r11)
161 mtsprg 4,r4 161 mtspr SPRN_SPRG4,r4
162 lwz r4,SL_SPRG5(r11) 162 lwz r4,SL_SPRG5(r11)
163 mtsprg 5,r4 163 mtspr SPRN_SPRG5,r4
164 lwz r4,SL_SPRG6(r11) 164 lwz r4,SL_SPRG6(r11)
165 mtsprg 6,r4 165 mtspr SPRN_SPRG6,r4
166 lwz r4,SL_SPRG7(r11) 166 lwz r4,SL_SPRG7(r11)
167 mtsprg 7,r4 167 mtspr SPRN_SPRG7,r4
168 168
169 /* restore the MSR */ 169 /* restore the MSR */
170 lwz r3,SL_MSR(r11) 170 lwz r3,SL_MSR(r11)
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index 4e3cc47f26b9..cd9be9aa016d 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -34,7 +34,6 @@
34#include <linux/ipc.h> 34#include <linux/ipc.h>
35#include <linux/utsname.h> 35#include <linux/utsname.h>
36#include <linux/file.h> 36#include <linux/file.h>
37#include <linux/init.h>
38#include <linux/personality.h> 37#include <linux/personality.h>
39 38
40#include <asm/uaccess.h> 39#include <asm/uaccess.h>
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index b4e667663d9b..d4a43e64a6a9 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -86,6 +86,304 @@ __setup("smt-snooze-delay=", setup_smt_snooze_delay);
86 86
87#endif /* CONFIG_PPC64 */ 87#endif /* CONFIG_PPC64 */
88 88
89#ifdef CONFIG_PPC_FSL_BOOK3E
90#define MAX_BIT 63
91
92static u64 pw20_wt;
93static u64 altivec_idle_wt;
94
95static unsigned int get_idle_ticks_bit(u64 ns)
96{
97 u64 cycle;
98
99 if (ns >= 10000)
100 cycle = div_u64(ns + 500, 1000) * tb_ticks_per_usec;
101 else
102 cycle = div_u64(ns * tb_ticks_per_usec, 1000);
103
104 if (!cycle)
105 return 0;
106
107 return ilog2(cycle);
108}
109
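
A user-space replica of get_idle_ticks_bit() for experimentation: convert a nanosecond wait into timebase ticks and take ilog2 to find the timebase bit that flips at roughly that interval. tb_ticks_per_usec = 41 matches the 41 MHz example quoted later in this file.

/* Standalone replica of the arithmetic above, not kernel code. */
#include <stdio.h>
#include <stdint.h>

static unsigned int ilog2_u64(uint64_t x)
{
        unsigned int bit = 0;

        while (x >>= 1)
                bit++;
        return bit;
}

static unsigned int idle_ticks_bit(uint64_t ns, uint64_t tb_ticks_per_usec)
{
        uint64_t cycle;

        if (ns >= 10000)
                cycle = ((ns + 500) / 1000) * tb_ticks_per_usec;
        else
                cycle = (ns * tb_ticks_per_usec) / 1000;

        return cycle ? ilog2_u64(cycle) : 0;
}

int main(void)
{
        uint64_t tb_ticks_per_usec = 41;

        for (uint64_t ns = 50; ns <= 5000; ns *= 2)
                printf("%5llu ns -> timebase bit %u\n",
                       (unsigned long long)ns,
                       idle_ticks_bit(ns, tb_ticks_per_usec));
        return 0;
}
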
110static void do_show_pwrmgtcr0(void *val)
111{
112 u32 *value = val;
113
114 *value = mfspr(SPRN_PWRMGTCR0);
115}
116
117static ssize_t show_pw20_state(struct device *dev,
118 struct device_attribute *attr, char *buf)
119{
120 u32 value;
121 unsigned int cpu = dev->id;
122
123 smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
124
125 value &= PWRMGTCR0_PW20_WAIT;
126
127 return sprintf(buf, "%u\n", value ? 1 : 0);
128}
129
130static void do_store_pw20_state(void *val)
131{
132 u32 *value = val;
133 u32 pw20_state;
134
135 pw20_state = mfspr(SPRN_PWRMGTCR0);
136
137 if (*value)
138 pw20_state |= PWRMGTCR0_PW20_WAIT;
139 else
140 pw20_state &= ~PWRMGTCR0_PW20_WAIT;
141
142 mtspr(SPRN_PWRMGTCR0, pw20_state);
143}
144
145static ssize_t store_pw20_state(struct device *dev,
146 struct device_attribute *attr,
147 const char *buf, size_t count)
148{
149 u32 value;
150 unsigned int cpu = dev->id;
151
152 if (kstrtou32(buf, 0, &value))
153 return -EINVAL;
154
155 if (value > 1)
156 return -EINVAL;
157
158 smp_call_function_single(cpu, do_store_pw20_state, &value, 1);
159
160 return count;
161}
162
163static ssize_t show_pw20_wait_time(struct device *dev,
164 struct device_attribute *attr, char *buf)
165{
166 u32 value;
167 u64 tb_cycle = 1;
168 u64 time;
169
170 unsigned int cpu = dev->id;
171
172 if (!pw20_wt) {
173 smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
174 value = (value & PWRMGTCR0_PW20_ENT) >>
175 PWRMGTCR0_PW20_ENT_SHIFT;
176
177 tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
 178 /* convert timebase cycles to ns */
179 if (tb_ticks_per_usec > 1000) {
180 time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
181 } else {
182 u32 rem_us;
183
184 time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
185 &rem_us);
186 time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
187 }
188 } else {
189 time = pw20_wt;
190 }
191
192 return sprintf(buf, "%llu\n", time > 0 ? time : 0);
193}
194
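
The reverse conversion done by show_pw20_wait_time() and show_altivec_idle_wait_time() can be checked in user space as well; with tb_ticks_per_usec = 41 the field values 63 down to 58 land on the 48/97/195/390/780/1560 ns buckets listed in the comment block further down. This is a sketch, not the kernel code.

/* Standalone replica of the tick-count to nanosecond conversion. */
#include <stdio.h>
#include <stdint.h>

#define MAX_BIT 63

static uint64_t field_to_ns(unsigned int value, uint64_t tb_ticks_per_usec)
{
        uint64_t tb_cycle = 1ULL << (MAX_BIT - value + 1);
        uint64_t time;

        if (tb_ticks_per_usec > 1000) {
                time = tb_cycle / (tb_ticks_per_usec / 1000);
        } else {
                uint64_t rem = tb_cycle % tb_ticks_per_usec;

                time = tb_cycle / tb_ticks_per_usec;
                time = time * 1000 + rem * 1000 / tb_ticks_per_usec;
        }
        return time;
}

int main(void)
{
        for (unsigned int value = 63; value >= 58; value--)
                printf("PW20_ENT field %u -> about %llu ns\n", value,
                       (unsigned long long)field_to_ns(value, 41));
        return 0;
}
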
195static void set_pw20_wait_entry_bit(void *val)
196{
197 u32 *value = val;
198 u32 pw20_idle;
199
200 pw20_idle = mfspr(SPRN_PWRMGTCR0);
201
202 /* Set Automatic PW20 Core Idle Count */
203 /* clear count */
204 pw20_idle &= ~PWRMGTCR0_PW20_ENT;
205
206 /* set count */
207 pw20_idle |= ((MAX_BIT - *value) << PWRMGTCR0_PW20_ENT_SHIFT);
208
209 mtspr(SPRN_PWRMGTCR0, pw20_idle);
210}
211
212static ssize_t store_pw20_wait_time(struct device *dev,
213 struct device_attribute *attr,
214 const char *buf, size_t count)
215{
216 u32 entry_bit;
217 u64 value;
218
219 unsigned int cpu = dev->id;
220
221 if (kstrtou64(buf, 0, &value))
222 return -EINVAL;
223
224 if (!value)
225 return -EINVAL;
226
227 entry_bit = get_idle_ticks_bit(value);
228 if (entry_bit > MAX_BIT)
229 return -EINVAL;
230
231 pw20_wt = value;
232
233 smp_call_function_single(cpu, set_pw20_wait_entry_bit,
234 &entry_bit, 1);
235
236 return count;
237}
238
239static ssize_t show_altivec_idle(struct device *dev,
240 struct device_attribute *attr, char *buf)
241{
242 u32 value;
243 unsigned int cpu = dev->id;
244
245 smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
246
247 value &= PWRMGTCR0_AV_IDLE_PD_EN;
248
249 return sprintf(buf, "%u\n", value ? 1 : 0);
250}
251
252static void do_store_altivec_idle(void *val)
253{
254 u32 *value = val;
255 u32 altivec_idle;
256
257 altivec_idle = mfspr(SPRN_PWRMGTCR0);
258
259 if (*value)
260 altivec_idle |= PWRMGTCR0_AV_IDLE_PD_EN;
261 else
262 altivec_idle &= ~PWRMGTCR0_AV_IDLE_PD_EN;
263
264 mtspr(SPRN_PWRMGTCR0, altivec_idle);
265}
266
267static ssize_t store_altivec_idle(struct device *dev,
268 struct device_attribute *attr,
269 const char *buf, size_t count)
270{
271 u32 value;
272 unsigned int cpu = dev->id;
273
274 if (kstrtou32(buf, 0, &value))
275 return -EINVAL;
276
277 if (value > 1)
278 return -EINVAL;
279
280 smp_call_function_single(cpu, do_store_altivec_idle, &value, 1);
281
282 return count;
283}
284
285static ssize_t show_altivec_idle_wait_time(struct device *dev,
286 struct device_attribute *attr, char *buf)
287{
288 u32 value;
289 u64 tb_cycle = 1;
290 u64 time;
291
292 unsigned int cpu = dev->id;
293
294 if (!altivec_idle_wt) {
295 smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
296 value = (value & PWRMGTCR0_AV_IDLE_CNT) >>
297 PWRMGTCR0_AV_IDLE_CNT_SHIFT;
298
299 tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
 300 /* convert timebase cycles to ns */
301 if (tb_ticks_per_usec > 1000) {
302 time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
303 } else {
304 u32 rem_us;
305
306 time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
307 &rem_us);
308 time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
309 }
310 } else {
311 time = altivec_idle_wt;
312 }
313
314 return sprintf(buf, "%llu\n", time > 0 ? time : 0);
315}
316
317static void set_altivec_idle_wait_entry_bit(void *val)
318{
319 u32 *value = val;
320 u32 altivec_idle;
321
322 altivec_idle = mfspr(SPRN_PWRMGTCR0);
323
324 /* Set Automatic AltiVec Idle Count */
325 /* clear count */
326 altivec_idle &= ~PWRMGTCR0_AV_IDLE_CNT;
327
328 /* set count */
329 altivec_idle |= ((MAX_BIT - *value) << PWRMGTCR0_AV_IDLE_CNT_SHIFT);
330
331 mtspr(SPRN_PWRMGTCR0, altivec_idle);
332}
333
334static ssize_t store_altivec_idle_wait_time(struct device *dev,
335 struct device_attribute *attr,
336 const char *buf, size_t count)
337{
338 u32 entry_bit;
339 u64 value;
340
341 unsigned int cpu = dev->id;
342
343 if (kstrtou64(buf, 0, &value))
344 return -EINVAL;
345
346 if (!value)
347 return -EINVAL;
348
349 entry_bit = get_idle_ticks_bit(value);
350 if (entry_bit > MAX_BIT)
351 return -EINVAL;
352
353 altivec_idle_wt = value;
354
355 smp_call_function_single(cpu, set_altivec_idle_wait_entry_bit,
356 &entry_bit, 1);
357
358 return count;
359}
360
361/*
362 * Enable/Disable interface:
363 * 0, disable. 1, enable.
364 */
365static DEVICE_ATTR(pw20_state, 0600, show_pw20_state, store_pw20_state);
366static DEVICE_ATTR(altivec_idle, 0600, show_altivec_idle, store_altivec_idle);
367
368/*
 369 * Set wait time interface (in nanoseconds).
 370 * Example: based on a TB frequency of 41 MHz.
371 * 1~48(ns): TB[63]
372 * 49~97(ns): TB[62]
373 * 98~195(ns): TB[61]
374 * 196~390(ns): TB[60]
375 * 391~780(ns): TB[59]
376 * 781~1560(ns): TB[58]
377 * ...
378 */
379static DEVICE_ATTR(pw20_wait_time, 0600,
380 show_pw20_wait_time,
381 store_pw20_wait_time);
382static DEVICE_ATTR(altivec_idle_wait_time, 0600,
383 show_altivec_idle_wait_time,
384 store_altivec_idle_wait_time);
385#endif
386
89/* 387/*
90 * Enabling PMCs will slow partition context switch times so we only do 388 * Enabling PMCs will slow partition context switch times so we only do
91 * it the first time we write to the PMCs. 389 * it the first time we write to the PMCs.
@@ -108,14 +406,14 @@ void ppc_enable_pmcs(void)
108} 406}
109EXPORT_SYMBOL(ppc_enable_pmcs); 407EXPORT_SYMBOL(ppc_enable_pmcs);
110 408
111#define SYSFS_PMCSETUP(NAME, ADDRESS) \ 409#define __SYSFS_SPRSETUP(NAME, ADDRESS, EXTRA) \
112static void read_##NAME(void *val) \ 410static void read_##NAME(void *val) \
113{ \ 411{ \
114 *(unsigned long *)val = mfspr(ADDRESS); \ 412 *(unsigned long *)val = mfspr(ADDRESS); \
115} \ 413} \
116static void write_##NAME(void *val) \ 414static void write_##NAME(void *val) \
117{ \ 415{ \
118 ppc_enable_pmcs(); \ 416 EXTRA; \
119 mtspr(ADDRESS, *(unsigned long *)val); \ 417 mtspr(ADDRESS, *(unsigned long *)val); \
120} \ 418} \
121static ssize_t show_##NAME(struct device *dev, \ 419static ssize_t show_##NAME(struct device *dev, \
@@ -140,6 +438,10 @@ static ssize_t __used \
140 return count; \ 438 return count; \
141} 439}
142 440
441#define SYSFS_PMCSETUP(NAME, ADDRESS) \
442 __SYSFS_SPRSETUP(NAME, ADDRESS, ppc_enable_pmcs())
443#define SYSFS_SPRSETUP(NAME, ADDRESS) \
444 __SYSFS_SPRSETUP(NAME, ADDRESS, )
143 445
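
The refactoring above relies on passing either a statement or an empty argument into the shared macro body. A small standalone model of that technique (names here are invented, and it prints instead of touching SPRs):

/* Standalone sketch of the "optional extra statement" macro pattern. */
#include <stdio.h>

static void enable_pmcs(void) { printf("  (enabling PMCs first)\n"); }

#define SPR_WRITE_BODY(NAME, EXTRA) static void write_##NAME(unsigned long val) { EXTRA; printf("write " #NAME " = %#lx\n", val); }

#define PMC_SETUP(NAME)  SPR_WRITE_BODY(NAME, enable_pmcs())   /* PMCs need enabling */
#define SPR_SETUP(NAME)  SPR_WRITE_BODY(NAME, )                /* plain SPRs do not */

PMC_SETUP(pmc1)
SPR_SETUP(purr)

int main(void)
{
        write_pmc1(0x1234);
        write_purr(0x5678);
        return 0;
}
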
144/* Let's define all possible registers, we'll only hook up the ones 446/* Let's define all possible registers, we'll only hook up the ones
145 * that are implemented on the current processor 447 * that are implemented on the current processor
@@ -175,10 +477,10 @@ SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
175SYSFS_PMCSETUP(pmc8, SPRN_PMC8); 477SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
176 478
177SYSFS_PMCSETUP(mmcra, SPRN_MMCRA); 479SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
178SYSFS_PMCSETUP(purr, SPRN_PURR); 480SYSFS_SPRSETUP(purr, SPRN_PURR);
179SYSFS_PMCSETUP(spurr, SPRN_SPURR); 481SYSFS_SPRSETUP(spurr, SPRN_SPURR);
180SYSFS_PMCSETUP(dscr, SPRN_DSCR); 482SYSFS_SPRSETUP(dscr, SPRN_DSCR);
181SYSFS_PMCSETUP(pir, SPRN_PIR); 483SYSFS_SPRSETUP(pir, SPRN_PIR);
182 484
183/* 485/*
184 Lets only enable read for phyp resources and 486 Lets only enable read for phyp resources and
@@ -249,34 +551,34 @@ SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
249SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4); 551SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
250SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5); 552SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);
251#ifdef CONFIG_DEBUG_KERNEL 553#ifdef CONFIG_DEBUG_KERNEL
252SYSFS_PMCSETUP(hid0, SPRN_HID0); 554SYSFS_SPRSETUP(hid0, SPRN_HID0);
253SYSFS_PMCSETUP(hid1, SPRN_HID1); 555SYSFS_SPRSETUP(hid1, SPRN_HID1);
254SYSFS_PMCSETUP(hid4, SPRN_HID4); 556SYSFS_SPRSETUP(hid4, SPRN_HID4);
255SYSFS_PMCSETUP(hid5, SPRN_HID5); 557SYSFS_SPRSETUP(hid5, SPRN_HID5);
256SYSFS_PMCSETUP(ima0, SPRN_PA6T_IMA0); 558SYSFS_SPRSETUP(ima0, SPRN_PA6T_IMA0);
257SYSFS_PMCSETUP(ima1, SPRN_PA6T_IMA1); 559SYSFS_SPRSETUP(ima1, SPRN_PA6T_IMA1);
258SYSFS_PMCSETUP(ima2, SPRN_PA6T_IMA2); 560SYSFS_SPRSETUP(ima2, SPRN_PA6T_IMA2);
259SYSFS_PMCSETUP(ima3, SPRN_PA6T_IMA3); 561SYSFS_SPRSETUP(ima3, SPRN_PA6T_IMA3);
260SYSFS_PMCSETUP(ima4, SPRN_PA6T_IMA4); 562SYSFS_SPRSETUP(ima4, SPRN_PA6T_IMA4);
261SYSFS_PMCSETUP(ima5, SPRN_PA6T_IMA5); 563SYSFS_SPRSETUP(ima5, SPRN_PA6T_IMA5);
262SYSFS_PMCSETUP(ima6, SPRN_PA6T_IMA6); 564SYSFS_SPRSETUP(ima6, SPRN_PA6T_IMA6);
263SYSFS_PMCSETUP(ima7, SPRN_PA6T_IMA7); 565SYSFS_SPRSETUP(ima7, SPRN_PA6T_IMA7);
264SYSFS_PMCSETUP(ima8, SPRN_PA6T_IMA8); 566SYSFS_SPRSETUP(ima8, SPRN_PA6T_IMA8);
265SYSFS_PMCSETUP(ima9, SPRN_PA6T_IMA9); 567SYSFS_SPRSETUP(ima9, SPRN_PA6T_IMA9);
266SYSFS_PMCSETUP(imaat, SPRN_PA6T_IMAAT); 568SYSFS_SPRSETUP(imaat, SPRN_PA6T_IMAAT);
267SYSFS_PMCSETUP(btcr, SPRN_PA6T_BTCR); 569SYSFS_SPRSETUP(btcr, SPRN_PA6T_BTCR);
268SYSFS_PMCSETUP(pccr, SPRN_PA6T_PCCR); 570SYSFS_SPRSETUP(pccr, SPRN_PA6T_PCCR);
269SYSFS_PMCSETUP(rpccr, SPRN_PA6T_RPCCR); 571SYSFS_SPRSETUP(rpccr, SPRN_PA6T_RPCCR);
270SYSFS_PMCSETUP(der, SPRN_PA6T_DER); 572SYSFS_SPRSETUP(der, SPRN_PA6T_DER);
271SYSFS_PMCSETUP(mer, SPRN_PA6T_MER); 573SYSFS_SPRSETUP(mer, SPRN_PA6T_MER);
272SYSFS_PMCSETUP(ber, SPRN_PA6T_BER); 574SYSFS_SPRSETUP(ber, SPRN_PA6T_BER);
273SYSFS_PMCSETUP(ier, SPRN_PA6T_IER); 575SYSFS_SPRSETUP(ier, SPRN_PA6T_IER);
274SYSFS_PMCSETUP(sier, SPRN_PA6T_SIER); 576SYSFS_SPRSETUP(sier, SPRN_PA6T_SIER);
275SYSFS_PMCSETUP(siar, SPRN_PA6T_SIAR); 577SYSFS_SPRSETUP(siar, SPRN_PA6T_SIAR);
276SYSFS_PMCSETUP(tsr0, SPRN_PA6T_TSR0); 578SYSFS_SPRSETUP(tsr0, SPRN_PA6T_TSR0);
277SYSFS_PMCSETUP(tsr1, SPRN_PA6T_TSR1); 579SYSFS_SPRSETUP(tsr1, SPRN_PA6T_TSR1);
278SYSFS_PMCSETUP(tsr2, SPRN_PA6T_TSR2); 580SYSFS_SPRSETUP(tsr2, SPRN_PA6T_TSR2);
279SYSFS_PMCSETUP(tsr3, SPRN_PA6T_TSR3); 581SYSFS_SPRSETUP(tsr3, SPRN_PA6T_TSR3);
280#endif /* CONFIG_DEBUG_KERNEL */ 582#endif /* CONFIG_DEBUG_KERNEL */
281#endif /* HAS_PPC_PMC_PA6T */ 583#endif /* HAS_PPC_PMC_PA6T */
282 584
@@ -421,6 +723,15 @@ static void register_cpu_online(unsigned int cpu)
421 device_create_file(s, &dev_attr_pir); 723 device_create_file(s, &dev_attr_pir);
422#endif /* CONFIG_PPC64 */ 724#endif /* CONFIG_PPC64 */
423 725
726#ifdef CONFIG_PPC_FSL_BOOK3E
727 if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
728 device_create_file(s, &dev_attr_pw20_state);
729 device_create_file(s, &dev_attr_pw20_wait_time);
730
731 device_create_file(s, &dev_attr_altivec_idle);
732 device_create_file(s, &dev_attr_altivec_idle_wait_time);
733 }
734#endif
424 cacheinfo_cpu_online(cpu); 735 cacheinfo_cpu_online(cpu);
425} 736}
426 737
@@ -493,6 +804,15 @@ static void unregister_cpu_online(unsigned int cpu)
493 device_remove_file(s, &dev_attr_pir); 804 device_remove_file(s, &dev_attr_pir);
494#endif /* CONFIG_PPC64 */ 805#endif /* CONFIG_PPC64 */
495 806
807#ifdef CONFIG_PPC_FSL_BOOK3E
808 if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
809 device_remove_file(s, &dev_attr_pw20_state);
810 device_remove_file(s, &dev_attr_pw20_wait_time);
811
812 device_remove_file(s, &dev_attr_altivec_idle);
813 device_remove_file(s, &dev_attr_altivec_idle_wait_time);
814 }
815#endif
496 cacheinfo_cpu_offline(cpu); 816 cacheinfo_cpu_offline(cpu);
497} 817}
498 818
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index b3b144121cc9..b3dab20acf34 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -510,7 +510,6 @@ void timer_interrupt(struct pt_regs * regs)
510 */ 510 */
511 may_hard_irq_enable(); 511 may_hard_irq_enable();
512 512
513 __get_cpu_var(irq_stat).timer_irqs++;
514 513
515#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) 514#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
516 if (atomic_read(&ppc_n_lost_interrupts) != 0) 515 if (atomic_read(&ppc_n_lost_interrupts) != 0)
@@ -532,10 +531,15 @@ void timer_interrupt(struct pt_regs * regs)
532 *next_tb = ~(u64)0; 531 *next_tb = ~(u64)0;
533 if (evt->event_handler) 532 if (evt->event_handler)
534 evt->event_handler(evt); 533 evt->event_handler(evt);
534 __get_cpu_var(irq_stat).timer_irqs_event++;
535 } else { 535 } else {
536 now = *next_tb - now; 536 now = *next_tb - now;
537 if (now <= DECREMENTER_MAX) 537 if (now <= DECREMENTER_MAX)
538 set_dec((int)now); 538 set_dec((int)now);
539 /* We may have raced with new irq work */
540 if (test_irq_work_pending())
541 set_dec(1);
542 __get_cpu_var(irq_stat).timer_irqs_others++;
539 } 543 }
540 544
541#ifdef CONFIG_PPC64 545#ifdef CONFIG_PPC64
@@ -801,8 +805,16 @@ static void __init clocksource_init(void)
801static int decrementer_set_next_event(unsigned long evt, 805static int decrementer_set_next_event(unsigned long evt,
802 struct clock_event_device *dev) 806 struct clock_event_device *dev)
803{ 807{
808 /* Don't adjust the decrementer if some irq work is pending */
809 if (test_irq_work_pending())
810 return 0;
804 __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt; 811 __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
805 set_dec(evt); 812 set_dec(evt);
813
814 /* We may have raced with new irq work */
815 if (test_irq_work_pending())
816 set_dec(1);
817
806 return 0; 818 return 0;
807} 819}
808 820
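
A standalone model of the race handling added here: after programming a long timeout, check again for pending irq_work and, if some arrived in the window, force an almost immediate tick. The single flag below stands in for the per-CPU irq_work_pending state.

/* Standalone sketch (not kernel code). */
#include <stdio.h>
#include <stdbool.h>

static volatile bool irq_work_pending;
static long decrementer;

static void set_dec(long ticks) { decrementer = ticks; }

static void set_next_event(long ticks)
{
        if (irq_work_pending)          /* don't push the pending work out */
                return;
        set_dec(ticks);
        if (irq_work_pending)          /* raced with newly queued irq work */
                set_dec(1);
}

int main(void)
{
        set_next_event(1000000);
        printf("no work pending: decrementer=%ld\n", decrementer);

        irq_work_pending = true;       /* pretend work was queued meanwhile */
        set_next_event(2000000);
        printf("work pending: timeout not reprogrammed, decrementer=%ld\n",
               decrementer);
        return 0;
}
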
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 907a472f9a9e..33cd7a0b8e73 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -285,6 +285,21 @@ void system_reset_exception(struct pt_regs *regs)
285 285
286 /* What should we do here? We could issue a shutdown or hard reset. */ 286 /* What should we do here? We could issue a shutdown or hard reset. */
287} 287}
288
289/*
290 * This function is called in real mode. Strictly no printk's please.
291 *
 292 * regs->nip and regs->msr contain SRR0 and SRR1.
293 */
294long machine_check_early(struct pt_regs *regs)
295{
296 long handled = 0;
297
298 if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
299 handled = cur_cpu_spec->machine_check_early(regs);
300 return handled;
301}
302
288#endif 303#endif
289 304
290/* 305/*
@@ -1384,7 +1399,6 @@ void fp_unavailable_tm(struct pt_regs *regs)
1384 1399
1385 TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n", 1400 TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
1386 regs->nip, regs->msr); 1401 regs->nip, regs->msr);
1387 tm_enable();
1388 1402
1389 /* We can only have got here if the task started using FP after 1403 /* We can only have got here if the task started using FP after
1390 * beginning the transaction. So, the transactional regs are just a 1404 * beginning the transaction. So, the transactional regs are just a
@@ -1393,8 +1407,7 @@ void fp_unavailable_tm(struct pt_regs *regs)
1393 * transaction, and probably retry but now with FP enabled. So the 1407 * transaction, and probably retry but now with FP enabled. So the
1394 * checkpointed FP registers need to be loaded. 1408 * checkpointed FP registers need to be loaded.
1395 */ 1409 */
1396 tm_reclaim(&current->thread, current->thread.regs->msr, 1410 tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1397 TM_CAUSE_FAC_UNAV);
1398 /* Reclaim didn't save out any FPRs to transact_fprs. */ 1411 /* Reclaim didn't save out any FPRs to transact_fprs. */
1399 1412
1400 /* Enable FP for the task: */ 1413 /* Enable FP for the task: */
@@ -1403,11 +1416,19 @@ void fp_unavailable_tm(struct pt_regs *regs)
1403 /* This loads and recheckpoints the FP registers from 1416 /* This loads and recheckpoints the FP registers from
1404 * thread.fpr[]. They will remain in registers after the 1417 * thread.fpr[]. They will remain in registers after the
1405 * checkpoint so we don't need to reload them after. 1418 * checkpoint so we don't need to reload them after.
1419 * If VMX is in use, the VRs now hold checkpointed values,
1420 * so we don't want to load the VRs from the thread_struct.
1406 */ 1421 */
1407 tm_recheckpoint(&current->thread, regs->msr); 1422 tm_recheckpoint(&current->thread, MSR_FP);
1423
1424 /* If VMX is in use, get the transactional values back */
1425 if (regs->msr & MSR_VEC) {
1426 do_load_up_transact_altivec(&current->thread);
1427 /* At this point all the VSX state is loaded, so enable it */
1428 regs->msr |= MSR_VSX;
1429 }
1408} 1430}
1409 1431
1410#ifdef CONFIG_ALTIVEC
1411void altivec_unavailable_tm(struct pt_regs *regs) 1432void altivec_unavailable_tm(struct pt_regs *regs)
1412{ 1433{
1413 /* See the comments in fp_unavailable_tm(). This function operates 1434 /* See the comments in fp_unavailable_tm(). This function operates
@@ -1417,18 +1438,21 @@ void altivec_unavailable_tm(struct pt_regs *regs)
1417 TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx," 1438 TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
1418 "MSR=%lx\n", 1439 "MSR=%lx\n",
1419 regs->nip, regs->msr); 1440 regs->nip, regs->msr);
1420 tm_enable(); 1441 tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1421 tm_reclaim(&current->thread, current->thread.regs->msr,
1422 TM_CAUSE_FAC_UNAV);
1423 regs->msr |= MSR_VEC; 1442 regs->msr |= MSR_VEC;
1424 tm_recheckpoint(&current->thread, regs->msr); 1443 tm_recheckpoint(&current->thread, MSR_VEC);
1425 current->thread.used_vr = 1; 1444 current->thread.used_vr = 1;
1445
1446 if (regs->msr & MSR_FP) {
1447 do_load_up_transact_fpu(&current->thread);
1448 regs->msr |= MSR_VSX;
1449 }
1426} 1450}
1427#endif
1428 1451
1429#ifdef CONFIG_VSX
1430void vsx_unavailable_tm(struct pt_regs *regs) 1452void vsx_unavailable_tm(struct pt_regs *regs)
1431{ 1453{
1454 unsigned long orig_msr = regs->msr;
1455
1432 /* See the comments in fp_unavailable_tm(). This works similarly, 1456 /* See the comments in fp_unavailable_tm(). This works similarly,
1433 * though we're loading both FP and VEC registers in here. 1457 * though we're loading both FP and VEC registers in here.
1434 * 1458 *
@@ -1440,18 +1464,30 @@ void vsx_unavailable_tm(struct pt_regs *regs)
1440 "MSR=%lx\n", 1464 "MSR=%lx\n",
1441 regs->nip, regs->msr); 1465 regs->nip, regs->msr);
1442 1466
1443 tm_enable(); 1467 current->thread.used_vsr = 1;
1468
1469 /* If FP and VMX are already loaded, we have all the state we need */
1470 if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
1471 regs->msr |= MSR_VSX;
1472 return;
1473 }
1474
1444 /* This reclaims FP and/or VR regs if they're already enabled */ 1475 /* This reclaims FP and/or VR regs if they're already enabled */
1445 tm_reclaim(&current->thread, current->thread.regs->msr, 1476 tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1446 TM_CAUSE_FAC_UNAV);
1447 1477
1448 regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode | 1478 regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
1449 MSR_VSX; 1479 MSR_VSX;
1450 /* This loads & recheckpoints FP and VRs. */ 1480
1451 tm_recheckpoint(&current->thread, regs->msr); 1481 /* This loads & recheckpoints FP and VRs; but we have
1452 current->thread.used_vsr = 1; 1482 * to be sure not to overwrite previously-valid state.
1483 */
1484 tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);
1485
1486 if (orig_msr & MSR_FP)
1487 do_load_up_transact_fpu(&current->thread);
1488 if (orig_msr & MSR_VEC)
1489 do_load_up_transact_altivec(&current->thread);
1453} 1490}
1454#endif
1455#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 1491#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1456 1492
1457void performance_monitor_exception(struct pt_regs *regs) 1493void performance_monitor_exception(struct pt_regs *regs)
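
A standalone sketch of the masking added to vsx_unavailable_tm(): only facilities that were not already live (orig_msr) are recheckpointed from the thread state, and the ones that were live get their transactional copies reloaded afterwards. MSR bit values are illustrative.

/* Standalone sketch (not kernel code). */
#include <stdio.h>

#define MSR_FP  0x2000UL        /* illustrative bit values */
#define MSR_VEC 0x0200UL
#define MSR_VSX 0x0080UL

int main(void)
{
        unsigned long orig_msr = MSR_FP;                 /* FP was live, VMX not */
        unsigned long want     = MSR_FP | MSR_VEC | MSR_VSX;
        unsigned long recheck  = want & ~orig_msr;       /* arg to tm_recheckpoint */

        printf("recheckpoint from thread state: %s%s\n",
               (recheck & MSR_FP)  ? "FP "  : "",
               (recheck & MSR_VEC) ? "VEC " : "");
        if (orig_msr & MSR_FP)
                printf("reload transactional FP afterwards\n");
        if (orig_msr & MSR_VEC)
                printf("reload transactional VMX afterwards\n");
        return 0;
}
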
diff --git a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
index 6e8f507ed32b..79683d0393f5 100644
--- a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
+++ b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
@@ -1,4 +1,3 @@
1#include <linux/init.h>
2#include <linux/linkage.h> 1#include <linux/linkage.h>
3#include <asm/page.h> 2#include <asm/page.h>
4 3
diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
index b8553d62b792..8df9e2463007 100644
--- a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
+++ b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
@@ -1,4 +1,3 @@
1#include <linux/init.h>
2#include <linux/linkage.h> 1#include <linux/linkage.h>
3#include <asm/page.h> 2#include <asm/page.h>
4 3
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 0458a9aaba9d..74f8050518d6 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -37,6 +37,16 @@ _GLOBAL(do_load_up_transact_altivec)
37#endif 37#endif
38 38
39/* 39/*
40 * Enable use of VMX/Altivec for the caller.
41 */
42_GLOBAL(vec_enable)
43 mfmsr r3
44 oris r3,r3,MSR_VEC@h
45 MTMSRD(r3)
46 isync
47 blr
48
49/*
40 * Load state from memory into VMX registers including VSCR. 50 * Load state from memory into VMX registers including VSCR.
41 * Assumes the caller has enabled VMX in the MSR. 51 * Assumes the caller has enabled VMX in the MSR.
42 */ 52 */
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 76a64821f4a2..826d8bd9e522 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -518,16 +518,18 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
518 struct dma_attrs *attrs) 518 struct dma_attrs *attrs)
519{ 519{
520 struct vio_dev *viodev = to_vio_dev(dev); 520 struct vio_dev *viodev = to_vio_dev(dev);
521 struct iommu_table *tbl;
521 dma_addr_t ret = DMA_ERROR_CODE; 522 dma_addr_t ret = DMA_ERROR_CODE;
522 523
523 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) { 524 tbl = get_iommu_table_base(dev);
525 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
524 atomic_inc(&viodev->cmo.allocs_failed); 526 atomic_inc(&viodev->cmo.allocs_failed);
525 return ret; 527 return ret;
526 } 528 }
527 529
528 ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs); 530 ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
529 if (unlikely(dma_mapping_error(dev, ret))) { 531 if (unlikely(dma_mapping_error(dev, ret))) {
530 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); 532 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
531 atomic_inc(&viodev->cmo.allocs_failed); 533 atomic_inc(&viodev->cmo.allocs_failed);
532 } 534 }
533 535
@@ -540,10 +542,12 @@ static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
540 struct dma_attrs *attrs) 542 struct dma_attrs *attrs)
541{ 543{
542 struct vio_dev *viodev = to_vio_dev(dev); 544 struct vio_dev *viodev = to_vio_dev(dev);
545 struct iommu_table *tbl;
543 546
547 tbl = get_iommu_table_base(dev);
544 dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs); 548 dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
545 549
546 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); 550 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
547} 551}
548 552
549static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, 553static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -551,12 +555,14 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
551 struct dma_attrs *attrs) 555 struct dma_attrs *attrs)
552{ 556{
553 struct vio_dev *viodev = to_vio_dev(dev); 557 struct vio_dev *viodev = to_vio_dev(dev);
558 struct iommu_table *tbl;
554 struct scatterlist *sgl; 559 struct scatterlist *sgl;
555 int ret, count = 0; 560 int ret, count = 0;
556 size_t alloc_size = 0; 561 size_t alloc_size = 0;
557 562
563 tbl = get_iommu_table_base(dev);
558 for (sgl = sglist; count < nelems; count++, sgl++) 564 for (sgl = sglist; count < nelems; count++, sgl++)
559 alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE); 565 alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
560 566
561 if (vio_cmo_alloc(viodev, alloc_size)) { 567 if (vio_cmo_alloc(viodev, alloc_size)) {
562 atomic_inc(&viodev->cmo.allocs_failed); 568 atomic_inc(&viodev->cmo.allocs_failed);
@@ -572,7 +578,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
572 } 578 }
573 579
574 for (sgl = sglist, count = 0; count < ret; count++, sgl++) 580 for (sgl = sglist, count = 0; count < ret; count++, sgl++)
575 alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE); 581 alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
576 if (alloc_size) 582 if (alloc_size)
577 vio_cmo_dealloc(viodev, alloc_size); 583 vio_cmo_dealloc(viodev, alloc_size);
578 584
@@ -585,12 +591,14 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
585 struct dma_attrs *attrs) 591 struct dma_attrs *attrs)
586{ 592{
587 struct vio_dev *viodev = to_vio_dev(dev); 593 struct vio_dev *viodev = to_vio_dev(dev);
594 struct iommu_table *tbl;
588 struct scatterlist *sgl; 595 struct scatterlist *sgl;
589 size_t alloc_size = 0; 596 size_t alloc_size = 0;
590 int count = 0; 597 int count = 0;
591 598
599 tbl = get_iommu_table_base(dev);
592 for (sgl = sglist; count < nelems; count++, sgl++) 600 for (sgl = sglist; count < nelems; count++, sgl++)
593 alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE); 601 alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
594 602
595 dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs); 603 dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
596 604
@@ -706,11 +714,14 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
706{ 714{
707 struct vio_cmo_dev_entry *dev_ent; 715 struct vio_cmo_dev_entry *dev_ent;
708 struct device *dev = &viodev->dev; 716 struct device *dev = &viodev->dev;
717 struct iommu_table *tbl;
709 struct vio_driver *viodrv = to_vio_driver(dev->driver); 718 struct vio_driver *viodrv = to_vio_driver(dev->driver);
710 unsigned long flags; 719 unsigned long flags;
711 size_t size; 720 size_t size;
712 bool dma_capable = false; 721 bool dma_capable = false;
713 722
723 tbl = get_iommu_table_base(dev);
724
714 /* A device requires entitlement if it has a DMA window property */ 725 /* A device requires entitlement if it has a DMA window property */
715 switch (viodev->family) { 726 switch (viodev->family) {
716 case VDEVICE: 727 case VDEVICE:
@@ -736,7 +747,8 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
736 return -EINVAL; 747 return -EINVAL;
737 } 748 }
738 749
739 viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev)); 750 viodev->cmo.desired =
751 IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
740 if (viodev->cmo.desired < VIO_CMO_MIN_ENT) 752 if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
741 viodev->cmo.desired = VIO_CMO_MIN_ENT; 753 viodev->cmo.desired = VIO_CMO_MIN_ENT;
742 size = VIO_CMO_MIN_ENT; 754 size = VIO_CMO_MIN_ENT;
@@ -1176,9 +1188,10 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
1176 &tbl->it_index, &offset, &size); 1188 &tbl->it_index, &offset, &size);
1177 1189
1178 /* TCE table size - measured in tce entries */ 1190 /* TCE table size - measured in tce entries */
1179 tbl->it_size = size >> IOMMU_PAGE_SHIFT; 1191 tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
1192 tbl->it_size = size >> tbl->it_page_shift;
1180 /* offset for VIO should always be 0 */ 1193 /* offset for VIO should always be 0 */
1181 tbl->it_offset = offset >> IOMMU_PAGE_SHIFT; 1194 tbl->it_offset = offset >> tbl->it_page_shift;
1182 tbl->it_busno = 0; 1195 tbl->it_busno = 0;
1183 tbl->it_type = TCE_VB; 1196 tbl->it_type = TCE_VB;
1184 tbl->it_blocksize = 16; 1197 tbl->it_blocksize = 16;
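
A standalone sketch of the effect of switching from a fixed IOMMU_PAGE_SIZE to the per-table page shift: the CMO accounting rounds each mapping up to whatever page size the device's table actually uses. The 4K and 64K shifts below are assumptions for illustration.

/* Standalone sketch (not kernel code). */
#include <stdio.h>
#include <stdint.h>

static uint64_t iommu_roundup(uint64_t size, unsigned int page_shift)
{
        uint64_t page = 1ULL << page_shift;

        return (size + page - 1) & ~(page - 1);
}

int main(void)
{
        uint64_t size = 6000;

        printf("%llu bytes -> %llu with 4K IOMMU pages, %llu with 64K\n",
               (unsigned long long)size,
               (unsigned long long)iommu_roundup(size, 12),
               (unsigned long long)iommu_roundup(size, 16));
        return 0;
}
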
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index a353c485808c..768a9f977c00 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -12,6 +12,7 @@
12#include <linux/kvm_host.h> 12#include <linux/kvm_host.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <asm/opal.h> 14#include <asm/opal.h>
15#include <asm/mce.h>
15 16
16/* SRR1 bits for machine check on POWER7 */ 17/* SRR1 bits for machine check on POWER7 */
17#define SRR1_MC_LDSTERR (1ul << (63-42)) 18#define SRR1_MC_LDSTERR (1ul << (63-42))
@@ -58,18 +59,6 @@ static void reload_slb(struct kvm_vcpu *vcpu)
58 } 59 }
59} 60}
60 61
61/* POWER7 TLB flush */
62static void flush_tlb_power7(struct kvm_vcpu *vcpu)
63{
64 unsigned long i, rb;
65
66 rb = TLBIEL_INVAL_SET_LPID;
67 for (i = 0; i < POWER7_TLB_SETS; ++i) {
68 asm volatile("tlbiel %0" : : "r" (rb));
69 rb += 1 << TLBIEL_INVAL_SET_SHIFT;
70 }
71}
72
73/* 62/*
74 * On POWER7, see if we can handle a machine check that occurred inside 63 * On POWER7, see if we can handle a machine check that occurred inside
75 * the guest in real mode, without switching to the host partition. 64 * the guest in real mode, without switching to the host partition.
@@ -79,9 +68,7 @@ static void flush_tlb_power7(struct kvm_vcpu *vcpu)
79static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) 68static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
80{ 69{
81 unsigned long srr1 = vcpu->arch.shregs.msr; 70 unsigned long srr1 = vcpu->arch.shregs.msr;
82#ifdef CONFIG_PPC_POWERNV 71 struct machine_check_event mce_evt;
83 struct opal_machine_check_event *opal_evt;
84#endif
85 long handled = 1; 72 long handled = 1;
86 73
87 if (srr1 & SRR1_MC_LDSTERR) { 74 if (srr1 & SRR1_MC_LDSTERR) {
@@ -96,7 +83,8 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
96 DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI); 83 DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI);
97 } 84 }
98 if (dsisr & DSISR_MC_TLB_MULTI) { 85 if (dsisr & DSISR_MC_TLB_MULTI) {
99 flush_tlb_power7(vcpu); 86 if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
87 cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET_LPID);
100 dsisr &= ~DSISR_MC_TLB_MULTI; 88 dsisr &= ~DSISR_MC_TLB_MULTI;
101 } 89 }
102 /* Any other errors we don't understand? */ 90 /* Any other errors we don't understand? */
@@ -113,28 +101,38 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
113 reload_slb(vcpu); 101 reload_slb(vcpu);
114 break; 102 break;
115 case SRR1_MC_IFETCH_TLBMULTI: 103 case SRR1_MC_IFETCH_TLBMULTI:
116 flush_tlb_power7(vcpu); 104 if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
105 cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET_LPID);
117 break; 106 break;
118 default: 107 default:
119 handled = 0; 108 handled = 0;
120 } 109 }
121 110
122#ifdef CONFIG_PPC_POWERNV
123 /* 111 /*
124 * See if OPAL has already handled the condition. 112 * See if we have already handled the condition in the linux host.
125 * We assume that if the condition is recovered then OPAL 113 * We assume that if the condition is recovered then linux host
126 * will have generated an error log event that we will pick 114 * will have generated an error log event that we will pick
127 * up and log later. 115 * up and log later.
116 * Don't release the mce event now. If the condition is not
117 * recovered we do a guest exit and go back to the linux host machine
118 * check handler, so we need to make sure the current mce event
119 * is still available for the linux host to consume.
128 */ 120 */
129 opal_evt = local_paca->opal_mc_evt; 121 if (!get_mce_event(&mce_evt, MCE_EVENT_DONTRELEASE))
130 if (opal_evt->version == OpalMCE_V1 && 122 goto out;
131 (opal_evt->severity == OpalMCE_SEV_NO_ERROR || 123
132 opal_evt->disposition == OpalMCE_DISPOSITION_RECOVERED)) 124 if (mce_evt.version == MCE_V1 &&
125 (mce_evt.severity == MCE_SEV_NO_ERROR ||
126 mce_evt.disposition == MCE_DISPOSITION_RECOVERED))
133 handled = 1; 127 handled = 1;
134 128
129out:
130 /*
131 * If we have handled the error, then release the mce event because
132 * we will be delivering machine check to guest.
133 */
135 if (handled) 134 if (handled)
136 opal_evt->in_use = 0; 135 release_mce_event();
137#endif
138 136
139 return handled; 137 return handled;
140} 138}
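
The book3s_hv_ras.c change above replaces the direct OPAL event check with the generic machine check queue: peek at the pending event with MCE_EVENT_DONTRELEASE, decide whether the host already recovered it, and only release the event once it is considered handled. A minimal, self-contained sketch of that peek/decide/release shape; peek_event() and drop_event() are hypothetical stand-ins for get_mce_event() and release_mce_event():

    /* Sketch only: the shape of the realmode MCE check above, in plain C. */
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_mce_event { bool recovered; };

    static bool peek_event(struct fake_mce_event *evt)   /* stand-in */
    {
        evt->recovered = true;   /* pretend the host recovered the error */
        return true;             /* event stays queued (DONTRELEASE) */
    }

    static void drop_event(void) { }                     /* stand-in */

    static int realmode_mc_sketch(int handled)
    {
        struct fake_mce_event evt;

        if (!peek_event(&evt))   /* look at the queued event, keep it queued */
            goto out;
        if (evt.recovered)       /* host already recovered it */
            handled = 1;
    out:
        if (handled)
            drop_event();        /* only consume the event once it is handled */
        return handled;
    }

    int main(void)
    {
        printf("%d\n", realmode_mc_sketch(0));   /* prints 1 */
        return 0;
    }
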
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index e8ed7d659c55..a0d6929d8678 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -319,6 +319,8 @@ kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(DBG), \
319 SPRN_DSRR0, SPRN_DSRR1, 0 319 SPRN_DSRR0, SPRN_DSRR1, 0
320kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(CRIT), \ 320kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(CRIT), \
321 SPRN_CSRR0, SPRN_CSRR1, 0 321 SPRN_CSRR0, SPRN_CSRR1, 0
322kvm_handler BOOKE_INTERRUPT_LRAT_ERROR, EX_PARAMS(GEN), \
323 SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
322#else 324#else
323/* 325/*
324 * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h 326 * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 17e5b2364312..d5edbeb8eb82 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -159,6 +159,21 @@ unsigned int translate_branch(const unsigned int *dest, const unsigned int *src)
159 return 0; 159 return 0;
160} 160}
161 161
162#ifdef CONFIG_PPC_BOOK3E_64
163void __patch_exception(int exc, unsigned long addr)
164{
165 extern unsigned int interrupt_base_book3e;
166 unsigned int *ibase = &interrupt_base_book3e;
167
168 /* Our exception vectors start with a NOP and -then- a branch
169 * to deal with single stepping from userspace which stops on
170 * the second instruction. Thus we need to patch the second
171 * instruction of the exception, not the first one
172 */
173
174 patch_branch(ibase + (exc / 4) + 1, addr, 0);
175}
176#endif
162 177
163#ifdef CONFIG_CODE_PATCHING_SELFTEST 178#ifdef CONFIG_CODE_PATCHING_SELFTEST
164 179
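
__patch_exception() above redirects a Book3E exception vector by patching its second instruction (the first is a NOP kept for single-step handling), where exc is a byte offset from interrupt_base_book3e. A small standalone sketch of just the index arithmetic, using the 0x1c0 data TLB miss vector as the example offset:

    /* Sketch of the address arithmetic in __patch_exception(): 'exc' is a
     * byte offset from the vector base, instructions are 4 bytes wide, and
     * the first instruction of each vector is a NOP, so the branch goes
     * into the second slot.  Pure illustration, not kernel code. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int vectors[0x200 / 4];               /* fake interrupt_base_book3e */
        unsigned int exc = 0x1c0;                      /* data TLB miss vector offset */
        unsigned int *slot = vectors + (exc / 4) + 1;  /* skip the leading NOP */

        printf("patching instruction index %td\n", slot - vectors);  /* 113 */
        return 0;
    }
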
diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
index b2c68ce139ae..a5b30c71a8d3 100644
--- a/arch/powerpc/lib/crtsavres.S
+++ b/arch/powerpc/lib/crtsavres.S
@@ -231,6 +231,87 @@ _GLOBAL(_rest32gpr_31_x)
231 mr 1,11 231 mr 1,11
232 blr 232 blr
233 233
234#ifdef CONFIG_ALTIVEC
235/* Called with r0 pointing just beyond the end of the vector save area. */
236
237_GLOBAL(_savevr_20)
238 li r11,-192
239 stvx vr20,r11,r0
240_GLOBAL(_savevr_21)
241 li r11,-176
242 stvx vr21,r11,r0
243_GLOBAL(_savevr_22)
244 li r11,-160
245 stvx vr22,r11,r0
246_GLOBAL(_savevr_23)
247 li r11,-144
248 stvx vr23,r11,r0
249_GLOBAL(_savevr_24)
250 li r11,-128
251 stvx vr24,r11,r0
252_GLOBAL(_savevr_25)
253 li r11,-112
254 stvx vr25,r11,r0
255_GLOBAL(_savevr_26)
256 li r11,-96
257 stvx vr26,r11,r0
258_GLOBAL(_savevr_27)
259 li r11,-80
260 stvx vr27,r11,r0
261_GLOBAL(_savevr_28)
262 li r11,-64
263 stvx vr28,r11,r0
264_GLOBAL(_savevr_29)
265 li r11,-48
266 stvx vr29,r11,r0
267_GLOBAL(_savevr_30)
268 li r11,-32
269 stvx vr30,r11,r0
270_GLOBAL(_savevr_31)
271 li r11,-16
272 stvx vr31,r11,r0
273 blr
274
275_GLOBAL(_restvr_20)
276 li r11,-192
277 lvx vr20,r11,r0
278_GLOBAL(_restvr_21)
279 li r11,-176
280 lvx vr21,r11,r0
281_GLOBAL(_restvr_22)
282 li r11,-160
283 lvx vr22,r11,r0
284_GLOBAL(_restvr_23)
285 li r11,-144
286 lvx vr23,r11,r0
287_GLOBAL(_restvr_24)
288 li r11,-128
289 lvx vr24,r11,r0
290_GLOBAL(_restvr_25)
291 li r11,-112
292 lvx vr25,r11,r0
293_GLOBAL(_restvr_26)
294 li r11,-96
295 lvx vr26,r11,r0
296_GLOBAL(_restvr_27)
297 li r11,-80
298 lvx vr27,r11,r0
299_GLOBAL(_restvr_28)
300 li r11,-64
301 lvx vr28,r11,r0
302_GLOBAL(_restvr_29)
303 li r11,-48
304 lvx vr29,r11,r0
305_GLOBAL(_restvr_30)
306 li r11,-32
307 lvx vr30,r11,r0
308_GLOBAL(_restvr_31)
309 li r11,-16
310 lvx vr31,r11,r0
311 blr
312
313#endif /* CONFIG_ALTIVEC */
314
234#else /* CONFIG_PPC64 */ 315#else /* CONFIG_PPC64 */
235 316
236 .section ".text.save.restore","ax",@progbits 317 .section ".text.save.restore","ax",@progbits
@@ -356,6 +437,111 @@ _restgpr0_31:
356 mtlr r0 437 mtlr r0
357 blr 438 blr
358 439
440#ifdef CONFIG_ALTIVEC
441/* Called with r0 pointing just beyond the end of the vector save area. */
442
443.globl _savevr_20
444_savevr_20:
445 li r12,-192
446 stvx vr20,r12,r0
447.globl _savevr_21
448_savevr_21:
449 li r12,-176
450 stvx vr21,r12,r0
451.globl _savevr_22
452_savevr_22:
453 li r12,-160
454 stvx vr22,r12,r0
455.globl _savevr_23
456_savevr_23:
457 li r12,-144
458 stvx vr23,r12,r0
459.globl _savevr_24
460_savevr_24:
461 li r12,-128
462 stvx vr24,r12,r0
463.globl _savevr_25
464_savevr_25:
465 li r12,-112
466 stvx vr25,r12,r0
467.globl _savevr_26
468_savevr_26:
469 li r12,-96
470 stvx vr26,r12,r0
471.globl _savevr_27
472_savevr_27:
473 li r12,-80
474 stvx vr27,r12,r0
475.globl _savevr_28
476_savevr_28:
477 li r12,-64
478 stvx vr28,r12,r0
479.globl _savevr_29
480_savevr_29:
481 li r12,-48
482 stvx vr29,r12,r0
483.globl _savevr_30
484_savevr_30:
485 li r12,-32
486 stvx vr30,r12,r0
487.globl _savevr_31
488_savevr_31:
489 li r12,-16
490 stvx vr31,r12,r0
491 blr
492
493.globl _restvr_20
494_restvr_20:
495 li r12,-192
496 lvx vr20,r12,r0
497.globl _restvr_21
498_restvr_21:
499 li r12,-176
500 lvx vr21,r12,r0
501.globl _restvr_22
502_restvr_22:
503 li r12,-160
504 lvx vr22,r12,r0
505.globl _restvr_23
506_restvr_23:
507 li r12,-144
508 lvx vr23,r12,r0
509.globl _restvr_24
510_restvr_24:
511 li r12,-128
512 lvx vr24,r12,r0
513.globl _restvr_25
514_restvr_25:
515 li r12,-112
516 lvx vr25,r12,r0
517.globl _restvr_26
518_restvr_26:
519 li r12,-96
520 lvx vr26,r12,r0
521.globl _restvr_27
522_restvr_27:
523 li r12,-80
524 lvx vr27,r12,r0
525.globl _restvr_28
526_restvr_28:
527 li r12,-64
528 lvx vr28,r12,r0
529.globl _restvr_29
530_restvr_29:
531 li r12,-48
532 lvx vr29,r12,r0
533.globl _restvr_30
534_restvr_30:
535 li r12,-32
536 lvx vr30,r12,r0
537.globl _restvr_31
538_restvr_31:
539 li r12,-16
540 lvx vr31,r12,r0
541 blr
542
543#endif /* CONFIG_ALTIVEC */
544
359#endif /* CONFIG_PPC64 */ 545#endif /* CONFIG_PPC64 */
360 546
361#endif 547#endif
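
The _savevr_NN/_restvr_NN entry points added above are the out-of-line Altivec save/restore helpers the compiler may call instead of emitting inline saves; each label stores or loads one register and falls through to the next, with r0 pointing just past the save area. The offsets follow one rule, offset(vrN) = -16 * (32 - N), which matches the -192 ... -16 values in the listing; a quick check:

    /* Sketch of the offset rule behind the vector save/restore stubs above. */
    #include <stdio.h>

    int main(void)
    {
        for (int n = 20; n <= 31; n++)
            printf("vr%d at %d\n", n, -16 * (32 - n));  /* -192 ... -16 */
        return 0;
    }
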
diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c
index a73f0884d358..28337c9709ae 100644
--- a/arch/powerpc/math-emu/math_efp.c
+++ b/arch/powerpc/math-emu/math_efp.c
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include <linux/types.h> 22#include <linux/types.h>
23#include <linux/prctl.h>
23 24
24#include <asm/uaccess.h> 25#include <asm/uaccess.h>
25#include <asm/reg.h> 26#include <asm/reg.h>
@@ -275,21 +276,13 @@ int do_spe_mathemu(struct pt_regs *regs)
275 276
276 case EFSCTSF: 277 case EFSCTSF:
277 case EFSCTUF: 278 case EFSCTUF:
278 if (!((vb.wp[1] >> 23) == 0xff && ((vb.wp[1] & 0x7fffff) > 0))) { 279 if (SB_c == FP_CLS_NAN) {
279 /* NaN */ 280 vc.wp[1] = 0;
280 if (((vb.wp[1] >> 23) & 0xff) == 0) { 281 FP_SET_EXCEPTION(FP_EX_INVALID);
281 /* denorm */ 282 } else {
282 vc.wp[1] = 0x0; 283 SB_e += (func == EFSCTSF ? 31 : 32);
283 } else if ((vb.wp[1] >> 31) == 0) { 284 FP_TO_INT_ROUND_S(vc.wp[1], SB, 32,
284 /* positive normal */ 285 (func == EFSCTSF));
285 vc.wp[1] = (func == EFSCTSF) ?
286 0x7fffffff : 0xffffffff;
287 } else { /* negative normal */
288 vc.wp[1] = (func == EFSCTSF) ?
289 0x80000000 : 0x0;
290 }
291 } else { /* rB is NaN */
292 vc.wp[1] = 0x0;
293 } 286 }
294 goto update_regs; 287 goto update_regs;
295 288
@@ -306,16 +299,25 @@ int do_spe_mathemu(struct pt_regs *regs)
306 } 299 }
307 300
308 case EFSCTSI: 301 case EFSCTSI:
309 case EFSCTSIZ:
310 case EFSCTUI: 302 case EFSCTUI:
303 if (SB_c == FP_CLS_NAN) {
304 vc.wp[1] = 0;
305 FP_SET_EXCEPTION(FP_EX_INVALID);
306 } else {
307 FP_TO_INT_ROUND_S(vc.wp[1], SB, 32,
308 ((func & 0x3) != 0));
309 }
310 goto update_regs;
311
312 case EFSCTSIZ:
311 case EFSCTUIZ: 313 case EFSCTUIZ:
312 if (func & 0x4) { 314 if (SB_c == FP_CLS_NAN) {
313 _FP_ROUND(1, SB); 315 vc.wp[1] = 0;
316 FP_SET_EXCEPTION(FP_EX_INVALID);
314 } else { 317 } else {
315 _FP_ROUND_ZERO(1, SB); 318 FP_TO_INT_S(vc.wp[1], SB, 32,
319 ((func & 0x3) != 0));
316 } 320 }
317 FP_TO_INT_S(vc.wp[1], SB, 32,
318 (((func & 0x3) != 0) || SB_s));
319 goto update_regs; 321 goto update_regs;
320 322
321 default: 323 default:
@@ -404,22 +406,13 @@ cmp_s:
404 406
405 case EFDCTSF: 407 case EFDCTSF:
406 case EFDCTUF: 408 case EFDCTUF:
407 if (!((vb.wp[0] >> 20) == 0x7ff && 409 if (DB_c == FP_CLS_NAN) {
408 ((vb.wp[0] & 0xfffff) > 0 || (vb.wp[1] > 0)))) { 410 vc.wp[1] = 0;
409 /* not a NaN */ 411 FP_SET_EXCEPTION(FP_EX_INVALID);
410 if (((vb.wp[0] >> 20) & 0x7ff) == 0) { 412 } else {
411 /* denorm */ 413 DB_e += (func == EFDCTSF ? 31 : 32);
412 vc.wp[1] = 0x0; 414 FP_TO_INT_ROUND_D(vc.wp[1], DB, 32,
413 } else if ((vb.wp[0] >> 31) == 0) { 415 (func == EFDCTSF));
414 /* positive normal */
415 vc.wp[1] = (func == EFDCTSF) ?
416 0x7fffffff : 0xffffffff;
417 } else { /* negative normal */
418 vc.wp[1] = (func == EFDCTSF) ?
419 0x80000000 : 0x0;
420 }
421 } else { /* NaN */
422 vc.wp[1] = 0x0;
423 } 416 }
424 goto update_regs; 417 goto update_regs;
425 418
@@ -437,21 +430,35 @@ cmp_s:
437 430
438 case EFDCTUIDZ: 431 case EFDCTUIDZ:
439 case EFDCTSIDZ: 432 case EFDCTSIDZ:
440 _FP_ROUND_ZERO(2, DB); 433 if (DB_c == FP_CLS_NAN) {
441 FP_TO_INT_D(vc.dp[0], DB, 64, ((func & 0x1) == 0)); 434 vc.dp[0] = 0;
435 FP_SET_EXCEPTION(FP_EX_INVALID);
436 } else {
437 FP_TO_INT_D(vc.dp[0], DB, 64,
438 ((func & 0x1) == 0));
439 }
442 goto update_regs; 440 goto update_regs;
443 441
444 case EFDCTUI: 442 case EFDCTUI:
445 case EFDCTSI: 443 case EFDCTSI:
444 if (DB_c == FP_CLS_NAN) {
445 vc.wp[1] = 0;
446 FP_SET_EXCEPTION(FP_EX_INVALID);
447 } else {
448 FP_TO_INT_ROUND_D(vc.wp[1], DB, 32,
449 ((func & 0x3) != 0));
450 }
451 goto update_regs;
452
446 case EFDCTUIZ: 453 case EFDCTUIZ:
447 case EFDCTSIZ: 454 case EFDCTSIZ:
448 if (func & 0x4) { 455 if (DB_c == FP_CLS_NAN) {
449 _FP_ROUND(2, DB); 456 vc.wp[1] = 0;
457 FP_SET_EXCEPTION(FP_EX_INVALID);
450 } else { 458 } else {
451 _FP_ROUND_ZERO(2, DB); 459 FP_TO_INT_D(vc.wp[1], DB, 32,
460 ((func & 0x3) != 0));
452 } 461 }
453 FP_TO_INT_D(vc.wp[1], DB, 32,
454 (((func & 0x3) != 0) || DB_s));
455 goto update_regs; 462 goto update_regs;
456 463
457 default: 464 default:
@@ -556,37 +563,60 @@ cmp_d:
556 cmp = -1; 563 cmp = -1;
557 goto cmp_vs; 564 goto cmp_vs;
558 565
559 case EVFSCTSF:
560 __asm__ __volatile__ ("mtspr 512, %4\n"
561 "efsctsf %0, %2\n"
562 "efsctsf %1, %3\n"
563 : "=r" (vc.wp[0]), "=r" (vc.wp[1])
564 : "r" (vb.wp[0]), "r" (vb.wp[1]), "r" (0));
565 goto update_regs;
566
567 case EVFSCTUF: 566 case EVFSCTUF:
568 __asm__ __volatile__ ("mtspr 512, %4\n" 567 case EVFSCTSF:
569 "efsctuf %0, %2\n" 568 if (SB0_c == FP_CLS_NAN) {
570 "efsctuf %1, %3\n" 569 vc.wp[0] = 0;
571 : "=r" (vc.wp[0]), "=r" (vc.wp[1]) 570 FP_SET_EXCEPTION(FP_EX_INVALID);
572 : "r" (vb.wp[0]), "r" (vb.wp[1]), "r" (0)); 571 } else {
572 SB0_e += (func == EVFSCTSF ? 31 : 32);
573 FP_TO_INT_ROUND_S(vc.wp[0], SB0, 32,
574 (func == EVFSCTSF));
575 }
576 if (SB1_c == FP_CLS_NAN) {
577 vc.wp[1] = 0;
578 FP_SET_EXCEPTION(FP_EX_INVALID);
579 } else {
580 SB1_e += (func == EVFSCTSF ? 31 : 32);
581 FP_TO_INT_ROUND_S(vc.wp[1], SB1, 32,
582 (func == EVFSCTSF));
583 }
573 goto update_regs; 584 goto update_regs;
574 585
575 case EVFSCTUI: 586 case EVFSCTUI:
576 case EVFSCTSI: 587 case EVFSCTSI:
588 if (SB0_c == FP_CLS_NAN) {
589 vc.wp[0] = 0;
590 FP_SET_EXCEPTION(FP_EX_INVALID);
591 } else {
592 FP_TO_INT_ROUND_S(vc.wp[0], SB0, 32,
593 ((func & 0x3) != 0));
594 }
595 if (SB1_c == FP_CLS_NAN) {
596 vc.wp[1] = 0;
597 FP_SET_EXCEPTION(FP_EX_INVALID);
598 } else {
599 FP_TO_INT_ROUND_S(vc.wp[1], SB1, 32,
600 ((func & 0x3) != 0));
601 }
602 goto update_regs;
603
577 case EVFSCTUIZ: 604 case EVFSCTUIZ:
578 case EVFSCTSIZ: 605 case EVFSCTSIZ:
579 if (func & 0x4) { 606 if (SB0_c == FP_CLS_NAN) {
580 _FP_ROUND(1, SB0); 607 vc.wp[0] = 0;
581 _FP_ROUND(1, SB1); 608 FP_SET_EXCEPTION(FP_EX_INVALID);
582 } else { 609 } else {
583 _FP_ROUND_ZERO(1, SB0); 610 FP_TO_INT_S(vc.wp[0], SB0, 32,
584 _FP_ROUND_ZERO(1, SB1); 611 ((func & 0x3) != 0));
612 }
613 if (SB1_c == FP_CLS_NAN) {
614 vc.wp[1] = 0;
615 FP_SET_EXCEPTION(FP_EX_INVALID);
616 } else {
617 FP_TO_INT_S(vc.wp[1], SB1, 32,
618 ((func & 0x3) != 0));
585 } 619 }
586 FP_TO_INT_S(vc.wp[0], SB0, 32,
587 (((func & 0x3) != 0) || SB0_s));
588 FP_TO_INT_S(vc.wp[1], SB1, 32,
589 (((func & 0x3) != 0) || SB1_s));
590 goto update_regs; 620 goto update_regs;
591 621
592 default: 622 default:
@@ -630,9 +660,27 @@ update_ccr:
630 regs->ccr |= (IR << ((7 - ((speinsn >> 23) & 0x7)) << 2)); 660 regs->ccr |= (IR << ((7 - ((speinsn >> 23) & 0x7)) << 2));
631 661
632update_regs: 662update_regs:
633 __FPU_FPSCR &= ~FP_EX_MASK; 663 /*
664 * If the "invalid" exception sticky bit was set by the
665 * processor for non-finite input, but was not set before the
666 * instruction being emulated, clear it. Likewise for the
667 * "underflow" bit, which may have been set by the processor
668 * for exact underflow, whereas IEEE 754 semantics only set the
669 * flag for inexact underflow. Other sticky
670 * exceptions will only be set by the processor when they are
671 * correct according to IEEE 754 semantics, and we must not
672 * clear sticky bits that were already set before the emulated
673 * instruction as they represent the user-visible sticky
674 * exception status. "inexact" traps to kernel are not
675 * required for IEEE semantics and are not enabled by default,
676 * so the "inexact" sticky bit may have been set by a previous
677 * instruction without the kernel being aware of it.
678 */
679 __FPU_FPSCR
680 &= ~(FP_EX_INVALID | FP_EX_UNDERFLOW) | current->thread.spefscr_last;
634 __FPU_FPSCR |= (FP_CUR_EXCEPTIONS & FP_EX_MASK); 681 __FPU_FPSCR |= (FP_CUR_EXCEPTIONS & FP_EX_MASK);
635 mtspr(SPRN_SPEFSCR, __FPU_FPSCR); 682 mtspr(SPRN_SPEFSCR, __FPU_FPSCR);
683 current->thread.spefscr_last = __FPU_FPSCR;
636 684
637 current->thread.evr[fc] = vc.wp[0]; 685 current->thread.evr[fc] = vc.wp[0];
638 regs->gpr[fc] = vc.wp[1]; 686 regs->gpr[fc] = vc.wp[1];
@@ -644,6 +692,23 @@ update_regs:
644 pr_debug("va: %08x %08x\n", va.wp[0], va.wp[1]); 692 pr_debug("va: %08x %08x\n", va.wp[0], va.wp[1]);
645 pr_debug("vb: %08x %08x\n", vb.wp[0], vb.wp[1]); 693 pr_debug("vb: %08x %08x\n", vb.wp[0], vb.wp[1]);
646 694
695 if (current->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
696 if ((FP_CUR_EXCEPTIONS & FP_EX_DIVZERO)
697 && (current->thread.fpexc_mode & PR_FP_EXC_DIV))
698 return 1;
699 if ((FP_CUR_EXCEPTIONS & FP_EX_OVERFLOW)
700 && (current->thread.fpexc_mode & PR_FP_EXC_OVF))
701 return 1;
702 if ((FP_CUR_EXCEPTIONS & FP_EX_UNDERFLOW)
703 && (current->thread.fpexc_mode & PR_FP_EXC_UND))
704 return 1;
705 if ((FP_CUR_EXCEPTIONS & FP_EX_INEXACT)
706 && (current->thread.fpexc_mode & PR_FP_EXC_RES))
707 return 1;
708 if ((FP_CUR_EXCEPTIONS & FP_EX_INVALID)
709 && (current->thread.fpexc_mode & PR_FP_EXC_INV))
710 return 1;
711 }
647 return 0; 712 return 0;
648 713
649illegal: 714illegal:
@@ -662,21 +727,28 @@ int speround_handler(struct pt_regs *regs)
662{ 727{
663 union dw_union fgpr; 728 union dw_union fgpr;
664 int s_lo, s_hi; 729 int s_lo, s_hi;
665 unsigned long speinsn, type, fc; 730 int lo_inexact, hi_inexact;
731 int fp_result;
732 unsigned long speinsn, type, fb, fc, fptype, func;
666 733
667 if (get_user(speinsn, (unsigned int __user *) regs->nip)) 734 if (get_user(speinsn, (unsigned int __user *) regs->nip))
668 return -EFAULT; 735 return -EFAULT;
669 if ((speinsn >> 26) != 4) 736 if ((speinsn >> 26) != 4)
670 return -EINVAL; /* not an spe instruction */ 737 return -EINVAL; /* not an spe instruction */
671 738
672 type = insn_type(speinsn & 0x7ff); 739 func = speinsn & 0x7ff;
740 type = insn_type(func);
673 if (type == XCR) return -ENOSYS; 741 if (type == XCR) return -ENOSYS;
674 742
675 __FPU_FPSCR = mfspr(SPRN_SPEFSCR); 743 __FPU_FPSCR = mfspr(SPRN_SPEFSCR);
676 pr_debug("speinsn:%08lx spefscr:%08lx\n", speinsn, __FPU_FPSCR); 744 pr_debug("speinsn:%08lx spefscr:%08lx\n", speinsn, __FPU_FPSCR);
677 745
746 fptype = (speinsn >> 5) & 0x7;
747
678 /* No need to round if the result is exact */ 748 /* No need to round if the result is exact */
679 if (!(__FPU_FPSCR & FP_EX_INEXACT)) 749 lo_inexact = __FPU_FPSCR & (SPEFSCR_FG | SPEFSCR_FX);
750 hi_inexact = __FPU_FPSCR & (SPEFSCR_FGH | SPEFSCR_FXH);
751 if (!(lo_inexact || (hi_inexact && fptype == VCT)))
680 return 0; 752 return 0;
681 753
682 fc = (speinsn >> 21) & 0x1f; 754 fc = (speinsn >> 21) & 0x1f;
@@ -685,9 +757,68 @@ int speround_handler(struct pt_regs *regs)
685 fgpr.wp[0] = current->thread.evr[fc]; 757 fgpr.wp[0] = current->thread.evr[fc];
686 fgpr.wp[1] = regs->gpr[fc]; 758 fgpr.wp[1] = regs->gpr[fc];
687 759
760 fb = (speinsn >> 11) & 0x1f;
761 switch (func) {
762 case EFSCTUIZ:
763 case EFSCTSIZ:
764 case EVFSCTUIZ:
765 case EVFSCTSIZ:
766 case EFDCTUIDZ:
767 case EFDCTSIDZ:
768 case EFDCTUIZ:
769 case EFDCTSIZ:
770 /*
771 * These instructions always round to zero,
772 * independent of the rounding mode.
773 */
774 return 0;
775
776 case EFSCTUI:
777 case EFSCTUF:
778 case EVFSCTUI:
779 case EVFSCTUF:
780 case EFDCTUI:
781 case EFDCTUF:
782 fp_result = 0;
783 s_lo = 0;
784 s_hi = 0;
785 break;
786
787 case EFSCTSI:
788 case EFSCTSF:
789 fp_result = 0;
790 /* Recover the sign of a zero result if possible. */
791 if (fgpr.wp[1] == 0)
792 s_lo = regs->gpr[fb] & SIGN_BIT_S;
793 break;
794
795 case EVFSCTSI:
796 case EVFSCTSF:
797 fp_result = 0;
798 /* Recover the sign of a zero result if possible. */
799 if (fgpr.wp[1] == 0)
800 s_lo = regs->gpr[fb] & SIGN_BIT_S;
801 if (fgpr.wp[0] == 0)
802 s_hi = current->thread.evr[fb] & SIGN_BIT_S;
803 break;
804
805 case EFDCTSI:
806 case EFDCTSF:
807 fp_result = 0;
808 s_hi = s_lo;
809 /* Recover the sign of a zero result if possible. */
810 if (fgpr.wp[1] == 0)
811 s_hi = current->thread.evr[fb] & SIGN_BIT_S;
812 break;
813
814 default:
815 fp_result = 1;
816 break;
817 }
818
688 pr_debug("round fgpr: %08x %08x\n", fgpr.wp[0], fgpr.wp[1]); 819 pr_debug("round fgpr: %08x %08x\n", fgpr.wp[0], fgpr.wp[1]);
689 820
690 switch ((speinsn >> 5) & 0x7) { 821 switch (fptype) {
691 /* Since SPE instructions on E500 core can handle round to nearest 822 /* Since SPE instructions on E500 core can handle round to nearest
692 * and round toward zero with IEEE-754 complied, we just need 823 * and round toward zero with IEEE-754 complied, we just need
693 * to handle round toward +Inf and round toward -Inf by software. 824 * to handle round toward +Inf and round toward -Inf by software.
@@ -696,25 +827,52 @@ int speround_handler(struct pt_regs *regs)
696 if ((FP_ROUNDMODE) == FP_RND_PINF) { 827 if ((FP_ROUNDMODE) == FP_RND_PINF) {
697 if (!s_lo) fgpr.wp[1]++; /* Z > 0, choose Z1 */ 828 if (!s_lo) fgpr.wp[1]++; /* Z > 0, choose Z1 */
698 } else { /* round to -Inf */ 829 } else { /* round to -Inf */
699 if (s_lo) fgpr.wp[1]++; /* Z < 0, choose Z2 */ 830 if (s_lo) {
831 if (fp_result)
832 fgpr.wp[1]++; /* Z < 0, choose Z2 */
833 else
834 fgpr.wp[1]--; /* Z < 0, choose Z2 */
835 }
700 } 836 }
701 break; 837 break;
702 838
703 case DPFP: 839 case DPFP:
704 if (FP_ROUNDMODE == FP_RND_PINF) { 840 if (FP_ROUNDMODE == FP_RND_PINF) {
705 if (!s_hi) fgpr.dp[0]++; /* Z > 0, choose Z1 */ 841 if (!s_hi) {
842 if (fp_result)
843 fgpr.dp[0]++; /* Z > 0, choose Z1 */
844 else
845 fgpr.wp[1]++; /* Z > 0, choose Z1 */
846 }
706 } else { /* round to -Inf */ 847 } else { /* round to -Inf */
707 if (s_hi) fgpr.dp[0]++; /* Z < 0, choose Z2 */ 848 if (s_hi) {
849 if (fp_result)
850 fgpr.dp[0]++; /* Z < 0, choose Z2 */
851 else
852 fgpr.wp[1]--; /* Z < 0, choose Z2 */
853 }
708 } 854 }
709 break; 855 break;
710 856
711 case VCT: 857 case VCT:
712 if (FP_ROUNDMODE == FP_RND_PINF) { 858 if (FP_ROUNDMODE == FP_RND_PINF) {
713 if (!s_lo) fgpr.wp[1]++; /* Z_low > 0, choose Z1 */ 859 if (lo_inexact && !s_lo)
714 if (!s_hi) fgpr.wp[0]++; /* Z_high word > 0, choose Z1 */ 860 fgpr.wp[1]++; /* Z_low > 0, choose Z1 */
861 if (hi_inexact && !s_hi)
862 fgpr.wp[0]++; /* Z_high word > 0, choose Z1 */
715 } else { /* round to -Inf */ 863 } else { /* round to -Inf */
716 if (s_lo) fgpr.wp[1]++; /* Z_low < 0, choose Z2 */ 864 if (lo_inexact && s_lo) {
717 if (s_hi) fgpr.wp[0]++; /* Z_high < 0, choose Z2 */ 865 if (fp_result)
866 fgpr.wp[1]++; /* Z_low < 0, choose Z2 */
867 else
868 fgpr.wp[1]--; /* Z_low < 0, choose Z2 */
869 }
870 if (hi_inexact && s_hi) {
871 if (fp_result)
872 fgpr.wp[0]++; /* Z_high < 0, choose Z2 */
873 else
874 fgpr.wp[0]--; /* Z_high < 0, choose Z2 */
875 }
718 } 876 }
719 break; 877 break;
720 878
@@ -727,6 +885,8 @@ int speround_handler(struct pt_regs *regs)
727 885
728 pr_debug(" to fgpr: %08x %08x\n", fgpr.wp[0], fgpr.wp[1]); 886 pr_debug(" to fgpr: %08x %08x\n", fgpr.wp[0], fgpr.wp[1]);
729 887
888 if (current->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
889 return (current->thread.fpexc_mode & PR_FP_EXC_RES) ? 1 : 0;
730 return 0; 890 return 0;
731} 891}
732 892
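
The math_efp.c rework above reports FP_EX_INVALID for NaN conversions and, when the task has opted into software FP exception reporting, makes do_spe_mathemu()/speround_handler() return non-zero, presumably so the trap path can signal the task. A hedged userspace example of opting in via the standard prctl() interface whose flags the hunk tests (PR_FP_EXC_SW_ENABLE and friends); this only has an effect on an SPE-capable PowerPC system:

    /* Userspace sketch: request software floating-point exception reporting
     * so that invalid/overflow/divide results from emulated SPE instructions
     * are reported rather than silently fixed up.  Minimal error handling. */
    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
        unsigned int mode = 0;

        if (prctl(PR_SET_FPEXC, PR_FP_EXC_SW_ENABLE | PR_FP_EXC_DIV |
                                PR_FP_EXC_OVF | PR_FP_EXC_INV) != 0)
            perror("PR_SET_FPEXC");
        if (prctl(PR_GET_FPEXC, &mode) == 0)
            printf("fpexc mode: 0x%x\n", mode);
        return 0;
    }
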
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 07ba45b0f07c..94cd728166d3 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -52,6 +52,7 @@
52#include <asm/smp.h> 52#include <asm/smp.h>
53#include <asm/machdep.h> 53#include <asm/machdep.h>
54#include <asm/setup.h> 54#include <asm/setup.h>
55#include <asm/paca.h>
55 56
56#include "mmu_decl.h" 57#include "mmu_decl.h"
57 58
@@ -171,11 +172,10 @@ unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
171 return 1UL << camsize; 172 return 1UL << camsize;
172} 173}
173 174
174unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx) 175static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
176 unsigned long ram, int max_cam_idx)
175{ 177{
176 int i; 178 int i;
177 unsigned long virt = PAGE_OFFSET;
178 phys_addr_t phys = memstart_addr;
179 unsigned long amount_mapped = 0; 179 unsigned long amount_mapped = 0;
180 180
181 /* Calculate CAM values */ 181 /* Calculate CAM values */
@@ -192,9 +192,23 @@ unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx)
192 } 192 }
193 tlbcam_index = i; 193 tlbcam_index = i;
194 194
195#ifdef CONFIG_PPC64
196 get_paca()->tcd.esel_next = i;
197 get_paca()->tcd.esel_max = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
198 get_paca()->tcd.esel_first = i;
199#endif
200
195 return amount_mapped; 201 return amount_mapped;
196} 202}
197 203
204unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx)
205{
206 unsigned long virt = PAGE_OFFSET;
207 phys_addr_t phys = memstart_addr;
208
209 return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx);
210}
211
198#ifdef CONFIG_PPC32 212#ifdef CONFIG_PPC32
199 213
200#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS) 214#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
@@ -222,7 +236,9 @@ void __init adjust_total_lowmem(void)
222 /* adjust lowmem size to __max_low_memory */ 236 /* adjust lowmem size to __max_low_memory */
223 ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem); 237 ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem);
224 238
239 i = switch_to_as1();
225 __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM); 240 __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM);
241 restore_to_as0(i, 0, 0, 1);
226 242
227 pr_info("Memory CAM mapping: "); 243 pr_info("Memory CAM mapping: ");
228 for (i = 0; i < tlbcam_index - 1; i++) 244 for (i = 0; i < tlbcam_index - 1; i++)
@@ -241,4 +257,62 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
241 /* 64M mapped initially according to head_fsl_booke.S */ 257 /* 64M mapped initially according to head_fsl_booke.S */
242 memblock_set_current_limit(min_t(u64, limit, 0x04000000)); 258 memblock_set_current_limit(min_t(u64, limit, 0x04000000));
243} 259}
260
261#ifdef CONFIG_RELOCATABLE
262int __initdata is_second_reloc;
263notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
264{
265 unsigned long base = KERNELBASE;
266
267 kernstart_addr = start;
268 if (is_second_reloc) {
269 virt_phys_offset = PAGE_OFFSET - memstart_addr;
270 return;
271 }
272
273 /*
274 * Relocatable kernel support is based on processing of dynamic
275 * relocation entries. Before we get the real memstart_addr,
276 * we will compute the virt_phys_offset like this:
277 * virt_phys_offset = stext.run - kernstart_addr
278 *
279 * stext.run = (KERNELBASE & ~0x3ffffff) +
280 * (kernstart_addr & 0x3ffffff)
281 * When we relocate, we have :
282 *
283 * (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff)
284 *
285 * hence:
286 * virt_phys_offset = (KERNELBASE & ~0x3ffffff) -
287 * (kernstart_addr & ~0x3ffffff)
288 *
289 */
290 start &= ~0x3ffffff;
291 base &= ~0x3ffffff;
292 virt_phys_offset = base - start;
293 early_get_first_memblock_info(__va(dt_ptr), NULL);
294 /*
295 * We now have the real memstart_addr, so check whether it is the
296 * same address that PAGE_OFFSET currently maps to. If it is not,
297 * we have to change the mapping of PAGE_OFFSET to memstart_addr
298 * and do a second relocation.
299 */
300 if (start != memstart_addr) {
301 int n;
302 long offset = start - memstart_addr;
303
304 is_second_reloc = 1;
305 n = switch_to_as1();
306 /* map a 64M area for the second relocation */
307 if (memstart_addr > start)
308 map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM);
309 else
310 map_mem_in_cams_addr(start, PAGE_OFFSET + offset,
311 0x4000000, CONFIG_LOWMEM_CAM_NUM);
312 restore_to_as0(n, offset, __va(dt_ptr), 1);
313 /* We should never reach here */
314 panic("Relocation error");
315 }
316}
317#endif
244#endif 318#endif
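
relocate_init() above computes virt_phys_offset for the first relocation pass by truncating both KERNELBASE and the physical load address to a 64MB boundary (mask ~0x3ffffff) before subtracting. A worked example with a made-up load address (the 0x05000000 value is illustrative, not from the patch):

    /* Worked example of the first-pass virt_phys_offset computation above. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long kernelbase = 0xc0000000UL;  /* typical 32-bit KERNELBASE */
        unsigned long start      = 0x05000000UL;  /* hypothetical load address */

        unsigned long base = kernelbase & ~0x3ffffffUL;   /* 0xc0000000 */
        unsigned long phys = start      & ~0x3ffffffUL;   /* 0x04000000 */

        printf("virt_phys_offset = 0x%lx\n", base - phys); /* 0xbc000000 */
        return 0;
    }
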
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index d3cbda62857b..1136d26a95ae 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -148,7 +148,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
148 and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/ 148 and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
149 andc r0,r30,r0 /* r0 = pte & ~r0 */ 149 andc r0,r30,r0 /* r0 = pte & ~r0 */
150 rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */ 150 rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */
151 ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */ 151 /*
152 * Always add "C" bit for perf. Memory coherence is always enabled
153 */
154 ori r3,r3,HPTE_R_C | HPTE_R_M
152 155
153 /* We eventually do the icache sync here (maybe inline that 156 /* We eventually do the icache sync here (maybe inline that
154 * code rather than call a C function...) 157 * code rather than call a C function...)
@@ -457,7 +460,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
457 and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/ 460 and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
458 andc r0,r3,r0 /* r0 = pte & ~r0 */ 461 andc r0,r3,r0 /* r0 = pte & ~r0 */
459 rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */ 462 rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */
460 ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */ 463 /*
464 * Always add "C" bit for perf. Memory coherence is always enabled
465 */
466 ori r3,r3,HPTE_R_C | HPTE_R_M
461 467
462 /* We eventually do the icache sync here (maybe inline that 468 /* We eventually do the icache sync here (maybe inline that
463 * code rather than call a C function...) 469 * code rather than call a C function...)
@@ -795,7 +801,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
795 and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/ 801 and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
796 andc r0,r30,r0 /* r0 = pte & ~r0 */ 802 andc r0,r30,r0 /* r0 = pte & ~r0 */
797 rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */ 803 rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */
798 ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */ 804 /*
805 * Always add "C" bit for perf. Memory coherence is always enabled
806 */
807 ori r3,r3,HPTE_R_C | HPTE_R_M
799 808
800 /* We eventually do the icache sync here (maybe inline that 809 /* We eventually do the icache sync here (maybe inline that
801 * code rather than call a C function...) 810 * code rather than call a C function...)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 6176b3cdf579..de6881259aef 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -169,9 +169,10 @@ static unsigned long htab_convert_pte_flags(unsigned long pteflags)
169 if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) && 169 if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
170 (pteflags & _PAGE_DIRTY))) 170 (pteflags & _PAGE_DIRTY)))
171 rflags |= 1; 171 rflags |= 1;
172 172 /*
173 /* Always add C */ 173 * Always add "C" bit for perf. Memory coherence is always enabled
174 return rflags | HPTE_R_C; 174 */
175 return rflags | HPTE_R_C | HPTE_R_M;
175} 176}
176 177
177int htab_bolt_mapping(unsigned long vstart, unsigned long vend, 178int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 34de9e0cdc34..826893fcb3a7 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -127,7 +127,11 @@ repeat:
127 127
128 /* Add in WIMG bits */ 128 /* Add in WIMG bits */
129 rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE | 129 rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
130 _PAGE_COHERENT | _PAGE_GUARDED)); 130 _PAGE_GUARDED));
131 /*
132 * Always enable memory coherence
133 */
134 rflags |= HPTE_R_M;
131 135
132 /* Insert into the hash table, primary slot */ 136 /* Insert into the hash table, primary slot */
133 slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0, 137 slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c
index 74551b5e41e5..5e4ee2573903 100644
--- a/arch/powerpc/mm/hugetlbpage-book3e.c
+++ b/arch/powerpc/mm/hugetlbpage-book3e.c
@@ -8,6 +8,44 @@
8#include <linux/mm.h> 8#include <linux/mm.h>
9#include <linux/hugetlb.h> 9#include <linux/hugetlb.h>
10 10
11#ifdef CONFIG_PPC_FSL_BOOK3E
12#ifdef CONFIG_PPC64
13static inline int tlb1_next(void)
14{
15 struct paca_struct *paca = get_paca();
16 struct tlb_core_data *tcd;
17 int this, next;
18
19 tcd = paca->tcd_ptr;
20 this = tcd->esel_next;
21
22 next = this + 1;
23 if (next >= tcd->esel_max)
24 next = tcd->esel_first;
25
26 tcd->esel_next = next;
27 return this;
28}
29#else
30static inline int tlb1_next(void)
31{
32 int index, ncams;
33
34 ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
35
36 index = __get_cpu_var(next_tlbcam_idx);
37
38 /* Just round-robin the entries and wrap when we hit the end */
39 if (unlikely(index == ncams - 1))
40 __get_cpu_var(next_tlbcam_idx) = tlbcam_index;
41 else
42 __get_cpu_var(next_tlbcam_idx)++;
43
44 return index;
45}
46#endif /* !PPC64 */
47#endif /* FSL */
48
11static inline int mmu_get_tsize(int psize) 49static inline int mmu_get_tsize(int psize)
12{ 50{
13 return mmu_psize_defs[psize].enc; 51 return mmu_psize_defs[psize].enc;
@@ -47,7 +85,7 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
47 struct mm_struct *mm; 85 struct mm_struct *mm;
48 86
49#ifdef CONFIG_PPC_FSL_BOOK3E 87#ifdef CONFIG_PPC_FSL_BOOK3E
50 int index, ncams; 88 int index;
51#endif 89#endif
52 90
53 if (unlikely(is_kernel_addr(ea))) 91 if (unlikely(is_kernel_addr(ea)))
@@ -77,18 +115,11 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
77 } 115 }
78 116
79#ifdef CONFIG_PPC_FSL_BOOK3E 117#ifdef CONFIG_PPC_FSL_BOOK3E
80 ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
81
82 /* We have to use the CAM(TLB1) on FSL parts for hugepages */ 118 /* We have to use the CAM(TLB1) on FSL parts for hugepages */
83 index = __get_cpu_var(next_tlbcam_idx); 119 index = tlb1_next();
84 mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1)); 120 mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));
85
86 /* Just round-robin the entries and wrap when we hit the end */
87 if (unlikely(index == ncams - 1))
88 __get_cpu_var(next_tlbcam_idx) = tlbcam_index;
89 else
90 __get_cpu_var(next_tlbcam_idx)++;
91#endif 121#endif
122
92 mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize); 123 mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
93 mas2 = ea & ~((1UL << shift) - 1); 124 mas2 = ea & ~((1UL << shift) - 1);
94 mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK; 125 mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
@@ -103,7 +134,8 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
103 if (mmu_has_feature(MMU_FTR_USE_PAIRED_MAS)) { 134 if (mmu_has_feature(MMU_FTR_USE_PAIRED_MAS)) {
104 mtspr(SPRN_MAS7_MAS3, mas7_3); 135 mtspr(SPRN_MAS7_MAS3, mas7_3);
105 } else { 136 } else {
106 mtspr(SPRN_MAS7, upper_32_bits(mas7_3)); 137 if (mmu_has_feature(MMU_FTR_BIG_PHYS))
138 mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
107 mtspr(SPRN_MAS3, lower_32_bits(mas7_3)); 139 mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
108 } 140 }
109 141
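
tlb1_next() above hands out TLB1 entry selects round-robin: on 64-bit it walks the per-core tlb_core_data window [esel_first, esel_max) and wraps, while on 32-bit it keeps the old per-cpu next_tlbcam_idx scheme. A standalone sketch of the 64-bit wrap-around behaviour with made-up bounds:

    /* Sketch of the per-core round-robin done by tlb1_next() above. */
    #include <stdio.h>

    struct tcd_sketch { int esel_first, esel_max, esel_next; };

    static int tlb1_next_sketch(struct tcd_sketch *tcd)
    {
        int this = tcd->esel_next;
        int next = this + 1;

        if (next >= tcd->esel_max)      /* ran past the last entry... */
            next = tcd->esel_first;     /* ...wrap back to the first free one */
        tcd->esel_next = next;
        return this;
    }

    int main(void)
    {
        struct tcd_sketch tcd = { .esel_first = 4, .esel_max = 8, .esel_next = 4 };

        for (int i = 0; i < 6; i++)
            printf("%d ", tlb1_next_sketch(&tcd));   /* 4 5 6 7 4 5 */
        printf("\n");
        return 0;
    }
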
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index 0b7fb6761015..a5bcf9301196 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -99,6 +99,10 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
99 /* Add in WIMG bits */ 99 /* Add in WIMG bits */
100 rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE | 100 rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
101 _PAGE_COHERENT | _PAGE_GUARDED)); 101 _PAGE_COHERENT | _PAGE_GUARDED));
102 /*
103 * Always enable memory coherence
104 */
105 rflags |= HPTE_R_M;
102 106
103 slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0, 107 slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
104 mmu_psize, ssize); 108 mmu_psize, ssize);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 8c1dd23652a1..4b5cd5c2594d 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -307,6 +307,12 @@ static void __init register_page_bootmem_info(void)
307 307
308void __init mem_init(void) 308void __init mem_init(void)
309{ 309{
310 /*
311 * book3s is limited to 16 page sizes due to encoding this in
312 * a 4-bit field for slices.
313 */
314 BUILD_BUG_ON(MMU_PAGE_COUNT > 16);
315
310#ifdef CONFIG_SWIOTLB 316#ifdef CONFIG_SWIOTLB
311 swiotlb_init(0); 317 swiotlb_init(0);
312#endif 318#endif
@@ -507,7 +513,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
507 * System memory should not be in /proc/iomem but various tools expect it 513 * System memory should not be in /proc/iomem but various tools expect it
508 * (eg kdump). 514 * (eg kdump).
509 */ 515 */
510static int add_system_ram_resources(void) 516static int __init add_system_ram_resources(void)
511{ 517{
512 struct memblock_region *reg; 518 struct memblock_region *reg;
513 519
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 83eb5d5f53d5..9615d82919b8 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -148,6 +148,8 @@ extern unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
148extern void MMU_init_hw(void); 148extern void MMU_init_hw(void);
149extern unsigned long mmu_mapin_ram(unsigned long top); 149extern unsigned long mmu_mapin_ram(unsigned long top);
150extern void adjust_total_lowmem(void); 150extern void adjust_total_lowmem(void);
151extern int switch_to_as1(void);
152extern void restore_to_as0(int esel, int offset, void *dt_ptr, int bootcpu);
151#endif 153#endif
152extern void loadcam_entry(unsigned int index); 154extern void loadcam_entry(unsigned int index);
153 155
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 5a944f25e94f..86a63de072c6 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -31,6 +31,8 @@
31#include <asm/sparsemem.h> 31#include <asm/sparsemem.h>
32#include <asm/prom.h> 32#include <asm/prom.h>
33#include <asm/smp.h> 33#include <asm/smp.h>
34#include <asm/cputhreads.h>
35#include <asm/topology.h>
34#include <asm/firmware.h> 36#include <asm/firmware.h>
35#include <asm/paca.h> 37#include <asm/paca.h>
36#include <asm/hvcall.h> 38#include <asm/hvcall.h>
@@ -152,9 +154,22 @@ static void __init get_node_active_region(unsigned long pfn,
152 } 154 }
153} 155}
154 156
155static void map_cpu_to_node(int cpu, int node) 157static void reset_numa_cpu_lookup_table(void)
158{
159 unsigned int cpu;
160
161 for_each_possible_cpu(cpu)
162 numa_cpu_lookup_table[cpu] = -1;
163}
164
165static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
156{ 166{
157 numa_cpu_lookup_table[cpu] = node; 167 numa_cpu_lookup_table[cpu] = node;
168}
169
170static void map_cpu_to_node(int cpu, int node)
171{
172 update_numa_cpu_lookup_table(cpu, node);
158 173
159 dbg("adding cpu %d to node %d\n", cpu, node); 174 dbg("adding cpu %d to node %d\n", cpu, node);
160 175
@@ -522,11 +537,24 @@ static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
522 */ 537 */
523static int numa_setup_cpu(unsigned long lcpu) 538static int numa_setup_cpu(unsigned long lcpu)
524{ 539{
525 int nid = 0; 540 int nid;
526 struct device_node *cpu = of_get_cpu_node(lcpu, NULL); 541 struct device_node *cpu;
542
543 /*
544 * If a valid cpu-to-node mapping is already available, use it
545 * directly instead of querying the firmware, since it represents
546 * the most recent mapping notified to us by the platform (eg: VPHN).
547 */
548 if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
549 map_cpu_to_node(lcpu, nid);
550 return nid;
551 }
552
553 cpu = of_get_cpu_node(lcpu, NULL);
527 554
528 if (!cpu) { 555 if (!cpu) {
529 WARN_ON(1); 556 WARN_ON(1);
557 nid = 0;
530 goto out; 558 goto out;
531 } 559 }
532 560
@@ -542,16 +570,38 @@ out:
542 return nid; 570 return nid;
543} 571}
544 572
573static void verify_cpu_node_mapping(int cpu, int node)
574{
575 int base, sibling, i;
576
577 /* Verify that all the threads in the core belong to the same node */
578 base = cpu_first_thread_sibling(cpu);
579
580 for (i = 0; i < threads_per_core; i++) {
581 sibling = base + i;
582
583 if (sibling == cpu || cpu_is_offline(sibling))
584 continue;
585
586 if (cpu_to_node(sibling) != node) {
587 WARN(1, "CPU thread siblings %d and %d don't belong"
588 " to the same node!\n", cpu, sibling);
589 break;
590 }
591 }
592}
593
545static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action, 594static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
546 void *hcpu) 595 void *hcpu)
547{ 596{
548 unsigned long lcpu = (unsigned long)hcpu; 597 unsigned long lcpu = (unsigned long)hcpu;
549 int ret = NOTIFY_DONE; 598 int ret = NOTIFY_DONE, nid;
550 599
551 switch (action) { 600 switch (action) {
552 case CPU_UP_PREPARE: 601 case CPU_UP_PREPARE:
553 case CPU_UP_PREPARE_FROZEN: 602 case CPU_UP_PREPARE_FROZEN:
554 numa_setup_cpu(lcpu); 603 nid = numa_setup_cpu(lcpu);
604 verify_cpu_node_mapping((int)lcpu, nid);
555 ret = NOTIFY_OK; 605 ret = NOTIFY_OK;
556 break; 606 break;
557#ifdef CONFIG_HOTPLUG_CPU 607#ifdef CONFIG_HOTPLUG_CPU
@@ -1069,6 +1119,7 @@ void __init do_init_bootmem(void)
1069 */ 1119 */
1070 setup_node_to_cpumask_map(); 1120 setup_node_to_cpumask_map();
1071 1121
1122 reset_numa_cpu_lookup_table();
1072 register_cpu_notifier(&ppc64_numa_nb); 1123 register_cpu_notifier(&ppc64_numa_nb);
1073 cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE, 1124 cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
1074 (void *)(unsigned long)boot_cpuid); 1125 (void *)(unsigned long)boot_cpuid);
@@ -1447,6 +1498,33 @@ static int update_cpu_topology(void *data)
1447 return 0; 1498 return 0;
1448} 1499}
1449 1500
1501static int update_lookup_table(void *data)
1502{
1503 struct topology_update_data *update;
1504
1505 if (!data)
1506 return -EINVAL;
1507
1508 /*
1509 * Upon topology update, the numa-cpu lookup table needs to be updated
1510 * for all threads in the core, including offline CPUs, to ensure that
1511 * future hotplug operations respect the cpu-to-node associativity
1512 * properly.
1513 */
1514 for (update = data; update; update = update->next) {
1515 int nid, base, j;
1516
1517 nid = update->new_nid;
1518 base = cpu_first_thread_sibling(update->cpu);
1519
1520 for (j = 0; j < threads_per_core; j++) {
1521 update_numa_cpu_lookup_table(base + j, nid);
1522 }
1523 }
1524
1525 return 0;
1526}
1527
1450/* 1528/*
1451 * Update the node maps and sysfs entries for each cpu whose home node 1529 * Update the node maps and sysfs entries for each cpu whose home node
1452 * has changed. Returns 1 when the topology has changed, and 0 otherwise. 1530 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
@@ -1515,6 +1593,14 @@ int arch_update_cpu_topology(void)
1515 1593
1516 stop_machine(update_cpu_topology, &updates[0], &updated_cpus); 1594 stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
1517 1595
1596 /*
1597 * Update the numa-cpu lookup table with the new mappings, even for
1598 * offline CPUs. It is best to perform this update from the stop-
1599 * machine context.
1600 */
1601 stop_machine(update_lookup_table, &updates[0],
1602 cpumask_of(raw_smp_processor_id()));
1603
1518 for (ud = &updates[0]; ud; ud = ud->next) { 1604 for (ud = &updates[0]; ud; ud = ud->next) {
1519 unregister_cpu_under_node(ud->cpu, ud->old_nid); 1605 unregister_cpu_under_node(ud->cpu, ud->old_nid);
1520 register_cpu_under_node(ud->cpu, ud->new_nid); 1606 register_cpu_under_node(ud->cpu, ud->new_nid);
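
The numa.c changes above seed numa_cpu_lookup_table[] to -1, prefer a cached cpu-to-node entry over a fresh firmware query in numa_setup_cpu(), and refresh the table for whole cores on topology updates. A small sketch of that cache-first lookup; query_firmware_node() is a hypothetical stand-in for the of_get_cpu_node()/of_node_to_nid() path:

    /* Sketch of the lookup-table-first behaviour gained by numa_setup_cpu(). */
    #include <stdio.h>

    #define NR_CPUS_SKETCH 8

    static int lookup[NR_CPUS_SKETCH];                     /* -1 == unknown */

    static int query_firmware_node(int cpu) { return cpu / 4; }  /* stand-in */

    static int setup_cpu_node_sketch(int cpu)
    {
        int nid = lookup[cpu];

        if (nid >= 0)                    /* most recent platform notification wins */
            return nid;
        nid = query_firmware_node(cpu);  /* fall back to the firmware query */
        lookup[cpu] = nid;
        return nid;
    }

    int main(void)
    {
        for (int cpu = 0; cpu < NR_CPUS_SKETCH; cpu++)
            lookup[cpu] = -1;            /* like reset_numa_cpu_lookup_table() */
        lookup[5] = 1;                   /* pretend a topology update already told us */
        printf("cpu5 -> node %d, cpu2 -> node %d\n",
               setup_cpu_node_sketch(5), setup_cpu_node_sketch(2));  /* 1, 0 */
        return 0;
    }
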
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 841e0d00863c..c695943a513c 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -24,7 +24,6 @@
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/gfp.h> 25#include <linux/gfp.h>
26#include <linux/mm.h> 26#include <linux/mm.h>
27#include <linux/init.h>
28#include <linux/percpu.h> 27#include <linux/percpu.h>
29#include <linux/hardirq.h> 28#include <linux/hardirq.h>
30#include <linux/hugetlb.h> 29#include <linux/hugetlb.h>
@@ -174,7 +173,7 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
174 pte_t pte) 173 pte_t pte)
175{ 174{
176#ifdef CONFIG_DEBUG_VM 175#ifdef CONFIG_DEBUG_VM
177 WARN_ON(pte_present(*ptep)); 176 WARN_ON(pte_val(*ptep) & _PAGE_PRESENT);
178#endif 177#endif
179 /* Note: mm->context.id might not yet have been assigned as 178 /* Note: mm->context.id might not yet have been assigned as
180 * this context might not have been activated yet when this 179 * this context might not have been activated yet when this
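
The WARN_ON above now tests the raw _PAGE_PRESENT bit rather than pte_present(). The likely motivation, given the _PAGE_NUMA support this merge brings in, is that a NUMA-protected pte can still read as "present" to pte_present() while _PAGE_PRESENT itself is clear, so re-installing such a pte must not warn. A conceptual sketch with stand-in bit values (not the real _PAGE_* definitions):

    /* Sketch of why the raw present-bit test differs from pte_present(). */
    #include <stdbool.h>
    #include <stdio.h>

    #define SK_PAGE_PRESENT 0x1UL
    #define SK_PAGE_NUMA    0x2UL

    static bool pte_present_sketch(unsigned long pte)
    {
        return pte & (SK_PAGE_PRESENT | SK_PAGE_NUMA);   /* "present or NUMA" */
    }

    int main(void)
    {
        unsigned long numa_pte = SK_PAGE_NUMA;   /* present bit cleared for NUMA */

        printf("pte_present: %d, raw present bit: %d\n",
               pte_present_sketch(numa_pte) ? 1 : 0,
               (numa_pte & SK_PAGE_PRESENT) ? 1 : 0);    /* 1, 0 */
        return 0;
    }
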
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 5b9601715289..343a87fa78b5 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -299,6 +299,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
299 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, 299 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
300 __pgprot(flags))); 300 __pgprot(flags)));
301 } 301 }
302 smp_wmb();
302 return err; 303 return err;
303} 304}
304 305
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 9d95786aa80f..65b7b65e8708 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -33,7 +33,6 @@
33#include <linux/swap.h> 33#include <linux/swap.h>
34#include <linux/stddef.h> 34#include <linux/stddef.h>
35#include <linux/vmalloc.h> 35#include <linux/vmalloc.h>
36#include <linux/init.h>
37#include <linux/bootmem.h> 36#include <linux/bootmem.h>
38#include <linux/memblock.h> 37#include <linux/memblock.h>
39#include <linux/slab.h> 38#include <linux/slab.h>
@@ -153,6 +152,18 @@ int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
153 } 152 }
154#endif /* !CONFIG_PPC_MMU_NOHASH */ 153#endif /* !CONFIG_PPC_MMU_NOHASH */
155 } 154 }
155
156#ifdef CONFIG_PPC_BOOK3E_64
157 /*
158 * With hardware tablewalk, a sync is needed to ensure that
159 * subsequent accesses see the PTE we just wrote. Unlike userspace
160 * mappings, we can't tolerate spurious faults, so make sure
161 * the new PTE will be seen the first time.
162 */
163 mb();
164#else
165 smp_wmb();
166#endif
156 return 0; 167 return 0;
157} 168}
158 169
@@ -687,7 +698,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
687 pmd_t *pmdp, pmd_t pmd) 698 pmd_t *pmdp, pmd_t pmd)
688{ 699{
689#ifdef CONFIG_DEBUG_VM 700#ifdef CONFIG_DEBUG_VM
690 WARN_ON(!pmd_none(*pmdp)); 701 WARN_ON(pmd_val(*pmdp) & _PAGE_PRESENT);
691 assert_spin_locked(&mm->page_table_lock); 702 assert_spin_locked(&mm->page_table_lock);
692 WARN_ON(!pmd_trans_huge(pmd)); 703 WARN_ON(!pmd_trans_huge(pmd));
693#endif 704#endif
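
map_kernel_page() above now ends with a full mb() when Book3E hardware tablewalk may read the page tables (a spurious kernel fault cannot be tolerated) and an smp_wmb() otherwise. A loose, compiler-builtin sketch of the ordering being enforced, publish the PTE, fence, then allow the first access through the new mapping; this illustrates the pattern only and is not a drop-in for the kernel barriers:

    /* Ordering sketch: make the PTE store visible before the mapping is used. */
    static unsigned long fake_pte;                    /* stands in for the PTE slot */

    static void map_then_touch(unsigned long pteval, volatile const char *va)
    {
        fake_pte = pteval;                            /* publish the PTE */
        __atomic_thread_fence(__ATOMIC_SEQ_CST);      /* ~ mb(): order the store... */
        (void)*va;                                    /* ...before the first access */
    }

    int main(void)
    {
        char page = 0;
        map_then_touch(0x1UL, &page);
        return 0;
    }
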
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 36e44b4260eb..c99f6510a0b2 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -23,7 +23,6 @@
23 23
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/mm.h> 25#include <linux/mm.h>
26#include <linux/init.h>
27#include <linux/percpu.h> 26#include <linux/percpu.h>
28#include <linux/hardirq.h> 27#include <linux/hardirq.h>
29#include <asm/pgalloc.h> 28#include <asm/pgalloc.h>
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index b4113bf86353..16250b162375 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -136,7 +136,7 @@ BEGIN_MMU_FTR_SECTION
136 */ 136 */
137 PPC_TLBSRX_DOT(0,R16) 137 PPC_TLBSRX_DOT(0,R16)
138 ldx r14,r14,r15 /* grab pgd entry */ 138 ldx r14,r14,r15 /* grab pgd entry */
139 beq normal_tlb_miss_done /* tlb exists already, bail */ 139 beq tlb_miss_done_bolted /* tlb exists already, bail */
140MMU_FTR_SECTION_ELSE 140MMU_FTR_SECTION_ELSE
141 ldx r14,r14,r15 /* grab pgd entry */ 141 ldx r14,r14,r15 /* grab pgd entry */
142ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV) 142ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
@@ -192,6 +192,7 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
192 mtspr SPRN_MAS7_MAS3,r15 192 mtspr SPRN_MAS7_MAS3,r15
193 tlbwe 193 tlbwe
194 194
195tlb_miss_done_bolted:
195 TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK) 196 TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
196 tlb_epilog_bolted 197 tlb_epilog_bolted
197 rfi 198 rfi
@@ -239,6 +240,177 @@ itlb_miss_fault_bolted:
239 beq tlb_miss_common_bolted 240 beq tlb_miss_common_bolted
240 b itlb_miss_kernel_bolted 241 b itlb_miss_kernel_bolted
241 242
243/*
244 * TLB miss handling for e6500 and derivatives, using hardware tablewalk.
245 *
246 * Linear mapping is bolted: no virtual page table or nested TLB misses
247 * Indirect entries in TLB1, hardware loads resulting direct entries
248 * into TLB0
249 * No HES or NV hint on TLB1, so we need to do software round-robin
250 * No tlbsrx. so we need a spinlock, and we have to deal
251 * with MAS-damage caused by tlbsx
252 * 4K pages only
253 */
254
255 START_EXCEPTION(instruction_tlb_miss_e6500)
256 tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0
257
258 ld r11,PACA_TCD_PTR(r13)
259 srdi. r15,r16,60 /* get region */
260 ori r16,r16,1
261
262 TLB_MISS_STATS_SAVE_INFO_BOLTED
263 bne tlb_miss_kernel_e6500 /* user/kernel test */
264
265 b tlb_miss_common_e6500
266
267 START_EXCEPTION(data_tlb_miss_e6500)
268 tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR
269
270 ld r11,PACA_TCD_PTR(r13)
271 srdi. r15,r16,60 /* get region */
272 rldicr r16,r16,0,62
273
274 TLB_MISS_STATS_SAVE_INFO_BOLTED
275 bne tlb_miss_kernel_e6500 /* user vs kernel check */
276
277/*
278 * This is the guts of the TLB miss handler for e6500 and derivatives.
279 * We are entered with:
280 *
281 * r16 = page of faulting address (low bit 0 if data, 1 if instruction)
282 * r15 = crap (free to use)
283 * r14 = page table base
284 * r13 = PACA
285 * r11 = tlb_per_core ptr
286 * r10 = crap (free to use)
287 */
288tlb_miss_common_e6500:
289 /*
290 * Search if we already have an indirect entry for that virtual
291 * address, and if we do, bail out.
292 *
293 * MAS6:IND should be already set based on MAS4
294 */
295 addi r10,r11,TCD_LOCK
2961: lbarx r15,0,r10
297 cmpdi r15,0
298 bne 2f
299 li r15,1
300 stbcx. r15,0,r10
301 bne 1b
302 .subsection 1
3032: lbz r15,0(r10)
304 cmpdi r15,0
305 bne 2b
306 b 1b
307 .previous
308
309 mfspr r15,SPRN_MAS2
310
311 tlbsx 0,r16
312 mfspr r10,SPRN_MAS1
313 andis. r10,r10,MAS1_VALID@h
314 bne tlb_miss_done_e6500
315
316 /* Undo MAS-damage from the tlbsx */
317 mfspr r10,SPRN_MAS1
318 oris r10,r10,MAS1_VALID@h
319 mtspr SPRN_MAS1,r10
320 mtspr SPRN_MAS2,r15
321
322 /* Now, we need to walk the page tables. First check if we are in
323 * range.
324 */
325 rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
326 bne- tlb_miss_fault_e6500
327
328 rldicl r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
329 cmpldi cr0,r14,0
330 clrrdi r15,r15,3
331 beq- tlb_miss_fault_e6500 /* No PGDIR, bail */
332 ldx r14,r14,r15 /* grab pgd entry */
333
334 rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
335 clrrdi r15,r15,3
336 cmpdi cr0,r14,0
337 bge tlb_miss_fault_e6500 /* Bad pgd entry or hugepage; bail */
338 ldx r14,r14,r15 /* grab pud entry */
339
340 rldicl r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
341 clrrdi r15,r15,3
342 cmpdi cr0,r14,0
343 bge tlb_miss_fault_e6500
344 ldx r14,r14,r15 /* Grab pmd entry */
345
346 mfspr r10,SPRN_MAS0
347 cmpdi cr0,r14,0
348 bge tlb_miss_fault_e6500
349
350 /* Now we build the MAS for a 2M indirect page:
351 *
352 * MAS 0 : ESEL needs to be filled by software round-robin
353 * MAS 1 : Fully set up
354 * - PID already updated by caller if necessary
355 * - TSIZE for now is base ind page size always
356 * - TID already cleared if necessary
357 * MAS 2 : Default not 2M-aligned, need to be redone
358 * MAS 3+7 : Needs to be done
359 */
360
361 ori r14,r14,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
362 mtspr SPRN_MAS7_MAS3,r14
363
364 clrrdi r15,r16,21 /* make EA 2M-aligned */
365 mtspr SPRN_MAS2,r15
366
367 lbz r15,TCD_ESEL_NEXT(r11)
368 lbz r16,TCD_ESEL_MAX(r11)
369 lbz r14,TCD_ESEL_FIRST(r11)
370 rlwimi r10,r15,16,0x00ff0000 /* insert esel_next into MAS0 */
371 addi r15,r15,1 /* increment esel_next */
372 mtspr SPRN_MAS0,r10
373 cmpw r15,r16
374 iseleq r15,r14,r15 /* if next == last use first */
375 stb r15,TCD_ESEL_NEXT(r11)
376
377 tlbwe
378
379tlb_miss_done_e6500:
380 .macro tlb_unlock_e6500
381 li r15,0
382 isync
383 stb r15,TCD_LOCK(r11)
384 .endm
385
386 tlb_unlock_e6500
387 TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
388 tlb_epilog_bolted
389 rfi
390
391tlb_miss_kernel_e6500:
392 mfspr r10,SPRN_MAS1
393 ld r14,PACA_KERNELPGD(r13)
394 cmpldi cr0,r15,8 /* Check for vmalloc region */
395 rlwinm r10,r10,0,16,1 /* Clear TID */
396 mtspr SPRN_MAS1,r10
397 beq+ tlb_miss_common_e6500
398
399tlb_miss_fault_e6500:
400 tlb_unlock_e6500
401 /* We need to check if it was an instruction miss */
402 andi. r16,r16,1
403 bne itlb_miss_fault_e6500
404dtlb_miss_fault_e6500:
405 TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
406 tlb_epilog_bolted
407 b exc_data_storage_book3e
408itlb_miss_fault_e6500:
409 TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
410 tlb_epilog_bolted
411 b exc_instruction_storage_book3e
412
413
242/********************************************************************** 414/**********************************************************************
243 * * 415 * *
244 * TLB miss handling for Book3E with TLB reservation and HES support * 416 * TLB miss handling for Book3E with TLB reservation and HES support *
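
The e6500 miss handler added above serialises the tlbsx/tlbwe window with a per-core byte lock built from lbarx/stbcx., spinning on plain loads while the lock is held and dropping it with an isync/stb pair. A C-level sketch of the same shape using compiler atomics (illustration only; the real lock is the hand-written assembly in the hunk):

    /* Sketch of the TCD_LOCK byte lock used by the e6500 TLB miss path. */
    static void tcd_lock_sketch(unsigned char *lock)
    {
        while (__atomic_test_and_set(lock, __ATOMIC_ACQUIRE)) {
            while (__atomic_load_n(lock, __ATOMIC_RELAXED))   /* the "2:" spin loop */
                ;
        }
    }

    static void tcd_unlock_sketch(unsigned char *lock)
    {
        __atomic_store_n(lock, 0, __ATOMIC_RELEASE);  /* ~ the isync + stb of 0 */
    }

    int main(void)
    {
        unsigned char lock = 0;

        tcd_lock_sketch(&lock);
        tcd_unlock_sketch(&lock);
        return 0;
    }
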
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 358d74303138..735839b74dc5 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -43,6 +43,7 @@
43#include <asm/tlb.h> 43#include <asm/tlb.h>
44#include <asm/code-patching.h> 44#include <asm/code-patching.h>
45#include <asm/hugetlb.h> 45#include <asm/hugetlb.h>
46#include <asm/paca.h>
46 47
47#include "mmu_decl.h" 48#include "mmu_decl.h"
48 49
@@ -58,6 +59,10 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
58 .shift = 12, 59 .shift = 12,
59 .enc = BOOK3E_PAGESZ_4K, 60 .enc = BOOK3E_PAGESZ_4K,
60 }, 61 },
62 [MMU_PAGE_2M] = {
63 .shift = 21,
64 .enc = BOOK3E_PAGESZ_2M,
65 },
61 [MMU_PAGE_4M] = { 66 [MMU_PAGE_4M] = {
62 .shift = 22, 67 .shift = 22,
63 .enc = BOOK3E_PAGESZ_4M, 68 .enc = BOOK3E_PAGESZ_4M,
@@ -136,7 +141,7 @@ static inline int mmu_get_tsize(int psize)
136int mmu_linear_psize; /* Page size used for the linear mapping */ 141int mmu_linear_psize; /* Page size used for the linear mapping */
137int mmu_pte_psize; /* Page size used for PTE pages */ 142int mmu_pte_psize; /* Page size used for PTE pages */
138int mmu_vmemmap_psize; /* Page size used for the virtual mem map */ 143int mmu_vmemmap_psize; /* Page size used for the virtual mem map */
139int book3e_htw_enabled; /* Is HW tablewalk enabled ? */ 144int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */
140unsigned long linear_map_top; /* Top of linear mapping */ 145unsigned long linear_map_top; /* Top of linear mapping */
141 146
142#endif /* CONFIG_PPC64 */ 147#endif /* CONFIG_PPC64 */
@@ -377,7 +382,7 @@ void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
377{ 382{
378 int tsize = mmu_psize_defs[mmu_pte_psize].enc; 383 int tsize = mmu_psize_defs[mmu_pte_psize].enc;
379 384
380 if (book3e_htw_enabled) { 385 if (book3e_htw_mode != PPC_HTW_NONE) {
381 unsigned long start = address & PMD_MASK; 386 unsigned long start = address & PMD_MASK;
382 unsigned long end = address + PMD_SIZE; 387 unsigned long end = address + PMD_SIZE;
383 unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift; 388 unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
@@ -430,7 +435,7 @@ static void setup_page_sizes(void)
430 def = &mmu_psize_defs[psize]; 435 def = &mmu_psize_defs[psize];
431 shift = def->shift; 436 shift = def->shift;
432 437
433 if (shift == 0) 438 if (shift == 0 || shift & 1)
434 continue; 439 continue;
435 440
436 /* adjust to be in terms of 4^shift Kb */ 441 /* adjust to be in terms of 4^shift Kb */
@@ -440,21 +445,40 @@ static void setup_page_sizes(void)
440 def->flags |= MMU_PAGE_SIZE_DIRECT; 445 def->flags |= MMU_PAGE_SIZE_DIRECT;
441 } 446 }
442 447
443 goto no_indirect; 448 goto out;
444 } 449 }
445 450
446 if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) { 451 if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
447 u32 tlb1ps = mfspr(SPRN_TLB1PS); 452 u32 tlb1cfg, tlb1ps;
453
454 tlb0cfg = mfspr(SPRN_TLB0CFG);
455 tlb1cfg = mfspr(SPRN_TLB1CFG);
456 tlb1ps = mfspr(SPRN_TLB1PS);
457 eptcfg = mfspr(SPRN_EPTCFG);
458
459 if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
460 book3e_htw_mode = PPC_HTW_E6500;
461
462 /*
463 * We expect 4K subpage size and unrestricted indirect size.
464 * The lack of a restriction on indirect size is a Freescale
465 * extension, indicated by PSn = 0 but SPSn != 0.
466 */
467 if (eptcfg != 2)
468 book3e_htw_mode = PPC_HTW_NONE;
448 469
449 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { 470 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
450 struct mmu_psize_def *def = &mmu_psize_defs[psize]; 471 struct mmu_psize_def *def = &mmu_psize_defs[psize];
451 472
452 if (tlb1ps & (1U << (def->shift - 10))) { 473 if (tlb1ps & (1U << (def->shift - 10))) {
453 def->flags |= MMU_PAGE_SIZE_DIRECT; 474 def->flags |= MMU_PAGE_SIZE_DIRECT;
475
476 if (book3e_htw_mode && psize == MMU_PAGE_2M)
477 def->flags |= MMU_PAGE_SIZE_INDIRECT;
454 } 478 }
455 } 479 }
456 480
457 goto no_indirect; 481 goto out;
458 } 482 }
459#endif 483#endif
460 484
@@ -471,8 +495,11 @@ static void setup_page_sizes(void)
471 } 495 }
472 496
473 /* Indirect page sizes supported ? */ 497 /* Indirect page sizes supported ? */
474 if ((tlb0cfg & TLBnCFG_IND) == 0) 498 if ((tlb0cfg & TLBnCFG_IND) == 0 ||
475 goto no_indirect; 499 (tlb0cfg & TLBnCFG_PT) == 0)
500 goto out;
501
502 book3e_htw_mode = PPC_HTW_IBM;
476 503
477 /* Now, we only deal with one IND page size for each 504 /* Now, we only deal with one IND page size for each
478 * direct size. Hopefully all implementations today are 505 * direct size. Hopefully all implementations today are
@@ -497,8 +524,8 @@ static void setup_page_sizes(void)
497 def->ind = ps + 10; 524 def->ind = ps + 10;
498 } 525 }
499 } 526 }
500 no_indirect:
501 527
528out:
502 /* Cleanup array and print summary */ 529 /* Cleanup array and print summary */
503 pr_info("MMU: Supported page sizes\n"); 530 pr_info("MMU: Supported page sizes\n");
504 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { 531 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
@@ -518,44 +545,25 @@ static void setup_page_sizes(void)
518 } 545 }
519} 546}
520 547
521static void __patch_exception(int exc, unsigned long addr)
522{
523 extern unsigned int interrupt_base_book3e;
524 unsigned int *ibase = &interrupt_base_book3e;
525
526 /* Our exceptions vectors start with a NOP and -then- a branch
527 * to deal with single stepping from userspace which stops on
528 * the second instruction. Thus we need to patch the second
529 * instruction of the exception, not the first one
530 */
531
532 patch_branch(ibase + (exc / 4) + 1, addr, 0);
533}
534
535#define patch_exception(exc, name) do { \
536 extern unsigned int name; \
537 __patch_exception((exc), (unsigned long)&name); \
538} while (0)
539
540static void setup_mmu_htw(void) 548static void setup_mmu_htw(void)
541{ 549{
542 /* Check if HW tablewalk is present, and if yes, enable it by: 550 /*
543 * 551 * If we want to use HW tablewalk, enable it by patching the TLB miss
544 * - patching the TLB miss handlers to branch to the 552 * handlers to branch to the one dedicated to it.
545 * one dedicates to it 553 */
546 *
547 * - setting the global book3e_htw_enabled
548 */
549 unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);
550 554
551 if ((tlb0cfg & TLBnCFG_IND) && 555 switch (book3e_htw_mode) {
552 (tlb0cfg & TLBnCFG_PT)) { 556 case PPC_HTW_IBM:
553 patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e); 557 patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
554 patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e); 558 patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
555 book3e_htw_enabled = 1; 559 break;
560 case PPC_HTW_E6500:
561 patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
562 patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
563 break;
556 } 564 }
557 pr_info("MMU: Book3E HW tablewalk %s\n", 565 pr_info("MMU: Book3E HW tablewalk %s\n",
558 book3e_htw_enabled ? "enabled" : "not supported"); 566 book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
559} 567}
560 568
561/* 569/*
@@ -595,8 +603,16 @@ static void __early_init_mmu(int boot_cpu)
595 /* Set MAS4 based on page table setting */ 603 /* Set MAS4 based on page table setting */
596 604
597 mas4 = 0x4 << MAS4_WIMGED_SHIFT; 605 mas4 = 0x4 << MAS4_WIMGED_SHIFT;
598 if (book3e_htw_enabled) { 606 switch (book3e_htw_mode) {
599 mas4 |= mas4 | MAS4_INDD; 607 case PPC_HTW_E6500:
608 mas4 |= MAS4_INDD;
609 mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
610 mas4 |= MAS4_TLBSELD(1);
611 mmu_pte_psize = MMU_PAGE_2M;
612 break;
613
614 case PPC_HTW_IBM:
615 mas4 |= MAS4_INDD;
600#ifdef CONFIG_PPC_64K_PAGES 616#ifdef CONFIG_PPC_64K_PAGES
601 mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT; 617 mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
602 mmu_pte_psize = MMU_PAGE_256M; 618 mmu_pte_psize = MMU_PAGE_256M;
@@ -604,13 +620,16 @@ static void __early_init_mmu(int boot_cpu)
604 mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT; 620 mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
605 mmu_pte_psize = MMU_PAGE_1M; 621 mmu_pte_psize = MMU_PAGE_1M;
606#endif 622#endif
607 } else { 623 break;
624
625 case PPC_HTW_NONE:
608#ifdef CONFIG_PPC_64K_PAGES 626#ifdef CONFIG_PPC_64K_PAGES
609 mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT; 627 mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
610#else 628#else
611 mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT; 629 mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
612#endif 630#endif
613 mmu_pte_psize = mmu_virtual_psize; 631 mmu_pte_psize = mmu_virtual_psize;
632 break;
614 } 633 }
615 mtspr(SPRN_MAS4, mas4); 634 mtspr(SPRN_MAS4, mas4);
616 635
@@ -630,8 +649,11 @@ static void __early_init_mmu(int boot_cpu)
630 /* limit memory so we dont have linear faults */ 649 /* limit memory so we dont have linear faults */
631 memblock_enforce_memory_limit(linear_map_top); 650 memblock_enforce_memory_limit(linear_map_top);
632 651
633 patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e); 652 if (book3e_htw_mode == PPC_HTW_NONE) {
634 patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e); 653 patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
654 patch_exception(0x1e0,
655 exc_instruction_tlb_miss_bolted_book3e);
656 }
635 } 657 }
636#endif 658#endif
637 659
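The tlb_nohash.c changes above replace the book3e_htw_enabled boolean with a three-way book3e_htw_mode (PPC_HTW_NONE, PPC_HTW_IBM, PPC_HTW_E6500), add the 2M page size, and teach setup_page_sizes()/setup_mmu_htw() to pick the e6500 tablewalk path, which uses 2M indirect entries for PTE pages and its own TLB miss handlers. The standalone sketch below mirrors that control flow only; the TLBnCFG bit masks and enum values are illustrative placeholders, not the kernel's real SPR encodings.

    #include <stdio.h>

    /* Illustrative placeholders, not the kernel's definitions. */
    enum { PPC_HTW_NONE, PPC_HTW_IBM, PPC_HTW_E6500 };
    #define TLBnCFG_IND 0x1u /* "indirect entries supported" (placeholder bit) */
    #define TLBnCFG_PT  0x2u /* "page table walk supported" (placeholder bit) */

    /* Pick a tablewalk mode the way the patched setup_page_sizes() does:
     * an MMUv2 (e6500-style) core advertises IND on TLB1 and PT on TLB0,
     * the IBM variant advertises both on TLB0, anything else stays on the
     * software/bolted handlers. */
    static int pick_htw_mode(unsigned int tlb0cfg, unsigned int tlb1cfg, int mmu_v2)
    {
            if (mmu_v2 && (tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
                    return PPC_HTW_E6500;
            if (!mmu_v2 && (tlb0cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
                    return PPC_HTW_IBM;
            return PPC_HTW_NONE;
    }

    int main(void)
    {
            switch (pick_htw_mode(TLBnCFG_PT, TLBnCFG_IND, 1)) {
            case PPC_HTW_E6500: /* analogous to the new case in setup_mmu_htw() */
                    puts("patch e6500 miss handlers, PTE pages use 2M indirect entries");
                    break;
            case PPC_HTW_IBM:
                    puts("patch IBM HTW miss handlers");
                    break;
            default:
                    puts("keep the software/bolted TLB miss handlers");
            }
            return 0;
    }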
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index 626ad081639f..43ff3c797fbf 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -402,7 +402,9 @@ _GLOBAL(set_context)
402 * Load TLBCAM[index] entry in to the L2 CAM MMU 402 * Load TLBCAM[index] entry in to the L2 CAM MMU
403 */ 403 */
404_GLOBAL(loadcam_entry) 404_GLOBAL(loadcam_entry)
405 LOAD_REG_ADDR(r4, TLBCAM) 405 mflr r5
406 LOAD_REG_ADDR_PIC(r4, TLBCAM)
407 mtlr r5
406 mulli r5,r3,TLBCAM_SIZE 408 mulli r5,r3,TLBCAM_SIZE
407 add r3,r5,r4 409 add r3,r5,r4
408 lwz r4,TLBCAM_MAS0(r3) 410 lwz r4,TLBCAM_MAS0(r3)
diff --git a/arch/powerpc/oprofile/op_model_7450.c b/arch/powerpc/oprofile/op_model_7450.c
index ff617246d128..d29b6e4e5e72 100644
--- a/arch/powerpc/oprofile/op_model_7450.c
+++ b/arch/powerpc/oprofile/op_model_7450.c
@@ -16,7 +16,6 @@
16 */ 16 */
17 17
18#include <linux/oprofile.h> 18#include <linux/oprofile.h>
19#include <linux/init.h>
20#include <linux/smp.h> 19#include <linux/smp.h>
21#include <asm/ptrace.h> 20#include <asm/ptrace.h>
22#include <asm/processor.h> 21#include <asm/processor.h>
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index b9589c19ccda..1f0ebdeea5f7 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -16,7 +16,6 @@
16 16
17#include <linux/cpufreq.h> 17#include <linux/cpufreq.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/init.h>
20#include <linux/jiffies.h> 19#include <linux/jiffies.h>
21#include <linux/kthread.h> 20#include <linux/kthread.h>
22#include <linux/oprofile.h> 21#include <linux/oprofile.h>
diff --git a/arch/powerpc/oprofile/op_model_fsl_emb.c b/arch/powerpc/oprofile/op_model_fsl_emb.c
index 2a82d3ed464d..14cf86fdddab 100644
--- a/arch/powerpc/oprofile/op_model_fsl_emb.c
+++ b/arch/powerpc/oprofile/op_model_fsl_emb.c
@@ -14,7 +14,6 @@
14 */ 14 */
15 15
16#include <linux/oprofile.h> 16#include <linux/oprofile.h>
17#include <linux/init.h>
18#include <linux/smp.h> 17#include <linux/smp.h>
19#include <asm/ptrace.h> 18#include <asm/ptrace.h>
20#include <asm/processor.h> 19#include <asm/processor.h>
diff --git a/arch/powerpc/oprofile/op_model_pa6t.c b/arch/powerpc/oprofile/op_model_pa6t.c
index 42f778dff919..a114a7c22d40 100644
--- a/arch/powerpc/oprofile/op_model_pa6t.c
+++ b/arch/powerpc/oprofile/op_model_pa6t.c
@@ -22,7 +22,6 @@
22 */ 22 */
23 23
24#include <linux/oprofile.h> 24#include <linux/oprofile.h>
25#include <linux/init.h>
26#include <linux/smp.h> 25#include <linux/smp.h>
27#include <linux/percpu.h> 26#include <linux/percpu.h>
28#include <asm/processor.h> 27#include <asm/processor.h>
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index f444b94935f5..962fe7b3e3fb 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -10,7 +10,6 @@
10 */ 10 */
11 11
12#include <linux/oprofile.h> 12#include <linux/oprofile.h>
13#include <linux/init.h>
14#include <linux/smp.h> 13#include <linux/smp.h>
15#include <asm/firmware.h> 14#include <asm/firmware.h>
16#include <asm/ptrace.h> 15#include <asm/ptrace.h>
diff --git a/arch/powerpc/oprofile/op_model_rs64.c b/arch/powerpc/oprofile/op_model_rs64.c
index 9b801b8c8c5a..7e5b8ed3a1b7 100644
--- a/arch/powerpc/oprofile/op_model_rs64.c
+++ b/arch/powerpc/oprofile/op_model_rs64.c
@@ -8,7 +8,6 @@
8 */ 8 */
9 9
10#include <linux/oprofile.h> 10#include <linux/oprofile.h>
11#include <linux/init.h>
12#include <linux/smp.h> 11#include <linux/smp.h>
13#include <asm/ptrace.h> 12#include <asm/ptrace.h>
14#include <asm/processor.h> 13#include <asm/processor.h>
diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig
index 670a033264c0..2bdc8c862c46 100644
--- a/arch/powerpc/platforms/83xx/Kconfig
+++ b/arch/powerpc/platforms/83xx/Kconfig
@@ -99,7 +99,6 @@ config SBC834x
99config ASP834x 99config ASP834x
100 bool "Analogue & Micro ASP 834x" 100 bool "Analogue & Micro ASP 834x"
101 select PPC_MPC834x 101 select PPC_MPC834x
102 select REDBOOT
103 help 102 help
104 This enables support for the Analogue & Micro ASP 83xx 103 This enables support for the Analogue & Micro ASP 83xx
105 board. 104 board.
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
index fd71cfdf2380..e238b6a55b15 100644
--- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
+++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
@@ -11,7 +11,6 @@
11 * (at your option) any later version. 11 * (at your option) any later version.
12 */ 12 */
13 13
14#include <linux/init.h>
15#include <linux/kernel.h> 14#include <linux/kernel.h>
16#include <linux/module.h> 15#include <linux/module.h>
17#include <linux/device.h> 16#include <linux/device.h>
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 3d9716ccd327..4b4c081df94d 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -10,7 +10,6 @@
10 * by the Free Software Foundation. 10 * by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/init.h>
14#include <linux/pm.h> 13#include <linux/pm.h>
15#include <linux/types.h> 14#include <linux/types.h>
16#include <linux/ioport.h> 15#include <linux/ioport.h>
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 4d4634958cfb..c17aae80e7ff 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -123,6 +123,12 @@ config P1023_RDS
123 help 123 help
124 This option enables support for the P1023 RDS and RDB boards 124 This option enables support for the P1023 RDS and RDB boards
125 125
126config TWR_P102x
127 bool "Freescale TWR-P102x"
128 select DEFAULT_UIMAGE
129 help
130 This option enables support for the TWR-P1025 board.
131
126config SOCRATES 132config SOCRATES
127 bool "Socrates" 133 bool "Socrates"
128 select DEFAULT_UIMAGE 134 select DEFAULT_UIMAGE
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index dd4c0b59577b..25cebe74ac46 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_P1010_RDB) += p1010rdb.o
18obj-$(CONFIG_P1022_DS) += p1022_ds.o 18obj-$(CONFIG_P1022_DS) += p1022_ds.o
19obj-$(CONFIG_P1022_RDK) += p1022_rdk.o 19obj-$(CONFIG_P1022_RDK) += p1022_rdk.o
20obj-$(CONFIG_P1023_RDS) += p1023_rds.o 20obj-$(CONFIG_P1023_RDS) += p1023_rds.o
21obj-$(CONFIG_TWR_P102x) += twr_p102x.o
21obj-$(CONFIG_CORENET_GENERIC) += corenet_generic.o 22obj-$(CONFIG_CORENET_GENERIC) += corenet_generic.o
22obj-$(CONFIG_STX_GP3) += stx_gp3.o 23obj-$(CONFIG_STX_GP3) += stx_gp3.o
23obj-$(CONFIG_TQM85xx) += tqm85xx.o 24obj-$(CONFIG_TQM85xx) += tqm85xx.o
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index eba78c85303f..3b085c7ee539 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -9,6 +9,7 @@
9#include <linux/of_irq.h> 9#include <linux/of_irq.h>
10#include <linux/of_platform.h> 10#include <linux/of_platform.h>
11 11
12#include <asm/qe.h>
12#include <sysdev/cpm2_pic.h> 13#include <sysdev/cpm2_pic.h>
13 14
14#include "mpc85xx.h" 15#include "mpc85xx.h"
@@ -82,3 +83,40 @@ void __init mpc85xx_cpm2_pic_init(void)
82 irq_set_chained_handler(irq, cpm2_cascade); 83 irq_set_chained_handler(irq, cpm2_cascade);
83} 84}
84#endif 85#endif
86
87#ifdef CONFIG_QUICC_ENGINE
88void __init mpc85xx_qe_init(void)
89{
90 struct device_node *np;
91
92 np = of_find_compatible_node(NULL, NULL, "fsl,qe");
93 if (!np) {
94 np = of_find_node_by_name(NULL, "qe");
95 if (!np) {
96 pr_err("%s: Could not find Quicc Engine node\n",
97 __func__);
98 return;
99 }
100 }
101
102 if (!of_device_is_available(np)) {
103 of_node_put(np);
104 return;
105 }
106
107 qe_reset();
108 of_node_put(np);
109
110 np = of_find_node_by_name(NULL, "par_io");
111 if (np) {
112 struct device_node *ucc;
113
114 par_io_init(np);
115 of_node_put(np);
116
117 for_each_node_by_name(ucc, "ucc")
118 par_io_of_config(ucc);
119
120 }
121}
122#endif
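The new mpc85xx_qe_init() above consolidates the QUICC Engine bring-up that mpc85xx_mds.c and mpc85xx_rdb.c previously carried as copy-pasted blocks; the later hunks in this commit (and the new twr_p102x.c board file) simply call it from their setup_arch routines. The runnable sketch below only mirrors the shared control flow; the stub functions stand in for the device-tree and QE calls and are not kernel APIs.

    #include <stdio.h>
    #include <stdbool.h>

    /* Stand-ins for of_find_compatible_node()/of_device_is_available()/
     * qe_reset()/par_io_*(); they exist only to make the flow runnable here. */
    static bool find_qe_node(const char *how) { printf("lookup QE node by %s\n", how); return true; }
    static bool node_available(void) { return true; }
    static void qe_reset_stub(void) { puts("qe_reset()"); }
    static void par_io_setup_stub(void) { puts("par_io_init() + par_io_of_config() for each ucc"); }

    /* Shape of the shared helper: find the QE node (compatible string first,
     * node name as fallback), bail out quietly if it is absent or disabled,
     * otherwise reset the QE and configure its par_io pins. */
    static void qe_init_sketch(void)
    {
            if (!find_qe_node("compatible \"fsl,qe\"") && !find_qe_node("name \"qe\"")) {
                    puts("no QUICC Engine node found");
                    return;
            }
            if (!node_available())
                    return;
            qe_reset_stub();
            par_io_setup_stub();
    }

    int main(void)
    {
            /* Board setup_arch() code now just calls the one shared helper. */
            qe_init_sketch();
            return 0;
    }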
diff --git a/arch/powerpc/platforms/85xx/mpc85xx.h b/arch/powerpc/platforms/85xx/mpc85xx.h
index 2aa7c5dc2c7f..fc51dd4092e5 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx.h
+++ b/arch/powerpc/platforms/85xx/mpc85xx.h
@@ -8,4 +8,10 @@ extern void mpc85xx_cpm2_pic_init(void);
8static inline void __init mpc85xx_cpm2_pic_init(void) {} 8static inline void __init mpc85xx_cpm2_pic_init(void) {}
9#endif /* CONFIG_CPM2 */ 9#endif /* CONFIG_CPM2 */
10 10
11#ifdef CONFIG_QUICC_ENGINE
12extern void mpc85xx_qe_init(void);
13#else
14static inline void __init mpc85xx_qe_init(void) {}
15#endif
16
11#endif 17#endif
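As usual for optional platform code, the prototype is paired with a static inline no-op for kernels built without CONFIG_QUICC_ENGINE, so callers can invoke mpc85xx_qe_init() without wrapping every call site in #ifdef. A minimal self-contained illustration of that idiom, using a made-up feature macro:

    #include <stdio.h>

    #define CONFIG_FEATURE_FOO 1 /* stands in for a Kconfig option; remove to exercise the stub */

    #ifdef CONFIG_FEATURE_FOO
    static void feature_foo_init(void) { puts("real init runs"); }
    #else
    /* The stub compiles away, so callers need no #ifdef of their own. */
    static inline void feature_foo_init(void) { }
    #endif

    int main(void)
    {
            feature_foo_init();
            return 0;
    }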
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index a7b3621a8df5..34f3c5eb3bee 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2006-2010, 2012 Freescale Semiconductor, Inc. 2 * Copyright (C) 2006-2010, 2012-2013 Freescale Semiconductor, Inc.
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * Author: Andy Fleming <afleming@freescale.com> 5 * Author: Andy Fleming <afleming@freescale.com>
@@ -238,32 +238,7 @@ static void __init mpc85xx_mds_qe_init(void)
238{ 238{
239 struct device_node *np; 239 struct device_node *np;
240 240
241 np = of_find_compatible_node(NULL, NULL, "fsl,qe"); 241 mpc85xx_qe_init();
242 if (!np) {
243 np = of_find_node_by_name(NULL, "qe");
244 if (!np)
245 return;
246 }
247
248 if (!of_device_is_available(np)) {
249 of_node_put(np);
250 return;
251 }
252
253 qe_reset();
254 of_node_put(np);
255
256 np = of_find_node_by_name(NULL, "par_io");
257 if (np) {
258 struct device_node *ucc;
259
260 par_io_init(np);
261 of_node_put(np);
262
263 for_each_node_by_name(ucc, "ucc")
264 par_io_of_config(ucc);
265 }
266
267 mpc85xx_mds_reset_ucc_phys(); 242 mpc85xx_mds_reset_ucc_phys();
268 243
269 if (machine_is(p1021_mds)) { 244 if (machine_is(p1021_mds)) {
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index 53b6fb0a3d56..e15bdd18fdb2 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * MPC85xx RDB Board Setup 2 * MPC85xx RDB Board Setup
3 * 3 *
4 * Copyright 2009,2012 Freescale Semiconductor Inc. 4 * Copyright 2009,2012-2013 Freescale Semiconductor Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 7 * under the terms of the GNU General Public License as published by the
@@ -98,26 +98,7 @@ static void __init mpc85xx_rdb_setup_arch(void)
98 fsl_pci_assign_primary(); 98 fsl_pci_assign_primary();
99 99
100#ifdef CONFIG_QUICC_ENGINE 100#ifdef CONFIG_QUICC_ENGINE
101 np = of_find_compatible_node(NULL, NULL, "fsl,qe"); 101 mpc85xx_qe_init();
102 if (!np) {
103 pr_err("%s: Could not find Quicc Engine node\n", __func__);
104 goto qe_fail;
105 }
106
107 qe_reset();
108 of_node_put(np);
109
110 np = of_find_node_by_name(NULL, "par_io");
111 if (np) {
112 struct device_node *ucc;
113
114 par_io_init(np);
115 of_node_put(np);
116
117 for_each_node_by_name(ucc, "ucc")
118 par_io_of_config(ucc);
119
120 }
121#if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE) 102#if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE)
122 if (machine_is(p1025_rdb)) { 103 if (machine_is(p1025_rdb)) {
123 104
@@ -148,8 +129,6 @@ static void __init mpc85xx_rdb_setup_arch(void)
148 129
149 } 130 }
150#endif 131#endif
151
152qe_fail:
153#endif /* CONFIG_QUICC_ENGINE */ 132#endif /* CONFIG_QUICC_ENGINE */
154 133
155 printk(KERN_INFO "MPC85xx RDB board from Freescale Semiconductor\n"); 134 printk(KERN_INFO "MPC85xx RDB board from Freescale Semiconductor\n");
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index b9197cea1854..bb75add67084 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -14,7 +14,6 @@
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/of_gpio.h> 17#include <linux/of_gpio.h>
19#include <linux/of_irq.h> 18#include <linux/of_irq.h>
20#include <linux/workqueue.h> 19#include <linux/workqueue.h>
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 393f975ab397..6382098d6f8d 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -389,15 +389,18 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
389} 389}
390#endif /* CONFIG_KEXEC */ 390#endif /* CONFIG_KEXEC */
391 391
392static void smp_85xx_setup_cpu(int cpu_nr) 392static void smp_85xx_basic_setup(int cpu_nr)
393{ 393{
394 if (smp_85xx_ops.probe == smp_mpic_probe)
395 mpic_setup_this_cpu();
396
397 if (cpu_has_feature(CPU_FTR_DBELL)) 394 if (cpu_has_feature(CPU_FTR_DBELL))
398 doorbell_setup_this_cpu(); 395 doorbell_setup_this_cpu();
399} 396}
400 397
398static void smp_85xx_setup_cpu(int cpu_nr)
399{
400 mpic_setup_this_cpu();
401 smp_85xx_basic_setup(cpu_nr);
402}
403
401static const struct of_device_id mpc85xx_smp_guts_ids[] = { 404static const struct of_device_id mpc85xx_smp_guts_ids[] = {
402 { .compatible = "fsl,mpc8572-guts", }, 405 { .compatible = "fsl,mpc8572-guts", },
403 { .compatible = "fsl,p1020-guts", }, 406 { .compatible = "fsl,p1020-guts", },
@@ -412,13 +415,14 @@ void __init mpc85xx_smp_init(void)
412{ 415{
413 struct device_node *np; 416 struct device_node *np;
414 417
415 smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
416 418
417 np = of_find_node_by_type(NULL, "open-pic"); 419 np = of_find_node_by_type(NULL, "open-pic");
418 if (np) { 420 if (np) {
419 smp_85xx_ops.probe = smp_mpic_probe; 421 smp_85xx_ops.probe = smp_mpic_probe;
422 smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
420 smp_85xx_ops.message_pass = smp_mpic_message_pass; 423 smp_85xx_ops.message_pass = smp_mpic_message_pass;
421 } 424 } else
425 smp_85xx_ops.setup_cpu = smp_85xx_basic_setup;
422 426
423 if (cpu_has_feature(CPU_FTR_DBELL)) { 427 if (cpu_has_feature(CPU_FTR_DBELL)) {
424 /* 428 /*
@@ -427,6 +431,7 @@ void __init mpc85xx_smp_init(void)
427 */ 431 */
428 smp_85xx_ops.message_pass = NULL; 432 smp_85xx_ops.message_pass = NULL;
429 smp_85xx_ops.cause_ipi = doorbell_cause_ipi; 433 smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
434 smp_85xx_ops.probe = NULL;
430 } 435 }
431 436
432 np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids); 437 np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
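The smp.c change splits CPU setup into smp_85xx_basic_setup() (doorbells only) and smp_85xx_setup_cpu() (MPIC plus the basic part), and chooses between them once, when the device tree is scanned, instead of checking at runtime which probe hook happens to be installed; the doorbell branch also clears .probe. A runnable sketch of that callback selection follows; the struct and helpers are stand-ins, not the kernel's smp_ops_t.

    #include <stdio.h>
    #include <stdbool.h>

    /* Only the fields the patch touches, as a stand-in for the kernel's ops table. */
    struct smp_ops_sketch {
            void (*setup_cpu)(int cpu);
            int (*probe)(void);
    };

    static void basic_setup(int cpu) { printf("cpu %d: doorbell setup only\n", cpu); }
    static void mpic_setup(int cpu)
    {
            printf("cpu %d: MPIC per-cpu setup, then ", cpu);
            basic_setup(cpu); /* smp_85xx_setup_cpu() now wraps smp_85xx_basic_setup() */
    }
    static int mpic_probe(void) { return 4; } /* pretend the MPIC reports four CPUs */

    int main(void)
    {
            bool have_open_pic = false;  /* pretend there is no "open-pic" node */
            bool have_doorbells = true;

            struct smp_ops_sketch ops = { .setup_cpu = basic_setup, .probe = NULL };
            if (have_open_pic) {         /* MPIC present: install the MPIC-aware hooks */
                    ops.setup_cpu = mpic_setup;
                    ops.probe = mpic_probe;
            }
            if (have_doorbells)          /* doorbell IPIs: the patch also clears .probe */
                    ops.probe = NULL;

            ops.setup_cpu(0);
            printf("probe hook installed: %s\n", ops.probe ? "yes" : "no");
            return 0;
    }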
diff --git a/arch/powerpc/platforms/85xx/twr_p102x.c b/arch/powerpc/platforms/85xx/twr_p102x.c
new file mode 100644
index 000000000000..c25ff10f05ee
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/twr_p102x.c
@@ -0,0 +1,147 @@
1/*
2 * Copyright 2010-2011, 2013 Freescale Semiconductor, Inc.
3 *
4 * Author: Michael Johnston <michael.johnston@freescale.com>
5 *
6 * Description:
7 * TWR-P102x Board Setup
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/errno.h>
18#include <linux/pci.h>
19#include <linux/of_platform.h>
20
21#include <asm/pci-bridge.h>
22#include <asm/udbg.h>
23#include <asm/mpic.h>
24#include <asm/qe.h>
25#include <asm/qe_ic.h>
26#include <asm/fsl_guts.h>
27
28#include <sysdev/fsl_soc.h>
29#include <sysdev/fsl_pci.h>
30#include "smp.h"
31
32#include "mpc85xx.h"
33
34static void __init twr_p1025_pic_init(void)
35{
36 struct mpic *mpic;
37
38#ifdef CONFIG_QUICC_ENGINE
39 struct device_node *np;
40#endif
41
42 mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
43 MPIC_SINGLE_DEST_CPU,
44 0, 256, " OpenPIC ");
45
46 BUG_ON(mpic == NULL);
47 mpic_init(mpic);
48
49#ifdef CONFIG_QUICC_ENGINE
50 np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
51 if (np) {
52 qe_ic_init(np, 0, qe_ic_cascade_low_mpic,
53 qe_ic_cascade_high_mpic);
54 of_node_put(np);
55 } else
56 pr_err("Could not find qe-ic node\n");
57#endif
58}
59
60/* ************************************************************************
61 *
62 * Setup the architecture
63 *
64 */
65static void __init twr_p1025_setup_arch(void)
66{
67#ifdef CONFIG_QUICC_ENGINE
68 struct device_node *np;
69#endif
70
71 if (ppc_md.progress)
72 ppc_md.progress("twr_p1025_setup_arch()", 0);
73
74 mpc85xx_smp_init();
75
76 fsl_pci_assign_primary();
77
78#ifdef CONFIG_QUICC_ENGINE
79 mpc85xx_qe_init();
80
81#if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE)
82 if (machine_is(twr_p1025)) {
83 struct ccsr_guts __iomem *guts;
84
85 np = of_find_compatible_node(NULL, NULL, "fsl,p1021-guts");
86 if (np) {
87 guts = of_iomap(np, 0);
88 if (!guts)
89 pr_err("twr_p1025: could not map global utilities register\n");
90 else {
91 /* P1025 has pins muxed for QE and other functions. To
92 * enable QE UEC mode, we need to set bit QE0 for UCC1
93 * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9
94 * and QE12 for QE MII management signals in PMUXCR
95 * register.
96 * Set QE mux bits in PMUXCR */
97 setbits32(&guts->pmuxcr, MPC85xx_PMUXCR_QE(0) |
98 MPC85xx_PMUXCR_QE(3) |
99 MPC85xx_PMUXCR_QE(9) |
100 MPC85xx_PMUXCR_QE(12));
101 iounmap(guts);
102
103#if defined(CONFIG_SERIAL_QE)
104 /* On the P1025TWR board, UCC7 acts as a UART port.
105 * However, UCC7's CTS pin is low by default, which
106 * would disrupt transmission in full-duplex
107 * communication. So disable the flow control pin PA18;
108 * the UCC7 UART then uses only the RXD and TXD pins.
109 */
110 par_io_config_pin(0, 18, 0, 0, 0, 0);
111#endif
112 /* Drive PB29 to CPLD low - CPLD will then change
113 * muxing from LBC to QE */
114 par_io_config_pin(1, 29, 1, 0, 0, 0);
115 par_io_data_set(1, 29, 0);
116 }
117 of_node_put(np);
118 }
119 }
120#endif
121#endif /* CONFIG_QUICC_ENGINE */
122
123 pr_info("TWR-P1025 board from Freescale Semiconductor\n");
124}
125
126machine_arch_initcall(twr_p1025, mpc85xx_common_publish_devices);
127
128static int __init twr_p1025_probe(void)
129{
130 unsigned long root = of_get_flat_dt_root();
131
132 return of_flat_dt_is_compatible(root, "fsl,TWR-P1025");
133}
134
135define_machine(twr_p1025) {
136 .name = "TWR-P1025",
137 .probe = twr_p1025_probe,
138 .setup_arch = twr_p1025_setup_arch,
139 .init_IRQ = twr_p1025_pic_init,
140#ifdef CONFIG_PCI
141 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
142#endif
143 .get_irq = mpic_get_irq,
144 .restart = fsl_rstcr_restart,
145 .calibrate_decr = generic_calibrate_decr,
146 .progress = udbg_progress,
147};
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index 8dec3c0911ad..bd6f1a1cf922 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -45,7 +45,6 @@ config PPC_EP88XC
45config PPC_ADDER875 45config PPC_ADDER875
46 bool "Analogue & Micro Adder 875" 46 bool "Analogue & Micro Adder 875"
47 select CPM1 47 select CPM1
48 select REDBOOT
49 help 48 help
50 This enables support for the Analogue & Micro Adder 875 49 This enables support for the Analogue & Micro Adder 875
51 board. 50 board.
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index bca2465a9c34..434fda39bf8b 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -72,6 +72,7 @@ config PPC_BOOK3S_64
72 select PPC_HAVE_PMU_SUPPORT 72 select PPC_HAVE_PMU_SUPPORT
73 select SYS_SUPPORTS_HUGETLBFS 73 select SYS_SUPPORTS_HUGETLBFS
74 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if PPC_64K_PAGES 74 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if PPC_64K_PAGES
75 select ARCH_SUPPORTS_NUMA_BALANCING
75 76
76config PPC_BOOK3E_64 77config PPC_BOOK3E_64
77 bool "Embedded processors" 78 bool "Embedded processors"
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index c34ee4e60873..d4d245c0d787 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -111,7 +111,7 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
111 DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r); 111 DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
112 112
113 if (rflags & _PAGE_NO_CACHE) 113 if (rflags & _PAGE_NO_CACHE)
114 hpte_r &= ~_PAGE_COHERENT; 114 hpte_r &= ~HPTE_R_M;
115 115
116 raw_spin_lock(&beat_htab_lock); 116 raw_spin_lock(&beat_htab_lock);
117 lpar_rc = beat_read_mask(hpte_group); 117 lpar_rc = beat_read_mask(hpte_group);
@@ -337,7 +337,7 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
337 DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r); 337 DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
338 338
339 if (rflags & _PAGE_NO_CACHE) 339 if (rflags & _PAGE_NO_CACHE)
340 hpte_r &= ~_PAGE_COHERENT; 340 hpte_r &= ~HPTE_R_M;
341 341
342 /* insert into not-volted entry */ 342 /* insert into not-volted entry */
343 lpar_rc = beat_insert_htab_entry3(0, hpte_group, hpte_v, hpte_r, 343 lpar_rc = beat_insert_htab_entry3(0, hpte_group, hpte_v, hpte_r,
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index b53560660b72..2b90ff8a93be 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -197,7 +197,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
197 197
198 io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); 198 io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
199 199
200 for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE) 200 for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift)
201 io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask); 201 io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
202 202
203 mb(); 203 mb();
@@ -430,7 +430,7 @@ static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
430{ 430{
431 cell_iommu_setup_stab(iommu, base, size, 0, 0); 431 cell_iommu_setup_stab(iommu, base, size, 0, 0);
432 iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0, 432 iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
433 IOMMU_PAGE_SHIFT); 433 IOMMU_PAGE_SHIFT_4K);
434 cell_iommu_enable_hardware(iommu); 434 cell_iommu_enable_hardware(iommu);
435} 435}
436 436
@@ -487,8 +487,10 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
487 window->table.it_blocksize = 16; 487 window->table.it_blocksize = 16;
488 window->table.it_base = (unsigned long)iommu->ptab; 488 window->table.it_base = (unsigned long)iommu->ptab;
489 window->table.it_index = iommu->nid; 489 window->table.it_index = iommu->nid;
490 window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset; 490 window->table.it_page_shift = IOMMU_PAGE_SHIFT_4K;
491 window->table.it_size = size >> IOMMU_PAGE_SHIFT; 491 window->table.it_offset =
492 (offset >> window->table.it_page_shift) + pte_offset;
493 window->table.it_size = size >> window->table.it_page_shift;
492 494
493 iommu_init_table(&window->table, iommu->nid); 495 iommu_init_table(&window->table, iommu->nid);
494 496
@@ -773,7 +775,7 @@ static void __init cell_iommu_init_one(struct device_node *np,
773 775
774 /* Setup the iommu_table */ 776 /* Setup the iommu_table */
775 cell_iommu_setup_window(iommu, np, base, size, 777 cell_iommu_setup_window(iommu, np, base, size,
776 offset >> IOMMU_PAGE_SHIFT); 778 offset >> IOMMU_PAGE_SHIFT_4K);
777} 779}
778 780
779static void __init cell_disable_iommus(void) 781static void __init cell_disable_iommus(void)
@@ -1122,7 +1124,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
1122 1124
1123 cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize); 1125 cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
1124 iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0, 1126 iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
1125 IOMMU_PAGE_SHIFT); 1127 IOMMU_PAGE_SHIFT_4K);
1126 cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize, 1128 cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
1127 fbase, fsize); 1129 fbase, fsize);
1128 cell_iommu_enable_hardware(iommu); 1130 cell_iommu_enable_hardware(iommu);
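The Cell IOMMU hunks record the page size in the table itself (it_page_shift, still 4K via IOMMU_PAGE_SHIFT_4K) and derive it_offset and it_size from that field rather than from a single global constant. A runnable check of that arithmetic, with made-up window numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int it_page_shift = 12;            /* 4K IOMMU pages, as Cell still uses */
            unsigned long long offset = 0x80000000ULL;  /* example DMA window offset */
            unsigned long long size   = 0x20000000ULL;  /* example DMA window size (512MB) */
            unsigned long pte_offset  = 0;

            /* Same conversions as cell_iommu_setup_window() after the patch. */
            unsigned long long it_offset = (offset >> it_page_shift) + pte_offset;
            unsigned long long it_size   = size >> it_page_shift;

            printf("it_page_shift=%u it_offset=%llu it_size=%llu entries\n",
                   it_page_shift, it_offset, it_size);
            return 0;
    }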
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index dead91b177b9..b6c9a0dcc924 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -14,7 +14,6 @@
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/kernel_stat.h> 15#include <linux/kernel_stat.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/init.h>
18#include <linux/spinlock.h> 17#include <linux/spinlock.h>
19 18
20#include <asm/ptrace.h> 19#include <asm/ptrace.h>
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index 302ba43d73a1..6d3c7a9fd047 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -67,6 +67,18 @@ config PPC_C2K
67 This option enables support for the GE Fanuc C2K board (formerly 67 This option enables support for the GE Fanuc C2K board (formerly
68 an SBS board). 68 an SBS board).
69 69
70config MVME5100
71 bool "Motorola/Emerson MVME5100"
72 depends on EMBEDDED6xx
73 select MPIC
74 select PCI
75 select PPC_INDIRECT_PCI
76 select PPC_I8259
77 select PPC_NATIVE
78 help
79 This option enables support for the Motorola (now Emerson) MVME5100
80 board.
81
70config TSI108_BRIDGE 82config TSI108_BRIDGE
71 bool 83 bool
72 select PCI 84 select PCI
@@ -113,4 +125,3 @@ config WII
113 help 125 help
114 Select WII if configuring for the Nintendo Wii. 126 Select WII if configuring for the Nintendo Wii.
115 More information at: <http://gc-linux.sourceforge.net/> 127 More information at: <http://gc-linux.sourceforge.net/>
116
diff --git a/arch/powerpc/platforms/embedded6xx/Makefile b/arch/powerpc/platforms/embedded6xx/Makefile
index 66c23e423f40..cdd48d402b93 100644
--- a/arch/powerpc/platforms/embedded6xx/Makefile
+++ b/arch/powerpc/platforms/embedded6xx/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_USBGECKO_UDBG) += usbgecko_udbg.o
11obj-$(CONFIG_GAMECUBE_COMMON) += flipper-pic.o 11obj-$(CONFIG_GAMECUBE_COMMON) += flipper-pic.o
12obj-$(CONFIG_GAMECUBE) += gamecube.o 12obj-$(CONFIG_GAMECUBE) += gamecube.o
13obj-$(CONFIG_WII) += wii.o hlwd-pic.o 13obj-$(CONFIG_WII) += wii.o hlwd-pic.o
14obj-$(CONFIG_MVME5100) += mvme5100.o
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index 6c03034dbbd3..c269caee58f9 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -15,7 +15,6 @@
15#define pr_fmt(fmt) DRV_MODULE_NAME ": " fmt 15#define pr_fmt(fmt) DRV_MODULE_NAME ": " fmt
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <linux/irq.h> 18#include <linux/irq.h>
20#include <linux/of.h> 19#include <linux/of.h>
21#include <linux/of_address.h> 20#include <linux/of_address.h>
diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c
new file mode 100644
index 000000000000..25e3bfb64efb
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c
@@ -0,0 +1,221 @@
1/*
2 * Board setup routines for the Motorola/Emerson MVME5100.
3 *
4 * Copyright 2013 CSC Australia Pty. Ltd.
5 *
6 * Based on earlier code by:
7 *
8 * Matt Porter, MontaVista Software Inc.
9 * Copyright 2001 MontaVista Software Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * Author: Stephen Chivers <schivers@csc.com>
17 *
18 */
19
20#include <linux/of_platform.h>
21
22#include <asm/i8259.h>
23#include <asm/pci-bridge.h>
24#include <asm/mpic.h>
25#include <asm/prom.h>
26#include <mm/mmu_decl.h>
27#include <asm/udbg.h>
28
29#define HAWK_MPIC_SIZE 0x00040000U
30#define MVME5100_PCI_MEM_OFFSET 0x00000000
31
32/* Board register addresses. */
33#define BOARD_STATUS_REG 0xfef88080
34#define BOARD_MODFAIL_REG 0xfef88090
35#define BOARD_MODRST_REG 0xfef880a0
36#define BOARD_TBEN_REG 0xfef880c0
37#define BOARD_SW_READ_REG 0xfef880e0
38#define BOARD_GEO_ADDR_REG 0xfef880e8
39#define BOARD_EXT_FEATURE1_REG 0xfef880f0
40#define BOARD_EXT_FEATURE2_REG 0xfef88100
41
42static phys_addr_t pci_membase;
43static u_char *restart;
44
45static void mvme5100_8259_cascade(unsigned int irq, struct irq_desc *desc)
46{
47 struct irq_chip *chip = irq_desc_get_chip(desc);
48 unsigned int cascade_irq = i8259_irq();
49
50 if (cascade_irq != NO_IRQ)
51 generic_handle_irq(cascade_irq);
52
53 chip->irq_eoi(&desc->irq_data);
54}
55
56static void __init mvme5100_pic_init(void)
57{
58 struct mpic *mpic;
59 struct device_node *np;
60 struct device_node *cp = NULL;
61 unsigned int cirq;
62 unsigned long intack = 0;
63 const u32 *prop = NULL;
64
65 np = of_find_node_by_type(NULL, "open-pic");
66 if (!np) {
67 pr_err("Could not find open-pic node\n");
68 return;
69 }
70
71 mpic = mpic_alloc(np, pci_membase, 0, 16, 256, " OpenPIC ");
72
73 BUG_ON(mpic == NULL);
74 of_node_put(np);
75
76 mpic_assign_isu(mpic, 0, pci_membase + 0x10000);
77
78 mpic_init(mpic);
79
80 cp = of_find_compatible_node(NULL, NULL, "chrp,iic");
81 if (cp == NULL) {
82 pr_warn("mvme5100_pic_init: couldn't find i8259\n");
83 return;
84 }
85
86 cirq = irq_of_parse_and_map(cp, 0);
87 if (cirq == NO_IRQ) {
88 pr_warn("mvme5100_pic_init: no cascade interrupt?\n");
89 return;
90 }
91
92 np = of_find_compatible_node(NULL, "pci", "mpc10x-pci");
93 if (np) {
94 prop = of_get_property(np, "8259-interrupt-acknowledge", NULL);
95
96 if (prop)
97 intack = prop[0];
98
99 of_node_put(np);
100 }
101
102 if (intack)
103 pr_debug("mvme5100_pic_init: PCI 8259 intack at 0x%016lx\n",
104 intack);
105
106 i8259_init(cp, intack);
107 of_node_put(cp);
108 irq_set_chained_handler(cirq, mvme5100_8259_cascade);
109}
110
111static int __init mvme5100_add_bridge(struct device_node *dev)
112{
113 const int *bus_range;
114 int len;
115 struct pci_controller *hose;
116 unsigned short devid;
117
118 pr_info("Adding PCI host bridge %s\n", dev->full_name);
119
120 bus_range = of_get_property(dev, "bus-range", &len);
121
122 hose = pcibios_alloc_controller(dev);
123 if (hose == NULL)
124 return -ENOMEM;
125
126 hose->first_busno = bus_range ? bus_range[0] : 0;
127 hose->last_busno = bus_range ? bus_range[1] : 0xff;
128
129 setup_indirect_pci(hose, 0xfe000cf8, 0xfe000cfc, 0);
130
131 pci_process_bridge_OF_ranges(hose, dev, 1);
132
133 early_read_config_word(hose, 0, 0, PCI_DEVICE_ID, &devid);
134
135 if (devid != PCI_DEVICE_ID_MOTOROLA_HAWK) {
136 pr_err("HAWK PHB not present?\n");
137 return 0;
138 }
139
140 early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_1, &pci_membase);
141
142 if (pci_membase == 0) {
143 pr_err("HAWK PHB mibar not correctly set?\n");
144 return 0;
145 }
146
147 pr_info("mvme5100_pic_init: pci_membase: %x\n", pci_membase);
148
149 return 0;
150}
151
152static struct of_device_id mvme5100_of_bus_ids[] __initdata = {
153 { .compatible = "hawk-bridge", },
154 {},
155};
156
157/*
158 * Setup the architecture
159 */
160static void __init mvme5100_setup_arch(void)
161{
162 struct device_node *np;
163
164 if (ppc_md.progress)
165 ppc_md.progress("mvme5100_setup_arch()", 0);
166
167 for_each_compatible_node(np, "pci", "hawk-pci")
168 mvme5100_add_bridge(np);
169
170 restart = ioremap(BOARD_MODRST_REG, 4);
171}
172
173
174static void mvme5100_show_cpuinfo(struct seq_file *m)
175{
176 seq_puts(m, "Vendor\t\t: Motorola/Emerson\n");
177 seq_puts(m, "Machine\t\t: MVME5100\n");
178}
179
180static void mvme5100_restart(char *cmd)
181{
182
183 local_irq_disable();
184 mtmsr(mfmsr() | MSR_IP);
185
186 out_8((u_char *) restart, 0x01);
187
188 while (1)
189 ;
190}
191
192/*
193 * Called very early, device-tree isn't unflattened
194 */
195static int __init mvme5100_probe(void)
196{
197 unsigned long root = of_get_flat_dt_root();
198
199 return of_flat_dt_is_compatible(root, "MVME5100");
200}
201
202static int __init probe_of_platform_devices(void)
203{
204
205 of_platform_bus_probe(NULL, mvme5100_of_bus_ids, NULL);
206 return 0;
207}
208
209machine_device_initcall(mvme5100, probe_of_platform_devices);
210
211define_machine(mvme5100) {
212 .name = "MVME5100",
213 .probe = mvme5100_probe,
214 .setup_arch = mvme5100_setup_arch,
215 .init_IRQ = mvme5100_pic_init,
216 .show_cpuinfo = mvme5100_show_cpuinfo,
217 .get_irq = mpic_get_irq,
218 .restart = mvme5100_restart,
219 .calibrate_decr = generic_calibrate_decr,
220 .progress = udbg_progress,
221};
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
index f3defd8a2806..aafa01ba062f 100644
--- a/arch/powerpc/platforms/pasemi/dma_lib.c
+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
@@ -18,7 +18,6 @@
18 */ 18 */
19 19
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/init.h>
22#include <linux/export.h> 21#include <linux/export.h>
23#include <linux/pci.h> 22#include <linux/pci.h>
24#include <linux/slab.h> 23#include <linux/slab.h>
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 7d2d036754b5..2e576f2ae442 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -138,8 +138,11 @@ static void iommu_table_iobmap_setup(void)
138 pr_debug(" -> %s\n", __func__); 138 pr_debug(" -> %s\n", __func__);
139 iommu_table_iobmap.it_busno = 0; 139 iommu_table_iobmap.it_busno = 0;
140 iommu_table_iobmap.it_offset = 0; 140 iommu_table_iobmap.it_offset = 0;
141 iommu_table_iobmap.it_page_shift = IOBMAP_PAGE_SHIFT;
142
141 /* it_size is in number of entries */ 143 /* it_size is in number of entries */
142 iommu_table_iobmap.it_size = 0x80000000 >> IOBMAP_PAGE_SHIFT; 144 iommu_table_iobmap.it_size =
145 0x80000000 >> iommu_table_iobmap.it_page_shift;
143 146
144 /* Initialize the common IOMMU code */ 147 /* Initialize the common IOMMU code */
145 iommu_table_iobmap.it_base = (unsigned long)iob_l2_base; 148 iommu_table_iobmap.it_base = (unsigned long)iob_l2_base;
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c
index d588e48dff74..43075081721f 100644
--- a/arch/powerpc/platforms/powermac/pfunc_core.c
+++ b/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -5,7 +5,6 @@
5 * FIXME: LOCKING !!! 5 * FIXME: LOCKING !!!
6 */ 6 */
7 7
8#include <linux/init.h>
9#include <linux/delay.h> 8#include <linux/delay.h>
10#include <linux/kernel.h> 9#include <linux/kernel.h>
11#include <linux/spinlock.h> 10#include <linux/spinlock.h>
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 9fced3f6d2dc..895e8a20a3fc 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -13,11 +13,6 @@ config PPC_POWERNV
13 select ARCH_RANDOM 13 select ARCH_RANDOM
14 default y 14 default y
15 15
16config POWERNV_MSI
17 bool "Support PCI MSI on PowerNV platform"
18 depends on PCI_MSI
19 default y
20
21config PPC_POWERNV_RTAS 16config PPC_POWERNV_RTAS
22 depends on PPC_POWERNV 17 depends on PPC_POWERNV
23 bool "Support for RTAS based PowerNV platforms such as BML" 18 bool "Support for RTAS based PowerNV platforms such as BML"
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 873fa1370dc4..8d767fde5a6a 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_SMP) += smp.o
6obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o 6obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o
7obj-$(CONFIG_EEH) += eeh-ioda.o eeh-powernv.o 7obj-$(CONFIG_EEH) += eeh-ioda.o eeh-powernv.o
8obj-$(CONFIG_PPC_SCOM) += opal-xscom.o 8obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
9obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index d7ddcee7feb8..e1e71618b70c 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -14,7 +14,6 @@
14#include <linux/bootmem.h> 14#include <linux/bootmem.h>
15#include <linux/debugfs.h> 15#include <linux/debugfs.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/init.h>
18#include <linux/io.h> 17#include <linux/io.h>
19#include <linux/irq.h> 18#include <linux/irq.h>
20#include <linux/kernel.h> 19#include <linux/kernel.h>
@@ -578,11 +577,8 @@ static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
578 return -EIO; 577 return -EIO;
579 } 578 }
580 579
581 /* 580 /* The PHB diag-data is always indicative */
582 * FIXME: We probably need log the error in somewhere. 581 pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
583 * Lets make it up in future.
584 */
585 /* pr_info("%s", phb->diag.blob); */
586 582
587 spin_unlock_irqrestore(&phb->lock, flags); 583 spin_unlock_irqrestore(&phb->lock, flags);
588 584
@@ -670,143 +666,9 @@ static void ioda_eeh_hub_diag(struct pci_controller *hose)
670 } 666 }
671} 667}
672 668
673static void ioda_eeh_p7ioc_phb_diag(struct pci_controller *hose,
674 struct OpalIoPhbErrorCommon *common)
675{
676 struct OpalIoP7IOCPhbErrorData *data;
677 int i;
678
679 data = (struct OpalIoP7IOCPhbErrorData *)common;
680
681 pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n\n",
682 hose->global_number, common->version);
683
684 pr_info(" brdgCtl: %08x\n", data->brdgCtl);
685
686 pr_info(" portStatusReg: %08x\n", data->portStatusReg);
687 pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
688 pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
689
690 pr_info(" deviceStatus: %08x\n", data->deviceStatus);
691 pr_info(" slotStatus: %08x\n", data->slotStatus);
692 pr_info(" linkStatus: %08x\n", data->linkStatus);
693 pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
694 pr_info(" devSecStatus: %08x\n", data->devSecStatus);
695
696 pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
697 pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
698 pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
699 pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
700 pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
701 pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
702 pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
703 pr_info(" sourceId: %08x\n", data->sourceId);
704
705 pr_info(" errorClass: %016llx\n", data->errorClass);
706 pr_info(" correlator: %016llx\n", data->correlator);
707 pr_info(" p7iocPlssr: %016llx\n", data->p7iocPlssr);
708 pr_info(" p7iocCsr: %016llx\n", data->p7iocCsr);
709 pr_info(" lemFir: %016llx\n", data->lemFir);
710 pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
711 pr_info(" lemWOF: %016llx\n", data->lemWOF);
712 pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
713 pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
714 pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
715 pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
716 pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
717 pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
718 pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
719 pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
720 pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
721 pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
722 pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
723 pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
724 pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
725 pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
726 pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
727 pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
728
729 for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
730 if ((data->pestA[i] >> 63) == 0 &&
731 (data->pestB[i] >> 63) == 0)
732 continue;
733
734 pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
735 pr_info(" PESTB: %016llx\n", data->pestB[i]);
736 }
737}
738
739static void ioda_eeh_phb3_phb_diag(struct pci_controller *hose,
740 struct OpalIoPhbErrorCommon *common)
741{
742 struct OpalIoPhb3ErrorData *data;
743 int i;
744
745 data = (struct OpalIoPhb3ErrorData*)common;
746 pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n\n",
747 hose->global_number, common->version);
748
749 pr_info(" brdgCtl: %08x\n", data->brdgCtl);
750
751 pr_info(" portStatusReg: %08x\n", data->portStatusReg);
752 pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
753 pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
754
755 pr_info(" deviceStatus: %08x\n", data->deviceStatus);
756 pr_info(" slotStatus: %08x\n", data->slotStatus);
757 pr_info(" linkStatus: %08x\n", data->linkStatus);
758 pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
759 pr_info(" devSecStatus: %08x\n", data->devSecStatus);
760
761 pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
762 pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
763 pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
764 pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
765 pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
766 pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
767 pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
768 pr_info(" sourceId: %08x\n", data->sourceId);
769 pr_info(" errorClass: %016llx\n", data->errorClass);
770 pr_info(" correlator: %016llx\n", data->correlator);
771 pr_info(" nFir: %016llx\n", data->nFir);
772 pr_info(" nFirMask: %016llx\n", data->nFirMask);
773 pr_info(" nFirWOF: %016llx\n", data->nFirWOF);
774 pr_info(" PhbPlssr: %016llx\n", data->phbPlssr);
775 pr_info(" PhbCsr: %016llx\n", data->phbCsr);
776 pr_info(" lemFir: %016llx\n", data->lemFir);
777 pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
778 pr_info(" lemWOF: %016llx\n", data->lemWOF);
779 pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
780 pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
781 pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
782 pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
783 pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
784 pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
785 pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
786 pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
787 pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
788 pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
789 pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
790 pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
791 pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
792 pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
793 pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
794 pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
795
796 for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
797 if ((data->pestA[i] >> 63) == 0 &&
798 (data->pestB[i] >> 63) == 0)
799 continue;
800
801 pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
802 pr_info(" PESTB: %016llx\n", data->pestB[i]);
803 }
804}
805
806static void ioda_eeh_phb_diag(struct pci_controller *hose) 669static void ioda_eeh_phb_diag(struct pci_controller *hose)
807{ 670{
808 struct pnv_phb *phb = hose->private_data; 671 struct pnv_phb *phb = hose->private_data;
809 struct OpalIoPhbErrorCommon *common;
810 long rc; 672 long rc;
811 673
812 rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob, 674 rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
@@ -817,18 +679,7 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose)
817 return; 679 return;
818 } 680 }
819 681
820 common = (struct OpalIoPhbErrorCommon *)phb->diag.blob; 682 pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
821 switch (common->ioType) {
822 case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
823 ioda_eeh_p7ioc_phb_diag(hose, common);
824 break;
825 case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
826 ioda_eeh_phb3_phb_diag(hose, common);
827 break;
828 default:
829 pr_warning("%s: Unrecognized I/O chip %d\n",
830 __func__, common->ioType);
831 }
832} 683}
833 684
834static int ioda_eeh_get_phb_pe(struct pci_controller *hose, 685static int ioda_eeh_get_phb_pe(struct pci_controller *hose,
@@ -862,11 +713,7 @@ static int ioda_eeh_get_pe(struct pci_controller *hose,
862 dev.phb = hose; 713 dev.phb = hose;
863 dev.pe_config_addr = pe_no; 714 dev.pe_config_addr = pe_no;
864 dev_pe = eeh_pe_get(&dev); 715 dev_pe = eeh_pe_get(&dev);
865 if (!dev_pe) { 716 if (!dev_pe) return -EEXIST;
866 pr_warning("%s: Can't find PE for PHB#%x - PE#%x\n",
867 __func__, hose->global_number, pe_no);
868 return -EEXIST;
869 }
870 717
871 *pe = dev_pe; 718 *pe = dev_pe;
872 return 0; 719 return 0;
@@ -884,12 +731,12 @@ static int ioda_eeh_get_pe(struct pci_controller *hose,
884 */ 731 */
885static int ioda_eeh_next_error(struct eeh_pe **pe) 732static int ioda_eeh_next_error(struct eeh_pe **pe)
886{ 733{
887 struct pci_controller *hose, *tmp; 734 struct pci_controller *hose;
888 struct pnv_phb *phb; 735 struct pnv_phb *phb;
889 u64 frozen_pe_no; 736 u64 frozen_pe_no;
890 u16 err_type, severity; 737 u16 err_type, severity;
891 long rc; 738 long rc;
892 int ret = 1; 739 int ret = EEH_NEXT_ERR_NONE;
893 740
894 /* 741 /*
895 * While running here, it's safe to purge the event queue. 742 * While running here, it's safe to purge the event queue.
@@ -899,7 +746,7 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
899 eeh_remove_event(NULL); 746 eeh_remove_event(NULL);
900 opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul); 747 opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
901 748
902 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 749 list_for_each_entry(hose, &hose_list, list_node) {
903 /* 750 /*
904 * If the subordinate PCI buses of the PHB has been 751 * If the subordinate PCI buses of the PHB has been
905 * removed, we needn't take care of it any more. 752 * removed, we needn't take care of it any more.
@@ -938,19 +785,19 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
938 switch (err_type) { 785 switch (err_type) {
939 case OPAL_EEH_IOC_ERROR: 786 case OPAL_EEH_IOC_ERROR:
940 if (severity == OPAL_EEH_SEV_IOC_DEAD) { 787 if (severity == OPAL_EEH_SEV_IOC_DEAD) {
941 list_for_each_entry_safe(hose, tmp, 788 list_for_each_entry(hose, &hose_list,
942 &hose_list, list_node) { 789 list_node) {
943 phb = hose->private_data; 790 phb = hose->private_data;
944 phb->eeh_state |= PNV_EEH_STATE_REMOVED; 791 phb->eeh_state |= PNV_EEH_STATE_REMOVED;
945 } 792 }
946 793
947 pr_err("EEH: dead IOC detected\n"); 794 pr_err("EEH: dead IOC detected\n");
948 ret = 4; 795 ret = EEH_NEXT_ERR_DEAD_IOC;
949 goto out;
950 } else if (severity == OPAL_EEH_SEV_INF) { 796 } else if (severity == OPAL_EEH_SEV_INF) {
951 pr_info("EEH: IOC informative error " 797 pr_info("EEH: IOC informative error "
952 "detected\n"); 798 "detected\n");
953 ioda_eeh_hub_diag(hose); 799 ioda_eeh_hub_diag(hose);
800 ret = EEH_NEXT_ERR_NONE;
954 } 801 }
955 802
956 break; 803 break;
@@ -962,37 +809,61 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
962 pr_err("EEH: dead PHB#%x detected\n", 809 pr_err("EEH: dead PHB#%x detected\n",
963 hose->global_number); 810 hose->global_number);
964 phb->eeh_state |= PNV_EEH_STATE_REMOVED; 811 phb->eeh_state |= PNV_EEH_STATE_REMOVED;
965 ret = 3; 812 ret = EEH_NEXT_ERR_DEAD_PHB;
966 goto out;
967 } else if (severity == OPAL_EEH_SEV_PHB_FENCED) { 813 } else if (severity == OPAL_EEH_SEV_PHB_FENCED) {
968 if (ioda_eeh_get_phb_pe(hose, pe)) 814 if (ioda_eeh_get_phb_pe(hose, pe))
969 break; 815 break;
970 816
971 pr_err("EEH: fenced PHB#%x detected\n", 817 pr_err("EEH: fenced PHB#%x detected\n",
972 hose->global_number); 818 hose->global_number);
973 ret = 2; 819 ret = EEH_NEXT_ERR_FENCED_PHB;
974 goto out;
975 } else if (severity == OPAL_EEH_SEV_INF) { 820 } else if (severity == OPAL_EEH_SEV_INF) {
976 pr_info("EEH: PHB#%x informative error " 821 pr_info("EEH: PHB#%x informative error "
977 "detected\n", 822 "detected\n",
978 hose->global_number); 823 hose->global_number);
979 ioda_eeh_phb_diag(hose); 824 ioda_eeh_phb_diag(hose);
825 ret = EEH_NEXT_ERR_NONE;
980 } 826 }
981 827
982 break; 828 break;
983 case OPAL_EEH_PE_ERROR: 829 case OPAL_EEH_PE_ERROR:
984 if (ioda_eeh_get_pe(hose, frozen_pe_no, pe)) 830 /*
985 break; 831 * If we can't find the corresponding PE, the
832 * PEEV / PEST would be messy. So we force an
833 * fenced PHB so that it can be recovered.
834 */
835 if (ioda_eeh_get_pe(hose, frozen_pe_no, pe)) {
836 if (!ioda_eeh_get_phb_pe(hose, pe)) {
837 pr_err("EEH: Escalated fenced PHB#%x "
838 "detected for PE#%llx\n",
839 hose->global_number,
840 frozen_pe_no);
841 ret = EEH_NEXT_ERR_FENCED_PHB;
842 } else {
843 ret = EEH_NEXT_ERR_NONE;
844 }
845 } else {
846 pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
847 (*pe)->addr, (*pe)->phb->global_number);
848 ret = EEH_NEXT_ERR_FROZEN_PE;
849 }
986 850
987 pr_err("EEH: Frozen PE#%x on PHB#%x detected\n", 851 break;
988 (*pe)->addr, (*pe)->phb->global_number); 852 default:
989 ret = 1; 853 pr_warn("%s: Unexpected error type %d\n",
990 goto out; 854 __func__, err_type);
991 } 855 }
856
857 /*
858 * If we have no errors on the specific PHB or only
859 * informative error there, we continue poking it.
860 * Otherwise, we need actions to be taken by upper
861 * layer.
862 */
863 if (ret > EEH_NEXT_ERR_INF)
864 break;
992 } 865 }
993 866
994 ret = 0;
995out:
996 return ret; 867 return ret;
997} 868}
998 869
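ioda_eeh_next_error() now reports through named EEH_NEXT_ERR_* codes instead of bare integers, and rather than jumping straight out it keeps walking hose_list while only informative (or no) errors turn up, stopping as soon as something needs real recovery (the ret > EEH_NEXT_ERR_INF test). The sketch below imitates that loop; the enum names come from the patch, but the ordering by severity is an assumption made to match that comparison, and the concrete values are illustrative.

    #include <stdio.h>

    /* Names from the patch; numeric order assumed so that anything above
     * "informative" requires action by the upper EEH layer. */
    enum eeh_next_err {
            EEH_NEXT_ERR_NONE = 0,
            EEH_NEXT_ERR_INF,
            EEH_NEXT_ERR_FROZEN_PE,
            EEH_NEXT_ERR_FENCED_PHB,
            EEH_NEXT_ERR_DEAD_PHB,
            EEH_NEXT_ERR_DEAD_IOC,
    };

    /* Pretend per-PHB poll results: two harmless events, then a frozen PE. */
    static const enum eeh_next_err phb_events[] = {
            EEH_NEXT_ERR_INF, EEH_NEXT_ERR_NONE, EEH_NEXT_ERR_FROZEN_PE, EEH_NEXT_ERR_INF,
    };

    int main(void)
    {
            enum eeh_next_err ret = EEH_NEXT_ERR_NONE;
            unsigned int i;

            for (i = 0; i < sizeof(phb_events) / sizeof(phb_events[0]); i++) {
                    ret = phb_events[i];
                    printf("PHB#%u -> %d\n", i, ret);
                    if (ret > EEH_NEXT_ERR_INF) /* needs recovery: stop polling */
                            break;
            }
            printf("returned %d to the EEH core\n", ret);
            return 0;
    }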
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 73b981438cc5..a79fddc5e74e 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -344,6 +344,27 @@ static int powernv_eeh_next_error(struct eeh_pe **pe)
344 return -EEXIST; 344 return -EEXIST;
345} 345}
346 346
347static int powernv_eeh_restore_config(struct device_node *dn)
348{
349 struct eeh_dev *edev = of_node_to_eeh_dev(dn);
350 struct pnv_phb *phb;
351 s64 ret;
352
353 if (!edev)
354 return -EEXIST;
355
356 phb = edev->phb->private_data;
357 ret = opal_pci_reinit(phb->opal_id,
358 OPAL_REINIT_PCI_DEV, edev->config_addr);
359 if (ret) {
360 pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
361 __func__, edev->config_addr, ret);
362 return -EIO;
363 }
364
365 return 0;
366}
367
347static struct eeh_ops powernv_eeh_ops = { 368static struct eeh_ops powernv_eeh_ops = {
348 .name = "powernv", 369 .name = "powernv",
349 .init = powernv_eeh_init, 370 .init = powernv_eeh_init,
@@ -359,7 +380,8 @@ static struct eeh_ops powernv_eeh_ops = {
359 .configure_bridge = powernv_eeh_configure_bridge, 380 .configure_bridge = powernv_eeh_configure_bridge,
360 .read_config = pnv_pci_cfg_read, 381 .read_config = pnv_pci_cfg_read,
361 .write_config = pnv_pci_cfg_write, 382 .write_config = pnv_pci_cfg_write,
362 .next_error = powernv_eeh_next_error 383 .next_error = powernv_eeh_next_error,
384 .restore_config = powernv_eeh_restore_config
363}; 385};
364 386
365/** 387/**
diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c
index d8773079ce19..714ef972406b 100644
--- a/arch/powerpc/platforms/powernv/opal-flash.c
+++ b/arch/powerpc/platforms/powernv/opal-flash.c
@@ -76,8 +76,8 @@
76/* Validate buffer size */ 76/* Validate buffer size */
77#define VALIDATE_BUF_SIZE 4096 77#define VALIDATE_BUF_SIZE 4096
78 78
79/* XXX: Assume candidate image size is <= 256MB */ 79/* XXX: Assume candidate image size is <= 1GB */
80#define MAX_IMAGE_SIZE 0x10000000 80#define MAX_IMAGE_SIZE 0x40000000
81 81
82/* Flash sg list version */ 82/* Flash sg list version */
83#define SG_LIST_VERSION (1UL) 83#define SG_LIST_VERSION (1UL)
@@ -103,27 +103,6 @@ struct image_header_t {
103 uint32_t size; 103 uint32_t size;
104}; 104};
105 105
106/* Scatter/gather entry */
107struct opal_sg_entry {
108 void *data;
109 long length;
110};
111
112/* We calculate number of entries based on PAGE_SIZE */
113#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
114
115/*
116 * This struct is very similar but not identical to that
117 * needed by the opal flash update. All we need to do for
118 * opal is rewrite num_entries into a version/length and
119 * translate the pointers to absolute.
120 */
121struct opal_sg_list {
122 unsigned long num_entries;
123 struct opal_sg_list *next;
124 struct opal_sg_entry entry[SG_ENTRIES_PER_NODE];
125};
126
127struct validate_flash_t { 106struct validate_flash_t {
128 int status; /* Return status */ 107 int status; /* Return status */
129 void *buf; /* Candidate image buffer */ 108 void *buf; /* Candidate image buffer */
@@ -333,7 +312,7 @@ static struct opal_sg_list *image_data_to_sglist(void)
333 addr = image_data.data; 312 addr = image_data.data;
334 size = image_data.size; 313 size = image_data.size;
335 314
336 sg1 = kzalloc((sizeof(struct opal_sg_list)), GFP_KERNEL); 315 sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
337 if (!sg1) 316 if (!sg1)
338 return NULL; 317 return NULL;
339 318
@@ -351,8 +330,7 @@ static struct opal_sg_list *image_data_to_sglist(void)
351 330
352 sg1->num_entries++; 331 sg1->num_entries++;
353 if (sg1->num_entries >= SG_ENTRIES_PER_NODE) { 332 if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
354 sg1->next = kzalloc((sizeof(struct opal_sg_list)), 333 sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
355 GFP_KERNEL);
356 if (!sg1->next) { 334 if (!sg1->next) {
357 pr_err("%s : Failed to allocate memory\n", 335 pr_err("%s : Failed to allocate memory\n",
358 __func__); 336 __func__);
@@ -402,7 +380,10 @@ static int opal_flash_update(int op)
402 else 380 else
403 sg->next = NULL; 381 sg->next = NULL;
404 382
405 /* Make num_entries into the version/length field */ 383 /*
384 * Convert num_entries to version/length format
385 * to satisfy OPAL.
386 */
406 sg->num_entries = (SG_LIST_VERSION << 56) | 387 sg->num_entries = (SG_LIST_VERSION << 56) |
407 (sg->num_entries * sizeof(struct opal_sg_entry) + 16); 388 (sg->num_entries * sizeof(struct opal_sg_entry) + 16);
408 } 389 }
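
opal_flash_update() above folds the scatter/gather entry count into a combined version/length word before handing the list to firmware. Below is a small userspace sketch of that packing, assuming the 16-byte node header and 16-byte entry size implied by the hunk; the entry count is illustrative.

#include <stdio.h>

#define SG_LIST_VERSION 1ULL
struct opal_sg_entry { void *data; long length; };	/* 16 bytes on 64-bit */

int main(void)
{
	unsigned long long num_entries = 3;	/* assumed example entry count */
	unsigned long long hdr;

	/* Top byte carries the list version; the rest holds the byte length
	 * of the 16-byte node header plus the entry array, as above. */
	hdr = (SG_LIST_VERSION << 56) |
	      (num_entries * sizeof(struct opal_sg_entry) + 16);

	printf("packed num_entries word: 0x%llx\n", hdr);
	return 0;
}
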
diff --git a/arch/powerpc/platforms/powernv/opal-memory-errors.c b/arch/powerpc/platforms/powernv/opal-memory-errors.c
new file mode 100644
index 000000000000..ec4132239cdf
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-memory-errors.c
@@ -0,0 +1,146 @@
1/*
2 * OPAL asynchronous memory error handling support in PowerNV.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright 2013 IBM Corporation
19 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
20 */
21
22#undef DEBUG
23
24#include <linux/kernel.h>
25#include <linux/init.h>
26#include <linux/of.h>
27#include <linux/mm.h>
28#include <linux/slab.h>
29
30#include <asm/opal.h>
31#include <asm/cputable.h>
32
33static int opal_mem_err_nb_init;
34static LIST_HEAD(opal_memory_err_list);
35static DEFINE_SPINLOCK(opal_mem_err_lock);
36
37struct OpalMsgNode {
38 struct list_head list;
39 struct opal_msg msg;
40};
41
42static void handle_memory_error_event(struct OpalMemoryErrorData *merr_evt)
43{
44 uint64_t paddr_start, paddr_end;
45
 46	pr_debug("%s: Retrieved memory error event, type: 0x%x\n",
47 __func__, merr_evt->type);
48 switch (merr_evt->type) {
49 case OPAL_MEM_ERR_TYPE_RESILIENCE:
50 paddr_start = merr_evt->u.resilience.physical_address_start;
51 paddr_end = merr_evt->u.resilience.physical_address_end;
52 break;
53 case OPAL_MEM_ERR_TYPE_DYN_DALLOC:
54 paddr_start = merr_evt->u.dyn_dealloc.physical_address_start;
55 paddr_end = merr_evt->u.dyn_dealloc.physical_address_end;
56 break;
57 default:
58 return;
59 }
60
61 for (; paddr_start < paddr_end; paddr_start += PAGE_SIZE) {
62 memory_failure(paddr_start >> PAGE_SHIFT, 0, 0);
63 }
64}
65
66static void handle_memory_error(void)
67{
68 unsigned long flags;
69 struct OpalMemoryErrorData *merr_evt;
70 struct OpalMsgNode *msg_node;
71
72 spin_lock_irqsave(&opal_mem_err_lock, flags);
73 while (!list_empty(&opal_memory_err_list)) {
74 msg_node = list_entry(opal_memory_err_list.next,
75 struct OpalMsgNode, list);
76 list_del(&msg_node->list);
77 spin_unlock_irqrestore(&opal_mem_err_lock, flags);
78
79 merr_evt = (struct OpalMemoryErrorData *)
80 &msg_node->msg.params[0];
81 handle_memory_error_event(merr_evt);
82 kfree(msg_node);
83 spin_lock_irqsave(&opal_mem_err_lock, flags);
84 }
85 spin_unlock_irqrestore(&opal_mem_err_lock, flags);
86}
87
88static void mem_error_handler(struct work_struct *work)
89{
90 handle_memory_error();
91}
92
93static DECLARE_WORK(mem_error_work, mem_error_handler);
94
95/*
96 * opal_memory_err_event - notifier handler that queues up the opal message
 97 * to be processed later.
98 */
99static int opal_memory_err_event(struct notifier_block *nb,
100 unsigned long msg_type, void *msg)
101{
102 unsigned long flags;
103 struct OpalMsgNode *msg_node;
104
105 if (msg_type != OPAL_MSG_MEM_ERR)
106 return 0;
107
108 msg_node = kzalloc(sizeof(*msg_node), GFP_ATOMIC);
109 if (!msg_node) {
110		pr_err("MEMORY_ERROR: out of memory, Opal message event not "
111 "handled\n");
112 return -ENOMEM;
113 }
114 memcpy(&msg_node->msg, msg, sizeof(struct opal_msg));
115
116 spin_lock_irqsave(&opal_mem_err_lock, flags);
117 list_add(&msg_node->list, &opal_memory_err_list);
118 spin_unlock_irqrestore(&opal_mem_err_lock, flags);
119
120 schedule_work(&mem_error_work);
121 return 0;
122}
123
124static struct notifier_block opal_mem_err_nb = {
125 .notifier_call = opal_memory_err_event,
126 .next = NULL,
127 .priority = 0,
128};
129
130static int __init opal_mem_err_init(void)
131{
132 int ret;
133
134 if (!opal_mem_err_nb_init) {
135 ret = opal_message_notifier_register(
136 OPAL_MSG_MEM_ERR, &opal_mem_err_nb);
137 if (ret) {
138 pr_err("%s: Can't register OPAL event notifier (%d)\n",
139 __func__, ret);
140 return ret;
141 }
142 opal_mem_err_nb_init = 1;
143 }
144 return 0;
145}
146subsys_initcall(opal_mem_err_init);
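
handle_memory_error_event() above walks the reported physical address range page by page and feeds each PFN to memory_failure(). Below is a minimal userspace sketch of that walk; the 4K page shift and the address range are assumed for illustration (powerpc commonly runs with larger pages).

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed; illustrative only */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	uint64_t paddr_start = 0x200000;	/* assumed error range start */
	uint64_t paddr_end   = 0x203000;	/* assumed error range end   */

	/* Step through the range one page at a time, deriving the PFN that
	 * would be handed to the memory poison infrastructure. */
	for (; paddr_start < paddr_end; paddr_start += PAGE_SIZE)
		printf("would poison pfn 0x%llx\n",
		       (unsigned long long)(paddr_start >> PAGE_SHIFT));
	return 0;
}
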
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
index 7d07c7e80ec0..b1885db8fdf3 100644
--- a/arch/powerpc/platforms/powernv/opal-rtc.c
+++ b/arch/powerpc/platforms/powernv/opal-rtc.c
@@ -18,6 +18,7 @@
18 18
19#include <asm/opal.h> 19#include <asm/opal.h>
20#include <asm/firmware.h> 20#include <asm/firmware.h>
21#include <asm/machdep.h>
21 22
22static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm) 23static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm)
23{ 24{
@@ -48,8 +49,11 @@ unsigned long __init opal_get_boot_time(void)
48 else 49 else
49 mdelay(10); 50 mdelay(10);
50 } 51 }
51 if (rc != OPAL_SUCCESS) 52 if (rc != OPAL_SUCCESS) {
53 ppc_md.get_rtc_time = NULL;
54 ppc_md.set_rtc_time = NULL;
52 return 0; 55 return 0;
56 }
53 y_m_d = be32_to_cpu(__y_m_d); 57 y_m_d = be32_to_cpu(__y_m_d);
54 h_m_s_ms = be64_to_cpu(__h_m_s_ms); 58 h_m_s_ms = be64_to_cpu(__h_m_s_ms);
55 opal_to_tm(y_m_d, h_m_s_ms, &tm); 59 opal_to_tm(y_m_d, h_m_s_ms, &tm);
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index e7806504e976..3e8829c40fbb 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -126,3 +126,6 @@ OPAL_CALL(opal_return_cpu, OPAL_RETURN_CPU);
126OPAL_CALL(opal_validate_flash, OPAL_FLASH_VALIDATE); 126OPAL_CALL(opal_validate_flash, OPAL_FLASH_VALIDATE);
127OPAL_CALL(opal_manage_flash, OPAL_FLASH_MANAGE); 127OPAL_CALL(opal_manage_flash, OPAL_FLASH_MANAGE);
128OPAL_CALL(opal_update_flash, OPAL_FLASH_UPDATE); 128OPAL_CALL(opal_update_flash, OPAL_FLASH_UPDATE);
129OPAL_CALL(opal_get_msg, OPAL_GET_MSG);
130OPAL_CALL(opal_check_completion, OPAL_CHECK_ASYNC_COMPLETION);
131OPAL_CALL(opal_sync_host_reboot, OPAL_SYNC_HOST_REBOOT);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 1c798cd55372..65499adaecff 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -18,9 +18,12 @@
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/notifier.h> 19#include <linux/notifier.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/sched.h>
21#include <linux/kobject.h> 22#include <linux/kobject.h>
23#include <linux/delay.h>
22#include <asm/opal.h> 24#include <asm/opal.h>
23#include <asm/firmware.h> 25#include <asm/firmware.h>
26#include <asm/mce.h>
24 27
25#include "powernv.h" 28#include "powernv.h"
26 29
@@ -38,6 +41,7 @@ extern u64 opal_mc_secondary_handler[];
38static unsigned int *opal_irqs; 41static unsigned int *opal_irqs;
39static unsigned int opal_irq_count; 42static unsigned int opal_irq_count;
40static ATOMIC_NOTIFIER_HEAD(opal_notifier_head); 43static ATOMIC_NOTIFIER_HEAD(opal_notifier_head);
44static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
41static DEFINE_SPINLOCK(opal_notifier_lock); 45static DEFINE_SPINLOCK(opal_notifier_lock);
42static uint64_t last_notified_mask = 0x0ul; 46static uint64_t last_notified_mask = 0x0ul;
43static atomic_t opal_notifier_hold = ATOMIC_INIT(0); 47static atomic_t opal_notifier_hold = ATOMIC_INIT(0);
@@ -88,14 +92,10 @@ static int __init opal_register_exception_handlers(void)
88 if (!(powerpc_firmware_features & FW_FEATURE_OPAL)) 92 if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
89 return -ENODEV; 93 return -ENODEV;
90 94
91 /* Hookup some exception handlers. We use the fwnmi area at 0x7000 95 /* Hookup some exception handlers except machine check. We use the
92 * to provide the glue space to OPAL 96 * fwnmi area at 0x7000 to provide the glue space to OPAL
93 */ 97 */
94 glue = 0x7000; 98 glue = 0x7000;
95 opal_register_exception_handler(OPAL_MACHINE_CHECK_HANDLER,
96 __pa(opal_mc_secondary_handler[0]),
97 glue);
98 glue += 128;
99 opal_register_exception_handler(OPAL_HYPERVISOR_MAINTENANCE_HANDLER, 99 opal_register_exception_handler(OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
100 0, glue); 100 0, glue);
101 glue += 128; 101 glue += 128;
@@ -169,6 +169,95 @@ void opal_notifier_disable(void)
169 atomic_set(&opal_notifier_hold, 1); 169 atomic_set(&opal_notifier_hold, 1);
170} 170}
171 171
172/*
173 * Opal message notifier based on message type. Allow subscribers to get
174 * notified for a specific message type.
175 */
176int opal_message_notifier_register(enum OpalMessageType msg_type,
177 struct notifier_block *nb)
178{
179 if (!nb) {
180 pr_warning("%s: Invalid argument (%p)\n",
181 __func__, nb);
182 return -EINVAL;
183 }
184	if (msg_type >= OPAL_MSG_TYPE_MAX) {
185 pr_warning("%s: Invalid message type argument (%d)\n",
186 __func__, msg_type);
187 return -EINVAL;
188 }
189 return atomic_notifier_chain_register(
190 &opal_msg_notifier_head[msg_type], nb);
191}
192
193static void opal_message_do_notify(uint32_t msg_type, void *msg)
194{
195 /* notify subscribers */
196 atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
197 msg_type, msg);
198}
199
200static void opal_handle_message(void)
201{
202 s64 ret;
203 /*
204 * TODO: pre-allocate a message buffer depending on opal-msg-size
205 * value in /proc/device-tree.
206 */
207 static struct opal_msg msg;
208
209 ret = opal_get_msg(__pa(&msg), sizeof(msg));
210 /* No opal message pending. */
211 if (ret == OPAL_RESOURCE)
212 return;
213
214 /* check for errors. */
215 if (ret) {
216		pr_warning("%s: Failed to retrieve opal message, err=%lld\n",
217 __func__, ret);
218 return;
219 }
220
221 /* Sanity check */
222	if (msg.msg_type >= OPAL_MSG_TYPE_MAX) {
223 pr_warning("%s: Unknown message type: %u\n",
224 __func__, msg.msg_type);
225 return;
226 }
227 opal_message_do_notify(msg.msg_type, (void *)&msg);
228}
229
230static int opal_message_notify(struct notifier_block *nb,
231 unsigned long events, void *change)
232{
233 if (events & OPAL_EVENT_MSG_PENDING)
234 opal_handle_message();
235 return 0;
236}
237
238static struct notifier_block opal_message_nb = {
239 .notifier_call = opal_message_notify,
240 .next = NULL,
241 .priority = 0,
242};
243
244static int __init opal_message_init(void)
245{
246 int ret, i;
247
248 for (i = 0; i < OPAL_MSG_TYPE_MAX; i++)
249 ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]);
250
251 ret = opal_notifier_register(&opal_message_nb);
252 if (ret) {
253 pr_err("%s: Can't register OPAL event notifier (%d)\n",
254 __func__, ret);
255 return ret;
256 }
257 return 0;
258}
259early_initcall(opal_message_init);
260
172int opal_get_chars(uint32_t vtermno, char *buf, int count) 261int opal_get_chars(uint32_t vtermno, char *buf, int count)
173{ 262{
174 s64 rc; 263 s64 rc;
@@ -254,119 +343,62 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
254 return written; 343 return written;
255} 344}
256 345
346static int opal_recover_mce(struct pt_regs *regs,
347 struct machine_check_event *evt)
348{
349 int recovered = 0;
350 uint64_t ea = get_mce_fault_addr(evt);
351
352 if (!(regs->msr & MSR_RI)) {
353 /* If MSR_RI isn't set, we cannot recover */
354 recovered = 0;
355 } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
356 /* Platform corrected itself */
357 recovered = 1;
358 } else if (ea && !is_kernel_addr(ea)) {
359 /*
360 * Faulting address is not in kernel text. We should be fine.
361 * We need to find which process uses this address.
362 * For now, kill the task if we have received exception when
363 * in userspace.
364 *
366		 * TODO: Queue up this address for hwpoisoning later.
366 */
367 if (user_mode(regs) && !is_global_init(current)) {
368 _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
369 recovered = 1;
370 } else
371 recovered = 0;
372 } else if (user_mode(regs) && !is_global_init(current) &&
373 evt->severity == MCE_SEV_ERROR_SYNC) {
374 /*
375 * If we have received a synchronous error when in userspace
376 * kill the task.
377 */
378 _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
379 recovered = 1;
380 }
381 return recovered;
382}
383
257int opal_machine_check(struct pt_regs *regs) 384int opal_machine_check(struct pt_regs *regs)
258{ 385{
259 struct opal_machine_check_event *opal_evt = get_paca()->opal_mc_evt; 386 struct machine_check_event evt;
260 struct opal_machine_check_event evt; 387
261 const char *level, *sevstr, *subtype; 388 if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
262 static const char *opal_mc_ue_types[] = { 389 return 0;
263 "Indeterminate",
264 "Instruction fetch",
265 "Page table walk ifetch",
266 "Load/Store",
267 "Page table walk Load/Store",
268 };
269 static const char *opal_mc_slb_types[] = {
270 "Indeterminate",
271 "Parity",
272 "Multihit",
273 };
274 static const char *opal_mc_erat_types[] = {
275 "Indeterminate",
276 "Parity",
277 "Multihit",
278 };
279 static const char *opal_mc_tlb_types[] = {
280 "Indeterminate",
281 "Parity",
282 "Multihit",
283 };
284
285 /* Copy the event structure and release the original */
286 evt = *opal_evt;
287 opal_evt->in_use = 0;
288 390
289 /* Print things out */ 391 /* Print things out */
290 if (evt.version != OpalMCE_V1) { 392 if (evt.version != MCE_V1) {
291 pr_err("Machine Check Exception, Unknown event version %d !\n", 393 pr_err("Machine Check Exception, Unknown event version %d !\n",
292 evt.version); 394 evt.version);
293 return 0; 395 return 0;
294 } 396 }
295 switch(evt.severity) { 397 machine_check_print_event_info(&evt);
296 case OpalMCE_SEV_NO_ERROR:
297 level = KERN_INFO;
298 sevstr = "Harmless";
299 break;
300 case OpalMCE_SEV_WARNING:
301 level = KERN_WARNING;
302 sevstr = "";
303 break;
304 case OpalMCE_SEV_ERROR_SYNC:
305 level = KERN_ERR;
306 sevstr = "Severe";
307 break;
308 case OpalMCE_SEV_FATAL:
309 default:
310 level = KERN_ERR;
311 sevstr = "Fatal";
312 break;
313 }
314 398
315 printk("%s%s Machine check interrupt [%s]\n", level, sevstr, 399 if (opal_recover_mce(regs, &evt))
316 evt.disposition == OpalMCE_DISPOSITION_RECOVERED ? 400 return 1;
317 "Recovered" : "[Not recovered"); 401 return 0;
318 printk("%s Initiator: %s\n", level,
319 evt.initiator == OpalMCE_INITIATOR_CPU ? "CPU" : "Unknown");
320 switch(evt.error_type) {
321 case OpalMCE_ERROR_TYPE_UE:
322 subtype = evt.u.ue_error.ue_error_type <
323 ARRAY_SIZE(opal_mc_ue_types) ?
324 opal_mc_ue_types[evt.u.ue_error.ue_error_type]
325 : "Unknown";
326 printk("%s Error type: UE [%s]\n", level, subtype);
327 if (evt.u.ue_error.effective_address_provided)
328 printk("%s Effective address: %016llx\n",
329 level, evt.u.ue_error.effective_address);
330 if (evt.u.ue_error.physical_address_provided)
331 printk("%s Physial address: %016llx\n",
332 level, evt.u.ue_error.physical_address);
333 break;
334 case OpalMCE_ERROR_TYPE_SLB:
335 subtype = evt.u.slb_error.slb_error_type <
336 ARRAY_SIZE(opal_mc_slb_types) ?
337 opal_mc_slb_types[evt.u.slb_error.slb_error_type]
338 : "Unknown";
339 printk("%s Error type: SLB [%s]\n", level, subtype);
340 if (evt.u.slb_error.effective_address_provided)
341 printk("%s Effective address: %016llx\n",
342 level, evt.u.slb_error.effective_address);
343 break;
344 case OpalMCE_ERROR_TYPE_ERAT:
345 subtype = evt.u.erat_error.erat_error_type <
346 ARRAY_SIZE(opal_mc_erat_types) ?
347 opal_mc_erat_types[evt.u.erat_error.erat_error_type]
348 : "Unknown";
349 printk("%s Error type: ERAT [%s]\n", level, subtype);
350 if (evt.u.erat_error.effective_address_provided)
351 printk("%s Effective address: %016llx\n",
352 level, evt.u.erat_error.effective_address);
353 break;
354 case OpalMCE_ERROR_TYPE_TLB:
355 subtype = evt.u.tlb_error.tlb_error_type <
356 ARRAY_SIZE(opal_mc_tlb_types) ?
357 opal_mc_tlb_types[evt.u.tlb_error.tlb_error_type]
358 : "Unknown";
359 printk("%s Error type: TLB [%s]\n", level, subtype);
360 if (evt.u.tlb_error.effective_address_provided)
361 printk("%s Effective address: %016llx\n",
362 level, evt.u.tlb_error.effective_address);
363 break;
364 default:
365 case OpalMCE_ERROR_TYPE_UNKNOWN:
366 printk("%s Error type: Unknown\n", level);
367 break;
368 }
369 return evt.severity == OpalMCE_SEV_FATAL ? 0 : 1;
370} 402}
371 403
372static irqreturn_t opal_interrupt(int irq, void *data) 404static irqreturn_t opal_interrupt(int irq, void *data)
@@ -451,10 +483,25 @@ subsys_initcall(opal_init);
451void opal_shutdown(void) 483void opal_shutdown(void)
452{ 484{
453 unsigned int i; 485 unsigned int i;
486 long rc = OPAL_BUSY;
454 487
488 /* First free interrupts, which will also mask them */
455 for (i = 0; i < opal_irq_count; i++) { 489 for (i = 0; i < opal_irq_count; i++) {
456 if (opal_irqs[i]) 490 if (opal_irqs[i])
457 free_irq(opal_irqs[i], NULL); 491 free_irq(opal_irqs[i], NULL);
458 opal_irqs[i] = 0; 492 opal_irqs[i] = 0;
459 } 493 }
494
495 /*
496	 * Then sync with OPAL, which ensures that anything that can
497	 * potentially write to our memory has completed, such
498 * as an ongoing dump retrieval
499 */
500 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
501 rc = opal_sync_host_reboot();
502 if (rc == OPAL_BUSY)
503 opal_poll_events(NULL);
504 else
505 mdelay(10);
506 }
460} 507}
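
opal_shutdown() above now spins on the firmware sync call until it stops returning a busy status. Below is a minimal userspace sketch of that busy-retry idiom; the status codes and the fake sync call are stand-ins, not the real OPAL API.

#include <stdio.h>

/* Stand-in status codes; not the real OPAL values. */
enum { MY_OPAL_SUCCESS = 0, MY_OPAL_BUSY = -1000, MY_OPAL_BUSY_EVENT = -1001 };

static int attempts;

/* Stands in for the sync call: reports busy twice, then succeeds. */
static long fake_sync_host_reboot(void)
{
	return (++attempts < 3) ? MY_OPAL_BUSY : MY_OPAL_SUCCESS;
}

int main(void)
{
	long rc = MY_OPAL_BUSY;

	while (rc == MY_OPAL_BUSY || rc == MY_OPAL_BUSY_EVENT) {
		rc = fake_sync_host_reboot();
		/* The real loop polls OPAL events on busy and sleeps otherwise. */
	}
	printf("sync completed after %d attempts, rc=%ld\n", attempts, rc);
	return 0;
}
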
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 2c6d173842b2..7d6dcc6d5fa9 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -460,7 +460,7 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
460 return; 460 return;
461 461
462 pe = &phb->ioda.pe_array[pdn->pe_number]; 462 pe = &phb->ioda.pe_array[pdn->pe_number];
463 set_iommu_table_base(&pdev->dev, &pe->tce32_table); 463 set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
464} 464}
465 465
466static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) 466static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
@@ -468,7 +468,7 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
468 struct pci_dev *dev; 468 struct pci_dev *dev;
469 469
470 list_for_each_entry(dev, &bus->devices, bus_list) { 470 list_for_each_entry(dev, &bus->devices, bus_list) {
471 set_iommu_table_base(&dev->dev, &pe->tce32_table); 471 set_iommu_table_base_and_group(&dev->dev, &pe->tce32_table);
472 if (dev->subordinate) 472 if (dev->subordinate)
473 pnv_ioda_setup_bus_dma(pe, dev->subordinate); 473 pnv_ioda_setup_bus_dma(pe, dev->subordinate);
474 } 474 }
@@ -644,7 +644,7 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
644 iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number); 644 iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number);
645 645
646 if (pe->pdev) 646 if (pe->pdev)
647 set_iommu_table_base(&pe->pdev->dev, tbl); 647 set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
648 else 648 else
649 pnv_ioda_setup_bus_dma(pe, pe->pbus); 649 pnv_ioda_setup_bus_dma(pe, pe->pbus);
650 650
@@ -723,7 +723,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
723 iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number); 723 iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number);
724 724
725 if (pe->pdev) 725 if (pe->pdev)
726 set_iommu_table_base(&pe->pdev->dev, tbl); 726 set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
727 else 727 else
728 pnv_ioda_setup_bus_dma(pe, pe->pbus); 728 pnv_ioda_setup_bus_dma(pe, pe->pbus);
729 729
@@ -1144,7 +1144,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
1144{ 1144{
1145 struct pci_controller *hose; 1145 struct pci_controller *hose;
1146 struct pnv_phb *phb; 1146 struct pnv_phb *phb;
1147 unsigned long size, m32map_off, iomap_off, pemap_off; 1147 unsigned long size, m32map_off, pemap_off, iomap_off = 0;
1148 const __be64 *prop64; 1148 const __be64 *prop64;
1149 const __be32 *prop32; 1149 const __be32 *prop32;
1150 int len; 1150 int len;
@@ -1231,7 +1231,6 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
1231 size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long)); 1231 size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
1232 m32map_off = size; 1232 m32map_off = size;
1233 size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]); 1233 size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
1234 iomap_off = size;
1235 if (phb->type == PNV_PHB_IODA1) { 1234 if (phb->type == PNV_PHB_IODA1) {
1236 iomap_off = size; 1235 iomap_off = size;
1237 size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]); 1236 size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
index f8b4bd8afb2e..e3807d69393e 100644
--- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -92,7 +92,7 @@ static void pnv_pci_p5ioc2_dma_dev_setup(struct pnv_phb *phb,
92 pci_domain_nr(phb->hose->bus), phb->opal_id); 92 pci_domain_nr(phb->hose->bus), phb->opal_id);
93 } 93 }
94 94
95 set_iommu_table_base(&pdev->dev, &phb->p5ioc2.iommu_table); 95 set_iommu_table_base_and_group(&pdev->dev, &phb->p5ioc2.iommu_table);
96} 96}
97 97
98static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id, 98static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 4eb33a9ed532..b555ebc57ef5 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -124,77 +124,157 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
124} 124}
125#endif /* CONFIG_PCI_MSI */ 125#endif /* CONFIG_PCI_MSI */
126 126
127static void pnv_pci_dump_p7ioc_diag_data(struct pnv_phb *phb) 127static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
128 struct OpalIoPhbErrorCommon *common)
128{ 129{
129 struct OpalIoP7IOCPhbErrorData *data = &phb->diag.p7ioc; 130 struct OpalIoP7IOCPhbErrorData *data;
130 int i; 131 int i;
131 132
132 pr_info("PHB %d diagnostic data:\n", phb->hose->global_number); 133 data = (struct OpalIoP7IOCPhbErrorData *)common;
133 134 pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n\n",
134 pr_info(" brdgCtl = 0x%08x\n", data->brdgCtl); 135 hose->global_number, common->version);
135 136
136 pr_info(" portStatusReg = 0x%08x\n", data->portStatusReg); 137 pr_info(" brdgCtl: %08x\n", data->brdgCtl);
137 pr_info(" rootCmplxStatus = 0x%08x\n", data->rootCmplxStatus); 138
138 pr_info(" busAgentStatus = 0x%08x\n", data->busAgentStatus); 139 pr_info(" portStatusReg: %08x\n", data->portStatusReg);
139 140 pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
140 pr_info(" deviceStatus = 0x%08x\n", data->deviceStatus); 141 pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
141 pr_info(" slotStatus = 0x%08x\n", data->slotStatus); 142
142 pr_info(" linkStatus = 0x%08x\n", data->linkStatus); 143 pr_info(" deviceStatus: %08x\n", data->deviceStatus);
143 pr_info(" devCmdStatus = 0x%08x\n", data->devCmdStatus); 144 pr_info(" slotStatus: %08x\n", data->slotStatus);
144 pr_info(" devSecStatus = 0x%08x\n", data->devSecStatus); 145 pr_info(" linkStatus: %08x\n", data->linkStatus);
145 146 pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
146 pr_info(" rootErrorStatus = 0x%08x\n", data->rootErrorStatus); 147 pr_info(" devSecStatus: %08x\n", data->devSecStatus);
147 pr_info(" uncorrErrorStatus = 0x%08x\n", data->uncorrErrorStatus); 148
148 pr_info(" corrErrorStatus = 0x%08x\n", data->corrErrorStatus); 149 pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
149 pr_info(" tlpHdr1 = 0x%08x\n", data->tlpHdr1); 150 pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
150 pr_info(" tlpHdr2 = 0x%08x\n", data->tlpHdr2); 151 pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
151 pr_info(" tlpHdr3 = 0x%08x\n", data->tlpHdr3); 152 pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
152 pr_info(" tlpHdr4 = 0x%08x\n", data->tlpHdr4); 153 pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
153 pr_info(" sourceId = 0x%08x\n", data->sourceId); 154 pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
154 155 pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
155 pr_info(" errorClass = 0x%016llx\n", data->errorClass); 156 pr_info(" sourceId: %08x\n", data->sourceId);
156 pr_info(" correlator = 0x%016llx\n", data->correlator); 157 pr_info(" errorClass: %016llx\n", data->errorClass);
157 158 pr_info(" correlator: %016llx\n", data->correlator);
158 pr_info(" p7iocPlssr = 0x%016llx\n", data->p7iocPlssr); 159 pr_info(" p7iocPlssr: %016llx\n", data->p7iocPlssr);
159 pr_info(" p7iocCsr = 0x%016llx\n", data->p7iocCsr); 160 pr_info(" p7iocCsr: %016llx\n", data->p7iocCsr);
160 pr_info(" lemFir = 0x%016llx\n", data->lemFir); 161 pr_info(" lemFir: %016llx\n", data->lemFir);
161 pr_info(" lemErrorMask = 0x%016llx\n", data->lemErrorMask); 162 pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
162 pr_info(" lemWOF = 0x%016llx\n", data->lemWOF); 163 pr_info(" lemWOF: %016llx\n", data->lemWOF);
163 pr_info(" phbErrorStatus = 0x%016llx\n", data->phbErrorStatus); 164 pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
164 pr_info(" phbFirstErrorStatus = 0x%016llx\n", data->phbFirstErrorStatus); 165 pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
165 pr_info(" phbErrorLog0 = 0x%016llx\n", data->phbErrorLog0); 166 pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
166 pr_info(" phbErrorLog1 = 0x%016llx\n", data->phbErrorLog1); 167 pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
167 pr_info(" mmioErrorStatus = 0x%016llx\n", data->mmioErrorStatus); 168 pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
168 pr_info(" mmioFirstErrorStatus = 0x%016llx\n", data->mmioFirstErrorStatus); 169 pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
169 pr_info(" mmioErrorLog0 = 0x%016llx\n", data->mmioErrorLog0); 170 pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
170 pr_info(" mmioErrorLog1 = 0x%016llx\n", data->mmioErrorLog1); 171 pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
171 pr_info(" dma0ErrorStatus = 0x%016llx\n", data->dma0ErrorStatus); 172 pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
172 pr_info(" dma0FirstErrorStatus = 0x%016llx\n", data->dma0FirstErrorStatus); 173 pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
173 pr_info(" dma0ErrorLog0 = 0x%016llx\n", data->dma0ErrorLog0); 174 pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
174 pr_info(" dma0ErrorLog1 = 0x%016llx\n", data->dma0ErrorLog1); 175 pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
175 pr_info(" dma1ErrorStatus = 0x%016llx\n", data->dma1ErrorStatus); 176 pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
176 pr_info(" dma1FirstErrorStatus = 0x%016llx\n", data->dma1FirstErrorStatus); 177 pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
177 pr_info(" dma1ErrorLog0 = 0x%016llx\n", data->dma1ErrorLog0); 178 pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
178 pr_info(" dma1ErrorLog1 = 0x%016llx\n", data->dma1ErrorLog1); 179 pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
179 180
180 for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) { 181 for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
181 if ((data->pestA[i] >> 63) == 0 && 182 if ((data->pestA[i] >> 63) == 0 &&
182 (data->pestB[i] >> 63) == 0) 183 (data->pestB[i] >> 63) == 0)
183 continue; 184 continue;
184 pr_info(" PE[%3d] PESTA = 0x%016llx\n", i, data->pestA[i]); 185
185 pr_info(" PESTB = 0x%016llx\n", data->pestB[i]); 186 pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
187 pr_info(" PESTB: %016llx\n", data->pestB[i]);
188 }
189}
190
191static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
192 struct OpalIoPhbErrorCommon *common)
193{
194 struct OpalIoPhb3ErrorData *data;
195 int i;
196
197 data = (struct OpalIoPhb3ErrorData*)common;
198 pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n\n",
199 hose->global_number, common->version);
200
201 pr_info(" brdgCtl: %08x\n", data->brdgCtl);
202
203 pr_info(" portStatusReg: %08x\n", data->portStatusReg);
204 pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
205 pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
206
207 pr_info(" deviceStatus: %08x\n", data->deviceStatus);
208 pr_info(" slotStatus: %08x\n", data->slotStatus);
209 pr_info(" linkStatus: %08x\n", data->linkStatus);
210 pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
211 pr_info(" devSecStatus: %08x\n", data->devSecStatus);
212
213 pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
214 pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
215 pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
216 pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
217 pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
218 pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
219 pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
220 pr_info(" sourceId: %08x\n", data->sourceId);
221 pr_info(" errorClass: %016llx\n", data->errorClass);
222 pr_info(" correlator: %016llx\n", data->correlator);
223
224 pr_info(" nFir: %016llx\n", data->nFir);
225 pr_info(" nFirMask: %016llx\n", data->nFirMask);
226 pr_info(" nFirWOF: %016llx\n", data->nFirWOF);
227 pr_info(" PhbPlssr: %016llx\n", data->phbPlssr);
228 pr_info(" PhbCsr: %016llx\n", data->phbCsr);
229 pr_info(" lemFir: %016llx\n", data->lemFir);
230 pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
231 pr_info(" lemWOF: %016llx\n", data->lemWOF);
232 pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
233 pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
234 pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
235 pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
236 pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
237 pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
238 pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
239 pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
240 pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
241 pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
242 pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
243 pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
244 pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
245 pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
246 pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
247 pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
248
249 for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
250 if ((data->pestA[i] >> 63) == 0 &&
251 (data->pestB[i] >> 63) == 0)
252 continue;
253
254 pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
255 pr_info(" PESTB: %016llx\n", data->pestB[i]);
186 } 256 }
187} 257}
188 258
189static void pnv_pci_dump_phb_diag_data(struct pnv_phb *phb) 259void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
260 unsigned char *log_buff)
190{ 261{
191 switch(phb->model) { 262 struct OpalIoPhbErrorCommon *common;
192 case PNV_PHB_MODEL_P7IOC: 263
193 pnv_pci_dump_p7ioc_diag_data(phb); 264 if (!hose || !log_buff)
265 return;
266
267 common = (struct OpalIoPhbErrorCommon *)log_buff;
268 switch (common->ioType) {
269 case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
270 pnv_pci_dump_p7ioc_diag_data(hose, common);
271 break;
272 case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
273 pnv_pci_dump_phb3_diag_data(hose, common);
194 break; 274 break;
195 default: 275 default:
196 pr_warning("PCI %d: Can't decode this PHB diag data\n", 276 pr_warn("%s: Unrecognized ioType %d\n",
197 phb->hose->global_number); 277 __func__, common->ioType);
198 } 278 }
199} 279}
200 280
@@ -222,7 +302,7 @@ static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
222 * with the normal errors generated when probing empty slots 302 * with the normal errors generated when probing empty slots
223 */ 303 */
224 if (has_diag) 304 if (has_diag)
225 pnv_pci_dump_phb_diag_data(phb); 305 pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);
226 else 306 else
227 pr_warning("PCI %d: No diag data available\n", 307 pr_warning("PCI %d: No diag data available\n",
228 phb->hose->global_number); 308 phb->hose->global_number);
@@ -484,7 +564,8 @@ void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
484{ 564{
485 tbl->it_blocksize = 16; 565 tbl->it_blocksize = 16;
486 tbl->it_base = (unsigned long)tce_mem; 566 tbl->it_base = (unsigned long)tce_mem;
487 tbl->it_offset = dma_offset >> IOMMU_PAGE_SHIFT; 567 tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
568 tbl->it_offset = dma_offset >> tbl->it_page_shift;
488 tbl->it_index = 0; 569 tbl->it_index = 0;
489 tbl->it_size = tce_size >> 3; 570 tbl->it_size = tce_size >> 3;
490 tbl->it_busno = 0; 571 tbl->it_busno = 0;
@@ -536,7 +617,7 @@ static void pnv_pci_dma_fallback_setup(struct pci_controller *hose,
536 pdn->iommu_table = pnv_pci_setup_bml_iommu(hose); 617 pdn->iommu_table = pnv_pci_setup_bml_iommu(hose);
537 if (!pdn->iommu_table) 618 if (!pdn->iommu_table)
538 return; 619 return;
539 set_iommu_table_base(&pdev->dev, pdn->iommu_table); 620 set_iommu_table_base_and_group(&pdev->dev, pdn->iommu_table);
540} 621}
541 622
542static void pnv_pci_dma_dev_setup(struct pci_dev *pdev) 623static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
@@ -657,3 +738,32 @@ void __init pnv_pci_init(void)
657 ppc_md.teardown_msi_irqs = pnv_teardown_msi_irqs; 738 ppc_md.teardown_msi_irqs = pnv_teardown_msi_irqs;
658#endif 739#endif
659} 740}
741
742static int tce_iommu_bus_notifier(struct notifier_block *nb,
743 unsigned long action, void *data)
744{
745 struct device *dev = data;
746
747 switch (action) {
748 case BUS_NOTIFY_ADD_DEVICE:
749 return iommu_add_device(dev);
750 case BUS_NOTIFY_DEL_DEVICE:
751 if (dev->iommu_group)
752 iommu_del_device(dev);
753 return 0;
754 default:
755 return 0;
756 }
757}
758
759static struct notifier_block tce_iommu_bus_nb = {
760 .notifier_call = tce_iommu_bus_notifier,
761};
762
763static int __init tce_iommu_bus_notifier_init(void)
764{
765 bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
766 return 0;
767}
768
769subsys_initcall_sync(tce_iommu_bus_notifier_init);
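
pnv_pci_dump_phb_diag_data() above inspects only the ioType field of a common leading struct before casting to the chip-specific diag layout. Below is a small userspace sketch of that common-header dispatch pattern; the struct layouts, tag values and fields are illustrative stand-ins, not the OPAL definitions.

#include <stdint.h>
#include <stdio.h>

enum { DIAG_TYPE_P7IOC = 1, DIAG_TYPE_PHB3 = 2 };	/* assumed tags */

struct diag_common { uint32_t version; uint32_t ioType; };
struct diag_p7ioc  { struct diag_common common; uint32_t brdgCtl; };
struct diag_phb3   { struct diag_common common; uint64_t nFir; };

/* Dispatch on the shared header, then cast to the specific layout. */
static void dump(struct diag_common *common)
{
	switch (common->ioType) {
	case DIAG_TYPE_P7IOC:
		printf("P7IOC brdgCtl: %08x\n",
		       ((struct diag_p7ioc *)common)->brdgCtl);
		break;
	case DIAG_TYPE_PHB3:
		printf("PHB3 nFir: %016llx\n",
		       (unsigned long long)((struct diag_phb3 *)common)->nFir);
		break;
	default:
		printf("unrecognized ioType %u\n", common->ioType);
	}
}

int main(void)
{
	struct diag_phb3 d = { { 1, DIAG_TYPE_PHB3 }, 0xdeadbeefULL };

	dump(&d.common);
	return 0;
}
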
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 1ed8d5f40f5a..13f1942a9a5f 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -176,6 +176,7 @@ struct pnv_phb {
176 union { 176 union {
177 unsigned char blob[PNV_PCI_DIAG_BUF_SIZE]; 177 unsigned char blob[PNV_PCI_DIAG_BUF_SIZE];
178 struct OpalIoP7IOCPhbErrorData p7ioc; 178 struct OpalIoP7IOCPhbErrorData p7ioc;
179 struct OpalIoPhb3ErrorData phb3;
179 struct OpalIoP7IOCErrorData hub_diag; 180 struct OpalIoP7IOCErrorData hub_diag;
180 } diag; 181 } diag;
181 182
@@ -186,6 +187,8 @@ extern struct pci_ops pnv_pci_ops;
186extern struct pnv_eeh_ops ioda_eeh_ops; 187extern struct pnv_eeh_ops ioda_eeh_ops;
187#endif 188#endif
188 189
190void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
191 unsigned char *log_buff);
189int pnv_pci_cfg_read(struct device_node *dn, 192int pnv_pci_cfg_read(struct device_node *dn,
190 int where, int size, u32 *val); 193 int where, int size, u32 *val);
191int pnv_pci_cfg_write(struct device_node *dn, 194int pnv_pci_cfg_write(struct device_node *dn,
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 19884b2a51b4..a932feb2901c 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -145,8 +145,10 @@ static void pnv_shutdown(void)
145 /* Let the PCI code clear up IODA tables */ 145 /* Let the PCI code clear up IODA tables */
146 pnv_pci_shutdown(); 146 pnv_pci_shutdown();
147 147
148 /* And unregister all OPAL interrupts so they don't fire 148 /*
149 * up while we kexec 149 * Stop OPAL activity: Unregister all OPAL interrupts so they
150 * don't fire up while we kexec and make sure all potentially
151 * DMA'ing ops are complete (such as dump retrieval).
150 */ 152 */
151 opal_shutdown(); 153 opal_shutdown();
152} 154}
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index e17fa1432d80..a0bca05e26b0 100644
--- a/arch/powerpc/platforms/ps3/spu.c
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -143,7 +143,7 @@ static void _dump_areas(unsigned int spe_id, unsigned long priv2,
143 pr_debug("%s:%d: shadow: %lxh\n", func, line, shadow); 143 pr_debug("%s:%d: shadow: %lxh\n", func, line, shadow);
144} 144}
145 145
146inline u64 ps3_get_spe_id(void *arg) 146u64 ps3_get_spe_id(void *arg)
147{ 147{
148 return spu_pdata(arg)->spe_id; 148 return spu_pdata(arg)->spe_id;
149} 149}
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 62b4f8025de0..e66643250fee 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -34,7 +34,7 @@ config PPC_SPLPAR
34 34
35config PSERIES_MSI 35config PSERIES_MSI
36 bool 36 bool
37 depends on PCI_MSI && EEH 37 depends on PCI_MSI && PPC_PSERIES && EEH
38 default y 38 default y
39 39
40config PSERIES_ENERGY 40config PSERIES_ENERGY
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 1e561bef459b..2d8bf15879fd 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -25,7 +25,6 @@
25#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/fs.h> 26#include <linux/fs.h>
27#include <linux/gfp.h> 27#include <linux/gfp.h>
28#include <linux/init.h>
29#include <linux/kthread.h> 28#include <linux/kthread.h>
30#include <linux/module.h> 29#include <linux/module.h>
31#include <linux/oom.h> 30#include <linux/oom.h>
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 5db66f1fbc26..7d61498e45c0 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -20,7 +20,6 @@
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */ 21 */
22 22
23#include <linux/init.h>
24#include <linux/slab.h> 23#include <linux/slab.h>
25#include <linux/debugfs.h> 24#include <linux/debugfs.h>
26#include <linux/spinlock.h> 25#include <linux/spinlock.h>
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index ccb633e077b1..9ef3cc8ebc11 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -689,7 +689,9 @@ static struct eeh_ops pseries_eeh_ops = {
689 .get_log = pseries_eeh_get_log, 689 .get_log = pseries_eeh_get_log,
690 .configure_bridge = pseries_eeh_configure_bridge, 690 .configure_bridge = pseries_eeh_configure_bridge,
691 .read_config = pseries_eeh_read_config, 691 .read_config = pseries_eeh_read_config,
692 .write_config = pseries_eeh_write_config 692 .write_config = pseries_eeh_write_config,
693 .next_error = NULL,
694 .restore_config = NULL
693}; 695};
694 696
695/** 697/**
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index f253361552ae..33b552ffbe57 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -486,9 +486,10 @@ static void iommu_table_setparms(struct pci_controller *phb,
486 memset((void *)tbl->it_base, 0, *sizep); 486 memset((void *)tbl->it_base, 0, *sizep);
487 487
488 tbl->it_busno = phb->bus->number; 488 tbl->it_busno = phb->bus->number;
489 tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
489 490
490 /* Units of tce entries */ 491 /* Units of tce entries */
491 tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT; 492 tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift;
492 493
493 /* Test if we are going over 2GB of DMA space */ 494 /* Test if we are going over 2GB of DMA space */
494 if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) { 495 if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
@@ -499,7 +500,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
499 phb->dma_window_base_cur += phb->dma_window_size; 500 phb->dma_window_base_cur += phb->dma_window_size;
500 501
501 /* Set the tce table size - measured in entries */ 502 /* Set the tce table size - measured in entries */
502 tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT; 503 tbl->it_size = phb->dma_window_size >> tbl->it_page_shift;
503 504
504 tbl->it_index = 0; 505 tbl->it_index = 0;
505 tbl->it_blocksize = 16; 506 tbl->it_blocksize = 16;
@@ -537,11 +538,12 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb,
537 of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size); 538 of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);
538 539
539 tbl->it_busno = phb->bus->number; 540 tbl->it_busno = phb->bus->number;
541 tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
540 tbl->it_base = 0; 542 tbl->it_base = 0;
541 tbl->it_blocksize = 16; 543 tbl->it_blocksize = 16;
542 tbl->it_type = TCE_PCI; 544 tbl->it_type = TCE_PCI;
543 tbl->it_offset = offset >> IOMMU_PAGE_SHIFT; 545 tbl->it_offset = offset >> tbl->it_page_shift;
544 tbl->it_size = size >> IOMMU_PAGE_SHIFT; 546 tbl->it_size = size >> tbl->it_page_shift;
545} 547}
546 548
547static void pci_dma_bus_setup_pSeries(struct pci_bus *bus) 549static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
@@ -687,7 +689,8 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
687 iommu_table_setparms(phb, dn, tbl); 689 iommu_table_setparms(phb, dn, tbl);
688 PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node); 690 PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
689 iommu_register_group(tbl, pci_domain_nr(phb->bus), 0); 691 iommu_register_group(tbl, pci_domain_nr(phb->bus), 0);
690 set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table); 692 set_iommu_table_base_and_group(&dev->dev,
693 PCI_DN(dn)->iommu_table);
691 return; 694 return;
692 } 695 }
693 696
@@ -699,7 +702,8 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
699 dn = dn->parent; 702 dn = dn->parent;
700 703
701 if (dn && PCI_DN(dn)) 704 if (dn && PCI_DN(dn))
702 set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table); 705 set_iommu_table_base_and_group(&dev->dev,
706 PCI_DN(dn)->iommu_table);
703 else 707 else
704 printk(KERN_WARNING "iommu: Device %s has no iommu table\n", 708 printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
705 pci_name(dev)); 709 pci_name(dev));
@@ -717,21 +721,6 @@ static int __init disable_ddw_setup(char *str)
717 721
718early_param("disable_ddw", disable_ddw_setup); 722early_param("disable_ddw", disable_ddw_setup);
719 723
720static inline void __remove_ddw(struct device_node *np, const u32 *ddw_avail, u64 liobn)
721{
722 int ret;
723
724 ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
725 if (ret)
726 pr_warning("%s: failed to remove DMA window: rtas returned "
727 "%d to ibm,remove-pe-dma-window(%x) %llx\n",
728 np->full_name, ret, ddw_avail[2], liobn);
729 else
730 pr_debug("%s: successfully removed DMA window: rtas returned "
731 "%d to ibm,remove-pe-dma-window(%x) %llx\n",
732 np->full_name, ret, ddw_avail[2], liobn);
733}
734
735static void remove_ddw(struct device_node *np) 724static void remove_ddw(struct device_node *np)
736{ 725{
737 struct dynamic_dma_window_prop *dwp; 726 struct dynamic_dma_window_prop *dwp;
@@ -761,7 +750,15 @@ static void remove_ddw(struct device_node *np)
761 pr_debug("%s successfully cleared tces in window.\n", 750 pr_debug("%s successfully cleared tces in window.\n",
762 np->full_name); 751 np->full_name);
763 752
764 __remove_ddw(np, ddw_avail, liobn); 753 ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
754 if (ret)
755 pr_warning("%s: failed to remove direct window: rtas returned "
756 "%d to ibm,remove-pe-dma-window(%x) %llx\n",
757 np->full_name, ret, ddw_avail[2], liobn);
758 else
759 pr_debug("%s: successfully removed direct window: rtas returned "
760 "%d to ibm,remove-pe-dma-window(%x) %llx\n",
761 np->full_name, ret, ddw_avail[2], liobn);
765 762
766delprop: 763delprop:
767 ret = of_remove_property(np, win64); 764 ret = of_remove_property(np, win64);
@@ -790,68 +787,33 @@ static u64 find_existing_ddw(struct device_node *pdn)
790 return dma_addr; 787 return dma_addr;
791} 788}
792 789
793static void __restore_default_window(struct eeh_dev *edev,
794 u32 ddw_restore_token)
795{
796 u32 cfg_addr;
797 u64 buid;
798 int ret;
799
800 /*
801 * Get the config address and phb buid of the PE window.
802 * Rely on eeh to retrieve this for us.
803 * Retrieve them from the pci device, not the node with the
804 * dma-window property
805 */
806 cfg_addr = edev->config_addr;
807 if (edev->pe_config_addr)
808 cfg_addr = edev->pe_config_addr;
809 buid = edev->phb->buid;
810
811 do {
812 ret = rtas_call(ddw_restore_token, 3, 1, NULL, cfg_addr,
813 BUID_HI(buid), BUID_LO(buid));
814 } while (rtas_busy_delay(ret));
815 pr_info("ibm,reset-pe-dma-windows(%x) %x %x %x returned %d\n",
816 ddw_restore_token, cfg_addr, BUID_HI(buid), BUID_LO(buid), ret);
817}
818
819static int find_existing_ddw_windows(void) 790static int find_existing_ddw_windows(void)
820{ 791{
792 int len;
821 struct device_node *pdn; 793 struct device_node *pdn;
794 struct direct_window *window;
822 const struct dynamic_dma_window_prop *direct64; 795 const struct dynamic_dma_window_prop *direct64;
823 const u32 *ddw_extensions;
824 796
825 if (!firmware_has_feature(FW_FEATURE_LPAR)) 797 if (!firmware_has_feature(FW_FEATURE_LPAR))
826 return 0; 798 return 0;
827 799
828 for_each_node_with_property(pdn, DIRECT64_PROPNAME) { 800 for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
829 direct64 = of_get_property(pdn, DIRECT64_PROPNAME, NULL); 801 direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
830 if (!direct64) 802 if (!direct64)
831 continue; 803 continue;
832 804
833 /* 805 window = kzalloc(sizeof(*window), GFP_KERNEL);
834 * We need to ensure the IOMMU table is active when we 806 if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
835 * return from the IOMMU setup so that the common code 807 kfree(window);
836 * can clear the table or find the holes. To that end, 808 remove_ddw(pdn);
837 * first, remove any existing DDW configuration. 809 continue;
838 */ 810 }
839 remove_ddw(pdn);
840 811
841 /* 812 window->device = pdn;
842 * Second, if we are running on a new enough level of 813 window->prop = direct64;
843 * firmware where the restore API is present, use it to 814 spin_lock(&direct_window_list_lock);
844 * restore the 32-bit window, which was removed in 815 list_add(&window->list, &direct_window_list);
845 * create_ddw. 816 spin_unlock(&direct_window_list_lock);
846 * If the API is not present, then create_ddw couldn't
847 * have removed the 32-bit window in the first place, so
848 * removing the DDW configuration should be sufficient.
849 */
850 ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions",
851 NULL);
852 if (ddw_extensions && ddw_extensions[0] > 0)
853 __restore_default_window(of_node_to_eeh_dev(pdn),
854 ddw_extensions[1]);
855 } 817 }
856 818
857 return 0; 819 return 0;
@@ -921,12 +883,6 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
921 return ret; 883 return ret;
922} 884}
923 885
924static void restore_default_window(struct pci_dev *dev,
925 u32 ddw_restore_token)
926{
927 __restore_default_window(pci_dev_to_eeh_dev(dev), ddw_restore_token);
928}
929
930struct failed_ddw_pdn { 886struct failed_ddw_pdn {
931 struct device_node *pdn; 887 struct device_node *pdn;
932 struct list_head list; 888 struct list_head list;
@@ -954,13 +910,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
954 u64 dma_addr, max_addr; 910 u64 dma_addr, max_addr;
955 struct device_node *dn; 911 struct device_node *dn;
956 const u32 *uninitialized_var(ddw_avail); 912 const u32 *uninitialized_var(ddw_avail);
957 const u32 *uninitialized_var(ddw_extensions);
958 u32 ddw_restore_token = 0;
959 struct direct_window *window; 913 struct direct_window *window;
960 struct property *win64; 914 struct property *win64;
961 struct dynamic_dma_window_prop *ddwprop; 915 struct dynamic_dma_window_prop *ddwprop;
962 const void *dma_window = NULL;
963 unsigned long liobn, offset, size;
964 struct failed_ddw_pdn *fpdn; 916 struct failed_ddw_pdn *fpdn;
965 917
966 mutex_lock(&direct_window_init_mutex); 918 mutex_lock(&direct_window_init_mutex);
@@ -991,42 +943,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
991 */ 943 */
992 ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len); 944 ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
993 if (!ddw_avail || len < 3 * sizeof(u32)) 945 if (!ddw_avail || len < 3 * sizeof(u32))
994 goto out_unlock; 946 goto out_failed;
995
996 /*
997 * the extensions property is only required to exist in certain
998 * levels of firmware and later
999 * the ibm,ddw-extensions property is a list with the first
1000 * element containing the number of extensions and each
1001 * subsequent entry is a value corresponding to that extension
1002 */
1003 ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions", &len);
1004 if (ddw_extensions) {
1005 /*
1006 * each new defined extension length should be added to
1007 * the top of the switch so the "earlier" entries also
1008 * get picked up
1009 */
1010 switch (ddw_extensions[0]) {
1011 /* ibm,reset-pe-dma-windows */
1012 case 1:
1013 ddw_restore_token = ddw_extensions[1];
1014 break;
1015 }
1016 }
1017
1018 /*
1019 * Only remove the existing DMA window if we can restore back to
1020 * the default state. Removing the existing window maximizes the
1021 * resources available to firmware for dynamic window creation.
1022 */
1023 if (ddw_restore_token) {
1024 dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
1025 of_parse_dma_window(pdn, dma_window, &liobn, &offset, &size);
1026 __remove_ddw(pdn, ddw_avail, liobn);
1027 }
1028 947
1029 /* 948 /*
1030 * Query if there is a second window of size to map the 949 * Query if there is a second window of size to map the
1031 * whole partition. Query returns number of windows, largest 950 * whole partition. Query returns number of windows, largest
1032 * block assigned to PE (partition endpoint), and two bitmasks 951 * block assigned to PE (partition endpoint), and two bitmasks
@@ -1035,7 +954,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
1035 dn = pci_device_to_OF_node(dev); 954 dn = pci_device_to_OF_node(dev);
1036 ret = query_ddw(dev, ddw_avail, &query); 955 ret = query_ddw(dev, ddw_avail, &query);
1037 if (ret != 0) 956 if (ret != 0)
1038 goto out_restore_window; 957 goto out_failed;
1039 958
1040 if (query.windows_available == 0) { 959 if (query.windows_available == 0) {
1041 /* 960 /*
@@ -1044,7 +963,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
1044 * trading in for a larger page size. 963 * trading in for a larger page size.
1045 */ 964 */
1046 dev_dbg(&dev->dev, "no free dynamic windows"); 965 dev_dbg(&dev->dev, "no free dynamic windows");
1047 goto out_restore_window; 966 goto out_failed;
1048 } 967 }
1049 if (be32_to_cpu(query.page_size) & 4) { 968 if (be32_to_cpu(query.page_size) & 4) {
1050 page_shift = 24; /* 16MB */ 969 page_shift = 24; /* 16MB */
@@ -1055,7 +974,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
1055 } else { 974 } else {
1056 dev_dbg(&dev->dev, "no supported direct page size in mask %x", 975 dev_dbg(&dev->dev, "no supported direct page size in mask %x",
1057 query.page_size); 976 query.page_size);
1058 goto out_restore_window; 977 goto out_failed;
1059 } 978 }
1060 /* verify the window * number of ptes will map the partition */ 979 /* verify the window * number of ptes will map the partition */
1061 /* check largest block * page size > max memory hotplug addr */ 980 /* check largest block * page size > max memory hotplug addr */
@@ -1064,14 +983,14 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
1064 dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u " 983 dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u "
1065 "%llu-sized pages\n", max_addr, query.largest_available_block, 984 "%llu-sized pages\n", max_addr, query.largest_available_block,
1066 1ULL << page_shift); 985 1ULL << page_shift);
1067 goto out_restore_window; 986 goto out_failed;
1068 } 987 }
1069 len = order_base_2(max_addr); 988 len = order_base_2(max_addr);
1070 win64 = kzalloc(sizeof(struct property), GFP_KERNEL); 989 win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
1071 if (!win64) { 990 if (!win64) {
1072 dev_info(&dev->dev, 991 dev_info(&dev->dev,
1073 "couldn't allocate property for 64bit dma window\n"); 992 "couldn't allocate property for 64bit dma window\n");
1074 goto out_restore_window; 993 goto out_failed;
1075 } 994 }
1076 win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL); 995 win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
1077 win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL); 996 win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
@@ -1133,9 +1052,7 @@ out_free_prop:
1133 kfree(win64->value); 1052 kfree(win64->value);
1134 kfree(win64); 1053 kfree(win64);
1135 1054
1136out_restore_window: 1055out_failed:
1137 if (ddw_restore_token)
1138 restore_default_window(dev, ddw_restore_token);
1139 1056
1140 fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL); 1057 fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
1141 if (!fpdn) 1058 if (!fpdn)
@@ -1193,7 +1110,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
1193 pr_debug(" found DMA window, table: %p\n", pci->iommu_table); 1110 pr_debug(" found DMA window, table: %p\n", pci->iommu_table);
1194 } 1111 }
1195 1112
1196 set_iommu_table_base(&dev->dev, pci->iommu_table); 1113 set_iommu_table_base_and_group(&dev->dev, pci->iommu_table);
1197} 1114}
1198 1115
1199static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask) 1116static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
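A stand-alone C sketch, not part of the patch: the enable_ddw() hunks above only rename the error path, the window sizing itself is unchanged, so it may help to see it in isolation. The firmware page-size mask is decoded into the largest supported direct-map page shift and the window length comes from order_base_2() of the highest RAM address. The bit layout below (bit 2 = 16MB, bit 1 = 64KB, bit 0 = 4KB) is inferred from the visible 16MB branch and should be treated as an assumption, as should the example values in main().

    #include <stdio.h>
    #include <stdint.h>

    /* Decode the query.page_size capability mask into a page shift. */
    static int pick_page_shift(uint32_t page_size_mask)
    {
        if (page_size_mask & 4)
            return 24;              /* 16MB pages */
        if (page_size_mask & 2)
            return 16;              /* 64KB pages (assumed bit) */
        if (page_size_mask & 1)
            return 12;              /* 4KB pages (assumed bit) */
        return -1;                  /* no supported direct page size */
    }

    /* Smallest n such that (1ULL << n) >= x; valid for 1 <= x <= 1ULL << 63. */
    static unsigned int order_base_2(uint64_t x)
    {
        unsigned int n = 0;

        while ((1ULL << n) < x)
            n++;
        return n;
    }

    int main(void)
    {
        uint64_t max_addr = 0x200000000ULL;     /* example: 8GB of addressable RAM */
        int shift = pick_page_shift(0x7);       /* example: all three sizes offered */

        if (shift < 0)
            return 1;
        printf("page_shift=%d window_order=%u\n", shift, order_base_2(max_addr));
        return 0;
    }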
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 4fca3def9db9..b02af9ef3ff6 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -92,7 +92,7 @@ void vpa_init(int cpu)
92 * PAPR says this feature is SLB-Buffer but firmware never 92 * PAPR says this feature is SLB-Buffer but firmware never
93 * reports that. All SPLPAR support SLB shadow buffer. 93 * reports that. All SPLPAR support SLB shadow buffer.
94 */ 94 */
95 addr = __pa(&slb_shadow[cpu]); 95 addr = __pa(paca[cpu].slb_shadow_ptr);
96 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 96 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
97 ret = register_slb_shadow(hwcpu, addr); 97 ret = register_slb_shadow(hwcpu, addr);
98 if (ret) 98 if (ret)
@@ -153,7 +153,8 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
153 153
154 /* Make pHyp happy */ 154 /* Make pHyp happy */
155 if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU)) 155 if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU))
156 hpte_r &= ~_PAGE_COHERENT; 156 hpte_r &= ~HPTE_R_M;
157
157 if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N)) 158 if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
158 flags |= H_COALESCE_CAND; 159 flags |= H_COALESCE_CAND;
159 160
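For illustration only, not from the patch: the lpar.c hunk clears the memory-coherence bit from the HPTE when the mapping is cache-inhibited but not write-through; only the name of the cleared bit changed (HPTE_R_M instead of _PAGE_COHERENT). The flag values below are placeholders, not the real PowerPC definitions.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_NO_CACHE   0x1ULL  /* placeholder value */
    #define PAGE_WRITETHRU  0x2ULL  /* placeholder value */
    #define HPTE_R_M        0x4ULL  /* placeholder for the coherence bit */

    /* Cache-inhibited, non-write-through mappings drop the coherence bit
     * (the "make pHyp happy" case in the hunk above). */
    static uint64_t fixup_hpte_r(uint64_t hpte_r, uint64_t rflags)
    {
        if ((rflags & PAGE_NO_CACHE) && !(rflags & PAGE_WRITETHRU))
            hpte_r &= ~HPTE_R_M;
        return hpte_r;
    }

    int main(void)
    {
        printf("0x%llx\n", (unsigned long long)fixup_hpte_r(0x7, PAGE_NO_CACHE));
        return 0;
    }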
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index 94134a5aecaa..002d5b4112f2 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -17,7 +17,6 @@
17#include <asm/reg.h> 17#include <asm/reg.h>
18#include <asm/machdep.h> 18#include <asm/machdep.h>
19#include <asm/firmware.h> 19#include <asm/firmware.h>
20#include <asm/runlatch.h>
21#include <asm/plpar_wrappers.h> 20#include <asm/plpar_wrappers.h>
22 21
23struct cpuidle_driver pseries_idle_driver = { 22struct cpuidle_driver pseries_idle_driver = {
@@ -62,7 +61,6 @@ static int snooze_loop(struct cpuidle_device *dev,
62 set_thread_flag(TIF_POLLING_NRFLAG); 61 set_thread_flag(TIF_POLLING_NRFLAG);
63 62
64 while ((!need_resched()) && cpu_online(cpu)) { 63 while ((!need_resched()) && cpu_online(cpu)) {
65 ppc64_runlatch_off();
66 HMT_low(); 64 HMT_low();
67 HMT_very_low(); 65 HMT_very_low();
68 } 66 }
@@ -102,7 +100,6 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
102 idle_loop_prolog(&in_purr); 100 idle_loop_prolog(&in_purr);
103 get_lppaca()->donate_dedicated_cpu = 1; 101 get_lppaca()->donate_dedicated_cpu = 1;
104 102
105 ppc64_runlatch_off();
106 HMT_medium(); 103 HMT_medium();
107 check_and_cede_processor(); 104 check_and_cede_processor();
108 105
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 6f76ae417f47..8e639d7cbda7 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -72,7 +72,7 @@
72 72
73int CMO_PrPSP = -1; 73int CMO_PrPSP = -1;
74int CMO_SecPSP = -1; 74int CMO_SecPSP = -1;
75unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT); 75unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
76EXPORT_SYMBOL(CMO_PageSize); 76EXPORT_SYMBOL(CMO_PageSize);
77 77
78int fwnmi_active; /* TRUE if an FWNMI handler is present */ 78int fwnmi_active; /* TRUE if an FWNMI handler is present */
@@ -569,7 +569,7 @@ void pSeries_cmo_feature_init(void)
569{ 569{
570 char *ptr, *key, *value, *end; 570 char *ptr, *key, *value, *end;
571 int call_status; 571 int call_status;
572 int page_order = IOMMU_PAGE_SHIFT; 572 int page_order = IOMMU_PAGE_SHIFT_4K;
573 573
574 pr_debug(" -> fw_cmo_feature_init()\n"); 574 pr_debug(" -> fw_cmo_feature_init()\n");
575 spin_lock(&rtas_data_buf_lock); 575 spin_lock(&rtas_data_buf_lock);
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.c b/arch/powerpc/platforms/wsp/wsp_pci.c
index 62cb527493e7..9a15e5b39bb8 100644
--- a/arch/powerpc/platforms/wsp/wsp_pci.c
+++ b/arch/powerpc/platforms/wsp/wsp_pci.c
@@ -260,7 +260,7 @@ static int tce_build_wsp(struct iommu_table *tbl, long index, long npages,
260 *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; 260 *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
261 261
262 dma_debug("[DMA] TCE %p set to 0x%016llx (dma addr: 0x%lx)\n", 262 dma_debug("[DMA] TCE %p set to 0x%016llx (dma addr: 0x%lx)\n",
263 tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT); 263 tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT_4K);
264 264
265 uaddr += TCE_PAGE_SIZE; 265 uaddr += TCE_PAGE_SIZE;
266 index++; 266 index++;
@@ -381,8 +381,9 @@ static struct wsp_dma_table *wsp_pci_create_dma32_table(struct wsp_phb *phb,
381 381
382 /* Init bits and pieces */ 382 /* Init bits and pieces */
383 tbl->table.it_blocksize = 16; 383 tbl->table.it_blocksize = 16;
384 tbl->table.it_offset = addr >> IOMMU_PAGE_SHIFT; 384 tbl->table.it_page_shift = IOMMU_PAGE_SHIFT_4K;
385 tbl->table.it_size = size >> IOMMU_PAGE_SHIFT; 385 tbl->table.it_offset = addr >> tbl->table.it_page_shift;
386 tbl->table.it_size = size >> tbl->table.it_page_shift;
386 387
387 /* 388 /*
388 * It's already blank but we clear it anyway. 389 * It's already blank but we clear it anyway.
@@ -449,8 +450,8 @@ static void wsp_pci_dma_dev_setup(struct pci_dev *pdev)
449 if (table) { 450 if (table) {
450 pr_info("%s: Setup iommu: 32-bit DMA region 0x%08lx..0x%08lx\n", 451 pr_info("%s: Setup iommu: 32-bit DMA region 0x%08lx..0x%08lx\n",
451 pci_name(pdev), 452 pci_name(pdev),
452 table->table.it_offset << IOMMU_PAGE_SHIFT, 453 table->table.it_offset << IOMMU_PAGE_SHIFT_4K,
453 (table->table.it_offset << IOMMU_PAGE_SHIFT) 454 (table->table.it_offset << IOMMU_PAGE_SHIFT_4K)
454 + phb->dma32_region_size - 1); 455 + phb->dma32_region_size - 1);
455 archdata->dma_data.iommu_table_base = &table->table; 456 archdata->dma_data.iommu_table_base = &table->table;
456 return; 457 return;
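A stand-alone sketch, not part of the patch: with it_page_shift now carried in the iommu table itself, wsp_pci derives the offset and size from that per-table value instead of the old global IOMMU_PAGE_SHIFT. The struct below is a stub with only the fields used here; the addresses are examples.

    #include <stdio.h>
    #include <stdint.h>

    struct iommu_table_stub {
        unsigned int it_page_shift;
        unsigned long it_offset;        /* in IOMMU pages */
        unsigned long it_size;          /* in IOMMU pages */
    };

    static void init_table(struct iommu_table_stub *tbl, uint64_t addr,
                           uint64_t size, unsigned int shift)
    {
        tbl->it_page_shift = shift;
        tbl->it_offset = addr >> tbl->it_page_shift;
        tbl->it_size = size >> tbl->it_page_shift;
    }

    int main(void)
    {
        struct iommu_table_stub tbl;

        /* Example: a 1GB window starting at 2GB, mapped with 4K IOMMU pages. */
        init_table(&tbl, 0x80000000ULL, 0x40000000ULL, 12);
        printf("offset=%lu pages, size=%lu pages\n", tbl.it_offset, tbl.it_size);
        return 0;
    }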
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index 13ec968be4c7..7baa70d6dc01 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -19,7 +19,7 @@ config PPC_MSI_BITMAP
19 default y if MPIC 19 default y if MPIC
20 default y if FSL_PCI 20 default y if FSL_PCI
21 default y if PPC4xx_MSI 21 default y if PPC4xx_MSI
22 default y if POWERNV_MSI 22 default y if PPC_POWERNV
23 23
24source "arch/powerpc/sysdev/xics/Kconfig" 24source "arch/powerpc/sysdev/xics/Kconfig"
25 25
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index 10386b676d87..a11bd1d433ad 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -27,7 +27,6 @@
27 */ 27 */
28 28
29#include <linux/stddef.h> 29#include <linux/stddef.h>
30#include <linux/init.h>
31#include <linux/sched.h> 30#include <linux/sched.h>
32#include <linux/signal.h> 31#include <linux/signal.h>
33#include <linux/irq.h> 32#include <linux/irq.h>
diff --git a/arch/powerpc/sysdev/fsl_ifc.c b/arch/powerpc/sysdev/fsl_ifc.c
index d7fc72239144..fbc885b31946 100644
--- a/arch/powerpc/sysdev/fsl_ifc.c
+++ b/arch/powerpc/sysdev/fsl_ifc.c
@@ -19,7 +19,6 @@
19 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */ 21 */
22#include <linux/init.h>
23#include <linux/module.h> 22#include <linux/module.h>
24#include <linux/kernel.h> 23#include <linux/kernel.h>
25#include <linux/compiler.h> 24#include <linux/compiler.h>
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index 6bc5a546d49f..d631022ffb4b 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -214,10 +214,14 @@ static irqreturn_t fsl_lbc_ctrl_irq(int irqno, void *data)
214 struct fsl_lbc_ctrl *ctrl = data; 214 struct fsl_lbc_ctrl *ctrl = data;
215 struct fsl_lbc_regs __iomem *lbc = ctrl->regs; 215 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
216 u32 status; 216 u32 status;
217 unsigned long flags;
217 218
219 spin_lock_irqsave(&fsl_lbc_lock, flags);
218 status = in_be32(&lbc->ltesr); 220 status = in_be32(&lbc->ltesr);
219 if (!status) 221 if (!status) {
222 spin_unlock_irqrestore(&fsl_lbc_lock, flags);
220 return IRQ_NONE; 223 return IRQ_NONE;
224 }
221 225
222 out_be32(&lbc->ltesr, LTESR_CLEAR); 226 out_be32(&lbc->ltesr, LTESR_CLEAR);
223 out_be32(&lbc->lteatr, 0); 227 out_be32(&lbc->lteatr, 0);
@@ -260,6 +264,7 @@ static irqreturn_t fsl_lbc_ctrl_irq(int irqno, void *data)
260 if (status & ~LTESR_MASK) 264 if (status & ~LTESR_MASK)
261 dev_err(ctrl->dev, "Unknown error: " 265 dev_err(ctrl->dev, "Unknown error: "
262 "LTESR 0x%08X\n", status); 266 "LTESR 0x%08X\n", status);
267 spin_unlock_irqrestore(&fsl_lbc_lock, flags);
263 return IRQ_HANDLED; 268 return IRQ_HANDLED;
264} 269}
265 270
@@ -298,8 +303,8 @@ static int fsl_lbc_ctrl_probe(struct platform_device *dev)
298 goto err; 303 goto err;
299 } 304 }
300 305
301 fsl_lbc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0); 306 fsl_lbc_ctrl_dev->irq[0] = irq_of_parse_and_map(dev->dev.of_node, 0);
302 if (fsl_lbc_ctrl_dev->irq == NO_IRQ) { 307 if (!fsl_lbc_ctrl_dev->irq[0]) {
303 dev_err(&dev->dev, "failed to get irq resource\n"); 308 dev_err(&dev->dev, "failed to get irq resource\n");
304 ret = -ENODEV; 309 ret = -ENODEV;
305 goto err; 310 goto err;
@@ -311,20 +316,34 @@ static int fsl_lbc_ctrl_probe(struct platform_device *dev)
311 if (ret < 0) 316 if (ret < 0)
312 goto err; 317 goto err;
313 318
314 ret = request_irq(fsl_lbc_ctrl_dev->irq, fsl_lbc_ctrl_irq, 0, 319 ret = request_irq(fsl_lbc_ctrl_dev->irq[0], fsl_lbc_ctrl_irq, 0,
315 "fsl-lbc", fsl_lbc_ctrl_dev); 320 "fsl-lbc", fsl_lbc_ctrl_dev);
316 if (ret != 0) { 321 if (ret != 0) {
317 dev_err(&dev->dev, "failed to install irq (%d)\n", 322 dev_err(&dev->dev, "failed to install irq (%d)\n",
318 fsl_lbc_ctrl_dev->irq); 323 fsl_lbc_ctrl_dev->irq[0]);
319 ret = fsl_lbc_ctrl_dev->irq; 324 ret = fsl_lbc_ctrl_dev->irq[0];
320 goto err; 325 goto err;
321 } 326 }
322 327
328 fsl_lbc_ctrl_dev->irq[1] = irq_of_parse_and_map(dev->dev.of_node, 1);
329 if (fsl_lbc_ctrl_dev->irq[1]) {
330 ret = request_irq(fsl_lbc_ctrl_dev->irq[1], fsl_lbc_ctrl_irq,
331 IRQF_SHARED, "fsl-lbc-err", fsl_lbc_ctrl_dev);
332 if (ret) {
333 dev_err(&dev->dev, "failed to install irq (%d)\n",
334 fsl_lbc_ctrl_dev->irq[1]);
335 ret = fsl_lbc_ctrl_dev->irq[1];
336 goto err1;
337 }
338 }
339
323 /* Enable interrupts for any detected events */ 340 /* Enable interrupts for any detected events */
324 out_be32(&fsl_lbc_ctrl_dev->regs->lteir, LTEIR_ENABLE); 341 out_be32(&fsl_lbc_ctrl_dev->regs->lteir, LTEIR_ENABLE);
325 342
326 return 0; 343 return 0;
327 344
345err1:
346 free_irq(fsl_lbc_ctrl_dev->irq[0], fsl_lbc_ctrl_dev);
328err: 347err:
329 iounmap(fsl_lbc_ctrl_dev->regs); 348 iounmap(fsl_lbc_ctrl_dev->regs);
330 kfree(fsl_lbc_ctrl_dev); 349 kfree(fsl_lbc_ctrl_dev);
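For illustration only, not from the patch: the fsl_lbc probe now requests an optional second interrupt and, if that fails, unwinds only the first one through a dedicated err1 label (the handler itself gains fsl_lbc_lock protection). The same goto-based unwind pattern, with the two IRQs reduced to plain allocations so the example runs anywhere:

    #include <stdio.h>
    #include <stdlib.h>

    static int probe(void)
    {
        char *res0, *res1;
        int ret;

        res0 = malloc(64);              /* stands in for the mandatory IRQ */
        if (!res0) {
            ret = -1;
            goto err;
        }

        res1 = malloc(64);              /* stands in for the optional second IRQ */
        if (!res1) {
            ret = -1;
            goto err1;                  /* release only what was acquired so far */
        }

        printf("probe ok\n");
        /* A real probe would keep both resources; freed here so the demo exits clean. */
        free(res1);
        free(res0);
        return 0;

    err1:
        free(res0);
    err:
        fprintf(stderr, "probe failed\n");
        return ret;
    }

    int main(void)
    {
        return probe() ? 1 : 0;
    }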
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 4dfd61df8aba..a625dcf26b2b 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -122,7 +122,7 @@ static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
122 * address width of the SoC such that we can address any internal 122 * address width of the SoC such that we can address any internal
123 * SoC address from across PCI if needed 123 * SoC address from across PCI if needed
124 */ 124 */
125 if ((dev->bus == &pci_bus_type) && 125 if ((dev_is_pci(dev)) &&
126 dma_mask >= DMA_BIT_MASK(MAX_PHYS_ADDR_BITS)) { 126 dma_mask >= DMA_BIT_MASK(MAX_PHYS_ADDR_BITS)) {
127 set_dma_ops(dev, &dma_direct_ops); 127 set_dma_ops(dev, &dma_direct_ops);
128 set_dma_offset(dev, pci64_dma_offset); 128 set_dma_offset(dev, pci64_dma_offset);
@@ -454,7 +454,7 @@ void fsl_pcibios_fixup_bus(struct pci_bus *bus)
454 } 454 }
455} 455}
456 456
457int __init fsl_add_bridge(struct platform_device *pdev, int is_primary) 457int fsl_add_bridge(struct platform_device *pdev, int is_primary)
458{ 458{
459 int len; 459 int len;
460 struct pci_controller *hose; 460 struct pci_controller *hose;
@@ -1035,6 +1035,7 @@ static const struct of_device_id pci_ids[] = {
1035 { .compatible = "fsl,mpc8548-pcie", }, 1035 { .compatible = "fsl,mpc8548-pcie", },
1036 { .compatible = "fsl,mpc8610-pci", }, 1036 { .compatible = "fsl,mpc8610-pci", },
1037 { .compatible = "fsl,mpc8641-pcie", }, 1037 { .compatible = "fsl,mpc8641-pcie", },
1038 { .compatible = "fsl,qoriq-pcie", },
1038 { .compatible = "fsl,qoriq-pcie-v2.1", }, 1039 { .compatible = "fsl,qoriq-pcie-v2.1", },
1039 { .compatible = "fsl,qoriq-pcie-v2.2", }, 1040 { .compatible = "fsl,qoriq-pcie-v2.2", },
1040 { .compatible = "fsl,qoriq-pcie-v2.3", }, 1041 { .compatible = "fsl,qoriq-pcie-v2.3", },
diff --git a/arch/powerpc/sysdev/ge/ge_pic.h b/arch/powerpc/sysdev/ge/ge_pic.h
index 6149916da3f4..908dbd9826b6 100644
--- a/arch/powerpc/sysdev/ge/ge_pic.h
+++ b/arch/powerpc/sysdev/ge/ge_pic.h
@@ -1,7 +1,6 @@
1#ifndef __GEF_PIC_H__ 1#ifndef __GEF_PIC_H__
2#define __GEF_PIC_H__ 2#define __GEF_PIC_H__
3 3
4#include <linux/init.h>
5 4
6void gef_pic_cascade(unsigned int, struct irq_desc *); 5void gef_pic_cascade(unsigned int, struct irq_desc *);
7unsigned int gef_pic_get_irq(void); 6unsigned int gef_pic_get_irq(void);
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index 997df6a7ab5d..45598da0b321 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -8,7 +8,6 @@
8 */ 8 */
9#undef DEBUG 9#undef DEBUG
10 10
11#include <linux/init.h>
12#include <linux/ioport.h> 11#include <linux/ioport.h>
13#include <linux/interrupt.h> 12#include <linux/interrupt.h>
14#include <linux/kernel.h> 13#include <linux/kernel.h>
diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c
index c6c8b526a4f6..1f6c570d66d4 100644
--- a/arch/powerpc/sysdev/indirect_pci.c
+++ b/arch/powerpc/sysdev/indirect_pci.c
@@ -152,10 +152,8 @@ static struct pci_ops indirect_pci_ops =
152 .write = indirect_write_config, 152 .write = indirect_write_config,
153}; 153};
154 154
155void __init 155void setup_indirect_pci(struct pci_controller *hose, resource_size_t cfg_addr,
156setup_indirect_pci(struct pci_controller* hose, 156 resource_size_t cfg_data, u32 flags)
157 resource_size_t cfg_addr,
158 resource_size_t cfg_data, u32 flags)
159{ 157{
160 resource_size_t base = cfg_addr & PAGE_MASK; 158 resource_size_t base = cfg_addr & PAGE_MASK;
161 void __iomem *mbase; 159 void __iomem *mbase;
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index b724622c3a0b..c4828c0be5bd 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -1,6 +1,5 @@
1#include <linux/kernel.h> 1#include <linux/kernel.h>
2#include <linux/stddef.h> 2#include <linux/stddef.h>
3#include <linux/init.h>
4#include <linux/sched.h> 3#include <linux/sched.h>
5#include <linux/signal.h> 4#include <linux/signal.h>
6#include <linux/irq.h> 5#include <linux/irq.h>
diff --git a/arch/powerpc/sysdev/mpic_timer.c b/arch/powerpc/sysdev/mpic_timer.c
index 22d7d57eead9..9d9b06217f8b 100644
--- a/arch/powerpc/sysdev/mpic_timer.c
+++ b/arch/powerpc/sysdev/mpic_timer.c
@@ -41,6 +41,7 @@
41#define MPIC_TIMER_TCR_ROVR_OFFSET 24 41#define MPIC_TIMER_TCR_ROVR_OFFSET 24
42 42
43#define TIMER_STOP 0x80000000 43#define TIMER_STOP 0x80000000
44#define GTCCR_TOG 0x80000000
44#define TIMERS_PER_GROUP 4 45#define TIMERS_PER_GROUP 4
45#define MAX_TICKS (~0U >> 1) 46#define MAX_TICKS (~0U >> 1)
46#define MAX_TICKS_CASCADE (~0U) 47#define MAX_TICKS_CASCADE (~0U)
@@ -96,8 +97,11 @@ static void convert_ticks_to_time(struct timer_group_priv *priv,
96 time->tv_sec = (__kernel_time_t)div_u64(ticks, priv->timerfreq); 97 time->tv_sec = (__kernel_time_t)div_u64(ticks, priv->timerfreq);
97 tmp_sec = (u64)time->tv_sec * (u64)priv->timerfreq; 98 tmp_sec = (u64)time->tv_sec * (u64)priv->timerfreq;
98 99
99 time->tv_usec = (__kernel_suseconds_t) 100 time->tv_usec = 0;
100 div_u64((ticks - tmp_sec) * 1000000, priv->timerfreq); 101
102 if (tmp_sec <= ticks)
103 time->tv_usec = (__kernel_suseconds_t)
104 div_u64((ticks - tmp_sec) * 1000000, priv->timerfreq);
101 105
102 return; 106 return;
103} 107}
@@ -327,11 +331,13 @@ void mpic_get_remain_time(struct mpic_timer *handle, struct timeval *time)
327 casc_priv = priv->timer[handle->num].cascade_handle; 331 casc_priv = priv->timer[handle->num].cascade_handle;
328 if (casc_priv) { 332 if (casc_priv) {
329 tmp_ticks = in_be32(&priv->regs[handle->num].gtccr); 333 tmp_ticks = in_be32(&priv->regs[handle->num].gtccr);
334 tmp_ticks &= ~GTCCR_TOG;
330 ticks = ((u64)tmp_ticks & UINT_MAX) * (u64)MAX_TICKS_CASCADE; 335 ticks = ((u64)tmp_ticks & UINT_MAX) * (u64)MAX_TICKS_CASCADE;
331 tmp_ticks = in_be32(&priv->regs[handle->num - 1].gtccr); 336 tmp_ticks = in_be32(&priv->regs[handle->num - 1].gtccr);
332 ticks += tmp_ticks; 337 ticks += tmp_ticks;
333 } else { 338 } else {
334 ticks = in_be32(&priv->regs[handle->num].gtccr); 339 ticks = in_be32(&priv->regs[handle->num].gtccr);
340 ticks &= ~GTCCR_TOG;
335 } 341 }
336 342
337 convert_ticks_to_time(priv, ticks, time); 343 convert_ticks_to_time(priv, ticks, time);
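A stand-alone sketch, not part of the patch: the mpic_timer fix masks the toggle bit (GTCCR_TOG) out of the current-count register before using it, and only computes the microsecond remainder when it cannot underflow. The conversion below mirrors convert_ticks_to_time() with plain C integer math; the 66.67 MHz timer frequency is an arbitrary example.

    #include <stdio.h>
    #include <stdint.h>

    #define GTCCR_TOG 0x80000000u       /* same value as in the hunk above */

    struct timeval_stub { int64_t tv_sec; int64_t tv_usec; };

    static void ticks_to_time(uint64_t ticks, uint64_t timerfreq,
                              struct timeval_stub *time)
    {
        uint64_t tmp_sec;

        time->tv_sec = ticks / timerfreq;
        tmp_sec = (uint64_t)time->tv_sec * timerfreq;

        time->tv_usec = 0;
        if (tmp_sec <= ticks)           /* defensive check, as in the patch */
            time->tv_usec = (ticks - tmp_sec) * 1000000 / timerfreq;
    }

    int main(void)
    {
        uint32_t gtccr = GTCCR_TOG | 123456789u;    /* toggle bit set by hardware */
        struct timeval_stub tv;

        ticks_to_time(gtccr & ~GTCCR_TOG, 66666666u, &tv);
        printf("%lld s %lld us\n", (long long)tv.tv_sec, (long long)tv.tv_usec);
        return 0;
    }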
diff --git a/arch/powerpc/sysdev/qe_lib/qe_io.c b/arch/powerpc/sysdev/qe_lib/qe_io.c
index a88807b3dd57..d09994164daf 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_io.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_io.c
@@ -16,7 +16,6 @@
16 16
17#include <linux/stddef.h> 17#include <linux/stddef.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/errno.h> 19#include <linux/errno.h>
21#include <linux/module.h> 20#include <linux/module.h>
22#include <linux/ioport.h> 21#include <linux/ioport.h>
diff --git a/arch/powerpc/sysdev/qe_lib/ucc.c b/arch/powerpc/sysdev/qe_lib/ucc.c
index 134b07d29435..621575b7e84a 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc.c
@@ -14,7 +14,6 @@
14 * option) any later version. 14 * option) any later version.
15 */ 15 */
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/errno.h> 17#include <linux/errno.h>
19#include <linux/stddef.h> 18#include <linux/stddef.h>
20#include <linux/spinlock.h> 19#include <linux/spinlock.h>
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_fast.c b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
index cceb2e366738..65aaf15032ae 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_fast.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
@@ -13,7 +13,6 @@
13 * option) any later version. 13 * option) any later version.
14 */ 14 */
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/errno.h> 16#include <linux/errno.h>
18#include <linux/slab.h> 17#include <linux/slab.h>
19#include <linux/stddef.h> 18#include <linux/stddef.h>
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
index 1c062f48f1ac..befaf1123f7f 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_slow.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
@@ -13,7 +13,6 @@
13 * option) any later version. 13 * option) any later version.
14 */ 14 */
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/errno.h> 16#include <linux/errno.h>
18#include <linux/slab.h> 17#include <linux/slab.h>
19#include <linux/stddef.h> 18#include <linux/stddef.h>
diff --git a/arch/powerpc/sysdev/udbg_memcons.c b/arch/powerpc/sysdev/udbg_memcons.c
index ce5a7b489e4b..9998c0de12d0 100644
--- a/arch/powerpc/sysdev/udbg_memcons.c
+++ b/arch/powerpc/sysdev/udbg_memcons.c
@@ -18,7 +18,6 @@
18 * 2 of the License, or (at your option) any later version. 18 * 2 of the License, or (at your option) any later version.
19 */ 19 */
20 20
21#include <linux/init.h>
22#include <linux/kernel.h> 21#include <linux/kernel.h>
23#include <asm/barrier.h> 22#include <asm/barrier.h>
24#include <asm/page.h> 23#include <asm/page.h>
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index df0fc5821469..c1917cf67c3d 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -12,7 +12,6 @@
12#include <linux/irq.h> 12#include <linux/irq.h>
13#include <linux/smp.h> 13#include <linux/smp.h>
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/init.h>
16#include <linux/cpu.h> 15#include <linux/cpu.h>
17#include <linux/of.h> 16#include <linux/of.h>
18 17
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index af9d3469fb99..a90731b3d44a 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2051,6 +2051,10 @@ static void dump_one_paca(int cpu)
2051 DUMP(p, stab_addr, "lx"); 2051 DUMP(p, stab_addr, "lx");
2052#endif 2052#endif
2053 DUMP(p, emergency_sp, "p"); 2053 DUMP(p, emergency_sp, "p");
2054#ifdef CONFIG_PPC_BOOK3S_64
2055 DUMP(p, mc_emergency_sp, "p");
2056 DUMP(p, in_mce, "x");
2057#endif
2054 DUMP(p, data_offset, "lx"); 2058 DUMP(p, data_offset, "lx");
2055 DUMP(p, hw_cpu_id, "x"); 2059 DUMP(p, hw_cpu_id, "x");
2056 DUMP(p, cpu_start, "x"); 2060 DUMP(p, cpu_start, "x");
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index 9ef32b3df91f..590214ba736c 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -133,7 +133,7 @@ static int wf_lm75_probe(struct i2c_client *client,
133 lm->inited = 0; 133 lm->inited = 0;
134 lm->ds1775 = ds1775; 134 lm->ds1775 = ds1775;
135 lm->i2c = client; 135 lm->i2c = client;
136 lm->sens.name = (char *)name; /* XXX fix constness in structure */ 136 lm->sens.name = name;
137 lm->sens.ops = &wf_lm75_ops; 137 lm->sens.ops = &wf_lm75_ops;
138 i2c_set_clientdata(client, lm); 138 i2c_set_clientdata(client, lm);
139 139
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index 945a25b2f31e..87e439b10318 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -95,7 +95,7 @@ static int wf_max6690_probe(struct i2c_client *client,
95 } 95 }
96 96
97 max->i2c = client; 97 max->i2c = client;
98 max->sens.name = (char *)name; /* XXX fix constness in structure */ 98 max->sens.name = name;
99 max->sens.ops = &wf_max6690_ops; 99 max->sens.ops = &wf_max6690_ops;
100 i2c_set_clientdata(client, max); 100 i2c_set_clientdata(client, max);
101 101
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index cde0fd941f0c..4be971590461 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1275,18 +1275,21 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1275{ 1275{
1276 struct net_device *netdev = dev_get_drvdata(&vdev->dev); 1276 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
1277 struct ibmveth_adapter *adapter; 1277 struct ibmveth_adapter *adapter;
1278 struct iommu_table *tbl;
1278 unsigned long ret; 1279 unsigned long ret;
1279 int i; 1280 int i;
1280 int rxqentries = 1; 1281 int rxqentries = 1;
1281 1282
1283 tbl = get_iommu_table_base(&vdev->dev);
1284
1282 /* netdev inits at probe time along with the structures we need below*/ 1285 /* netdev inits at probe time along with the structures we need below*/
1283 if (netdev == NULL) 1286 if (netdev == NULL)
1284 return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT); 1287 return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);
1285 1288
1286 adapter = netdev_priv(netdev); 1289 adapter = netdev_priv(netdev);
1287 1290
1288 ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; 1291 ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
1289 ret += IOMMU_PAGE_ALIGN(netdev->mtu); 1292 ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
1290 1293
1291 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { 1294 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1292 /* add the size of the active receive buffers */ 1295 /* add the size of the active receive buffers */
@@ -1294,11 +1297,12 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1294 ret += 1297 ret +=
1295 adapter->rx_buff_pool[i].size * 1298 adapter->rx_buff_pool[i].size *
1296 IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i]. 1299 IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
1297 buff_size); 1300 buff_size, tbl);
1298 rxqentries += adapter->rx_buff_pool[i].size; 1301 rxqentries += adapter->rx_buff_pool[i].size;
1299 } 1302 }
1300 /* add the size of the receive queue entries */ 1303 /* add the size of the receive queue entries */
1301 ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry)); 1304 ret += IOMMU_PAGE_ALIGN(
1305 rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);
1302 1306
1303 return ret; 1307 return ret;
1304} 1308}
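A stand-alone sketch, not part of the patch: IOMMU_PAGE_ALIGN() now takes the device's iommu table so the driver rounds buffer sizes up to that table's page size rather than a fixed 4K. The helper below uses the usual power-of-two align-up formula with a stub table type; the MTU value is an example.

    #include <stdio.h>
    #include <stdint.h>

    struct iommu_table_stub { unsigned int it_page_shift; };

    /* Round len up to the table's IOMMU page size (power of two assumed). */
    static uint64_t iommu_page_align(uint64_t len, const struct iommu_table_stub *tbl)
    {
        uint64_t page = 1ULL << tbl->it_page_shift;

        return (len + page - 1) & ~(page - 1);
    }

    int main(void)
    {
        struct iommu_table_stub tbl4k = { 12 }, tbl64k = { 16 };
        uint64_t mtu = 9000;            /* example jumbo-frame MTU */

        printf("4K-aligned: %llu, 64K-aligned: %llu\n",
               (unsigned long long)iommu_page_align(mtu, &tbl4k),
               (unsigned long long)iommu_page_align(mtu, &tbl64k));
        return 0;
    }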
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 978db344bda0..b24aa010f68c 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -366,7 +366,7 @@ config TRACE_SINK
366 "Trace data router for MIPI P1149.7 cJTAG standard". 366 "Trace data router for MIPI P1149.7 cJTAG standard".
367 367
368config PPC_EPAPR_HV_BYTECHAN 368config PPC_EPAPR_HV_BYTECHAN
369 tristate "ePAPR hypervisor byte channel driver" 369 bool "ePAPR hypervisor byte channel driver"
370 depends on PPC 370 depends on PPC
371 select EPAPR_PARAVIRT 371 select EPAPR_PARAVIRT
372 help 372 help
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index bdae7a04af75..a84788ba662c 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -81,7 +81,7 @@ static int tce_iommu_enable(struct tce_container *container)
81 * enforcing the limit based on the max that the guest can map. 81 * enforcing the limit based on the max that the guest can map.
82 */ 82 */
83 down_write(&current->mm->mmap_sem); 83 down_write(&current->mm->mmap_sem);
84 npages = (tbl->it_size << IOMMU_PAGE_SHIFT) >> PAGE_SHIFT; 84 npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
85 locked = current->mm->locked_vm + npages; 85 locked = current->mm->locked_vm + npages;
86 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 86 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
87 if (locked > lock_limit && !capable(CAP_IPC_LOCK)) { 87 if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
@@ -110,7 +110,7 @@ static void tce_iommu_disable(struct tce_container *container)
110 110
111 down_write(&current->mm->mmap_sem); 111 down_write(&current->mm->mmap_sem);
112 current->mm->locked_vm -= (container->tbl->it_size << 112 current->mm->locked_vm -= (container->tbl->it_size <<
113 IOMMU_PAGE_SHIFT) >> PAGE_SHIFT; 113 IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
114 up_write(&current->mm->mmap_sem); 114 up_write(&current->mm->mmap_sem);
115} 115}
116 116
@@ -174,8 +174,8 @@ static long tce_iommu_ioctl(void *iommu_data,
174 if (info.argsz < minsz) 174 if (info.argsz < minsz)
175 return -EINVAL; 175 return -EINVAL;
176 176
177 info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT; 177 info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT_4K;
178 info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT; 178 info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT_4K;
179 info.flags = 0; 179 info.flags = 0;
180 180
181 if (copy_to_user((void __user *)arg, &info, minsz)) 181 if (copy_to_user((void __user *)arg, &info, minsz))
@@ -205,8 +205,8 @@ static long tce_iommu_ioctl(void *iommu_data,
205 VFIO_DMA_MAP_FLAG_WRITE)) 205 VFIO_DMA_MAP_FLAG_WRITE))
206 return -EINVAL; 206 return -EINVAL;
207 207
208 if ((param.size & ~IOMMU_PAGE_MASK) || 208 if ((param.size & ~IOMMU_PAGE_MASK_4K) ||
209 (param.vaddr & ~IOMMU_PAGE_MASK)) 209 (param.vaddr & ~IOMMU_PAGE_MASK_4K))
210 return -EINVAL; 210 return -EINVAL;
211 211
212 /* iova is checked by the IOMMU API */ 212 /* iova is checked by the IOMMU API */
@@ -220,17 +220,17 @@ static long tce_iommu_ioctl(void *iommu_data,
220 if (ret) 220 if (ret)
221 return ret; 221 return ret;
222 222
223 for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT); ++i) { 223 for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT_4K); ++i) {
224 ret = iommu_put_tce_user_mode(tbl, 224 ret = iommu_put_tce_user_mode(tbl,
225 (param.iova >> IOMMU_PAGE_SHIFT) + i, 225 (param.iova >> IOMMU_PAGE_SHIFT_4K) + i,
226 tce); 226 tce);
227 if (ret) 227 if (ret)
228 break; 228 break;
229 tce += IOMMU_PAGE_SIZE; 229 tce += IOMMU_PAGE_SIZE_4K;
230 } 230 }
231 if (ret) 231 if (ret)
232 iommu_clear_tces_and_put_pages(tbl, 232 iommu_clear_tces_and_put_pages(tbl,
233 param.iova >> IOMMU_PAGE_SHIFT, i); 233 param.iova >> IOMMU_PAGE_SHIFT_4K, i);
234 234
235 iommu_flush_tce(tbl); 235 iommu_flush_tce(tbl);
236 236
@@ -256,17 +256,17 @@ static long tce_iommu_ioctl(void *iommu_data,
256 if (param.flags) 256 if (param.flags)
257 return -EINVAL; 257 return -EINVAL;
258 258
259 if (param.size & ~IOMMU_PAGE_MASK) 259 if (param.size & ~IOMMU_PAGE_MASK_4K)
260 return -EINVAL; 260 return -EINVAL;
261 261
262 ret = iommu_tce_clear_param_check(tbl, param.iova, 0, 262 ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
263 param.size >> IOMMU_PAGE_SHIFT); 263 param.size >> IOMMU_PAGE_SHIFT_4K);
264 if (ret) 264 if (ret)
265 return ret; 265 return ret;
266 266
267 ret = iommu_clear_tces_and_put_pages(tbl, 267 ret = iommu_clear_tces_and_put_pages(tbl,
268 param.iova >> IOMMU_PAGE_SHIFT, 268 param.iova >> IOMMU_PAGE_SHIFT_4K,
269 param.size >> IOMMU_PAGE_SHIFT); 269 param.size >> IOMMU_PAGE_SHIFT_4K);
270 iommu_flush_tce(tbl); 270 iommu_flush_tce(tbl);
271 271
272 return ret; 272 return ret;
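For illustration only, not from the patch: the locked-memory accounting above converts an IOMMU-page count into system pages by going through bytes, (entries << iommu_shift) >> system_shift, which is why the explicit _4K shift matters once the two page sizes can differ. The shifts in main() are example values (4K IOMMU pages, 64K system pages).

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t iommu_to_system_pages(uint64_t iommu_entries,
                                          unsigned int iommu_shift,
                                          unsigned int system_shift)
    {
        return (iommu_entries << iommu_shift) >> system_shift;
    }

    int main(void)
    {
        /* 512K entries of 4K each (a 2GB DMA window) expressed in 64K system pages. */
        printf("%llu system pages\n",
               (unsigned long long)iommu_to_system_pages(512 * 1024, 12, 16));
        return 0;
    }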
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d9992fc128ca..f28f46eade6a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1895,7 +1895,7 @@ static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
1895} 1895}
1896#endif 1896#endif
1897 1897
1898#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE 1898#ifdef CONFIG_NUMA_BALANCING
1899unsigned long change_prot_numa(struct vm_area_struct *vma, 1899unsigned long change_prot_numa(struct vm_area_struct *vma,
1900 unsigned long start, unsigned long end); 1900 unsigned long start, unsigned long end);
1901#endif 1901#endif
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 0beaee9dac1f..2b77058a7335 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -116,6 +116,7 @@ extern const void *of_flat_dt_match_machine(const void *default_match,
116extern void unflatten_device_tree(void); 116extern void unflatten_device_tree(void);
117extern void unflatten_and_copy_device_tree(void); 117extern void unflatten_and_copy_device_tree(void);
118extern void early_init_devtree(void *); 118extern void early_init_devtree(void *);
119extern void early_get_first_memblock_info(void *, phys_addr_t *);
119#else /* CONFIG_OF_FLATTREE */ 120#else /* CONFIG_OF_FLATTREE */
120static inline const char *of_flat_dt_get_machine_name(void) { return NULL; } 121static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
121static inline void unflatten_device_tree(void) {} 122static inline void unflatten_device_tree(void) {}
diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h
index 9696a5e2c437..6bdf8c61d221 100644
--- a/include/math-emu/op-common.h
+++ b/include/math-emu/op-common.h
@@ -685,7 +685,7 @@ do { \
685 else \ 685 else \
686 { \ 686 { \
687 r = 0; \ 687 r = 0; \
688 if (X##_s) \ 688 if (!X##_s) \
689 r = ~r; \ 689 r = ~r; \
690 } \ 690 } \
691 FP_SET_EXCEPTION(FP_EX_INVALID); \ 691 FP_SET_EXCEPTION(FP_EX_INVALID); \
@@ -743,12 +743,17 @@ do { \
743 } \ 743 } \
744 else \ 744 else \
745 { \ 745 { \
746 int _lz0, _lz1; \
746 if (X##_e <= -_FP_WORKBITS - 1) \ 747 if (X##_e <= -_FP_WORKBITS - 1) \
747 _FP_FRAC_SET_##wc(X, _FP_MINFRAC_##wc); \ 748 _FP_FRAC_SET_##wc(X, _FP_MINFRAC_##wc); \
748 else \ 749 else \
749 _FP_FRAC_SRS_##wc(X, _FP_FRACBITS_##fs - 1 - X##_e, \ 750 _FP_FRAC_SRS_##wc(X, _FP_FRACBITS_##fs - 1 - X##_e, \
750 _FP_WFRACBITS_##fs); \ 751 _FP_WFRACBITS_##fs); \
752 _FP_FRAC_CLZ_##wc(_lz0, X); \
751 _FP_ROUND(wc, X); \ 753 _FP_ROUND(wc, X); \
754 _FP_FRAC_CLZ_##wc(_lz1, X); \
755 if (_lz1 < _lz0) \
756 X##_e++; /* For overflow detection. */ \
752 _FP_FRAC_SRL_##wc(X, _FP_WORKBITS); \ 757 _FP_FRAC_SRL_##wc(X, _FP_WORKBITS); \
753 _FP_FRAC_ASSEMBLE_##wc(r, X, rsize); \ 758 _FP_FRAC_ASSEMBLE_##wc(r, X, rsize); \
754 } \ 759 } \
@@ -762,7 +767,7 @@ do { \
762 if (!rsigned) \ 767 if (!rsigned) \
763 { \ 768 { \
764 r = 0; \ 769 r = 0; \
765 if (X##_s) \ 770 if (!X##_s) \
766 r = ~r; \ 771 r = ~r; \
767 } \ 772 } \
768 else if (rsigned != 2) \ 773 else if (rsigned != 2) \
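A stand-alone sketch, not part of the patch: the flipped sign tests above make an out-of-range float-to-unsigned conversion saturate the right way round (negative inputs clamp to 0, positive overflow clamps to all-ones), while the added CLZ comparison catches the case where rounding carries out of the fraction. The clamping half of that, in plain C with a double standing in for the emulated format:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t sat_to_u32(double x)
    {
        if (x != x)
            return 0;               /* NaN: return 0 here; the emulator raises FP_EX_INVALID */
        if (x <= 0.0)
            return 0;               /* negative values clamp to 0 */
        if (x >= 4294967296.0)
            return ~0u;             /* overflow clamps to UINT32_MAX */
        return (uint32_t)x;
    }

    int main(void)
    {
        printf("%u %u %u\n", sat_to_u32(-3.5), sat_to_u32(1e20), sat_to_u32(12.9));
        return 0;
    }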
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 463b7fbf0d1d..36cb46cddf61 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -613,7 +613,7 @@ static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
613 return 0; 613 return 0;
614} 614}
615 615
616#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE 616#ifdef CONFIG_NUMA_BALANCING
617/* 617/*
618 * This is used to mark a range of virtual addresses to be inaccessible. 618 * This is used to mark a range of virtual addresses to be inaccessible.
619 * These are later cleared by a NUMA hinting fault. Depending on these 619 * These are later cleared by a NUMA hinting fault. Depending on these
@@ -627,7 +627,6 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
627 unsigned long addr, unsigned long end) 627 unsigned long addr, unsigned long end)
628{ 628{
629 int nr_updated; 629 int nr_updated;
630 BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE);
631 630
632 nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1); 631 nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
633 if (nr_updated) 632 if (nr_updated)
@@ -641,7 +640,7 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
641{ 640{
642 return 0; 641 return 0;
643} 642}
644#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */ 643#endif /* CONFIG_NUMA_BALANCING */
645 644
646/* 645/*
647 * Walk through page tables and collect pages to be migrated. 646 * Walk through page tables and collect pages to be migrated.
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 17855761e6b7..40610984a1b5 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -584,12 +584,16 @@ static int ignore_undef_symbol(struct elf_info *info, const char *symname)
584 if (strncmp(symname, "_restgpr_", sizeof("_restgpr_") - 1) == 0 || 584 if (strncmp(symname, "_restgpr_", sizeof("_restgpr_") - 1) == 0 ||
585 strncmp(symname, "_savegpr_", sizeof("_savegpr_") - 1) == 0 || 585 strncmp(symname, "_savegpr_", sizeof("_savegpr_") - 1) == 0 ||
586 strncmp(symname, "_rest32gpr_", sizeof("_rest32gpr_") - 1) == 0 || 586 strncmp(symname, "_rest32gpr_", sizeof("_rest32gpr_") - 1) == 0 ||
587 strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0) 587 strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0 ||
588 strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
589 strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
588 return 1; 590 return 1;
589 if (info->hdr->e_machine == EM_PPC64) 591 if (info->hdr->e_machine == EM_PPC64)
590 /* Special register function linked on all modules during final link of .ko */ 592 /* Special register function linked on all modules during final link of .ko */
591 if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 || 593 if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 ||
592 strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0) 594 strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 ||
595 strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
596 strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
593 return 1; 597 return 1;
594 /* Do not ignore this symbol */ 598 /* Do not ignore this symbol */
595 return 0; 599 return 0;
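A stand-alone sketch, not part of the patch: modpost whitelists the compiler- and linker-provided register save/restore helpers as expected undefined symbols by prefix-matching their names, and the hunk above simply adds the Altivec variants (_restvr_/_savevr_) to both lists. A minimal version of that prefix check:

    #include <stdio.h>
    #include <string.h>

    static int is_reg_helper(const char *symname)
    {
        static const char *prefixes[] = {
            "_restgpr_", "_savegpr_", "_rest32gpr_", "_save32gpr_",
            "_restvr_", "_savevr_",
        };
        size_t i;

        for (i = 0; i < sizeof(prefixes) / sizeof(prefixes[0]); i++)
            if (strncmp(symname, prefixes[i], strlen(prefixes[i])) == 0)
                return 1;
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", is_reg_helper("_savevr_20"), is_reg_helper("my_symbol"));
        return 0;
    }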